/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"

extern int rtx_equal_function_value_matters;
/* Specify which cpu to schedule for.  */

enum processor_type alpha_cpu;
static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */

enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */

enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */

enum alpha_fp_trap_mode alpha_fptm;

/* Strings decoded into the above options.  */

const char *alpha_cpu_string;	/* -mcpu= */
const char *alpha_tune_string;	/* -mtune= */
const char *alpha_tp_string;	/* -mtrap-precision=[p|s|i] */
const char *alpha_fprm_string;	/* -mfp-rounding-mode=[n|m|c|d] */
const char *alpha_fptm_string;	/* -mfp-trap-mode=[n|u|su|sui] */
const char *alpha_mlat_string;	/* -mmemory-latency= */

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */

struct alpha_compare alpha_compare;

/* Non-zero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */

static int alpha_sr_alias_set;

/* The assembler name of the current function.  */

static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Declarations of static functions.  */
static bool decl_in_text_section
  PARAMS ((tree));
static bool local_symbol_p
  PARAMS ((rtx));
static void alpha_set_memflags_1
  PARAMS ((rtx, int, int, int));
static rtx alpha_emit_set_const_1
  PARAMS ((rtx, enum machine_mode, HOST_WIDE_INT, int));
static void alpha_expand_unaligned_load_words
  PARAMS ((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
static void alpha_expand_unaligned_store_words
  PARAMS ((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
static void alpha_sa_mask
  PARAMS ((unsigned long *imaskP, unsigned long *fmaskP));
static int find_lo_sum
  PARAMS ((rtx *, void *));
static int alpha_does_function_need_gp
  PARAMS ((void));
static int alpha_ra_ever_killed
  PARAMS ((void));
static const char *get_trap_mode_suffix
  PARAMS ((void));
static const char *get_round_mode_suffix
  PARAMS ((void));
static rtx set_frame_related_p
  PARAMS ((void));
static const char *alpha_lookup_xfloating_lib_func
  PARAMS ((enum rtx_code));
static int alpha_compute_xfloating_mode_arg
  PARAMS ((enum rtx_code, enum alpha_fp_rounding_mode));
static void alpha_emit_xfloating_libcall
  PARAMS ((const char *, rtx, rtx [], int, rtx));
static rtx alpha_emit_xfloating_compare
  PARAMS ((enum rtx_code, rtx, rtx));
static void alpha_output_function_end_prologue
  PARAMS ((FILE *));
static int alpha_adjust_cost
  PARAMS ((rtx, rtx, rtx, int));
static int alpha_issue_rate
  PARAMS ((void));
static int alpha_variable_issue
  PARAMS ((FILE *, int, rtx, int));

#if TARGET_ABI_UNICOSMK
static void alpha_init_machine_status
  PARAMS ((struct function *p));
static void alpha_mark_machine_status
  PARAMS ((struct function *p));
static void alpha_free_machine_status
  PARAMS ((struct function *p));
#endif

static void unicosmk_output_deferred_case_vectors PARAMS ((FILE *));
static void unicosmk_gen_dsib PARAMS ((unsigned long *imaskP));
static void unicosmk_output_ssib PARAMS ((FILE *, const char *));
static int unicosmk_need_dex PARAMS ((rtx));
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#else
#define NUM_ARGS current_function_args_info
#endif
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
const struct attribute_spec vms_attribute_table[];
static unsigned int vms_section_type_flags PARAMS ((tree, const char *, int));
static void vms_asm_named_section PARAMS ((const char *, unsigned int));
static void vms_asm_out_constructor PARAMS ((rtx, int));
static void vms_asm_out_destructor PARAMS ((rtx, int));
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif

#if TARGET_ABI_UNICOSMK
static void unicosmk_asm_named_section PARAMS ((const char *, unsigned int));
static void unicosmk_insert_attributes PARAMS ((tree, tree *));
static unsigned int unicosmk_section_type_flags PARAMS ((tree, const char *,
							 int));
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
#endif

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE alpha_variable_issue

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse target option strings.  */

  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
#define EV5_MASK (MASK_CPU_EV5)
#define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
    { "ev4",      PROCESSOR_EV4, 0 },
    { "ev45",     PROCESSOR_EV4, 0 },
    { "21064",    PROCESSOR_EV4, 0 },
    { "ev5",      PROCESSOR_EV5, EV5_MASK },
    { "21164",    PROCESSOR_EV5, EV5_MASK },
    { "ev56",     PROCESSOR_EV5, EV5_MASK|MASK_BWX },
    { "21164a",   PROCESSOR_EV5, EV5_MASK|MASK_BWX },
    { "pca56",    PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "21164PC",  PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "21164pc",  PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "ev6",      PROCESSOR_EV6, EV6_MASK },
    { "21264",    PROCESSOR_EV6, EV6_MASK },
    { "ev67",     PROCESSOR_EV6, EV6_MASK|MASK_CIX },
    { "21264a",   PROCESSOR_EV6, EV6_MASK|MASK_CIX },
  };
  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    warning ("-f%s ignored for Unicos/Mk (not supported)",
             (flag_pic > 1) ? "PIC" : "pic");

  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

      if (TARGET_ABI_UNICOSMK)
        warning ("-mieee not supported on Unicos/Mk");
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;

  if (TARGET_IEEE_WITH_INEXACT)
      if (TARGET_ABI_UNICOSMK)
        warning ("-mieee-with-inexact not supported on Unicos/Mk");
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
      else
        error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
  if (alpha_fprm_string)
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
        alpha_fprm = ALPHA_FPRM_DYN;
      else
        error ("bad value `%s' for -mfp-rounding-mode switch",
               alpha_fprm_string);
  if (alpha_fptm_string)
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
      else
        error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
  alpha_cpu
    = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
      : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);

  if (alpha_cpu_string)
      for (i = 0; cpu_table[i].name; i++)
        if (! strcmp (alpha_cpu_string, cpu_table[i].name))
            alpha_cpu = cpu_table[i].processor;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX
                               | MASK_CPU_EV5 | MASK_CPU_EV6);
            target_flags |= cpu_table[i].flags;

      if (! cpu_table[i].name)
        error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
  if (alpha_tune_string)
      for (i = 0; cpu_table[i].name; i++)
        if (! strcmp (alpha_tune_string, cpu_table[i].name))
            alpha_cpu = cpu_table[i].processor;

      if (! cpu_table[i].name)
        error ("bad value `%s' for -mtune switch", alpha_tune_string);
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
      warning ("trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && ! TARGET_CPU_EV6)
      warning ("fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;

      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps.  Which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;

  if (TARGET_FLOAT_VAX)
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
          warning ("rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
      if (alpha_fptm == ALPHA_FPTM_SUI)
          warning ("trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
  if (!alpha_mlat_string)
    alpha_mlat_string = "L1";

  if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
      && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
  else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
           && ISDIGIT ((unsigned char)alpha_mlat_string[1])
           && alpha_mlat_string[2] == '\0')
      static int const cache_latency[][4] =
          { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
          { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
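      /* Illustration (assuming PROCESSOR_EV4/EV5/EV6 index rows 0-2 of the
         table above): "-mmemory-latency=L2" on an ev5 target gives lat = 2,
         so cache_latency[PROCESSOR_EV5][2 - 1] = 12 cycles; an "L3" request
         on ev4 hits the -1 entry and is rejected with a warning below.  */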
      lat = alpha_mlat_string[1] - '0';
      if (lat <= 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
          warning ("L%d cache latency unknown for %s",
                   lat, alpha_cpu_name[alpha_cpu]);
        lat = cache_latency[alpha_cpu][lat-1];
  else if (! strcmp (alpha_mlat_string, "main"))
      /* Most current memories have about 370ns latency.  This is
         a reasonable guess for a fast cpu.  */
      warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);

  alpha_memory_latency = lat;
  /* Default the definition of "small data" to 8 bytes.  */

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
      if (align_loops <= 0)
      if (align_jumps <= 0)
  if (align_functions <= 0)
    align_functions = 16;

  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

#if TARGET_ABI_UNICOSMK
  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;
  mark_machine_status = alpha_mark_machine_status;
  free_machine_status = alpha_free_machine_status;
#endif
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */
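/* For example (illustrative values), 0x00000000ffffff00 consists only of
   0x00 and 0xff bytes and is accepted, while 0x0000000000ff00f0 is rejected
   because of the 0xf0 byte.  */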
  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;
/* Returns 1 if OP is either the constant zero or a register.  If a
   register, it must be in the proper mode unless MODE is VOIDmode.  */

reg_or_0_operand (op, mode)
     enum machine_mode mode;
  return op == const0_rtx || register_operand (op, mode);

/* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
   any register.  */

reg_or_6bit_operand (op, mode)
     enum machine_mode mode;
  return ((GET_CODE (op) == CONST_INT
           && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
          || register_operand (op, mode));

/* Return 1 if OP is an 8-bit constant or any register.  */

reg_or_8bit_operand (op, mode)
     enum machine_mode mode;
  return ((GET_CODE (op) == CONST_INT
           && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
          || register_operand (op, mode));

/* Return 1 if OP is an 8-bit constant.  */

cint8_operand (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  return ((GET_CODE (op) == CONST_INT
           && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100));

/* Return 1 if the operand is a valid second operand to an add insn.  */
add_operand (op, mode)
     enum machine_mode mode;
  if (GET_CODE (op) == CONST_INT)
    /* Constraints I, J, O and P are covered by K.  */
    return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
            || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));

  return register_operand (op, mode);

/* Return 1 if the operand is a valid second operand to a sign-extending
   add insn.  */

sext_add_operand (op, mode)
     enum machine_mode mode;
  if (GET_CODE (op) == CONST_INT)
    return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
            || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));

  return reg_not_elim_operand (op, mode);

/* Return 1 if OP is the constant 4 or 8.  */

const48_operand (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  return (GET_CODE (op) == CONST_INT
          && (INTVAL (op) == 4 || INTVAL (op) == 8));
/* Return 1 if OP is a valid first operand to an AND insn.  */

and_operand (op, mode)
     enum machine_mode mode;
  if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
    return (zap_mask (CONST_DOUBLE_LOW (op))
            && zap_mask (CONST_DOUBLE_HIGH (op)));

  if (GET_CODE (op) == CONST_INT)
    return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
            || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
            || zap_mask (INTVAL (op)));

  return register_operand (op, mode);

/* Return 1 if OP is a valid first operand to an IOR or XOR insn.  */

or_operand (op, mode)
     enum machine_mode mode;
  if (GET_CODE (op) == CONST_INT)
    return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
            || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);

  return register_operand (op, mode);

/* Return 1 if OP is a constant that is the width, in bits, of an integral
   mode smaller than DImode.  */

mode_width_operand (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  return (GET_CODE (op) == CONST_INT
          && (INTVAL (op) == 8 || INTVAL (op) == 16
              || INTVAL (op) == 32 || INTVAL (op) == 64));
/* Return 1 if OP is a constant that is the width of an integral machine mode
   smaller than an integer.  */

mode_mask_operand (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
#if HOST_BITS_PER_WIDE_INT == 32
  if (GET_CODE (op) == CONST_DOUBLE)
    return (CONST_DOUBLE_LOW (op) == -1
            && (CONST_DOUBLE_HIGH (op) == -1
                || CONST_DOUBLE_HIGH (op) == 0));
#else
  if (GET_CODE (op) == CONST_DOUBLE)
    return (CONST_DOUBLE_LOW (op) == -1 && CONST_DOUBLE_HIGH (op) == 0);
#endif

  return (GET_CODE (op) == CONST_INT
          && (INTVAL (op) == 0xff
              || INTVAL (op) == 0xffff
              || INTVAL (op) == (HOST_WIDE_INT) 0xffffffff
#if HOST_BITS_PER_WIDE_INT == 64

/* Return 1 if OP is a multiple of 8 less than 64.  */

mul8_operand (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  return (GET_CODE (op) == CONST_INT
          && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
          && (INTVAL (op) & 7) == 0);

/* Return 1 if OP is the constant zero in floating-point.  */

fp0_operand (op, mode)
     enum machine_mode mode;
  return (GET_MODE (op) == mode
          && GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode));
/* Return 1 if OP is the floating-point constant zero or a register.  */

reg_or_fp0_operand (op, mode)
     enum machine_mode mode;
  return fp0_operand (op, mode) || register_operand (op, mode);

/* Return 1 if OP is a hard floating-point register.  */

hard_fp_register_operand (op, mode)
     enum machine_mode mode;
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS;

/* Return 1 if OP is a hard general register.  */

hard_int_register_operand (op, mode)
     enum machine_mode mode;
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS;
/* Return 1 if OP is a register or a constant integer.  */

reg_or_cint_operand (op, mode)
     enum machine_mode mode;
  return (GET_CODE (op) == CONST_INT
          || register_operand (op, mode));

/* Return 1 if OP is something that can be reloaded into a register;
   if it is a MEM, it need not be valid.  */

some_operand (op, mode)
     enum machine_mode mode;
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))

  switch (GET_CODE (op))
    case REG:  case MEM:  case CONST_DOUBLE:  case CONST_INT:  case LABEL_REF:
    case SYMBOL_REF:  case CONST:
      return some_operand (SUBREG_REG (op), VOIDmode);

/* Likewise, but don't accept constants.  */

some_ni_operand (op, mode)
     enum machine_mode mode;
  if (GET_MODE (op) != mode && mode != VOIDmode)

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  return (GET_CODE (op) == REG || GET_CODE (op) == MEM);
/* Return 1 if OP is a valid operand for the source of a move insn.  */

input_operand (op, mode)
     enum machine_mode mode;
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))

  if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)

  switch (GET_CODE (op))
      if (TARGET_EXPLICIT_RELOCS)

      /* This handles both the Windows/NT and OSF cases.  */
      return mode == ptr_mode || mode == DImode;

      if (register_operand (op, mode))
      /* ... fall through ... */
      return ((TARGET_BWX || (mode != HImode && mode != QImode))
              && general_operand (op, mode));

      return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);

      return mode == QImode || mode == HImode || add_operand (op, mode);
/* Return 1 if OP is a SYMBOL_REF for a function known to be in this
   file, and in the same section as the current function.  */

current_file_function_operand (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  if (GET_CODE (op) != SYMBOL_REF)

  /* Easy test for recursion.  */
  if (op == XEXP (DECL_RTL (current_function_decl), 0))

  /* Otherwise, we need the DECL for the SYMBOL_REF, which we can't get.
     So SYMBOL_REF_FLAG has been declared to imply that the function is
     in the default text section.  So we must also check that the current
     function is also in the text section.  */
  if (SYMBOL_REF_FLAG (op) && decl_in_text_section (current_function_decl))

/* Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr.  */

direct_call_operand (op, mode)
     enum machine_mode mode;
  /* Must be defined in this file.  */
  if (! current_file_function_operand (op, mode))

  /* If profiling is implemented via linker tricks, we can't jump
     to the nogp alternate entry point.  */
  /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test,
     but is approximately correct for the OSF ABIs.  Don't know
     what to do for VMS, NT, or UMK.  */
  if (! TARGET_PROFILING_NEEDS_GP
/* Return true if OP is a LABEL_REF, or SYMBOL_REF or CONST referencing
   a variable known to be defined in this file.  */

  const char *str = XSTR (op, 0);

  /* ??? SYMBOL_REF_FLAG is set for local function symbols, but we
     run into problems with the rtl inliner in that the symbol was
     once external, but is local after inlining, which results in
     unrecognizable insns.  */

  return (CONSTANT_POOL_ADDRESS_P (op)
          /* If @, then ENCODE_SECTION_INFO sez it's local.  */
          /* If *$, then ASM_GENERATE_INTERNAL_LABEL sez it's local.  */
          || (str[0] == '*' && str[1] == '$'));
local_symbolic_operand (op, mode)
     enum machine_mode mode;
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))

  if (GET_CODE (op) == LABEL_REF)

  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) != SYMBOL_REF)

  return local_symbol_p (op);
/* Return true if OP is a SYMBOL_REF or CONST referencing a variable
   known to be defined in this file in the small data area.  */

small_symbolic_operand (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  if (! TARGET_SMALL_DATA)

  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))

  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) != SYMBOL_REF)

  if (CONSTANT_POOL_ADDRESS_P (op))
    return GET_MODE_SIZE (get_pool_mode (op)) <= (unsigned) g_switch_value;

  return str[0] == '@' && str[1] == 's';
/* Return true if OP is a SYMBOL_REF or CONST referencing a variable
   not known (or known not) to be defined in this file.  */

global_symbolic_operand (op, mode)
     enum machine_mode mode;
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))

  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) != SYMBOL_REF)

  return ! local_symbol_p (op);
/* Return 1 if OP is a valid operand for the MEM of a CALL insn.  */

call_operand (op, mode)
     enum machine_mode mode;
  if (GET_CODE (op) == REG)
      return REGNO (op) == 27;

  if (TARGET_ABI_UNICOSMK)
  if (GET_CODE (op) == SYMBOL_REF)
/* Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
   possibly with an offset.  */

symbolic_operand (op, mode)
     enum machine_mode mode;
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)

  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
/* Return 1 if OP is a valid Alpha comparison operator.  Here we know which
   comparisons are valid in which insn.  */

alpha_comparison_operator (op, mode)
     enum machine_mode mode;
  enum rtx_code code = GET_CODE (op);

  if (mode != GET_MODE (op) && mode != VOIDmode)

  return (code == EQ || code == LE || code == LT
          || code == LEU || code == LTU);

/* Return 1 if OP is a valid Alpha comparison operator against zero.
   Here we know which comparisons are valid in which insn.  */

alpha_zero_comparison_operator (op, mode)
     enum machine_mode mode;
  enum rtx_code code = GET_CODE (op);

  if (mode != GET_MODE (op) && mode != VOIDmode)

  return (code == EQ || code == NE || code == LE || code == LT
          || code == LEU || code == LTU);
/* Return 1 if OP is a valid Alpha swapped comparison operator.  */

alpha_swapped_comparison_operator (op, mode)
     enum machine_mode mode;
  enum rtx_code code = GET_CODE (op);

  if ((mode != GET_MODE (op) && mode != VOIDmode)
      || GET_RTX_CLASS (code) != '<')

  code = swap_condition (code);
  return (code == EQ || code == LE || code == LT
          || code == LEU || code == LTU);

/* Return 1 if OP is a signed comparison operation.  */

signed_comparison_operator (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  enum rtx_code code = GET_CODE (op);

  if (mode != GET_MODE (op) && mode != VOIDmode)

  return (code == EQ || code == NE
          || code == LE || code == LT
          || code == GE || code == GT);
/* Return 1 if OP is a valid Alpha floating point comparison operator.
   Here we know which comparisons are valid in which insn.  */

alpha_fp_comparison_operator (op, mode)
     enum machine_mode mode;
  enum rtx_code code = GET_CODE (op);

  if (mode != GET_MODE (op) && mode != VOIDmode)

  return (code == EQ || code == LE || code == LT || code == UNORDERED);

/* Return 1 if this is a divide or modulus operator.  */

divmod_operator (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  switch (GET_CODE (op))
    case DIV:  case MOD:  case UDIV:  case UMOD:
/* Return 1 if this memory address is a known aligned register plus
   a constant.  It must be a valid address.  This means that we can do
   this as an aligned reference plus some offset.

   Take into account what reload will do.  */

aligned_memory_operand (op, mode)
     enum machine_mode mode;
  if (reload_in_progress)
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
          op = reg_equiv_memory_loc[REGNO (tmp)];

  if (GET_CODE (op) != MEM
      || GET_MODE (op) != mode)

  /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
     sorts of constructs.  Dig for the real base register.  */
  if (reload_in_progress
      && GET_CODE (op) == PLUS
      && GET_CODE (XEXP (op, 0)) == PLUS)
    base = XEXP (XEXP (op, 0), 0);
      if (! memory_address_p (mode, op))
      base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);

  return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) >= 32);
/* Similar, but return 1 if OP is a MEM which is not alignable.  */

unaligned_memory_operand (op, mode)
     enum machine_mode mode;
  if (reload_in_progress)
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
          op = reg_equiv_memory_loc[REGNO (tmp)];

  if (GET_CODE (op) != MEM
      || GET_MODE (op) != mode)

  /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
     sorts of constructs.  Dig for the real base register.  */
  if (reload_in_progress
      && GET_CODE (op) == PLUS
      && GET_CODE (XEXP (op, 0)) == PLUS)
    base = XEXP (XEXP (op, 0), 0);
      if (! memory_address_p (mode, op))
      base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);

  return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) < 32);
/* Return 1 if OP is either a register or an unaligned memory location.  */

reg_or_unaligned_mem_operand (op, mode)
     enum machine_mode mode;
  return register_operand (op, mode) || unaligned_memory_operand (op, mode);

/* Return 1 if OP is any memory location.  During reload a pseudo matches.  */

any_memory_operand (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  return (GET_CODE (op) == MEM
          || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
          || (reload_in_progress && GET_CODE (op) == REG
              && REGNO (op) >= FIRST_PSEUDO_REGISTER)
          || (reload_in_progress && GET_CODE (op) == SUBREG
              && GET_CODE (SUBREG_REG (op)) == REG
              && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
/* Returns 1 if OP is not an eliminable register.

   This exists to cure a pathological abort in the s8addq (et al) patterns,

	long foo () { long t; bar(); return (long) &t * 26107; }

   which run afoul of a hack in reload to cure a (presumably) similar
   problem with lea-type instructions on other targets.  But there is
   one of us and many of them, so work around the problem by selectively
   preventing combine from making the optimization.  */

reg_not_elim_operand (op, mode)
     enum machine_mode mode;
  if (GET_CODE (op) == SUBREG)
    inner = SUBREG_REG (op);
  if (inner == frame_pointer_rtx || inner == arg_pointer_rtx)

  return register_operand (op, mode);
/* Return 1 if OP is a memory location that is not a reference (using
   an AND) to an unaligned location.  Take into account what reload
   will do.  */

normal_memory_operand (op, mode)
     enum machine_mode mode ATTRIBUTE_UNUSED;
  if (reload_in_progress)
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
          op = reg_equiv_memory_loc[REGNO (tmp)];

      /* This may not have been assigned an equivalent address if it will
         be eliminated.  In that case, it doesn't matter what we do.  */

  return GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) != AND;
/* Accept a register, but not a subreg of any kind.  This allows us to
   avoid pathological cases in reload wrt data movement common in
   int->fp conversion.  */

reg_no_subreg_operand (op, mode)
     enum machine_mode mode;
  if (GET_CODE (op) == SUBREG)
  return register_operand (op, mode);

/* Recognize an addition operation that includes a constant.  Used to
   convince reload to canonize (plus (plus reg c1) c2) during register
   elimination.  */

addition_operation (op, mode)
     enum machine_mode mode;
  if (GET_MODE (op) != mode && mode != VOIDmode)

  if (GET_CODE (op) == PLUS
      && register_operand (XEXP (op, 0), mode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && CONST_OK_FOR_LETTER_P (INTVAL (XEXP (op, 1)), 'K'))
1341 return (! TARGET_ABI_OPEN_VMS
&& ! TARGET_ABI_UNICOSMK
1343 && alpha_sa_size () == 0
1344 && get_frame_size () == 0
1345 && current_function_outgoing_args_size
== 0
1346 && current_function_pretend_args_size
== 0);
1349 /* Return the ADDR_VEC associated with a tablejump insn. */
1352 alpha_tablejump_addr_vec (insn
)
1357 tmp
= JUMP_LABEL (insn
);
1360 tmp
= NEXT_INSN (tmp
);
1363 if (GET_CODE (tmp
) == JUMP_INSN
1364 && GET_CODE (PATTERN (tmp
)) == ADDR_DIFF_VEC
)
1365 return PATTERN (tmp
);
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

alpha_tablejump_best_label (insn)
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;

      for (i = 0; i < n_labels; i++)
          for (j = i + 1; j < n_labels; j++)
            if (XEXP (XVECEXP (jump_table, 1, i), 0)
                == XEXP (XVECEXP (jump_table, 1, j), 0))

          if (count > best_count)
            best_count = count, best_label = XVECEXP (jump_table, 1, i);

  return best_label ? best_label : const0_rtx;
/* Return true if the function DECL will be placed in the default text
   section.  */
/* ??? Ideally we'd be able to always move from a SYMBOL_REF back to the
   decl, as that would allow us to determine if two functions are in the
   same section, which is what we really want to know.  */

decl_in_text_section (decl)
  return (DECL_SECTION_NAME (decl) == NULL_TREE
          && ! (flag_function_sections
                || (targetm.have_named_sections
                    && DECL_ONE_ONLY (decl))));
/* If we are referencing a function that is static, make the SYMBOL_REF
   special.  We use this to indicate that we can branch to this function
   without setting PV or restoring GP.

   If this is a variable that is known to be defined locally, add "@v"
   to the name.  If in addition the variable is to go in .sdata/.sbss,
   then add "@s" instead.  */
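/* For instance (names illustrative), a variable defined in this module whose
   symbol is "foo" would be renamed "@vfoo", or "@sfoo" if it is small enough
   for .sdata/.sbss; the '@' marker is what local_symbol_p and
   small_symbolic_operand test for above.  */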
alpha_encode_section_info (decl)
  const char *symbol_str;
  bool is_local, is_small;

  if (TREE_CODE (decl) == FUNCTION_DECL)
      /* We mark public functions once they are emitted; otherwise we
         don't know that they exist in this unit of translation.  */
      if (TREE_PUBLIC (decl))

      /* Do not mark functions that are not in .text; otherwise we
         don't know that they are near enough for a direct branch.  */
      if (! decl_in_text_section (decl))

      SYMBOL_REF_FLAG (XEXP (DECL_RTL (decl), 0)) = 1;

  /* Early out if we're not going to do anything with this data.  */
  if (! TARGET_EXPLICIT_RELOCS)

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) != VAR_DECL
      || GET_CODE (DECL_RTL (decl)) != MEM
      || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)

  symbol_str = XSTR (XEXP (DECL_RTL (decl), 0), 0);

  /* A variable is considered "local" if it is defined in this module.  */

  if (DECL_EXTERNAL (decl))
  /* Linkonce and weak data is never local.  */
  else if (DECL_ONE_ONLY (decl) || DECL_WEAK (decl))
  else if (! TREE_PUBLIC (decl))
  /* If PIC, then assume that any global name can be overridden by
     symbols resolved from other modules.  */
  /* Uninitialized COMMON variable may be unified with symbols
     resolved from other modules.  */
  else if (DECL_COMMON (decl)
           && (DECL_INITIAL (decl) == NULL
               || DECL_INITIAL (decl) == error_mark_node))
  /* Otherwise we're left with initialized (or non-common) global data
     which is of necessity defined locally.  */

  /* Determine if DECL will wind up in .sdata/.sbss.  */

  if (DECL_SECTION_NAME (decl))
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)

      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      /* If the variable has already been defined in the output file, then it
         is too late to put it in sdata if it wasn't put there in the first
         place.  The test is here rather than above, because if it is already
         in sdata, then it can stay there.  */

      if (TREE_ASM_WRITTEN (decl))

      /* If this is an incomplete type with size 0, then we can't put it in
         sdata because it might be too big when completed.  */
      else if (size > 0 && size <= g_switch_value)

  /* Finally, encode this into the symbol string.  */

      if (symbol_str[0] == '@')
          if (symbol_str[1] == (is_small ? 's' : 'v'))

      len = strlen (symbol_str) + 1;
      newstr = alloca (len + 2);

      newstr[1] = (is_small ? 's' : 'v');
      memcpy (newstr + 2, symbol_str, len);

      string = ggc_alloc_string (newstr, len + 2 - 1);
      XSTR (XEXP (DECL_RTL (decl), 0), 0) = string;

  else if (symbol_str[0] == '@')
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

alpha_legitimate_address_p (mode, x, strict)
     enum machine_mode mode;
  /* If this is an ldq_u type address, discard the outer AND.  */
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))

  /* Unadorned general registers are valid.  */
          ? STRICT_REG_OK_FOR_BASE_P (x)
          : NONSTRICT_REG_OK_FOR_BASE_P (x)))

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
      rtx ofs = XEXP (x, 1);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))

          && NONSTRICT_REG_OK_FP_BASE_P (x)
          && GET_CODE (ofs) == CONST_INT)
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))
           && CONSTANT_ADDRESS_P (ofs))
      else if (GET_CODE (x) == ADDRESSOF
               && GET_CODE (ofs) == CONST_INT)

  /* If we're managing explicit relocations, LO_SUM is valid.  */
  else if (TARGET_EXPLICIT_RELOCS && GET_CODE (x) == LO_SUM)
      rtx ofs = XEXP (x, 1);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))

      /* Must have a valid base register.  */
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))))

      /* The symbol must be local.  */
      if (local_symbolic_operand (ofs, Pmode))
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

alpha_legitimize_address (x, oldx, mode)
     rtx oldx ATTRIBUTE_UNUSED;
     enum machine_mode mode ATTRIBUTE_UNUSED;
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
      addend = INTVAL (XEXP (x, 1));

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
      if (local_symbolic_operand (x, Pmode))
          if (small_symbolic_operand (x, Pmode))
            scratch = pic_offset_table_rtx;
              scratch = gen_reg_rtx (Pmode);

              tmp = gen_rtx_HIGH (Pmode, x);
              tmp = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmp);
              insn = emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp));
              REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, tmp,

          return gen_rtx_LO_SUM (Pmode, scratch, x);

          scratch = gen_reg_rtx (Pmode);
          emit_insn (gen_movdi_er_high_g (scratch, pic_offset_table_rtx,
          /* ??? FIXME: Tag the use of scratch with a lituse.  */

      HOST_WIDE_INT lowpart = (addend & 0xffff) - 2 * (addend & 0x8000);
      HOST_WIDE_INT highpart = addend - lowpart;
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (highpart),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      return plus_constant (x, lowpart);
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

alpha_legitimize_reload_address (x, mode, opnum, type, ind_levels)
     enum machine_mode mode ATTRIBUTE_UNUSED;
     int ind_levels ATTRIBUTE_UNUSED;
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts the number of extra insns needed from 3 to 1.  */
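  /* Worked example (illustrative): for a displacement of 0x9234, the code
     below computes low = ((0x9234 & 0xffff) ^ 0x8000) - 0x8000 = -0x6dcc and
     high = 0x10000, so the high part can be added to the base with a single
     ldah while -0x6dcc stays as the in-range displacement of the mem.  */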
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */
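/* For example, given REF = (mem:QI (plus:DI (reg R) (const_int 6))), the
   offset is 6, so *PALIGNED_MEM becomes an SImode access at offset 4
   (6 & ~3) and *PBITNUM is (6 & 3) * 8 = 16 on a little-endian target.  */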
get_aligned_mem (ref, paligned_mem, pbitnum)
     rtx *paligned_mem, *pbitnum;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
      base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  *paligned_mem
    = widen_memory_access (ref, SImode, (offset & ~3) - offset);

  if (WORDS_BIG_ENDIAN)
    *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
                              + (offset & 3) * 8));
  else
    *pbitnum = GEN_INT ((offset & 3) * 8);

/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

get_unaligned_address (ref, extra_offset)
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
      base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
/* Loading and storing HImode or QImode values to and from memory
   usually requires a scratch register.  The exceptions are loading
   QImode and HImode from an aligned address to a general register
   unless byte instructions are permitted.

   We also cannot load an unaligned address or a paradoxical SUBREG
   into an FP register.

   We also cannot do integral arithmetic into FP regs, as might result
   from register elimination into a DImode fp register.  */

secondary_reload_class (class, mode, x, in)
     enum reg_class class;
     enum machine_mode mode;
  if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
      if (GET_CODE (x) == MEM
          || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
          || (GET_CODE (x) == SUBREG
              && (GET_CODE (SUBREG_REG (x)) == MEM
                  || (GET_CODE (SUBREG_REG (x)) == REG
                      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
          if (!in || !aligned_memory_operand (x, mode))
            return GENERAL_REGS;

  if (class == FLOAT_REGS)
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
        return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        return GENERAL_REGS;

      if (in && INTEGRAL_MODE_P (mode)
          && ! (memory_operand (x, mode) || x == const0_rtx))
        return GENERAL_REGS;
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
     int in_struct_p, volatile_p, unchanging_p;
  switch (GET_CODE (x))
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,

      alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,

      alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
      alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,

      MEM_IN_STRUCT_P (x) = in_struct_p;
      MEM_VOLATILE_P (x) = volatile_p;
      RTX_UNCHANGING_P (x) = unchanging_p;
      /* Sadly, we cannot use alias sets because the extra aliasing
         produced by the AND interferes.  Given that two-byte quantities
         are the only thing we would be able to differentiate anyway,
         there does not seem to be any point in convoluting the early
         out of the alias check.  */
/* Given INSN, which is either an INSN or a SEQUENCE generated to
   perform a memory operation, look for any MEMs in either a SET_DEST or
   a SET_SRC and copy the in-struct, unchanging, and volatile flags from
   REF into each of the MEMs found.  If REF is not a MEM, don't do
   anything.  */

alpha_set_memflags (insn, ref)
  int in_struct_p, volatile_p, unchanging_p;

  if (GET_CODE (ref) != MEM)

  in_struct_p = MEM_IN_STRUCT_P (ref);
  volatile_p = MEM_VOLATILE_P (ref);
  unchanging_p = RTX_UNCHANGING_P (ref);

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (! in_struct_p && ! volatile_p && ! unchanging_p)

  alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */
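/* For example (illustrative), c = 0x12345678 splits into low = 0x5678 and
   high = 0x1234 by the formulas below, so it can be built in two insns:
   roughly an ldah loading 0x1234 << 16 followed by an lda adding 0x5678.  */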
alpha_emit_set_const (target, mode, c, n)
     enum machine_mode mode;
  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    if ((pat = alpha_emit_set_const_1 (target, mode, c, i)) != 0)

/* Internal routine for the above to check for N or below insns.  */

alpha_emit_set_const_1 (target, mode, c, n)
     enum machine_mode mode;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
    = (flag_expensive_optimizations && rtx_equal_function_value_matters
#if HOST_BITS_PER_WIDE_INT == 64
  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  This does not make any sense when
     cross-compiling on a narrow machine.  */

    c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
#endif

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
         positive, we must adjust it to do two ldah insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);

      if (c == low || (low == 0 && extra == 0))
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
             but that meant that we can't handle INT_MIN on 32-bit machines
             (like NT/Alpha), because we recurse indefinitely through
             emit_move_insn to gen_movdi.  So instead, since we know exactly
             what we want, create it explicitly.  */

            target = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
      else if (n >= 2 + (extra != 0))
          temp = copy_to_suggested_reg (GEN_INT (high << 16), subtarget, mode);

            temp = expand_binop (mode, add_optab, temp, GEN_INT (extra << 16),
                                 subtarget, 0, OPTAB_WIDEN);

          return expand_binop (mode, add_optab, temp, GEN_INT (low),
                               target, 0, OPTAB_WIDEN);
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

      || (mode == SImode && ! rtx_equal_function_value_matters))

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
      /* First, see if minus some low bits, we've an easy load of
         high bits.  */

      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
          && (temp = alpha_emit_set_const (subtarget, mode, c - new, i)) != 0)
        return expand_binop (mode, add_optab, temp, GEN_INT (new),
                             target, 0, OPTAB_WIDEN);

      /* Next try complementing.  */
      if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
        return expand_unop (mode, one_cmpl_optab, temp, target, 0);

      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will
         be useful.  */

      if ((bits = exact_log2 (c & - c)) > 0)
        for (; bits > 0; bits--)
          if ((temp = (alpha_emit_set_const
                       (subtarget, mode, c >> bits, i))) != 0
              || ((temp = (alpha_emit_set_const
                           ((unsigned HOST_WIDE_INT) c) >> bits, i)))
            return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                 target, 0, OPTAB_WIDEN);
      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
         confuse the recursive call and set all of the high 32 bits.  */

      if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
                   - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
        for (; bits > 0; bits--)
          if ((temp = alpha_emit_set_const (subtarget, mode,
              || ((temp = (alpha_emit_set_const
                           ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
            return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
                                 target, 1, OPTAB_WIDEN);

      /* Now try high-order 1 bits.  We get that with a sign-extension.
         But one bit isn't enough here.  Be careful to avoid shifting outside
         the mode and to avoid shifting outside the host wide int size.  */

      if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
                   - floor_log2 (~ c) - 2)) > 0)
        for (; bits > 0; bits--)
          if ((temp = alpha_emit_set_const (subtarget, mode,
              || ((temp = (alpha_emit_set_const
                           ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
            return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
                                 target, 0, OPTAB_WIDEN);
#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

    new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new != c && new != -1
      && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
    return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
                         target, 0, OPTAB_WIDEN);
#endif

/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */
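/* Viewed as arithmetic, the emit sequence below builds the value as roughly
   ((d4 + d3) << 32) + d2 + d1, where each dN is a sign-extended 16- or 32-bit
   piece small enough for an lda- or ldah-style add.  */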
alpha_emit_set_long_const (target, c1, c2)
     HOST_WIDE_INT c1, c2;

  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word */
#if HOST_BITS_PER_WIDE_INT >= 64
  if (c2 != -(c1 < 0))

  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;

  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;

  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;

  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;

  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;

  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;

  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;

  /* Construct the high word */

  emit_move_insn (target, GEN_INT (d4));

  emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));

  emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */

  emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));

  emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
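
/* Illustrative aside (not part of the original file): a minimal sketch of
   the d1..d4 decomposition used above, on plain host integers (relying,
   like the code above, on arithmetic shifts of signed values).  Each piece
   is a sign-extended 16- or 32-bit chunk, so the constant can be rebuilt
   with lda/ldah-style adds and one 32-bit shift.  The function name and
   the use of "long long" are assumptions made only for this sketch.  */
static long long
long_const_pieces_sketch (long long c)
{
  long long d1, d2, d3, d4;

  /* Low 16 bits, sign-extended, then the next sign-extended 32-bit chunk.  */
  d1 = ((c & 0xffff) ^ 0x8000) - 0x8000;
  c -= d1;
  d2 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c = (c - d2) >> 32;

  /* Same again for the high word.  */
  d3 = ((c & 0xffff) ^ 0x8000) - 0x8000;
  c -= d3;
  d4 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;

  /* Rebuild the original value the same way the insns above do:
     high word first, shift it into place, then add the low pieces.  */
  return (((d4 + d3) << 32) + d2 + d1);
}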
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

alpha_expand_mov (mode, operands)
     enum machine_mode mode;

  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (operands[1], mode))

      if (local_symbolic_operand (operands[1], mode))

          if (small_symbolic_operand (operands[1], Pmode))
            scratch = pic_offset_table_rtx;

              scratch = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);

              tmp = gen_rtx_HIGH (Pmode, operands[1]);
              tmp = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmp);
              insn = emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp));
              REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, tmp,

          operands[1] = gen_rtx_LO_SUM (Pmode, scratch, operands[1]);

          emit_insn (gen_movdi_er_high_g (operands[0], pic_offset_table_rtx,
                                          operands[1], const0_rtx));

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))

  /* Split large integers.  */
  if (GET_CODE (operands[1]) == CONST_INT
      || GET_CODE (operands[1]) == CONST_DOUBLE)

      HOST_WIDE_INT i0, i1;
      rtx temp = NULL_RTX;

      if (GET_CODE (operands[1]) == CONST_INT)

          i0 = INTVAL (operands[1]);

      else if (HOST_BITS_PER_WIDE_INT >= 64)

          i0 = CONST_DOUBLE_LOW (operands[1]);

          i0 = CONST_DOUBLE_LOW (operands[1]);
          i1 = CONST_DOUBLE_HIGH (operands[1]);

      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
        temp = alpha_emit_set_const (operands[0], mode, i0, 3);

      if (!temp && TARGET_BUILD_CONSTANTS)
        temp = alpha_emit_set_long_const (operands[0], i0, i1);

          if (rtx_equal_p (operands[0], temp))

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  operands[1] = force_const_mem (DImode, operands[1]);
  if (reload_in_progress)

      emit_move_insn (operands[0], XEXP (operands[1], 0));
      operands[1] = copy_rtx (operands[1]);
      XEXP (operands[1], 0) = operands[0];

    operands[1] = validize_mem (operands[1]);
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

alpha_expand_mov_nobwx (mode, operands)
     enum machine_mode mode;

  /* If the output is not a register, the input must be.  */
  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (GET_CODE (operands[1]) == MEM
      || (GET_CODE (operands[1]) == SUBREG
          && GET_CODE (SUBREG_REG (operands[1])) == MEM)
      || (reload_in_progress && GET_CODE (operands[1]) == REG
          && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
      || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
          && GET_CODE (SUBREG_REG (operands[1])) == REG
          && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))

      if (aligned_memory_operand (operands[1], mode))

          if (reload_in_progress)

              emit_insn ((mode == QImode
                          ? gen_reload_inqi_help
                          : gen_reload_inhi_help)
                         (operands[0], operands[1],
                          gen_rtx_REG (SImode, REGNO (operands[0]))));

              rtx aligned_mem, bitnum;
              rtx scratch = gen_reg_rtx (SImode);

              get_aligned_mem (operands[1], &aligned_mem, &bitnum);

              emit_insn ((mode == QImode
                          ? gen_aligned_loadqi
                          : gen_aligned_loadhi)
                         (operands[0], aligned_mem, bitnum, scratch));

          /* Don't pass these as parameters since that makes the generated
             code depend on parameter evaluation order which will cause
             bootstrap failures.  */

          rtx temp1 = gen_reg_rtx (DImode);
          rtx temp2 = gen_reg_rtx (DImode);
          rtx seq = ((mode == QImode
                      ? gen_unaligned_loadqi
                      : gen_unaligned_loadhi)
                     (operands[0], get_unaligned_address (operands[1], 0),

          alpha_set_memflags (seq, operands[1]);

  if (GET_CODE (operands[0]) == MEM
      || (GET_CODE (operands[0]) == SUBREG
          && GET_CODE (SUBREG_REG (operands[0])) == MEM)
      || (reload_in_progress && GET_CODE (operands[0]) == REG
          && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
      || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
          && GET_CODE (SUBREG_REG (operands[0])) == REG
          && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))

      if (aligned_memory_operand (operands[0], mode))

          rtx aligned_mem, bitnum;
          rtx temp1 = gen_reg_rtx (SImode);
          rtx temp2 = gen_reg_rtx (SImode);

          get_aligned_mem (operands[0], &aligned_mem, &bitnum);

          emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,

          rtx temp1 = gen_reg_rtx (DImode);
          rtx temp2 = gen_reg_rtx (DImode);
          rtx temp3 = gen_reg_rtx (DImode);
          rtx seq = ((mode == QImode
                      ? gen_unaligned_storeqi
                      : gen_unaligned_storehi)
                     (get_unaligned_address (operands[0], 0),
                      operands[1], temp1, temp2, temp3));

          alpha_set_memflags (seq, operands[0]);
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.

   For SFmode, this is the only construction I've found that can pass
   gcc.c-torture/execute/ieee/rbug.c.  No scenario that uses DFmode
   intermediates will work, because you'll get intermediate rounding
   that ruins the end result.  Some of this could be fixed by turning
   on round-to-positive-infinity, but that requires diddling the fpsr,
   which kills performance.  I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else, because I wound up with the exact
   same single-bit error.  There is a branch-less form of this same code:

        fcmoveq $f10,$f11,$f0

   I'm not using it because it's the same number of instructions as
   this branch-full form, and it has more serialized long latency
   instructions on the critical path.

   For DFmode, we can avoid rounding errors by breaking up the word
   into two pieces, converting them separately, and adding them back:

   LC0: .long 0,0x5f800000

        cpyse $f11,$f31,$f10
        cpyse $f31,$f11,$f11

   This doesn't seem to be a clear-cut win over the optabs form.
   It probably all depends on the distribution of numbers being
   converted -- in the optabs form, all but high-bit-set has a
   much lower minimum execution time.  */
alpha_emit_floatuns (operands)

  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode;

  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
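
/* Illustrative aside (not part of the original file): the same branchful
   scheme sketched in standard C for a 64-bit unsigned integer.  When the
   high bit is set, halve the value (folding the shifted-out bit back in so
   rounding stays unbiased), convert as a signed value, and double the
   result.  The function name and the use of "unsigned long long"/"double"
   are assumptions made only for this sketch.  */
static double
floatuns_sketch (unsigned long long x)
{
  if ((long long) x >= 0)
    return (double) (long long) x;
  else
    {
      /* Shift right by one, OR the lost low bit back in, convert the now
         positive value, then scale back up by two.  */
      unsigned long long half = (x >> 1) | (x & 1);
      return (double) (long long) half * 2.0;
    }
}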
/* Generate the comparison for a conditional branch.  */

alpha_emit_conditional_branch (code)

  enum rtx_code cmp_code, branch_code;
  enum machine_mode cmp_mode, branch_mode = VOIDmode;
  rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;

  if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)

      if (! TARGET_HAS_XFLOATING_LIBS)

      /* X_floating library comparison functions return
         Convert the compare against the raw return value.  */

      if (code == UNORDERED || code == ORDERED)

      op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);

      alpha_compare.fp_p = 0;

      if (code == UNORDERED)
      else if (code == ORDERED)

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares: */
      cmp_code = code, branch_code = NE;

      /* These must be reversed.  */
      cmp_code = reverse_condition (code), branch_code = EQ;

    case GE:  case GT:  case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (alpha_compare.fp_p)

          cmp_code = swap_condition (code);
          tem = op0, op0 = op1, op1 = tem;

          cmp_code = reverse_condition (code);

  if (alpha_compare.fp_p)

      if (flag_unsafe_math_optimizations)

          /* When we are not as concerned about non-finite values, and we
             are comparing against zero, we can branch directly.  */
          if (op1 == CONST0_RTX (DFmode))
            cmp_code = NIL, branch_code = code;
          else if (op0 == CONST0_RTX (DFmode))

              /* Undo the swap we probably did just above.  */
              tem = op0, op0 = op1, op1 = tem;
              branch_code = swap_condition (cmp_code);
          /* ??? We mark the branch mode to be CCmode to prevent the
             compare and branch from being combined, since the compare
             insn follows IEEE rules that the branch does not.  */
          branch_mode = CCmode;

      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)

          /* Whee.  Compare and branch against 0 directly.  */
          if (op1 == const0_rtx)
            cmp_code = NIL, branch_code = code;

          /* We want to use cmpcc/bcc when we can, since there is a zero delay
             bypass between logicals and br/cmov on EV5.  But we don't want to
             force valid immediate constants into registers needlessly.  */
          else if (GET_CODE (op1) == CONST_INT)

              HOST_WIDE_INT v = INTVAL (op1), n = -v;

              if (! CONST_OK_FOR_LETTER_P (v, 'I')
                  && (CONST_OK_FOR_LETTER_P (n, 'K')
                      || CONST_OK_FOR_LETTER_P (n, 'L')))

                  cmp_code = PLUS, branch_code = code;

      if (!reg_or_0_operand (op0, DImode))
        op0 = force_reg (DImode, op0);
      if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
        op1 = force_reg (DImode, op1);

  /* Emit an initial compare instruction, if necessary.  */

  if (cmp_code != NIL)

      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  /* Return the branch comparison.  */
  return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
/* Certain simplifications can be done to make invalid setcc operations
   valid.  Return the final comparison, or NULL if we can't work.  */

alpha_emit_setcc (code)

  enum rtx_code cmp_code;
  rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
  int fp_p = alpha_compare.fp_p;

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  if (fp_p && GET_MODE (op0) == TFmode)

      if (! TARGET_HAS_XFLOATING_LIBS)

      /* X_floating library comparison functions return
         Convert the compare against the raw return value.  */

      if (code == UNORDERED || code == ORDERED)

      op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);

      if (code == UNORDERED)
      else if (code == ORDERED)

  if (fp_p && !TARGET_FIX)

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */

    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares.  */

      cmp_code = code, code = NE;

      if (!fp_p && op1 == const0_rtx)

      cmp_code = reverse_condition (code);

    case GE:  case GT:  case GEU:  case GTU:
      /* These normally need swapping, but for integer zero we have
         special patterns that recognize swapped operands.  */
      if (!fp_p && op1 == const0_rtx)

      code = swap_condition (code);

      cmp_code = code, code = NE;
      tmp = op0, op0 = op1, op1 = tmp;

  if (!register_operand (op0, DImode))
    op0 = force_reg (DImode, op0);
  if (!reg_or_8bit_operand (op1, DImode))
    op1 = force_reg (DImode, op1);

  /* Emit an initial compare instruction, if necessary.  */
  if (cmp_code != NIL)

      enum machine_mode mode = fp_p ? DFmode : DImode;

      tmp = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, tmp,
                              gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));

      op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;

  /* Return the setcc comparison.  */
  return gen_rtx_fmt_ee (code, DImode, op0, op1);
/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are non-zero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

alpha_emit_conditional_move (cmp, mode)
     enum machine_mode mode;

  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = alpha_compare.op0;
  rtx op1 = alpha_compare.op1;
  int fp_p = alpha_compare.fp_p;
  enum machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
  enum machine_mode cmov_mode = VOIDmode;
  int local_fast_math = flag_unsafe_math_optimizations;

  /* Zero the operands.  */
  memset (&alpha_compare, 0, sizeof (alpha_compare));

  if (fp_p != FLOAT_MODE_P (mode))

      enum rtx_code cmp_code;

      /* If we have fp<->int register move instructions, do a cmov by
         performing the comparison in fp registers, and move the
         zero/non-zero value to integer registers, where we can then
         use a normal cmov, or vice-versa.  */

        case EQ: case LE: case LT: case LEU: case LTU:
          /* We have these compares.  */
          cmp_code = code, code = NE;

          /* This must be reversed.  */
          cmp_code = EQ, code = EQ;

        case GE: case GT: case GEU: case GTU:
          /* These normally need swapping, but for integer zero we have
             special patterns that recognize swapped operands.  */
          if (!fp_p && op1 == const0_rtx)
            cmp_code = code, code = NE;

              cmp_code = swap_condition (code);
              tem = op0, op0 = op1, op1 = tem;

      tem = gen_reg_rtx (cmp_op_mode);
      emit_insn (gen_rtx_SET (VOIDmode, tem,
                              gen_rtx_fmt_ee (cmp_code, cmp_op_mode,

      cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
      op0 = gen_lowpart (cmp_op_mode, tem);
      op1 = CONST0_RTX (cmp_op_mode);

      local_fast_math = 1;

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, VOIDmode)
      && (!fp_p || local_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside the conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */

    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares: */

      /* This must be reversed.  */
      code = reverse_condition (code);

    case GE:  case GT:  case GEU:  case GTU:
      /* These must be swapped.  */
      if (op1 != CONST0_RTX (cmp_mode))

          code = swap_condition (code);
          tem = op0, op0 = op1, op1 = tem;

  if (!reg_or_0_operand (op0, DImode))
    op0 = force_reg (DImode, op0);
  if (!reg_or_8bit_operand (op1, DImode))
    op1 = force_reg (DImode, op1);

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (fp_p && !local_fast_math)

  tem = gen_reg_rtx (cmp_op_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
/* Simplify a conditional move of two constants into a setcc with
   arithmetic.  This is done with a splitter since combine would
   just undo the work if done during code generation.  It also catches
   cases we wouldn't have before cse.  */

alpha_split_conditional_move (code, dest, cond, t_rtx, f_rtx)
     rtx dest, cond, t_rtx, f_rtx;

  HOST_WIDE_INT t, f, diff;
  enum machine_mode mode;
  rtx target, subtarget, tmp;

  mode = GET_MODE (dest);

  if (((code == NE || code == EQ) && diff < 0)
      || (code == GE || code == GT))

      code = reverse_condition (code);
      diff = t, t = f, f = diff;

  subtarget = target = dest;

      target = gen_lowpart (DImode, dest);
      if (! no_new_pseudos)
        subtarget = gen_reg_rtx (DImode);

  if (f == 0 && exact_log2 (diff) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
         viable over a longer latency cmove.  On EV5, the E0 slot is a
         scarce resource, and on EV4 shift has the same latency as a cmove.  */
      && (diff <= 8 || alpha_cpu == PROCESSOR_EV6))

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, subtarget, tmp));

      tmp = gen_rtx_ASHIFT (DImode, subtarget, GEN_INT (exact_log2 (t)));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));

  else if (f == 0 && t == -1)

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, subtarget, tmp));

      emit_insn (gen_negdi2 (target, subtarget));

  else if (diff == 1 || diff == 4 || diff == 8)

      tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, subtarget, tmp));

        emit_insn (gen_adddi3 (target, subtarget, GEN_INT (f)));

          add_op = GEN_INT (f);
          if (sext_add_operand (add_op, mode))

              tmp = gen_rtx_MULT (DImode, subtarget, GEN_INT (diff));
              tmp = gen_rtx_PLUS (DImode, tmp, add_op);
              emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
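
/* Illustrative aside (not part of the original file): the arithmetic the
   splitter above relies on, shown on host integers.  With p = (cond != 0)
   being 0 or 1, "cond ? t : 0" for t a power of two is p << log2(t),
   "cond ? -1 : 0" is -p, and the general two-constant case is
   p * (t - f) + f, which lines up with scaled-add insns when t - f is 4
   or 8.  The name and types are assumptions made only for this sketch.  */
static long long
cmov_const_sketch (int cond, long long t, long long f)
{
  long long p = (cond != 0);

  /* The generic form; the shift, negate, and scaled-add cases above are
     algebraic shortcuts for particular values of T and F.  */
  return p * (t - f) + f;
}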
/* Look up the function X_floating library function name for the

alpha_lookup_xfloating_lib_func (code)

  const enum rtx_code code;
  const char *const func;

  static const struct xfloating_op vms_xfloating_ops[] =
    { PLUS, "OTS$ADD_X" },
    { MINUS, "OTS$SUB_X" },
    { MULT, "OTS$MUL_X" },
    { DIV, "OTS$DIV_X" },
    { EQ, "OTS$EQL_X" },
    { NE, "OTS$NEQ_X" },
    { LT, "OTS$LSS_X" },
    { LE, "OTS$LEQ_X" },
    { GT, "OTS$GTR_X" },
    { GE, "OTS$GEQ_X" },
    { FIX, "OTS$CVTXQ" },
    { FLOAT, "OTS$CVTQX" },
    { UNSIGNED_FLOAT, "OTS$CVTQUX" },
    { FLOAT_EXTEND, "OTS$CVT_FLOAT_T_X" },
    { FLOAT_TRUNCATE, "OTS$CVT_FLOAT_X_T" },

  static const struct xfloating_op osf_xfloating_ops[] =
    { PLUS, "_OtsAddX" },
    { MINUS, "_OtsSubX" },
    { MULT, "_OtsMulX" },
    { DIV, "_OtsDivX" },
    { FIX, "_OtsCvtXQ" },
    { FLOAT, "_OtsCvtQX" },
    { UNSIGNED_FLOAT, "_OtsCvtQUX" },
    { FLOAT_EXTEND, "_OtsConvertFloatTX" },
    { FLOAT_TRUNCATE, "_OtsConvertFloatXT" },

  const struct xfloating_op *ops;
  const long n = ARRAY_SIZE (osf_xfloating_ops);

  /* How irritating.  Nothing to key off for the table.  Hardcode
     knowledge of the G_floating routines.  */
  if (TARGET_FLOAT_VAX)

      if (TARGET_ABI_OPEN_VMS)

          if (code == FLOAT_EXTEND)
            return "OTS$CVT_FLOAT_G_X";
          if (code == FLOAT_TRUNCATE)
            return "OTS$CVT_FLOAT_X_G";

          if (code == FLOAT_EXTEND)
            return "_OtsConvertFloatGX";
          if (code == FLOAT_TRUNCATE)
            return "_OtsConvertFloatXG";

  if (TARGET_ABI_OPEN_VMS)
    ops = vms_xfloating_ops;
    ops = osf_xfloating_ops;

  for (i = 0; i < n; ++i)
    if (ops[i].code == code)

/* Most X_floating operations take the rounding mode as an argument.
   Compute that here.  */

alpha_compute_xfloating_mode_arg (code, round)
     enum alpha_fp_rounding_mode round;

    case ALPHA_FPRM_NORM:
    case ALPHA_FPRM_MINF:
    case ALPHA_FPRM_CHOP:
    case ALPHA_FPRM_DYN:

  /* XXX For reference, round to +inf is mode = 3.  */

  if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
/* Emit an X_floating library function call.

   Note that these functions do not follow normal calling conventions:
   TFmode arguments are passed in two integer registers (as opposed to
   indirect); TFmode return values appear in R16+R17.

   FUNC is the function name to call.
   TARGET is where the output belongs.
   OPERANDS are the inputs.
   NOPERANDS is the count of inputs.
   EQUIV is the expression equivalent for the function.  */

alpha_emit_xfloating_libcall (func, target, operands, noperands, equiv)

  rtx usage = NULL_RTX, tmp, reg;

  for (i = 0; i < noperands; ++i)

      switch (GET_MODE (operands[i]))

          reg = gen_rtx_REG (TFmode, regno);

          reg = gen_rtx_REG (DFmode, regno + 32);

          if (GET_CODE (operands[i]) != CONST_INT)

          reg = gen_rtx_REG (DImode, regno);

      emit_move_insn (reg, operands[i]);
      usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);

  switch (GET_MODE (target))

      reg = gen_rtx_REG (TFmode, 16);

      reg = gen_rtx_REG (DFmode, 32);

      reg = gen_rtx_REG (DImode, 0);

  tmp = gen_rtx_MEM (QImode, gen_rtx_SYMBOL_REF (Pmode, (char *) func));
  tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
                                        const0_rtx, const0_rtx));
  CALL_INSN_FUNCTION_USAGE (tmp) = usage;

  emit_libcall_block (tmp, target, reg, equiv);
/* Emit an X_floating library function call for arithmetic (+,-,*,/).  */

alpha_emit_xfloating_arith (code, operands)

  rtx out_operands[3];

  func = alpha_lookup_xfloating_lib_func (code);
  mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);

  out_operands[0] = operands[1];
  out_operands[1] = operands[2];
  out_operands[2] = GEN_INT (mode);
  alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
                                gen_rtx_fmt_ee (code, TFmode, operands[1],

/* Emit an X_floating library function call for a comparison.  */

alpha_emit_xfloating_compare (code, op0, op1)

  rtx out, operands[2];

  func = alpha_lookup_xfloating_lib_func (code);

  out = gen_reg_rtx (DImode);

  /* ??? Strange mode for equiv because what's actually returned
     is -1,0,1, not a proper boolean value.  */
  alpha_emit_xfloating_libcall (func, out, operands, 2,
                                gen_rtx_fmt_ee (code, CCmode, op0, op1));

/* Emit an X_floating library function call for a conversion.  */

alpha_emit_xfloating_cvt (code, operands)

  int noperands = 1, mode;
  rtx out_operands[2];

  func = alpha_lookup_xfloating_lib_func (code);

  out_operands[0] = operands[1];

      mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
      out_operands[1] = GEN_INT (mode);

    case FLOAT_TRUNCATE:
      mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
      out_operands[1] = GEN_INT (mode);

  alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
                                gen_rtx_fmt_e (code, GET_MODE (operands[0]),
/* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
   OP[0] into OP[0,1].  Naturally, output operand ordering is

alpha_split_tfmode_pair (operands)

  if (GET_CODE (operands[1]) == REG)

      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));

  else if (GET_CODE (operands[1]) == MEM)

      operands[3] = adjust_address (operands[1], DImode, 8);
      operands[2] = adjust_address (operands[1], DImode, 0);

  else if (operands[1] == CONST0_RTX (TFmode))
    operands[2] = operands[3] = const0_rtx;

  if (GET_CODE (operands[0]) == REG)

      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));

  else if (GET_CODE (operands[0]) == MEM)

      operands[1] = adjust_address (operands[0], DImode, 8);
      operands[0] = adjust_address (operands[0], DImode, 0);

/* Implement negtf2 or abstf2.  Op0 is destination, op1 is source,
   op2 is a register containing the sign bit, operation is the
   logical operation to be performed.  */

alpha_split_tfmode_frobsign (operands, operation)
     rtx (*operation) PARAMS ((rtx, rtx, rtx));

  rtx high_bit = operands[2];

  alpha_split_tfmode_pair (operands);

  /* Detect three flavours of operand overlap.  */

  if (rtx_equal_p (operands[0], operands[2]))
  else if (rtx_equal_p (operands[1], operands[2]))

      if (rtx_equal_p (operands[0], high_bit))

        emit_move_insn (operands[0], operands[2]);

  /* ??? If the destination overlaps both source tf and high_bit, then
     assume source tf is dead in its entirety and use the other half
     for a scratch register.  Otherwise "scratch" is just the proper
     destination register.  */
  scratch = operands[move < 2 ? 1 : 3];

  emit_insn ((*operation) (scratch, high_bit, operands[3]));

      emit_move_insn (operands[0], operands[2]);

  emit_move_insn (operands[1], scratch);
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting

   word:  ldq_u  r1,X(r11)          ldq_u  r1,X(r11)
          ldq_u  r2,X+1(r11)        ldq_u  r2,X+1(r11)
          lda    r3,X(r11)          lda    r3,X+2(r11)
          extwl  r1,r3,r1           extql  r1,r3,r1
          extwh  r2,r3,r2           extqh  r2,r3,r2
          or     r1,r2,r1           or     r1,r2,r1

   long:  ldq_u  r1,X(r11)          ldq_u  r1,X(r11)
          ldq_u  r2,X+3(r11)        ldq_u  r2,X+3(r11)
          lda    r3,X(r11)          lda    r3,X(r11)
          extll  r1,r3,r1           extll  r1,r3,r1
          extlh  r2,r3,r2           extlh  r2,r3,r2
          or     r1,r2,r1           addl   r1,r2,r1

   quad:  ldq_u  r1,X(r11)
alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
     HOST_WIDE_INT size, ofs;

  rtx meml, memh, addr, extl, exth, tmp, mema;
  enum machine_mode mode;

  meml = gen_reg_rtx (DImode);
  memh = gen_reg_rtx (DImode);
  addr = gen_reg_rtx (DImode);
  extl = gen_reg_rtx (DImode);
  exth = gen_reg_rtx (DImode);

  mema = XEXP (mem, 0);
  if (GET_CODE (mema) == LO_SUM)
    mema = force_reg (Pmode, mema);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  tmp = change_address (mem, DImode,
                        gen_rtx_AND (DImode,
                                     plus_constant (mema, ofs),
  set_mem_alias_set (tmp, 0);
  emit_move_insn (meml, tmp);

  tmp = change_address (mem, DImode,
                        gen_rtx_AND (DImode,
                                     plus_constant (mema, ofs + size - 1),
  set_mem_alias_set (tmp, 0);
  emit_move_insn (memh, tmp);

  if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))

      emit_move_insn (addr, plus_constant (mema, -1));

      emit_insn (gen_extqh_be (extl, meml, addr));
      emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));

      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
                           addr, 1, OPTAB_WIDEN);

  else if (sign && size == 2)

      emit_move_insn (addr, plus_constant (mema, ofs+2));

      emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
      emit_insn (gen_extqh_le (exth, memh, addr));

      /* We must use tgt here for the target.  Alpha-vms port fails if we use
         addr for the target, because addr is marked as a pointer and combine
         knows that pointers are always sign-extended 32 bit values.  */
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
                           addr, 1, OPTAB_WIDEN);

      if (WORDS_BIG_ENDIAN)

          emit_move_insn (addr, plus_constant (mema, ofs+size-1));

              emit_insn (gen_extwh_be (extl, meml, addr));

              emit_insn (gen_extlh_be (extl, meml, addr));

              emit_insn (gen_extqh_be (extl, meml, addr));

          emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));

          emit_move_insn (addr, plus_constant (mema, ofs));
          emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));

              emit_insn (gen_extwh_le (exth, memh, addr));

              emit_insn (gen_extlh_le (exth, memh, addr));

              emit_insn (gen_extqh_le (exth, memh, addr));

      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
                           gen_lowpart (mode, exth), gen_lowpart (mode, tgt),

    emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
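
/* Illustrative aside (not part of the original file): the extql/extqh idea
   in portable C.  LO_QUAD and HI_QUAD stand for the two aligned quadwords
   covering an unaligned little-endian 64-bit datum and OFS for (addr & 7);
   the low part is shifted down by the byte offset, the straddling bytes
   are shifted up by the complement, and the halves are ORed.  The name,
   parameters, and fixed-width type are assumptions for the sketch only.  */
static unsigned long long
unaligned_load_sketch (unsigned long long lo_quad, unsigned long long hi_quad,
                       unsigned int ofs)
{
  /* extql: drop the low OFS bytes of the first quadword.  */
  unsigned long long lo = lo_quad >> (8 * ofs);
  /* extqh: bring down the bytes that spilled into the second quadword.
     With OFS == 0 nothing spills; the hardware extqh is a noop there,
     which is why the word-sized sequences below need a cmov to zero it.  */
  unsigned long long hi = ofs ? hi_quad << (8 * (8 - ofs)) : 0;

  return lo | hi;
}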
/* Similarly, use ins and msk instructions to perform unaligned stores.  */

alpha_expand_unaligned_store (dst, src, size, ofs)
     HOST_WIDE_INT size, ofs;

  rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;

  dstl = gen_reg_rtx (DImode);
  dsth = gen_reg_rtx (DImode);
  insl = gen_reg_rtx (DImode);
  insh = gen_reg_rtx (DImode);

  dsta = XEXP (dst, 0);
  if (GET_CODE (dsta) == LO_SUM)
    dsta = force_reg (Pmode, dsta);

  /* AND addresses cannot be in any alias set, since they may implicitly
     alias surrounding code.  Ideally we'd have some alias set that
     covered all types except those with alignment 8 or higher.  */

  meml = change_address (dst, DImode,
                         gen_rtx_AND (DImode,
                                      plus_constant (dsta, ofs),
  set_mem_alias_set (meml, 0);

  memh = change_address (dst, DImode,
                         gen_rtx_AND (DImode,
                                      plus_constant (dsta, ofs + size - 1),
  set_mem_alias_set (memh, 0);

  emit_move_insn (dsth, memh);
  emit_move_insn (dstl, meml);
  if (WORDS_BIG_ENDIAN)

      addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));

      if (src != const0_rtx)

              emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));

              emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));

              emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));

          emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
                                GEN_INT (size*8), addr));

          emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));

          emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffffffff), addr));

#if HOST_BITS_PER_WIDE_INT == 32
            rtx msk = immed_double_const (0xffffffff, 0xffffffff, DImode);
            rtx msk = constm1_rtx;
            emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));

      emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));

      addr = copy_addr_to_reg (plus_constant (dsta, ofs));

      if (src != const0_rtx)

          emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
                                GEN_INT (size*8), addr));

              emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));

              emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));

              emit_insn (gen_insql_le (insl, src, addr));

      emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));

          emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));

          emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffffffff), addr));

#if HOST_BITS_PER_WIDE_INT == 32
            rtx msk = immed_double_const (0xffffffff, 0xffffffff, DImode);
            rtx msk = constm1_rtx;
            emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));

  if (src != const0_rtx)

      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);

  if (WORDS_BIG_ENDIAN)

      emit_move_insn (meml, dstl);
      emit_move_insn (memh, dsth);

      /* Must store high before low for degenerate case of aligned.  */
      emit_move_insn (memh, dsth);
      emit_move_insn (meml, dstl);
/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning, that this speeds copying to/from aligned and unaligned
   buffers, and that it makes the code significantly easier to write.  */

#define MAX_MOVE_WORDS 8

/* Load an integral number of consecutive unaligned quadwords.  */

alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
     HOST_WIDE_INT words, ofs;

  rtx const im8 = GEN_INT (-8);
  rtx const i64 = GEN_INT (64);
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
  rtx sreg, areg, tmp, smema;

  smema = XEXP (smem, 0);
  if (GET_CODE (smema) == LO_SUM)
    smema = force_reg (Pmode, smema);

  /* Generate all the tmp registers we need.  */
  for (i = 0; i < words; ++i)

      data_regs[i] = out_regs[i];
      ext_tmps[i] = gen_reg_rtx (DImode);

  data_regs[words] = gen_reg_rtx (DImode);

    smem = adjust_address (smem, GET_MODE (smem), ofs);

  /* Load up all of the source data.  */
  for (i = 0; i < words; ++i)

      tmp = change_address (smem, DImode,
                            gen_rtx_AND (DImode,
                                         plus_constant (smema, 8*i),
      set_mem_alias_set (tmp, 0);
      emit_move_insn (data_regs[i], tmp);

  tmp = change_address (smem, DImode,
                        gen_rtx_AND (DImode,
                                     plus_constant (smema, 8*words - 1),
  set_mem_alias_set (tmp, 0);
  emit_move_insn (data_regs[words], tmp);

  /* Extract the half-word fragments.  Unfortunately DEC decided to make
     extxh with offset zero a noop instead of zeroing the register, so
     we must take care of that edge condition ourselves with cmov.  */

  sreg = copy_addr_to_reg (smema);
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (sreg, plus_constant (sreg, 7));
  for (i = 0; i < words; ++i)

      if (WORDS_BIG_ENDIAN)

          emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
          emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));

          emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
          emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));

      emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
                              gen_rtx_IF_THEN_ELSE (DImode,
                                                    gen_rtx_EQ (DImode, areg,
                                                    const0_rtx, ext_tmps[i])));

  /* Merge the half-words into whole words.  */
  for (i = 0; i < words; ++i)
    out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
                                ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
   may be NULL to store zeros.  */

alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
     HOST_WIDE_INT words, ofs;

  rtx const im8 = GEN_INT (-8);
  rtx const i64 = GEN_INT (64);
#if HOST_BITS_PER_WIDE_INT == 32
  rtx const im1 = immed_double_const (0xffffffff, 0xffffffff, DImode);
  rtx const im1 = constm1_rtx;
  rtx ins_tmps[MAX_MOVE_WORDS];
  rtx st_tmp_1, st_tmp_2, dreg;
  rtx st_addr_1, st_addr_2, dmema;

  dmema = XEXP (dmem, 0);
  if (GET_CODE (dmema) == LO_SUM)
    dmema = force_reg (Pmode, dmema);

  /* Generate all the tmp registers we need.  */
  if (data_regs != NULL)
    for (i = 0; i < words; ++i)
      ins_tmps[i] = gen_reg_rtx (DImode);
  st_tmp_1 = gen_reg_rtx (DImode);
  st_tmp_2 = gen_reg_rtx (DImode);

    dmem = adjust_address (dmem, GET_MODE (dmem), ofs);

  st_addr_2 = change_address (dmem, DImode,
                              gen_rtx_AND (DImode,
                                           plus_constant (dmema, words*8 - 1),
  set_mem_alias_set (st_addr_2, 0);

  st_addr_1 = change_address (dmem, DImode,
                              gen_rtx_AND (DImode, dmema, im8));
  set_mem_alias_set (st_addr_1, 0);

  /* Load up the destination end bits.  */
  emit_move_insn (st_tmp_2, st_addr_2);
  emit_move_insn (st_tmp_1, st_addr_1);

  /* Shift the input data into place.  */
  dreg = copy_addr_to_reg (dmema);
  if (WORDS_BIG_ENDIAN)
    emit_move_insn (dreg, plus_constant (dreg, 7));
  if (data_regs != NULL)

      for (i = words-1; i >= 0; --i)

          if (WORDS_BIG_ENDIAN)

              emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
              emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));

              emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
              emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));

      for (i = words-1; i > 0; --i)

          ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
                                        ins_tmps[i-1], ins_tmps[i-1], 1,

  /* Split and merge the ends with the destination data.  */
  if (WORDS_BIG_ENDIAN)

      emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, im1, dreg));
      emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));

      emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
      emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, im1, dreg));

  if (data_regs != NULL)

      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
                               st_tmp_2, 1, OPTAB_WIDEN);
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
                               st_tmp_1, 1, OPTAB_WIDEN);

  if (WORDS_BIG_ENDIAN)
    emit_move_insn (st_addr_1, st_tmp_1);
    emit_move_insn (st_addr_2, st_tmp_2);
  for (i = words-1; i > 0; --i)

      rtx tmp = change_address (dmem, DImode,
                                gen_rtx_AND (DImode,
                                             plus_constant (dmema,
                                             WORDS_BIG_ENDIAN ? i*8-1 : i*8),
      set_mem_alias_set (tmp, 0);
      emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);

  if (WORDS_BIG_ENDIAN)
    emit_move_insn (st_addr_2, st_tmp_2);
    emit_move_insn (st_addr_1, st_tmp_1);
/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

alpha_expand_block_move (operands)

  rtx bytes_rtx = operands[2];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT dst_align = src_align;
  rtx orig_src = operands[1];
  rtx orig_dst = operands[0];
  rtx data_regs[2 * MAX_MOVE_WORDS + 16];
  unsigned int i, words, ofs, nregs = 0;

  if (orig_bytes <= 0)
  else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)

  /* Look for additional alignment information from recorded register info.  */

  tmp = XEXP (orig_src, 0);
  if (GET_CODE (tmp) == REG)
    src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
           && GET_CODE (XEXP (tmp, 0)) == REG
           && GET_CODE (XEXP (tmp, 1)) == CONST_INT)

      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

          if (a >= 64 && c % 8 == 0)
          else if (a >= 32 && c % 4 == 0)
          else if (a >= 16 && c % 2 == 0)

  tmp = XEXP (orig_dst, 0);
  if (GET_CODE (tmp) == REG)
    dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
           && GET_CODE (XEXP (tmp, 0)) == REG
           && GET_CODE (XEXP (tmp, 1)) == CONST_INT)

      unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

          if (a >= 64 && c % 8 == 0)
          else if (a >= 32 && c % 4 == 0)
          else if (a >= 16 && c % 2 == 0)

  /* Load the entire block into registers.  */
  if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)

      enum machine_mode mode;

      tmp = XEXP (XEXP (orig_src, 0), 0);

      /* Don't use the existing register if we're reading more than
         is held in the register.  Nor if there is not a mode that
         handles the exact size.  */
      mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
          && GET_MODE_SIZE (GET_MODE (tmp)) >= bytes)

              data_regs[nregs] = gen_lowpart (DImode, tmp);
              data_regs[nregs + 1] = gen_highpart (DImode, tmp);

            data_regs[nregs++] = gen_lowpart (mode, tmp);

      /* No appropriate mode; fall back on memory.  */
      orig_src = replace_equiv_address (orig_src,
                                        copy_addr_to_reg (XEXP (orig_src, 0)));
      src_align = GET_MODE_BITSIZE (GET_MODE (tmp));

  if (src_align >= 64 && bytes >= 8)

      for (i = 0; i < words; ++i)
        data_regs[nregs + i] = gen_reg_rtx (DImode);

      for (i = 0; i < words; ++i)
        emit_move_insn (data_regs[nregs + i],
                        adjust_address (orig_src, DImode, ofs + i * 8));

  if (src_align >= 32 && bytes >= 4)

      for (i = 0; i < words; ++i)
        data_regs[nregs + i] = gen_reg_rtx (SImode);

      for (i = 0; i < words; ++i)
        emit_move_insn (data_regs[nregs + i],
                        adjust_address (orig_src, SImode, ofs + i * 4));

      for (i = 0; i < words+1; ++i)
        data_regs[nregs + i] = gen_reg_rtx (DImode);

      alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,

  if (! TARGET_BWX && bytes >= 4)

      data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
      alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);

      if (src_align >= 16)

            data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
            emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));

        } while (bytes >= 2);

      else if (! TARGET_BWX)

          data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
          alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);

      data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
      emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));

  if (nregs > ARRAY_SIZE (data_regs))

  /* Now save it back out again.  */

  if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)

      enum machine_mode mode;
      tmp = XEXP (XEXP (orig_dst, 0), 0);

      mode = mode_for_size (orig_bytes * BITS_PER_UNIT, MODE_INT, 1);
      if (GET_MODE (tmp) == mode)

              emit_move_insn (tmp, data_regs[0]);

          else if (nregs == 2 && mode == TImode)

              /* Undo the subregging done above when copying between
                 two TImode registers.  */
              if (GET_CODE (data_regs[0]) == SUBREG
                  && GET_MODE (SUBREG_REG (data_regs[0])) == TImode)
                emit_move_insn (tmp, SUBREG_REG (data_regs[0]));

                  emit_move_insn (gen_lowpart (DImode, tmp), data_regs[0]);
                  emit_move_insn (gen_highpart (DImode, tmp), data_regs[1]);

                  emit_no_conflict_block (seq, tmp, data_regs[0],
                                          data_regs[1], NULL_RTX);

      /* ??? If nregs > 1, consider reconstructing the word in regs.  */
      /* ??? Optimize mode < dst_mode with strict_low_part.  */

      /* No appropriate mode; fall back on memory.  We can speed things
         up by recognizing extra alignment information.  */
      orig_dst = replace_equiv_address (orig_dst,
                                        copy_addr_to_reg (XEXP (orig_dst, 0)));
      dst_align = GET_MODE_BITSIZE (GET_MODE (tmp));

  /* Write out the data in whatever chunks reading the source allowed.  */
  if (dst_align >= 64)

      while (i < nregs && GET_MODE (data_regs[i]) == DImode)

          emit_move_insn (adjust_address (orig_dst, DImode, ofs),

  if (dst_align >= 32)

      /* If the source has remaining DImode regs, write them out in
      while (i < nregs && GET_MODE (data_regs[i]) == DImode)

          tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (adjust_address (orig_dst, SImode, ofs),
                          gen_lowpart (SImode, data_regs[i]));
          emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
                          gen_lowpart (SImode, tmp));

      while (i < nregs && GET_MODE (data_regs[i]) == SImode)

          emit_move_insn (adjust_address (orig_dst, SImode, ofs),

  if (i < nregs && GET_MODE (data_regs[i]) == DImode)

      /* Write out a remaining block of words using unaligned methods.  */

      for (words = 1; i + words < nregs; words++)
        if (GET_MODE (data_regs[i + words]) != DImode)

        alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
        alpha_expand_unaligned_store_words (data_regs + i, orig_dst,

  /* Due to the above, this won't be aligned.  */
  /* ??? If we have more than one of these, consider constructing full
     words in registers and using alpha_expand_unaligned_store_words.  */
  while (i < nregs && GET_MODE (data_regs[i]) == SImode)

      alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);

  if (dst_align >= 16)
    while (i < nregs && GET_MODE (data_regs[i]) == HImode)

        emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);

    while (i < nregs && GET_MODE (data_regs[i]) == HImode)

        alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);

  while (i < nregs && GET_MODE (data_regs[i]) == QImode)

      emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
alpha_expand_block_clear (operands)

  rtx bytes_rtx = operands[1];
  rtx align_rtx = operands[2];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
  HOST_WIDE_INT alignofs = 0;
  rtx orig_dst = operands[0];
  int i, words, ofs = 0;

  if (orig_bytes <= 0)
  if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)

  /* Look for stricter alignment.  */
  tmp = XEXP (orig_dst, 0);
  if (GET_CODE (tmp) == REG)
    align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  else if (GET_CODE (tmp) == PLUS
           && GET_CODE (XEXP (tmp, 0)) == REG
           && GET_CODE (XEXP (tmp, 1)) == CONST_INT)

      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

            align = a, alignofs = 8 - c % 8;
            align = a, alignofs = 4 - c % 4;
            align = a, alignofs = 2 - c % 2;

  else if (GET_CODE (tmp) == ADDRESSOF)

      enum machine_mode mode;

      mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
      if (GET_MODE (XEXP (tmp, 0)) == mode)

          emit_move_insn (XEXP (tmp, 0), const0_rtx);

      /* No appropriate mode; fall back on memory.  */
      orig_dst = replace_equiv_address (orig_dst, copy_addr_to_reg (tmp));
      align = GET_MODE_BITSIZE (GET_MODE (XEXP (tmp, 0)));

  /* Handle an unaligned prefix first.  */

#if HOST_BITS_PER_WIDE_INT >= 64
      /* Given that alignofs is bounded by align, the only time BWX could
         generate three stores is for a 7 byte fill.  Prefer two individual
         stores over a load/mask/store sequence.  */
      if ((!TARGET_BWX || alignofs == 7)
          && !(alignofs == 4 && bytes >= 4))

          enum machine_mode mode = (align >= 64 ? DImode : SImode);
          int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;

          mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
          set_mem_alias_set (mem, 0);

          mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
          if (bytes < alignofs)

              mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);

          tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
                              NULL_RTX, 1, OPTAB_WIDEN);

            emit_move_insn (mem, tmp);

      if (TARGET_BWX && (alignofs & 1) && bytes >= 1)

          emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);

      if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)

          emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);

      if (alignofs == 4 && bytes >= 4)

          emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);

      /* If we've not used the extra lead alignment information by now,
         we won't be able to.  Downgrade align to match what's left over.  */

          alignofs = alignofs & -alignofs;
          align = MIN (align, alignofs * BITS_PER_UNIT);

  /* Handle a block of contiguous long-words.  */

  if (align >= 64 && bytes >= 8)

      for (i = 0; i < words; ++i)
        emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),

  /* If the block is large and appropriately aligned, emit a single
     store followed by a sequence of stq_u insns.  */

  if (align >= 32 && bytes > 16)

      emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);

      orig_dsta = XEXP (orig_dst, 0);
      if (GET_CODE (orig_dsta) == LO_SUM)
        orig_dsta = force_reg (Pmode, orig_dsta);

      for (i = 0; i < words; ++i)

            = change_address (orig_dst, DImode,
                              gen_rtx_AND (DImode,
                                           plus_constant (orig_dsta, ofs + i*8),
          set_mem_alias_set (mem, 0);
          emit_move_insn (mem, const0_rtx);

      /* Depending on the alignment, the first stq_u may have overlapped
         with the initial stl, which means that the last stq_u didn't
         write as much as it would appear.  Leave those questionable bytes
      bytes -= words * 8 - 4;
      ofs += words * 8 - 4;

  /* Handle a smaller block of aligned words.  */

  if ((align >= 64 && bytes == 4)
      || (align == 32 && bytes >= 4))

      for (i = 0; i < words; ++i)
        emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),

  /* An unaligned block uses stq_u stores for as many as possible.  */

      alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);

  /* Next clean up any trailing pieces.  */

#if HOST_BITS_PER_WIDE_INT >= 64
  /* Count the number of bits in BYTES for which aligned stores could
  for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1)

  /* If we have appropriate alignment (and it wouldn't take too many
     instructions otherwise), mask out the bytes we need.  */
  if (TARGET_BWX ? words > 2 : bytes > 0)

          mem = adjust_address (orig_dst, DImode, ofs);
          set_mem_alias_set (mem, 0);

          mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

          tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (mem, tmp);

      else if (align >= 32 && bytes < 4)

          mem = adjust_address (orig_dst, SImode, ofs);
          set_mem_alias_set (mem, 0);

          mask = ~(HOST_WIDE_INT)0 << (bytes * 8);

          tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
                              NULL_RTX, 1, OPTAB_WIDEN);

          emit_move_insn (mem, tmp);

  if (!TARGET_BWX && bytes >= 4)

      alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);

            emit_move_insn (adjust_address (orig_dst, HImode, ofs),

        } while (bytes >= 2);

      else if (! TARGET_BWX)

          alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);

      emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
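
/* Illustrative aside (not part of the original file): the trailing-bytes
   mask used above, evaluated on a host integer.  To clear the low BYTES
   bytes of a word with one read-modify-write, AND it with a mask whose
   low BYTES*8 bits are zero.  The name, the 1..7 range assumption, and
   the "unsigned long long" type are assumptions for this sketch only.  */
static unsigned long long
clear_low_bytes_sketch (unsigned long long word, int bytes)
{
  /* ~0 shifted left by bytes*8 keeps only the bytes we are not clearing;
     BYTES is taken to be between 1 and 7, matching the uses above.  */
  unsigned long long mask = ~0ULL << (bytes * 8);
  return word & mask;
}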
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

alpha_adjust_cost (insn, link, dep_insn, cost)

  enum attr_type insn_type, dep_insn_type;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */

  if (REG_NOTE_KIND (link) != 0)

  /* If we can't recognize the insns, we can't really do anything.  */
  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)

  insn_type = get_attr_type (insn);
  dep_insn_type = get_attr_type (dep_insn);

  /* Bring in the user-defined memory latency.  */
  if (dep_insn_type == TYPE_ILD
      || dep_insn_type == TYPE_FLD
      || dep_insn_type == TYPE_LDSYM)
    cost += alpha_memory_latency-1;

      /* On EV4, if INSN is a store insn and DEP_INSN is setting the data
         being stored, we can sometimes lower the cost.  */

      if ((insn_type == TYPE_IST || insn_type == TYPE_FST)
          && (set = single_set (dep_insn)) != 0
          && GET_CODE (PATTERN (insn)) == SET
          && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))

          switch (dep_insn_type)

              /* No savings here.  */

              /* In these cases, we save one cycle.  */

              /* In all other cases, we save two cycles.  */
              return MAX (0, cost - 2);

      /* Another case that needs adjustment is an arithmetic or logical
         operation.  Its cost is usually one cycle, but we default it to
         two in the MD file.  The only case that it is actually two is
         for the address in loads, stores, and jumps.  */

      if (dep_insn_type == TYPE_IADD || dep_insn_type == TYPE_ILOG)

      /* The final case is when a compare feeds into an integer branch;
         the cost is only one cycle in that case.  */

      if (dep_insn_type == TYPE_ICMP && insn_type == TYPE_IBR)

      /* And the lord DEC saith:  "A special bypass provides an effective
         latency of 0 cycles for an ICMP or ILOG insn producing the test
         operand of an IBR or ICMOV insn."  */

      if ((dep_insn_type == TYPE_ICMP || dep_insn_type == TYPE_ILOG)
          && (set = single_set (dep_insn)) != 0)

          /* A branch only has one input.  This must be it.  */
          if (insn_type == TYPE_IBR)
          /* A conditional move has three, make sure it is the test.  */
          if (insn_type == TYPE_ICMOV
              && GET_CODE (set_src = PATTERN (insn)) == SET
              && GET_CODE (set_src = SET_SRC (set_src)) == IF_THEN_ELSE
              && rtx_equal_p (SET_DEST (set), XEXP (set_src, 0)))

      /* "The multiplier is unable to receive data from IEU bypass paths.
         The instruction issues at the expected time, but its latency is
         increased by the time it takes for the input data to become
         available to the multiplier" -- which happens in pipeline stage
         six, when results are committed to the register file.  */

      if (insn_type == TYPE_IMUL)

          switch (dep_insn_type)

              /* These insns produce their results in pipeline stage five.  */

              /* Other integer insns produce results in pipeline stage four.  */

      /* There is additional latency to move the result of (most) FP
         operations anywhere but the FP register file.  */

      if ((insn_type == TYPE_FST || insn_type == TYPE_FTOI)
          && (dep_insn_type == TYPE_FADD ||
              dep_insn_type == TYPE_FMUL ||
              dep_insn_type == TYPE_FCMOV))

  /* Otherwise, return the default cost.  */
/* Function to initialize the issue rate used by the scheduler.  */

  return (alpha_cpu == PROCESSOR_EV4 ? 2 : 4);

alpha_variable_issue (dump, verbose, insn, cim)
     FILE *dump ATTRIBUTE_UNUSED;
     int verbose ATTRIBUTE_UNUSED;

  if (recog_memoized (insn) < 0 || get_attr_type (insn) == TYPE_MULTI)

/* Register global variables and machine-specific functions with the
   garbage collector.  */

#if TARGET_ABI_UNICOSMK

alpha_init_machine_status (p)

    (struct machine_function *) xcalloc (1, sizeof (struct machine_function));

  p->machine->first_ciw = NULL_RTX;
  p->machine->last_ciw = NULL_RTX;
  p->machine->ciw_count = 0;
  p->machine->addr_list = NULL_RTX;

alpha_mark_machine_status (p)

  struct machine_function *machine = p->machine;

  ggc_mark_rtx (machine->first_ciw);
  ggc_mark_rtx (machine->addr_list);

alpha_free_machine_status (p)

#endif /* TARGET_ABI_UNICOSMK */

/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

alpha_return_addr (count, frame)
     rtx frame ATTRIBUTE_UNUSED;

  return get_hard_reg_initial_val (Pmode, REG_RA);

/* Return or create a pseudo containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

alpha_gp_save_rtx ()

  return get_hard_reg_initial_val (DImode, 29);

alpha_ra_ever_killed ()

#ifdef ASM_OUTPUT_MI_THUNK
  if (current_function_is_thunk)

  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return regs_ever_live[REG_RA];

  push_topmost_sequence ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4795 /* Return the trap mode suffix applicable to the current
4796 instruction, or NULL. */
4799 get_trap_mode_suffix ()
4801 enum attr_trap_suffix s
= get_attr_trap_suffix (current_output_insn
);
4805 case TRAP_SUFFIX_NONE
:
4808 case TRAP_SUFFIX_SU
:
4809 if (alpha_fptm
>= ALPHA_FPTM_SU
)
4813 case TRAP_SUFFIX_SUI
:
4814 if (alpha_fptm
>= ALPHA_FPTM_SUI
)
4818 case TRAP_SUFFIX_V_SV
:
4826 case ALPHA_FPTM_SUI
:
4831 case TRAP_SUFFIX_V_SV_SVI
:
4840 case ALPHA_FPTM_SUI
:
4845 case TRAP_SUFFIX_U_SU_SUI
:
4854 case ALPHA_FPTM_SUI
:
4862 /* Return the rounding mode suffix applicable to the current
4863 instruction, or NULL. */
4866 get_round_mode_suffix ()
4868 enum attr_round_suffix s
= get_attr_round_suffix (current_output_insn
);
4872 case ROUND_SUFFIX_NONE
:
4874 case ROUND_SUFFIX_NORMAL
:
4877 case ALPHA_FPRM_NORM
:
4879 case ALPHA_FPRM_MINF
:
4881 case ALPHA_FPRM_CHOP
:
4883 case ALPHA_FPRM_DYN
:
4888 case ROUND_SUFFIX_C
:
4894 /* Print an operand. Recognize special options, documented below. */
4897 print_operand (file
, x
, code
)
4907 /* Print the assembler name of the current function. */
4908 assemble_name (file
, alpha_fnname
);
4913 const char *trap
= get_trap_mode_suffix ();
4914 const char *round
= get_round_mode_suffix ();
4917 fprintf (file
, (TARGET_AS_SLASH_BEFORE_SUFFIX
? "/%s%s" : "%s%s"),
4918 (trap
? trap
: ""), (round
? round
: ""));
4923 /* Generates single precision instruction suffix. */
4924 fputc ((TARGET_FLOAT_VAX
? 'f' : 's'), file
);
4928 /* Generates double precision instruction suffix. */
4929 fputc ((TARGET_FLOAT_VAX
? 'g' : 't'), file
);
4933 if (alpha_this_literal_sequence_number
== 0)
4934 alpha_this_literal_sequence_number
= alpha_next_sequence_number
++;
4935 fprintf (file
, "%d", alpha_this_literal_sequence_number
);
4939 if (alpha_this_gpdisp_sequence_number
== 0)
4940 alpha_this_gpdisp_sequence_number
= alpha_next_sequence_number
++;
4941 fprintf (file
, "%d", alpha_this_gpdisp_sequence_number
);
    case 'H':
      if (GET_CODE (x) == HIGH)
        output_addr_const (file, XEXP (x, 0));
      else
        output_operand_lossage ("invalid %%H value");
      break;
    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (GET_CODE (x) == REG)
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
      else
        output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (GET_CODE (x) == REG)
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
      else
        output_operand_lossage ("invalid %%R value");
      break;
    case 'N':
      /* Write the 1's complement of a constant.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;
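      /* Worked example for %L (illustrative note): if INTVAL (x) is
         0x12348000, the low half is 0x8000, so the value printed is
         0x8000 - 2*0x8000 == -32768, i.e. the low 16 bits interpreted
         as a signed 16-bit displacement.  */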
    case 'm':
      /* Write mask for ZAP insn.  */
      if (GET_CODE (x) == CONST_DOUBLE)
        {
          HOST_WIDE_INT mask = 0;
          HOST_WIDE_INT value;

          value = CONST_DOUBLE_LOW (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          value = CONST_DOUBLE_HIGH (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << (i + sizeof (int)));

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
        }
      else if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT mask = 0, value = INTVAL (x);

          for (i = 0; i < 8; i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
        }
      else
        output_operand_lossage ("invalid %%m value");
      break;
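      /* Worked example for %m (illustrative note): for the CONST_INT
         0x00ff00ff only bytes 0 and 2 are non-zero, so the byte mask
         printed is 0b00000101 == 5, the mask operand expected by the
         ZAP/ZAPNOT instructions.  */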
5041 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5042 if (GET_CODE (x
) != CONST_INT
5043 || (INTVAL (x
) != 8 && INTVAL (x
) != 16
5044 && INTVAL (x
) != 32 && INTVAL (x
) != 64))
5045 output_operand_lossage ("invalid %%M value");
5047 fprintf (file
, "%s",
5048 (INTVAL (x
) == 8 ? "b"
5049 : INTVAL (x
) == 16 ? "w"
5050 : INTVAL (x
) == 32 ? "l"
5055 /* Similar, except do it from the mask. */
5056 if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == 0xff)
5057 fprintf (file
, "b");
5058 else if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == 0xffff)
5059 fprintf (file
, "w");
5060 else if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == 0xffffffff)
5061 fprintf (file
, "l");
5062 #if HOST_BITS_PER_WIDE_INT == 32
5063 else if (GET_CODE (x
) == CONST_DOUBLE
5064 && CONST_DOUBLE_HIGH (x
) == 0
5065 && CONST_DOUBLE_LOW (x
) == -1)
5066 fprintf (file
, "l");
5067 else if (GET_CODE (x
) == CONST_DOUBLE
5068 && CONST_DOUBLE_HIGH (x
) == -1
5069 && CONST_DOUBLE_LOW (x
) == -1)
5070 fprintf (file
, "q");
5072 else if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == -1)
5073 fprintf (file
, "q");
5074 else if (GET_CODE (x
) == CONST_DOUBLE
5075 && CONST_DOUBLE_HIGH (x
) == 0
5076 && CONST_DOUBLE_LOW (x
) == -1)
5077 fprintf (file
, "q");
5080 output_operand_lossage ("invalid %%U value");
    case 's':
      /* Write the constant value divided by 8 for little-endian mode or
         (56 - value) / 8 for big-endian mode.  */
      if (GET_CODE (x) != CONST_INT
          || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
                                                     ? 56
                                                     : 64)
          || (INTVAL (x) & 7) != 0)
        output_operand_lossage ("invalid %%s value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               WORDS_BIG_ENDIAN
               ? (56 - INTVAL (x)) / 8
               : INTVAL (x) / 8);
      break;
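      /* Illustration (added note): for the constant 24 this prints
         24/8 == 3 on a little-endian target, or (56 - 24)/8 == 4 when
         WORDS_BIG_ENDIAN.  */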
5101 /* Same, except compute (64 - c) / 8 */
5103 if (GET_CODE (x
) != CONST_INT
5104 && (unsigned HOST_WIDE_INT
) INTVAL (x
) >= 64
5105 && (INTVAL (x
) & 7) != 8)
5106 output_operand_lossage ("invalid %%s value");
5108 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (64 - INTVAL (x
)) / 8);
5113 /* On Unicos/Mk systems: use a DEX expression if the symbol
5114 clashes with a register name. */
5115 int dex
= unicosmk_need_dex (x
);
5117 fprintf (file
, "DEX(%d)", dex
);
5119 output_addr_const (file
, x
);
5123 case 'C': case 'D': case 'c': case 'd':
5124 /* Write out comparison name. */
5126 enum rtx_code c
= GET_CODE (x
);
5128 if (GET_RTX_CLASS (c
) != '<')
5129 output_operand_lossage ("invalid %%C value");
5131 else if (code
== 'D')
5132 c
= reverse_condition (c
);
5133 else if (code
== 'c')
5134 c
= swap_condition (c
);
5135 else if (code
== 'd')
5136 c
= swap_condition (reverse_condition (c
));
5139 fprintf (file
, "ule");
5141 fprintf (file
, "ult");
5142 else if (c
== UNORDERED
)
5143 fprintf (file
, "un");
5145 fprintf (file
, "%s", GET_RTX_NAME (c
));
5150 /* Write the divide or modulus operator. */
5151 switch (GET_CODE (x
))
5154 fprintf (file
, "div%s", GET_MODE (x
) == SImode
? "l" : "q");
5157 fprintf (file
, "div%su", GET_MODE (x
) == SImode
? "l" : "q");
5160 fprintf (file
, "rem%s", GET_MODE (x
) == SImode
? "l" : "q");
5163 fprintf (file
, "rem%su", GET_MODE (x
) == SImode
? "l" : "q");
5166 output_operand_lossage ("invalid %%E value");
5172 /* Write "_u" for unaligned access. */
5173 if (GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) == AND
)
5174 fprintf (file
, "_u");
5178 if (GET_CODE (x
) == REG
)
5179 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
5180 else if (GET_CODE (x
) == MEM
)
5181 output_address (XEXP (x
, 0));
5183 output_addr_const (file
, x
);
5187 output_operand_lossage ("invalid %%xn code");
5192 print_operand_address (file
, addr
)
5197 HOST_WIDE_INT offset
= 0;
5199 if (GET_CODE (addr
) == AND
)
5200 addr
= XEXP (addr
, 0);
5202 if (GET_CODE (addr
) == PLUS
5203 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
5205 offset
= INTVAL (XEXP (addr
, 1));
5206 addr
= XEXP (addr
, 0);
5209 if (GET_CODE (addr
) == LO_SUM
)
5211 output_addr_const (file
, XEXP (addr
, 1));
5215 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
5218 addr
= XEXP (addr
, 0);
5219 if (GET_CODE (addr
) == REG
)
5220 basereg
= REGNO (addr
);
5221 else if (GET_CODE (addr
) == SUBREG
5222 && GET_CODE (SUBREG_REG (addr
)) == REG
)
5223 basereg
= subreg_regno (addr
);
5227 fprintf (file
, "($%d)\t\t!%s", basereg
,
5228 (basereg
== 29 ? "gprel" : "gprellow"));
5232 if (GET_CODE (addr
) == REG
)
5233 basereg
= REGNO (addr
);
5234 else if (GET_CODE (addr
) == SUBREG
5235 && GET_CODE (SUBREG_REG (addr
)) == REG
)
5236 basereg
= subreg_regno (addr
);
5237 else if (GET_CODE (addr
) == CONST_INT
)
5238 offset
= INTVAL (addr
);
5242 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
5243 fprintf (file
, "($%d)", basereg
);
5246 /* Emit RTL insns to initialize the variable parts of a trampoline at
5247 TRAMP. FNADDR is an RTX for the address of the function's pure
5248 code. CXT is an RTX for the static chain value for the function.
5250 The three offset parameters are for the individual template's
5251 layout. A JMPOFS < 0 indicates that the trampoline does not
5252 contain instructions at all.
5254 We assume here that a function will be called many more times than
5255 its address is taken (e.g., it might be passed to qsort), so we
5256 take the trouble to initialize the "hint" field in the JMP insn.
5257 Note that the hint field is PC (new) + 4 * bits 13:0. */
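/* Concretely (an added illustration): if the JMP instruction in the
   trampoline sits at address P and the function entry is at F, the 14-bit
   hint would be ((F - (P + 4)) >> 2) & 0x3fff, which is exactly what the
   (currently disabled) hint code below computes.  */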
5260 alpha_initialize_trampoline (tramp
, fnaddr
, cxt
, fnofs
, cxtofs
, jmpofs
)
5261 rtx tramp
, fnaddr
, cxt
;
5262 int fnofs
, cxtofs
, jmpofs
;
5264 rtx temp
, temp1
, addr
;
5265 /* VMS really uses DImode pointers in memory at this point. */
5266 enum machine_mode mode
= TARGET_ABI_OPEN_VMS
? Pmode
: ptr_mode
;
5268 #ifdef POINTERS_EXTEND_UNSIGNED
5269 fnaddr
= convert_memory_address (mode
, fnaddr
);
5270 cxt
= convert_memory_address (mode
, cxt
);
5273 /* Store function address and CXT. */
5274 addr
= memory_address (mode
, plus_constant (tramp
, fnofs
));
5275 emit_move_insn (gen_rtx_MEM (mode
, addr
), fnaddr
);
5276 addr
= memory_address (mode
, plus_constant (tramp
, cxtofs
));
5277 emit_move_insn (gen_rtx_MEM (mode
, addr
), cxt
);
5279 /* This has been disabled since the hint only has a 32k range, and in
5280 no existing OS is the stack within 32k of the text segment. */
5281 if (0 && jmpofs
>= 0)
5283 /* Compute hint value. */
5284 temp
= force_operand (plus_constant (tramp
, jmpofs
+4), NULL_RTX
);
5285 temp
= expand_binop (DImode
, sub_optab
, fnaddr
, temp
, temp
, 1,
5287 temp
= expand_shift (RSHIFT_EXPR
, Pmode
, temp
,
5288 build_int_2 (2, 0), NULL_RTX
, 1);
5289 temp
= expand_and (gen_lowpart (SImode
, temp
), GEN_INT (0x3fff), 0);
5291 /* Merge in the hint. */
5292 addr
= memory_address (SImode
, plus_constant (tramp
, jmpofs
));
5293 temp1
= force_reg (SImode
, gen_rtx_MEM (SImode
, addr
));
5294 temp1
= expand_and (temp1
, GEN_INT (0xffffc000), NULL_RTX
);
5295 temp1
= expand_binop (SImode
, ior_optab
, temp1
, temp
, temp1
, 1,
5297 emit_move_insn (gen_rtx_MEM (SImode
, addr
), temp1
);
5300 #ifdef TRANSFER_FROM_TRAMPOLINE
5301 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__enable_execute_stack"),
5302 0, VOIDmode
, 1, addr
, Pmode
);
5306 emit_insn (gen_imb ());
5309 /* Determine where to put an argument to a function.
5310 Value is zero to push the argument on the stack,
5311 or a hard register in which to store the argument.
5313 MODE is the argument's machine mode.
5314 TYPE is the data type of the argument (as a tree).
5315 This is null for libcalls where that information may
5317 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5318 the preceding args and about the function being called.
5319 NAMED is nonzero if this argument is a named parameter
5320 (otherwise it is an extra parameter matching an ellipsis).
5322 On Alpha the first 6 words of args are normally in registers
5323 and the rest are pushed. */
5326 function_arg (cum
, mode
, type
, named
)
5327 CUMULATIVE_ARGS cum
;
5328 enum machine_mode mode
;
5330 int named ATTRIBUTE_UNUSED
;
5335 /* Set up defaults for FP operands passed in FP registers, and
5336 integral operands passed in integer registers. */
5338 && (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
5339 || GET_MODE_CLASS (mode
) == MODE_FLOAT
))
5344 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5345 the three platforms, so we can't avoid conditional compilation. */
5346 #if TARGET_ABI_OPEN_VMS
5348 if (mode
== VOIDmode
)
5349 return alpha_arg_info_reg_val (cum
);
5351 num_args
= cum
.num_args
;
5352 if (num_args
>= 6 || MUST_PASS_IN_STACK (mode
, type
))
5356 #if TARGET_ABI_UNICOSMK
5360 /* If this is the last argument, generate the call info word (CIW). */
      /* ??? We don't include the caller's line number in the CIW because
	 I don't know how to determine it if debug info is turned off.  */
5363 if (mode
== VOIDmode
)
5372 for (i
= 0; i
< cum
.num_reg_words
&& i
< 5; i
++)
5373 if (cum
.reg_args_type
[i
])
5374 lo
|= (1 << (7 - i
));
5376 if (cum
.num_reg_words
== 6 && cum
.reg_args_type
[5])
5379 lo
|= cum
.num_reg_words
;
5381 #if HOST_BITS_PER_WIDE_INT == 32
5382 hi
= (cum
.num_args
<< 20) | cum
.num_arg_words
;
5384 lo
= lo
| ((HOST_WIDE_INT
) cum
.num_args
<< 52)
5385 | ((HOST_WIDE_INT
) cum
.num_arg_words
<< 32);
5388 ciw
= immed_double_const (lo
, hi
, DImode
);
5390 return gen_rtx_UNSPEC (DImode
, gen_rtvec (1, ciw
),
5391 UNSPEC_UMK_LOAD_CIW
);
5394 size
= ALPHA_ARG_SIZE (mode
, type
, named
);
5395 num_args
= cum
.num_reg_words
;
5396 if (MUST_PASS_IN_STACK (mode
, type
)
5397 || cum
.num_reg_words
+ size
> 6 || cum
.force_stack
)
5399 else if (type
&& TYPE_MODE (type
) == BLKmode
)
5403 reg1
= gen_rtx_REG (DImode
, num_args
+ 16);
5404 reg1
= gen_rtx_EXPR_LIST (DImode
, reg1
, const0_rtx
);
5406 /* The argument fits in two registers. Note that we still need to
5407 reserve a register for empty structures. */
5411 return gen_rtx_PARALLEL (mode
, gen_rtvec (1, reg1
));
5414 reg2
= gen_rtx_REG (DImode
, num_args
+ 17);
5415 reg2
= gen_rtx_EXPR_LIST (DImode
, reg2
, GEN_INT (8));
5416 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, reg1
, reg2
));
5426 /* VOID is passed as a special flag for "last argument". */
5427 if (type
== void_type_node
)
5429 else if (MUST_PASS_IN_STACK (mode
, type
))
5431 else if (FUNCTION_ARG_PASS_BY_REFERENCE (cum
, mode
, type
, named
))
5434 #endif /* TARGET_ABI_UNICOSMK */
5435 #endif /* TARGET_ABI_OPEN_VMS */
5437 return gen_rtx_REG (mode
, num_args
+ basereg
);
5441 alpha_build_va_list ()
5443 tree base
, ofs
, record
, type_decl
;
5445 if (TARGET_ABI_OPEN_VMS
|| TARGET_ABI_UNICOSMK
)
5446 return ptr_type_node
;
5448 record
= make_lang_type (RECORD_TYPE
);
5449 type_decl
= build_decl (TYPE_DECL
, get_identifier ("__va_list_tag"), record
);
5450 TREE_CHAIN (record
) = type_decl
;
5451 TYPE_NAME (record
) = type_decl
;
5453 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5455 ofs
= build_decl (FIELD_DECL
, get_identifier ("__offset"),
5457 DECL_FIELD_CONTEXT (ofs
) = record
;
5459 base
= build_decl (FIELD_DECL
, get_identifier ("__base"),
5461 DECL_FIELD_CONTEXT (base
) = record
;
5462 TREE_CHAIN (base
) = ofs
;
5464 TYPE_FIELDS (record
) = base
;
5465 layout_type (record
);
5471 alpha_va_start (stdarg_p
, valist
, nextarg
)
5474 rtx nextarg ATTRIBUTE_UNUSED
;
5476 HOST_WIDE_INT offset
;
5477 tree t
, offset_field
, base_field
;
5479 if (TREE_CODE (TREE_TYPE (valist
)) == ERROR_MARK
)
5482 if (TARGET_ABI_UNICOSMK
)
5483 std_expand_builtin_va_start (stdarg_p
, valist
, nextarg
);
5485 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base
5486 up by 48, storing fp arg registers in the first 48 bytes, and the
5487 integer arg registers in the next 48 bytes. This is only done,
5488 however, if any integer registers need to be stored.
5490 If no integer registers need be stored, then we must subtract 48
5491 in order to account for the integer arg registers which are counted
5492 in argsize above, but which are not actually stored on the stack. */
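  /* For instance (illustrative, OSF ABI assumed): a stdarg function with
     two named argument words has NUM_ARGS == 2 <= 5 + 1, so the register
     save area exists and the offset is 6 * UNITS_PER_WORD == 48 bytes;
     if the named arguments already fill more than six words, nothing is
     saved and the 48 bytes are subtracted instead.  */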
5494 if (NUM_ARGS
<= 5 + stdarg_p
)
5495 offset
= TARGET_ABI_OPEN_VMS
? UNITS_PER_WORD
: 6 * UNITS_PER_WORD
;
5497 offset
= -6 * UNITS_PER_WORD
;
5499 if (TARGET_ABI_OPEN_VMS
)
5501 nextarg
= plus_constant (nextarg
, offset
);
5502 nextarg
= plus_constant (nextarg
, NUM_ARGS
* UNITS_PER_WORD
);
5503 t
= build (MODIFY_EXPR
, TREE_TYPE (valist
), valist
,
5504 make_tree (ptr_type_node
, nextarg
));
5505 TREE_SIDE_EFFECTS (t
) = 1;
5507 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5511 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
5512 offset_field
= TREE_CHAIN (base_field
);
5514 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
5515 valist
, base_field
);
5516 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
5517 valist
, offset_field
);
5519 t
= make_tree (ptr_type_node
, virtual_incoming_args_rtx
);
5520 t
= build (PLUS_EXPR
, ptr_type_node
, t
, build_int_2 (offset
, 0));
5521 t
= build (MODIFY_EXPR
, TREE_TYPE (base_field
), base_field
, t
);
5522 TREE_SIDE_EFFECTS (t
) = 1;
5523 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5525 t
= build_int_2 (NUM_ARGS
* UNITS_PER_WORD
, 0);
5526 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
, t
);
5527 TREE_SIDE_EFFECTS (t
) = 1;
5528 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5533 alpha_va_arg (valist
, type
)
5536 HOST_WIDE_INT tsize
;
5539 tree offset_field
, base_field
, addr_tree
, addend
;
5540 tree wide_type
, wide_ofs
;
5543 if (TARGET_ABI_OPEN_VMS
|| TARGET_ABI_UNICOSMK
)
5544 return std_expand_builtin_va_arg (valist
, type
);
5546 tsize
= ((TREE_INT_CST_LOW (TYPE_SIZE (type
)) / BITS_PER_UNIT
+ 7) / 8) * 8;
5548 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
5549 offset_field
= TREE_CHAIN (base_field
);
5551 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
5552 valist
, base_field
);
5553 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
5554 valist
, offset_field
);
5556 wide_type
= make_signed_type (64);
5557 wide_ofs
= save_expr (build1 (CONVERT_EXPR
, wide_type
, offset_field
));
5561 if (TYPE_MODE (type
) == TFmode
|| TYPE_MODE (type
) == TCmode
)
5564 tsize
= UNITS_PER_WORD
;
5566 else if (FLOAT_TYPE_P (type
))
5568 tree fpaddend
, cond
;
5570 fpaddend
= fold (build (PLUS_EXPR
, TREE_TYPE (addend
),
5571 addend
, build_int_2 (-6*8, 0)));
5573 cond
= fold (build (LT_EXPR
, integer_type_node
,
5574 wide_ofs
, build_int_2 (6*8, 0)));
5576 addend
= fold (build (COND_EXPR
, TREE_TYPE (addend
), cond
,
5580 addr_tree
= build (PLUS_EXPR
, TREE_TYPE (base_field
),
5581 base_field
, addend
);
5583 addr
= expand_expr (addr_tree
, NULL_RTX
, Pmode
, EXPAND_NORMAL
);
5584 addr
= copy_to_reg (addr
);
5586 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
,
5587 build (PLUS_EXPR
, TREE_TYPE (offset_field
),
5588 offset_field
, build_int_2 (tsize
, 0)));
5589 TREE_SIDE_EFFECTS (t
) = 1;
5590 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5594 addr
= force_reg (Pmode
, addr
);
5595 addr
= gen_rtx_MEM (Pmode
, addr
);
5601 /* This page contains routines that are used to determine what the function
5602 prologue and epilogue code will do and write them out. */
5604 /* Compute the size of the save area in the stack. */
/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptor to generate.  */
5611 /* Nonzero if we need a stack procedure. */
5612 static int alpha_is_stack_procedure
;
5614 /* Register number (either FP or SP) that is used to unwind the frame. */
5615 static int vms_unwind_regno
;
5617 /* Register number used to save FP. We need not have one for RA since
5618 we don't modify it for register procedures. This is only defined
5619 for register frame procedures. */
5620 static int vms_save_fp_regno
;
5622 /* Register number used to reference objects off our PV. */
5623 static int vms_base_regno
;
5625 /* Compute register masks for saved registers. */
5628 alpha_sa_mask (imaskP
, fmaskP
)
5629 unsigned long *imaskP
;
5630 unsigned long *fmaskP
;
5632 unsigned long imask
= 0;
5633 unsigned long fmask
= 0;
5636 #ifdef ASM_OUTPUT_MI_THUNK
5637 if (!current_function_is_thunk
)
5640 if (TARGET_ABI_OPEN_VMS
&& alpha_is_stack_procedure
)
5641 imask
|= (1L << HARD_FRAME_POINTER_REGNUM
);
5643 /* One for every register we have to save. */
5644 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
5645 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5646 && regs_ever_live
[i
] && i
!= REG_RA
5647 && (!TARGET_ABI_UNICOSMK
|| i
!= HARD_FRAME_POINTER_REGNUM
))
5652 fmask
|= (1L << (i
- 32));
5655 /* We need to restore these for the handler. */
5656 if (current_function_calls_eh_return
)
5660 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
5661 if (regno
== INVALID_REGNUM
)
5663 imask
|= 1L << regno
;
5667 if (!TARGET_ABI_UNICOSMK
)
5669 /* If any register spilled, then spill the return address also. */
5670 /* ??? This is required by the Digital stack unwind specification
5671 and isn't needed if we're doing Dwarf2 unwinding. */
5672 if (imask
|| fmask
|| alpha_ra_ever_killed ())
5673 imask
|= (1L << REG_RA
);
5687 #ifdef ASM_OUTPUT_MI_THUNK
5688 if (current_function_is_thunk
)
5693 if (TARGET_ABI_UNICOSMK
)
5695 for (i
= 9; i
< 15 && sa_size
== 0; i
++)
5696 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5697 && regs_ever_live
[i
])
5699 for (i
= 32 + 2; i
< 32 + 10 && sa_size
== 0; i
++)
5700 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5701 && regs_ever_live
[i
])
5706 /* One for every register we have to save. */
5707 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
5708 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5709 && regs_ever_live
[i
] && i
!= REG_RA
)
5714 if (TARGET_ABI_UNICOSMK
)
      /* We might not need to generate a frame if we don't make any calls
	 (including calls to __T3E_MISMATCH if this is a vararg function),
	 don't have any local variables which require stack slots, don't
	 use alloca and have not determined that we need a frame for other
	 reasons.  */
5722 alpha_is_stack_procedure
= sa_size
!= 0
5723 || alpha_ra_ever_killed ()
5724 || get_frame_size() != 0
5725 || current_function_outgoing_args_size
5726 || current_function_varargs
5727 || current_function_stdarg
5728 || current_function_calls_alloca
5729 || frame_pointer_needed
;
5731 /* Always reserve space for saving callee-saved registers if we
5732 need a frame as required by the calling convention. */
5733 if (alpha_is_stack_procedure
)
5736 else if (TARGET_ABI_OPEN_VMS
)
5738 /* Start by assuming we can use a register procedure if we don't
5739 make any calls (REG_RA not used) or need to save any
5740 registers and a stack procedure if we do. */
5741 alpha_is_stack_procedure
= sa_size
!= 0 || alpha_ra_ever_killed ();
5743 /* Decide whether to refer to objects off our PV via FP or PV.
5744 If we need FP for something else or if we receive a nonlocal
5745 goto (which expects PV to contain the value), we must use PV.
5746 Otherwise, start by assuming we can use FP. */
5747 vms_base_regno
= (frame_pointer_needed
5748 || current_function_has_nonlocal_label
5749 || alpha_is_stack_procedure
5750 || current_function_outgoing_args_size
5751 ? REG_PV
: HARD_FRAME_POINTER_REGNUM
);
5753 /* If we want to copy PV into FP, we need to find some register
5754 in which to save FP. */
5756 vms_save_fp_regno
= -1;
5757 if (vms_base_regno
== HARD_FRAME_POINTER_REGNUM
)
5758 for (i
= 0; i
< 32; i
++)
5759 if (! fixed_regs
[i
] && call_used_regs
[i
] && ! regs_ever_live
[i
])
5760 vms_save_fp_regno
= i
;
5762 if (vms_save_fp_regno
== -1)
5763 vms_base_regno
= REG_PV
, alpha_is_stack_procedure
= 1;
5765 /* Stack unwinding should be done via FP unless we use it for PV. */
5766 vms_unwind_regno
= (vms_base_regno
== REG_PV
5767 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
);
5769 /* If this is a stack procedure, allow space for saving FP and RA. */
5770 if (alpha_is_stack_procedure
)
5775 /* If some registers were saved but not RA, RA must also be saved,
5776 so leave space for it. */
5777 if (!TARGET_ABI_UNICOSMK
&& (sa_size
!= 0 || alpha_ra_ever_killed ()))
5780 /* Our size must be even (multiple of 16 bytes). */
5789 alpha_pv_save_size ()
5792 return alpha_is_stack_procedure
? 8 : 0;
5799 return vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
;
5802 #if TARGET_ABI_OPEN_VMS
5804 const struct attribute_spec vms_attribute_table
[] =
5806 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
5807 { "overlaid", 0, 0, true, false, false, NULL
},
5808 { NULL
, 0, 0, false, false, false, NULL
}
5814 find_lo_sum (px
, data
)
5816 void *data ATTRIBUTE_UNUSED
;
5818 return GET_CODE (*px
) == LO_SUM
;
5822 alpha_does_function_need_gp ()
5826 /* The GP being variable is an OSF abi thing. */
5827 if (! TARGET_ABI_OSF
)
5830 if (TARGET_PROFILING_NEEDS_GP
&& profile_flag
)
5833 #ifdef ASM_OUTPUT_MI_THUNK
5834 if (current_function_is_thunk
)
5838 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
5839 Even if we are a static function, we still need to do this in case
5840 our address is taken and passed to something like qsort. */
5842 push_topmost_sequence ();
5843 insn
= get_insns ();
5844 pop_topmost_sequence ();
5846 for (; insn
; insn
= NEXT_INSN (insn
))
5848 && GET_CODE (PATTERN (insn
)) != USE
5849 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
5851 enum attr_type type
= get_attr_type (insn
);
5852 if (type
== TYPE_LDSYM
|| type
== TYPE_JSR
)
5854 if (TARGET_EXPLICIT_RELOCS
5855 && for_each_rtx (&PATTERN (insn
), find_lo_sum
, NULL
) > 0)
/* Write a version stamp.  Don't write anything if we are running as a
   cross-compiler.  Otherwise, use the versions in /usr/include/stamp.h.  */

#ifdef HAVE_STAMP_H
#include <stamp.h>
#endif

void
alpha_write_verstamp (file)
     FILE *file ATTRIBUTE_UNUSED;
{
#ifdef MS_STAMP
  fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif
}
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx
set_frame_related_p ()
{
  rtx seq = gen_sequence ();
  end_sequence ();

  if (GET_CODE (seq) == SEQUENCE)
    {
      int i = XVECLEN (seq, 0);
      while (--i >= 0)
	RTX_FRAME_RELATED_P (XVECEXP (seq, 0, i)) = 1;
      return emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
      return seq;
    }
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
5904 /* Write function prologue. */
5906 /* On vms we have two kinds of functions:
5908 - stack frame (PROC_STACK)
5909 these are 'normal' functions with local vars and which are
5910 calling other functions
5911 - register frame (PROC_REGISTER)
5912 keeps all data in registers, needs no stack
5914 We must pass this to the assembler so it can generate the
5915 proper pdsc (procedure descriptor)
5916 This is done with the '.pdesc' command.
5918 On not-vms, we don't really differentiate between the two, as we can
5919 simply allocate stack without saving registers. */
5922 alpha_expand_prologue ()
5924 /* Registers to save. */
5925 unsigned long imask
= 0;
5926 unsigned long fmask
= 0;
5927 /* Stack space needed for pushing registers clobbered by us. */
5928 HOST_WIDE_INT sa_size
;
5929 /* Complete stack size needed. */
5930 HOST_WIDE_INT frame_size
;
5931 /* Offset from base reg to register save area. */
5932 HOST_WIDE_INT reg_offset
;
5936 sa_size
= alpha_sa_size ();
5938 frame_size
= get_frame_size ();
5939 if (TARGET_ABI_OPEN_VMS
)
5940 frame_size
= ALPHA_ROUND (sa_size
5941 + (alpha_is_stack_procedure
? 8 : 0)
5943 + current_function_pretend_args_size
);
5944 else if (TARGET_ABI_UNICOSMK
)
5945 /* We have to allocate space for the DSIB if we generate a frame. */
5946 frame_size
= ALPHA_ROUND (sa_size
5947 + (alpha_is_stack_procedure
? 48 : 0))
5948 + ALPHA_ROUND (frame_size
5949 + current_function_outgoing_args_size
);
5951 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
5953 + ALPHA_ROUND (frame_size
5954 + current_function_pretend_args_size
));
5956 if (TARGET_ABI_OPEN_VMS
)
5959 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
5961 alpha_sa_mask (&imask
, &fmask
);
5963 /* Emit an insn to reload GP, if needed. */
5966 alpha_function_needs_gp
= alpha_does_function_need_gp ();
5967 if (alpha_function_needs_gp
)
5968 emit_insn (gen_prologue_ldgp ());
5971 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
5972 the call to mcount ourselves, rather than having the linker do it
5973 magically in response to -pg. Since _mcount has special linkage,
5974 don't represent the call as a call. */
5975 if (TARGET_PROFILING_NEEDS_GP
&& profile_flag
)
5976 emit_insn (gen_prologue_mcount ());
5978 if (TARGET_ABI_UNICOSMK
)
5979 unicosmk_gen_dsib (&imask
);
5981 /* Adjust the stack by the frame size. If the frame size is > 4096
5982 bytes, we need to be sure we probe somewhere in the first and last
5983 4096 bytes (we can probably get away without the latter test) and
5984 every 8192 bytes in between. If the frame size is > 32768, we
5985 do this in a loop. Otherwise, we generate the explicit probe
5988 Note that we are only allowed to adjust sp once in the prologue. */
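  /* For example (added illustration): a 20000-byte frame gets explicit
     probes at SP-4096 and SP-12288, plus one at SP-20000 when no registers
     are being saved; a 100000-byte frame is handled by the probe loop
     further below instead.  */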
5990 if (frame_size
<= 32768)
5992 if (frame_size
> 4096)
5997 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
6000 while ((probed
+= 8192) < frame_size
);
6002 /* We only have to do this probe if we aren't saving registers. */
6003 if (sa_size
== 0 && probed
+ 4096 < frame_size
)
6004 emit_insn (gen_probe_stack (GEN_INT (-frame_size
)));
6007 if (frame_size
!= 0)
6008 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
6009 GEN_INT (TARGET_ABI_UNICOSMK
6015 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
6016 number of 8192 byte blocks to probe. We then probe each block
6017 in the loop and then set SP to the proper location. If the
6018 amount remaining is > 4096, we have to do one more probe if we
6019 are not saving any registers. */
6021 HOST_WIDE_INT blocks
= (frame_size
+ 4096) / 8192;
6022 HOST_WIDE_INT leftover
= frame_size
+ 4096 - blocks
* 8192;
6023 rtx ptr
= gen_rtx_REG (DImode
, 22);
6024 rtx count
= gen_rtx_REG (DImode
, 23);
6027 emit_move_insn (count
, GEN_INT (blocks
));
6028 emit_insn (gen_adddi3 (ptr
, stack_pointer_rtx
,
6029 GEN_INT (TARGET_ABI_UNICOSMK
? 4096 - 64 : 4096)));
6031 /* Because of the difficulty in emitting a new basic block this
6032 late in the compilation, generate the loop as a single insn. */
6033 emit_insn (gen_prologue_stack_probe_loop (count
, ptr
));
6035 if (leftover
> 4096 && sa_size
== 0)
6037 rtx last
= gen_rtx_MEM (DImode
, plus_constant (ptr
, -leftover
));
6038 MEM_VOLATILE_P (last
) = 1;
6039 emit_move_insn (last
, const0_rtx
);
6042 if (TARGET_ABI_WINDOWS_NT
)
6044 /* For NT stack unwind (done by 'reverse execution'), it's
6045 not OK to take the result of a loop, even though the value
6046 is already in ptr, so we reload it via a single operation
6047 and subtract it to sp.
6049 Yes, that's correct -- we have to reload the whole constant
6050 into a temporary via ldah+lda then subtract from sp. To
6051 ensure we get ldah+lda, we use a special pattern. */
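	  /* Sketch of the split (added illustration): for a frame size of
	     0x18000, lo = ((0x8000 ^ 0x8000) - 0x8000) == -0x8000 and
	     hi = 0x18000 - lo == 0x20000, so "ldah $22,2($31)" followed by
	     "lda $22,-32768($22)" reconstructs 0x18000 exactly.  */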
6053 HOST_WIDE_INT lo
, hi
;
6054 lo
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
6055 hi
= frame_size
- lo
;
6057 emit_move_insn (ptr
, GEN_INT (hi
));
6058 emit_insn (gen_nt_lda (ptr
, GEN_INT (lo
)));
6059 seq
= emit_insn (gen_subdi3 (stack_pointer_rtx
, stack_pointer_rtx
,
6064 seq
= emit_insn (gen_adddi3 (stack_pointer_rtx
, ptr
,
6065 GEN_INT (-leftover
)));
6068 /* This alternative is special, because the DWARF code cannot
6069 possibly intuit through the loop above. So we invent this
6070 note it looks at instead. */
6071 RTX_FRAME_RELATED_P (seq
) = 1;
6073 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
6074 gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
6075 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
6076 GEN_INT (TARGET_ABI_UNICOSMK
6082 if (!TARGET_ABI_UNICOSMK
)
6084 /* Cope with very large offsets to the register save area. */
6085 sa_reg
= stack_pointer_rtx
;
6086 if (reg_offset
+ sa_size
> 0x8000)
6088 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
6091 if (low
+ sa_size
<= 0x8000)
6092 bias
= reg_offset
- low
, reg_offset
= low
;
6094 bias
= reg_offset
, reg_offset
= 0;
6096 sa_reg
= gen_rtx_REG (DImode
, 24);
6097 FRP (emit_insn (gen_adddi3 (sa_reg
, stack_pointer_rtx
,
6101 /* Save regs in stack order. Beginning with VMS PV. */
6102 if (TARGET_ABI_OPEN_VMS
&& alpha_is_stack_procedure
)
6104 mem
= gen_rtx_MEM (DImode
, stack_pointer_rtx
);
6105 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6106 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_PV
)));
6109 /* Save register RA next. */
6110 if (imask
& (1L << REG_RA
))
6112 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
6113 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6114 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
6115 imask
&= ~(1L << REG_RA
);
6119 /* Now save any other registers required to be saved. */
6120 for (i
= 0; i
< 32; i
++)
6121 if (imask
& (1L << i
))
6123 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
6124 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6125 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
6129 for (i
= 0; i
< 32; i
++)
6130 if (fmask
& (1L << i
))
6132 mem
= gen_rtx_MEM (DFmode
, plus_constant (sa_reg
, reg_offset
));
6133 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6134 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
6138 else if (TARGET_ABI_UNICOSMK
&& alpha_is_stack_procedure
)
6140 /* The standard frame on the T3E includes space for saving registers.
6141 We just have to use it. We don't have to save the return address and
6142 the old frame pointer here - they are saved in the DSIB. */
6145 for (i
= 9; i
< 15; i
++)
6146 if (imask
& (1L << i
))
6148 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
6150 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6151 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
6154 for (i
= 2; i
< 10; i
++)
6155 if (fmask
& (1L << i
))
6157 mem
= gen_rtx_MEM (DFmode
, plus_constant (hard_frame_pointer_rtx
,
6159 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6160 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
6165 if (TARGET_ABI_OPEN_VMS
)
6167 if (!alpha_is_stack_procedure
)
6168 /* Register frame procedures save the fp. */
6169 /* ??? Ought to have a dwarf2 save for this. */
6170 emit_move_insn (gen_rtx_REG (DImode
, vms_save_fp_regno
),
6171 hard_frame_pointer_rtx
);
6173 if (vms_base_regno
!= REG_PV
)
6174 emit_insn (gen_force_movdi (gen_rtx_REG (DImode
, vms_base_regno
),
6175 gen_rtx_REG (DImode
, REG_PV
)));
6177 if (vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
6178 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
6180 /* If we have to allocate space for outgoing args, do it now. */
6181 if (current_function_outgoing_args_size
!= 0)
6184 plus_constant (hard_frame_pointer_rtx
,
6186 (current_function_outgoing_args_size
)))));
6188 else if (!TARGET_ABI_UNICOSMK
)
6190 /* If we need a frame pointer, set it from the stack pointer. */
6191 if (frame_pointer_needed
)
6193 if (TARGET_CAN_FAULT_IN_PROLOGUE
)
6194 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
6196 /* This must always be the last instruction in the
6197 prologue, thus we emit a special move + clobber. */
6198 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx
,
6199 stack_pointer_rtx
, sa_reg
)));
6203 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
6204 the prologue, for exception handling reasons, we cannot do this for
6205 any insn that might fault. We could prevent this for mems with a
6206 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
6207 have to prevent all such scheduling with a blockage.
6209 Linux, on the other hand, never bothered to implement OSF/1's
6210 exception handling, and so doesn't care about such things. Anyone
6211 planning to use dwarf2 frame-unwind info can also omit the blockage. */
6213 if (! TARGET_CAN_FAULT_IN_PROLOGUE
)
6214 emit_insn (gen_blockage ());
6217 /* Output the textual info surrounding the prologue. */
6220 alpha_start_function (file
, fnname
, decl
)
6223 tree decl ATTRIBUTE_UNUSED
;
6225 unsigned long imask
= 0;
6226 unsigned long fmask
= 0;
6227 /* Stack space needed for pushing registers clobbered by us. */
6228 HOST_WIDE_INT sa_size
;
6229 /* Complete stack size needed. */
6230 HOST_WIDE_INT frame_size
;
6231 /* Offset from base reg to register save area. */
6232 HOST_WIDE_INT reg_offset
;
6233 char *entry_label
= (char *) alloca (strlen (fnname
) + 6);
6236 /* Don't emit an extern directive for functions defined in the same file. */
6237 if (TARGET_ABI_UNICOSMK
)
6240 name_tree
= get_identifier (fnname
);
6241 TREE_ASM_WRITTEN (name_tree
) = 1;
6244 alpha_fnname
= fnname
;
6245 sa_size
= alpha_sa_size ();
6247 frame_size
= get_frame_size ();
6248 if (TARGET_ABI_OPEN_VMS
)
6249 frame_size
= ALPHA_ROUND (sa_size
6250 + (alpha_is_stack_procedure
? 8 : 0)
6252 + current_function_pretend_args_size
);
6253 else if (TARGET_ABI_UNICOSMK
)
6254 frame_size
= ALPHA_ROUND (sa_size
6255 + (alpha_is_stack_procedure
? 48 : 0))
6256 + ALPHA_ROUND (frame_size
6257 + current_function_outgoing_args_size
);
6259 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
6261 + ALPHA_ROUND (frame_size
6262 + current_function_pretend_args_size
));
6264 if (TARGET_ABI_OPEN_VMS
)
6267 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
6269 alpha_sa_mask (&imask
, &fmask
);
6271 /* Ecoff can handle multiple .file directives, so put out file and lineno.
6272 We have to do that before the .ent directive as we cannot switch
6273 files within procedures with native ecoff because line numbers are
6274 linked to procedure descriptors.
6275 Outputting the lineno helps debugging of one line functions as they
6276 would otherwise get no line number at all. Please note that we would
6277 like to put out last_linenum from final.c, but it is not accessible. */
6279 if (write_symbols
== SDB_DEBUG
)
6281 #ifdef ASM_OUTPUT_SOURCE_FILENAME
6282 ASM_OUTPUT_SOURCE_FILENAME (file
,
6283 DECL_SOURCE_FILE (current_function_decl
));
6285 #ifdef ASM_OUTPUT_SOURCE_LINE
6286 if (debug_info_level
!= DINFO_LEVEL_TERSE
)
6287 ASM_OUTPUT_SOURCE_LINE (file
,
6288 DECL_SOURCE_LINE (current_function_decl
));
6292 /* Issue function start and label. */
6293 if (TARGET_ABI_OPEN_VMS
6294 || (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
))
6296 fputs ("\t.ent ", file
);
6297 assemble_name (file
, fnname
);
6300 /* If the function needs GP, we'll write the "..ng" label there.
6301 Otherwise, do it here. */
6302 if (TARGET_ABI_OSF
&& ! alpha_function_needs_gp
)
6305 assemble_name (file
, fnname
);
6306 fputs ("..ng:\n", file
);
6310 strcpy (entry_label
, fnname
);
6311 if (TARGET_ABI_OPEN_VMS
)
6312 strcat (entry_label
, "..en");
6314 /* For public functions, the label must be globalized by appending an
6315 additional colon. */
6316 if (TARGET_ABI_UNICOSMK
&& TREE_PUBLIC (decl
))
6317 strcat (entry_label
, ":");
6319 ASM_OUTPUT_LABEL (file
, entry_label
);
6320 inside_function
= TRUE
;
6322 if (TARGET_ABI_OPEN_VMS
)
6323 fprintf (file
, "\t.base $%d\n", vms_base_regno
);
6325 if (!TARGET_ABI_OPEN_VMS
&& !TARGET_ABI_UNICOSMK
&& TARGET_IEEE_CONFORMANT
6326 && !flag_inhibit_size_directive
)
6328 /* Set flags in procedure descriptor to request IEEE-conformant
6329 math-library routines. The value we set it to is PDSC_EXC_IEEE
6330 (/usr/include/pdsc.h). */
6331 fputs ("\t.eflag 48\n", file
);
6334 /* Set up offsets to alpha virtual arg/local debugging pointer. */
6335 alpha_auto_offset
= -frame_size
+ current_function_pretend_args_size
;
6336 alpha_arg_offset
= -frame_size
+ 48;
6338 /* Describe our frame. If the frame size is larger than an integer,
6339 print it as zero to avoid an assembler error. We won't be
6340 properly describing such a frame, but that's the best we can do. */
6341 if (TARGET_ABI_UNICOSMK
)
6343 else if (TARGET_ABI_OPEN_VMS
)
6345 fprintf (file
, "\t.frame $%d,", vms_unwind_regno
);
6346 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6347 frame_size
>= ((HOST_WIDE_INT
) 1 << 31) ? 0 : frame_size
);
6348 fputs (",$26,", file
);
6349 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, reg_offset
);
6352 else if (!flag_inhibit_size_directive
)
6354 fprintf (file
, "\t.frame $%d,",
6355 (frame_pointer_needed
6356 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
));
6357 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6358 frame_size
>= (1l << 31) ? 0 : frame_size
);
6359 fprintf (file
, ",$26,%d\n", current_function_pretend_args_size
);
6362 /* Describe which registers were spilled. */
6363 if (TARGET_ABI_UNICOSMK
)
6365 else if (TARGET_ABI_OPEN_VMS
)
6368 /* ??? Does VMS care if mask contains ra? The old code didn't
6369 set it, so I don't here. */
6370 fprintf (file
, "\t.mask 0x%lx,0\n", imask
& ~(1L << REG_RA
));
6372 fprintf (file
, "\t.fmask 0x%lx,0\n", fmask
);
6373 if (!alpha_is_stack_procedure
)
6374 fprintf (file
, "\t.fp_save $%d\n", vms_save_fp_regno
);
6376 else if (!flag_inhibit_size_directive
)
6380 fprintf (file
, "\t.mask 0x%lx,", imask
);
6381 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6382 frame_size
>= (1l << 31) ? 0 : reg_offset
- frame_size
);
6385 for (i
= 0; i
< 32; ++i
)
6386 if (imask
& (1L << i
))
6392 fprintf (file
, "\t.fmask 0x%lx,", fmask
);
6393 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6394 frame_size
>= (1l << 31) ? 0 : reg_offset
- frame_size
);
6399 #if TARGET_ABI_OPEN_VMS
6400 /* Ifdef'ed cause readonly_section and link_section are only
6402 readonly_section ();
6403 fprintf (file
, "\t.align 3\n");
6404 assemble_name (file
, fnname
); fputs ("..na:\n", file
);
6405 fputs ("\t.ascii \"", file
);
6406 assemble_name (file
, fnname
);
6407 fputs ("\\0\"\n", file
);
6410 fprintf (file
, "\t.align 3\n");
6411 fputs ("\t.name ", file
);
6412 assemble_name (file
, fnname
);
6413 fputs ("..na\n", file
);
6414 ASM_OUTPUT_LABEL (file
, fnname
);
6415 fprintf (file
, "\t.pdesc ");
6416 assemble_name (file
, fnname
);
6417 fprintf (file
, "..en,%s\n", alpha_is_stack_procedure
? "stack" : "reg");
6418 alpha_need_linkage (fnname
, 1);
6423 /* Emit the .prologue note at the scheduled end of the prologue. */
6426 alpha_output_function_end_prologue (file
)
6429 if (TARGET_ABI_UNICOSMK
)
6431 else if (TARGET_ABI_OPEN_VMS
)
6432 fputs ("\t.prologue\n", file
);
6433 else if (TARGET_ABI_WINDOWS_NT
)
6434 fputs ("\t.prologue 0\n", file
);
6435 else if (!flag_inhibit_size_directive
)
6436 fprintf (file
, "\t.prologue %d\n", alpha_function_needs_gp
);
6439 /* Write function epilogue. */
/* ??? At some point we will want to support full unwind, and so will
   need to mark the epilogue as well.  At the moment, we just confuse
   dwarf2out.  */
#undef FRP
#define FRP(exp) exp
6448 alpha_expand_epilogue ()
6450 /* Registers to save. */
6451 unsigned long imask
= 0;
6452 unsigned long fmask
= 0;
6453 /* Stack space needed for pushing registers clobbered by us. */
6454 HOST_WIDE_INT sa_size
;
6455 /* Complete stack size needed. */
6456 HOST_WIDE_INT frame_size
;
6457 /* Offset from base reg to register save area. */
6458 HOST_WIDE_INT reg_offset
;
6459 int fp_is_frame_pointer
, fp_offset
;
6460 rtx sa_reg
, sa_reg_exp
= NULL
;
6461 rtx sp_adj1
, sp_adj2
, mem
;
6465 sa_size
= alpha_sa_size ();
6467 frame_size
= get_frame_size ();
6468 if (TARGET_ABI_OPEN_VMS
)
6469 frame_size
= ALPHA_ROUND (sa_size
6470 + (alpha_is_stack_procedure
? 8 : 0)
6472 + current_function_pretend_args_size
);
6473 else if (TARGET_ABI_UNICOSMK
)
6474 frame_size
= ALPHA_ROUND (sa_size
6475 + (alpha_is_stack_procedure
? 48 : 0))
6476 + ALPHA_ROUND (frame_size
6477 + current_function_outgoing_args_size
);
6479 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
6481 + ALPHA_ROUND (frame_size
6482 + current_function_pretend_args_size
));
6484 if (TARGET_ABI_OPEN_VMS
)
6487 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
6489 alpha_sa_mask (&imask
, &fmask
);
6491 fp_is_frame_pointer
= ((TARGET_ABI_OPEN_VMS
&& alpha_is_stack_procedure
)
6492 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
));
6494 sa_reg
= stack_pointer_rtx
;
6496 if (current_function_calls_eh_return
)
6497 eh_ofs
= EH_RETURN_STACKADJ_RTX
;
6501 if (!TARGET_ABI_UNICOSMK
&& sa_size
)
6503 /* If we have a frame pointer, restore SP from it. */
6504 if ((TARGET_ABI_OPEN_VMS
6505 && vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
6506 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
))
6507 FRP (emit_move_insn (stack_pointer_rtx
, hard_frame_pointer_rtx
));
6509 /* Cope with very large offsets to the register save area. */
6510 if (reg_offset
+ sa_size
> 0x8000)
6512 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
6515 if (low
+ sa_size
<= 0x8000)
6516 bias
= reg_offset
- low
, reg_offset
= low
;
6518 bias
= reg_offset
, reg_offset
= 0;
6520 sa_reg
= gen_rtx_REG (DImode
, 22);
6521 sa_reg_exp
= plus_constant (stack_pointer_rtx
, bias
);
6523 FRP (emit_move_insn (sa_reg
, sa_reg_exp
));
6526 /* Restore registers in order, excepting a true frame pointer. */
6528 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
6530 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6531 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
6534 imask
&= ~(1L << REG_RA
);
6536 for (i
= 0; i
< 32; ++i
)
6537 if (imask
& (1L << i
))
6539 if (i
== HARD_FRAME_POINTER_REGNUM
&& fp_is_frame_pointer
)
6540 fp_offset
= reg_offset
;
6543 mem
= gen_rtx_MEM (DImode
, plus_constant(sa_reg
, reg_offset
));
6544 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6545 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
6550 for (i
= 0; i
< 32; ++i
)
6551 if (fmask
& (1L << i
))
6553 mem
= gen_rtx_MEM (DFmode
, plus_constant(sa_reg
, reg_offset
));
6554 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6555 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
6559 else if (TARGET_ABI_UNICOSMK
&& alpha_is_stack_procedure
)
6561 /* Restore callee-saved general-purpose registers. */
6565 for (i
= 9; i
< 15; i
++)
6566 if (imask
& (1L << i
))
6568 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
6570 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6571 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
6575 for (i
= 2; i
< 10; i
++)
6576 if (fmask
& (1L << i
))
6578 mem
= gen_rtx_MEM (DFmode
, plus_constant(hard_frame_pointer_rtx
,
6580 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6581 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
6585 /* Restore the return address from the DSIB. */
6587 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
, -8));
6588 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6589 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
6592 if (frame_size
|| eh_ofs
)
6594 sp_adj1
= stack_pointer_rtx
;
6598 sp_adj1
= gen_rtx_REG (DImode
, 23);
6599 emit_move_insn (sp_adj1
,
6600 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
, eh_ofs
));
6603 /* If the stack size is large, begin computation into a temporary
6604 register so as not to interfere with a potential fp restore,
6605 which must be consecutive with an SP restore. */
6606 if (frame_size
< 32768
6607 && ! (TARGET_ABI_UNICOSMK
&& current_function_calls_alloca
))
6608 sp_adj2
= GEN_INT (frame_size
);
6609 else if (TARGET_ABI_UNICOSMK
)
6611 sp_adj1
= gen_rtx_REG (DImode
, 23);
6612 FRP (emit_move_insn (sp_adj1
, hard_frame_pointer_rtx
));
6613 sp_adj2
= const0_rtx
;
6615 else if (frame_size
< 0x40007fffL
)
6617 int low
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
6619 sp_adj2
= plus_constant (sp_adj1
, frame_size
- low
);
6620 if (sa_reg_exp
&& rtx_equal_p (sa_reg_exp
, sp_adj2
))
6624 sp_adj1
= gen_rtx_REG (DImode
, 23);
6625 FRP (emit_move_insn (sp_adj1
, sp_adj2
));
6627 sp_adj2
= GEN_INT (low
);
6631 rtx tmp
= gen_rtx_REG (DImode
, 23);
6632 FRP (sp_adj2
= alpha_emit_set_const (tmp
, DImode
, frame_size
, 3));
6635 /* We can't drop new things to memory this late, afaik,
6636 so build it up by pieces. */
6637 FRP (sp_adj2
= alpha_emit_set_long_const (tmp
, frame_size
,
6638 -(frame_size
< 0)));
6644 /* From now on, things must be in order. So emit blockages. */
6646 /* Restore the frame pointer. */
6647 if (TARGET_ABI_UNICOSMK
)
6649 emit_insn (gen_blockage ());
6650 mem
= gen_rtx_MEM (DImode
,
6651 plus_constant (hard_frame_pointer_rtx
, -16));
6652 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6653 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
6655 else if (fp_is_frame_pointer
)
6657 emit_insn (gen_blockage ());
6658 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, fp_offset
));
6659 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6660 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
6662 else if (TARGET_ABI_OPEN_VMS
)
6664 emit_insn (gen_blockage ());
6665 FRP (emit_move_insn (hard_frame_pointer_rtx
,
6666 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
6669 /* Restore the stack pointer. */
6670 emit_insn (gen_blockage ());
6671 if (sp_adj2
== const0_rtx
)
6672 FRP (emit_move_insn (stack_pointer_rtx
, sp_adj1
));
6674 FRP (emit_move_insn (stack_pointer_rtx
,
6675 gen_rtx_PLUS (DImode
, sp_adj1
, sp_adj2
)));
6679 if (TARGET_ABI_OPEN_VMS
&& !alpha_is_stack_procedure
)
6681 emit_insn (gen_blockage ());
6682 FRP (emit_move_insn (hard_frame_pointer_rtx
,
6683 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
6685 else if (TARGET_ABI_UNICOSMK
&& !alpha_is_stack_procedure
)
6687 /* Decrement the frame pointer if the function does not have a
6690 emit_insn (gen_blockage ());
6691 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
6692 hard_frame_pointer_rtx
, GEN_INT (-1))));
6697 /* Output the rest of the textual info surrounding the epilogue. */
6700 alpha_end_function (file
, fnname
, decl
)
6703 tree decl ATTRIBUTE_UNUSED
;
6705 /* End the function. */
6706 if (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
)
6708 fputs ("\t.end ", file
);
6709 assemble_name (file
, fnname
);
6712 inside_function
= FALSE
;
6714 /* Show that we know this function if it is called again.
6716 Don't do this for global functions in object files destined for a
6717 shared library because the function may be overridden by the application
6718 or other libraries. Similarly, don't do this for weak functions.
6720 Don't do this for functions not defined in the .text section, as
6721 otherwise it's not unlikely that the destination is out of range
6722 for a direct branch. */
6724 if (!DECL_WEAK (current_function_decl
)
6725 && (!flag_pic
|| !TREE_PUBLIC (current_function_decl
))
6726 && decl_in_text_section (current_function_decl
))
6727 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl
), 0)) = 1;
6729 /* Output jump tables and the static subroutine information block. */
6730 if (TARGET_ABI_UNICOSMK
)
6732 unicosmk_output_ssib (file
, fnname
);
6733 unicosmk_output_deferred_case_vectors (file
);
6737 /* Debugging support. */
6741 /* Count the number of sdb related labels are generated (to find block
6742 start and end boundaries). */
6744 int sdb_label_count
= 0;
6746 /* Next label # for each statement. */
6748 static int sym_lineno
= 0;
6750 /* Count the number of .file directives, so that .loc is up to date. */
6752 static int num_source_filenames
= 0;
6754 /* Name of the file containing the current function. */
6756 static const char *current_function_file
= "";
6758 /* Offsets to alpha virtual arg/local debugging pointers. */
6760 long alpha_arg_offset
;
6761 long alpha_auto_offset
;
6763 /* Emit a new filename to a stream. */
6766 alpha_output_filename (stream
, name
)
6770 static int first_time
= TRUE
;
6771 char ltext_label_name
[100];
6776 ++num_source_filenames
;
6777 current_function_file
= name
;
6778 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
6779 output_quoted_string (stream
, name
);
6780 fprintf (stream
, "\n");
6781 if (!TARGET_GAS
&& write_symbols
== DBX_DEBUG
)
6782 fprintf (stream
, "\t#@stabs\n");
6785 else if (write_symbols
== DBX_DEBUG
)
6787 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name
, "Ltext", 0);
6788 fprintf (stream
, "%s", ASM_STABS_OP
);
6789 output_quoted_string (stream
, name
);
6790 fprintf (stream
, ",%d,0,0,%s\n", N_SOL
, <ext_label_name
[1]);
6793 else if (name
!= current_function_file
6794 && strcmp (name
, current_function_file
) != 0)
6796 if (inside_function
&& ! TARGET_GAS
)
6797 fprintf (stream
, "\t#.file\t%d ", num_source_filenames
);
6800 ++num_source_filenames
;
6801 current_function_file
= name
;
6802 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
6805 output_quoted_string (stream
, name
);
6806 fprintf (stream
, "\n");
6810 /* Emit a linenumber to a stream. */
6813 alpha_output_lineno (stream
, line
)
6817 if (write_symbols
== DBX_DEBUG
)
6819 /* mips-tfile doesn't understand .stabd directives. */
6821 fprintf (stream
, "$LM%d:\n%s%d,0,%d,$LM%d\n",
6822 sym_lineno
, ASM_STABN_OP
, N_SLINE
, line
, sym_lineno
);
6825 fprintf (stream
, "\n\t.loc\t%d %d\n", num_source_filenames
, line
);
6828 /* Structure to show the current status of registers and memory. */
6830 struct shadow_summary
6833 unsigned int i
: 31; /* Mask of int regs */
6834 unsigned int fp
: 31; /* Mask of fp regs */
6835 unsigned int mem
: 1; /* mem == imem | fpmem */
6839 static void summarize_insn
PARAMS ((rtx
, struct shadow_summary
*, int));
6840 static void alpha_handle_trap_shadows
PARAMS ((rtx
));
/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */
6847 summarize_insn (x
, sum
, set
)
6849 struct shadow_summary
*sum
;
6852 const char *format_ptr
;
6858 switch (GET_CODE (x
))
6860 /* ??? Note that this case would be incorrect if the Alpha had a
6861 ZERO_EXTRACT in SET_DEST. */
6863 summarize_insn (SET_SRC (x
), sum
, 0);
6864 summarize_insn (SET_DEST (x
), sum
, 1);
6868 summarize_insn (XEXP (x
, 0), sum
, 1);
6872 summarize_insn (XEXP (x
, 0), sum
, 0);
6876 for (i
= ASM_OPERANDS_INPUT_LENGTH (x
) - 1; i
>= 0; i
--)
6877 summarize_insn (ASM_OPERANDS_INPUT (x
, i
), sum
, 0);
6881 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
6882 summarize_insn (XVECEXP (x
, 0, i
), sum
, 0);
6886 summarize_insn (SUBREG_REG (x
), sum
, 0);
6891 int regno
= REGNO (x
);
6892 unsigned long mask
= ((unsigned long) 1) << (regno
% 32);
6894 if (regno
== 31 || regno
== 63)
6900 sum
->defd
.i
|= mask
;
6902 sum
->defd
.fp
|= mask
;
6907 sum
->used
.i
|= mask
;
6909 sum
->used
.fp
|= mask
;
6920 /* Find the regs used in memory address computation: */
6921 summarize_insn (XEXP (x
, 0), sum
, 0);
6924 case CONST_INT
: case CONST_DOUBLE
:
6925 case SYMBOL_REF
: case LABEL_REF
: case CONST
:
6926 case SCRATCH
: case ASM_INPUT
:
6929 /* Handle common unary and binary ops for efficiency. */
6930 case COMPARE
: case PLUS
: case MINUS
: case MULT
: case DIV
:
6931 case MOD
: case UDIV
: case UMOD
: case AND
: case IOR
:
6932 case XOR
: case ASHIFT
: case ROTATE
: case ASHIFTRT
: case LSHIFTRT
:
6933 case ROTATERT
: case SMIN
: case SMAX
: case UMIN
: case UMAX
:
6934 case NE
: case EQ
: case GE
: case GT
: case LE
:
6935 case LT
: case GEU
: case GTU
: case LEU
: case LTU
:
6936 summarize_insn (XEXP (x
, 0), sum
, 0);
6937 summarize_insn (XEXP (x
, 1), sum
, 0);
6940 case NEG
: case NOT
: case SIGN_EXTEND
: case ZERO_EXTEND
:
6941 case TRUNCATE
: case FLOAT_EXTEND
: case FLOAT_TRUNCATE
: case FLOAT
:
6942 case FIX
: case UNSIGNED_FLOAT
: case UNSIGNED_FIX
: case ABS
:
6943 case SQRT
: case FFS
:
6944 summarize_insn (XEXP (x
, 0), sum
, 0);
6948 format_ptr
= GET_RTX_FORMAT (GET_CODE (x
));
6949 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
6950 switch (format_ptr
[i
])
6953 summarize_insn (XEXP (x
, i
), sum
, 0);
6957 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
6958 summarize_insn (XVECEXP (x
, i
, j
), sum
, 0);
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */
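/* Illustrative sketch, not part of the compiler: the used/defd bit masks
   defined above are all that is needed to test rules (a) and (c).  The
   helper below is hypothetical and only spells out the mask algebra; the
   real checks are done inline in alpha_handle_trap_shadows.  */
#if 0
static int
shadow_conflict_p (shadow, insn_sum)
     struct shadow_summary *shadow;	/* summary of the open trap shadow */
     struct shadow_summary *insn_sum;	/* summary of the next insn */
{
  /* (c): the insn writes a register that some insn in the shadow has
     already written.  */
  if ((insn_sum->defd.i & shadow->defd.i)
      || (insn_sum->defd.fp & shadow->defd.fp))
    return 1;

  /* (a): the insn overwrites a register or memory location whose value the
     shadow has already used, i.e. something that was live on entry.  */
  if ((insn_sum->defd.i & shadow->used.i)
      || (insn_sum->defd.fp & shadow->used.fp)
      || (insn_sum->defd.mem & shadow->used.mem))
    return 1;

  return 0;
}
#endif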
static void
alpha_handle_trap_shadows (insns)
     rtx insns;
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;
  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = insns; i ; i = NEXT_INSN (i))
    {
      if (GET_CODE (i) == NOTE)
	{
	  switch (NOTE_LINE_NUMBER (i))
	    {
	    case NOTE_INSN_EH_REGION_BEG:
	      exception_nesting++;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EH_REGION_END:
	      exception_nesting--;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
	      if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
		goto close_shadow;
	      break;
	    }
	}
      else if (trap_pending)
	{
	  if (alpha_tp == ALPHA_TP_FUNC)
	    {
	      if (GET_CODE (i) == JUMP_INSN
		  && GET_CODE (PATTERN (i)) == RETURN)
		goto close_shadow;
	    }
	  else if (alpha_tp == ALPHA_TP_INSN)
	    {
	      if (optimize > 0)
		{
		  struct shadow_summary sum;

		  sum.used.i = 0;
		  sum.used.fp = 0;
		  sum.used.mem = 0;
		  sum.defd = sum.used;

		  switch (GET_CODE (i))
		    {
		    case INSN:
		      /* Annoyingly, get_attr_trap will abort on these.  */
		      if (GET_CODE (PATTERN (i)) == USE
			  || GET_CODE (PATTERN (i)) == CLOBBER)
			break;

		      summarize_insn (PATTERN (i), &sum, 0);

		      if ((sum.defd.i & shadow.defd.i)
			  || (sum.defd.fp & shadow.defd.fp))
			{
			  /* (c) would be violated */
			  goto close_shadow;
			}

		      /* Combine shadow with summary of current insn:  */
		      shadow.used.i   |= sum.used.i;
		      shadow.used.fp  |= sum.used.fp;
		      shadow.used.mem |= sum.used.mem;
		      shadow.defd.i   |= sum.defd.i;
		      shadow.defd.fp  |= sum.defd.fp;
		      shadow.defd.mem |= sum.defd.mem;

		      if ((sum.defd.i & shadow.used.i)
			  || (sum.defd.fp & shadow.used.fp)
			  || (sum.defd.mem & shadow.used.mem))
			{
			  /* (a) would be violated (also takes care of (b)).  */
			  if (get_attr_trap (i) == TRAP_YES
			      && ((sum.defd.i & sum.used.i)
				  || (sum.defd.fp & sum.used.fp)))
			    abort ();

			  goto close_shadow;
			}
		      break;

		    case JUMP_INSN:
		    case CALL_INSN:
		    case CODE_LABEL:
		      goto close_shadow;

		    default:
		      abort ();
		    }
		}
	      else
		{
		close_shadow:
		  n = emit_insn_before (gen_trapb (), i);
		  PUT_MODE (n, TImode);
		  PUT_MODE (i, TImode);
		  trap_pending = 0;
		  shadow.used.i = 0;
		  shadow.used.fp = 0;
		  shadow.used.mem = 0;
		  shadow.defd = shadow.used;
		}
	    }
	}

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
	  && GET_CODE (i) == INSN
	  && GET_CODE (PATTERN (i)) != USE
	  && GET_CODE (PATTERN (i)) != CLOBBER
	  && get_attr_trap (i) == TRAP_YES)
	{
	  if (optimize && !trap_pending)
	    summarize_insn (PATTERN (i), &shadow, 0);
	  trap_pending = 1;
	}
    }
}
/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};
static enum alphaev4_pipe alphaev4_insn_pipe PARAMS ((rtx));
static enum alphaev5_pipe alphaev5_insn_pipe PARAMS ((rtx));
static rtx alphaev4_next_group PARAMS ((rtx, int *, int *));
static rtx alphaev5_next_group PARAMS ((rtx, int *, int *));
static rtx alphaev4_next_nop PARAMS ((int *));
static rtx alphaev5_next_nop PARAMS ((int *));

static void alpha_align_insns
  PARAMS ((rtx, unsigned int, rtx (*)(rtx, int *, int *), rtx (*)(int *)));

static enum alphaev4_pipe
alphaev4_insn_pipe (insn)
     rtx insn;
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))

static enum alphaev5_pipe
alphaev5_insn_pipe (insn)
     rtx insn;
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
7259 /* IN_USE is a mask of the slots currently filled within the insn group.
7260 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
7261 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
7263 LEN is, of course, the length of the group in bytes. */
7266 alphaev4_next_group (insn
, pin_use
, plen
)
7268 int *pin_use
, *plen
;
7275 || GET_CODE (PATTERN (insn
)) == CLOBBER
7276 || GET_CODE (PATTERN (insn
)) == USE
)
7281 enum alphaev4_pipe pipe
;
7283 pipe
= alphaev4_insn_pipe (insn
);
7287 /* Force complex instructions to start new groups. */
	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
7294 if (recog_memoized (insn
) < 0)
7297 len
= get_attr_length (insn
);
7301 if (in_use
& EV4_IB0
)
7303 if (in_use
& EV4_IB1
)
7308 in_use
|= EV4_IB0
| EV4_IBX
;
7312 if (in_use
& EV4_IB0
)
7314 if (!(in_use
& EV4_IBX
) || (in_use
& EV4_IB1
))
7322 if (in_use
& EV4_IB1
)
7332 /* Haifa doesn't do well scheduling branches. */
7333 if (GET_CODE (insn
) == JUMP_INSN
)
7337 insn
= next_nonnote_insn (insn
);
7339 if (!insn
|| ! INSN_P (insn
))
7342 /* Let Haifa tell us where it thinks insn group boundaries are. */
7343 if (GET_MODE (insn
) == TImode
)
7346 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
7351 insn
= next_nonnote_insn (insn
);
7359 /* IN_USE is a mask of the slots currently filled within the insn group.
7360 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
7361 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
7363 LEN is, of course, the length of the group in bytes. */
7366 alphaev5_next_group (insn
, pin_use
, plen
)
7368 int *pin_use
, *plen
;
7375 || GET_CODE (PATTERN (insn
)) == CLOBBER
7376 || GET_CODE (PATTERN (insn
)) == USE
)
7381 enum alphaev5_pipe pipe
;
7383 pipe
= alphaev5_insn_pipe (insn
);
7387 /* Force complex instructions to start new groups. */
	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
7394 if (recog_memoized (insn
) < 0)
7397 len
= get_attr_length (insn
);
7400 /* ??? Most of the places below, we would like to abort, as
7401 it would indicate an error either in Haifa, or in the
7402 scheduling description. Unfortunately, Haifa never
7403 schedules the last instruction of the BB, so we don't
7404 have an accurate TI bit to go off. */
7406 if (in_use
& EV5_E0
)
7408 if (in_use
& EV5_E1
)
7413 in_use
|= EV5_E0
| EV5_E01
;
7417 if (in_use
& EV5_E0
)
7419 if (!(in_use
& EV5_E01
) || (in_use
& EV5_E1
))
7427 if (in_use
& EV5_E1
)
7433 if (in_use
& EV5_FA
)
7435 if (in_use
& EV5_FM
)
7440 in_use
|= EV5_FA
| EV5_FAM
;
7444 if (in_use
& EV5_FA
)
7450 if (in_use
& EV5_FM
)
7463 /* Haifa doesn't do well scheduling branches. */
7464 /* ??? If this is predicted not-taken, slotting continues, except
7465 that no more IBR, FBR, or JSR insns may be slotted. */
7466 if (GET_CODE (insn
) == JUMP_INSN
)
7470 insn
= next_nonnote_insn (insn
);
7472 if (!insn
|| ! INSN_P (insn
))
7475 /* Let Haifa tell us where it thinks insn group boundaries are. */
7476 if (GET_MODE (insn
) == TImode
)
7479 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
7484 insn
= next_nonnote_insn (insn
);
7493 alphaev4_next_nop (pin_use
)
7496 int in_use
= *pin_use
;
7499 if (!(in_use
& EV4_IB0
))
7504 else if ((in_use
& (EV4_IBX
|EV4_IB1
)) == EV4_IBX
)
7509 else if (TARGET_FP
&& !(in_use
& EV4_IB1
))
7522 alphaev5_next_nop (pin_use
)
7525 int in_use
= *pin_use
;
7528 if (!(in_use
& EV5_E1
))
7533 else if (TARGET_FP
&& !(in_use
& EV5_FA
))
7538 else if (TARGET_FP
&& !(in_use
& EV5_FM
))
7550 /* The instruction group alignment main loop. */
7553 alpha_align_insns (insns
, max_align
, next_group
, next_nop
)
7555 unsigned int max_align
;
7556 rtx (*next_group
) PARAMS ((rtx
, int *, int *));
7557 rtx (*next_nop
) PARAMS ((int *));
7559 /* ALIGN is the known alignment for the insn group. */
7561 /* OFS is the offset of the current insn in the insn group. */
7563 int prev_in_use
, in_use
, len
;
7566 /* Let shorten branches care for assigning alignments to code labels. */
7567 shorten_branches (insns
);
7569 if (align_functions
< 4)
7571 else if (align_functions
< max_align
)
7572 align
= align_functions
;
7576 ofs
= prev_in_use
= 0;
7578 if (GET_CODE (i
) == NOTE
)
7579 i
= next_nonnote_insn (i
);
7583 next
= (*next_group
) (i
, &in_use
, &len
);
7585 /* When we see a label, resync alignment etc. */
7586 if (GET_CODE (i
) == CODE_LABEL
)
7588 unsigned int new_align
= 1 << label_to_alignment (i
);
7590 if (new_align
>= align
)
7592 align
= new_align
< max_align
? new_align
: max_align
;
7596 else if (ofs
& (new_align
-1))
7597 ofs
= (ofs
| (new_align
-1)) + 1;
      /* Handle complex instructions specially.  */
7603 else if (in_use
== 0)
7605 /* Asms will have length < 0. This is a signal that we have
7606 lost alignment knowledge. Assume, however, that the asm
7607 will not mis-align instructions. */
7616 /* If the known alignment is smaller than the recognized insn group,
7617 realign the output. */
7618 else if ((int) align
< len
)
7620 unsigned int new_log_align
= len
> 8 ? 4 : 3;
7623 where
= prev
= prev_nonnote_insn (i
);
7624 if (!where
|| GET_CODE (where
) != CODE_LABEL
)
7627 /* Can't realign between a call and its gp reload. */
7628 if (! (TARGET_EXPLICIT_RELOCS
7629 && prev
&& GET_CODE (prev
) == CALL_INSN
))
7631 emit_insn_before (gen_realign (GEN_INT (new_log_align
)), where
);
7632 align
= 1 << new_log_align
;
7637 /* If the group won't fit in the same INT16 as the previous,
7638 we need to add padding to keep the group together. Rather
7639 than simply leaving the insn filling to the assembler, we
7640 can make use of the knowledge of what sorts of instructions
7641 were issued in the previous group to make sure that all of
7642 the added nops are really free. */
7643 else if (ofs
+ len
> (int) align
)
7645 int nop_count
= (align
- ofs
) / 4;
	  /* Insert nops before labels, branches, and calls to truly merge
	     the execution of the nops with the previous instruction group.  */
7650 where
= prev_nonnote_insn (i
);
7653 if (GET_CODE (where
) == CODE_LABEL
)
7655 rtx where2
= prev_nonnote_insn (where
);
7656 if (where2
&& GET_CODE (where2
) == JUMP_INSN
)
7659 else if (GET_CODE (where
) == INSN
)
7666 emit_insn_before ((*next_nop
)(&prev_in_use
), where
);
7667 while (--nop_count
);
7671 ofs
= (ofs
+ len
) & (align
- 1);
7672 prev_in_use
= in_use
;
/* Machine dependent reorg pass.  */

void
alpha_reorg (insns)
     rtx insns;
{
  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows (insns);

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_cpu == PROCESSOR_EV4)
	alpha_align_insns (insns, 8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_cpu == PROCESSOR_EV5)
	alpha_align_insns (insns, 16, alphaev5_next_group, alphaev5_next_nop);
    }
}
/* Check a floating-point value for validity for a particular machine mode.  */

static const char * const float_strings[] =
{
  /* These are for FLOAT_VAX.  */
   "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
  "-1.70141173319264430e+38",
   "2.93873587705571877e-39", /* 2^-128 */
  "-2.93873587705571877e-39",
  /* These are for the default broken IEEE mode, which traps
     on infinity or denormal numbers.  */
   "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
  "-3.402823466385288598117e+38",
   "1.1754943508222875079687e-38", /* 2^-126 */
  "-1.1754943508222875079687e-38",
};

static REAL_VALUE_TYPE float_values[8];
static int inited_float_values = 0;
int
check_float_value (mode, d, overflow)
     enum machine_mode mode;
     REAL_VALUE_TYPE *d;
     int overflow ATTRIBUTE_UNUSED;
{
  if (TARGET_IEEE || TARGET_IEEE_CONFORMANT || TARGET_IEEE_WITH_INEXACT)
    return 0;

  if (inited_float_values == 0)
    {
      int i;
      for (i = 0; i < 8; i++)
	float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);

      inited_float_values = 1;
    }

  if (mode == SFmode)
    {
      REAL_VALUE_TYPE r;
      REAL_VALUE_TYPE *fvptr;

      if (TARGET_FLOAT_VAX)
	fvptr = &float_values[0];
      else
	fvptr = &float_values[4];

      memcpy (&r, d, sizeof (REAL_VALUE_TYPE));
      if (REAL_VALUES_LESS (fvptr[0], r))
	{
	  memcpy (d, &fvptr[0], sizeof (REAL_VALUE_TYPE));
	  return 1;
	}
      else if (REAL_VALUES_LESS (r, fvptr[1]))
	{
	  memcpy (d, &fvptr[1], sizeof (REAL_VALUE_TYPE));
	  return 1;
	}
      else if (REAL_VALUES_LESS (dconst0, r)
	       && REAL_VALUES_LESS (r, fvptr[2]))
	{
	  memcpy (d, &dconst0, sizeof (REAL_VALUE_TYPE));
	  return 1;
	}
      else if (REAL_VALUES_LESS (r, dconst0)
	       && REAL_VALUES_LESS (fvptr[3], r))
	{
	  memcpy (d, &dconst0, sizeof (REAL_VALUE_TYPE));
	  return 1;
	}
    }

  return 0;
}
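/* Illustrative aside, not part of the compiler: the bounds quoted in
   float_strings[] follow directly from the formats noted there.  The
   largest VAX F value is 2^127 * (2^24 - 1) / 2^24 and the largest finite
   IEEE single is 2^128 * (1 - 2^-24).  The standalone snippet below (a
   hypothetical host-side check) reproduces the literals to double
   precision.  */
#if 0
#include <stdio.h>
#include <math.h>

int
main (void)
{
  double vax_f_max  = ldexp ((double) ((1 << 24) - 1), 127 - 24);
  double ieee_s_max = ldexp (1.0 - ldexp (1.0, -24), 128);

  printf ("%.17e\n%.17e\n", vax_f_max, ieee_s_max);
  return 0;
}
#endif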
7777 #if TARGET_ABI_OPEN_VMS
7779 /* Return the VMS argument type corresponding to MODE. */
7782 alpha_arg_type (mode
)
7783 enum machine_mode mode
;
7788 return TARGET_FLOAT_VAX
? FF
: FS
;
7790 return TARGET_FLOAT_VAX
? FD
: FT
;
7796 /* Return an rtx for an integer representing the VMS Argument Information
7800 alpha_arg_info_reg_val (cum
)
7801 CUMULATIVE_ARGS cum
;
7803 unsigned HOST_WIDE_INT regval
= cum
.num_args
;
7806 for (i
= 0; i
< 6; i
++)
7807 regval
|= ((int) cum
.atypes
[i
]) << (i
* 3 + 8);
7809 return GEN_INT (regval
);
#include <splay-tree.h>

/* Structure to collect function names for final output
   in link section.  */

enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};

struct alpha_links
{
  rtx linkage;
  enum links_kind kind;
};

static splay_tree alpha_links;

static int mark_alpha_links_node PARAMS ((splay_tree_node, void *));
static void mark_alpha_links PARAMS ((void *));
static int alpha_write_one_linkage PARAMS ((splay_tree_node, void *));

/* Protect alpha_links from garbage collection.  */
7834 mark_alpha_links_node (node
, data
)
7835 splay_tree_node node
;
7836 void *data ATTRIBUTE_UNUSED
;
7838 struct alpha_links
*links
= (struct alpha_links
*) node
->value
;
7839 ggc_mark_rtx (links
->linkage
);
7844 mark_alpha_links (ptr
)
7847 splay_tree tree
= *(splay_tree
*) ptr
;
7848 splay_tree_foreach (tree
, mark_alpha_links_node
, NULL
);
/* Make (or fake) .linkage entry for function call.

   IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.

   Return a SYMBOL_REF rtx for the linkage.  */
7858 alpha_need_linkage (name
, is_local
)
7862 splay_tree_node node
;
7863 struct alpha_links
*al
;
7870 /* Is this name already defined? */
7872 node
= splay_tree_lookup (alpha_links
, (splay_tree_key
) name
);
7875 al
= (struct alpha_links
*) node
->value
;
7878 /* Defined here but external assumed. */
7879 if (al
->kind
== KIND_EXTERN
)
7880 al
->kind
= KIND_LOCAL
;
7884 /* Used here but unused assumed. */
7885 if (al
->kind
== KIND_UNUSED
)
7886 al
->kind
= KIND_LOCAL
;
7893 alpha_links
= splay_tree_new ((splay_tree_compare_fn
) strcmp
,
7894 (splay_tree_delete_key_fn
) free
,
7895 (splay_tree_delete_key_fn
) free
);
7896 ggc_add_root (&alpha_links
, 1, 1, mark_alpha_links
);
7899 al
= (struct alpha_links
*) xmalloc (sizeof (struct alpha_links
));
7900 name
= xstrdup (name
);
7902 /* Assume external if no definition. */
7903 al
->kind
= (is_local
? KIND_UNUSED
: KIND_EXTERN
);
7905 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
7906 get_identifier (name
);
7908 /* Construct a SYMBOL_REF for us to call. */
7910 size_t name_len
= strlen (name
);
7911 char *linksym
= alloca (name_len
+ 6);
7913 memcpy (linksym
+ 1, name
, name_len
);
7914 memcpy (linksym
+ 1 + name_len
, "..lk", 5);
7915 al
->linkage
= gen_rtx_SYMBOL_REF (Pmode
,
7916 ggc_alloc_string (linksym
, name_len
+ 5));
7919 splay_tree_insert (alpha_links
, (splay_tree_key
) name
,
7920 (splay_tree_value
) al
);
7926 alpha_write_one_linkage (node
, data
)
7927 splay_tree_node node
;
7930 const char *const name
= (const char *) node
->key
;
7931 struct alpha_links
*links
= (struct alpha_links
*) node
->value
;
7932 FILE *stream
= (FILE *) data
;
7934 if (links
->kind
== KIND_UNUSED
7935 || ! TREE_SYMBOL_REFERENCED (get_identifier (name
)))
7938 fprintf (stream
, "$%s..lk:\n", name
);
7939 if (links
->kind
== KIND_LOCAL
)
7941 /* Local and used, build linkage pair. */
7942 fprintf (stream
, "\t.quad %s..en\n", name
);
7943 fprintf (stream
, "\t.quad %s\n", name
);
7947 /* External and used, request linkage pair. */
7948 fprintf (stream
, "\t.linkage %s\n", name
);
7955 alpha_write_linkage (stream
)
7958 readonly_section ();
7959 fprintf (stream
, "\t.align 3\n");
7960 splay_tree_foreach (alpha_links
, alpha_write_one_linkage
, stream
);
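/* For illustration (with hypothetical symbol names): given one locally
   defined function `foo' and one referenced external function `bar', the
   loop above emits roughly

	.align 3
   $foo..lk:
	.quad foo..en
	.quad foo
   $bar..lk:
	.linkage bar

   into the read-only section; entries whose identifiers were never
   referenced are skipped.  */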
7963 /* Given a decl, a section name, and whether the decl initializer
7964 has relocs, choose attributes for the section. */
7966 #define SECTION_VMS_OVERLAY SECTION_FORGET
7969 vms_section_type_flags (decl
, name
, reloc
)
7974 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
7976 if (decl
&& DECL_ATTRIBUTES (decl
)
7977 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl
)))
7978 flags
|= SECTION_VMS_OVERLAY
;
7983 /* Switch to an arbitrary section NAME with attributes as specified
7984 by FLAGS. ALIGN specifies any known alignment requirements for
7985 the section; 0 if the default should be used. */
7988 vms_asm_named_section (name
, flags
)
7992 const char *flag_str
= "";
7994 if (flags
& SECTION_VMS_OVERLAY
)
7996 else if (flags
& SECTION_DEBUG
)
7997 flag_str
= ",NOWRT";
7999 fprintf (asm_out_file
, ".section\t%s%s\n", name
, flag_str
);
8002 /* Record an element in the table of global constructors. SYMBOL is
8003 a SYMBOL_REF of the function to be called; PRIORITY is a number
8004 between 0 and MAX_INIT_PRIORITY.
8006 Differs from default_ctors_section_asm_out_constructor in that the
8007 width of the .ctors entry is always 64 bits, rather than the 32 bits
8008 used by a normal pointer. */
8011 vms_asm_out_constructor (symbol
, priority
)
8013 int priority ATTRIBUTE_UNUSED
;
8016 assemble_align (BITS_PER_WORD
);
8017 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
8021 vms_asm_out_destructor (symbol
, priority
)
8023 int priority ATTRIBUTE_UNUSED
;
8026 assemble_align (BITS_PER_WORD
);
8027 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
8032 alpha_need_linkage (name
, is_local
)
8033 const char *name ATTRIBUTE_UNUSED
;
8034 int is_local ATTRIBUTE_UNUSED
;
8039 #endif /* TARGET_ABI_OPEN_VMS */
#if TARGET_ABI_UNICOSMK

static void unicosmk_output_module_name PARAMS ((FILE *));
static void unicosmk_output_default_externs PARAMS ((FILE *));
static void unicosmk_output_dex PARAMS ((FILE *));
static void unicosmk_output_externs PARAMS ((FILE *));
static void unicosmk_output_addr_vec PARAMS ((FILE *, rtx));
static const char *unicosmk_ssib_name PARAMS ((void));
static int unicosmk_special_name PARAMS ((const char *));
8051 /* Define the offset between two registers, one to be eliminated, and the
8052 other its replacement, at the start of a routine. */
8055 unicosmk_initial_elimination_offset (from
, to
)
8061 fixed_size
= alpha_sa_size();
8062 if (fixed_size
!= 0)
8065 if (from
== FRAME_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
8067 else if (from
== ARG_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
8069 else if (from
== FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
8070 return (ALPHA_ROUND (current_function_outgoing_args_size
)
8071 + ALPHA_ROUND (get_frame_size()));
8072 else if (from
== ARG_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
8073 return (ALPHA_ROUND (fixed_size
)
8074 + ALPHA_ROUND (get_frame_size()
8075 + current_function_outgoing_args_size
));
/* Output the module name for .ident and .end directives.  We have to strip
   directories and make sure that the module name starts with a letter
   or '$'.  */
)
8090 /* Strip directories. */
8092 name
= strrchr (main_input_filename
, '/');
8096 name
= main_input_filename
;
8098 /* CAM only accepts module names that start with a letter or '$'. We
8099 prefix the module name with a '$' if necessary. */
8101 if (!ISALPHA (*name
))
8102 fprintf (file
, "$%s", name
);
/* Output text to appear at the beginning of an assembler file.  */
8110 unicosmk_asm_file_start (file
)
8115 fputs ("\t.ident\t", file
);
8116 unicosmk_output_module_name (file
);
8117 fputs ("\n\n", file
);
8119 /* The Unicos/Mk assembler uses different register names. Instead of trying
8120 to support them, we simply use micro definitions. */
8122 /* CAM has different register names: rN for the integer register N and fN
8123 for the floating-point register N. Instead of trying to use these in
8124 alpha.md, we define the symbols $N and $fN to refer to the appropriate
8127 for (i
= 0; i
< 32; ++i
)
8128 fprintf (file
, "$%d <- r%d\n", i
, i
);
8130 for (i
= 0; i
< 32; ++i
)
8131 fprintf (file
, "$f%d <- f%d\n", i
, i
);
  /* The .align directive fills unused space with zeroes, which does not work
     in code sections.  We define the macro 'gcc@code@align' which uses nops
     instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */
8141 fputs ("\t.macro gcc@code@align n\n", file
);
8142 fputs ("gcc@n@bytes = 1 << n\n", file
);
8143 fputs ("gcc@here = . % gcc@n@bytes\n", file
);
8144 fputs ("\t.if ne, gcc@here, 0\n", file
);
8145 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", file
);
8146 fputs ("\tbis r31,r31,r31\n", file
);
8147 fputs ("\t.endr\n", file
);
8148 fputs ("\t.endif\n", file
);
8149 fputs ("\t.endm gcc@code@align\n\n", file
);
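  /* Taken together, the fputs calls above emit this assembler macro at the
     top of every file:

	.macro gcc@code@align n
     gcc@n@bytes = 1 << n
     gcc@here = . % gcc@n@bytes
	.if ne, gcc@here, 0
	.repeat (gcc@n@bytes - gcc@here) / 4
	bis r31,r31,r31
	.endr
	.endif
	.endm gcc@code@align

     i.e. the space up to the next 2^n byte boundary is filled with
     `bis r31,r31,r31' (the canonical Alpha nop) instead of zeroes.  */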
8151 /* Output extern declarations which should always be visible. */
8152 unicosmk_output_default_externs (file
);
8154 /* Open a dummy section. We always need to be inside a section for the
8155 section-switching code to work correctly.
8156 ??? This should be a module id or something like that. I still have to
8157 figure out what the rules for those are. */
8158 fputs ("\n\t.psect\t$SG00000,data\n", file
);
8161 /* Output text to appear at the end of an assembler file. This includes all
8162 pending extern declarations and DEX expressions. */
8165 unicosmk_asm_file_end (file
)
8168 fputs ("\t.endp\n\n", file
);
8170 /* Output all pending externs. */
8172 unicosmk_output_externs (file
);
8174 /* Output dex definitions used for functions whose names conflict with
8177 unicosmk_output_dex (file
);
8179 fputs ("\t.end\t", file
);
8180 unicosmk_output_module_name (file
);
8184 /* Output the definition of a common variable. */
8187 unicosmk_output_common (file
, name
, size
, align
)
8194 printf ("T3E__: common %s\n", name
);
8197 fputs("\t.endp\n\n\t.psect ", file
);
8198 assemble_name(file
, name
);
8199 fprintf(file
, ",%d,common\n", floor_log2 (align
/ BITS_PER_UNIT
));
8200 fprintf(file
, "\t.byte\t0:%d\n", size
);
8202 /* Mark the symbol as defined in this module. */
8203 name_tree
= get_identifier (name
);
8204 TREE_ASM_WRITTEN (name_tree
) = 1;
8207 #define SECTION_PUBLIC SECTION_MACH_DEP
8208 #define SECTION_MAIN (SECTION_PUBLIC << 1)
8209 static int current_section_align
;
8212 unicosmk_section_type_flags (decl
, name
, reloc
)
8215 int reloc ATTRIBUTE_UNUSED
;
8217 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
8222 if (TREE_CODE (decl
) == FUNCTION_DECL
)
8224 current_section_align
= floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
);
8225 if (align_functions_log
> current_section_align
)
8226 current_section_align
= align_functions_log
;
8228 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
)), "main"))
8229 flags
|= SECTION_MAIN
;
8232 current_section_align
= floor_log2 (DECL_ALIGN (decl
) / BITS_PER_UNIT
);
8234 if (TREE_PUBLIC (decl
))
8235 flags
|= SECTION_PUBLIC
;
8240 /* Generate a section name for decl and associate it with the
8244 unicosmk_unique_section (decl
, reloc
)
8246 int reloc ATTRIBUTE_UNUSED
;
8254 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
8255 STRIP_NAME_ENCODING (name
, name
);
8256 len
= strlen (name
);
8258 if (TREE_CODE (decl
) == FUNCTION_DECL
)
8262 /* It is essential that we prefix the section name here because
8263 otherwise the section names generated for constructors and
8264 destructors confuse collect2. */
8266 string
= alloca (len
+ 6);
8267 sprintf (string
, "code@%s", name
);
8268 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
8270 else if (TREE_PUBLIC (decl
))
8271 DECL_SECTION_NAME (decl
) = build_string (len
, name
);
8276 string
= alloca (len
+ 6);
8277 sprintf (string
, "data@%s", name
);
8278 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
8282 /* Switch to an arbitrary section NAME with attributes as specified
8283 by FLAGS. ALIGN specifies any known alignment requirements for
8284 the section; 0 if the default should be used. */
8287 unicosmk_asm_named_section (name
, flags
)
8293 /* Close the previous section. */
8295 fputs ("\t.endp\n\n", asm_out_file
);
8297 /* Find out what kind of section we are opening. */
8299 if (flags
& SECTION_MAIN
)
8300 fputs ("\t.start\tmain\n", asm_out_file
);
8302 if (flags
& SECTION_CODE
)
8304 else if (flags
& SECTION_PUBLIC
)
8309 if (current_section_align
!= 0)
8310 fprintf (asm_out_file
, "\t.psect\t%s,%d,%s\n", name
,
8311 current_section_align
, kind
);
8313 fprintf (asm_out_file
, "\t.psect\t%s,%s\n", name
, kind
);
8317 unicosmk_insert_attributes (decl
, attr_ptr
)
8319 tree
*attr_ptr ATTRIBUTE_UNUSED
;
8322 && (TREE_PUBLIC (decl
) || TREE_CODE (decl
) == FUNCTION_DECL
))
8323 UNIQUE_SECTION (decl
, 0);
/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */
8330 unicosmk_output_align (file
, align
)
8334 if (inside_function
)
8335 fprintf (file
, "\tgcc@code@align\t%d\n", align
);
8337 fprintf (file
, "\t.align\t%d\n", align
);
8340 /* Add a case vector to the current function's list of deferred case
8341 vectors. Case vectors have to be put into a separate section because CAM
8342 does not allow data definitions in code sections. */
8345 unicosmk_defer_case_vector (lab
, vec
)
8349 struct machine_function
*machine
= cfun
->machine
;
8351 vec
= gen_rtx_EXPR_LIST (VOIDmode
, lab
, vec
);
8352 machine
->addr_list
= gen_rtx_EXPR_LIST (VOIDmode
, vec
,
8353 machine
->addr_list
);
8356 /* Output a case vector. */
8359 unicosmk_output_addr_vec (file
, vec
)
8363 rtx lab
= XEXP (vec
, 0);
8364 rtx body
= XEXP (vec
, 1);
8365 int vlen
= XVECLEN (body
, 0);
8368 ASM_OUTPUT_INTERNAL_LABEL (file
, "L", CODE_LABEL_NUMBER (lab
));
8370 for (idx
= 0; idx
< vlen
; idx
++)
8372 ASM_OUTPUT_ADDR_VEC_ELT
8373 (file
, CODE_LABEL_NUMBER (XEXP (XVECEXP (body
, 0, idx
), 0)));
8377 /* Output current function's deferred case vectors. */
8380 unicosmk_output_deferred_case_vectors (file
)
8383 struct machine_function
*machine
= cfun
->machine
;
8386 if (machine
->addr_list
== NULL_RTX
)
8390 for (t
= machine
->addr_list
; t
; t
= XEXP (t
, 1))
8391 unicosmk_output_addr_vec (file
, XEXP (t
, 0));
8394 /* Set up the dynamic subprogram information block (DSIB) and update the
8395 frame pointer register ($15) for subroutines which have a frame. If the
8396 subroutine doesn't have a frame, simply increment $15. */
8399 unicosmk_gen_dsib (imaskP
)
8400 unsigned long * imaskP
;
8402 if (alpha_is_stack_procedure
)
8404 const char *ssib_name
;
8407 /* Allocate 64 bytes for the DSIB. */
8409 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
8411 emit_insn (gen_blockage ());
8413 /* Save the return address. */
8415 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 56));
8416 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8417 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
8418 (*imaskP
) &= ~(1L << REG_RA
);
8420 /* Save the old frame pointer. */
8422 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 48));
8423 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8424 FRP (emit_move_insn (mem
, hard_frame_pointer_rtx
));
8425 (*imaskP
) &= ~(1L << HARD_FRAME_POINTER_REGNUM
);
8427 emit_insn (gen_blockage ());
8429 /* Store the SSIB pointer. */
8431 ssib_name
= ggc_strdup (unicosmk_ssib_name ());
8432 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 32));
8433 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8435 FRP (emit_move_insn (gen_rtx_REG (DImode
, 5),
8436 gen_rtx_SYMBOL_REF (Pmode
, ssib_name
)));
8437 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 5)));
8439 /* Save the CIW index. */
8441 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 24));
8442 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8443 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 25)));
8445 emit_insn (gen_blockage ());
8447 /* Set the new frame pointer. */
8449 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
8450 stack_pointer_rtx
, GEN_INT (64))));
8455 /* Increment the frame pointer register to indicate that we do not
8458 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
8459 hard_frame_pointer_rtx
, GEN_INT (1))));
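/* For reference, the DSIB slots that unicosmk_gen_dsib stores above, as
   offsets from the stack pointer after the 64-byte block has been
   allocated (the new frame pointer is then set to sp + 64):

	sp + 56:  return address ($26)
	sp + 48:  caller's frame pointer ($15)
	sp + 32:  pointer to the SSIB
	sp + 24:  CIW index (copied from $25)

   Only the slots written by the code above are listed.  */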
8463 #define SSIB_PREFIX "__SSIB_"
8464 #define SSIB_PREFIX_LEN 7
8466 /* Generate the name of the SSIB section for the current function. */
8469 unicosmk_ssib_name ()
8471 /* This is ok since CAM won't be able to deal with names longer than that
8474 static char name
[256];
8480 x
= DECL_RTL (cfun
->decl
);
8481 if (GET_CODE (x
) != MEM
)
8484 if (GET_CODE (x
) != SYMBOL_REF
)
8486 fnname
= XSTR (x
, 0);
8487 STRIP_NAME_ENCODING (fnname
, fnname
);
8489 len
= strlen (fnname
);
8490 if (len
+ SSIB_PREFIX_LEN
> 255)
8491 len
= 255 - SSIB_PREFIX_LEN
;
8493 strcpy (name
, SSIB_PREFIX
);
8494 strncpy (name
+ SSIB_PREFIX_LEN
, fnname
, len
);
8495 name
[len
+ SSIB_PREFIX_LEN
] = 0;
8500 /* Output the static subroutine information block for the current
8504 unicosmk_output_ssib (file
, fnname
)
8512 struct machine_function
*machine
= cfun
->machine
;
8515 fprintf (file
, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix
,
8516 unicosmk_ssib_name ());
8518 /* Some required stuff and the function name length. */
8520 len
= strlen (fnname
);
8521 fprintf (file
, "\t.quad\t^X20008%2.2X28\n", len
);
8524 ??? We don't do that yet. */
8526 fputs ("\t.quad\t0\n", file
);
8528 /* Function address. */
8530 fputs ("\t.quad\t", file
);
8531 assemble_name (file
, fnname
);
8534 fputs ("\t.quad\t0\n", file
);
8535 fputs ("\t.quad\t0\n", file
);
8538 ??? We do it the same way Cray CC does it but this could be
8541 for( i
= 0; i
< len
; i
++ )
8542 fprintf (file
, "\t.byte\t%d\n", (int)(fnname
[i
]));
8543 if( (len
% 8) == 0 )
8544 fputs ("\t.quad\t0\n", file
);
8546 fprintf (file
, "\t.bits\t%d : 0\n", (8 - (len
% 8))*8);
8548 /* All call information words used in the function. */
8550 for (x
= machine
->first_ciw
; x
; x
= XEXP (x
, 1))
8553 fprintf (file
, "\t.quad\t");
8554 #if HOST_BITS_PER_WIDE_INT == 32
8555 fprintf (file
, HOST_WIDE_INT_PRINT_DOUBLE_HEX
,
8556 CONST_DOUBLE_HIGH (ciw
), CONST_DOUBLE_LOW (ciw
));
8558 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (ciw
));
8560 fprintf (file
, "\n");
8564 /* Add a call information word (CIW) to the list of the current function's
8565 CIWs and return its index.
8567 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
8570 unicosmk_add_call_info_word (x
)
8574 struct machine_function
*machine
= cfun
->machine
;
8576 node
= gen_rtx_EXPR_LIST (VOIDmode
, x
, NULL_RTX
);
8577 if (machine
->first_ciw
== NULL_RTX
)
8578 machine
->first_ciw
= node
;
8580 XEXP (machine
->last_ciw
, 1) = node
;
8582 machine
->last_ciw
= node
;
8583 ++machine
->ciw_count
;
8585 return GEN_INT (machine
->ciw_count
8586 + strlen (current_function_name
)/8 + 5);
8589 static char unicosmk_section_buf
[100];
8592 unicosmk_text_section ()
8594 static int count
= 0;
8595 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
8597 return unicosmk_section_buf
;
8601 unicosmk_data_section ()
8603 static int count
= 1;
8604 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
8606 return unicosmk_section_buf
;
/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file.  We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of the file.  */
/* List of identifiers for which an extern declaration might have to be
   emitted.  */

struct unicosmk_extern_list
{
  struct unicosmk_extern_list *next;
  const char *name;
};

static struct unicosmk_extern_list *unicosmk_extern_head = 0;
8626 /* Output extern declarations which are required for every asm file. */
8629 unicosmk_output_default_externs (file
)
8632 static const char *const externs
[] =
8633 { "__T3E_MISMATCH" };
8638 n
= ARRAY_SIZE (externs
);
8640 for (i
= 0; i
< n
; i
++)
8641 fprintf (file
, "\t.extern\t%s\n", externs
[i
]);
/* Output extern declarations for global symbols which have been referenced
   but not defined.  */
8648 unicosmk_output_externs (file
)
8651 struct unicosmk_extern_list
*p
;
8652 const char *real_name
;
8656 len
= strlen (user_label_prefix
);
8657 for (p
= unicosmk_extern_head
; p
!= 0; p
= p
->next
)
8659 /* We have to strip the encoding and possibly remove user_label_prefix
8660 from the identifier in order to handle -fleading-underscore and
8661 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
8662 STRIP_NAME_ENCODING (real_name
, p
->name
);
8663 if (len
&& p
->name
[0] == '*'
8664 && !memcmp (real_name
, user_label_prefix
, len
))
8667 name_tree
= get_identifier (real_name
);
8668 if (! TREE_ASM_WRITTEN (name_tree
))
8670 TREE_ASM_WRITTEN (name_tree
) = 1;
8671 fputs ("\t.extern\t", file
);
8672 assemble_name (file
, p
->name
);
8678 /* Record an extern. */
8681 unicosmk_add_extern (name
)
8684 struct unicosmk_extern_list
*p
;
8686 p
= (struct unicosmk_extern_list
*)
8687 permalloc (sizeof (struct unicosmk_extern_list
));
8688 p
->next
= unicosmk_extern_head
;
8690 unicosmk_extern_head
= p
;
8693 /* The Cray assembler generates incorrect code if identifiers which
8694 conflict with register names are used as instruction operands. We have
8695 to replace such identifiers with DEX expressions. */
/* Structure to collect identifiers which have been replaced by DEX
   expressions.  */

struct unicosmk_dex {
  struct unicosmk_dex *next;
  const char *name;
};

/* List of identifiers which have been replaced by DEX expressions.  The DEX
   number is determined by the position in the list.  */

static struct unicosmk_dex *unicosmk_dex_list = NULL;

/* The number of elements in the DEX list.  */

static int unicosmk_dex_count = 0;

/* Check if NAME must be replaced by a DEX expression.  */
8717 unicosmk_special_name (name
)
8726 if (name
[0] != 'r' && name
[0] != 'f' && name
[0] != 'R' && name
[0] != 'F')
8732 return (name
[2] == '\0' || (ISDIGIT (name
[2]) && name
[3] == '\0'));
8735 return (name
[2] == '\0'
8736 || ((name
[2] == '0' || name
[2] == '1') && name
[3] == '\0'));
8739 return (ISDIGIT (name
[1]) && name
[2] == '\0');
8743 /* Return the DEX number if X must be replaced by a DEX expression and 0
8747 unicosmk_need_dex (x
)
8750 struct unicosmk_dex
*dex
;
8754 if (GET_CODE (x
) != SYMBOL_REF
)
8758 if (! unicosmk_special_name (name
))
8761 i
= unicosmk_dex_count
;
8762 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
8764 if (! strcmp (name
, dex
->name
))
8769 dex
= (struct unicosmk_dex
*) permalloc (sizeof (struct unicosmk_dex
));
8771 dex
->next
= unicosmk_dex_list
;
8772 unicosmk_dex_list
= dex
;
8774 ++unicosmk_dex_count
;
8775 return unicosmk_dex_count
;
8778 /* Output the DEX definitions for this file. */
8781 unicosmk_output_dex (file
)
8784 struct unicosmk_dex
*dex
;
8787 if (unicosmk_dex_list
== NULL
)
8790 fprintf (file
, "\t.dexstart\n");
8792 i
= unicosmk_dex_count
;
8793 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
8795 fprintf (file
, "\tDEX (%d) = ", i
);
8796 assemble_name (file
, dex
->name
);
8801 fprintf (file
, "\t.dexend\n");
8807 unicosmk_output_deferred_case_vectors (file
)
8808 FILE *file ATTRIBUTE_UNUSED
;
8812 unicosmk_gen_dsib (imaskP
)
8813 unsigned long * imaskP ATTRIBUTE_UNUSED
;
8817 unicosmk_output_ssib (file
, fnname
)
8818 FILE * file ATTRIBUTE_UNUSED
;
8819 const char * fnname ATTRIBUTE_UNUSED
;
8823 unicosmk_add_call_info_word (x
)
8824 rtx x ATTRIBUTE_UNUSED
;
8830 unicosmk_need_dex (x
)
8831 rtx x ATTRIBUTE_UNUSED
;
8836 #endif /* TARGET_ABI_UNICOSMK */