1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GNU CC.
7
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "real.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "output.h"
34 #include "insn-attr.h"
35 #include "flags.h"
36 #include "recog.h"
37 #include "expr.h"
38 #include "optabs.h"
39 #include "reload.h"
40 #include "obstack.h"
41 #include "except.h"
42 #include "function.h"
43 #include "toplev.h"
44 #include "ggc.h"
45 #include "integrate.h"
46 #include "tm_p.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51
52 /* Specify which cpu to schedule for. */
53
54 enum processor_type alpha_cpu;
55 static const char * const alpha_cpu_name[] =
56 {
57 "ev4", "ev5", "ev6"
58 };
59
60 /* Specify how accurate floating-point traps need to be. */
61
62 enum alpha_trap_precision alpha_tp;
63
64 /* Specify the floating-point rounding mode. */
65
66 enum alpha_fp_rounding_mode alpha_fprm;
67
68 /* Specify which things cause traps. */
69
70 enum alpha_fp_trap_mode alpha_fptm;
71
72 /* Specify bit size of immediate TLS offsets. */
73
74 int alpha_tls_size = 32;
75
76 /* Strings decoded into the above options. */
77
78 const char *alpha_cpu_string; /* -mcpu= */
79 const char *alpha_tune_string; /* -mtune= */
80 const char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
81 const char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
82 const char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
83 const char *alpha_mlat_string; /* -mmemory-latency= */
84 const char *alpha_tls_size_string; /* -mtls-size=[16|32|64] */
85
86 /* Save information from a "cmpxx" operation until the branch or scc is
87 emitted. */
88
89 struct alpha_compare alpha_compare;
90
91 /* Non-zero if inside of a function, because the Alpha asm can't
92 handle .files inside of functions. */
93
94 static int inside_function = FALSE;
95
96 /* The number of cycles of latency we should assume on memory reads. */
97
98 int alpha_memory_latency = 3;
99
100 /* Whether the function needs the GP. */
101
102 static int alpha_function_needs_gp;
103
104 /* The alias set for prologue/epilogue register save/restore. */
105
106 static int alpha_sr_alias_set;
107
108 /* The assembler name of the current function. */
109
110 static const char *alpha_fnname;
111
112 /* The next explicit relocation sequence number. */
113 int alpha_next_sequence_number = 1;
114
115 /* The literal and gpdisp sequence numbers for this insn, as printed
116 by %# and %* respectively. */
117 int alpha_this_literal_sequence_number;
118 int alpha_this_gpdisp_sequence_number;
119
120 /* Declarations of static functions. */
121 static int tls_symbolic_operand_1
122 PARAMS ((rtx, enum machine_mode, int, int));
123 static enum tls_model tls_symbolic_operand_type
124 PARAMS ((rtx));
125 static bool decl_in_text_section
126 PARAMS ((tree));
127 static bool alpha_in_small_data_p
128 PARAMS ((tree));
129 static void alpha_encode_section_info
130 PARAMS ((tree, int));
131 static const char *alpha_strip_name_encoding
132 PARAMS ((const char *));
133 static int some_small_symbolic_operand_1
134 PARAMS ((rtx *, void *));
135 static int split_small_symbolic_operand_1
136 PARAMS ((rtx *, void *));
137 static void alpha_set_memflags_1
138 PARAMS ((rtx, int, int, int));
139 static rtx alpha_emit_set_const_1
140 PARAMS ((rtx, enum machine_mode, HOST_WIDE_INT, int));
141 static void alpha_expand_unaligned_load_words
142 PARAMS ((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
143 static void alpha_expand_unaligned_store_words
144 PARAMS ((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
145 static void alpha_init_builtins
146 PARAMS ((void));
147 static rtx alpha_expand_builtin
148 PARAMS ((tree, rtx, rtx, enum machine_mode, int));
149 static void alpha_sa_mask
150 PARAMS ((unsigned long *imaskP, unsigned long *fmaskP));
151 static int find_lo_sum
152 PARAMS ((rtx *, void *));
153 static int alpha_does_function_need_gp
154 PARAMS ((void));
155 static int alpha_ra_ever_killed
156 PARAMS ((void));
157 static const char *get_trap_mode_suffix
158 PARAMS ((void));
159 static const char *get_round_mode_suffix
160 PARAMS ((void));
161 static const char *get_some_local_dynamic_name
162 PARAMS ((void));
163 static int get_some_local_dynamic_name_1
164 PARAMS ((rtx *, void *));
165 static rtx set_frame_related_p
166 PARAMS ((void));
167 static const char *alpha_lookup_xfloating_lib_func
168 PARAMS ((enum rtx_code));
169 static int alpha_compute_xfloating_mode_arg
170 PARAMS ((enum rtx_code, enum alpha_fp_rounding_mode));
171 static void alpha_emit_xfloating_libcall
172 PARAMS ((const char *, rtx, rtx[], int, rtx));
173 static rtx alpha_emit_xfloating_compare
174 PARAMS ((enum rtx_code, rtx, rtx));
175 static void alpha_output_function_end_prologue
176 PARAMS ((FILE *));
177 static int alpha_adjust_cost
178 PARAMS ((rtx, rtx, rtx, int));
179 static int alpha_issue_rate
180 PARAMS ((void));
181 static int alpha_use_dfa_pipeline_interface
182 PARAMS ((void));
183 static int alpha_multipass_dfa_lookahead
184 PARAMS ((void));
185
186 #ifdef OBJECT_FORMAT_ELF
187 static void alpha_elf_select_rtx_section
188 PARAMS ((enum machine_mode, rtx, unsigned HOST_WIDE_INT));
189 #endif
190
191 static struct machine_function * alpha_init_machine_status
192 PARAMS ((void));
193
194 static void unicosmk_output_deferred_case_vectors PARAMS ((FILE *));
195 static void unicosmk_gen_dsib PARAMS ((unsigned long *imaskP));
196 static void unicosmk_output_ssib PARAMS ((FILE *, const char *));
197 static int unicosmk_need_dex PARAMS ((rtx));
198
199 /* Get the number of args of a function in one of two ways. */
200 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
201 #define NUM_ARGS current_function_args_info.num_args
202 #else
203 #define NUM_ARGS current_function_args_info
204 #endif
205
206 #define REG_PV 27
207 #define REG_RA 26
208 \f
209 /* Initialize the GCC target structure. */
210 #if TARGET_ABI_OPEN_VMS
211 const struct attribute_spec vms_attribute_table[];
212 static unsigned int vms_section_type_flags PARAMS ((tree, const char *, int));
213 static void vms_asm_named_section PARAMS ((const char *, unsigned int));
214 static void vms_asm_out_constructor PARAMS ((rtx, int));
215 static void vms_asm_out_destructor PARAMS ((rtx, int));
216 # undef TARGET_ATTRIBUTE_TABLE
217 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
218 # undef TARGET_SECTION_TYPE_FLAGS
219 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
220 #endif
221
222 #undef TARGET_IN_SMALL_DATA_P
223 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
224 #undef TARGET_ENCODE_SECTION_INFO
225 #define TARGET_ENCODE_SECTION_INFO alpha_encode_section_info
226 #undef TARGET_STRIP_NAME_ENCODING
227 #define TARGET_STRIP_NAME_ENCODING alpha_strip_name_encoding
228
229 #if TARGET_ABI_UNICOSMK
230 static void unicosmk_asm_named_section PARAMS ((const char *, unsigned int));
231 static void unicosmk_insert_attributes PARAMS ((tree, tree *));
232 static unsigned int unicosmk_section_type_flags PARAMS ((tree, const char *,
233 int));
234 static void unicosmk_unique_section PARAMS ((tree, int));
235 # undef TARGET_INSERT_ATTRIBUTES
236 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
237 # undef TARGET_SECTION_TYPE_FLAGS
238 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
239 # undef TARGET_ASM_UNIQUE_SECTION
240 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
241 #endif
242
243 #undef TARGET_ASM_ALIGNED_HI_OP
244 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
245 #undef TARGET_ASM_ALIGNED_DI_OP
246 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
247
248 /* Default unaligned ops are provided for ELF systems. To get unaligned
249 data for non-ELF systems, we have to turn off auto alignment. */
250 #ifndef OBJECT_FORMAT_ELF
251 #undef TARGET_ASM_UNALIGNED_HI_OP
252 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
253 #undef TARGET_ASM_UNALIGNED_SI_OP
254 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
255 #undef TARGET_ASM_UNALIGNED_DI_OP
256 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
257 #endif
258
259 #ifdef OBJECT_FORMAT_ELF
260 #undef TARGET_ASM_SELECT_RTX_SECTION
261 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
262 #endif
263
264 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
265 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
266
267 #undef TARGET_SCHED_ADJUST_COST
268 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
269 #undef TARGET_SCHED_ISSUE_RATE
270 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
271 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
272 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE \
273 alpha_use_dfa_pipeline_interface
274 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
275 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
276 alpha_multipass_dfa_lookahead
277
278 #undef TARGET_HAVE_TLS
279 #define TARGET_HAVE_TLS HAVE_AS_TLS
280
281 #undef TARGET_INIT_BUILTINS
282 #define TARGET_INIT_BUILTINS alpha_init_builtins
283 #undef TARGET_EXPAND_BUILTIN
284 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
285
286 struct gcc_target targetm = TARGET_INITIALIZER;
287 \f
288 /* Parse target option strings. */
289
290 void
291 override_options ()
292 {
293 int i;
294 static const struct cpu_table {
295 const char *const name;
296 const enum processor_type processor;
297 const int flags;
298 } cpu_table[] = {
299 #define EV5_MASK (MASK_CPU_EV5)
300 #define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
301 { "ev4", PROCESSOR_EV4, 0 },
302 { "ev45", PROCESSOR_EV4, 0 },
303 { "21064", PROCESSOR_EV4, 0 },
304 { "ev5", PROCESSOR_EV5, EV5_MASK },
305 { "21164", PROCESSOR_EV5, EV5_MASK },
306 { "ev56", PROCESSOR_EV5, EV5_MASK|MASK_BWX },
307 { "21164a", PROCESSOR_EV5, EV5_MASK|MASK_BWX },
308 { "pca56", PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
309 { "21164PC",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
310 { "21164pc",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
311 { "ev6", PROCESSOR_EV6, EV6_MASK },
312 { "21264", PROCESSOR_EV6, EV6_MASK },
313 { "ev67", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
314 { "21264a", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
315 { 0, 0, 0 }
316 };
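 /* For illustration: -mcpu=pca56 selects PROCESSOR_EV5 scheduling and
 turns on the BWX and MAX extensions, while -mtune=pca56 (handled
 below) changes only the scheduling model and leaves target_flags
 untouched. */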
317
318 /* Unicos/Mk doesn't have shared libraries. */
319 if (TARGET_ABI_UNICOSMK && flag_pic)
320 {
321 warning ("-f%s ignored for Unicos/Mk (not supported)",
322 (flag_pic > 1) ? "PIC" : "pic");
323 flag_pic = 0;
324 }
325
326 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
327 floating-point instructions. Make that the default for this target. */
328 if (TARGET_ABI_UNICOSMK)
329 alpha_fprm = ALPHA_FPRM_DYN;
330 else
331 alpha_fprm = ALPHA_FPRM_NORM;
332
333 alpha_tp = ALPHA_TP_PROG;
334 alpha_fptm = ALPHA_FPTM_N;
335
336 /* We cannot use su and sui qualifiers for conversion instructions on
337 Unicos/Mk. I'm not sure if this is due to assembler or hardware
338 limitations. Right now, we issue a warning if -mieee is specified
339 and then ignore it; eventually, we should either get it right or
340 disable the option altogether. */
341
342 if (TARGET_IEEE)
343 {
344 if (TARGET_ABI_UNICOSMK)
345 warning ("-mieee not supported on Unicos/Mk");
346 else
347 {
348 alpha_tp = ALPHA_TP_INSN;
349 alpha_fptm = ALPHA_FPTM_SU;
350 }
351 }
352
353 if (TARGET_IEEE_WITH_INEXACT)
354 {
355 if (TARGET_ABI_UNICOSMK)
356 warning ("-mieee-with-inexact not supported on Unicos/Mk");
357 else
358 {
359 alpha_tp = ALPHA_TP_INSN;
360 alpha_fptm = ALPHA_FPTM_SUI;
361 }
362 }
363
364 if (alpha_tp_string)
365 {
366 if (! strcmp (alpha_tp_string, "p"))
367 alpha_tp = ALPHA_TP_PROG;
368 else if (! strcmp (alpha_tp_string, "f"))
369 alpha_tp = ALPHA_TP_FUNC;
370 else if (! strcmp (alpha_tp_string, "i"))
371 alpha_tp = ALPHA_TP_INSN;
372 else
373 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
374 }
375
376 if (alpha_fprm_string)
377 {
378 if (! strcmp (alpha_fprm_string, "n"))
379 alpha_fprm = ALPHA_FPRM_NORM;
380 else if (! strcmp (alpha_fprm_string, "m"))
381 alpha_fprm = ALPHA_FPRM_MINF;
382 else if (! strcmp (alpha_fprm_string, "c"))
383 alpha_fprm = ALPHA_FPRM_CHOP;
384 else if (! strcmp (alpha_fprm_string,"d"))
385 alpha_fprm = ALPHA_FPRM_DYN;
386 else
387 error ("bad value `%s' for -mfp-rounding-mode switch",
388 alpha_fprm_string);
389 }
390
391 if (alpha_fptm_string)
392 {
393 if (strcmp (alpha_fptm_string, "n") == 0)
394 alpha_fptm = ALPHA_FPTM_N;
395 else if (strcmp (alpha_fptm_string, "u") == 0)
396 alpha_fptm = ALPHA_FPTM_U;
397 else if (strcmp (alpha_fptm_string, "su") == 0)
398 alpha_fptm = ALPHA_FPTM_SU;
399 else if (strcmp (alpha_fptm_string, "sui") == 0)
400 alpha_fptm = ALPHA_FPTM_SUI;
401 else
402 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
403 }
404
405 if (alpha_tls_size_string)
406 {
407 if (strcmp (alpha_tls_size_string, "16") == 0)
408 alpha_tls_size = 16;
409 else if (strcmp (alpha_tls_size_string, "32") == 0)
410 alpha_tls_size = 32;
411 else if (strcmp (alpha_tls_size_string, "64") == 0)
412 alpha_tls_size = 64;
413 else
414 error ("bad value `%s' for -mtls-size switch", alpha_tls_size_string);
415 }
416
417 alpha_cpu
418 = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
419 : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
420
421 if (alpha_cpu_string)
422 {
423 for (i = 0; cpu_table [i].name; i++)
424 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
425 {
426 alpha_cpu = cpu_table [i].processor;
427 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX
428 | MASK_CPU_EV5 | MASK_CPU_EV6);
429 target_flags |= cpu_table [i].flags;
430 break;
431 }
432 if (! cpu_table [i].name)
433 error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
434 }
435
436 if (alpha_tune_string)
437 {
438 for (i = 0; cpu_table [i].name; i++)
439 if (! strcmp (alpha_tune_string, cpu_table [i].name))
440 {
441 alpha_cpu = cpu_table [i].processor;
442 break;
443 }
444 if (! cpu_table [i].name)
445 error ("bad value `%s' for -mcpu switch", alpha_tune_string);
446 }
447
448 /* Do some sanity checks on the above options. */
449
450 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
451 {
452 warning ("trap mode not supported on Unicos/Mk");
453 alpha_fptm = ALPHA_FPTM_N;
454 }
455
456 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
457 && alpha_tp != ALPHA_TP_INSN && ! TARGET_CPU_EV6)
458 {
459 warning ("fp software completion requires -mtrap-precision=i");
460 alpha_tp = ALPHA_TP_INSN;
461 }
462
463 if (TARGET_CPU_EV6)
464 {
465 /* Except for EV6 pass 1 (not released), we always have precise
466 arithmetic traps. Which means we can do software completion
467 without minding trap shadows. */
468 alpha_tp = ALPHA_TP_PROG;
469 }
470
471 if (TARGET_FLOAT_VAX)
472 {
473 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
474 {
475 warning ("rounding mode not supported for VAX floats");
476 alpha_fprm = ALPHA_FPRM_NORM;
477 }
478 if (alpha_fptm == ALPHA_FPTM_SUI)
479 {
480 warning ("trap mode not supported for VAX floats");
481 alpha_fptm = ALPHA_FPTM_SU;
482 }
483 }
484
485 {
486 char *end;
487 int lat;
488
489 if (!alpha_mlat_string)
490 alpha_mlat_string = "L1";
491
492 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
493 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
494 ;
495 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
496 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
497 && alpha_mlat_string[2] == '\0')
498 {
499 static int const cache_latency[][4] =
500 {
501 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
502 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
503 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
504 };
505
506 lat = alpha_mlat_string[1] - '0';
507 if (lat <= 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
508 {
509 warning ("L%d cache latency unknown for %s",
510 lat, alpha_cpu_name[alpha_cpu]);
511 lat = 3;
512 }
513 else
514 lat = cache_latency[alpha_cpu][lat-1];
515 }
516 else if (! strcmp (alpha_mlat_string, "main"))
517 {
518 /* Most current memories have about 370ns latency. This is
519 a reasonable guess for a fast cpu. */
520 lat = 150;
521 }
522 else
523 {
524 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
525 lat = 3;
526 }
527
528 alpha_memory_latency = lat;
529 }
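 /* Illustrative forms accepted above: "-mmemory-latency=5" sets the
 latency to 5 cycles directly; "-mmemory-latency=L2" consults the
 per-cpu cache_latency table (12 cycles on ev5 and ev6); and
 "-mmemory-latency=main" assumes a main-memory access (150 cycles). */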
530
531 /* Default the definition of "small data" to 8 bytes. */
532 if (!g_switch_set)
533 g_switch_value = 8;
534
535 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
536 if (flag_pic == 1)
537 target_flags |= MASK_SMALL_DATA;
538 else if (flag_pic == 2)
539 target_flags &= ~MASK_SMALL_DATA;
540
541 /* Align labels and loops for optimal branching. */
542 /* ??? Kludge these by not doing anything if we don't optimize and also if
543 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
544 if (optimize > 0 && write_symbols != SDB_DEBUG)
545 {
546 if (align_loops <= 0)
547 align_loops = 16;
548 if (align_jumps <= 0)
549 align_jumps = 16;
550 }
551 if (align_functions <= 0)
552 align_functions = 16;
553
554 /* Acquire a unique set number for our register saves and restores. */
555 alpha_sr_alias_set = new_alias_set ();
556
557 /* Register variables and functions with the garbage collector. */
558
559 /* Set up function hooks. */
560 init_machine_status = alpha_init_machine_status;
561 }
562 \f
563 /* Returns 1 if VALUE is a mask that contains full bytes of zeros or ones. */
564
565 int
566 zap_mask (value)
567 HOST_WIDE_INT value;
568 {
569 int i;
570
571 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
572 i++, value >>= 8)
573 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
574 return 0;
575
576 return 1;
577 }
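/* For example, zap_mask accepts 0xffffffff00000000 and 0x00ff00ff00ff00ff,
 where every byte is 0x00 or 0xff, but rejects 0x0000000000000180, whose
 low byte is neither. */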
578
579 /* Returns 1 if OP is either the constant zero or a register. If a
580 register, it must be in the proper mode unless MODE is VOIDmode. */
581
582 int
583 reg_or_0_operand (op, mode)
584 register rtx op;
585 enum machine_mode mode;
586 {
587 return op == CONST0_RTX (mode) || register_operand (op, mode);
588 }
589
590 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
591 any register. */
592
593 int
594 reg_or_6bit_operand (op, mode)
595 register rtx op;
596 enum machine_mode mode;
597 {
598 return ((GET_CODE (op) == CONST_INT
599 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
600 || register_operand (op, mode));
601 }
602
603
604 /* Return 1 if OP is an 8-bit constant or any register. */
605
606 int
607 reg_or_8bit_operand (op, mode)
608 register rtx op;
609 enum machine_mode mode;
610 {
611 return ((GET_CODE (op) == CONST_INT
612 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
613 || register_operand (op, mode));
614 }
615
616 /* Return 1 if OP is a constant or any register. */
617
618 int
619 reg_or_const_int_operand (op, mode)
620 register rtx op;
621 enum machine_mode mode;
622 {
623 return GET_CODE (op) == CONST_INT || register_operand (op, mode);
624 }
625
626 /* Return 1 if OP is an 8-bit constant. */
627
628 int
629 cint8_operand (op, mode)
630 register rtx op;
631 enum machine_mode mode ATTRIBUTE_UNUSED;
632 {
633 return ((GET_CODE (op) == CONST_INT
634 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100));
635 }
636
637 /* Return 1 if the operand is a valid second operand to an add insn. */
638
639 int
640 add_operand (op, mode)
641 register rtx op;
642 enum machine_mode mode;
643 {
644 if (GET_CODE (op) == CONST_INT)
645 /* Constraints I, J, O and P are covered by K. */
646 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
647 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
648
649 return register_operand (op, mode);
650 }
651
652 /* Return 1 if the operand is a valid second operand to a sign-extending
653 add insn. */
654
655 int
656 sext_add_operand (op, mode)
657 register rtx op;
658 enum machine_mode mode;
659 {
660 if (GET_CODE (op) == CONST_INT)
661 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
662 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
663
664 return reg_not_elim_operand (op, mode);
665 }
666
667 /* Return 1 if OP is the constant 4 or 8. */
668
669 int
670 const48_operand (op, mode)
671 register rtx op;
672 enum machine_mode mode ATTRIBUTE_UNUSED;
673 {
674 return (GET_CODE (op) == CONST_INT
675 && (INTVAL (op) == 4 || INTVAL (op) == 8));
676 }
677
678 /* Return 1 if OP is a valid first operand to an AND insn. */
679
680 int
681 and_operand (op, mode)
682 register rtx op;
683 enum machine_mode mode;
684 {
685 if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
686 return (zap_mask (CONST_DOUBLE_LOW (op))
687 && zap_mask (CONST_DOUBLE_HIGH (op)));
688
689 if (GET_CODE (op) == CONST_INT)
690 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
691 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
692 || zap_mask (INTVAL (op)));
693
694 return register_operand (op, mode);
695 }
696
697 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
698
699 int
700 or_operand (op, mode)
701 register rtx op;
702 enum machine_mode mode;
703 {
704 if (GET_CODE (op) == CONST_INT)
705 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
706 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
707
708 return register_operand (op, mode);
709 }
710
711 /* Return 1 if OP is a constant that is the width, in bits, of an integral
712 mode no wider than DImode. */
713
714 int
715 mode_width_operand (op, mode)
716 register rtx op;
717 enum machine_mode mode ATTRIBUTE_UNUSED;
718 {
719 return (GET_CODE (op) == CONST_INT
720 && (INTVAL (op) == 8 || INTVAL (op) == 16
721 || INTVAL (op) == 32 || INTVAL (op) == 64));
722 }
723
724 /* Return 1 if OP is a constant that is the all-ones mask of an integral
725 machine mode no wider than DImode. */
726
727 int
728 mode_mask_operand (op, mode)
729 register rtx op;
730 enum machine_mode mode ATTRIBUTE_UNUSED;
731 {
732 if (GET_CODE (op) == CONST_INT)
733 {
734 HOST_WIDE_INT value = INTVAL (op);
735
736 if (value == 0xff)
737 return 1;
738 if (value == 0xffff)
739 return 1;
740 if (value == 0xffffffff)
741 return 1;
742 if (value == -1)
743 return 1;
744 }
745 else if (HOST_BITS_PER_WIDE_INT == 32 && GET_CODE (op) == CONST_DOUBLE)
746 {
747 if (CONST_DOUBLE_LOW (op) == 0xffffffff && CONST_DOUBLE_HIGH (op) == 0)
748 return 1;
749 }
750
751 return 0;
752 }
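/* That is, the accepted values are exactly the mode masks 0xff (QImode),
 0xffff (HImode), 0xffffffff (SImode) and -1 (DImode), with the
 CONST_DOUBLE case covering 0xffffffff on 32-bit hosts. */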
753
754 /* Return 1 if OP is a multiple of 8 less than 64. */
755
756 int
757 mul8_operand (op, mode)
758 register rtx op;
759 enum machine_mode mode ATTRIBUTE_UNUSED;
760 {
761 return (GET_CODE (op) == CONST_INT
762 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
763 && (INTVAL (op) & 7) == 0);
764 }
765
766 /* Return 1 if OP is the zero constant for MODE. */
767
768 int
769 const0_operand (op, mode)
770 register rtx op;
771 enum machine_mode mode;
772 {
773 return op == CONST0_RTX (mode);
774 }
775
776 /* Return 1 if OP is a hard floating-point register. */
777
778 int
779 hard_fp_register_operand (op, mode)
780 register rtx op;
781 enum machine_mode mode;
782 {
783 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
784 return 0;
785
786 if (GET_CODE (op) == SUBREG)
787 op = SUBREG_REG (op);
788 return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS;
789 }
790
791 /* Return 1 if OP is a hard general register. */
792
793 int
794 hard_int_register_operand (op, mode)
795 register rtx op;
796 enum machine_mode mode;
797 {
798 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
799 return 0;
800
801 if (GET_CODE (op) == SUBREG)
802 op = SUBREG_REG (op);
803 return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS;
804 }
805
806 /* Return 1 if OP is a register or a constant integer. */
807
808
809 int
810 reg_or_cint_operand (op, mode)
811 register rtx op;
812 enum machine_mode mode;
813 {
814 return (GET_CODE (op) == CONST_INT
815 || register_operand (op, mode));
816 }
817
818 /* Return 1 if OP is something that can be reloaded into a register;
819 if it is a MEM, it need not be valid. */
820
821 int
822 some_operand (op, mode)
823 register rtx op;
824 enum machine_mode mode;
825 {
826 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
827 return 0;
828
829 switch (GET_CODE (op))
830 {
831 case REG:
832 case MEM:
833 case CONST_INT:
834 case CONST_DOUBLE:
835 case CONST_VECTOR:
836 case LABEL_REF:
837 case SYMBOL_REF:
838 case CONST:
839 case HIGH:
840 return 1;
841
842 case SUBREG:
843 return some_operand (SUBREG_REG (op), VOIDmode);
844
845 default:
846 break;
847 }
848
849 return 0;
850 }
851
852 /* Likewise, but don't accept constants. */
853
854 int
855 some_ni_operand (op, mode)
856 register rtx op;
857 enum machine_mode mode;
858 {
859 if (GET_MODE (op) != mode && mode != VOIDmode)
860 return 0;
861
862 if (GET_CODE (op) == SUBREG)
863 op = SUBREG_REG (op);
864
865 return (GET_CODE (op) == REG || GET_CODE (op) == MEM);
866 }
867
868 /* Return 1 if OP is a valid operand for the source of a move insn. */
869
870 int
871 input_operand (op, mode)
872 register rtx op;
873 enum machine_mode mode;
874 {
875 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
876 return 0;
877
878 if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)
879 return 0;
880
881 switch (GET_CODE (op))
882 {
883 case LABEL_REF:
884 case SYMBOL_REF:
885 case CONST:
886 if (TARGET_EXPLICIT_RELOCS)
887 {
888 /* We don't split symbolic operands into something unintelligible
889 until after reload, but we do not wish non-small, non-global
890 symbolic operands to be reconstructed from their high/lo_sum
891 form. */
892 return (small_symbolic_operand (op, mode)
893 || global_symbolic_operand (op, mode)
894 || gotdtp_symbolic_operand (op, mode)
895 || gottp_symbolic_operand (op, mode));
896 }
897
898 /* This handles both the Windows/NT and OSF cases. */
899 return mode == ptr_mode || mode == DImode;
900
901 case HIGH:
902 return (TARGET_EXPLICIT_RELOCS
903 && local_symbolic_operand (XEXP (op, 0), mode));
904
905 case REG:
906 case ADDRESSOF:
907 return 1;
908
909 case SUBREG:
910 if (register_operand (op, mode))
911 return 1;
912 /* ... fall through ... */
913 case MEM:
914 return ((TARGET_BWX || (mode != HImode && mode != QImode))
915 && general_operand (op, mode));
916
917 case CONST_DOUBLE:
918 case CONST_VECTOR:
919 return op == CONST0_RTX (mode);
920
921 case CONST_INT:
922 return mode == QImode || mode == HImode || add_operand (op, mode);
923
924 case CONSTANT_P_RTX:
925 return 1;
926
927 default:
928 break;
929 }
930
931 return 0;
932 }
933
934 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
935 file, and in the same section as the current function. */
936
937 int
938 current_file_function_operand (op, mode)
939 rtx op;
940 enum machine_mode mode ATTRIBUTE_UNUSED;
941 {
942 if (GET_CODE (op) != SYMBOL_REF)
943 return 0;
944
945 /* Easy test for recursion. */
946 if (op == XEXP (DECL_RTL (current_function_decl), 0))
947 return 1;
948
949 /* Otherwise, we need the DECL for the SYMBOL_REF, which we can't get.
950 SYMBOL_REF_FLAG has therefore been declared to imply that the function
951 is in the default text section, so we must also check that the
952 current function is in the text section. */
953 if (SYMBOL_REF_FLAG (op) && decl_in_text_section (current_function_decl))
954 return 1;
955
956 return 0;
957 }
958
959 /* Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr. */
960
961 int
962 direct_call_operand (op, mode)
963 rtx op;
964 enum machine_mode mode;
965 {
966 /* Must be defined in this file. */
967 if (! current_file_function_operand (op, mode))
968 return 0;
969
970 /* If profiling is implemented via linker tricks, we can't jump
971 to the nogp alternate entry point. */
972 /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test,
973 but is approximately correct for the OSF ABIs. Don't know
974 what to do for VMS, NT, or UMK. */
975 if (TARGET_PROFILING_NEEDS_GP
976 && current_function_profile)
977 return 0;
978
979 return 1;
980 }
981
982 /* Return true if OP is a LABEL_REF, or a SYMBOL_REF or CONST referencing
983 a (non-tls) variable known to be defined in this file. */
984
985 int
986 local_symbolic_operand (op, mode)
987 rtx op;
988 enum machine_mode mode;
989 {
990 const char *str;
991
992 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
993 return 0;
994
995 if (GET_CODE (op) == LABEL_REF)
996 return 1;
997
998 if (GET_CODE (op) == CONST
999 && GET_CODE (XEXP (op, 0)) == PLUS
1000 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1001 op = XEXP (XEXP (op, 0), 0);
1002
1003 if (GET_CODE (op) != SYMBOL_REF)
1004 return 0;
1005
1006 /* Easy pickings. */
1007 if (CONSTANT_POOL_ADDRESS_P (op) || STRING_POOL_ADDRESS_P (op))
1008 return 1;
1009
1010 /* ??? SYMBOL_REF_FLAG is set for local function symbols, but we
1011 run into problems with the rtl inliner in that the symbol was
1012 once external, but is local after inlining, which results in
1013 unrecognizable insns. */
1014
1015 str = XSTR (op, 0);
1016
1017 /* If @[LS], then alpha_encode_section_info sez it's local. */
1018 if (str[0] == '@' && (str[1] == 'L' || str[1] == 'S'))
1019 return 1;
1020
1021 /* If *$, then ASM_GENERATE_INTERNAL_LABEL sez it's local. */
1022 if (str[0] == '*' && str[1] == '$')
1023 return 1;
1024
1025 return 0;
1026 }
1027
1028 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
1029 known to be defined in this file in the small data area. */
1030
1031 int
1032 small_symbolic_operand (op, mode)
1033 rtx op;
1034 enum machine_mode mode ATTRIBUTE_UNUSED;
1035 {
1036 const char *str;
1037
1038 if (! TARGET_SMALL_DATA)
1039 return 0;
1040
1041 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1042 return 0;
1043
1044 if (GET_CODE (op) == CONST
1045 && GET_CODE (XEXP (op, 0)) == PLUS
1046 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1047 op = XEXP (XEXP (op, 0), 0);
1048
1049 if (GET_CODE (op) != SYMBOL_REF)
1050 return 0;
1051
1052 if (CONSTANT_POOL_ADDRESS_P (op))
1053 return GET_MODE_SIZE (get_pool_mode (op)) <= (unsigned) g_switch_value;
1054 else
1055 {
1056 str = XSTR (op, 0);
1057 return str[0] == '@' && str[1] == 'S';
1058 }
1059 }
1060
1061 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
1062 not known (or known not) to be defined in this file. */
1063
1064 int
1065 global_symbolic_operand (op, mode)
1066 rtx op;
1067 enum machine_mode mode;
1068 {
1069 const char *str;
1070
1071 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1072 return 0;
1073
1074 if (GET_CODE (op) == CONST
1075 && GET_CODE (XEXP (op, 0)) == PLUS
1076 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
1077 op = XEXP (XEXP (op, 0), 0);
1078
1079 if (GET_CODE (op) != SYMBOL_REF)
1080 return 0;
1081
1082 if (local_symbolic_operand (op, mode))
1083 return 0;
1084
1085 /* Also verify that it's not a TLS symbol. */
1086 str = XSTR (op, 0);
1087 return str[0] != '%' && str[0] != '@';
1088 }
1089
1090 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
1091
1092 int
1093 call_operand (op, mode)
1094 rtx op;
1095 enum machine_mode mode;
1096 {
1097 if (mode != Pmode)
1098 return 0;
1099
1100 if (GET_CODE (op) == REG)
1101 {
1102 if (TARGET_ABI_OSF)
1103 {
1104 /* Disallow virtual registers to cope with pathological test cases
1105 such as compile/930117-1.c in which the virtual reg decomposes
1106 to the frame pointer, which is a hard reg that is not $27. */
1107 return (REGNO (op) == 27 || REGNO (op) > LAST_VIRTUAL_REGISTER);
1108 }
1109 else
1110 return 1;
1111 }
1112 if (TARGET_ABI_UNICOSMK)
1113 return 0;
1114 if (GET_CODE (op) == SYMBOL_REF)
1115 return 1;
1116
1117 return 0;
1118 }
1119
1120 /* Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
1121 possibly with an offset. */
1122
1123 int
1124 symbolic_operand (op, mode)
1125 register rtx op;
1126 enum machine_mode mode;
1127 {
1128 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1129 return 0;
1130 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
1131 return 1;
1132 if (GET_CODE (op) == CONST
1133 && GET_CODE (XEXP (op,0)) == PLUS
1134 && GET_CODE (XEXP (XEXP (op,0), 0)) == SYMBOL_REF
1135 && GET_CODE (XEXP (XEXP (op,0), 1)) == CONST_INT)
1136 return 1;
1137 return 0;
1138 }
1139
1140 /* Return true if OP is valid for a particular TLS relocation. */
1141
1142 static int
1143 tls_symbolic_operand_1 (op, mode, size, unspec)
1144 rtx op;
1145 enum machine_mode mode;
1146 int size, unspec;
1147 {
1148 const char *str;
1149 int letter;
1150
1151 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1152 return 0;
1153
1154 if (GET_CODE (op) != CONST)
1155 return 0;
1156 op = XEXP (op, 0);
1157
1158 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
1159 return 0;
1160 op = XVECEXP (op, 0, 0);
1161
1162 if (GET_CODE (op) != SYMBOL_REF)
1163 return 0;
1164 str = XSTR (op, 0);
1165
1166 if (str[0] == '%')
1167 {
1168 if (size != 64)
1169 return 0;
1170 }
1171 else if (str[0] == '@')
1172 {
1173 if (alpha_tls_size > size)
1174 return 0;
1175 }
1176 else
1177 return 0;
1178
1179 letter = (unspec == UNSPEC_DTPREL ? 'D' : 'T');
1180
1181 return str[1] == letter;
1182 }
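/* For illustration: with the default -mtls-size=32, a symbol encoded as
 "@Tfoo" satisfies tp32_symbolic_operand and gottp_symbolic_operand
 (sizes 32 and 64) but not tp16_symbolic_operand, since alpha_tls_size
 exceeds 16. */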
1183
1184 /* Return true if OP is valid for 16-bit DTP relative relocations. */
1185
1186 int
1187 dtp16_symbolic_operand (op, mode)
1188 rtx op;
1189 enum machine_mode mode;
1190 {
1191 return tls_symbolic_operand_1 (op, mode, 16, UNSPEC_DTPREL);
1192 }
1193
1194 /* Return true if OP is valid for 32-bit DTP relative relocations. */
1195
1196 int
1197 dtp32_symbolic_operand (op, mode)
1198 rtx op;
1199 enum machine_mode mode;
1200 {
1201 return tls_symbolic_operand_1 (op, mode, 32, UNSPEC_DTPREL);
1202 }
1203
1204 /* Return true if OP is valid for 64-bit DTP relative relocations. */
1205
1206 int
1207 gotdtp_symbolic_operand (op, mode)
1208 rtx op;
1209 enum machine_mode mode;
1210 {
1211 return tls_symbolic_operand_1 (op, mode, 64, UNSPEC_DTPREL);
1212 }
1213
1214 /* Return true if OP is valid for 16-bit TP relative relocations. */
1215
1216 int
1217 tp16_symbolic_operand (op, mode)
1218 rtx op;
1219 enum machine_mode mode;
1220 {
1221 return tls_symbolic_operand_1 (op, mode, 16, UNSPEC_TPREL);
1222 }
1223
1224 /* Return true if OP is valid for 32-bit TP relative relocations. */
1225
1226 int
1227 tp32_symbolic_operand (op, mode)
1228 rtx op;
1229 enum machine_mode mode;
1230 {
1231 return tls_symbolic_operand_1 (op, mode, 32, UNSPEC_TPREL);
1232 }
1233
1234 /* Return true if OP is valid for 64-bit TP relative relocations. */
1235
1236 int
1237 gottp_symbolic_operand (op, mode)
1238 rtx op;
1239 enum machine_mode mode;
1240 {
1241 return tls_symbolic_operand_1 (op, mode, 64, UNSPEC_TPREL);
1242 }
1243
1244 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
1245 comparisons are valid in which insn. */
1246
1247 int
1248 alpha_comparison_operator (op, mode)
1249 register rtx op;
1250 enum machine_mode mode;
1251 {
1252 enum rtx_code code = GET_CODE (op);
1253
1254 if (mode != GET_MODE (op) && mode != VOIDmode)
1255 return 0;
1256
1257 return (code == EQ || code == LE || code == LT
1258 || code == LEU || code == LTU);
1259 }
1260
1261 /* Return 1 if OP is a valid Alpha comparison operator against zero.
1262 Here we know which comparisons are valid in which insn. */
1263
1264 int
1265 alpha_zero_comparison_operator (op, mode)
1266 register rtx op;
1267 enum machine_mode mode;
1268 {
1269 enum rtx_code code = GET_CODE (op);
1270
1271 if (mode != GET_MODE (op) && mode != VOIDmode)
1272 return 0;
1273
1274 return (code == EQ || code == NE || code == LE || code == LT
1275 || code == LEU || code == LTU);
1276 }
1277
1278 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
1279
1280 int
1281 alpha_swapped_comparison_operator (op, mode)
1282 register rtx op;
1283 enum machine_mode mode;
1284 {
1285 enum rtx_code code = GET_CODE (op);
1286
1287 if ((mode != GET_MODE (op) && mode != VOIDmode)
1288 || GET_RTX_CLASS (code) != '<')
1289 return 0;
1290
1291 code = swap_condition (code);
1292 return (code == EQ || code == LE || code == LT
1293 || code == LEU || code == LTU);
1294 }
1295
1296 /* Return 1 if OP is a signed comparison operation. */
1297
1298 int
1299 signed_comparison_operator (op, mode)
1300 register rtx op;
1301 enum machine_mode mode ATTRIBUTE_UNUSED;
1302 {
1303 enum rtx_code code = GET_CODE (op);
1304
1305 if (mode != GET_MODE (op) && mode != VOIDmode)
1306 return 0;
1307
1308 return (code == EQ || code == NE
1309 || code == LE || code == LT
1310 || code == GE || code == GT);
1311 }
1312
1313 /* Return 1 if OP is a valid Alpha floating point comparison operator.
1314 Here we know which comparisons are valid in which insn. */
1315
1316 int
1317 alpha_fp_comparison_operator (op, mode)
1318 register rtx op;
1319 enum machine_mode mode;
1320 {
1321 enum rtx_code code = GET_CODE (op);
1322
1323 if (mode != GET_MODE (op) && mode != VOIDmode)
1324 return 0;
1325
1326 return (code == EQ || code == LE || code == LT || code == UNORDERED);
1327 }
1328
1329 /* Return 1 if this is a divide or modulus operator. */
1330
1331 int
1332 divmod_operator (op, mode)
1333 register rtx op;
1334 enum machine_mode mode ATTRIBUTE_UNUSED;
1335 {
1336 switch (GET_CODE (op))
1337 {
1338 case DIV: case MOD: case UDIV: case UMOD:
1339 return 1;
1340
1341 default:
1342 break;
1343 }
1344
1345 return 0;
1346 }
1347
1348 /* Return 1 if this memory address is a known aligned register plus
1349 a constant. It must be a valid address. This means that we can do
1350 this as an aligned reference plus some offset.
1351
1352 Take into account what reload will do. */
1353
1354 int
1355 aligned_memory_operand (op, mode)
1356 register rtx op;
1357 enum machine_mode mode;
1358 {
1359 rtx base;
1360
1361 if (reload_in_progress)
1362 {
1363 rtx tmp = op;
1364 if (GET_CODE (tmp) == SUBREG)
1365 tmp = SUBREG_REG (tmp);
1366 if (GET_CODE (tmp) == REG
1367 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
1368 {
1369 op = reg_equiv_memory_loc[REGNO (tmp)];
1370 if (op == 0)
1371 return 0;
1372 }
1373 }
1374
1375 if (GET_CODE (op) != MEM
1376 || GET_MODE (op) != mode)
1377 return 0;
1378 op = XEXP (op, 0);
1379
1380 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1381 sorts of constructs. Dig for the real base register. */
1382 if (reload_in_progress
1383 && GET_CODE (op) == PLUS
1384 && GET_CODE (XEXP (op, 0)) == PLUS)
1385 base = XEXP (XEXP (op, 0), 0);
1386 else
1387 {
1388 if (! memory_address_p (mode, op))
1389 return 0;
1390 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
1391 }
1392
1393 return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) >= 32);
1394 }
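/* Thus (mem:SI (plus (reg) (const_int 4))) is considered aligned only if
 the base register is known via REGNO_POINTER_ALIGN to be at least
 32-bit aligned, as the stack pointer is. */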
1395
1396 /* Similar, but return 1 if OP is a MEM which is not alignable. */
1397
1398 int
1399 unaligned_memory_operand (op, mode)
1400 register rtx op;
1401 enum machine_mode mode;
1402 {
1403 rtx base;
1404
1405 if (reload_in_progress)
1406 {
1407 rtx tmp = op;
1408 if (GET_CODE (tmp) == SUBREG)
1409 tmp = SUBREG_REG (tmp);
1410 if (GET_CODE (tmp) == REG
1411 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
1412 {
1413 op = reg_equiv_memory_loc[REGNO (tmp)];
1414 if (op == 0)
1415 return 0;
1416 }
1417 }
1418
1419 if (GET_CODE (op) != MEM
1420 || GET_MODE (op) != mode)
1421 return 0;
1422 op = XEXP (op, 0);
1423
1424 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1425 sorts of constructs. Dig for the real base register. */
1426 if (reload_in_progress
1427 && GET_CODE (op) == PLUS
1428 && GET_CODE (XEXP (op, 0)) == PLUS)
1429 base = XEXP (XEXP (op, 0), 0);
1430 else
1431 {
1432 if (! memory_address_p (mode, op))
1433 return 0;
1434 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
1435 }
1436
1437 return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) < 32);
1438 }
1439
1440 /* Return 1 if OP is either a register or an unaligned memory location. */
1441
1442 int
1443 reg_or_unaligned_mem_operand (op, mode)
1444 rtx op;
1445 enum machine_mode mode;
1446 {
1447 return register_operand (op, mode) || unaligned_memory_operand (op, mode);
1448 }
1449
1450 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
1451
1452 int
1453 any_memory_operand (op, mode)
1454 register rtx op;
1455 enum machine_mode mode ATTRIBUTE_UNUSED;
1456 {
1457 return (GET_CODE (op) == MEM
1458 || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
1459 || (reload_in_progress && GET_CODE (op) == REG
1460 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
1461 || (reload_in_progress && GET_CODE (op) == SUBREG
1462 && GET_CODE (SUBREG_REG (op)) == REG
1463 && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
1464 }
1465
1466 /* Returns 1 if OP is not an eliminable register.
1467
1468 This exists to cure a pathological abort in the s8addq (et al) patterns,
1469
1470 long foo () { long t; bar(); return (long) &t * 26107; }
1471
1472 which run afoul of a hack in reload to cure a (presumably) similar
1473 problem with lea-type instructions on other targets. But there is
1474 one of us and many of them, so work around the problem by selectively
1475 preventing combine from making the optimization. */
1476
1477 int
1478 reg_not_elim_operand (op, mode)
1479 register rtx op;
1480 enum machine_mode mode;
1481 {
1482 rtx inner = op;
1483 if (GET_CODE (op) == SUBREG)
1484 inner = SUBREG_REG (op);
1485 if (inner == frame_pointer_rtx || inner == arg_pointer_rtx)
1486 return 0;
1487
1488 return register_operand (op, mode);
1489 }
1490
1491 /* Return 1 if OP is a memory location that is not a reference (using
1492 an AND) to an unaligned location. Take into account what reload
1493 will do. */
1494
1495 int
1496 normal_memory_operand (op, mode)
1497 register rtx op;
1498 enum machine_mode mode ATTRIBUTE_UNUSED;
1499 {
1500 if (reload_in_progress)
1501 {
1502 rtx tmp = op;
1503 if (GET_CODE (tmp) == SUBREG)
1504 tmp = SUBREG_REG (tmp);
1505 if (GET_CODE (tmp) == REG
1506 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
1507 {
1508 op = reg_equiv_memory_loc[REGNO (tmp)];
1509
1510 /* This may not have been assigned an equivalent address if it will
1511 be eliminated. In that case, it doesn't matter what we do. */
1512 if (op == 0)
1513 return 1;
1514 }
1515 }
1516
1517 return GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) != AND;
1518 }
1519
1520 /* Accept a register, but not a subreg of any kind. This allows us to
1521 avoid pathological cases in reload wrt data movement common in
1522 int->fp conversion. */
1523
1524 int
1525 reg_no_subreg_operand (op, mode)
1526 register rtx op;
1527 enum machine_mode mode;
1528 {
1529 if (GET_CODE (op) != REG)
1530 return 0;
1531 return register_operand (op, mode);
1532 }
1533
1534 /* Recognize an addition operation that includes a constant. Used to
1535 convince reload to canonicalize (plus (plus reg c1) c2) during register
1536 elimination. */
1537
1538 int
1539 addition_operation (op, mode)
1540 register rtx op;
1541 enum machine_mode mode;
1542 {
1543 if (GET_MODE (op) != mode && mode != VOIDmode)
1544 return 0;
1545 if (GET_CODE (op) == PLUS
1546 && register_operand (XEXP (op, 0), mode)
1547 && GET_CODE (XEXP (op, 1)) == CONST_INT
1548 && CONST_OK_FOR_LETTER_P (INTVAL (XEXP (op, 1)), 'K'))
1549 return 1;
1550 return 0;
1551 }
1552
1553 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
1554 the range defined for C in [I-P]. */
1555
1556 bool
1557 alpha_const_ok_for_letter_p (value, c)
1558 HOST_WIDE_INT value;
1559 int c;
1560 {
1561 switch (c)
1562 {
1563 case 'I':
1564 /* An unsigned 8 bit constant. */
1565 return (unsigned HOST_WIDE_INT) value < 0x100;
1566 case 'J':
1567 /* The constant zero. */
1568 return value == 0;
1569 case 'K':
1570 /* A signed 16 bit constant. */
1571 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
1572 case 'L':
1573 /* A shifted signed 16 bit constant appropriate for LDAH. */
1574 return ((value & 0xffff) == 0
1575 && ((value) >> 31 == -1 || value >> 31 == 0));
1576 case 'M':
1577 /* A constant that can be AND'ed with using a ZAP insn. */
1578 return zap_mask (value);
1579 case 'N':
1580 /* A complemented unsigned 8 bit constant. */
1581 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
1582 case 'O':
1583 /* A negated unsigned 8 bit constant. */
1584 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
1585 case 'P':
1586 /* The constant 1, 2 or 3. */
1587 return value == 1 || value == 2 || value == 3;
1588
1589 default:
1590 return false;
1591 }
1592 }
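/* Worked examples: 255 satisfies 'I' but 256 does not; -32768 satisfies
 'K' but -32769 does not; 0x7fff0000 satisfies 'L', being a valid LDAH
 operand; and 0x00ff00ff satisfies 'M', being a zap mask. */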
1593
1594 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
1595 matches for C in [GH]. */
1596
1597 bool
1598 alpha_const_double_ok_for_letter_p (value, c)
1599 rtx value;
1600 int c;
1601 {
1602 switch (c)
1603 {
1604 case 'G':
1605 /* The floating point zero constant. */
1606 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
1607 && value == CONST0_RTX (GET_MODE (value)));
1608
1609 case 'H':
1610 /* A valid operand of a ZAP insn. */
1611 return (GET_MODE (value) == VOIDmode
1612 && zap_mask (CONST_DOUBLE_LOW (value))
1613 && zap_mask (CONST_DOUBLE_HIGH (value)));
1614
1615 default:
1616 return false;
1617 }
1618 }
1619
1620 /* Implements EXTRA_CONSTRAINT. Return true if VALUE
1621 matches for C. */
1622
1623 bool
1624 alpha_extra_constraint (value, c)
1625 rtx value;
1626 int c;
1627 {
1628 switch (c)
1629 {
1630 case 'Q':
1631 return normal_memory_operand (value, VOIDmode);
1632 case 'R':
1633 return direct_call_operand (value, Pmode);
1634 case 'S':
1635 return (GET_CODE (value) == CONST_INT
1636 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
1637 case 'T':
1638 return GET_CODE (value) == HIGH;
1639 case 'U':
1640 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
1641 case 'W':
1642 return (GET_CODE (value) == CONST_VECTOR
1643 && value == CONST0_RTX (GET_MODE (value)));
1644 default:
1645 return false;
1646 }
1647 }
1648
1649 /* Return 1 if this function can directly return via $26. */
1650
1651 int
1652 direct_return ()
1653 {
1654 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
1655 && reload_completed
1656 && alpha_sa_size () == 0
1657 && get_frame_size () == 0
1658 && current_function_outgoing_args_size == 0
1659 && current_function_pretend_args_size == 0);
1660 }
1661
1662 /* Return the ADDR_VEC associated with a tablejump insn. */
1663
1664 rtx
1665 alpha_tablejump_addr_vec (insn)
1666 rtx insn;
1667 {
1668 rtx tmp;
1669
1670 tmp = JUMP_LABEL (insn);
1671 if (!tmp)
1672 return NULL_RTX;
1673 tmp = NEXT_INSN (tmp);
1674 if (!tmp)
1675 return NULL_RTX;
1676 if (GET_CODE (tmp) == JUMP_INSN
1677 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
1678 return PATTERN (tmp);
1679 return NULL_RTX;
1680 }
1681
1682 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
1683
1684 rtx
1685 alpha_tablejump_best_label (insn)
1686 rtx insn;
1687 {
1688 rtx jump_table = alpha_tablejump_addr_vec (insn);
1689 rtx best_label = NULL_RTX;
1690
1691 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
1692 there for edge frequency counts from profile data. */
1693
1694 if (jump_table)
1695 {
1696 int n_labels = XVECLEN (jump_table, 1);
1697 int best_count = -1;
1698 int i, j;
1699
1700 for (i = 0; i < n_labels; i++)
1701 {
1702 int count = 1;
1703
1704 for (j = i + 1; j < n_labels; j++)
1705 if (XEXP (XVECEXP (jump_table, 1, i), 0)
1706 == XEXP (XVECEXP (jump_table, 1, j), 0))
1707 count++;
1708
1709 if (count > best_count)
1710 best_count = count, best_label = XVECEXP (jump_table, 1, i);
1711 }
1712 }
1713
1714 return best_label ? best_label : const0_rtx;
1715 }
1716
1717 /* Return the TLS model to use for SYMBOL. */
1718
1719 static enum tls_model
1720 tls_symbolic_operand_type (symbol)
1721 rtx symbol;
1722 {
1723 const char *str;
1724
1725 if (GET_CODE (symbol) != SYMBOL_REF)
1726 return 0;
1727 str = XSTR (symbol, 0);
1728
1729 if (str[0] == '%')
1730 {
1731 /* ??? Be prepared for -ftls-model=local-dynamic. Perhaps we shouldn't
1732 have separately encoded local-ness. Oh well, maybe the user will use
1733 attribute visibility next time. At least we don't crash... */
1734 if (str[1] == 'G' || str[1] == 'D')
1735 return TLS_MODEL_GLOBAL_DYNAMIC;
1736 if (str[1] == 'T')
1737 return TLS_MODEL_INITIAL_EXEC;
1738 }
1739 else if (str[0] == '@')
1740 {
1741 if (str[1] == 'D')
1742 {
1743 /* Local dynamic is a waste if we're not going to combine
1744 the __tls_get_addr calls. So avoid it if not optimizing. */
1745 if (optimize)
1746 return TLS_MODEL_LOCAL_DYNAMIC;
1747 else
1748 return TLS_MODEL_GLOBAL_DYNAMIC;
1749 }
1750 if (str[1] == 'T')
1751 {
1752 /* 64-bit local exec is the same as initial exec except without
1753 the dynamic relocation. In either case we use a got entry. */
1754 if (alpha_tls_size == 64)
1755 return TLS_MODEL_INITIAL_EXEC;
1756 else
1757 return TLS_MODEL_LOCAL_EXEC;
1758 }
1759 }
1760
1761 return 0;
1762 }
1763
1764 \f
1765 /* Return true if the function DECL will be placed in the default text
1766 section. */
1767 /* ??? Ideally we'd be able to always move from a SYMBOL_REF back to the
1768 decl, as that would allow us to determine if two functions are in the
1769 same section, which is what we really want to know. */
1770
1771 static bool
1772 decl_in_text_section (decl)
1773 tree decl;
1774 {
1775 return (DECL_SECTION_NAME (decl) == NULL_TREE
1776 && ! (flag_function_sections
1777 || (targetm.have_named_sections
1778 && DECL_ONE_ONLY (decl))));
1779 }
1780
1781 /* Return true if EXP should be placed in the small data section. */
1782
1783 static bool
1784 alpha_in_small_data_p (exp)
1785 tree exp;
1786 {
1787 /* We want to merge strings, so we never consider them small data. */
1788 if (TREE_CODE (exp) == STRING_CST)
1789 return false;
1790
1791 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
1792 {
1793 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
1794 if (strcmp (section, ".sdata") == 0
1795 || strcmp (section, ".sbss") == 0)
1796 return true;
1797 }
1798 else
1799 {
1800 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
1801
1802 /* If this is an incomplete type with size 0, then we can't put it
1803 in sdata because it might be too big when completed. */
1804 if (size > 0 && size <= g_switch_value)
1805 return true;
1806 }
1807
1808 return false;
1809 }
1810
1811 /* If we are referencing a function that is static, make the SYMBOL_REF
1812 special. We use this to indicate that we can branch to this function
1813 without setting PV or restoring GP.
1814
1815 If this is a variable that is known to be defined locally, add "@v"
1816 to the name. If in addition the variable is to go in .sdata/.sbss,
1817 then add "@s" instead. */
1818
1819 static void
1820 alpha_encode_section_info (decl, first)
1821 tree decl;
1822 int first ATTRIBUTE_UNUSED;
1823 {
1824 const char *symbol_str;
1825 bool is_local;
1826 char encoding = 0;
1827 rtx rtl, symbol;
1828
1829 rtl = DECL_P (decl) ? DECL_RTL (decl) : TREE_CST_RTL (decl);
1830
1831 /* Careful not to prod global register variables. */
1832 if (GET_CODE (rtl) != MEM)
1833 return;
1834 symbol = XEXP (rtl, 0);
1835 if (GET_CODE (symbol) != SYMBOL_REF)
1836 return;
1837
1838 if (TREE_CODE (decl) == FUNCTION_DECL)
1839 {
1840 /* We mark public functions once they are emitted; otherwise we
1841 don't know that they exist in this unit of translation. */
1842 if (TREE_PUBLIC (decl))
1843 return;
1844
1845 /* Do not mark functions that are not in .text; otherwise we
1846 don't know that they are near enough for a direct branch. */
1847 if (! decl_in_text_section (decl))
1848 return;
1849
1850 SYMBOL_REF_FLAG (symbol) = 1;
1851 return;
1852 }
1853
1854 /* Early out if we're not going to do anything with this data. */
1855 if (! TARGET_EXPLICIT_RELOCS)
1856 return;
1857
1858 symbol_str = XSTR (symbol, 0);
1859
1860 /* A variable is considered "local" if it is defined in this module. */
1861 is_local = (*targetm.binds_local_p) (decl);
1862
1863 /* Care for TLS variables. */
1864 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL (decl))
1865 {
1866 enum tls_model kind;
1867 if (!flag_pic)
1868 {
1869 if (is_local)
1870 kind = TLS_MODEL_LOCAL_EXEC;
1871 else
1872 kind = TLS_MODEL_INITIAL_EXEC;
1873 }
1874 else if (is_local)
1875 kind = TLS_MODEL_LOCAL_DYNAMIC;
1876 else
1877 kind = TLS_MODEL_GLOBAL_DYNAMIC;
1878 if (kind < flag_tls_default)
1879 kind = flag_tls_default;
1880
1881 switch (kind)
1882 {
1883 case TLS_MODEL_GLOBAL_DYNAMIC:
1884 encoding = 'G';
1885 break;
1886 case TLS_MODEL_LOCAL_DYNAMIC:
1887 encoding = 'D';
1888 break;
1889 case TLS_MODEL_INITIAL_EXEC:
1890 case TLS_MODEL_LOCAL_EXEC:
1891 encoding = 'T';
1892 break;
1893 }
1894 }
1895 else if (is_local)
1896 {
1897 /* Determine if DECL will wind up in .sdata/.sbss. */
1898 if (alpha_in_small_data_p (decl))
1899 encoding = 'S';
1900 else
1901 encoding = 'L';
1902 }
1903
1904 /* Finally, encode this into the symbol string. */
1905 if (encoding)
1906 {
1907 char *newstr;
1908 size_t len;
1909
1910 if (symbol_str[0] == (is_local ? '@' : '%'))
1911 {
1912 if (symbol_str[1] == encoding)
1913 return;
1914 symbol_str += 2;
1915 }
1916
1917 len = strlen (symbol_str) + 1;
1918 newstr = alloca (len + 2);
1919
1920 newstr[0] = (is_local ? '@' : '%');
1921 newstr[1] = encoding;
1922 memcpy (newstr + 2, symbol_str, len);
1923
1924 XSTR (symbol, 0) = ggc_alloc_string (newstr, len + 2 - 1);
1925 }
1926 }
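/* So a static variable "foo" placed in .sdata is renamed "@Sfoo", an
 ordinary local definition becomes "@Lfoo", and a global-dynamic TLS
 variable becomes "%Gfoo"; alpha_strip_name_encoding below recovers
 the original name. */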
1927
1928 /* Undo the effects of the above. */
1929
1930 static const char *
1931 alpha_strip_name_encoding (str)
1932 const char *str;
1933 {
1934 if (str[0] == '@' || str[0] == '%')
1935 str += 2;
1936 if (str[0] == '*')
1937 str++;
1938 return str;
1939 }
1940
1941 /* legitimate_address_p recognizes an RTL expression that is a valid
1942 memory address for an instruction. The MODE argument is the
1943 machine mode for the MEM expression that wants to use this address.
1944
1945 For Alpha, we have either a constant address or the sum of a
1946 register and a constant address, or just a register. For DImode,
1947 any of those forms can be surrounded with an AND that clears the
1948 low-order three bits; this is an "unaligned" access. */
1949
1950 bool
1951 alpha_legitimate_address_p (mode, x, strict)
1952 enum machine_mode mode;
1953 rtx x;
1954 int strict;
1955 {
1956 /* If this is an ldq_u type address, discard the outer AND. */
1957 if (mode == DImode
1958 && GET_CODE (x) == AND
1959 && GET_CODE (XEXP (x, 1)) == CONST_INT
1960 && INTVAL (XEXP (x, 1)) == -8)
1961 x = XEXP (x, 0);
1962
1963 /* Discard non-paradoxical subregs. */
1964 if (GET_CODE (x) == SUBREG
1965 && (GET_MODE_SIZE (GET_MODE (x))
1966 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1967 x = SUBREG_REG (x);
1968
1969 /* Unadorned general registers are valid. */
1970 if (REG_P (x)
1971 && (strict
1972 ? STRICT_REG_OK_FOR_BASE_P (x)
1973 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
1974 return true;
1975
1976 /* Constant addresses (i.e. +/- 32k) are valid. */
1977 if (CONSTANT_ADDRESS_P (x))
1978 return true;
1979
1980 /* Register plus a small constant offset is valid. */
1981 if (GET_CODE (x) == PLUS)
1982 {
1983 rtx ofs = XEXP (x, 1);
1984 x = XEXP (x, 0);
1985
1986 /* Discard non-paradoxical subregs. */
1987 if (GET_CODE (x) == SUBREG
1988 && (GET_MODE_SIZE (GET_MODE (x))
1989 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1990 x = SUBREG_REG (x);
1991
1992 if (REG_P (x))
1993 {
1994 if (! strict
1995 && NONSTRICT_REG_OK_FP_BASE_P (x)
1996 && GET_CODE (ofs) == CONST_INT)
1997 return true;
1998 if ((strict
1999 ? STRICT_REG_OK_FOR_BASE_P (x)
2000 : NONSTRICT_REG_OK_FOR_BASE_P (x))
2001 && CONSTANT_ADDRESS_P (ofs))
2002 return true;
2003 }
2004 else if (GET_CODE (x) == ADDRESSOF
2005 && GET_CODE (ofs) == CONST_INT)
2006 return true;
2007 }
2008
2009 /* If we're managing explicit relocations, LO_SUM is valid, as
2010 are small data symbols. */
2011 else if (TARGET_EXPLICIT_RELOCS)
2012 {
2013 if (small_symbolic_operand (x, Pmode))
2014 return true;
2015
2016 if (GET_CODE (x) == LO_SUM)
2017 {
2018 rtx ofs = XEXP (x, 1);
2019 x = XEXP (x, 0);
2020
2021 /* Discard non-paradoxical subregs. */
2022 if (GET_CODE (x) == SUBREG
2023 && (GET_MODE_SIZE (GET_MODE (x))
2024 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
2025 x = SUBREG_REG (x);
2026
2027 /* Must have a valid base register. */
2028 if (! (REG_P (x)
2029 && (strict
2030 ? STRICT_REG_OK_FOR_BASE_P (x)
2031 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
2032 return false;
2033
2034 /* The symbol must be local. */
2035 if (local_symbolic_operand (ofs, Pmode)
2036 || dtp32_symbolic_operand (ofs, Pmode)
2037 || tp32_symbolic_operand (ofs, Pmode))
2038 return true;
2039 }
2040 }
2041
2042 return false;
2043 }
2044
2045 /* Try machine-dependent ways of modifying an illegitimate address
2046 to be legitimate. If we find one, return the new, valid address. */
2047
2048 rtx
2049 alpha_legitimize_address (x, scratch, mode)
2050 rtx x;
2051 rtx scratch;
2052 enum machine_mode mode ATTRIBUTE_UNUSED;
2053 {
2054 HOST_WIDE_INT addend;
2055
2056 /* If the address is (plus reg const_int) and the CONST_INT is not a
2057 valid offset, compute the high part of the constant and add it to
2058 the register. Then our address is (plus temp low-part-const). */
2059 if (GET_CODE (x) == PLUS
2060 && GET_CODE (XEXP (x, 0)) == REG
2061 && GET_CODE (XEXP (x, 1)) == CONST_INT
2062 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
2063 {
2064 addend = INTVAL (XEXP (x, 1));
2065 x = XEXP (x, 0);
2066 goto split_addend;
2067 }
2068
2069 /* If the address is (const (plus FOO const_int)), find the low-order
2070 part of the CONST_INT. Then load FOO plus any high-order part of the
2071 CONST_INT into a register. Our address is (plus reg low-part-const).
2072 This is done to reduce the number of GOT entries. */
2073 if (!no_new_pseudos
2074 && GET_CODE (x) == CONST
2075 && GET_CODE (XEXP (x, 0)) == PLUS
2076 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2077 {
2078 addend = INTVAL (XEXP (XEXP (x, 0), 1));
2079 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
2080 goto split_addend;
2081 }
2082
2083   /* If we have a (plus reg const), emit the load as in the previous case, then add
2084 the two registers, and finally generate (plus reg low-part-const) as
2085 our address. */
2086 if (!no_new_pseudos
2087 && GET_CODE (x) == PLUS
2088 && GET_CODE (XEXP (x, 0)) == REG
2089 && GET_CODE (XEXP (x, 1)) == CONST
2090 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
2091 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
2092 {
2093 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
2094 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
2095 XEXP (XEXP (XEXP (x, 1), 0), 0),
2096 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2097 goto split_addend;
2098 }
2099
2100 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
2101 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
2102 {
2103 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
2104
2105 switch (tls_symbolic_operand_type (x))
2106 {
2107 case TLS_MODEL_GLOBAL_DYNAMIC:
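	  /* A sketch of the sequence this emits (relocation syntax is
	     illustrative, per the explicit-relocation assembler support):
	       lda   $16,x($29)               !tlsgd!N
	       ldq   $27,__tls_get_addr($29)  !literal!N
	       jsr   $26,($27)                !lituse_tlsgd!N
	     with the result returned in $0.  */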
2108 start_sequence ();
2109
2110 r0 = gen_rtx_REG (Pmode, 0);
2111 r16 = gen_rtx_REG (Pmode, 16);
2112 tga = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
2113 dest = gen_reg_rtx (Pmode);
2114 seq = GEN_INT (alpha_next_sequence_number++);
2115
2116 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
2117 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
2118 insn = emit_call_insn (insn);
2119 CONST_OR_PURE_CALL_P (insn) = 1;
2120 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
2121
2122 insn = get_insns ();
2123 end_sequence ();
2124
2125 emit_libcall_block (insn, dest, r0, x);
2126 return dest;
2127
2128 case TLS_MODEL_LOCAL_DYNAMIC:
2129 start_sequence ();
2130
2131 r0 = gen_rtx_REG (Pmode, 0);
2132 r16 = gen_rtx_REG (Pmode, 16);
2133 tga = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
2134 scratch = gen_reg_rtx (Pmode);
2135 seq = GEN_INT (alpha_next_sequence_number++);
2136
2137 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
2138 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
2139 insn = emit_call_insn (insn);
2140 CONST_OR_PURE_CALL_P (insn) = 1;
2141 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
2142
2143 insn = get_insns ();
2144 end_sequence ();
2145
2146 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2147 UNSPEC_TLSLDM_CALL);
2148 emit_libcall_block (insn, scratch, r0, eqv);
2149
2150 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
2151 eqv = gen_rtx_CONST (Pmode, eqv);
2152
2153 if (alpha_tls_size == 64)
2154 {
2155 dest = gen_reg_rtx (Pmode);
2156 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
2157 emit_insn (gen_adddi3 (dest, dest, scratch));
2158 return dest;
2159 }
2160 if (alpha_tls_size == 32)
2161 {
2162 insn = gen_rtx_HIGH (Pmode, eqv);
2163 insn = gen_rtx_PLUS (Pmode, scratch, insn);
2164 scratch = gen_reg_rtx (Pmode);
2165 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
2166 }
2167 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
2168
2169 case TLS_MODEL_INITIAL_EXEC:
2170 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
2171 eqv = gen_rtx_CONST (Pmode, eqv);
2172 tp = gen_reg_rtx (Pmode);
2173 scratch = gen_reg_rtx (Pmode);
2174 dest = gen_reg_rtx (Pmode);
2175
2176 emit_insn (gen_load_tp (tp));
2177 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
2178 emit_insn (gen_adddi3 (dest, tp, scratch));
2179 return dest;
2180
2181 case TLS_MODEL_LOCAL_EXEC:
2182 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
2183 eqv = gen_rtx_CONST (Pmode, eqv);
2184 tp = gen_reg_rtx (Pmode);
2185
2186 emit_insn (gen_load_tp (tp));
2187 if (alpha_tls_size == 32)
2188 {
2189 insn = gen_rtx_HIGH (Pmode, eqv);
2190 insn = gen_rtx_PLUS (Pmode, tp, insn);
2191 tp = gen_reg_rtx (Pmode);
2192 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
2193 }
2194 return gen_rtx_LO_SUM (Pmode, tp, eqv);
2195 }
2196
2197 if (local_symbolic_operand (x, Pmode))
2198 {
2199 if (small_symbolic_operand (x, Pmode))
2200 return x;
2201 else
2202 {
2203 if (!no_new_pseudos)
2204 scratch = gen_reg_rtx (Pmode);
2205 emit_insn (gen_rtx_SET (VOIDmode, scratch,
2206 gen_rtx_HIGH (Pmode, x)));
2207 return gen_rtx_LO_SUM (Pmode, scratch, x);
2208 }
2209 }
2210 }
2211
2212 return NULL;
2213
2214 split_addend:
2215 {
2216 HOST_WIDE_INT low, high;
2217
2218 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
2219 addend -= low;
2220 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
2221 addend -= high;
2222
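    /* Worked example (illustrative): addend = 0x1234abcd gives
       low = 0xabcd - 0x10000 = -0x5433, leaving 0x12350000 for HIGH,
       so an ldah adds 0x1235 << 16 and an lda folds in -0x5433.  */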
2223 if (addend)
2224 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
2225 (no_new_pseudos ? scratch : NULL_RTX),
2226 1, OPTAB_LIB_WIDEN);
2227 if (high)
2228 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
2229 (no_new_pseudos ? scratch : NULL_RTX),
2230 1, OPTAB_LIB_WIDEN);
2231
2232 return plus_constant (x, low);
2233 }
2234 }
2235
2236 /* For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a
2237    small symbolic operand until after reload, at which point we need
2238 to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
2239 so that sched2 has the proper dependency information. */
2240
2241 int
2242 some_small_symbolic_operand (x, mode)
2243 rtx x;
2244 enum machine_mode mode ATTRIBUTE_UNUSED;
2245 {
2246 return for_each_rtx (&x, some_small_symbolic_operand_1, NULL);
2247 }
2248
2249 static int
2250 some_small_symbolic_operand_1 (px, data)
2251 rtx *px;
2252 void *data ATTRIBUTE_UNUSED;
2253 {
2254 rtx x = *px;
2255
2256 /* Don't re-split. */
2257 if (GET_CODE (x) == LO_SUM)
2258 return -1;
2259
2260 return small_symbolic_operand (x, Pmode) != 0;
2261 }
2262
2263 rtx
2264 split_small_symbolic_operand (x)
2265 rtx x;
2266 {
2267 x = copy_insn (x);
2268 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
2269 return x;
2270 }
2271
2272 static int
2273 split_small_symbolic_operand_1 (px, data)
2274 rtx *px;
2275 void *data ATTRIBUTE_UNUSED;
2276 {
2277 rtx x = *px;
2278
2279 /* Don't re-split. */
2280 if (GET_CODE (x) == LO_SUM)
2281 return -1;
2282
2283 if (small_symbolic_operand (x, Pmode))
2284 {
2285 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
2286 *px = x;
2287 return -1;
2288 }
2289
2290 return 0;
2291 }
2292
2293 /* Try a machine-dependent way of reloading an illegitimate address
2294 operand. If we find one, push the reload and return the new rtx. */
2295
2296 rtx
2297 alpha_legitimize_reload_address (x, mode, opnum, type, ind_levels)
2298 rtx x;
2299 enum machine_mode mode ATTRIBUTE_UNUSED;
2300 int opnum;
2301 int type;
2302 int ind_levels ATTRIBUTE_UNUSED;
2303 {
2304 /* We must recognize output that we have already generated ourselves. */
2305 if (GET_CODE (x) == PLUS
2306 && GET_CODE (XEXP (x, 0)) == PLUS
2307 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2308 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2309 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2310 {
2311 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2312 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
2313 opnum, type);
2314 return x;
2315 }
2316
2317 /* We wish to handle large displacements off a base register by
2318 splitting the addend across an ldah and the mem insn. This
2319    cuts the number of extra insns needed from 3 to 1.
2320 if (GET_CODE (x) == PLUS
2321 && GET_CODE (XEXP (x, 0)) == REG
2322 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
2323 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
2324 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2325 {
2326 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2327 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
2328 HOST_WIDE_INT high
2329 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
2330
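      /* E.g. (illustrative): val = 0x18000 gives low = -0x8000 and
	 high = 0x20000, so the reload emits "ldah rt,2(rb)" and the
	 mem keeps displacement -0x8000.  */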
2331 /* Check for 32-bit overflow. */
2332 if (high + low != val)
2333 return NULL_RTX;
2334
2335 /* Reload the high part into a base reg; leave the low part
2336 in the mem directly. */
2337 x = gen_rtx_PLUS (GET_MODE (x),
2338 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
2339 GEN_INT (high)),
2340 GEN_INT (low));
2341
2342 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2343 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
2344 opnum, type);
2345 return x;
2346 }
2347
2348 return NULL_RTX;
2349 }
2350 \f
2351 /* REF is an alignable memory location. Place an aligned SImode
2352 reference into *PALIGNED_MEM and the number of bits to shift into
2353 *PBITNUM. SCRATCH is a free register for use in reloading out
2354 of range stack slots. */
2355
2356 void
2357 get_aligned_mem (ref, paligned_mem, pbitnum)
2358 rtx ref;
2359 rtx *paligned_mem, *pbitnum;
2360 {
2361 rtx base;
2362 HOST_WIDE_INT offset = 0;
2363
2364 if (GET_CODE (ref) != MEM)
2365 abort ();
2366
2367 if (reload_in_progress
2368 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
2369 {
2370 base = find_replacement (&XEXP (ref, 0));
2371
2372 if (! memory_address_p (GET_MODE (ref), base))
2373 abort ();
2374 }
2375 else
2376 {
2377 base = XEXP (ref, 0);
2378 }
2379
2380 if (GET_CODE (base) == PLUS)
2381 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
2382
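  /* E.g. (illustrative): an HImode ref at base+6 becomes an SImode
     access at base+4 with, little-endian, a bit number of 16.  */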
2383 *paligned_mem
2384 = widen_memory_access (ref, SImode, (offset & ~3) - offset);
2385
2386 if (WORDS_BIG_ENDIAN)
2387 *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
2388 + (offset & 3) * 8));
2389 else
2390 *pbitnum = GEN_INT ((offset & 3) * 8);
2391 }
2392
2393 /* Similar, but just get the address. Handle the two reload cases.
2394 Add EXTRA_OFFSET to the address we return. */
2395
2396 rtx
2397 get_unaligned_address (ref, extra_offset)
2398 rtx ref;
2399 int extra_offset;
2400 {
2401 rtx base;
2402 HOST_WIDE_INT offset = 0;
2403
2404 if (GET_CODE (ref) != MEM)
2405 abort ();
2406
2407 if (reload_in_progress
2408 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
2409 {
2410 base = find_replacement (&XEXP (ref, 0));
2411
2412 if (! memory_address_p (GET_MODE (ref), base))
2413 abort ();
2414 }
2415 else
2416 {
2417 base = XEXP (ref, 0);
2418 }
2419
2420 if (GET_CODE (base) == PLUS)
2421 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
2422
2423 return plus_constant (base, offset + extra_offset);
2424 }
2425
2426 /* On the Alpha, all (non-symbolic) constants except zero go into
2427 a floating-point register via memory. Note that we cannot
2428 return anything that is not a subset of CLASS, and that some
2429 symbolic constants cannot be dropped to memory. */
2430
2431 enum reg_class
2432 alpha_preferred_reload_class (x, class)
2433 rtx x;
2434 enum reg_class class;
2435 {
2436 /* Zero is present in any register class. */
2437 if (x == CONST0_RTX (GET_MODE (x)))
2438 return class;
2439
2440 /* These sorts of constants we can easily drop to memory. */
2441 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
2442 {
2443 if (class == FLOAT_REGS)
2444 return NO_REGS;
2445 if (class == ALL_REGS)
2446 return GENERAL_REGS;
2447 return class;
2448 }
2449
2450 /* All other kinds of constants should not (and in the case of HIGH
2451 cannot) be dropped to memory -- instead we use a GENERAL_REGS
2452 secondary reload. */
2453 if (CONSTANT_P (x))
2454 return (class == ALL_REGS ? GENERAL_REGS : class);
2455
2456 return class;
2457 }
2458
2459 /* Loading and storing HImode or QImode values to and from memory
2460    usually requires a scratch register. The exception is loading
2461    QImode or HImode from an aligned address into a general register;
2462    none of this applies when byte instructions are permitted.
2463
2464 We also cannot load an unaligned address or a paradoxical SUBREG
2465 into an FP register.
2466
2467 We also cannot do integral arithmetic into FP regs, as might result
2468 from register elimination into a DImode fp register. */
2469
2470 enum reg_class
2471 secondary_reload_class (class, mode, x, in)
2472 enum reg_class class;
2473 enum machine_mode mode;
2474 rtx x;
2475 int in;
2476 {
2477 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
2478 {
2479 if (GET_CODE (x) == MEM
2480 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
2481 || (GET_CODE (x) == SUBREG
2482 && (GET_CODE (SUBREG_REG (x)) == MEM
2483 || (GET_CODE (SUBREG_REG (x)) == REG
2484 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
2485 {
2486 	  if (!in || !aligned_memory_operand (x, mode))
2487 return GENERAL_REGS;
2488 }
2489 }
2490
2491 if (class == FLOAT_REGS)
2492 {
2493 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
2494 return GENERAL_REGS;
2495
2496 if (GET_CODE (x) == SUBREG
2497 && (GET_MODE_SIZE (GET_MODE (x))
2498 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
2499 return GENERAL_REGS;
2500
2501 if (in && INTEGRAL_MODE_P (mode)
2502 && ! (memory_operand (x, mode) || x == const0_rtx))
2503 return GENERAL_REGS;
2504 }
2505
2506 return NO_REGS;
2507 }
2508 \f
2509 /* Subfunction of the following function. Update the flags of any MEM
2510 found in part of X. */
2511
2512 static void
2513 alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
2514 rtx x;
2515 int in_struct_p, volatile_p, unchanging_p;
2516 {
2517 int i;
2518
2519 switch (GET_CODE (x))
2520 {
2521 case SEQUENCE:
2522 abort ();
2523
2524 case PARALLEL:
2525 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2526 alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
2527 unchanging_p);
2528 break;
2529
2530 case INSN:
2531 alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
2532 unchanging_p);
2533 break;
2534
2535 case SET:
2536 alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
2537 unchanging_p);
2538 alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
2539 unchanging_p);
2540 break;
2541
2542 case MEM:
2543 MEM_IN_STRUCT_P (x) = in_struct_p;
2544 MEM_VOLATILE_P (x) = volatile_p;
2545 RTX_UNCHANGING_P (x) = unchanging_p;
2546 /* Sadly, we cannot use alias sets because the extra aliasing
2547 produced by the AND interferes. Given that two-byte quantities
2548 are the only thing we would be able to differentiate anyway,
2549 there does not seem to be any point in convoluting the early
2550 out of the alias check. */
2551 break;
2552
2553 default:
2554 break;
2555 }
2556 }
2557
2558 /* Given INSN, which is an INSN list or the PATTERN of a single insn
2559 generated to perform a memory operation, look for any MEMs in either
2560 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
2561 volatile flags from REF into each of the MEMs found. If REF is not
2562 a MEM, don't do anything. */
2563
2564 void
2565 alpha_set_memflags (insn, ref)
2566 rtx insn;
2567 rtx ref;
2568 {
2569 int in_struct_p, volatile_p, unchanging_p;
2570
2571 if (GET_CODE (ref) != MEM)
2572 return;
2573
2574 in_struct_p = MEM_IN_STRUCT_P (ref);
2575 volatile_p = MEM_VOLATILE_P (ref);
2576 unchanging_p = RTX_UNCHANGING_P (ref);
2577
2578   /* This is only called from alpha.md, after something has been
2579      generated from one of the insn patterns. So if everything is
2580 zero, the pattern is already up-to-date. */
2581 if (! in_struct_p && ! volatile_p && ! unchanging_p)
2582 return;
2583
2584 alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
2585 }
2586 \f
2587 /* Try to output insns to set TARGET equal to the constant C if it can be
2588    done in N or fewer insns. Do all computations in MODE. Returns the place
2589 where the output has been placed if it can be done and the insns have been
2590 emitted. If it would take more than N insns, zero is returned and no
2591    insns are emitted. */
2592
2593 rtx
2594 alpha_emit_set_const (target, mode, c, n)
2595 rtx target;
2596 enum machine_mode mode;
2597 HOST_WIDE_INT c;
2598 int n;
2599 {
2600 rtx result = 0;
2601 rtx orig_target = target;
2602 int i;
2603
2604   /* If we can't make any pseudos, TARGET is an SImode hard register, and we
2605      can't load this constant in one insn, do the computation in DImode. */
2606 if (no_new_pseudos && mode == SImode
2607 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER
2608 && (result = alpha_emit_set_const_1 (target, mode, c, 1)) == 0)
2609 {
2610 target = gen_lowpart (DImode, target);
2611 mode = DImode;
2612 }
2613
2614 /* Try 1 insn, then 2, then up to N. */
2615 for (i = 1; i <= n; i++)
2616 {
2617 result = alpha_emit_set_const_1 (target, mode, c, i);
2618 if (result)
2619 {
2620 rtx insn = get_last_insn ();
2621 rtx set = single_set (insn);
2622 if (! CONSTANT_P (SET_SRC (set)))
2623 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2624 break;
2625 }
2626 }
2627
2628 /* Allow for the case where we changed the mode of TARGET. */
2629 if (result == target)
2630 result = orig_target;
2631
2632 return result;
2633 }
2634
2635 /* Internal routine for the above, checking for N or fewer insns. */
2636
2637 static rtx
2638 alpha_emit_set_const_1 (target, mode, c, n)
2639 rtx target;
2640 enum machine_mode mode;
2641 HOST_WIDE_INT c;
2642 int n;
2643 {
2644 HOST_WIDE_INT new;
2645 int i, bits;
2646 /* Use a pseudo if highly optimizing and still generating RTL. */
2647 rtx subtarget
2648 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
2649 rtx temp, insn;
2650
2651 /* If this is a sign-extended 32-bit constant, we can do this in at most
2652 three insns, so do it if we have enough insns left. We always have
2653 a sign-extended 32-bit constant when compiling on a narrow machine. */
2654
2655 if (HOST_BITS_PER_WIDE_INT != 64
2656 || c >> 31 == -1 || c >> 31 == 0)
2657 {
2658 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
2659 HOST_WIDE_INT tmp1 = c - low;
2660 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
2661 HOST_WIDE_INT extra = 0;
2662
2663 /* If HIGH will be interpreted as negative but the constant is
2664      positive, we must adjust it to do two ldah insns.
2665
2666 if ((high & 0x8000) != 0 && c >= 0)
2667 {
2668 extra = 0x4000;
2669 tmp1 -= 0x40000000;
2670 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
2671 }
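	  /* Worked example (illustrative): c = 0x7fff8000 gives
	     low = -0x8000 and high = -0x8000; the adjustment yields
	     high = 0x4000 and extra = 0x4000, so ldah + ldah + lda
	     reconstructs 0x40000000 + 0x40000000 - 0x8000.  */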
2672
2673 if (c == low || (low == 0 && extra == 0))
2674 {
2675 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
2676 but that meant that we can't handle INT_MIN on 32-bit machines
2677 (like NT/Alpha), because we recurse indefinitely through
2678 emit_move_insn to gen_movdi. So instead, since we know exactly
2679 what we want, create it explicitly. */
2680
2681 if (target == NULL)
2682 target = gen_reg_rtx (mode);
2683 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
2684 return target;
2685 }
2686 else if (n >= 2 + (extra != 0))
2687 {
2688 temp = copy_to_suggested_reg (GEN_INT (high << 16), subtarget, mode);
2689
2690 /* As of 2002-02-23, addsi3 is only available when not optimizing.
2691 This means that if we go through expand_binop, we'll try to
2692 generate extensions, etc, which will require new pseudos, which
2693 will fail during some split phases. The SImode add patterns
2694 still exist, but are not named. So build the insns by hand. */
2695
2696 if (extra != 0)
2697 {
2698 if (! subtarget)
2699 subtarget = gen_reg_rtx (mode);
2700 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
2701 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
2702 emit_insn (insn);
2703 temp = subtarget;
2704 }
2705
2706 if (target == NULL)
2707 target = gen_reg_rtx (mode);
2708 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2709 insn = gen_rtx_SET (VOIDmode, target, insn);
2710 emit_insn (insn);
2711 return target;
2712 }
2713 }
2714
2715 /* If we couldn't do it that way, try some other methods. But if we have
2716 no instructions left, don't bother. Likewise, if this is SImode and
2717 we can't make pseudos, we can't do anything since the expand_binop
2718 and expand_unop calls will widen and try to make pseudos. */
2719
2720 if (n == 1 || (mode == SImode && no_new_pseudos))
2721 return 0;
2722
2723 /* Next, see if we can load a related constant and then shift and possibly
2724    negate it to get the constant we want. Try this once for each
2725    increasing number of insns. */
2726
2727 for (i = 1; i < n; i++)
2728 {
2729       /* First, see if, minus some low bits, we have an easy load of
2730 	 the high bits. */
2731
2732 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
2733 if (new != 0
2734 && (temp = alpha_emit_set_const (subtarget, mode, c - new, i)) != 0)
2735 return expand_binop (mode, add_optab, temp, GEN_INT (new),
2736 target, 0, OPTAB_WIDEN);
2737
2738 /* Next try complementing. */
2739 if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
2740 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
2741
2742 /* Next try to form a constant and do a left shift. We can do this
2743 if some low-order bits are zero; the exact_log2 call below tells
2744 us that information. The bits we are shifting out could be any
2745 value, but here we'll just try the 0- and sign-extended forms of
2746 the constant. To try to increase the chance of having the same
2747 constant in more than one insn, start at the highest number of
2748 bits to shift, but try all possibilities in case a ZAPNOT will
2749 be useful. */
2750
2751 if ((bits = exact_log2 (c & - c)) > 0)
2752 for (; bits > 0; bits--)
2753 if ((temp = (alpha_emit_set_const
2754 (subtarget, mode, c >> bits, i))) != 0
2755 || ((temp = (alpha_emit_set_const
2756 (subtarget, mode,
2757 ((unsigned HOST_WIDE_INT) c) >> bits, i)))
2758 != 0))
2759 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
2760 target, 0, OPTAB_WIDEN);
2761
2762 /* Now try high-order zero bits. Here we try the shifted-in bits as
2763 all zero and all ones. Be careful to avoid shifting outside the
2764 mode and to avoid shifting outside the host wide int size. */
2765 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
2766 confuse the recursive call and set all of the high 32 bits. */
2767
2768 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
2769 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
2770 for (; bits > 0; bits--)
2771 if ((temp = alpha_emit_set_const (subtarget, mode,
2772 c << bits, i)) != 0
2773 || ((temp = (alpha_emit_set_const
2774 (subtarget, mode,
2775 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
2776 i)))
2777 != 0))
2778 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
2779 target, 1, OPTAB_WIDEN);
2780
2781 /* Now try high-order 1 bits. We get that with a sign-extension.
2782 But one bit isn't enough here. Be careful to avoid shifting outside
2783 the mode and to avoid shifting outside the host wide int size. */
2784
2785 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
2786 - floor_log2 (~ c) - 2)) > 0)
2787 for (; bits > 0; bits--)
2788 if ((temp = alpha_emit_set_const (subtarget, mode,
2789 c << bits, i)) != 0
2790 || ((temp = (alpha_emit_set_const
2791 (subtarget, mode,
2792 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
2793 i)))
2794 != 0))
2795 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
2796 target, 0, OPTAB_WIDEN);
2797 }
2798
2799 #if HOST_BITS_PER_WIDE_INT == 64
2800   /* Finally, see if we can load a value into the target that is the same as the
2801 constant except that all bytes that are 0 are changed to be 0xff. If we
2802 can, then we can do a ZAPNOT to obtain the desired constant. */
2803
2804 new = c;
2805 for (i = 0; i < 64; i += 8)
2806 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
2807 new |= (HOST_WIDE_INT) 0xff << i;
2808
2809 /* We are only called for SImode and DImode. If this is SImode, ensure that
2810 we are sign extended to a full word. */
2811
2812 if (mode == SImode)
2813 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
2814
2815 if (new != c && new != -1
2816 && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
2817 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
2818 target, 0, OPTAB_WIDEN);
2819 #endif
2820
2821 return 0;
2822 }
2823
2824 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2825    fall back to a straightforward decomposition. We do this to avoid
2826 exponential run times encountered when looking for longer sequences
2827 with alpha_emit_set_const. */
2828
2829 rtx
2830 alpha_emit_set_long_const (target, c1, c2)
2831 rtx target;
2832 HOST_WIDE_INT c1, c2;
2833 {
2834 HOST_WIDE_INT d1, d2, d3, d4;
2835
2836   /* Decompose the entire word.  */
2837 #if HOST_BITS_PER_WIDE_INT >= 64
2838 if (c2 != -(c1 < 0))
2839 abort ();
2840 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2841 c1 -= d1;
2842 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2843 c1 = (c1 - d2) >> 32;
2844 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2845 c1 -= d3;
2846 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2847 if (c1 != d4)
2848 abort ();
2849 #else
2850 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2851 c1 -= d1;
2852 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2853 if (c1 != d2)
2854 abort ();
2855 c2 += (d2 < 0);
2856 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2857 c2 -= d3;
2858 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2859 if (c2 != d4)
2860 abort ();
2861 #endif
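  /* Worked example (illustrative): c = 0x1234567890abcdef decomposes
     into d1 = -0x3211, d2 = -0x6f540000, d3 = 0x5679, d4 = 0x12340000;
     ((d4 + d3) << 32) + d2 + d1 reconstructs the original value.  */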
2862
2863   /* Construct the high word.  */
2864 if (d4)
2865 {
2866 emit_move_insn (target, GEN_INT (d4));
2867 if (d3)
2868 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2869 }
2870 else
2871 emit_move_insn (target, GEN_INT (d3));
2872
2873   /* Shift it into place.  */
2874 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2875
2876 /* Add in the low bits. */
2877 if (d2)
2878 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2879 if (d1)
2880 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2881
2882 return target;
2883 }
2884
2885 /* Expand a move instruction; return true if all work is done.
2886 We don't handle non-bwx subword loads here. */
2887
2888 bool
2889 alpha_expand_mov (mode, operands)
2890 enum machine_mode mode;
2891 rtx *operands;
2892 {
2893 /* If the output is not a register, the input must be. */
2894 if (GET_CODE (operands[0]) == MEM
2895 && ! reg_or_0_operand (operands[1], mode))
2896 operands[1] = force_reg (mode, operands[1]);
2897
2898 /* Allow legitimize_address to perform some simplifications. */
2899 if (mode == Pmode && symbolic_operand (operands[1], mode))
2900 {
2901 rtx tmp;
2902
2903 /* With RTL inlining, at -O3, rtl is generated, stored, then actually
2904 compiled at the end of compilation. In the meantime, someone can
2905 re-encode-section-info on some symbol changing it e.g. from global
2906 to local-not-small. If this happens, we'd have emitted a plain
2907 load rather than a high+losum load and not recognize the insn.
2908
2909 So if rtl inlining is in effect, we delay the global/not-global
2910 decision until rest_of_compilation by wrapping it in an
2911 UNSPEC_SYMBOL. */
2912 if (TARGET_EXPLICIT_RELOCS && flag_inline_functions
2913 && rtx_equal_function_value_matters
2914 && global_symbolic_operand (operands[1], mode))
2915 {
2916 emit_insn (gen_movdi_er_maybe_g (operands[0], operands[1]));
2917 return true;
2918 }
2919
2920 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2921 if (tmp)
2922 {
2923 if (tmp == operands[0])
2924 return true;
2925 operands[1] = tmp;
2926 return false;
2927 }
2928 }
2929
2930 /* Early out for non-constants and valid constants. */
2931 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2932 return false;
2933
2934 /* Split large integers. */
2935 if (GET_CODE (operands[1]) == CONST_INT
2936 || GET_CODE (operands[1]) == CONST_DOUBLE)
2937 {
2938 HOST_WIDE_INT i0, i1;
2939 rtx temp = NULL_RTX;
2940
2941 if (GET_CODE (operands[1]) == CONST_INT)
2942 {
2943 i0 = INTVAL (operands[1]);
2944 i1 = -(i0 < 0);
2945 }
2946 else if (HOST_BITS_PER_WIDE_INT >= 64)
2947 {
2948 i0 = CONST_DOUBLE_LOW (operands[1]);
2949 i1 = -(i0 < 0);
2950 }
2951 else
2952 {
2953 i0 = CONST_DOUBLE_LOW (operands[1]);
2954 i1 = CONST_DOUBLE_HIGH (operands[1]);
2955 }
2956
2957 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2958 temp = alpha_emit_set_const (operands[0], mode, i0, 3);
2959
2960 if (!temp && TARGET_BUILD_CONSTANTS)
2961 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2962
2963 if (temp)
2964 {
2965 if (rtx_equal_p (operands[0], temp))
2966 return true;
2967 operands[1] = temp;
2968 return false;
2969 }
2970 }
2971
2972 /* Otherwise we've nothing left but to drop the thing to memory. */
2973 operands[1] = force_const_mem (DImode, operands[1]);
2974 if (reload_in_progress)
2975 {
2976 emit_move_insn (operands[0], XEXP (operands[1], 0));
2977 operands[1] = copy_rtx (operands[1]);
2978 XEXP (operands[1], 0) = operands[0];
2979 }
2980 else
2981 operands[1] = validize_mem (operands[1]);
2982 return false;
2983 }
2984
2985 /* Expand a non-bwx QImode or HImode move instruction;
2986 return true if all work is done. */
2987
2988 bool
2989 alpha_expand_mov_nobwx (mode, operands)
2990 enum machine_mode mode;
2991 rtx *operands;
2992 {
2993 /* If the output is not a register, the input must be. */
2994 if (GET_CODE (operands[0]) == MEM)
2995 operands[1] = force_reg (mode, operands[1]);
2996
2997 /* Handle four memory cases, unaligned and aligned for either the input
2998 or the output. The only case where we can be called during reload is
2999 for aligned loads; all other cases require temporaries. */
3000
3001 if (GET_CODE (operands[1]) == MEM
3002 || (GET_CODE (operands[1]) == SUBREG
3003 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
3004 || (reload_in_progress && GET_CODE (operands[1]) == REG
3005 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
3006 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
3007 && GET_CODE (SUBREG_REG (operands[1])) == REG
3008 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
3009 {
3010 if (aligned_memory_operand (operands[1], mode))
3011 {
3012 if (reload_in_progress)
3013 {
3014 emit_insn ((mode == QImode
3015 ? gen_reload_inqi_help
3016 : gen_reload_inhi_help)
3017 (operands[0], operands[1],
3018 gen_rtx_REG (SImode, REGNO (operands[0]))));
3019 }
3020 else
3021 {
3022 rtx aligned_mem, bitnum;
3023 rtx scratch = gen_reg_rtx (SImode);
3024
3025 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
3026
3027 emit_insn ((mode == QImode
3028 ? gen_aligned_loadqi
3029 : gen_aligned_loadhi)
3030 (operands[0], aligned_mem, bitnum, scratch));
3031 }
3032 }
3033 else
3034 {
3035 /* Don't pass these as parameters since that makes the generated
3036 	     code depend on parameter evaluation order, which will cause
3037 bootstrap failures. */
3038
3039 rtx temp1 = gen_reg_rtx (DImode);
3040 rtx temp2 = gen_reg_rtx (DImode);
3041 rtx seq = ((mode == QImode
3042 ? gen_unaligned_loadqi
3043 : gen_unaligned_loadhi)
3044 (operands[0], get_unaligned_address (operands[1], 0),
3045 temp1, temp2));
3046
3047 alpha_set_memflags (seq, operands[1]);
3048 emit_insn (seq);
3049 }
3050 return true;
3051 }
3052
3053 if (GET_CODE (operands[0]) == MEM
3054 || (GET_CODE (operands[0]) == SUBREG
3055 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
3056 || (reload_in_progress && GET_CODE (operands[0]) == REG
3057 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
3058 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
3059 && GET_CODE (SUBREG_REG (operands[0])) == REG
3060 	  && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
3061 {
3062 if (aligned_memory_operand (operands[0], mode))
3063 {
3064 rtx aligned_mem, bitnum;
3065 rtx temp1 = gen_reg_rtx (SImode);
3066 rtx temp2 = gen_reg_rtx (SImode);
3067
3068 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
3069
3070 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
3071 temp1, temp2));
3072 }
3073 else
3074 {
3075 rtx temp1 = gen_reg_rtx (DImode);
3076 rtx temp2 = gen_reg_rtx (DImode);
3077 rtx temp3 = gen_reg_rtx (DImode);
3078 rtx seq = ((mode == QImode
3079 ? gen_unaligned_storeqi
3080 : gen_unaligned_storehi)
3081 (get_unaligned_address (operands[0], 0),
3082 operands[1], temp1, temp2, temp3));
3083
3084 alpha_set_memflags (seq, operands[0]);
3085 emit_insn (seq);
3086 }
3087 return true;
3088 }
3089
3090 return false;
3091 }
3092
3093 /* Generate an unsigned DImode to FP conversion. This is the same code
3094 optabs would emit if we didn't have TFmode patterns.
3095
3096 For SFmode, this is the only construction I've found that can pass
3097 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
3098 intermediates will work, because you'll get intermediate rounding
3099 that ruins the end result. Some of this could be fixed by turning
3100 on round-to-positive-infinity, but that requires diddling the fpsr,
3101 which kills performance. I tried turning this around and converting
3102 to a negative number, so that I could turn on /m, but either I did
3103    it wrong, or there's something else, because I wound up with the exact
3104 same single-bit error. There is a branch-less form of this same code:
3105
3106 srl $16,1,$1
3107 and $16,1,$2
3108 cmplt $16,0,$3
3109 or $1,$2,$2
3110 cmovge $16,$16,$2
3111 itoft $3,$f10
3112 itoft $2,$f11
3113 cvtqs $f11,$f11
3114 adds $f11,$f11,$f0
3115 fcmoveq $f10,$f11,$f0
3116
3117 I'm not using it because it's the same number of instructions as
3118 this branch-full form, and it has more serialized long latency
3119 instructions on the critical path.
3120
3121 For DFmode, we can avoid rounding errors by breaking up the word
3122 into two pieces, converting them separately, and adding them back:
3123
3124 LC0: .long 0,0x5f800000
3125
3126 itoft $16,$f11
3127 lda $2,LC0
3128 cmplt $16,0,$1
3129 cpyse $f11,$f31,$f10
3130 cpyse $f31,$f11,$f11
3131 s4addq $1,$2,$1
3132 lds $f12,0($1)
3133 cvtqt $f10,$f10
3134 cvtqt $f11,$f11
3135 addt $f12,$f10,$f0
3136 addt $f0,$f11,$f0
3137
3138 This doesn't seem to be a clear-cut win over the optabs form.
3139 It probably all depends on the distribution of numbers being
3140 converted -- in the optabs form, all but high-bit-set has a
3141 much lower minimum execution time. */
3142
3143 void
3144 alpha_emit_floatuns (operands)
3145 rtx operands[2];
3146 {
3147 rtx neglab, donelab, i0, i1, f0, in, out;
3148 enum machine_mode mode;
3149
3150 out = operands[0];
3151 in = force_reg (DImode, operands[1]);
3152 mode = GET_MODE (out);
3153 neglab = gen_label_rtx ();
3154 donelab = gen_label_rtx ();
3155 i0 = gen_reg_rtx (DImode);
3156 i1 = gen_reg_rtx (DImode);
3157 f0 = gen_reg_rtx (mode);
3158
3159 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
3160
3161 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
3162 emit_jump_insn (gen_jump (donelab));
3163 emit_barrier ();
3164
3165 emit_label (neglab);
3166
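  /* The high bit of IN is set: halve it, keeping the low bit as a
     sticky bit so rounding is still correct (round-to-odd), convert,
     then double the result.  */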
3167 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
3168 emit_insn (gen_anddi3 (i1, in, const1_rtx));
3169 emit_insn (gen_iordi3 (i0, i0, i1));
3170 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
3171 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
3172
3173 emit_label (donelab);
3174 }
3175
3176 /* Generate the comparison for a conditional branch. */
3177
3178 rtx
3179 alpha_emit_conditional_branch (code)
3180 enum rtx_code code;
3181 {
3182 enum rtx_code cmp_code, branch_code;
3183 enum machine_mode cmp_mode, branch_mode = VOIDmode;
3184 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
3185 rtx tem;
3186
3187 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
3188 {
3189 if (! TARGET_HAS_XFLOATING_LIBS)
3190 abort ();
3191
3192 /* X_floating library comparison functions return
3193 -1 unordered
3194 0 false
3195 1 true
3196 Convert the compare against the raw return value. */
3197
3198 switch (code)
3199 {
3200 case UNORDERED:
3201 cmp_code = EQ;
3202 code = LT;
3203 break;
3204 case ORDERED:
3205 cmp_code = EQ;
3206 code = GE;
3207 break;
3208 case NE:
3209 cmp_code = NE;
3210 code = NE;
3211 break;
3212 default:
3213 cmp_code = code;
3214 code = GT;
3215 break;
3216 }
3217
3218 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
3219 op1 = const0_rtx;
3220 alpha_compare.fp_p = 0;
3221 }
3222
3223 /* The general case: fold the comparison code to the types of compares
3224 that we have, choosing the branch as necessary. */
3225 switch (code)
3226 {
3227 case EQ: case LE: case LT: case LEU: case LTU:
3228 case UNORDERED:
3229 /* We have these compares: */
3230 cmp_code = code, branch_code = NE;
3231 break;
3232
3233 case NE:
3234 case ORDERED:
3235 /* These must be reversed. */
3236 cmp_code = reverse_condition (code), branch_code = EQ;
3237 break;
3238
3239 case GE: case GT: case GEU: case GTU:
3240 /* For FP, we swap them, for INT, we reverse them. */
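      /* E.g. (illustrative): a > b becomes "cmptlt b,a; bne" for FP
	 and "cmple a,b; beq" for integers.  */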
3241 if (alpha_compare.fp_p)
3242 {
3243 cmp_code = swap_condition (code);
3244 branch_code = NE;
3245 tem = op0, op0 = op1, op1 = tem;
3246 }
3247 else
3248 {
3249 cmp_code = reverse_condition (code);
3250 branch_code = EQ;
3251 }
3252 break;
3253
3254 default:
3255 abort ();
3256 }
3257
3258 if (alpha_compare.fp_p)
3259 {
3260 cmp_mode = DFmode;
3261 if (flag_unsafe_math_optimizations)
3262 {
3263 /* When we are not as concerned about non-finite values, and we
3264 are comparing against zero, we can branch directly. */
3265 if (op1 == CONST0_RTX (DFmode))
3266 cmp_code = NIL, branch_code = code;
3267 else if (op0 == CONST0_RTX (DFmode))
3268 {
3269 /* Undo the swap we probably did just above. */
3270 tem = op0, op0 = op1, op1 = tem;
3271 branch_code = swap_condition (cmp_code);
3272 cmp_code = NIL;
3273 }
3274 }
3275 else
3276 {
3277 	  /* ??? We mark the branch mode to be CCmode to prevent the
3278 compare and branch from being combined, since the compare
3279 insn follows IEEE rules that the branch does not. */
3280 branch_mode = CCmode;
3281 }
3282 }
3283 else
3284 {
3285 cmp_mode = DImode;
3286
3287 /* The following optimizations are only for signed compares. */
3288 if (code != LEU && code != LTU && code != GEU && code != GTU)
3289 {
3290 /* Whee. Compare and branch against 0 directly. */
3291 if (op1 == const0_rtx)
3292 cmp_code = NIL, branch_code = code;
3293
3294 /* We want to use cmpcc/bcc when we can, since there is a zero delay
3295 bypass between logicals and br/cmov on EV5. But we don't want to
3296 force valid immediate constants into registers needlessly. */
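	  /* E.g. (illustrative): for x < 0x8000, the constant 0x8000
	     does not satisfy 'I' but -0x8000 satisfies 'K', so we emit
	     t = x + (-0x8000) and branch on t < 0 instead.  */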
3297 else if (GET_CODE (op1) == CONST_INT)
3298 {
3299 HOST_WIDE_INT v = INTVAL (op1), n = -v;
3300
3301 if (! CONST_OK_FOR_LETTER_P (v, 'I')
3302 && (CONST_OK_FOR_LETTER_P (n, 'K')
3303 || CONST_OK_FOR_LETTER_P (n, 'L')))
3304 {
3305 cmp_code = PLUS, branch_code = code;
3306 op1 = GEN_INT (n);
3307 }
3308 }
3309 }
3310
3311 if (!reg_or_0_operand (op0, DImode))
3312 op0 = force_reg (DImode, op0);
3313 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
3314 op1 = force_reg (DImode, op1);
3315 }
3316
3317 /* Emit an initial compare instruction, if necessary. */
3318 tem = op0;
3319 if (cmp_code != NIL)
3320 {
3321 tem = gen_reg_rtx (cmp_mode);
3322 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
3323 }
3324
3325 /* Zero the operands. */
3326 memset (&alpha_compare, 0, sizeof (alpha_compare));
3327
3328 /* Return the branch comparison. */
3329 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
3330 }
3331
3332 /* Certain simplifications can be done to make invalid setcc operations
3333    valid. Return the final comparison, or NULL if it can't be done. */
3334
3335 rtx
3336 alpha_emit_setcc (code)
3337 enum rtx_code code;
3338 {
3339 enum rtx_code cmp_code;
3340 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
3341 int fp_p = alpha_compare.fp_p;
3342 rtx tmp;
3343
3344 /* Zero the operands. */
3345 memset (&alpha_compare, 0, sizeof (alpha_compare));
3346
3347 if (fp_p && GET_MODE (op0) == TFmode)
3348 {
3349 if (! TARGET_HAS_XFLOATING_LIBS)
3350 abort ();
3351
3352 /* X_floating library comparison functions return
3353 -1 unordered
3354 0 false
3355 1 true
3356 Convert the compare against the raw return value. */
3357
3358 if (code == UNORDERED || code == ORDERED)
3359 cmp_code = EQ;
3360 else
3361 cmp_code = code;
3362
3363 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
3364 op1 = const0_rtx;
3365 fp_p = 0;
3366
3367 if (code == UNORDERED)
3368 code = LT;
3369 else if (code == ORDERED)
3370 code = GE;
3371 else
3372 code = GT;
3373 }
3374
3375 if (fp_p && !TARGET_FIX)
3376 return NULL_RTX;
3377
3378 /* The general case: fold the comparison code to the types of compares
3379 that we have, choosing the branch as necessary. */
3380
3381 cmp_code = NIL;
3382 switch (code)
3383 {
3384 case EQ: case LE: case LT: case LEU: case LTU:
3385 case UNORDERED:
3386 /* We have these compares. */
3387 if (fp_p)
3388 cmp_code = code, code = NE;
3389 break;
3390
3391 case NE:
3392 if (!fp_p && op1 == const0_rtx)
3393 break;
3394 /* FALLTHRU */
3395
3396 case ORDERED:
3397 cmp_code = reverse_condition (code);
3398 code = EQ;
3399 break;
3400
3401 case GE: case GT: case GEU: case GTU:
3402 /* These normally need swapping, but for integer zero we have
3403 special patterns that recognize swapped operands. */
3404 if (!fp_p && op1 == const0_rtx)
3405 break;
3406 code = swap_condition (code);
3407 if (fp_p)
3408 cmp_code = code, code = NE;
3409 tmp = op0, op0 = op1, op1 = tmp;
3410 break;
3411
3412 default:
3413 abort ();
3414 }
3415
3416 if (!fp_p)
3417 {
3418 if (!register_operand (op0, DImode))
3419 op0 = force_reg (DImode, op0);
3420 if (!reg_or_8bit_operand (op1, DImode))
3421 op1 = force_reg (DImode, op1);
3422 }
3423
3424 /* Emit an initial compare instruction, if necessary. */
3425 if (cmp_code != NIL)
3426 {
3427 enum machine_mode mode = fp_p ? DFmode : DImode;
3428
3429 tmp = gen_reg_rtx (mode);
3430 emit_insn (gen_rtx_SET (VOIDmode, tmp,
3431 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
3432
3433 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
3434 op1 = const0_rtx;
3435 }
3436
3437 /* Return the setcc comparison. */
3438 return gen_rtx_fmt_ee (code, DImode, op0, op1);
3439 }
3440
3441
3442 /* Rewrite a comparison against zero CMP of the form
3443 (CODE (cc0) (const_int 0)) so it can be written validly in
3444 a conditional move (if_then_else CMP ...).
3445    If both of the operands that set cc0 are non-zero, we must emit
3446 an insn to perform the compare (it can't be done within
3447 the conditional move). */
3448 rtx
3449 alpha_emit_conditional_move (cmp, mode)
3450 rtx cmp;
3451 enum machine_mode mode;
3452 {
3453 enum rtx_code code = GET_CODE (cmp);
3454 enum rtx_code cmov_code = NE;
3455 rtx op0 = alpha_compare.op0;
3456 rtx op1 = alpha_compare.op1;
3457 int fp_p = alpha_compare.fp_p;
3458 enum machine_mode cmp_mode
3459 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
3460 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
3461 enum machine_mode cmov_mode = VOIDmode;
3462 int local_fast_math = flag_unsafe_math_optimizations;
3463 rtx tem;
3464
3465 /* Zero the operands. */
3466 memset (&alpha_compare, 0, sizeof (alpha_compare));
3467
3468 if (fp_p != FLOAT_MODE_P (mode))
3469 {
3470 enum rtx_code cmp_code;
3471
3472 if (! TARGET_FIX)
3473 return 0;
3474
3475 /* If we have fp<->int register move instructions, do a cmov by
3476 performing the comparison in fp registers, and move the
3477 zero/non-zero value to integer registers, where we can then
3478 use a normal cmov, or vice-versa. */
3479
3480 switch (code)
3481 {
3482 case EQ: case LE: case LT: case LEU: case LTU:
3483 /* We have these compares. */
3484 cmp_code = code, code = NE;
3485 break;
3486
3487 case NE:
3488 /* This must be reversed. */
3489 cmp_code = EQ, code = EQ;
3490 break;
3491
3492 case GE: case GT: case GEU: case GTU:
3493 /* These normally need swapping, but for integer zero we have
3494 special patterns that recognize swapped operands. */
3495 if (!fp_p && op1 == const0_rtx)
3496 cmp_code = code, code = NE;
3497 else
3498 {
3499 cmp_code = swap_condition (code);
3500 code = NE;
3501 tem = op0, op0 = op1, op1 = tem;
3502 }
3503 break;
3504
3505 default:
3506 abort ();
3507 }
3508
3509 tem = gen_reg_rtx (cmp_op_mode);
3510 emit_insn (gen_rtx_SET (VOIDmode, tem,
3511 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
3512 op0, op1)));
3513
3514 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
3515 op0 = gen_lowpart (cmp_op_mode, tem);
3516 op1 = CONST0_RTX (cmp_op_mode);
3517 fp_p = !fp_p;
3518 local_fast_math = 1;
3519 }
3520
3521 /* We may be able to use a conditional move directly.
3522 This avoids emitting spurious compares. */
3523 if (signed_comparison_operator (cmp, VOIDmode)
3524 && (!fp_p || local_fast_math)
3525 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
3526 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3527
3528 /* We can't put the comparison inside the conditional move;
3529 emit a compare instruction and put that inside the
3530 conditional move. Make sure we emit only comparisons we have;
3531 swap or reverse as necessary. */
3532
3533 if (no_new_pseudos)
3534 return NULL_RTX;
3535
3536 switch (code)
3537 {
3538 case EQ: case LE: case LT: case LEU: case LTU:
3539 /* We have these compares: */
3540 break;
3541
3542 case NE:
3543 /* This must be reversed. */
3544 code = reverse_condition (code);
3545 cmov_code = EQ;
3546 break;
3547
3548 case GE: case GT: case GEU: case GTU:
3549 /* These must be swapped. */
3550 if (op1 != CONST0_RTX (cmp_mode))
3551 {
3552 code = swap_condition (code);
3553 tem = op0, op0 = op1, op1 = tem;
3554 }
3555 break;
3556
3557 default:
3558 abort ();
3559 }
3560
3561 if (!fp_p)
3562 {
3563 if (!reg_or_0_operand (op0, DImode))
3564 op0 = force_reg (DImode, op0);
3565 if (!reg_or_8bit_operand (op1, DImode))
3566 op1 = force_reg (DImode, op1);
3567 }
3568
3569 /* ??? We mark the branch mode to be CCmode to prevent the compare
3570 and cmov from being combined, since the compare insn follows IEEE
3571 rules that the cmov does not. */
3572 if (fp_p && !local_fast_math)
3573 cmov_mode = CCmode;
3574
3575 tem = gen_reg_rtx (cmp_op_mode);
3576 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
3577 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
3578 }
3579
3580 /* Simplify a conditional move of two constants into a setcc with
3581 arithmetic. This is done with a splitter since combine would
3582 just undo the work if done during code generation. It also catches
3583 cases we wouldn't have before cse. */
3584
3585 int
3586 alpha_split_conditional_move (code, dest, cond, t_rtx, f_rtx)
3587 enum rtx_code code;
3588 rtx dest, cond, t_rtx, f_rtx;
3589 {
3590 HOST_WIDE_INT t, f, diff;
3591 enum machine_mode mode;
3592 rtx target, subtarget, tmp;
3593
3594 mode = GET_MODE (dest);
3595 t = INTVAL (t_rtx);
3596 f = INTVAL (f_rtx);
3597 diff = t - f;
3598
3599 if (((code == NE || code == EQ) && diff < 0)
3600 || (code == GE || code == GT))
3601 {
3602 code = reverse_condition (code);
3603 diff = t, t = f, f = diff;
3604 diff = t - f;
3605 }
3606
3607 subtarget = target = dest;
3608 if (mode != DImode)
3609 {
3610 target = gen_lowpart (DImode, dest);
3611 if (! no_new_pseudos)
3612 subtarget = gen_reg_rtx (DImode);
3613 else
3614 subtarget = target;
3615 }
3616 /* Below, we must be careful to use copy_rtx on target and subtarget
3617 in intermediate insns, as they may be a subreg rtx, which may not
3618 be shared. */
3619
3620 if (f == 0 && exact_log2 (diff) > 0
3621       /* On EV6, we've got enough shifters to make non-arithmetic shifts
3622 viable over a longer latency cmove. On EV5, the E0 slot is a
3623 scarce resource, and on EV4 shift has the same latency as a cmove. */
3624 && (diff <= 8 || alpha_cpu == PROCESSOR_EV6))
3625 {
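      /* E.g. (illustrative): (cond ? 8 : 0) becomes "setcc t" followed
	 by "sll t,3,dest", avoiding the longer-latency cmove.  */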
3626 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
3627 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
3628
3629 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
3630 GEN_INT (exact_log2 (t)));
3631 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
3632 }
3633 else if (f == 0 && t == -1)
3634 {
3635 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
3636 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
3637
3638 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
3639 }
3640 else if (diff == 1 || diff == 4 || diff == 8)
3641 {
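      /* E.g. (illustrative): t - f == 4 computes
	 dest = setcc * 4 + f with a single s4addq.  */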
3642 rtx add_op;
3643
3644 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
3645 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
3646
3647 if (diff == 1)
3648 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
3649 else
3650 {
3651 add_op = GEN_INT (f);
3652 if (sext_add_operand (add_op, mode))
3653 {
3654 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
3655 GEN_INT (diff));
3656 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
3657 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
3658 }
3659 else
3660 return 0;
3661 }
3662 }
3663 else
3664 return 0;
3665
3666 return 1;
3667 }
3668 \f
3669 /* Look up the function X_floating library function name for the
3670 given operation. */
3671
3672 static const char *
3673 alpha_lookup_xfloating_lib_func (code)
3674 enum rtx_code code;
3675 {
3676 struct xfloating_op
3677 {
3678 const enum rtx_code code;
3679 const char *const func;
3680 };
3681
3682 static const struct xfloating_op vms_xfloating_ops[] =
3683 {
3684 { PLUS, "OTS$ADD_X" },
3685 { MINUS, "OTS$SUB_X" },
3686 { MULT, "OTS$MUL_X" },
3687 { DIV, "OTS$DIV_X" },
3688 { EQ, "OTS$EQL_X" },
3689 { NE, "OTS$NEQ_X" },
3690 { LT, "OTS$LSS_X" },
3691 { LE, "OTS$LEQ_X" },
3692 { GT, "OTS$GTR_X" },
3693 { GE, "OTS$GEQ_X" },
3694 { FIX, "OTS$CVTXQ" },
3695 { FLOAT, "OTS$CVTQX" },
3696 { UNSIGNED_FLOAT, "OTS$CVTQUX" },
3697 { FLOAT_EXTEND, "OTS$CVT_FLOAT_T_X" },
3698 { FLOAT_TRUNCATE, "OTS$CVT_FLOAT_X_T" },
3699 };
3700
3701 static const struct xfloating_op osf_xfloating_ops[] =
3702 {
3703 { PLUS, "_OtsAddX" },
3704 { MINUS, "_OtsSubX" },
3705 { MULT, "_OtsMulX" },
3706 { DIV, "_OtsDivX" },
3707 { EQ, "_OtsEqlX" },
3708 { NE, "_OtsNeqX" },
3709 { LT, "_OtsLssX" },
3710 { LE, "_OtsLeqX" },
3711 { GT, "_OtsGtrX" },
3712 { GE, "_OtsGeqX" },
3713 { FIX, "_OtsCvtXQ" },
3714 { FLOAT, "_OtsCvtQX" },
3715 { UNSIGNED_FLOAT, "_OtsCvtQUX" },
3716 { FLOAT_EXTEND, "_OtsConvertFloatTX" },
3717 { FLOAT_TRUNCATE, "_OtsConvertFloatXT" },
3718 };
3719
3720 const struct xfloating_op *ops;
3721 const long n = ARRAY_SIZE (osf_xfloating_ops);
3722 long i;
3723
3724 /* How irritating. Nothing to key off for the table. Hardcode
3725 knowledge of the G_floating routines. */
3726 if (TARGET_FLOAT_VAX)
3727 {
3728 if (TARGET_ABI_OPEN_VMS)
3729 {
3730 if (code == FLOAT_EXTEND)
3731 return "OTS$CVT_FLOAT_G_X";
3732 if (code == FLOAT_TRUNCATE)
3733 return "OTS$CVT_FLOAT_X_G";
3734 }
3735 else
3736 {
3737 if (code == FLOAT_EXTEND)
3738 return "_OtsConvertFloatGX";
3739 if (code == FLOAT_TRUNCATE)
3740 return "_OtsConvertFloatXG";
3741 }
3742 }
3743
3744 if (TARGET_ABI_OPEN_VMS)
3745 ops = vms_xfloating_ops;
3746 else
3747 ops = osf_xfloating_ops;
3748
3749 for (i = 0; i < n; ++i)
3750 if (ops[i].code == code)
3751 return ops[i].func;
3752
3753   abort ();
3754 }
3755
3756 /* Most X_floating operations take the rounding mode as an argument.
3757 Compute that here. */
3758
3759 static int
3760 alpha_compute_xfloating_mode_arg (code, round)
3761 enum rtx_code code;
3762 enum alpha_fp_rounding_mode round;
3763 {
3764 int mode;
3765
3766 switch (round)
3767 {
3768 case ALPHA_FPRM_NORM:
3769 mode = 2;
3770 break;
3771 case ALPHA_FPRM_MINF:
3772 mode = 1;
3773 break;
3774 case ALPHA_FPRM_CHOP:
3775 mode = 0;
3776 break;
3777 case ALPHA_FPRM_DYN:
3778 mode = 4;
3779 break;
3780 default:
3781 abort ();
3782
3783 /* XXX For reference, round to +inf is mode = 3. */
3784 }
3785
3786 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3787 mode |= 0x10000;
3788
3789 return mode;
3790 }
3791
3792 /* Emit an X_floating library function call.
3793
3794 Note that these functions do not follow normal calling conventions:
3795 TFmode arguments are passed in two integer registers (as opposed to
3796 indirect); TFmode return values appear in R16+R17.
3797
3798 FUNC is the function name to call.
3799 TARGET is where the output belongs.
3800 OPERANDS are the inputs.
3801 NOPERANDS is the count of inputs.
3802 EQUIV is the expression equivalent for the function.
3803 */
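/* For example (derived from the code below): a two-TF-operand call such
   as _OtsAddX places operand 0 in $16-$17, operand 1 in $18-$19, and the
   DImode mode argument in $20; the TFmode result comes back in $16-$17.  */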
3804
3805 static void
3806 alpha_emit_xfloating_libcall (func, target, operands, noperands, equiv)
3807 const char *func;
3808 rtx target;
3809 rtx operands[];
3810 int noperands;
3811 rtx equiv;
3812 {
3813 rtx usage = NULL_RTX, tmp, reg;
3814 int regno = 16, i;
3815
3816 start_sequence ();
3817
3818 for (i = 0; i < noperands; ++i)
3819 {
3820 switch (GET_MODE (operands[i]))
3821 {
3822 case TFmode:
3823 reg = gen_rtx_REG (TFmode, regno);
3824 regno += 2;
3825 break;
3826
3827 case DFmode:
3828 reg = gen_rtx_REG (DFmode, regno + 32);
3829 regno += 1;
3830 break;
3831
3832 case VOIDmode:
3833 if (GET_CODE (operands[i]) != CONST_INT)
3834 abort ();
3835 /* FALLTHRU */
3836 case DImode:
3837 reg = gen_rtx_REG (DImode, regno);
3838 regno += 1;
3839 break;
3840
3841 default:
3842 abort ();
3843 }
3844
3845 emit_move_insn (reg, operands[i]);
3846 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3847 }
3848
3849 switch (GET_MODE (target))
3850 {
3851 case TFmode:
3852 reg = gen_rtx_REG (TFmode, 16);
3853 break;
3854 case DFmode:
3855 reg = gen_rtx_REG (DFmode, 32);
3856 break;
3857 case DImode:
3858 reg = gen_rtx_REG (DImode, 0);
3859 break;
3860 default:
3861 abort ();
3862 }
3863
3864 tmp = gen_rtx_MEM (QImode, gen_rtx_SYMBOL_REF (Pmode, (char *) func));
3865 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3866 const0_rtx, const0_rtx));
3867 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3868
3869 tmp = get_insns ();
3870 end_sequence ();
3871
3872 emit_libcall_block (tmp, target, reg, equiv);
3873 }
3874
3875 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3876
3877 void
3878 alpha_emit_xfloating_arith (code, operands)
3879 enum rtx_code code;
3880 rtx operands[];
3881 {
3882 const char *func;
3883 int mode;
3884 rtx out_operands[3];
3885
3886 func = alpha_lookup_xfloating_lib_func (code);
3887 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3888
3889 out_operands[0] = operands[1];
3890 out_operands[1] = operands[2];
3891 out_operands[2] = GEN_INT (mode);
3892 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3893 gen_rtx_fmt_ee (code, TFmode, operands[1],
3894 operands[2]));
3895 }
3896
3897 /* Emit an X_floating library function call for a comparison. */
3898
3899 static rtx
3900 alpha_emit_xfloating_compare (code, op0, op1)
3901 enum rtx_code code;
3902 rtx op0, op1;
3903 {
3904 const char *func;
3905 rtx out, operands[2];
3906
3907 func = alpha_lookup_xfloating_lib_func (code);
3908
3909 operands[0] = op0;
3910 operands[1] = op1;
3911 out = gen_reg_rtx (DImode);
3912
3913 /* ??? Strange mode for equiv because what's actually returned
3914 is -1,0,1, not a proper boolean value. */
3915 alpha_emit_xfloating_libcall (func, out, operands, 2,
3916 gen_rtx_fmt_ee (code, CCmode, op0, op1));
3917
3918 return out;
3919 }
3920
3921 /* Emit an X_floating library function call for a conversion. */
3922
3923 void
3924 alpha_emit_xfloating_cvt (code, operands)
3925 enum rtx_code code;
3926 rtx operands[];
3927 {
3928 int noperands = 1, mode;
3929 rtx out_operands[2];
3930 const char *func;
3931
3932 func = alpha_lookup_xfloating_lib_func (code);
3933
3934 out_operands[0] = operands[1];
3935
3936 switch (code)
3937 {
3938 case FIX:
3939 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3940 out_operands[1] = GEN_INT (mode);
3941 noperands = 2;
3942 break;
3943 case FLOAT_TRUNCATE:
3944 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3945 out_operands[1] = GEN_INT (mode);
3946 noperands = 2;
3947 break;
3948 default:
3949 break;
3950 }
3951
3952 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3953 gen_rtx_fmt_e (code, GET_MODE (operands[0]),
3954 operands[1]));
3955 }
3956
3957 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3958 OP[0] into OP[0,1]. Naturally, output operand ordering is
3959 little-endian. */
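/* Example: if operands[1] is (reg:TF 9) and operands[0] is (reg:TF 3),
   then on return operands[2] is (reg:DI 9), operands[3] is (reg:DI 10),
   operands[0] is (reg:DI 3) and operands[1] is (reg:DI 4).  */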
3960
3961 void
3962 alpha_split_tfmode_pair (operands)
3963 rtx operands[4];
3964 {
3965 if (GET_CODE (operands[1]) == REG)
3966 {
3967 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3968 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3969 }
3970 else if (GET_CODE (operands[1]) == MEM)
3971 {
3972 operands[3] = adjust_address (operands[1], DImode, 8);
3973 operands[2] = adjust_address (operands[1], DImode, 0);
3974 }
3975 else if (operands[1] == CONST0_RTX (TFmode))
3976 operands[2] = operands[3] = const0_rtx;
3977 else
3978 abort ();
3979
3980 if (GET_CODE (operands[0]) == REG)
3981 {
3982 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3983 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3984 }
3985 else if (GET_CODE (operands[0]) == MEM)
3986 {
3987 operands[1] = adjust_address (operands[0], DImode, 8);
3988 operands[0] = adjust_address (operands[0], DImode, 0);
3989 }
3990 else
3991 abort ();
3992 }
3993
3994 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3995 op2 is a register containing the sign bit, operation is the
3996 logical operation to be performed. */
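/* For negtf2, OP2 would hold the sign bit 1 << 63 and OPERATION would
   be an XOR generator such as gen_xordi3; abstf2 would instead clear
   the bit with an AND-NOT.  (A sketch of the intended usage; the real
   expanders live in alpha.md.)  */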
3997
3998 void
3999 alpha_split_tfmode_frobsign (operands, operation)
4000 rtx operands[3];
4001 rtx (*operation) PARAMS ((rtx, rtx, rtx));
4002 {
4003 rtx high_bit = operands[2];
4004 rtx scratch;
4005 int move;
4006
4007 alpha_split_tfmode_pair (operands);
4008
4009 /* Detect three flavours of operand overlap. */
4010 move = 1;
4011 if (rtx_equal_p (operands[0], operands[2]))
4012 move = 0;
4013 else if (rtx_equal_p (operands[1], operands[2]))
4014 {
4015 if (rtx_equal_p (operands[0], high_bit))
4016 move = 2;
4017 else
4018 move = -1;
4019 }
4020
4021 if (move < 0)
4022 emit_move_insn (operands[0], operands[2]);
4023
4024 /* ??? If the destination overlaps both source tf and high_bit, then
4025 assume source tf is dead in its entirety and use the other half
4026 for a scratch register. Otherwise "scratch" is just the proper
4027 destination register. */
4028 scratch = operands[move < 2 ? 1 : 3];
4029
4030 emit_insn ((*operation) (scratch, high_bit, operands[3]));
4031
4032 if (move > 0)
4033 {
4034 emit_move_insn (operands[0], operands[2]);
4035 if (move > 1)
4036 emit_move_insn (operands[1], scratch);
4037 }
4038 }
4039 \f
4040 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
4041 unaligned data:
4042
4043 unsigned: signed:
4044 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
4045 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
4046 lda r3,X(r11) lda r3,X+2(r11)
4047 extwl r1,r3,r1 extql r1,r3,r1
4048 extwh r2,r3,r2 extqh r2,r3,r2
4049 or r1,r2,r1 or r1,r2,r1
4050 sra r1,48,r1
4051
4052 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
4053 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
4054 lda r3,X(r11) lda r3,X(r11)
4055 extll r1,r3,r1 extll r1,r3,r1
4056 extlh r2,r3,r2 extlh r2,r3,r2
4057 or r1,r2,r1 addl r1,r2,r1
4058
4059 quad: ldq_u r1,X(r11)
4060 ldq_u r2,X+7(r11)
4061 lda r3,X(r11)
4062 extql r1,r3,r1
4063 extqh r2,r3,r2
4064 or r1,r2,r1
4065 */
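/* TGT is the destination register, MEM the unaligned source, SIZE the
   access size in bytes (2, 4 or 8), OFS a constant byte offset from
   MEM's address, and SIGN nonzero to sign-extend the result.  */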
4066
4067 void
4068 alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
4069 rtx tgt, mem;
4070 HOST_WIDE_INT size, ofs;
4071 int sign;
4072 {
4073 rtx meml, memh, addr, extl, exth, tmp, mema;
4074 enum machine_mode mode;
4075
4076 meml = gen_reg_rtx (DImode);
4077 memh = gen_reg_rtx (DImode);
4078 addr = gen_reg_rtx (DImode);
4079 extl = gen_reg_rtx (DImode);
4080 exth = gen_reg_rtx (DImode);
4081
4082 mema = XEXP (mem, 0);
4083 if (GET_CODE (mema) == LO_SUM)
4084 mema = force_reg (Pmode, mema);
4085
4086 /* AND addresses cannot be in any alias set, since they may implicitly
4087 alias surrounding code. Ideally we'd have some alias set that
4088 covered all types except those with alignment 8 or higher. */
4089
4090 tmp = change_address (mem, DImode,
4091 gen_rtx_AND (DImode,
4092 plus_constant (mema, ofs),
4093 GEN_INT (-8)));
4094 set_mem_alias_set (tmp, 0);
4095 emit_move_insn (meml, tmp);
4096
4097 tmp = change_address (mem, DImode,
4098 gen_rtx_AND (DImode,
4099 plus_constant (mema, ofs + size - 1),
4100 GEN_INT (-8)));
4101 set_mem_alias_set (tmp, 0);
4102 emit_move_insn (memh, tmp);
4103
4104 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
4105 {
4106 emit_move_insn (addr, plus_constant (mema, -1));
4107
4108 emit_insn (gen_extqh_be (extl, meml, addr));
4109 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
4110
4111 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
4112 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
4113 addr, 1, OPTAB_WIDEN);
4114 }
4115 else if (sign && size == 2)
4116 {
4117 emit_move_insn (addr, plus_constant (mema, ofs+2));
4118
4119 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
4120 emit_insn (gen_extqh_le (exth, memh, addr));
4121
4122 /* We must use tgt here for the target. The Alpha VMS port fails if we
4123 use addr for the target, because addr is marked as a pointer and
4124 combine knows that pointers are always sign-extended 32-bit values. */
4125 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
4126 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
4127 addr, 1, OPTAB_WIDEN);
4128 }
4129 else
4130 {
4131 if (WORDS_BIG_ENDIAN)
4132 {
4133 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
4134 switch ((int) size)
4135 {
4136 case 2:
4137 emit_insn (gen_extwh_be (extl, meml, addr));
4138 mode = HImode;
4139 break;
4140
4141 case 4:
4142 emit_insn (gen_extlh_be (extl, meml, addr));
4143 mode = SImode;
4144 break;
4145
4146 case 8:
4147 emit_insn (gen_extqh_be (extl, meml, addr));
4148 mode = DImode;
4149 break;
4150
4151 default:
4152 abort ();
4153 }
4154 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
4155 }
4156 else
4157 {
4158 emit_move_insn (addr, plus_constant (mema, ofs));
4159 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
4160 switch ((int) size)
4161 {
4162 case 2:
4163 emit_insn (gen_extwh_le (exth, memh, addr));
4164 mode = HImode;
4165 break;
4166
4167 case 4:
4168 emit_insn (gen_extlh_le (exth, memh, addr));
4169 mode = SImode;
4170 break;
4171
4172 case 8:
4173 emit_insn (gen_extqh_le (exth, memh, addr));
4174 mode = DImode;
4175 break;
4176
4177 default:
4178 abort ();
4179 }
4180 }
4181
4182 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
4183 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
4184 sign, OPTAB_WIDEN);
4185 }
4186
4187 if (addr != tgt)
4188 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
4189 }
4190
4191 /* Similarly, use ins and msk instructions to perform unaligned stores. */
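/* For a little-endian quadword store the generated sequence is roughly
   (following the same Architecture Handbook recipe; r4 holds the data):

       ldq_u r1,X(r11)
       ldq_u r2,X+7(r11)
       lda r3,X(r11)
       insql r4,r3,r5
       insqh r4,r3,r6
       mskql r1,r3,r1
       mskqh r2,r3,r2
       or r1,r5,r1
       or r2,r6,r2
       stq_u r2,X+7(r11)
       stq_u r1,X(r11)
*/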
4192
4193 void
4194 alpha_expand_unaligned_store (dst, src, size, ofs)
4195 rtx dst, src;
4196 HOST_WIDE_INT size, ofs;
4197 {
4198 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
4199
4200 dstl = gen_reg_rtx (DImode);
4201 dsth = gen_reg_rtx (DImode);
4202 insl = gen_reg_rtx (DImode);
4203 insh = gen_reg_rtx (DImode);
4204
4205 dsta = XEXP (dst, 0);
4206 if (GET_CODE (dsta) == LO_SUM)
4207 dsta = force_reg (Pmode, dsta);
4208
4209 /* AND addresses cannot be in any alias set, since they may implicitly
4210 alias surrounding code. Ideally we'd have some alias set that
4211 covered all types except those with alignment 8 or higher. */
4212
4213 meml = change_address (dst, DImode,
4214 gen_rtx_AND (DImode,
4215 plus_constant (dsta, ofs),
4216 GEN_INT (-8)));
4217 set_mem_alias_set (meml, 0);
4218
4219 memh = change_address (dst, DImode,
4220 gen_rtx_AND (DImode,
4221 plus_constant (dsta, ofs + size - 1),
4222 GEN_INT (-8)));
4223 set_mem_alias_set (memh, 0);
4224
4225 emit_move_insn (dsth, memh);
4226 emit_move_insn (dstl, meml);
4227 if (WORDS_BIG_ENDIAN)
4228 {
4229 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
4230
4231 if (src != const0_rtx)
4232 {
4233 switch ((int) size)
4234 {
4235 case 2:
4236 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
4237 break;
4238 case 4:
4239 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
4240 break;
4241 case 8:
4242 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
4243 break;
4244 }
4245 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
4246 GEN_INT (size*8), addr));
4247 }
4248
4249 switch ((int) size)
4250 {
4251 case 2:
4252 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
4253 break;
4254 case 4:
4255 {
4256 rtx msk = immed_double_const (0xffffffff, 0, DImode);
4257 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
4258 break;
4259 }
4260 case 8:
4261 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
4262 break;
4263 }
4264
4265 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
4266 }
4267 else
4268 {
4269 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
4270
4271 if (src != const0_rtx)
4272 {
4273 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
4274 GEN_INT (size*8), addr));
4275
4276 switch ((int) size)
4277 {
4278 case 2:
4279 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
4280 break;
4281 case 4:
4282 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
4283 break;
4284 case 8:
4285 emit_insn (gen_insql_le (insl, src, addr));
4286 break;
4287 }
4288 }
4289
4290 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
4291
4292 switch ((int) size)
4293 {
4294 case 2:
4295 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
4296 break;
4297 case 4:
4298 {
4299 rtx msk = immed_double_const (0xffffffff, 0, DImode);
4300 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
4301 break;
4302 }
4303 case 8:
4304 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
4305 break;
4306 }
4307 }
4308
4309 if (src != const0_rtx)
4310 {
4311 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
4312 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
4313 }
4314
4315 if (WORDS_BIG_ENDIAN)
4316 {
4317 emit_move_insn (meml, dstl);
4318 emit_move_insn (memh, dsth);
4319 }
4320 else
4321 {
4322 /* Must store high before low for degenerate case of aligned. */
4323 emit_move_insn (memh, dsth);
4324 emit_move_insn (meml, dstl);
4325 }
4326 }
4327
4328 /* The block move code tries to maximize speed by separating loads and
4329 stores at the expense of register pressure: we load all of the data
4330 before we store it back out. Two secondary effects are worth
4331 mentioning: this speeds copying between aligned and unaligned
4332 buffers, and it makes the code significantly easier to write. */
4333
4334 #define MAX_MOVE_WORDS 8
4335
4336 /* Load an integral number of consecutive unaligned quadwords. */
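/* OUT_REGS receives the WORDS reconstructed quadwords; SMEM is the
   (possibly unaligned) source and OFS a constant byte offset.  One
   extra scratch register is allocated internally for the final
   ldq_u.  */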
4337
4338 static void
4339 alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
4340 rtx *out_regs;
4341 rtx smem;
4342 HOST_WIDE_INT words, ofs;
4343 {
4344 rtx const im8 = GEN_INT (-8);
4345 rtx const i64 = GEN_INT (64);
4346 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
4347 rtx sreg, areg, tmp, smema;
4348 HOST_WIDE_INT i;
4349
4350 smema = XEXP (smem, 0);
4351 if (GET_CODE (smema) == LO_SUM)
4352 smema = force_reg (Pmode, smema);
4353
4354 /* Generate all the tmp registers we need. */
4355 for (i = 0; i < words; ++i)
4356 {
4357 data_regs[i] = out_regs[i];
4358 ext_tmps[i] = gen_reg_rtx (DImode);
4359 }
4360 data_regs[words] = gen_reg_rtx (DImode);
4361
4362 if (ofs != 0)
4363 smem = adjust_address (smem, GET_MODE (smem), ofs);
4364
4365 /* Load up all of the source data. */
4366 for (i = 0; i < words; ++i)
4367 {
4368 tmp = change_address (smem, DImode,
4369 gen_rtx_AND (DImode,
4370 plus_constant (smema, 8*i),
4371 im8));
4372 set_mem_alias_set (tmp, 0);
4373 emit_move_insn (data_regs[i], tmp);
4374 }
4375
4376 tmp = change_address (smem, DImode,
4377 gen_rtx_AND (DImode,
4378 plus_constant (smema, 8*words - 1),
4379 im8));
4380 set_mem_alias_set (tmp, 0);
4381 emit_move_insn (data_regs[words], tmp);
4382
4383 /* Extract the half-word fragments. Unfortunately DEC decided to make
4384 extxh with offset zero a noop instead of zeroing the register, so
4385 we must take care of that edge condition ourselves with cmov. */
4386
4387 sreg = copy_addr_to_reg (smema);
4388 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
4389 1, OPTAB_WIDEN);
4390 if (WORDS_BIG_ENDIAN)
4391 emit_move_insn (sreg, plus_constant (sreg, 7));
4392 for (i = 0; i < words; ++i)
4393 {
4394 if (WORDS_BIG_ENDIAN)
4395 {
4396 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
4397 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
4398 }
4399 else
4400 {
4401 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
4402 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
4403 }
4404 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
4405 gen_rtx_IF_THEN_ELSE (DImode,
4406 gen_rtx_EQ (DImode, areg,
4407 const0_rtx),
4408 const0_rtx, ext_tmps[i])));
4409 }
4410
4411 /* Merge the half-words into whole words. */
4412 for (i = 0; i < words; ++i)
4413 {
4414 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
4415 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
4416 }
4417 }
4418
4419 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
4420 may be NULL to store zeros. */
4421
4422 static void
4423 alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
4424 rtx *data_regs;
4425 rtx dmem;
4426 HOST_WIDE_INT words, ofs;
4427 {
4428 rtx const im8 = GEN_INT (-8);
4429 rtx const i64 = GEN_INT (64);
4430 rtx ins_tmps[MAX_MOVE_WORDS];
4431 rtx st_tmp_1, st_tmp_2, dreg;
4432 rtx st_addr_1, st_addr_2, dmema;
4433 HOST_WIDE_INT i;
4434
4435 dmema = XEXP (dmem, 0);
4436 if (GET_CODE (dmema) == LO_SUM)
4437 dmema = force_reg (Pmode, dmema);
4438
4439 /* Generate all the tmp registers we need. */
4440 if (data_regs != NULL)
4441 for (i = 0; i < words; ++i)
4442 ins_tmps[i] = gen_reg_rtx (DImode);
4443 st_tmp_1 = gen_reg_rtx (DImode);
4444 st_tmp_2 = gen_reg_rtx (DImode);
4445
4446 if (ofs != 0)
4447 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
4448
4449 st_addr_2 = change_address (dmem, DImode,
4450 gen_rtx_AND (DImode,
4451 plus_constant (dmema, words*8 - 1),
4452 im8));
4453 set_mem_alias_set (st_addr_2, 0);
4454
4455 st_addr_1 = change_address (dmem, DImode,
4456 gen_rtx_AND (DImode, dmema, im8));
4457 set_mem_alias_set (st_addr_1, 0);
4458
4459 /* Load up the destination end bits. */
4460 emit_move_insn (st_tmp_2, st_addr_2);
4461 emit_move_insn (st_tmp_1, st_addr_1);
4462
4463 /* Shift the input data into place. */
4464 dreg = copy_addr_to_reg (dmema);
4465 if (WORDS_BIG_ENDIAN)
4466 emit_move_insn (dreg, plus_constant (dreg, 7));
4467 if (data_regs != NULL)
4468 {
4469 for (i = words-1; i >= 0; --i)
4470 {
4471 if (WORDS_BIG_ENDIAN)
4472 {
4473 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
4474 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
4475 }
4476 else
4477 {
4478 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
4479 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
4480 }
4481 }
4482 for (i = words-1; i > 0; --i)
4483 {
4484 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
4485 ins_tmps[i-1], ins_tmps[i-1], 1,
4486 OPTAB_WIDEN);
4487 }
4488 }
4489
4490 /* Split and merge the ends with the destination data. */
4491 if (WORDS_BIG_ENDIAN)
4492 {
4493 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
4494 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
4495 }
4496 else
4497 {
4498 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
4499 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
4500 }
4501
4502 if (data_regs != NULL)
4503 {
4504 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
4505 st_tmp_2, 1, OPTAB_WIDEN);
4506 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
4507 st_tmp_1, 1, OPTAB_WIDEN);
4508 }
4509
4510 /* Store it all. */
4511 if (WORDS_BIG_ENDIAN)
4512 emit_move_insn (st_addr_1, st_tmp_1);
4513 else
4514 emit_move_insn (st_addr_2, st_tmp_2);
4515 for (i = words-1; i > 0; --i)
4516 {
4517 rtx tmp = change_address (dmem, DImode,
4518 gen_rtx_AND (DImode,
4519 plus_constant (dmema,
4520 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
4521 im8));
4522 set_mem_alias_set (tmp, 0);
4523 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
4524 }
4525 if (WORDS_BIG_ENDIAN)
4526 emit_move_insn (st_addr_2, st_tmp_2);
4527 else
4528 emit_move_insn (st_addr_1, st_tmp_1);
4529 }
4530
4531
4532 /* Expand string/block move operations.
4533
4534 operands[0] is the pointer to the destination.
4535 operands[1] is the pointer to the source.
4536 operands[2] is the number of bytes to move.
4537 operands[3] is the alignment. */
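/* Returns 1 if the move was expanded inline, 0 if the caller should
   fall back on a library call.  (Schematically, the movstrqi expander
   in alpha.md does "if (alpha_expand_block_move (operands)) DONE;
   else FAIL;" -- a sketch, not a verbatim quote.)  */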
4538
4539 int
4540 alpha_expand_block_move (operands)
4541 rtx operands[];
4542 {
4543 rtx bytes_rtx = operands[2];
4544 rtx align_rtx = operands[3];
4545 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4546 HOST_WIDE_INT bytes = orig_bytes;
4547 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
4548 HOST_WIDE_INT dst_align = src_align;
4549 rtx orig_src = operands[1];
4550 rtx orig_dst = operands[0];
4551 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
4552 rtx tmp;
4553 unsigned int i, words, ofs, nregs = 0;
4554
4555 if (orig_bytes <= 0)
4556 return 1;
4557 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4558 return 0;
4559
4560 /* Look for additional alignment information from recorded register info. */
4561
4562 tmp = XEXP (orig_src, 0);
4563 if (GET_CODE (tmp) == REG)
4564 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4565 else if (GET_CODE (tmp) == PLUS
4566 && GET_CODE (XEXP (tmp, 0)) == REG
4567 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4568 {
4569 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4570 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4571
4572 if (a > src_align)
4573 {
4574 if (a >= 64 && c % 8 == 0)
4575 src_align = 64;
4576 else if (a >= 32 && c % 4 == 0)
4577 src_align = 32;
4578 else if (a >= 16 && c % 2 == 0)
4579 src_align = 16;
4580 }
4581 }
4582
4583 tmp = XEXP (orig_dst, 0);
4584 if (GET_CODE (tmp) == REG)
4585 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4586 else if (GET_CODE (tmp) == PLUS
4587 && GET_CODE (XEXP (tmp, 0)) == REG
4588 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4589 {
4590 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4591 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4592
4593 if (a > dst_align)
4594 {
4595 if (a >= 64 && c % 8 == 0)
4596 dst_align = 64;
4597 else if (a >= 32 && c % 4 == 0)
4598 dst_align = 32;
4599 else if (a >= 16 && c % 2 == 0)
4600 dst_align = 16;
4601 }
4602 }
4603
4604 /* Load the entire block into registers. */
4605 if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)
4606 {
4607 enum machine_mode mode;
4608
4609 tmp = XEXP (XEXP (orig_src, 0), 0);
4610
4611 /* Don't use the existing register if we're reading more than
4612 is held in the register. Nor if there is not a mode that
4613 handles the exact size. */
4614 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
4615 if (mode != BLKmode
4616 && GET_MODE_SIZE (GET_MODE (tmp)) >= bytes)
4617 {
4618 if (mode == TImode)
4619 {
4620 data_regs[nregs] = gen_lowpart (DImode, tmp);
4621 data_regs[nregs + 1] = gen_highpart (DImode, tmp);
4622 nregs += 2;
4623 }
4624 else
4625 data_regs[nregs++] = gen_lowpart (mode, tmp);
4626
4627 goto src_done;
4628 }
4629
4630 /* No appropriate mode; fall back on memory. */
4631 orig_src = replace_equiv_address (orig_src,
4632 copy_addr_to_reg (XEXP (orig_src, 0)));
4633 src_align = GET_MODE_BITSIZE (GET_MODE (tmp));
4634 }
4635
4636 ofs = 0;
4637 if (src_align >= 64 && bytes >= 8)
4638 {
4639 words = bytes / 8;
4640
4641 for (i = 0; i < words; ++i)
4642 data_regs[nregs + i] = gen_reg_rtx (DImode);
4643
4644 for (i = 0; i < words; ++i)
4645 emit_move_insn (data_regs[nregs + i],
4646 adjust_address (orig_src, DImode, ofs + i * 8));
4647
4648 nregs += words;
4649 bytes -= words * 8;
4650 ofs += words * 8;
4651 }
4652
4653 if (src_align >= 32 && bytes >= 4)
4654 {
4655 words = bytes / 4;
4656
4657 for (i = 0; i < words; ++i)
4658 data_regs[nregs + i] = gen_reg_rtx (SImode);
4659
4660 for (i = 0; i < words; ++i)
4661 emit_move_insn (data_regs[nregs + i],
4662 adjust_address (orig_src, SImode, ofs + i * 4));
4663
4664 nregs += words;
4665 bytes -= words * 4;
4666 ofs += words * 4;
4667 }
4668
4669 if (bytes >= 8)
4670 {
4671 words = bytes / 8;
4672
4673 for (i = 0; i < words+1; ++i)
4674 data_regs[nregs + i] = gen_reg_rtx (DImode);
4675
4676 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
4677 words, ofs);
4678
4679 nregs += words;
4680 bytes -= words * 8;
4681 ofs += words * 8;
4682 }
4683
4684 if (! TARGET_BWX && bytes >= 4)
4685 {
4686 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
4687 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
4688 bytes -= 4;
4689 ofs += 4;
4690 }
4691
4692 if (bytes >= 2)
4693 {
4694 if (src_align >= 16)
4695 {
4696 do {
4697 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
4698 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
4699 bytes -= 2;
4700 ofs += 2;
4701 } while (bytes >= 2);
4702 }
4703 else if (! TARGET_BWX)
4704 {
4705 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
4706 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
4707 bytes -= 2;
4708 ofs += 2;
4709 }
4710 }
4711
4712 while (bytes > 0)
4713 {
4714 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
4715 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
4716 bytes -= 1;
4717 ofs += 1;
4718 }
4719
4720 src_done:
4721
4722 if (nregs > ARRAY_SIZE (data_regs))
4723 abort ();
4724
4725 /* Now save it back out again. */
4726
4727 i = 0, ofs = 0;
4728
4729 if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)
4730 {
4731 enum machine_mode mode;
4732 tmp = XEXP (XEXP (orig_dst, 0), 0);
4733
4734 mode = mode_for_size (orig_bytes * BITS_PER_UNIT, MODE_INT, 1);
4735 if (GET_MODE (tmp) == mode)
4736 {
4737 if (nregs == 1)
4738 {
4739 emit_move_insn (tmp, data_regs[0]);
4740 i = 1;
4741 goto dst_done;
4742 }
4743
4744 else if (nregs == 2 && mode == TImode)
4745 {
4746 /* Undo the subregging done above when copying between
4747 two TImode registers. */
4748 if (GET_CODE (data_regs[0]) == SUBREG
4749 && GET_MODE (SUBREG_REG (data_regs[0])) == TImode)
4750 emit_move_insn (tmp, SUBREG_REG (data_regs[0]));
4751 else
4752 {
4753 rtx seq;
4754
4755 start_sequence ();
4756 emit_move_insn (gen_lowpart (DImode, tmp), data_regs[0]);
4757 emit_move_insn (gen_highpart (DImode, tmp), data_regs[1]);
4758 seq = get_insns ();
4759 end_sequence ();
4760
4761 emit_no_conflict_block (seq, tmp, data_regs[0],
4762 data_regs[1], NULL_RTX);
4763 }
4764
4765 i = 2;
4766 goto dst_done;
4767 }
4768 }
4769
4770 /* ??? If nregs > 1, consider reconstructing the word in regs. */
4771 /* ??? Optimize mode < dst_mode with strict_low_part. */
4772
4773 /* No appropriate mode; fall back on memory. We can speed things
4774 up by recognizing extra alignment information. */
4775 orig_dst = replace_equiv_address (orig_dst,
4776 copy_addr_to_reg (XEXP (orig_dst, 0)));
4777 dst_align = GET_MODE_BITSIZE (GET_MODE (tmp));
4778 }
4779
4780 /* Write out the data in whatever chunks reading the source allowed. */
4781 if (dst_align >= 64)
4782 {
4783 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4784 {
4785 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4786 data_regs[i]);
4787 ofs += 8;
4788 i++;
4789 }
4790 }
4791
4792 if (dst_align >= 32)
4793 {
4794 /* If the source has remaining DImode regs, write them out in
4795 two pieces. */
4796 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4797 {
4798 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4799 NULL_RTX, 1, OPTAB_WIDEN);
4800
4801 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4802 gen_lowpart (SImode, data_regs[i]));
4803 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4804 gen_lowpart (SImode, tmp));
4805 ofs += 8;
4806 i++;
4807 }
4808
4809 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4810 {
4811 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4812 data_regs[i]);
4813 ofs += 4;
4814 i++;
4815 }
4816 }
4817
4818 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4819 {
4820 /* Write out a remaining block of words using unaligned methods. */
4821
4822 for (words = 1; i + words < nregs; words++)
4823 if (GET_MODE (data_regs[i + words]) != DImode)
4824 break;
4825
4826 if (words == 1)
4827 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4828 else
4829 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4830 words, ofs);
4831
4832 i += words;
4833 ofs += words * 8;
4834 }
4835
4836 /* Due to the above, this won't be aligned. */
4837 /* ??? If we have more than one of these, consider constructing full
4838 words in registers and using alpha_expand_unaligned_store_words. */
4839 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4840 {
4841 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4842 ofs += 4;
4843 i++;
4844 }
4845
4846 if (dst_align >= 16)
4847 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4848 {
4849 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4850 i++;
4851 ofs += 2;
4852 }
4853 else
4854 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4855 {
4856 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4857 i++;
4858 ofs += 2;
4859 }
4860
4861 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
4862 {
4863 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4864 i++;
4865 ofs += 1;
4866 }
4867
4868 dst_done:
4869
4870 if (i != nregs)
4871 abort ();
4872
4873 return 1;
4874 }
4875
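/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[2] is the alignment.

   As with alpha_expand_block_move, returns 1 on success and 0 to
   fall back on a library call.  */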
4876 int
4877 alpha_expand_block_clear (operands)
4878 rtx operands[];
4879 {
4880 rtx bytes_rtx = operands[1];
4881 rtx align_rtx = operands[2];
4882 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4883 HOST_WIDE_INT bytes = orig_bytes;
4884 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4885 HOST_WIDE_INT alignofs = 0;
4886 rtx orig_dst = operands[0];
4887 rtx tmp;
4888 int i, words, ofs = 0;
4889
4890 if (orig_bytes <= 0)
4891 return 1;
4892 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4893 return 0;
4894
4895 /* Look for stricter alignment. */
4896 tmp = XEXP (orig_dst, 0);
4897 if (GET_CODE (tmp) == REG)
4898 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4899 else if (GET_CODE (tmp) == PLUS
4900 && GET_CODE (XEXP (tmp, 0)) == REG
4901 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4902 {
4903 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4904 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4905
4906 if (a > align)
4907 {
4908 if (a >= 64)
4909 align = a, alignofs = 8 - c % 8;
4910 else if (a >= 32)
4911 align = a, alignofs = 4 - c % 4;
4912 else if (a >= 16)
4913 align = a, alignofs = 2 - c % 2;
4914 }
4915 }
4916 else if (GET_CODE (tmp) == ADDRESSOF)
4917 {
4918 enum machine_mode mode;
4919
4920 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
4921 if (GET_MODE (XEXP (tmp, 0)) == mode)
4922 {
4923 emit_move_insn (XEXP (tmp, 0), const0_rtx);
4924 return 1;
4925 }
4926
4927 /* No appropriate mode; fall back on memory. */
4928 orig_dst = replace_equiv_address (orig_dst, copy_addr_to_reg (tmp));
4929 align = GET_MODE_BITSIZE (GET_MODE (XEXP (tmp, 0)));
4930 }
4931
4932 /* Handle an unaligned prefix first. */
4933
4934 if (alignofs > 0)
4935 {
4936 #if HOST_BITS_PER_WIDE_INT >= 64
4937 /* Given that alignofs is bounded by align, the only time BWX could
4938 generate three stores is for a 7-byte fill. Prefer two individual
4939 stores over a load/mask/store sequence. */
4940 if ((!TARGET_BWX || alignofs == 7)
4941 && align >= 32
4942 && !(alignofs == 4 && bytes >= 4))
4943 {
4944 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4945 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4946 rtx mem, tmp;
4947 HOST_WIDE_INT mask;
4948
4949 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4950 set_mem_alias_set (mem, 0);
4951
4952 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4953 if (bytes < alignofs)
4954 {
4955 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4956 ofs += bytes;
4957 bytes = 0;
4958 }
4959 else
4960 {
4961 bytes -= alignofs;
4962 ofs += alignofs;
4963 }
4964 alignofs = 0;
4965
4966 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4967 NULL_RTX, 1, OPTAB_WIDEN);
4968
4969 emit_move_insn (mem, tmp);
4970 }
4971 #endif
4972
4973 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4974 {
4975 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4976 bytes -= 1;
4977 ofs += 1;
4978 alignofs -= 1;
4979 }
4980 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4981 {
4982 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4983 bytes -= 2;
4984 ofs += 2;
4985 alignofs -= 2;
4986 }
4987 if (alignofs == 4 && bytes >= 4)
4988 {
4989 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4990 bytes -= 4;
4991 ofs += 4;
4992 alignofs = 0;
4993 }
4994
4995 /* If we've not used the extra lead alignment information by now,
4996 we won't be able to. Downgrade align to match what's left over. */
4997 if (alignofs > 0)
4998 {
4999 alignofs = alignofs & -alignofs;
5000 align = MIN (align, alignofs * BITS_PER_UNIT);
5001 }
5002 }
5003
5004 /* Handle a block of contiguous long-words. */
5005
5006 if (align >= 64 && bytes >= 8)
5007 {
5008 words = bytes / 8;
5009
5010 for (i = 0; i < words; ++i)
5011 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
5012 const0_rtx);
5013
5014 bytes -= words * 8;
5015 ofs += words * 8;
5016 }
5017
5018 /* If the block is large and appropriately aligned, emit a single
5019 store followed by a sequence of stq_u insns. */
5020
5021 if (align >= 32 && bytes > 16)
5022 {
5023 rtx orig_dsta;
5024
5025 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
5026 bytes -= 4;
5027 ofs += 4;
5028
5029 orig_dsta = XEXP (orig_dst, 0);
5030 if (GET_CODE (orig_dsta) == LO_SUM)
5031 orig_dsta = force_reg (Pmode, orig_dsta);
5032
5033 words = bytes / 8;
5034 for (i = 0; i < words; ++i)
5035 {
5036 rtx mem
5037 = change_address (orig_dst, DImode,
5038 gen_rtx_AND (DImode,
5039 plus_constant (orig_dsta, ofs + i*8),
5040 GEN_INT (-8)));
5041 set_mem_alias_set (mem, 0);
5042 emit_move_insn (mem, const0_rtx);
5043 }
5044
5045 /* Depending on the alignment, the first stq_u may have overlapped
5046 with the initial stl, which means that the last stq_u didn't
5047 write as much as it would appear. Leave those questionable bytes
5048 unaccounted for. */
5049 bytes -= words * 8 - 4;
5050 ofs += words * 8 - 4;
5051 }
5052
5053 /* Handle a smaller block of aligned words. */
5054
5055 if ((align >= 64 && bytes == 4)
5056 || (align == 32 && bytes >= 4))
5057 {
5058 words = bytes / 4;
5059
5060 for (i = 0; i < words; ++i)
5061 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
5062 const0_rtx);
5063
5064 bytes -= words * 4;
5065 ofs += words * 4;
5066 }
5067
5068 /* An unaligned block uses stq_u stores for as many as possible. */
5069
5070 if (bytes >= 8)
5071 {
5072 words = bytes / 8;
5073
5074 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
5075
5076 bytes -= words * 8;
5077 ofs += words * 8;
5078 }
5079
5080 /* Next clean up any trailing pieces. */
5081
5082 #if HOST_BITS_PER_WIDE_INT >= 64
5083 /* Count the number of bits in BYTES for which aligned stores could
5084 be emitted. */
5085 words = 0;
5086 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
5087 if (bytes & i)
5088 words += 1;
5089
5090 /* If we have appropriate alignment (and it wouldn't take too many
5091 instructions otherwise), mask out the bytes we need. */
5092 if (TARGET_BWX ? words > 2 : bytes > 0)
5093 {
5094 if (align >= 64)
5095 {
5096 rtx mem, tmp;
5097 HOST_WIDE_INT mask;
5098
5099 mem = adjust_address (orig_dst, DImode, ofs);
5100 set_mem_alias_set (mem, 0);
5101
5102 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
5103
5104 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
5105 NULL_RTX, 1, OPTAB_WIDEN);
5106
5107 emit_move_insn (mem, tmp);
5108 return 1;
5109 }
5110 else if (align >= 32 && bytes < 4)
5111 {
5112 rtx mem, tmp;
5113 HOST_WIDE_INT mask;
5114
5115 mem = adjust_address (orig_dst, SImode, ofs);
5116 set_mem_alias_set (mem, 0);
5117
5118 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
5119
5120 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
5121 NULL_RTX, 1, OPTAB_WIDEN);
5122
5123 emit_move_insn (mem, tmp);
5124 return 1;
5125 }
5126 }
5127 #endif
5128
5129 if (!TARGET_BWX && bytes >= 4)
5130 {
5131 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
5132 bytes -= 4;
5133 ofs += 4;
5134 }
5135
5136 if (bytes >= 2)
5137 {
5138 if (align >= 16)
5139 {
5140 do {
5141 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
5142 const0_rtx);
5143 bytes -= 2;
5144 ofs += 2;
5145 } while (bytes >= 2);
5146 }
5147 else if (! TARGET_BWX)
5148 {
5149 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
5150 bytes -= 2;
5151 ofs += 2;
5152 }
5153 }
5154
5155 while (bytes > 0)
5156 {
5157 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
5158 bytes -= 1;
5159 ofs += 1;
5160 }
5161
5162 return 1;
5163 }
5164
5165 /* Returns a mask so that zap(x, value) == x & mask. */
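/* E.g., alpha_expand_zap_mask (0x0f) yields 0xffffffff00000000; the
   low four bytes -- those selected by the low four bits of VALUE --
   are the ones the zap clears.  */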
5166
5167 rtx
5168 alpha_expand_zap_mask (value)
5169 HOST_WIDE_INT value;
5170 {
5171 rtx result;
5172 int i;
5173
5174 if (HOST_BITS_PER_WIDE_INT >= 64)
5175 {
5176 HOST_WIDE_INT mask = 0;
5177
5178 for (i = 7; i >= 0; --i)
5179 {
5180 mask <<= 8;
5181 if (!((value >> i) & 1))
5182 mask |= 0xff;
5183 }
5184
5185 result = gen_int_mode (mask, DImode);
5186 }
5187 else if (HOST_BITS_PER_WIDE_INT == 32)
5188 {
5189 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
5190
5191 for (i = 7; i >= 4; --i)
5192 {
5193 mask_hi <<= 8;
5194 if (!((value >> i) & 1))
5195 mask_hi |= 0xff;
5196 }
5197
5198 for (i = 3; i >= 0; --i)
5199 {
5200 mask_lo <<= 8;
5201 if (!((value >> i) & 1))
5202 mask_lo |= 0xff;
5203 }
5204
5205 result = immed_double_const (mask_lo, mask_hi, DImode);
5206 }
5207 else
5208 abort ();
5209
5210 return result;
5211 }
5212
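/* Expand a builtin vector binary operation: emit GEN applied to OP1
   and OP2, storing to OP0.  A const0_rtx operand is widened to the
   all-zero vector constant of MODE; other operands are simply
   reinterpreted in MODE via gen_lowpart.  */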
5213 void
5214 alpha_expand_builtin_vector_binop (gen, mode, op0, op1, op2)
5215 rtx (*gen) PARAMS ((rtx, rtx, rtx));
5216 enum machine_mode mode;
5217 rtx op0, op1, op2;
5218 {
5219 op0 = gen_lowpart (mode, op0);
5220
5221 if (op1 == const0_rtx)
5222 op1 = CONST0_RTX (mode);
5223 else
5224 op1 = gen_lowpart (mode, op1);
5225
5226 if (op2 == const0_rtx)
5227 op2 = CONST0_RTX (mode);
5228 else
5229 op2 = gen_lowpart (mode, op2);
5230
5231 emit_insn ((*gen) (op0, op1, op2));
5232 }
5233 \f
5234 /* Adjust the cost of a scheduling dependency. Return the new cost of
5235 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5236
5237 static int
5238 alpha_adjust_cost (insn, link, dep_insn, cost)
5239 rtx insn;
5240 rtx link;
5241 rtx dep_insn;
5242 int cost;
5243 {
5244 enum attr_type insn_type, dep_insn_type;
5245
5246 /* If the dependence is an anti-dependence, there is no cost. For an
5247 output dependence, there is sometimes a cost, but it doesn't seem
5248 worth handling those few cases. */
5249 if (REG_NOTE_KIND (link) != 0)
5250 return cost;
5251
5252 /* If we can't recognize the insns, we can't really do anything. */
5253 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
5254 return cost;
5255
5256 insn_type = get_attr_type (insn);
5257 dep_insn_type = get_attr_type (dep_insn);
5258
5259 /* Bring in the user-defined memory latency. */
5260 if (dep_insn_type == TYPE_ILD
5261 || dep_insn_type == TYPE_FLD
5262 || dep_insn_type == TYPE_LDSYM)
5263 cost += alpha_memory_latency-1;
5264
5265 /* Everything else handled in DFA bypasses now. */
5266
5267 return cost;
5268 }
5269
5270 /* The number of instructions that can be issued per cycle. */
5271
5272 static int
5273 alpha_issue_rate ()
5274 {
5275 return (alpha_cpu == PROCESSOR_EV4 ? 2 : 4);
5276 }
5277
5278 static int
5279 alpha_use_dfa_pipeline_interface ()
5280 {
5281 return true;
5282 }
5283
5284 /* How many alternative schedules to try. This should be as wide as the
5285 scheduling freedom in the DFA, but no wider. Making this value too
5286 large results in extra work for the scheduler.
5287
5288 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
5289 alternative schedules. For EV5, we can choose between E0/E1 and
5290 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
5291
5292 static int
5293 alpha_multipass_dfa_lookahead ()
5294 {
5295 return (alpha_cpu == PROCESSOR_EV6 ? 4 : 2);
5296 }
5297 \f
5298 /* Machine-specific function data. */
5299
5300 struct machine_function GTY(())
5301 {
5302 /* For unicosmk. */
5303 /* List of call information words for calls from this function. */
5304 struct rtx_def *first_ciw;
5305 struct rtx_def *last_ciw;
5306 int ciw_count;
5307
5308 /* List of deferred case vectors. */
5309 struct rtx_def *addr_list;
5310
5311 /* For OSF. */
5312 const char *some_ld_name;
5313 };
5314
5315 /* How to allocate a 'struct machine_function'. */
5316
5317 static struct machine_function *
5318 alpha_init_machine_status ()
5319 {
5320 return ((struct machine_function *)
5321 ggc_alloc_cleared (sizeof (struct machine_function)));
5322 }
5323
5324 /* Functions to save and restore alpha_return_addr_rtx. */
5325
5326 /* Start the ball rolling with RETURN_ADDR_RTX. */
5327
5328 rtx
5329 alpha_return_addr (count, frame)
5330 int count;
5331 rtx frame ATTRIBUTE_UNUSED;
5332 {
5333 if (count != 0)
5334 return const0_rtx;
5335
5336 return get_hard_reg_initial_val (Pmode, REG_RA);
5337 }
5338
5339 /* Return or create a pseudo containing the gp value for the current
5340 function. Needed only if TARGET_LD_BUGGY_LDGP. */
5341
5342 rtx
5343 alpha_gp_save_rtx ()
5344 {
5345 rtx r = get_hard_reg_initial_val (DImode, 29);
5346 if (GET_CODE (r) != MEM)
5347 r = gen_mem_addressof (r, NULL_TREE);
5348 return r;
5349 }
5350
5351 static int
5352 alpha_ra_ever_killed ()
5353 {
5354 rtx top;
5355
5356 if (!has_hard_reg_initial_val (Pmode, REG_RA))
5357 return regs_ever_live[REG_RA];
5358
5359 push_topmost_sequence ();
5360 top = get_insns ();
5361 pop_topmost_sequence ();
5362
5363 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
5364 }
5365
5366 \f
5367 /* Return the trap mode suffix applicable to the current
5368 instruction, or NULL. */
5369
5370 static const char *
5371 get_trap_mode_suffix ()
5372 {
5373 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
5374
5375 switch (s)
5376 {
5377 case TRAP_SUFFIX_NONE:
5378 return NULL;
5379
5380 case TRAP_SUFFIX_SU:
5381 if (alpha_fptm >= ALPHA_FPTM_SU)
5382 return "su";
5383 return NULL;
5384
5385 case TRAP_SUFFIX_SUI:
5386 if (alpha_fptm >= ALPHA_FPTM_SUI)
5387 return "sui";
5388 return NULL;
5389
5390 case TRAP_SUFFIX_V_SV:
5391 switch (alpha_fptm)
5392 {
5393 case ALPHA_FPTM_N:
5394 return NULL;
5395 case ALPHA_FPTM_U:
5396 return "v";
5397 case ALPHA_FPTM_SU:
5398 case ALPHA_FPTM_SUI:
5399 return "sv";
5400 }
5401 break;
5402
5403 case TRAP_SUFFIX_V_SV_SVI:
5404 switch (alpha_fptm)
5405 {
5406 case ALPHA_FPTM_N:
5407 return NULL;
5408 case ALPHA_FPTM_U:
5409 return "v";
5410 case ALPHA_FPTM_SU:
5411 return "sv";
5412 case ALPHA_FPTM_SUI:
5413 return "svi";
5414 }
5415 break;
5416
5417 case TRAP_SUFFIX_U_SU_SUI:
5418 switch (alpha_fptm)
5419 {
5420 case ALPHA_FPTM_N:
5421 return NULL;
5422 case ALPHA_FPTM_U:
5423 return "u";
5424 case ALPHA_FPTM_SU:
5425 return "su";
5426 case ALPHA_FPTM_SUI:
5427 return "sui";
5428 }
5429 break;
5430 }
5431 abort ();
5432 }
5433
5434 /* Return the rounding mode suffix applicable to the current
5435 instruction, or NULL. */
5436
5437 static const char *
5438 get_round_mode_suffix ()
5439 {
5440 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5441
5442 switch (s)
5443 {
5444 case ROUND_SUFFIX_NONE:
5445 return NULL;
5446 case ROUND_SUFFIX_NORMAL:
5447 switch (alpha_fprm)
5448 {
5449 case ALPHA_FPRM_NORM:
5450 return NULL;
5451 case ALPHA_FPRM_MINF:
5452 return "m";
5453 case ALPHA_FPRM_CHOP:
5454 return "c";
5455 case ALPHA_FPRM_DYN:
5456 return "d";
5457 }
5458 break;
5459
5460 case ROUND_SUFFIX_C:
5461 return "c";
5462 }
5463 abort ();
5464 }
5465
5466 /* Locate some local-dynamic symbol still in use by this function
5467 so that we can print its name in some movdi_er_tlsldm pattern. */
5468
5469 static const char *
5470 get_some_local_dynamic_name ()
5471 {
5472 rtx insn;
5473
5474 if (cfun->machine->some_ld_name)
5475 return cfun->machine->some_ld_name;
5476
5477 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5478 if (INSN_P (insn)
5479 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5480 return cfun->machine->some_ld_name;
5481
5482 abort ();
5483 }
5484
5485 static int
5486 get_some_local_dynamic_name_1 (px, data)
5487 rtx *px;
5488 void *data ATTRIBUTE_UNUSED;
5489 {
5490 rtx x = *px;
5491
5492 if (GET_CODE (x) == SYMBOL_REF)
5493 {
5494 const char *str = XSTR (x, 0);
5495 if (str[0] == '@' && str[1] == 'D')
5496 {
5497 cfun->machine->some_ld_name = str;
5498 return 1;
5499 }
5500 }
5501
5502 return 0;
5503 }
5504
5505 /* Print an operand. Recognize special options, documented below. */
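/* For instance, given the CONST_INT 0x12345678, '%h' prints 4660 (the
   high 16 bits) and '%L' prints 22136 (the low 16 bits, sign-extended);
   '%r' prints "$31" for a zero operand so that insn templates can name
   the hard zero register.  */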
5506
5507 void
5508 print_operand (file, x, code)
5509 FILE *file;
5510 rtx x;
5511 int code;
5512 {
5513 int i;
5514
5515 switch (code)
5516 {
5517 case '~':
5518 /* Print the assembler name of the current function. */
5519 assemble_name (file, alpha_fnname);
5520 break;
5521
5522 case '&':
5523 assemble_name (file, get_some_local_dynamic_name ());
5524 break;
5525
5526 case '/':
5527 {
5528 const char *trap = get_trap_mode_suffix ();
5529 const char *round = get_round_mode_suffix ();
5530
5531 if (trap || round)
5532 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5533 (trap ? trap : ""), (round ? round : ""));
5534 break;
5535 }
5536
5537 case ',':
5538 /* Generates single precision instruction suffix. */
5539 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5540 break;
5541
5542 case '-':
5543 /* Generates double precision instruction suffix. */
5544 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5545 break;
5546
5547 case '#':
5548 if (alpha_this_literal_sequence_number == 0)
5549 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5550 fprintf (file, "%d", alpha_this_literal_sequence_number);
5551 break;
5552
5553 case '*':
5554 if (alpha_this_gpdisp_sequence_number == 0)
5555 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5556 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5557 break;
5558
5559 case 'H':
5560 if (GET_CODE (x) == HIGH)
5561 output_addr_const (file, XEXP (x, 0));
5562 else
5563 output_operand_lossage ("invalid %%H value");
5564 break;
5565
5566 case 'J':
5567 {
5568 const char *lituse;
5569
5570 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5571 {
5572 x = XVECEXP (x, 0, 0);
5573 lituse = "lituse_tlsgd";
5574 }
5575 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5576 {
5577 x = XVECEXP (x, 0, 0);
5578 lituse = "lituse_tlsldm";
5579 }
5580 else if (GET_CODE (x) == CONST_INT)
5581 lituse = "lituse_jsr";
5582 else
5583 {
5584 output_operand_lossage ("invalid %%J value");
5585 break;
5586 }
5587
5588 if (x != const0_rtx)
5589 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5590 }
5591 break;
5592
5593 case 'r':
5594 /* If this operand is the constant zero, write it as "$31". */
5595 if (GET_CODE (x) == REG)
5596 fprintf (file, "%s", reg_names[REGNO (x)]);
5597 else if (x == CONST0_RTX (GET_MODE (x)))
5598 fprintf (file, "$31");
5599 else
5600 output_operand_lossage ("invalid %%r value");
5601 break;
5602
5603 case 'R':
5604 /* Similar, but for floating-point. */
5605 if (GET_CODE (x) == REG)
5606 fprintf (file, "%s", reg_names[REGNO (x)]);
5607 else if (x == CONST0_RTX (GET_MODE (x)))
5608 fprintf (file, "$f31");
5609 else
5610 output_operand_lossage ("invalid %%R value");
5611 break;
5612
5613 case 'N':
5614 /* Write the 1's complement of a constant. */
5615 if (GET_CODE (x) != CONST_INT)
5616 output_operand_lossage ("invalid %%N value");
5617
5618 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5619 break;
5620
5621 case 'P':
5622 /* Write 1 << C, for a constant C. */
5623 if (GET_CODE (x) != CONST_INT)
5624 output_operand_lossage ("invalid %%P value");
5625
5626 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5627 break;
5628
5629 case 'h':
5630 /* Write the high-order 16 bits of a constant, sign-extended. */
5631 if (GET_CODE (x) != CONST_INT)
5632 output_operand_lossage ("invalid %%h value");
5633
5634 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5635 break;
5636
5637 case 'L':
5638 /* Write the low-order 16 bits of a constant, sign-extended. */
5639 if (GET_CODE (x) != CONST_INT)
5640 output_operand_lossage ("invalid %%L value");
5641
5642 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5643 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5644 break;
5645
5646 case 'm':
5647 /* Write mask for ZAP insn. */
5648 if (GET_CODE (x) == CONST_DOUBLE)
5649 {
5650 HOST_WIDE_INT mask = 0;
5651 HOST_WIDE_INT value;
5652
5653 value = CONST_DOUBLE_LOW (x);
5654 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5655 i++, value >>= 8)
5656 if (value & 0xff)
5657 mask |= (1 << i);
5658
5659 value = CONST_DOUBLE_HIGH (x);
5660 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5661 i++, value >>= 8)
5662 if (value & 0xff)
5663 mask |= (1 << (i + sizeof (int)));
5664
5665 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5666 }
5667
5668 else if (GET_CODE (x) == CONST_INT)
5669 {
5670 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5671
5672 for (i = 0; i < 8; i++, value >>= 8)
5673 if (value & 0xff)
5674 mask |= (1 << i);
5675
5676 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5677 }
5678 else
5679 output_operand_lossage ("invalid %%m value");
5680 break;
5681
5682 case 'M':
5683 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5684 if (GET_CODE (x) != CONST_INT
5685 || (INTVAL (x) != 8 && INTVAL (x) != 16
5686 && INTVAL (x) != 32 && INTVAL (x) != 64))
5687 output_operand_lossage ("invalid %%M value");
5688
5689 fprintf (file, "%s",
5690 (INTVAL (x) == 8 ? "b"
5691 : INTVAL (x) == 16 ? "w"
5692 : INTVAL (x) == 32 ? "l"
5693 : "q"));
5694 break;
5695
5696 case 'U':
5697 /* Similar, except do it from the mask. */
5698 if (GET_CODE (x) == CONST_INT)
5699 {
5700 HOST_WIDE_INT value = INTVAL (x);
5701
5702 if (value == 0xff)
5703 {
5704 fputc ('b', file);
5705 break;
5706 }
5707 if (value == 0xffff)
5708 {
5709 fputc ('w', file);
5710 break;
5711 }
5712 if (value == 0xffffffff)
5713 {
5714 fputc ('l', file);
5715 break;
5716 }
5717 if (value == -1)
5718 {
5719 fputc ('q', file);
5720 break;
5721 }
5722 }
5723 else if (HOST_BITS_PER_WIDE_INT == 32
5724 && GET_CODE (x) == CONST_DOUBLE
5725 && CONST_DOUBLE_LOW (x) == 0xffffffff
5726 && CONST_DOUBLE_HIGH (x) == 0)
5727 {
5728 fputc ('l', file);
5729 break;
5730 }
5731 output_operand_lossage ("invalid %%U value");
5732 break;
5733
5734 case 's':
5735 /* Write the constant value divided by 8 for little-endian mode or
5736 (56 - value) / 8 for big-endian mode. */
5737
5738 if (GET_CODE (x) != CONST_INT
5739 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5740 ? 56
5741 : 64)
5742 || (INTVAL (x) & 7) != 0)
5743 output_operand_lossage ("invalid %%s value");
5744
5745 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5746 WORDS_BIG_ENDIAN
5747 ? (56 - INTVAL (x)) / 8
5748 : INTVAL (x) / 8);
5749 break;
5750
5751 case 'S':
5752 /* Same, except compute (64 - c) / 8. */
5753
5754 if (GET_CODE (x) != CONST_INT
5755 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5756 || (INTVAL (x) & 7) != 0)
5757 output_operand_lossage ("invalid %%S value");
5758
5759 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5760 break;
5761
5762 case 't':
5763 {
5764 /* On Unicos/Mk systems: use a DEX expression if the symbol
5765 clashes with a register name. */
5766 int dex = unicosmk_need_dex (x);
5767 if (dex)
5768 fprintf (file, "DEX(%d)", dex);
5769 else
5770 output_addr_const (file, x);
5771 }
5772 break;
5773
5774 case 'C': case 'D': case 'c': case 'd':
5775 /* Write out comparison name. */
5776 {
5777 enum rtx_code c = GET_CODE (x);
5778
5779 if (GET_RTX_CLASS (c) != '<')
5780 output_operand_lossage ("invalid %%C value");
5781
5782 else if (code == 'D')
5783 c = reverse_condition (c);
5784 else if (code == 'c')
5785 c = swap_condition (c);
5786 else if (code == 'd')
5787 c = swap_condition (reverse_condition (c));
5788
5789 if (c == LEU)
5790 fprintf (file, "ule");
5791 else if (c == LTU)
5792 fprintf (file, "ult");
5793 else if (c == UNORDERED)
5794 fprintf (file, "un");
5795 else
5796 fprintf (file, "%s", GET_RTX_NAME (c));
5797 }
5798 break;
5799
5800 case 'E':
5801 /* Write the divide or modulus operator. */
5802 switch (GET_CODE (x))
5803 {
5804 case DIV:
5805 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5806 break;
5807 case UDIV:
5808 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5809 break;
5810 case MOD:
5811 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5812 break;
5813 case UMOD:
5814 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5815 break;
5816 default:
5817 output_operand_lossage ("invalid %%E value");
5818 break;
5819 }
5820 break;
5821
5822 case 'A':
5823 /* Write "_u" for unaligned access. */
5824 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5825 fprintf (file, "_u");
5826 break;
5827
5828 case 0:
5829 if (GET_CODE (x) == REG)
5830 fprintf (file, "%s", reg_names[REGNO (x)]);
5831 else if (GET_CODE (x) == MEM)
5832 output_address (XEXP (x, 0));
5833 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5834 {
5835 switch (XINT (XEXP (x, 0), 1))
5836 {
5837 case UNSPEC_DTPREL:
5838 case UNSPEC_TPREL:
5839 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5840 break;
5841 default:
5842 output_operand_lossage ("unknown relocation unspec");
5843 break;
5844 }
5845 }
5846 else
5847 output_addr_const (file, x);
5848 break;
5849
5850 default:
5851 output_operand_lossage ("invalid %%xn code");
5852 }
5853 }
5854
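/* Print a memory address operand.  E.g., (plus (reg 30) (const_int 8))
   is printed as "8($30)", while a LO_SUM address also emits the
   appropriate !gprel-style relocation annotation.  */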
5855 void
5856 print_operand_address (file, addr)
5857 FILE *file;
5858 rtx addr;
5859 {
5860 int basereg = 31;
5861 HOST_WIDE_INT offset = 0;
5862
5863 if (GET_CODE (addr) == AND)
5864 addr = XEXP (addr, 0);
5865
5866 if (GET_CODE (addr) == PLUS
5867 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5868 {
5869 offset = INTVAL (XEXP (addr, 1));
5870 addr = XEXP (addr, 0);
5871 }
5872
5873 if (GET_CODE (addr) == LO_SUM)
5874 {
5875 const char *reloc16, *reloclo;
5876 rtx op1 = XEXP (addr, 1);
5877
5878 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5879 {
5880 op1 = XEXP (op1, 0);
5881 switch (XINT (op1, 1))
5882 {
5883 case UNSPEC_DTPREL:
5884 reloc16 = NULL;
5885 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5886 break;
5887 case UNSPEC_TPREL:
5888 reloc16 = NULL;
5889 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5890 break;
5891 default:
5892 output_operand_lossage ("unknown relocation unspec");
5893 return;
5894 }
5895
5896 output_addr_const (file, XVECEXP (op1, 0, 0));
5897 }
5898 else
5899 {
5900 reloc16 = "gprel";
5901 reloclo = "gprellow";
5902 output_addr_const (file, op1);
5903 }
5904
5905 if (offset)
5906 {
5907 fputc ('+', file);
5908 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
5909 }
5910
5911 addr = XEXP (addr, 0);
5912 if (GET_CODE (addr) == REG)
5913 basereg = REGNO (addr);
5914 else if (GET_CODE (addr) == SUBREG
5915 && GET_CODE (SUBREG_REG (addr)) == REG)
5916 basereg = subreg_regno (addr);
5917 else
5918 abort ();
5919
5920 fprintf (file, "($%d)\t\t!%s", basereg,
5921 (basereg == 29 ? reloc16 : reloclo));
5922 return;
5923 }
5924
5925 if (GET_CODE (addr) == REG)
5926 basereg = REGNO (addr);
5927 else if (GET_CODE (addr) == SUBREG
5928 && GET_CODE (SUBREG_REG (addr)) == REG)
5929 basereg = subreg_regno (addr);
5930 else if (GET_CODE (addr) == CONST_INT)
5931 offset = INTVAL (addr);
5932 else
5933 abort ();
5934
5935 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
5936 fprintf (file, "($%d)", basereg);
5937 }
5938 \f
5939 /* Emit RTL insns to initialize the variable parts of a trampoline at
5940 TRAMP. FNADDR is an RTX for the address of the function's pure
5941 code. CXT is an RTX for the static chain value for the function.
5942
5943 The three offset parameters are for the individual template's
5944 layout. A JMPOFS < 0 indicates that the trampoline does not
5945 contain instructions at all.
5946
5947 We assume here that a function will be called many more times than
5948 its address is taken (e.g., it might be passed to qsort), so we
5949 take the trouble to initialize the "hint" field in the JMP insn.
5950 Note that the hint field is PC (new) + 4 * bits 13:0. */
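/* Concretely, the (currently disabled) hint computation below is

       hint = ((fnaddr - (tramp + jmpofs + 4)) >> 2) & 0x3fff

   i.e. the word displacement from the instruction following the JMP,
   truncated to 14 bits.  */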
5951
5952 void
5953 alpha_initialize_trampoline (tramp, fnaddr, cxt, fnofs, cxtofs, jmpofs)
5954 rtx tramp, fnaddr, cxt;
5955 int fnofs, cxtofs, jmpofs;
5956 {
5957 rtx temp, temp1, addr;
5958 /* VMS really uses DImode pointers in memory at this point. */
5959 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5960
5961 #ifdef POINTERS_EXTEND_UNSIGNED
5962 fnaddr = convert_memory_address (mode, fnaddr);
5963 cxt = convert_memory_address (mode, cxt);
5964 #endif
5965
5966 /* Store function address and CXT. */
5967 addr = memory_address (mode, plus_constant (tramp, fnofs));
5968 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5969 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5970 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5971
5972 /* This has been disabled since the hint only has a 32k range, and in
5973 no existing OS is the stack within 32k of the text segment. */
5974 if (0 && jmpofs >= 0)
5975 {
5976 /* Compute hint value. */
5977 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5978 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5979 OPTAB_WIDEN);
5980 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5981 build_int_2 (2, 0), NULL_RTX, 1);
5982 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5983 GEN_INT (0x3fff), 0);
5984
5985 /* Merge in the hint. */
5986 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5987 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5988 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5989 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5990 OPTAB_WIDEN);
5991 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5992 }
5993
5994 #ifdef TRANSFER_FROM_TRAMPOLINE
5995 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
5996 0, VOIDmode, 1, addr, Pmode);
5997 #endif
5998
5999 if (jmpofs >= 0)
6000 emit_insn (gen_imb ());
6001 }
6002 \f
6003 /* Determine where to put an argument to a function.
6004 Value is zero to push the argument on the stack,
6005 or a hard register in which to store the argument.
6006
6007 MODE is the argument's machine mode.
6008 TYPE is the data type of the argument (as a tree).
6009 This is null for libcalls where that information may
6010 not be available.
6011 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6012 the preceding args and about the function being called.
6013 NAMED is nonzero if this argument is a named parameter
6014 (otherwise it is an extra parameter matching an ellipsis).
6015
6016 On Alpha the first 6 words of args are normally in registers
6017 and the rest are pushed. */
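/* With TARGET_FPREGS on OSF, for example, the second argument of a
   function taking (long, double) lands in $f17: CUM is 1 on entry and
   the FP base register is 32 + 16 = 48.  */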
6018
6019 rtx
6020 function_arg (cum, mode, type, named)
6021 CUMULATIVE_ARGS cum;
6022 enum machine_mode mode;
6023 tree type;
6024 int named ATTRIBUTE_UNUSED;
6025 {
6026 int basereg;
6027 int num_args;
6028
6029 /* Set up defaults for FP operands passed in FP registers, and
6030 integral operands passed in integer registers. */
6031 if (TARGET_FPREGS
6032 && (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6033 || GET_MODE_CLASS (mode) == MODE_FLOAT))
6034 basereg = 32 + 16;
6035 else
6036 basereg = 16;
6037
6038 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
6039 the three platforms, so we can't avoid conditional compilation. */
6040 #if TARGET_ABI_OPEN_VMS
6041 {
6042 if (mode == VOIDmode)
6043 return alpha_arg_info_reg_val (cum);
6044
6045 num_args = cum.num_args;
6046 if (num_args >= 6 || MUST_PASS_IN_STACK (mode, type))
6047 return NULL_RTX;
6048 }
6049 #else
6050 #if TARGET_ABI_UNICOSMK
6051 {
6052 int size;
6053
6054 /* If this is the last argument, generate the call info word (CIW). */
6055 /* ??? We don't include the caller's line number in the CIW because
6056 I don't know how to determine it if debug info is turned off. */
6057 if (mode == VOIDmode)
6058 {
6059 int i;
6060 HOST_WIDE_INT lo;
6061 HOST_WIDE_INT hi;
6062 rtx ciw;
6063
6064 lo = 0;
6065
6066 for (i = 0; i < cum.num_reg_words && i < 5; i++)
6067 if (cum.reg_args_type[i])
6068 lo |= (1 << (7 - i));
6069
6070 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
6071 lo |= 7;
6072 else
6073 lo |= cum.num_reg_words;
6074
6075 #if HOST_BITS_PER_WIDE_INT == 32
6076 hi = (cum.num_args << 20) | cum.num_arg_words;
6077 #else
6078 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
6079 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
6080 hi = 0;
6081 #endif
6082 ciw = immed_double_const (lo, hi, DImode);
6083
6084 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
6085 UNSPEC_UMK_LOAD_CIW);
6086 }
6087
6088 size = ALPHA_ARG_SIZE (mode, type, named);
6089 num_args = cum.num_reg_words;
6090 if (MUST_PASS_IN_STACK (mode, type)
6091 || cum.num_reg_words + size > 6 || cum.force_stack)
6092 return NULL_RTX;
6093 else if (type && TYPE_MODE (type) == BLKmode)
6094 {
6095 rtx reg1, reg2;
6096
6097 reg1 = gen_rtx_REG (DImode, num_args + 16);
6098 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
6099
6100 /* The argument fits in two registers. Note that we still need to
6101 reserve a register for empty structures. */
6102 if (size == 0)
6103 return NULL_RTX;
6104 else if (size == 1)
6105 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
6106 else
6107 {
6108 reg2 = gen_rtx_REG (DImode, num_args + 17);
6109 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
6110 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
6111 }
6112 }
6113 }
6114 #else
6115 {
6116 if (cum >= 6)
6117 return NULL_RTX;
6118 num_args = cum;
6119
6120 /* VOID is passed as a special flag for "last argument". */
6121 if (type == void_type_node)
6122 basereg = 16;
6123 else if (MUST_PASS_IN_STACK (mode, type))
6124 return NULL_RTX;
6125 else if (FUNCTION_ARG_PASS_BY_REFERENCE (cum, mode, type, named))
6126 basereg = 16;
6127 }
6128 #endif /* TARGET_ABI_UNICOSMK */
6129 #endif /* TARGET_ABI_OPEN_VMS */
6130
6131 return gen_rtx_REG (mode, num_args + basereg);
6132 }
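/* Illustrative sketch (not part of this file's interface): the hard
   register selected above for the Nth argument slot (0-based) when it
   is passed in registers.  Integer args land in $16+N; FP args in
   $f16+N, whose hard register number is 32 + 16 + N.  */
#if 0
static int
alpha_arg_regno_example (int slot, int is_fp)
{
  return (is_fp ? 32 + 16 : 16) + slot;
}
#endif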
6133
6134 tree
6135 alpha_build_va_list ()
6136 {
6137 tree base, ofs, record, type_decl;
6138
6139 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6140 return ptr_type_node;
6141
6142 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6143 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
6144 TREE_CHAIN (record) = type_decl;
6145 TYPE_NAME (record) = type_decl;
6146
6147 /* C++? SET_IS_AGGR_TYPE (record, 1); */
6148
6149 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
6150 integer_type_node);
6151 DECL_FIELD_CONTEXT (ofs) = record;
6152
6153 base = build_decl (FIELD_DECL, get_identifier ("__base"),
6154 ptr_type_node);
6155 DECL_FIELD_CONTEXT (base) = record;
6156 TREE_CHAIN (base) = ofs;
6157
6158 TYPE_FIELDS (record) = base;
6159 layout_type (record);
6160
6161 return record;
6162 }
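/* For reference (illustrative): the record built above is equivalent
   to this C declaration, with __base preceding __offset.  */
#if 0
typedef struct {
  char *__base;			/* start of the argument save area */
  int __offset;			/* bytes of arguments consumed so far */
} example_va_list;
#endif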
6163
6164 void
6165 alpha_va_start (valist, nextarg)
6166 tree valist;
6167 rtx nextarg ATTRIBUTE_UNUSED;
6168 {
6169 HOST_WIDE_INT offset;
6170 tree t, offset_field, base_field;
6171
6172 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6173 return;
6174
6175 if (TARGET_ABI_UNICOSMK)
6176 { std_expand_builtin_va_start (valist, nextarg); return; }
6177
6178 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base
6179 up by 48, storing fp arg registers in the first 48 bytes, and the
6180 integer arg registers in the next 48 bytes. This is only done,
6181 however, if any integer registers need to be stored.
6182
6183 If no integer registers need be stored, then we must subtract 48
6184 in order to account for the integer arg registers which are counted
6185 in argsize above, but which are not actually stored on the stack. */
6186
6187 if (NUM_ARGS <= 6)
6188 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6189 else
6190 offset = -6 * UNITS_PER_WORD;
6191
6192 if (TARGET_ABI_OPEN_VMS)
6193 {
6194 nextarg = plus_constant (nextarg, offset);
6195 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6196 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
6197 make_tree (ptr_type_node, nextarg));
6198 TREE_SIDE_EFFECTS (t) = 1;
6199
6200 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6201 }
6202 else
6203 {
6204 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6205 offset_field = TREE_CHAIN (base_field);
6206
6207 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6208 valist, base_field);
6209 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6210 valist, offset_field);
6211
6212 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6213 t = build (PLUS_EXPR, ptr_type_node, t, build_int_2 (offset, 0));
6214 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6215 TREE_SIDE_EFFECTS (t) = 1;
6216 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6217
6218 t = build_int_2 (NUM_ARGS * UNITS_PER_WORD, 0);
6219 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6220 TREE_SIDE_EFFECTS (t) = 1;
6221 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6222 }
6223 }
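/* Worked example (illustrative, OSF): for a varargs function with two
   named arguments, NUM_ARGS is 2, so __base is set 48 bytes past the
   incoming args pointer (skipping the FP register save area) and
   __offset starts at 16; the first va_arg thus reads the third
   argument slot.  */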
6224
6225 rtx
6226 alpha_va_arg (valist, type)
6227 tree valist, type;
6228 {
6229 rtx addr;
6230 tree t, type_size, rounded_size;
6231 tree offset_field, base_field, addr_tree, addend;
6232 tree wide_type, wide_ofs;
6233 int indirect = 0;
6234
6235 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6236 return std_expand_builtin_va_arg (valist, type);
6237
6238 if (type == error_mark_node
6239 || (type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type))) == NULL
6240 || TREE_OVERFLOW (type_size))
6241 rounded_size = size_zero_node;
6242 else
6243 rounded_size = fold (build (MULT_EXPR, sizetype,
6244 fold (build (TRUNC_DIV_EXPR, sizetype,
6245 fold (build (PLUS_EXPR, sizetype,
6246 type_size,
6247 size_int (7))),
6248 size_int (8))),
6249 size_int (8)));
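/* For example (illustrative): a 12-byte argument rounds to
   ((12 + 7) / 8) * 8 == 16 bytes.  */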
6250
6251 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6252 offset_field = TREE_CHAIN (base_field);
6253
6254 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6255 valist, base_field);
6256 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6257 valist, offset_field);
6258
6259 /* If the type could not be passed in registers, skip the block
6260 reserved for the registers. */
6261 if (MUST_PASS_IN_STACK (TYPE_MODE (type), type))
6262 {
6263 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field,
6264 build (MAX_EXPR, TREE_TYPE (offset_field),
6265 offset_field, build_int_2 (6*8, 0)));
6266 TREE_SIDE_EFFECTS (t) = 1;
6267 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6268 }
6269
6270 wide_type = make_signed_type (64);
6271 wide_ofs = save_expr (build1 (CONVERT_EXPR, wide_type, offset_field));
6272
6273 addend = wide_ofs;
6274
6275 if (TYPE_MODE (type) == TFmode || TYPE_MODE (type) == TCmode)
6276 {
6277 indirect = 1;
6278 rounded_size = size_int (UNITS_PER_WORD);
6279 }
6280 else if (FLOAT_TYPE_P (type))
6281 {
6282 tree fpaddend, cond;
6283
6284 fpaddend = fold (build (PLUS_EXPR, TREE_TYPE (addend),
6285 addend, build_int_2 (-6*8, 0)));
6286
6287 cond = fold (build (LT_EXPR, integer_type_node,
6288 wide_ofs, build_int_2 (6*8, 0)));
6289
6290 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
6291 fpaddend, addend));
6292 }
6293
6294 addr_tree = build (PLUS_EXPR, TREE_TYPE (base_field),
6295 base_field, addend);
6296
6297 addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
6298 addr = copy_to_reg (addr);
6299
6300 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field,
6301 build (PLUS_EXPR, TREE_TYPE (offset_field),
6302 offset_field, rounded_size));
6303 TREE_SIDE_EFFECTS (t) = 1;
6304 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6305
6306 if (indirect)
6307 {
6308 addr = force_reg (Pmode, addr);
6309 addr = gen_rtx_MEM (Pmode, addr);
6310 }
6311
6312 return addr;
6313 }
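/* Illustrative sketch of the address computation performed above
   (ignoring the TFmode/TCmode indirect case): the next argument lives
   at __base + __offset, except that FP arguments still in the register
   save area are fetched from 48 bytes lower.  */
#if 0
static char *
alpha_va_arg_addr_example (char *base, long offset, int is_fp)
{
  long addend = offset;
  if (is_fp && offset < 6 * 8)
    addend -= 6 * 8;		/* FP save area precedes the int one.  */
  return base + addend;
}
#endif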
6314 \f
6315 /* Builtins. */
6316
6317 enum alpha_builtin
6318 {
6319 ALPHA_BUILTIN_CMPBGE,
6320 ALPHA_BUILTIN_EXTBL,
6321 ALPHA_BUILTIN_EXTWL,
6322 ALPHA_BUILTIN_EXTLL,
6323 ALPHA_BUILTIN_EXTQL,
6324 ALPHA_BUILTIN_EXTWH,
6325 ALPHA_BUILTIN_EXTLH,
6326 ALPHA_BUILTIN_EXTQH,
6327 ALPHA_BUILTIN_INSBL,
6328 ALPHA_BUILTIN_INSWL,
6329 ALPHA_BUILTIN_INSLL,
6330 ALPHA_BUILTIN_INSQL,
6331 ALPHA_BUILTIN_INSWH,
6332 ALPHA_BUILTIN_INSLH,
6333 ALPHA_BUILTIN_INSQH,
6334 ALPHA_BUILTIN_MSKBL,
6335 ALPHA_BUILTIN_MSKWL,
6336 ALPHA_BUILTIN_MSKLL,
6337 ALPHA_BUILTIN_MSKQL,
6338 ALPHA_BUILTIN_MSKWH,
6339 ALPHA_BUILTIN_MSKLH,
6340 ALPHA_BUILTIN_MSKQH,
6341 ALPHA_BUILTIN_UMULH,
6342 ALPHA_BUILTIN_ZAP,
6343 ALPHA_BUILTIN_ZAPNOT,
6344 ALPHA_BUILTIN_AMASK,
6345 ALPHA_BUILTIN_IMPLVER,
6346 ALPHA_BUILTIN_RPCC,
6347 ALPHA_BUILTIN_THREAD_POINTER,
6348 ALPHA_BUILTIN_SET_THREAD_POINTER,
6349
6350 /* TARGET_MAX */
6351 ALPHA_BUILTIN_MINUB8,
6352 ALPHA_BUILTIN_MINSB8,
6353 ALPHA_BUILTIN_MINUW4,
6354 ALPHA_BUILTIN_MINSW4,
6355 ALPHA_BUILTIN_MAXUB8,
6356 ALPHA_BUILTIN_MAXSB8,
6357 ALPHA_BUILTIN_MAXUW4,
6358 ALPHA_BUILTIN_MAXSW4,
6359 ALPHA_BUILTIN_PERR,
6360 ALPHA_BUILTIN_PKLB,
6361 ALPHA_BUILTIN_PKWB,
6362 ALPHA_BUILTIN_UNPKBL,
6363 ALPHA_BUILTIN_UNPKBW,
6364
6365 /* TARGET_CIX */
6366 ALPHA_BUILTIN_CTTZ,
6367 ALPHA_BUILTIN_CTLZ,
6368 ALPHA_BUILTIN_CTPOP,
6369
6370 ALPHA_BUILTIN_max
6371 };
6372
6373 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6374 CODE_FOR_builtin_cmpbge,
6375 CODE_FOR_builtin_extbl,
6376 CODE_FOR_builtin_extwl,
6377 CODE_FOR_builtin_extll,
6378 CODE_FOR_builtin_extql,
6379 CODE_FOR_builtin_extwh,
6380 CODE_FOR_builtin_extlh,
6381 CODE_FOR_builtin_extqh,
6382 CODE_FOR_builtin_insbl,
6383 CODE_FOR_builtin_inswl,
6384 CODE_FOR_builtin_insll,
6385 CODE_FOR_builtin_insql,
6386 CODE_FOR_builtin_inswh,
6387 CODE_FOR_builtin_inslh,
6388 CODE_FOR_builtin_insqh,
6389 CODE_FOR_builtin_mskbl,
6390 CODE_FOR_builtin_mskwl,
6391 CODE_FOR_builtin_mskll,
6392 CODE_FOR_builtin_mskql,
6393 CODE_FOR_builtin_mskwh,
6394 CODE_FOR_builtin_msklh,
6395 CODE_FOR_builtin_mskqh,
6396 CODE_FOR_umuldi3_highpart,
6397 CODE_FOR_builtin_zap,
6398 CODE_FOR_builtin_zapnot,
6399 CODE_FOR_builtin_amask,
6400 CODE_FOR_builtin_implver,
6401 CODE_FOR_builtin_rpcc,
6402 CODE_FOR_load_tp,
6403 CODE_FOR_set_tp,
6404
6405 /* TARGET_MAX */
6406 CODE_FOR_builtin_minub8,
6407 CODE_FOR_builtin_minsb8,
6408 CODE_FOR_builtin_minuw4,
6409 CODE_FOR_builtin_minsw4,
6410 CODE_FOR_builtin_maxub8,
6411 CODE_FOR_builtin_maxsb8,
6412 CODE_FOR_builtin_maxuw4,
6413 CODE_FOR_builtin_maxsw4,
6414 CODE_FOR_builtin_perr,
6415 CODE_FOR_builtin_pklb,
6416 CODE_FOR_builtin_pkwb,
6417 CODE_FOR_builtin_unpkbl,
6418 CODE_FOR_builtin_unpkbw,
6419
6420 /* TARGET_CIX */
6421 CODE_FOR_builtin_cttz,
6422 CODE_FOR_builtin_ctlz,
6423 CODE_FOR_builtin_ctpop
6424 };
6425
6426 struct alpha_builtin_def
6427 {
6428 const char *name;
6429 enum alpha_builtin code;
6430 unsigned int target_mask;
6431 };
6432
6433 static struct alpha_builtin_def const zero_arg_builtins[] = {
6434 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0 },
6435 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0 }
6436 };
6437
6438 static struct alpha_builtin_def const one_arg_builtins[] = {
6439 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0 },
6440 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX },
6441 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX },
6442 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX },
6443 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX },
6444 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX },
6445 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX },
6446 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX }
6447 };
6448
6449 static struct alpha_builtin_def const two_arg_builtins[] = {
6450 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0 },
6451 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0 },
6452 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0 },
6453 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0 },
6454 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0 },
6455 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0 },
6456 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0 },
6457 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0 },
6458 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0 },
6459 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0 },
6460 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0 },
6461 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0 },
6462 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0 },
6463 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0 },
6464 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0 },
6465 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0 },
6466 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0 },
6467 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0 },
6468 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0 },
6469 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0 },
6470 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0 },
6471 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0 },
6472 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0 },
6473 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0 },
6474 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0 },
6475 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX },
6476 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX },
6477 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX },
6478 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX },
6479 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX },
6480 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX },
6481 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX },
6482 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX },
6483 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX }
6484 };
6485
6486 static void
6487 alpha_init_builtins ()
6488 {
6489 const struct alpha_builtin_def *p;
6490 tree ftype;
6491 size_t i;
6492
6493 ftype = build_function_type (long_integer_type_node, void_list_node);
6494
6495 p = zero_arg_builtins;
6496 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6497 if ((target_flags & p->target_mask) == p->target_mask)
6498 builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6499 NULL, NULL_TREE);
6500
6501 ftype = build_function_type_list (long_integer_type_node,
6502 long_integer_type_node, NULL_TREE);
6503
6504 p = one_arg_builtins;
6505 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6506 if ((target_flags & p->target_mask) == p->target_mask)
6507 builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6508 NULL, NULL_TREE);
6509
6510 ftype = build_function_type_list (long_integer_type_node,
6511 long_integer_type_node,
6512 long_integer_type_node, NULL_TREE);
6513
6514 p = two_arg_builtins;
6515 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6516 if ((target_flags & p->target_mask) == p->target_mask)
6517 builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6518 NULL, NULL_TREE);
6519
6520 ftype = build_function_type (ptr_type_node, void_list_node);
6521 builtin_function ("__builtin_thread_pointer", ftype,
6522 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6523 NULL, NULL_TREE);
6524
6525 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6526 builtin_function ("__builtin_set_thread_pointer", ftype,
6527 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6528 NULL, NULL_TREE);
6529 }
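/* Illustrative user-level use of two of the builtins registered above
   (example code, not part of the compiler):  */
#if 0
unsigned long
example (unsigned long x, unsigned long y)
{
  /* Byte-wise unsigned >= comparison, plus the high 64 bits of the
     unsigned 128-bit product.  */
  return __builtin_alpha_cmpbge (x, y) + __builtin_alpha_umulh (x, y);
}
#endif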
6530
6531 /* Expand an expression EXP that calls a built-in function,
6532 with result going to TARGET if that's convenient
6533 (and in mode MODE if that's convenient).
6534 SUBTARGET may be used as the target for computing one of EXP's operands.
6535 IGNORE is nonzero if the value is to be ignored. */
6536
6537 static rtx
6538 alpha_expand_builtin (exp, target, subtarget, mode, ignore)
6539 tree exp;
6540 rtx target;
6541 rtx subtarget ATTRIBUTE_UNUSED;
6542 enum machine_mode mode ATTRIBUTE_UNUSED;
6543 int ignore ATTRIBUTE_UNUSED;
6544 {
6545 #define MAX_ARGS 2
6546
6547 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6548 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6549 tree arglist = TREE_OPERAND (exp, 1);
6550 enum insn_code icode;
6551 rtx op[MAX_ARGS], pat;
6552 int arity;
6553 bool nonvoid;
6554
6555 if (fcode >= ALPHA_BUILTIN_max)
6556 internal_error ("bad builtin fcode");
6557 icode = code_for_builtin[fcode];
6558 if (icode == 0)
6559 internal_error ("bad builtin fcode");
6560
6561 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6562
6563 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6564 arglist;
6565 arglist = TREE_CHAIN (arglist), arity++)
6566 {
6567 const struct insn_operand_data *insn_op;
6568
6569 tree arg = TREE_VALUE (arglist);
6570 if (arg == error_mark_node)
6571 return NULL_RTX;
6572 if (arity >= MAX_ARGS)
6573 return NULL_RTX;
6574
6575 insn_op = &insn_data[icode].operand[arity + nonvoid];
6576
6577 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6578
6579 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6580 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6581 }
6582
6583 if (nonvoid)
6584 {
6585 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6586 if (!target
6587 || GET_MODE (target) != tmode
6588 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6589 target = gen_reg_rtx (tmode);
6590 }
6591
6592 switch (arity)
6593 {
6594 case 0:
6595 pat = GEN_FCN (icode) (target);
6596 break;
6597 case 1:
6598 if (nonvoid)
6599 pat = GEN_FCN (icode) (target, op[0]);
6600 else
6601 pat = GEN_FCN (icode) (op[0]);
6602 break;
6603 case 2:
6604 pat = GEN_FCN (icode) (target, op[0], op[1]);
6605 break;
6606 default:
6607 abort ();
6608 }
6609 if (!pat)
6610 return NULL_RTX;
6611 emit_insn (pat);
6612
6613 if (nonvoid)
6614 return target;
6615 else
6616 return const0_rtx;
6617 }
6618 \f
6619 /* This page contains routines that are used to determine what the function
6620 prologue and epilogue code will do and write them out. */
6621
6622 /* Compute the size of the save area in the stack. */
6623
6624 /* These variables are used for communication between the following functions.
6625 They indicate various things about the current function being compiled
6626 that are used to tell what kind of prologue, epilogue and procedure
6627 descriptor to generate. */
6628
6629 /* The kind of procedure (null, register, or stack) we need to generate. */
6630 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
6631 static enum alpha_procedure_types alpha_procedure_type;
6632
6633 /* Register number (either FP or SP) that is used to unwind the frame. */
6634 static int vms_unwind_regno;
6635
6636 /* Register number used to save FP. We need not have one for RA since
6637 we don't modify it for register procedures. This is only defined
6638 for register frame procedures. */
6639 static int vms_save_fp_regno;
6640
6641 /* Register number used to reference objects off our PV. */
6642 static int vms_base_regno;
6643
6644 /* Compute register masks for saved registers. */
6645
6646 static void
6647 alpha_sa_mask (imaskP, fmaskP)
6648 unsigned long *imaskP;
6649 unsigned long *fmaskP;
6650 {
6651 unsigned long imask = 0;
6652 unsigned long fmask = 0;
6653 unsigned int i;
6654
6655 /* Irritatingly, there are two kinds of thunks -- those created with
6656 ASM_OUTPUT_MI_THUNK and those with DECL_THUNK_P that go through
6657 the regular part of the compiler. In the ASM_OUTPUT_MI_THUNK case
6658 we don't have valid register life info, but assemble_start_function
6659 wants to output .frame and .mask directives. */
6660 if (current_function_is_thunk && !no_new_pseudos)
6661 {
6662 *imaskP = 0;
6663 *fmaskP = 0;
6664 return;
6665 }
6666
6667 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
6668 imask |= (1L << HARD_FRAME_POINTER_REGNUM);
6669
6670 /* One for every register we have to save. */
6671 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6672 if (! fixed_regs[i] && ! call_used_regs[i]
6673 && regs_ever_live[i] && i != REG_RA
6674 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
6675 {
6676 if (i < 32)
6677 imask |= (1L << i);
6678 else
6679 fmask |= (1L << (i - 32));
6680 }
6681
6682 /* We need to restore these for the handler. */
6683 if (current_function_calls_eh_return)
6684 for (i = 0; ; ++i)
6685 {
6686 unsigned regno = EH_RETURN_DATA_REGNO (i);
6687 if (regno == INVALID_REGNUM)
6688 break;
6689 imask |= 1L << regno;
6690 }
6691
6692 /* If any register spilled, then spill the return address also. */
6693 /* ??? This is required by the Digital stack unwind specification
6694 and isn't needed if we're doing Dwarf2 unwinding. */
6695 if (imask || fmask || alpha_ra_ever_killed ())
6696 imask |= (1L << REG_RA);
6697
6698 *imaskP = imask;
6699 *fmaskP = fmask;
6700 }
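/* Illustrative check against the masks computed above: integer
   register REGNO (< 32) is saved iff bit REGNO of IMASK is set, and
   FP register REGNO (>= 32) iff bit REGNO - 32 of FMASK is set.  */
#if 0
static int
alpha_reg_saved_example (unsigned long imask, unsigned long fmask, int regno)
{
  return regno < 32
	 ? (imask >> regno) & 1
	 : (fmask >> (regno - 32)) & 1;
}
#endif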
6701
6702 int
6703 alpha_sa_size ()
6704 {
6705 unsigned long mask[2];
6706 int sa_size = 0;
6707 int i, j;
6708
6709 alpha_sa_mask (&mask[0], &mask[1]);
6710
6711 if (TARGET_ABI_UNICOSMK)
6712 {
6713 if (mask[0] || mask[1])
6714 sa_size = 14;
6715 }
6716 else
6717 {
6718 for (j = 0; j < 2; ++j)
6719 for (i = 0; i < 32; ++i)
6720 if ((mask[j] >> i) & 1)
6721 sa_size++;
6722 }
6723
6724 if (TARGET_ABI_UNICOSMK)
6725 {
6726 /* We might not need to generate a frame if we don't make any calls
6727 (including calls to __T3E_MISMATCH if this is a vararg function),
6728 don't have any local variables which require stack slots, don't
6729 use alloca and have not determined that we need a frame for other
6730 reasons. */
6731
6732 alpha_procedure_type
6733 = (sa_size || get_frame_size() != 0
6734 || current_function_outgoing_args_size
6735 || current_function_stdarg || current_function_calls_alloca
6736 || frame_pointer_needed)
6737 ? PT_STACK : PT_REGISTER;
6738
6739 /* Always reserve space for saving callee-saved registers if we
6740 need a frame as required by the calling convention. */
6741 if (alpha_procedure_type == PT_STACK)
6742 sa_size = 14;
6743 }
6744 else if (TARGET_ABI_OPEN_VMS)
6745 {
6746 /* Start by assuming we can use a register procedure if we don't
6747 make any calls (REG_RA not used) or need to save any
6748 registers, and a stack procedure if we do. */
6749 if ((mask[0] >> REG_RA) & 1)
6750 alpha_procedure_type = PT_STACK;
6751 else if (get_frame_size() != 0)
6752 alpha_procedure_type = PT_REGISTER;
6753 else
6754 alpha_procedure_type = PT_NULL;
6755
6756 /* Don't reserve space for saving FP & RA yet. Do that later after we've
6757 made the final decision on stack procedure vs register procedure. */
6758 if (alpha_procedure_type == PT_STACK)
6759 sa_size -= 2;
6760
6761 /* Decide whether to refer to objects off our PV via FP or PV.
6762 If we need FP for something else or if we receive a nonlocal
6763 goto (which expects PV to contain the value), we must use PV.
6764 Otherwise, start by assuming we can use FP. */
6765
6766 vms_base_regno
6767 = (frame_pointer_needed
6768 || current_function_has_nonlocal_label
6769 || alpha_procedure_type == PT_STACK
6770 || current_function_outgoing_args_size)
6771 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
6772
6773 /* If we want to copy PV into FP, we need to find some register
6774 in which to save FP. */
6775
6776 vms_save_fp_regno = -1;
6777 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
6778 for (i = 0; i < 32; i++)
6779 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
6780 vms_save_fp_regno = i;
6781
6782 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
6783 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
6784 else if (alpha_procedure_type == PT_NULL)
6785 vms_base_regno = REG_PV;
6786
6787 /* Stack unwinding should be done via FP unless we use it for PV. */
6788 vms_unwind_regno = (vms_base_regno == REG_PV
6789 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
6790
6791 /* If this is a stack procedure, allow space for saving FP and RA. */
6792 if (alpha_procedure_type == PT_STACK)
6793 sa_size += 2;
6794 }
6795 else
6796 {
6797 /* Our slot count must be even so the save area is a multiple of 16 bytes. */
6798 if (sa_size & 1)
6799 sa_size++;
6800 }
6801
6802 return sa_size * 8;
6803 }
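/* Worked example (illustrative, OSF): three saved registers imply that
   RA is saved too, giving four 8-byte slots; the count is already even,
   so alpha_sa_size returns 32 and the save area stays 16-byte aligned.  */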
6804
6805 int
6806 alpha_pv_save_size ()
6807 {
6808 alpha_sa_size ();
6809 return alpha_procedure_type == PT_STACK ? 8 : 0;
6810 }
6811
6812 int
6813 alpha_using_fp ()
6814 {
6815 alpha_sa_size ();
6816 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
6817 }
6818
6819 #if TARGET_ABI_OPEN_VMS
6820
6821 const struct attribute_spec vms_attribute_table[] =
6822 {
6823 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
6824 { "overlaid", 0, 0, true, false, false, NULL },
6825 { "global", 0, 0, true, false, false, NULL },
6826 { "initialize", 0, 0, true, false, false, NULL },
6827 { NULL, 0, 0, false, false, false, NULL }
6828 };
6829
6830 #endif
6831
6832 static int
6833 find_lo_sum (px, data)
6834 rtx *px;
6835 void *data ATTRIBUTE_UNUSED;
6836 {
6837 return GET_CODE (*px) == LO_SUM;
6838 }
6839
6840 static int
6841 alpha_does_function_need_gp ()
6842 {
6843 rtx insn;
6844
6845 /* The GP being variable is an OSF abi thing. */
6846 if (! TARGET_ABI_OSF)
6847 return 0;
6848
6849 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
6850 return 1;
6851
6852 if (current_function_is_thunk)
6853 return 1;
6854
6855 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
6856 Even if we are a static function, we still need to do this in case
6857 our address is taken and passed to something like qsort. */
6858
6859 push_topmost_sequence ();
6860 insn = get_insns ();
6861 pop_topmost_sequence ();
6862
6863 for (; insn; insn = NEXT_INSN (insn))
6864 if (INSN_P (insn)
6865 && GET_CODE (PATTERN (insn)) != USE
6866 && GET_CODE (PATTERN (insn)) != CLOBBER)
6867 {
6868 enum attr_type type = get_attr_type (insn);
6869 if (type == TYPE_LDSYM || type == TYPE_JSR)
6870 return 1;
6871 if (TARGET_EXPLICIT_RELOCS
6872 && for_each_rtx (&PATTERN (insn), find_lo_sum, NULL) > 0)
6873 return 1;
6874 }
6875
6876 return 0;
6877 }
6878
6879 /* Write a version stamp. Don't write anything if we are running as a
6880 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
6881
6882 #ifdef HAVE_STAMP_H
6883 #include <stamp.h>
6884 #endif
6885
6886 void
6887 alpha_write_verstamp (file)
6888 FILE *file ATTRIBUTE_UNUSED;
6889 {
6890 #ifdef MS_STAMP
6891 fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
6892 #endif
6893 }
6894 \f
6895 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
6896 sequences. */
6897
6898 static rtx
6899 set_frame_related_p ()
6900 {
6901 rtx seq = get_insns ();
6902 rtx insn;
6903
6904 end_sequence ();
6905
6906 if (!seq)
6907 return NULL_RTX;
6908
6909 if (INSN_P (seq))
6910 {
6911 insn = seq;
6912 while (insn != NULL_RTX)
6913 {
6914 RTX_FRAME_RELATED_P (insn) = 1;
6915 insn = NEXT_INSN (insn);
6916 }
6917 seq = emit_insn (seq);
6918 }
6919 else
6920 {
6921 seq = emit_insn (seq);
6922 RTX_FRAME_RELATED_P (seq) = 1;
6923 }
6924 return seq;
6925 }
6926
6927 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
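/* For example, FRP (emit_move_insn (dst, src)) emits the move and marks
   the resulting insn(s) frame-related for unwind info.  */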
6928
6929 /* Write function prologue. */
6930
6931 /* On VMS we have two kinds of functions:
6932 
6933 - stack frame (PT_STACK)
6934 these are 'normal' functions with local vars and which
6935 call other functions
6936 - register frame (PT_REGISTER)
6937 keeps all data in registers, needs no stack
6938 
6939 We must pass this to the assembler so it can generate the
6940 proper pdsc (procedure descriptor).
6941 This is done with the '.pdesc' directive.
6942 
6943 On non-VMS targets, we don't really differentiate between the two, as we
6944 can simply allocate stack without saving registers. */
6945
6946 void
6947 alpha_expand_prologue ()
6948 {
6949 /* Registers to save. */
6950 unsigned long imask = 0;
6951 unsigned long fmask = 0;
6952 /* Stack space needed for pushing registers clobbered by us. */
6953 HOST_WIDE_INT sa_size;
6954 /* Complete stack size needed. */
6955 HOST_WIDE_INT frame_size;
6956 /* Offset from base reg to register save area. */
6957 HOST_WIDE_INT reg_offset;
6958 rtx sa_reg, mem;
6959 int i;
6960
6961 sa_size = alpha_sa_size ();
6962
6963 frame_size = get_frame_size ();
6964 if (TARGET_ABI_OPEN_VMS)
6965 frame_size = ALPHA_ROUND (sa_size
6966 + (alpha_procedure_type == PT_STACK ? 8 : 0)
6967 + frame_size
6968 + current_function_pretend_args_size);
6969 else if (TARGET_ABI_UNICOSMK)
6970 /* We have to allocate space for the DSIB if we generate a frame. */
6971 frame_size = ALPHA_ROUND (sa_size
6972 + (alpha_procedure_type == PT_STACK ? 48 : 0))
6973 + ALPHA_ROUND (frame_size
6974 + current_function_outgoing_args_size);
6975 else
6976 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
6977 + sa_size
6978 + ALPHA_ROUND (frame_size
6979 + current_function_pretend_args_size));
6980
6981 if (TARGET_ABI_OPEN_VMS)
6982 reg_offset = 8;
6983 else
6984 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
6985
6986 alpha_sa_mask (&imask, &fmask);
6987
6988 /* Emit an insn to reload GP, if needed. */
6989 if (TARGET_ABI_OSF)
6990 {
6991 alpha_function_needs_gp = alpha_does_function_need_gp ();
6992 if (alpha_function_needs_gp)
6993 emit_insn (gen_prologue_ldgp ());
6994 }
6995
6996 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
6997 the call to mcount ourselves, rather than having the linker do it
6998 magically in response to -pg. Since _mcount has special linkage,
6999 don't represent the call as a call. */
7000 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7001 emit_insn (gen_prologue_mcount ());
7002
7003 if (TARGET_ABI_UNICOSMK)
7004 unicosmk_gen_dsib (&imask);
7005
7006 /* Adjust the stack by the frame size. If the frame size is > 4096
7007 bytes, we need to be sure we probe somewhere in the first and last
7008 4096 bytes (we can probably get away without the latter test) and
7009 every 8192 bytes in between. If the frame size is > 32768, we
7010 do this in a loop. Otherwise, we generate the explicit probe
7011 instructions.
7012
7013 Note that we are only allowed to adjust sp once in the prologue. */
7014
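/* Worked example (illustrative): for a 20000-byte frame we probe at
   sp-4096 and sp-12288; the next candidate, 20480, is past the frame,
   and 20480 + 4096 is not below 20000 either, so no final probe is
   needed before the single sp adjustment of -20000.  */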
7015 if (frame_size <= 32768)
7016 {
7017 if (frame_size > 4096)
7018 {
7019 int probed = 4096;
7020
7021 do
7022 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7023 ? -probed + 64
7024 : -probed)));
7025 while ((probed += 8192) < frame_size);
7026
7027 /* We only have to do this probe if we aren't saving registers. */
7028 if (sa_size == 0 && probed + 4096 < frame_size)
7029 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7030 }
7031
7032 if (frame_size != 0)
7033 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7034 GEN_INT (TARGET_ABI_UNICOSMK
7035 ? -frame_size + 64
7036 : -frame_size))));
7037 }
7038 else
7039 {
7040 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7041 number of 8192 byte blocks to probe. We then probe each block
7042 in the loop and then set SP to the proper location. If the
7043 amount remaining is > 4096, we have to do one more probe if we
7044 are not saving any registers. */
7045
7046 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7047 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7048 rtx ptr = gen_rtx_REG (DImode, 22);
7049 rtx count = gen_rtx_REG (DImode, 23);
7050 rtx seq;
7051
7052 emit_move_insn (count, GEN_INT (blocks));
7053 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7054 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7055
7056 /* Because of the difficulty in emitting a new basic block this
7057 late in the compilation, generate the loop as a single insn. */
7058 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7059
7060 if (leftover > 4096 && sa_size == 0)
7061 {
7062 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7063 MEM_VOLATILE_P (last) = 1;
7064 emit_move_insn (last, const0_rtx);
7065 }
7066
7067 if (TARGET_ABI_WINDOWS_NT)
7068 {
7069 /* For NT stack unwind (done by 'reverse execution'), it's
7070 not OK to take the result of a loop, even though the value
7071 is already in ptr, so we reload it via a single operation
7072 and subtract it from sp.
7073
7074 Yes, that's correct -- we have to reload the whole constant
7075 into a temporary via ldah+lda then subtract from sp. To
7076 ensure we get ldah+lda, we use a special pattern. */
7077
7078 HOST_WIDE_INT lo, hi;
7079 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7080 hi = frame_size - lo;
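/* Illustrative check: LO is FRAME_SIZE sign-extended from 16 bits and
   HI the remaining multiple of 65536, so HI + LO always equals
   FRAME_SIZE; e.g. 40000 == 65536 + -25536.  */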
7081
7082 emit_move_insn (ptr, GEN_INT (hi));
7083 emit_insn (gen_nt_lda (ptr, GEN_INT (lo)));
7084 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7085 ptr));
7086 }
7087 else
7088 {
7089 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7090 GEN_INT (-leftover)));
7091 }
7092
7093 /* This alternative is special, because the DWARF code cannot
7094 possibly intuit through the loop above. So we invent this
7095 note for it to look at instead. */
7096 RTX_FRAME_RELATED_P (seq) = 1;
7097 REG_NOTES (seq)
7098 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7099 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7100 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7101 GEN_INT (TARGET_ABI_UNICOSMK
7102 ? -frame_size + 64
7103 : -frame_size))),
7104 REG_NOTES (seq));
7105 }
7106
7107 if (!TARGET_ABI_UNICOSMK)
7108 {
7109 /* Cope with very large offsets to the register save area. */
7110 sa_reg = stack_pointer_rtx;
7111 if (reg_offset + sa_size > 0x8000)
7112 {
7113 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7114 HOST_WIDE_INT bias;
7115
7116 if (low + sa_size <= 0x8000)
7117 bias = reg_offset - low, reg_offset = low;
7118 else
7119 bias = reg_offset, reg_offset = 0;
7120
7121 sa_reg = gen_rtx_REG (DImode, 24);
7122 FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx,
7123 GEN_INT (bias))));
7124 }
7125
7126 /* Save regs in stack order, beginning with the VMS PV. */
7127 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7128 {
7129 mem = gen_rtx_MEM (DImode, stack_pointer_rtx);
7130 set_mem_alias_set (mem, alpha_sr_alias_set);
7131 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_PV)));
7132 }
7133
7134 /* Save register RA next. */
7135 if (imask & (1L << REG_RA))
7136 {
7137 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7138 set_mem_alias_set (mem, alpha_sr_alias_set);
7139 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
7140 imask &= ~(1L << REG_RA);
7141 reg_offset += 8;
7142 }
7143
7144 /* Now save any other registers required to be saved. */
7145 for (i = 0; i < 32; i++)
7146 if (imask & (1L << i))
7147 {
7148 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7149 set_mem_alias_set (mem, alpha_sr_alias_set);
7150 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
7151 reg_offset += 8;
7152 }
7153
7154 for (i = 0; i < 32; i++)
7155 if (fmask & (1L << i))
7156 {
7157 mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
7158 set_mem_alias_set (mem, alpha_sr_alias_set);
7159 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
7160 reg_offset += 8;
7161 }
7162 }
7163 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7164 {
7165 /* The standard frame on the T3E includes space for saving registers.
7166 We just have to use it. We don't have to save the return address and
7167 the old frame pointer here - they are saved in the DSIB. */
7168
7169 reg_offset = -56;
7170 for (i = 9; i < 15; i++)
7171 if (imask & (1L << i))
7172 {
7173 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
7174 reg_offset));
7175 set_mem_alias_set (mem, alpha_sr_alias_set);
7176 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
7177 reg_offset -= 8;
7178 }
7179 for (i = 2; i < 10; i++)
7180 if (fmask & (1L << i))
7181 {
7182 mem = gen_rtx_MEM (DFmode, plus_constant (hard_frame_pointer_rtx,
7183 reg_offset));
7184 set_mem_alias_set (mem, alpha_sr_alias_set);
7185 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
7186 reg_offset -= 8;
7187 }
7188 }
7189
7190 if (TARGET_ABI_OPEN_VMS)
7191 {
7192 if (alpha_procedure_type == PT_REGISTER)
7193 /* Register frame procedures save the fp.
7194 ?? Ought to have a dwarf2 save for this. */
7195 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7196 hard_frame_pointer_rtx);
7197
7198 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7199 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7200 gen_rtx_REG (DImode, REG_PV)));
7201
7202 if (alpha_procedure_type != PT_NULL
7203 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7204 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7205
7206 /* If we have to allocate space for outgoing args, do it now. */
7207 if (current_function_outgoing_args_size != 0)
7208 FRP (emit_move_insn
7209 (stack_pointer_rtx,
7210 plus_constant (hard_frame_pointer_rtx,
7211 - (ALPHA_ROUND
7212 (current_function_outgoing_args_size)))));
7213 }
7214 else if (!TARGET_ABI_UNICOSMK)
7215 {
7216 /* If we need a frame pointer, set it from the stack pointer. */
7217 if (frame_pointer_needed)
7218 {
7219 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7220 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7221 else
7222 /* This must always be the last instruction in the
7223 prologue, thus we emit a special move + clobber. */
7224 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7225 stack_pointer_rtx, sa_reg)));
7226 }
7227 }
7228
7229 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7230 the prologue, for exception handling reasons, we cannot do this for
7231 any insn that might fault. We could prevent this for mems with a
7232 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7233 have to prevent all such scheduling with a blockage.
7234
7235 Linux, on the other hand, never bothered to implement OSF/1's
7236 exception handling, and so doesn't care about such things. Anyone
7237 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7238
7239 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7240 emit_insn (gen_blockage ());
7241 }
7242
7243 /* Output the textual info surrounding the prologue. */
7244
7245 void
7246 alpha_start_function (file, fnname, decl)
7247 FILE *file;
7248 const char *fnname;
7249 tree decl ATTRIBUTE_UNUSED;
7250 {
7251 unsigned long imask = 0;
7252 unsigned long fmask = 0;
7253 /* Stack space needed for pushing registers clobbered by us. */
7254 HOST_WIDE_INT sa_size;
7255 /* Complete stack size needed. */
7256 HOST_WIDE_INT frame_size;
7257 /* Offset from base reg to register save area. */
7258 HOST_WIDE_INT reg_offset;
7259 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7260 int i;
7261
7262 /* Don't emit an extern directive for functions defined in the same file. */
7263 if (TARGET_ABI_UNICOSMK)
7264 {
7265 tree name_tree;
7266 name_tree = get_identifier (fnname);
7267 TREE_ASM_WRITTEN (name_tree) = 1;
7268 }
7269
7270 alpha_fnname = fnname;
7271 sa_size = alpha_sa_size ();
7272
7273 frame_size = get_frame_size ();
7274 if (TARGET_ABI_OPEN_VMS)
7275 frame_size = ALPHA_ROUND (sa_size
7276 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7277 + frame_size
7278 + current_function_pretend_args_size);
7279 else if (TARGET_ABI_UNICOSMK)
7280 frame_size = ALPHA_ROUND (sa_size
7281 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7282 + ALPHA_ROUND (frame_size
7283 + current_function_outgoing_args_size);
7284 else
7285 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7286 + sa_size
7287 + ALPHA_ROUND (frame_size
7288 + current_function_pretend_args_size));
7289
7290 if (TARGET_ABI_OPEN_VMS)
7291 reg_offset = 8;
7292 else
7293 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7294
7295 alpha_sa_mask (&imask, &fmask);
7296
7297 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7298 We have to do that before the .ent directive as we cannot switch
7299 files within procedures with native ecoff because line numbers are
7300 linked to procedure descriptors.
7301 Outputting the lineno helps debugging of one line functions as they
7302 would otherwise get no line number at all. Please note that we would
7303 like to put out last_linenum from final.c, but it is not accessible. */
7304
7305 if (write_symbols == SDB_DEBUG)
7306 {
7307 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7308 ASM_OUTPUT_SOURCE_FILENAME (file,
7309 DECL_SOURCE_FILE (current_function_decl));
7310 #endif
7311 #ifdef ASM_OUTPUT_SOURCE_LINE
7312 if (debug_info_level != DINFO_LEVEL_TERSE)
7313 ASM_OUTPUT_SOURCE_LINE (file,
7314 DECL_SOURCE_LINE (current_function_decl));
7315 #endif
7316 }
7317
7318 /* Issue function start and label. */
7319 if (TARGET_ABI_OPEN_VMS
7320 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7321 {
7322 fputs ("\t.ent ", file);
7323 assemble_name (file, fnname);
7324 putc ('\n', file);
7325
7326 /* If the function needs GP, we'll write the "..ng" label there.
7327 Otherwise, do it here. */
7328 if (TARGET_ABI_OSF
7329 && ! alpha_function_needs_gp
7330 && ! current_function_is_thunk)
7331 {
7332 putc ('$', file);
7333 assemble_name (file, fnname);
7334 fputs ("..ng:\n", file);
7335 }
7336 }
7337
7338 strcpy (entry_label, fnname);
7339 if (TARGET_ABI_OPEN_VMS)
7340 strcat (entry_label, "..en");
7341
7342 /* For public functions, the label must be globalized by appending an
7343 additional colon. */
7344 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7345 strcat (entry_label, ":");
7346
7347 ASM_OUTPUT_LABEL (file, entry_label);
7348 inside_function = TRUE;
7349
7350 if (TARGET_ABI_OPEN_VMS)
7351 fprintf (file, "\t.base $%d\n", vms_base_regno);
7352
7353 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7354 && !flag_inhibit_size_directive)
7355 {
7356 /* Set flags in procedure descriptor to request IEEE-conformant
7357 math-library routines. The value we set it to is PDSC_EXC_IEEE
7358 (/usr/include/pdsc.h). */
7359 fputs ("\t.eflag 48\n", file);
7360 }
7361
7362 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7363 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7364 alpha_arg_offset = -frame_size + 48;
7365
7366 /* Describe our frame. If the frame size doesn't fit in a 32-bit integer,
7367 print it as zero to avoid an assembler error. We won't be
7368 properly describing such a frame, but that's the best we can do. */
7369 if (TARGET_ABI_UNICOSMK)
7370 ;
7371 else if (TARGET_ABI_OPEN_VMS)
7372 {
7373 fprintf (file, "\t.frame $%d,", vms_unwind_regno);
7374 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7375 frame_size >= ((HOST_WIDE_INT) 1 << 31) ? 0 : frame_size);
7376 fputs (",$26,", file);
7377 fprintf (file, HOST_WIDE_INT_PRINT_DEC, reg_offset);
7378 fputs ("\n", file);
7379 }
7380 else if (!flag_inhibit_size_directive)
7381 {
7382 fprintf (file, "\t.frame $%d,",
7383 (frame_pointer_needed
7384 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM));
7385 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7386 frame_size >= (1l << 31) ? 0 : frame_size);
7387 fprintf (file, ",$26,%d\n", current_function_pretend_args_size);
7388 }
7389
7390 /* Describe which registers were spilled. */
7391 if (TARGET_ABI_UNICOSMK)
7392 ;
7393 else if (TARGET_ABI_OPEN_VMS)
7394 {
7395 if (imask)
7396 /* ??? Does VMS care if mask contains ra? The old code didn't
7397 set it, so I don't here. */
7398 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1L << REG_RA));
7399 if (fmask)
7400 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7401 if (alpha_procedure_type == PT_REGISTER)
7402 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7403 }
7404 else if (!flag_inhibit_size_directive)
7405 {
7406 if (imask)
7407 {
7408 fprintf (file, "\t.mask 0x%lx,", imask);
7409 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7410 frame_size >= (1l << 31) ? 0 : reg_offset - frame_size);
7411 putc ('\n', file);
7412
7413 for (i = 0; i < 32; ++i)
7414 if (imask & (1L << i))
7415 reg_offset += 8;
7416 }
7417
7418 if (fmask)
7419 {
7420 fprintf (file, "\t.fmask 0x%lx,", fmask);
7421 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7422 frame_size >= (1l << 31) ? 0 : reg_offset - frame_size);
7423 putc ('\n', file);
7424 }
7425 }
7426
7427 #if TARGET_ABI_OPEN_VMS
7428 /* Ifdef'ed because link_section is only available then. */
7429 readonly_data_section ();
7430 fprintf (file, "\t.align 3\n");
7431 assemble_name (file, fnname); fputs ("..na:\n", file);
7432 fputs ("\t.ascii \"", file);
7433 assemble_name (file, fnname);
7434 fputs ("\\0\"\n", file);
7435
7436 link_section ();
7437 fprintf (file, "\t.align 3\n");
7438 fputs ("\t.name ", file);
7439 assemble_name (file, fnname);
7440 fputs ("..na\n", file);
7441 ASM_OUTPUT_LABEL (file, fnname);
7442 fprintf (file, "\t.pdesc ");
7443 assemble_name (file, fnname);
7444 fprintf (file, "..en,%s\n",
7445 alpha_procedure_type == PT_STACK ? "stack"
7446 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
7447 alpha_need_linkage (fnname, 1);
7448 text_section ();
7449 #endif
7450 }
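/* Illustrative OSF output from the routine above, assuming a
   hypothetical function `foo' with a 48-byte frame, no frame pointer,
   and $26 plus $9 saved at offset 0 (imask 0x4000200):

	.ent foo
   foo:
	.frame $30,48,$26,0
	.mask 0x4000200,-48
*/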
7451
7452 /* Emit the .prologue note at the scheduled end of the prologue. */
7453
7454 static void
7455 alpha_output_function_end_prologue (file)
7456 FILE *file;
7457 {
7458 if (TARGET_ABI_UNICOSMK)
7459 ;
7460 else if (TARGET_ABI_OPEN_VMS)
7461 fputs ("\t.prologue\n", file);
7462 else if (TARGET_ABI_WINDOWS_NT)
7463 fputs ("\t.prologue 0\n", file);
7464 else if (!flag_inhibit_size_directive)
7465 fprintf (file, "\t.prologue %d\n",
7466 alpha_function_needs_gp || current_function_is_thunk);
7467 }
7468
7469 /* Write function epilogue. */
7470
7471 /* ??? At some point we will want to support full unwind, and so will
7472 need to mark the epilogue as well. At the moment, we just confuse
7473 dwarf2out. */
7474 #undef FRP
7475 #define FRP(exp) exp
7476
7477 void
7478 alpha_expand_epilogue ()
7479 {
7480 /* Registers to save. */
7481 unsigned long imask = 0;
7482 unsigned long fmask = 0;
7483 /* Stack space needed for pushing registers clobbered by us. */
7484 HOST_WIDE_INT sa_size;
7485 /* Complete stack size needed. */
7486 HOST_WIDE_INT frame_size;
7487 /* Offset from base reg to register save area. */
7488 HOST_WIDE_INT reg_offset;
7489 int fp_is_frame_pointer, fp_offset;
7490 rtx sa_reg, sa_reg_exp = NULL;
7491 rtx sp_adj1, sp_adj2, mem;
7492 rtx eh_ofs;
7493 int i;
7494
7495 sa_size = alpha_sa_size ();
7496
7497 frame_size = get_frame_size ();
7498 if (TARGET_ABI_OPEN_VMS)
7499 frame_size = ALPHA_ROUND (sa_size
7500 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7501 + frame_size
7502 + current_function_pretend_args_size);
7503 else if (TARGET_ABI_UNICOSMK)
7504 frame_size = ALPHA_ROUND (sa_size
7505 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7506 + ALPHA_ROUND (frame_size
7507 + current_function_outgoing_args_size);
7508 else
7509 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7510 + sa_size
7511 + ALPHA_ROUND (frame_size
7512 + current_function_pretend_args_size));
7513
7514 if (TARGET_ABI_OPEN_VMS)
7515 {
7516 if (alpha_procedure_type == PT_STACK)
7517 reg_offset = 8;
7518 else
7519 reg_offset = 0;
7520 }
7521 else
7522 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7523
7524 alpha_sa_mask (&imask, &fmask);
7525
7526 fp_is_frame_pointer
7527 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7528 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
7529 fp_offset = 0;
7530 sa_reg = stack_pointer_rtx;
7531
7532 if (current_function_calls_eh_return)
7533 eh_ofs = EH_RETURN_STACKADJ_RTX;
7534 else
7535 eh_ofs = NULL_RTX;
7536
7537 if (!TARGET_ABI_UNICOSMK && sa_size)
7538 {
7539 /* If we have a frame pointer, restore SP from it. */
7540 if ((TARGET_ABI_OPEN_VMS
7541 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7542 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
7543 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
7544
7545 /* Cope with very large offsets to the register save area. */
7546 if (reg_offset + sa_size > 0x8000)
7547 {
7548 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7549 HOST_WIDE_INT bias;
7550
7551 if (low + sa_size <= 0x8000)
7552 bias = reg_offset - low, reg_offset = low;
7553 else
7554 bias = reg_offset, reg_offset = 0;
7555
7556 sa_reg = gen_rtx_REG (DImode, 22);
7557 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
7558
7559 FRP (emit_move_insn (sa_reg, sa_reg_exp));
7560 }
7561
7562 /* Restore registers in order, excepting a true frame pointer. */
7563
7564 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7565 if (! eh_ofs)
7566 set_mem_alias_set (mem, alpha_sr_alias_set);
7567 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7568
7569 reg_offset += 8;
7570 imask &= ~(1L << REG_RA);
7571
7572 for (i = 0; i < 32; ++i)
7573 if (imask & (1L << i))
7574 {
7575 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
7576 fp_offset = reg_offset;
7577 else
7578 {
7579 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
7580 set_mem_alias_set (mem, alpha_sr_alias_set);
7581 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7582 }
7583 reg_offset += 8;
7584 }
7585
7586 for (i = 0; i < 32; ++i)
7587 if (fmask & (1L << i))
7588 {
7589 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
7590 set_mem_alias_set (mem, alpha_sr_alias_set);
7591 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7592 reg_offset += 8;
7593 }
7594 }
7595 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7596 {
7597 /* Restore callee-saved general-purpose registers. */
7598
7599 reg_offset = -56;
7600
7601 for (i = 9; i < 15; i++)
7602 if (imask & (1L << i))
7603 {
7604 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
7605 reg_offset));
7606 set_mem_alias_set (mem, alpha_sr_alias_set);
7607 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7608 reg_offset -= 8;
7609 }
7610
7611 for (i = 2; i < 10; i++)
7612 if (fmask & (1L << i))
7613 {
7614 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
7615 reg_offset));
7616 set_mem_alias_set (mem, alpha_sr_alias_set);
7617 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7618 reg_offset -= 8;
7619 }
7620
7621 /* Restore the return address from the DSIB. */
7622
7623 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
7624 set_mem_alias_set (mem, alpha_sr_alias_set);
7625 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7626 }
7627
7628 if (frame_size || eh_ofs)
7629 {
7630 sp_adj1 = stack_pointer_rtx;
7631
7632 if (eh_ofs)
7633 {
7634 sp_adj1 = gen_rtx_REG (DImode, 23);
7635 emit_move_insn (sp_adj1,
7636 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
7637 }
7638
7639 /* If the stack size is large, begin computation into a temporary
7640 register so as not to interfere with a potential fp restore,
7641 which must be consecutive with an SP restore. */
7642 if (frame_size < 32768
7643 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
7644 sp_adj2 = GEN_INT (frame_size);
7645 else if (TARGET_ABI_UNICOSMK)
7646 {
7647 sp_adj1 = gen_rtx_REG (DImode, 23);
7648 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
7649 sp_adj2 = const0_rtx;
7650 }
7651 else if (frame_size < 0x40007fffL)
7652 {
7653 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7654
7655 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
7656 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
7657 sp_adj1 = sa_reg;
7658 else
7659 {
7660 sp_adj1 = gen_rtx_REG (DImode, 23);
7661 FRP (emit_move_insn (sp_adj1, sp_adj2));
7662 }
7663 sp_adj2 = GEN_INT (low);
7664 }
7665 else
7666 {
7667 rtx tmp = gen_rtx_REG (DImode, 23);
7668 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3));
7669 if (!sp_adj2)
7670 {
7671 /* We can't drop new things to memory this late, afaik,
7672 so build it up in pieces. */
7673 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
7674 -(frame_size < 0)));
7675 if (!sp_adj2)
7676 abort ();
7677 }
7678 }
7679
7680 /* From now on, things must be in order. So emit blockages. */
7681
7682 /* Restore the frame pointer. */
7683 if (TARGET_ABI_UNICOSMK)
7684 {
7685 emit_insn (gen_blockage ());
7686 mem = gen_rtx_MEM (DImode,
7687 plus_constant (hard_frame_pointer_rtx, -16));
7688 set_mem_alias_set (mem, alpha_sr_alias_set);
7689 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7690 }
7691 else if (fp_is_frame_pointer)
7692 {
7693 emit_insn (gen_blockage ());
7694 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
7695 set_mem_alias_set (mem, alpha_sr_alias_set);
7696 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7697 }
7698 else if (TARGET_ABI_OPEN_VMS)
7699 {
7700 emit_insn (gen_blockage ());
7701 FRP (emit_move_insn (hard_frame_pointer_rtx,
7702 gen_rtx_REG (DImode, vms_save_fp_regno)));
7703 }
7704
7705 /* Restore the stack pointer. */
7706 emit_insn (gen_blockage ());
7707 if (sp_adj2 == const0_rtx)
7708 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
7709 else
7710 FRP (emit_move_insn (stack_pointer_rtx,
7711 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
7712 }
7713 else
7714 {
7715 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
7716 {
7717 emit_insn (gen_blockage ());
7718 FRP (emit_move_insn (hard_frame_pointer_rtx,
7719 gen_rtx_REG (DImode, vms_save_fp_regno)));
7720 }
7721 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
7722 {
7723 /* Decrement the frame pointer if the function does not have a
7724 frame. */
7725
7726 emit_insn (gen_blockage ());
7727 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
7728 hard_frame_pointer_rtx, GEN_INT (-1))));
7729 }
7730 }
7731 }
7732
7733 /* Output the rest of the textual info surrounding the epilogue. */
7734
7735 void
7736 alpha_end_function (file, fnname, decl)
7737 FILE *file;
7738 const char *fnname;
7739 tree decl;
7740 {
7741 /* End the function. */
7742 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
7743 {
7744 fputs ("\t.end ", file);
7745 assemble_name (file, fnname);
7746 putc ('\n', file);
7747 }
7748 inside_function = FALSE;
7749
7750 /* Show that we know this function if it is called again.
7751
7752 Do this only for functions whose symbols bind locally.
7753
7754 Don't do this for functions not defined in the .text section, as
7755 otherwise it's not unlikely that the destination is out of range
7756 for a direct branch. */
7757
7758 if ((*targetm.binds_local_p) (decl) && decl_in_text_section (decl))
7759 SYMBOL_REF_FLAG (XEXP (DECL_RTL (decl), 0)) = 1;
7760
7761 /* Output jump tables and the static subroutine information block. */
7762 if (TARGET_ABI_UNICOSMK)
7763 {
7764 unicosmk_output_ssib (file, fnname);
7765 unicosmk_output_deferred_case_vectors (file);
7766 }
7767 }
7768
7769 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
7770
7771 In order to avoid the hordes of differences between generated code
7772 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
7773 lots of code loading up large constants, generate rtl and emit it
7774 instead of going straight to text.
7775
7776 Not sure why this idea hasn't been explored before... */
7777
7778 void
7779 alpha_output_mi_thunk_osf (file, thunk_fndecl, delta, function)
7780 FILE *file;
7781 tree thunk_fndecl ATTRIBUTE_UNUSED;
7782 HOST_WIDE_INT delta;
7783 tree function;
7784 {
7785 HOST_WIDE_INT hi, lo;
7786 rtx this, insn, funexp;
7787
7788 /* We always require a valid GP. */
7789 emit_insn (gen_prologue_ldgp ());
7790 emit_note (NULL, NOTE_INSN_PROLOGUE_END);
7791
7792 /* Find the "this" pointer. If the function returns a structure,
7793 the structure return pointer is in $16 and "this" moves to $17. */
7794 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function))))
7795 this = gen_rtx_REG (Pmode, 17);
7796 else
7797 this = gen_rtx_REG (Pmode, 16);
7798
7799 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
7800 entire constant for the add. */
7801 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
7802 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7803 if (hi + lo == delta)
7804 {
7805 if (hi)
7806 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
7807 if (lo)
7808 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
7809 }
7810 else
7811 {
7812 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
7813 delta, -(delta < 0));
7814 emit_insn (gen_adddi3 (this, this, tmp));
7815 }
7816
7817 /* Generate a tail call to the target function. */
7818 if (! TREE_USED (function))
7819 {
7820 assemble_external (function);
7821 TREE_USED (function) = 1;
7822 }
7823 funexp = XEXP (DECL_RTL (function), 0);
7824 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
7825 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
7826 SIBLING_CALL_P (insn) = 1;
7827
7828 /* Run just enough of rest_of_compilation to get the insns emitted.
7829 There's not really enough bulk here to make other passes such as
7830 instruction scheduling worthwhile. Note that use_thunk calls
7831 assemble_start_function and assemble_end_function. */
7832 insn = get_insns ();
7833 shorten_branches (insn);
7834 final_start_function (insn, file, 1);
7835 final (insn, file, 1, 0);
7836 final_end_function ();
7837 }
7838 \f
7839 /* Debugging support. */
7840
7841 #include "gstab.h"
7842
7843 /* Count the number of sdb-related labels generated (to find block
7844 start and end boundaries). */
7845
7846 int sdb_label_count = 0;
7847
7848 /* Next label # for each statement. */
7849
7850 static int sym_lineno = 0;
7851
7852 /* Count the number of .file directives, so that .loc is up to date. */
7853
7854 static int num_source_filenames = 0;
7855
7856 /* Name of the file containing the current function. */
7857
7858 static const char *current_function_file = "";
7859
7860 /* Offsets to alpha virtual arg/local debugging pointers. */
7861
7862 long alpha_arg_offset;
7863 long alpha_auto_offset;
7864 \f
7865 /* Emit a new filename to a stream. */
7866
7867 void
7868 alpha_output_filename (stream, name)
7869 FILE *stream;
7870 const char *name;
7871 {
7872 static int first_time = TRUE;
7873 char ltext_label_name[100];
7874
7875 if (first_time)
7876 {
7877 first_time = FALSE;
7878 ++num_source_filenames;
7879 current_function_file = name;
7880 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7881 output_quoted_string (stream, name);
7882 fprintf (stream, "\n");
7883 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
7884 fprintf (stream, "\t#@stabs\n");
7885 }
7886
7887 else if (write_symbols == DBX_DEBUG)
7888 {
7889 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
7890 fprintf (stream, "%s", ASM_STABS_OP);
7891 output_quoted_string (stream, name);
7892 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
7893 }
7894
7895 else if (name != current_function_file
7896 && strcmp (name, current_function_file) != 0)
7897 {
7898 if (inside_function && ! TARGET_GAS)
7899 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
7900 else
7901 {
7902 ++num_source_filenames;
7903 current_function_file = name;
7904 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7905 }
7906
7907 output_quoted_string (stream, name);
7908 fprintf (stream, "\n");
7909 }
7910 }
7911 \f
7912 /* Emit a linenumber to a stream. */
7913
7914 void
7915 alpha_output_lineno (stream, line)
7916 FILE *stream;
7917 int line;
7918 {
7919 if (write_symbols == DBX_DEBUG)
7920 {
7921 /* mips-tfile doesn't understand .stabd directives. */
7922 ++sym_lineno;
7923 fprintf (stream, "$LM%d:\n%s%d,0,%d,$LM%d\n",
7924 sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
7925 }
7926 else
7927 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
7928 }
7929 \f
7930 /* Structure to show the current status of registers and memory. */
7931
7932 struct shadow_summary
7933 {
7934 struct {
7935 unsigned int i : 31; /* Mask of int regs */
7936 unsigned int fp : 31; /* Mask of fp regs */
7937 unsigned int mem : 1; /* mem == imem | fpmem */
7938 } used, defd;
7939 };
7940
7941 static void summarize_insn PARAMS ((rtx, struct shadow_summary *, int));
7942 static void alpha_handle_trap_shadows PARAMS ((rtx));
7943
7944 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
7945 to the summary structure. SET is nonzero if the insn is setting the
7946 object, otherwise zero. */
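/* As an illustration, summarizing a body such as

	(set (reg:DI 1) (plus:DI (reg:DI 2) (mem:DI (reg:DI 3))))

   sets bit 1 of SUM->defd.i, bits 2 and 3 of SUM->used.i, and
   SUM->used.mem; the address computation of a MEM counts as a use
   of its registers whether the MEM itself is read or written.  */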
7947
7948 static void
7949 summarize_insn (x, sum, set)
7950 rtx x;
7951 struct shadow_summary *sum;
7952 int set;
7953 {
7954 const char *format_ptr;
7955 int i, j;
7956
7957 if (x == 0)
7958 return;
7959
7960 switch (GET_CODE (x))
7961 {
7962 /* ??? Note that this case would be incorrect if the Alpha had a
7963 ZERO_EXTRACT in SET_DEST. */
7964 case SET:
7965 summarize_insn (SET_SRC (x), sum, 0);
7966 summarize_insn (SET_DEST (x), sum, 1);
7967 break;
7968
7969 case CLOBBER:
7970 summarize_insn (XEXP (x, 0), sum, 1);
7971 break;
7972
7973 case USE:
7974 summarize_insn (XEXP (x, 0), sum, 0);
7975 break;
7976
7977 case ASM_OPERANDS:
7978 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
7979 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
7980 break;
7981
7982 case PARALLEL:
7983 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7984 summarize_insn (XVECEXP (x, 0, i), sum, 0);
7985 break;
7986
7987 case SUBREG:
7988 summarize_insn (SUBREG_REG (x), sum, 0);
7989 break;
7990
7991 case REG:
7992 {
7993 int regno = REGNO (x);
7994 unsigned long mask = ((unsigned long) 1) << (regno % 32);
7995
7996 if (regno == 31 || regno == 63)
7997 break;
7998
7999 if (set)
8000 {
8001 if (regno < 32)
8002 sum->defd.i |= mask;
8003 else
8004 sum->defd.fp |= mask;
8005 }
8006 else
8007 {
8008 if (regno < 32)
8009 sum->used.i |= mask;
8010 else
8011 sum->used.fp |= mask;
8012 }
8013 }
8014 break;
8015
8016 case MEM:
8017 if (set)
8018 sum->defd.mem = 1;
8019 else
8020 sum->used.mem = 1;
8021
8022 /* Find the regs used in memory address computation: */
8023 summarize_insn (XEXP (x, 0), sum, 0);
8024 break;
8025
8026 case CONST_INT: case CONST_DOUBLE:
8027 case SYMBOL_REF: case LABEL_REF: case CONST:
8028 case SCRATCH: case ASM_INPUT:
8029 break;
8030
8031 /* Handle common unary and binary ops for efficiency. */
8032 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8033 case MOD: case UDIV: case UMOD: case AND: case IOR:
8034 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8035 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8036 case NE: case EQ: case GE: case GT: case LE:
8037 case LT: case GEU: case GTU: case LEU: case LTU:
8038 summarize_insn (XEXP (x, 0), sum, 0);
8039 summarize_insn (XEXP (x, 1), sum, 0);
8040 break;
8041
8042 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8043 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8044 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8045 case SQRT: case FFS:
8046 summarize_insn (XEXP (x, 0), sum, 0);
8047 break;
8048
8049 default:
8050 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8051 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8052 switch (format_ptr[i])
8053 {
8054 case 'e':
8055 summarize_insn (XEXP (x, i), sum, 0);
8056 break;
8057
8058 case 'E':
8059 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8060 summarize_insn (XVECEXP (x, i, j), sum, 0);
8061 break;
8062
8063 case 'i':
8064 break;
8065
8066 default:
8067 abort ();
8068 }
8069 }
8070 }
8071
8072 /* Ensure a sufficient number of `trapb' insns are in the code when
8073 the user requests code with a trap precision of functions or
8074 instructions.
8075
8076 In naive mode, when the user requests a trap-precision of
8077 "instruction", a trapb is needed after every instruction that may
8078 generate a trap. This ensures that the code is resumption-safe, but
8079 it is also slow.
8080
8081 When optimizations are turned on, we delay issuing a trapb as long
8082 as possible. In this context, a trap shadow is the sequence of
8083 instructions that starts with a (potentially) trap generating
8084 instruction and extends to the next trapb or call_pal instruction
8085 (but GCC never generates call_pal by itself). We can delay (and
8086 therefore sometimes omit) a trapb subject to the following
8087 conditions:
8088
8089 (a) On entry to the trap shadow, if any Alpha register or memory
8090 location contains a value that is used as an operand value by some
8091 instruction in the trap shadow (live on entry), then no instruction
8092 in the trap shadow may modify the register or memory location.
8093
8094 (b) Within the trap shadow, the computation of the base register
8095 for a memory load or store instruction may not involve using the
8096 result of an instruction that might generate an UNPREDICTABLE
8097 result.
8098
8099 (c) Within the trap shadow, no register may be used more than once
8100 as a destination register. (This is to make life easier for the
8101 trap-handler.)
8102
8103 (d) The trap shadow may not include any branch instructions. */
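/* Schematically, in a sequence such as

	mulq/v $1,$2,$3		# may trap on integer overflow
	addq $3,$4,$5
	trapb

   the trapb may be delayed past the addq: the addq does not
   overwrite a value that was live on entry to the shadow (a),
   does not use a potentially UNPREDICTABLE result in a memory
   address (b), does not reuse a destination register already
   written in the shadow (c), and is not a branch (d).  */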
8104
8105 static void
8106 alpha_handle_trap_shadows (insns)
8107 rtx insns;
8108 {
8109 struct shadow_summary shadow;
8110 int trap_pending, exception_nesting;
8111 rtx i, n;
8112
8113 trap_pending = 0;
8114 exception_nesting = 0;
8115 shadow.used.i = 0;
8116 shadow.used.fp = 0;
8117 shadow.used.mem = 0;
8118 shadow.defd = shadow.used;
8119
8120 for (i = insns; i ; i = NEXT_INSN (i))
8121 {
8122 if (GET_CODE (i) == NOTE)
8123 {
8124 switch (NOTE_LINE_NUMBER (i))
8125 {
8126 case NOTE_INSN_EH_REGION_BEG:
8127 exception_nesting++;
8128 if (trap_pending)
8129 goto close_shadow;
8130 break;
8131
8132 case NOTE_INSN_EH_REGION_END:
8133 exception_nesting--;
8134 if (trap_pending)
8135 goto close_shadow;
8136 break;
8137
8138 case NOTE_INSN_EPILOGUE_BEG:
8139 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8140 goto close_shadow;
8141 break;
8142 }
8143 }
8144 else if (trap_pending)
8145 {
8146 if (alpha_tp == ALPHA_TP_FUNC)
8147 {
8148 if (GET_CODE (i) == JUMP_INSN
8149 && GET_CODE (PATTERN (i)) == RETURN)
8150 goto close_shadow;
8151 }
8152 else if (alpha_tp == ALPHA_TP_INSN)
8153 {
8154 if (optimize > 0)
8155 {
8156 struct shadow_summary sum;
8157
8158 sum.used.i = 0;
8159 sum.used.fp = 0;
8160 sum.used.mem = 0;
8161 sum.defd = sum.used;
8162
8163 switch (GET_CODE (i))
8164 {
8165 case INSN:
8166 /* Annoyingly, get_attr_trap will abort on these. */
8167 if (GET_CODE (PATTERN (i)) == USE
8168 || GET_CODE (PATTERN (i)) == CLOBBER)
8169 break;
8170
8171 summarize_insn (PATTERN (i), &sum, 0);
8172
8173 if ((sum.defd.i & shadow.defd.i)
8174 || (sum.defd.fp & shadow.defd.fp))
8175 {
8176 /* (c) would be violated */
8177 goto close_shadow;
8178 }
8179
8180 /* Combine shadow with summary of current insn: */
8181 shadow.used.i |= sum.used.i;
8182 shadow.used.fp |= sum.used.fp;
8183 shadow.used.mem |= sum.used.mem;
8184 shadow.defd.i |= sum.defd.i;
8185 shadow.defd.fp |= sum.defd.fp;
8186 shadow.defd.mem |= sum.defd.mem;
8187
8188 if ((sum.defd.i & shadow.used.i)
8189 || (sum.defd.fp & shadow.used.fp)
8190 || (sum.defd.mem & shadow.used.mem))
8191 {
8192 /* (a) would be violated (also takes care of (b)) */
8193 if (get_attr_trap (i) == TRAP_YES
8194 && ((sum.defd.i & sum.used.i)
8195 || (sum.defd.fp & sum.used.fp)))
8196 abort ();
8197
8198 goto close_shadow;
8199 }
8200 break;
8201
8202 case JUMP_INSN:
8203 case CALL_INSN:
8204 case CODE_LABEL:
8205 goto close_shadow;
8206
8207 default:
8208 abort ();
8209 }
8210 }
8211 else
8212 {
8213 close_shadow:
8214 n = emit_insn_before (gen_trapb (), i);
8215 PUT_MODE (n, TImode);
8216 PUT_MODE (i, TImode);
8217 trap_pending = 0;
8218 shadow.used.i = 0;
8219 shadow.used.fp = 0;
8220 shadow.used.mem = 0;
8221 shadow.defd = shadow.used;
8222 }
8223 }
8224 }
8225
8226 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8227 && GET_CODE (i) == INSN
8228 && GET_CODE (PATTERN (i)) != USE
8229 && GET_CODE (PATTERN (i)) != CLOBBER
8230 && get_attr_trap (i) == TRAP_YES)
8231 {
8232 if (optimize && !trap_pending)
8233 summarize_insn (PATTERN (i), &shadow, 0);
8234 trap_pending = 1;
8235 }
8236 }
8237 }
8238 \f
8239 /* Alpha can only issue instruction groups simultaneously if they are
8240 suitably aligned. This is very processor-specific. */
8241
8242 enum alphaev4_pipe {
8243 EV4_STOP = 0,
8244 EV4_IB0 = 1,
8245 EV4_IB1 = 2,
8246 EV4_IBX = 4
8247 };
8248
8249 enum alphaev5_pipe {
8250 EV5_STOP = 0,
8251 EV5_NONE = 1,
8252 EV5_E01 = 2,
8253 EV5_E0 = 4,
8254 EV5_E1 = 8,
8255 EV5_FAM = 16,
8256 EV5_FA = 32,
8257 EV5_FM = 64
8258 };
8259
8260 static enum alphaev4_pipe alphaev4_insn_pipe PARAMS ((rtx));
8261 static enum alphaev5_pipe alphaev5_insn_pipe PARAMS ((rtx));
8262 static rtx alphaev4_next_group PARAMS ((rtx, int *, int *));
8263 static rtx alphaev5_next_group PARAMS ((rtx, int *, int *));
8264 static rtx alphaev4_next_nop PARAMS ((int *));
8265 static rtx alphaev5_next_nop PARAMS ((int *));
8266
8267 static void alpha_align_insns
8268 PARAMS ((rtx, unsigned int, rtx (*)(rtx, int *, int *), rtx (*)(int *)));
8269
8270 static enum alphaev4_pipe
8271 alphaev4_insn_pipe (insn)
8272 rtx insn;
8273 {
8274 if (recog_memoized (insn) < 0)
8275 return EV4_STOP;
8276 if (get_attr_length (insn) != 4)
8277 return EV4_STOP;
8278
8279 switch (get_attr_type (insn))
8280 {
8281 case TYPE_ILD:
8282 case TYPE_FLD:
8283 return EV4_IBX;
8284
8285 case TYPE_LDSYM:
8286 case TYPE_IADD:
8287 case TYPE_ILOG:
8288 case TYPE_ICMOV:
8289 case TYPE_ICMP:
8290 case TYPE_IST:
8291 case TYPE_FST:
8292 case TYPE_SHIFT:
8293 case TYPE_IMUL:
8294 case TYPE_FBR:
8295 return EV4_IB0;
8296
8297 case TYPE_MISC:
8298 case TYPE_IBR:
8299 case TYPE_JSR:
8300 case TYPE_FCPYS:
8301 case TYPE_FCMOV:
8302 case TYPE_FADD:
8303 case TYPE_FDIV:
8304 case TYPE_FMUL:
8305 return EV4_IB1;
8306
8307 default:
8308 abort ();
8309 }
8310 }
8311
8312 static enum alphaev5_pipe
8313 alphaev5_insn_pipe (insn)
8314 rtx insn;
8315 {
8316 if (recog_memoized (insn) < 0)
8317 return EV5_STOP;
8318 if (get_attr_length (insn) != 4)
8319 return EV5_STOP;
8320
8321 switch (get_attr_type (insn))
8322 {
8323 case TYPE_ILD:
8324 case TYPE_FLD:
8325 case TYPE_LDSYM:
8326 case TYPE_IADD:
8327 case TYPE_ILOG:
8328 case TYPE_ICMOV:
8329 case TYPE_ICMP:
8330 return EV5_E01;
8331
8332 case TYPE_IST:
8333 case TYPE_FST:
8334 case TYPE_SHIFT:
8335 case TYPE_IMUL:
8336 case TYPE_MISC:
8337 case TYPE_MVI:
8338 return EV5_E0;
8339
8340 case TYPE_IBR:
8341 case TYPE_JSR:
8342 return EV5_E1;
8343
8344 case TYPE_FCPYS:
8345 return EV5_FAM;
8346
8347 case TYPE_FBR:
8348 case TYPE_FCMOV:
8349 case TYPE_FADD:
8350 case TYPE_FDIV:
8351 return EV5_FA;
8352
8353 case TYPE_FMUL:
8354 return EV5_FM;
8355
8356 default:
8357 abort ();
8358 }
8359 }
8360
8361 /* IN_USE is a mask of the slots currently filled within the insn group.
8362 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8363 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8364
8365 LEN is, of course, the length of the group in bytes. */
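/* For example, a load (EV4_IBX) followed by an add (EV4_IB0)
   first records EV4_IB0 | EV4_IBX; the add then finds IB0 taken
   but IBX set, assumes the hardware swaps the load down to IB1,
   and claims IB0 itself, yielding a full two-instruction group.  */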
8366
8367 static rtx
8368 alphaev4_next_group (insn, pin_use, plen)
8369 rtx insn;
8370 int *pin_use, *plen;
8371 {
8372 int len, in_use;
8373
8374 len = in_use = 0;
8375
8376 if (! INSN_P (insn)
8377 || GET_CODE (PATTERN (insn)) == CLOBBER
8378 || GET_CODE (PATTERN (insn)) == USE)
8379 goto next_and_done;
8380
8381 while (1)
8382 {
8383 enum alphaev4_pipe pipe;
8384
8385 pipe = alphaev4_insn_pipe (insn);
8386 switch (pipe)
8387 {
8388 case EV4_STOP:
8389 /* Force complex instructions to start new groups. */
8390 if (in_use)
8391 goto done;
8392
8393 /* If this is a completely unrecognized insn, it's an asm.
8394 We don't know how long it is, so record length as -1 to
8395 signal a needed realignment. */
8396 if (recog_memoized (insn) < 0)
8397 len = -1;
8398 else
8399 len = get_attr_length (insn);
8400 goto next_and_done;
8401
8402 case EV4_IBX:
8403 if (in_use & EV4_IB0)
8404 {
8405 if (in_use & EV4_IB1)
8406 goto done;
8407 in_use |= EV4_IB1;
8408 }
8409 else
8410 in_use |= EV4_IB0 | EV4_IBX;
8411 break;
8412
8413 case EV4_IB0:
8414 if (in_use & EV4_IB0)
8415 {
8416 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8417 goto done;
8418 in_use |= EV4_IB1;
8419 }
8420 in_use |= EV4_IB0;
8421 break;
8422
8423 case EV4_IB1:
8424 if (in_use & EV4_IB1)
8425 goto done;
8426 in_use |= EV4_IB1;
8427 break;
8428
8429 default:
8430 abort ();
8431 }
8432 len += 4;
8433
8434 /* Haifa doesn't do well scheduling branches. */
8435 if (GET_CODE (insn) == JUMP_INSN)
8436 goto next_and_done;
8437
8438 next:
8439 insn = next_nonnote_insn (insn);
8440
8441 if (!insn || ! INSN_P (insn))
8442 goto done;
8443
8444 /* Let Haifa tell us where it thinks insn group boundaries are. */
8445 if (GET_MODE (insn) == TImode)
8446 goto done;
8447
8448 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8449 goto next;
8450 }
8451
8452 next_and_done:
8453 insn = next_nonnote_insn (insn);
8454
8455 done:
8456 *plen = len;
8457 *pin_use = in_use;
8458 return insn;
8459 }
8460
8461 /* IN_USE is a mask of the slots currently filled within the insn group.
8462 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8463 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8464
8465 LEN is, of course, the length of the group in bytes. */
8466
8467 static rtx
8468 alphaev5_next_group (insn, pin_use, plen)
8469 rtx insn;
8470 int *pin_use, *plen;
8471 {
8472 int len, in_use;
8473
8474 len = in_use = 0;
8475
8476 if (! INSN_P (insn)
8477 || GET_CODE (PATTERN (insn)) == CLOBBER
8478 || GET_CODE (PATTERN (insn)) == USE)
8479 goto next_and_done;
8480
8481 while (1)
8482 {
8483 enum alphaev5_pipe pipe;
8484
8485 pipe = alphaev5_insn_pipe (insn);
8486 switch (pipe)
8487 {
8488 case EV5_STOP:
8489 /* Force complex instructions to start new groups. */
8490 if (in_use)
8491 goto done;
8492
8493 /* If this is a completely unrecognized insn, it's an asm.
8494 We don't know how long it is, so record length as -1 to
8495 signal a needed realignment. */
8496 if (recog_memoized (insn) < 0)
8497 len = -1;
8498 else
8499 len = get_attr_length (insn);
8500 goto next_and_done;
8501
8502 /* ??? Most of the places below, we would like to abort, as
8503 it would indicate an error either in Haifa, or in the
8504 scheduling description. Unfortunately, Haifa never
8505 schedules the last instruction of the BB, so we don't
8506 have an accurate TI bit to go off. */
8507 case EV5_E01:
8508 if (in_use & EV5_E0)
8509 {
8510 if (in_use & EV5_E1)
8511 goto done;
8512 in_use |= EV5_E1;
8513 }
8514 else
8515 in_use |= EV5_E0 | EV5_E01;
8516 break;
8517
8518 case EV5_E0:
8519 if (in_use & EV5_E0)
8520 {
8521 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8522 goto done;
8523 in_use |= EV5_E1;
8524 }
8525 in_use |= EV5_E0;
8526 break;
8527
8528 case EV5_E1:
8529 if (in_use & EV5_E1)
8530 goto done;
8531 in_use |= EV5_E1;
8532 break;
8533
8534 case EV5_FAM:
8535 if (in_use & EV5_FA)
8536 {
8537 if (in_use & EV5_FM)
8538 goto done;
8539 in_use |= EV5_FM;
8540 }
8541 else
8542 in_use |= EV5_FA | EV5_FAM;
8543 break;
8544
8545 case EV5_FA:
8546 if (in_use & EV5_FA)
8547 goto done;
8548 in_use |= EV5_FA;
8549 break;
8550
8551 case EV5_FM:
8552 if (in_use & EV5_FM)
8553 goto done;
8554 in_use |= EV5_FM;
8555 break;
8556
8557 case EV5_NONE:
8558 break;
8559
8560 default:
8561 abort ();
8562 }
8563 len += 4;
8564
8565 /* Haifa doesn't do well scheduling branches. */
8566 /* ??? If this is predicted not-taken, slotting continues, except
8567 that no more IBR, FBR, or JSR insns may be slotted. */
8568 if (GET_CODE (insn) == JUMP_INSN)
8569 goto next_and_done;
8570
8571 next:
8572 insn = next_nonnote_insn (insn);
8573
8574 if (!insn || ! INSN_P (insn))
8575 goto done;
8576
8577 /* Let Haifa tell us where it thinks insn group boundaries are. */
8578 if (GET_MODE (insn) == TImode)
8579 goto done;
8580
8581 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8582 goto next;
8583 }
8584
8585 next_and_done:
8586 insn = next_nonnote_insn (insn);
8587
8588 done:
8589 *plen = len;
8590 *pin_use = in_use;
8591 return insn;
8592 }
8593
8594 static rtx
8595 alphaev4_next_nop (pin_use)
8596 int *pin_use;
8597 {
8598 int in_use = *pin_use;
8599 rtx nop;
8600
8601 if (!(in_use & EV4_IB0))
8602 {
8603 in_use |= EV4_IB0;
8604 nop = gen_nop ();
8605 }
8606 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
8607 {
8608 in_use |= EV4_IB1;
8609 nop = gen_nop ();
8610 }
8611 else if (TARGET_FP && !(in_use & EV4_IB1))
8612 {
8613 in_use |= EV4_IB1;
8614 nop = gen_fnop ();
8615 }
8616 else
8617 nop = gen_unop ();
8618
8619 *pin_use = in_use;
8620 return nop;
8621 }
8622
8623 static rtx
8624 alphaev5_next_nop (pin_use)
8625 int *pin_use;
8626 {
8627 int in_use = *pin_use;
8628 rtx nop;
8629
8630 if (!(in_use & EV5_E1))
8631 {
8632 in_use |= EV5_E1;
8633 nop = gen_nop ();
8634 }
8635 else if (TARGET_FP && !(in_use & EV5_FA))
8636 {
8637 in_use |= EV5_FA;
8638 nop = gen_fnop ();
8639 }
8640 else if (TARGET_FP && !(in_use & EV5_FM))
8641 {
8642 in_use |= EV5_FM;
8643 nop = gen_fnop ();
8644 }
8645 else
8646 nop = gen_unop ();
8647
8648 *pin_use = in_use;
8649 return nop;
8650 }
8651
8652 /* The instruction group alignment main loop. */
8653
8654 static void
8655 alpha_align_insns (insns, max_align, next_group, next_nop)
8656 rtx insns;
8657 unsigned int max_align;
8658 rtx (*next_group) PARAMS ((rtx, int *, int *));
8659 rtx (*next_nop) PARAMS ((int *));
8660 {
8661 /* ALIGN is the known alignment for the insn group. */
8662 unsigned int align;
8663 /* OFS is the offset of the current insn in the insn group. */
8664 int ofs;
8665 int prev_in_use, in_use, len;
8666 rtx i, next;
8667
8668 /* Let shorten_branches take care of assigning alignments to code labels. */
8669 shorten_branches (insns);
8670
8671 if (align_functions < 4)
8672 align = 4;
8673 else if ((unsigned int) align_functions < max_align)
8674 align = align_functions;
8675 else
8676 align = max_align;
8677
8678 ofs = prev_in_use = 0;
8679 i = insns;
8680 if (GET_CODE (i) == NOTE)
8681 i = next_nonnote_insn (i);
8682
8683 while (i)
8684 {
8685 next = (*next_group) (i, &in_use, &len);
8686
8687 /* When we see a label, resync alignment etc. */
8688 if (GET_CODE (i) == CODE_LABEL)
8689 {
8690 unsigned int new_align = 1 << label_to_alignment (i);
8691
8692 if (new_align >= align)
8693 {
8694 align = new_align < max_align ? new_align : max_align;
8695 ofs = 0;
8696 }
8697
8698 else if (ofs & (new_align-1))
8699 ofs = (ofs | (new_align-1)) + 1;
8700 if (len != 0)
8701 abort ();
8702 }
8703
8704 /* Handle complex instructions specially. */
8705 else if (in_use == 0)
8706 {
8707 /* Asms will have length < 0. This is a signal that we have
8708 lost alignment knowledge. Assume, however, that the asm
8709 will not mis-align instructions. */
8710 if (len < 0)
8711 {
8712 ofs = 0;
8713 align = 4;
8714 len = 0;
8715 }
8716 }
8717
8718 /* If the known alignment is smaller than the recognized insn group,
8719 realign the output. */
8720 else if ((int) align < len)
8721 {
8722 unsigned int new_log_align = len > 8 ? 4 : 3;
8723 rtx prev, where;
8724
8725 where = prev = prev_nonnote_insn (i);
8726 if (!where || GET_CODE (where) != CODE_LABEL)
8727 where = i;
8728
8729 /* Can't realign between a call and its gp reload. */
8730 if (! (TARGET_EXPLICIT_RELOCS
8731 && prev && GET_CODE (prev) == CALL_INSN))
8732 {
8733 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
8734 align = 1 << new_log_align;
8735 ofs = 0;
8736 }
8737 }
8738
8739 /* If the group won't fit in the same INT16 as the previous,
8740 we need to add padding to keep the group together. Rather
8741 than simply leaving the insn filling to the assembler, we
8742 can make use of the knowledge of what sorts of instructions
8743 were issued in the previous group to make sure that all of
8744 the added nops are really free. */
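/* For instance, with ALIGN == 16 and OFS == 12, an 8-byte group
   would straddle the fetch block, so (16 - 12) / 4 == 1 nop is
   inserted and the group starts at the next aligned boundary.  */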
8745 else if (ofs + len > (int) align)
8746 {
8747 int nop_count = (align - ofs) / 4;
8748 rtx where;
8749
8750 /* Insert nops before labels, branches, and calls to truly merge
8751 the execution of the nops with the previous instruction group. */
8752 where = prev_nonnote_insn (i);
8753 if (where)
8754 {
8755 if (GET_CODE (where) == CODE_LABEL)
8756 {
8757 rtx where2 = prev_nonnote_insn (where);
8758 if (where2 && GET_CODE (where2) == JUMP_INSN)
8759 where = where2;
8760 }
8761 else if (GET_CODE (where) == INSN)
8762 where = i;
8763 }
8764 else
8765 where = i;
8766
8767 do
8768 emit_insn_before ((*next_nop)(&prev_in_use), where);
8769 while (--nop_count);
8770 ofs = 0;
8771 }
8772
8773 ofs = (ofs + len) & (align - 1);
8774 prev_in_use = in_use;
8775 i = next;
8776 }
8777 }
8778 \f
8779 /* Machine dependent reorg pass. */
8780
8781 void
8782 alpha_reorg (insns)
8783 rtx insns;
8784 {
8785 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
8786 alpha_handle_trap_shadows (insns);
8787
8788 /* Due to the number of extra trapb insns, don't bother fixing up
8789 alignment when trap precision is instruction. Moreover, we can
8790 only do our job when sched2 is run. */
8791 if (optimize && !optimize_size
8792 && alpha_tp != ALPHA_TP_INSN
8793 && flag_schedule_insns_after_reload)
8794 {
8795 if (alpha_cpu == PROCESSOR_EV4)
8796 alpha_align_insns (insns, 8, alphaev4_next_group, alphaev4_next_nop);
8797 else if (alpha_cpu == PROCESSOR_EV5)
8798 alpha_align_insns (insns, 16, alphaev5_next_group, alphaev5_next_nop);
8799 }
8800 }
8801 \f
8802 /* Check a floating-point value for validity for a particular machine mode. */
8803
8804 static const char * const float_strings[] =
8805 {
8806 /* These are for FLOAT_VAX. */
8807 "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
8808 "-1.70141173319264430e+38",
8809 "2.93873587705571877e-39", /* 2^-128 */
8810 "-2.93873587705571877e-39",
8811 /* These are for the default broken IEEE mode, which traps
8812 on infinity or denormal numbers. */
8813 "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
8814 "-3.402823466385288598117e+38",
8815 "1.1754943508222875079687e-38", /* 2^-126 */
8816 "-1.1754943508222875079687e-38",
8817 };
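/* Roughly, the checks below clamp an SFmode constant that exceeds
   float_values[0] or float_values[1] (the positive and negative
   range limits) to that limit, and flush a nonzero value lying
   strictly between float_values[3] and float_values[2] (the
   smallest representable magnitudes) to zero, since such values
   would overflow or trap as denormals at run time.  */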
8818
8819 static REAL_VALUE_TYPE float_values[8];
8820 static int inited_float_values = 0;
8821
8822 int
8823 check_float_value (mode, d, overflow)
8824 enum machine_mode mode;
8825 REAL_VALUE_TYPE *d;
8826 int overflow ATTRIBUTE_UNUSED;
8827 {
8828
8829 if (TARGET_IEEE || TARGET_IEEE_CONFORMANT || TARGET_IEEE_WITH_INEXACT)
8830 return 0;
8831
8832 if (inited_float_values == 0)
8833 {
8834 int i;
8835 for (i = 0; i < 8; i++)
8836 float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);
8837
8838 inited_float_values = 1;
8839 }
8840
8841 if (mode == SFmode)
8842 {
8843 REAL_VALUE_TYPE r;
8844 REAL_VALUE_TYPE *fvptr;
8845
8846 if (TARGET_FLOAT_VAX)
8847 fvptr = &float_values[0];
8848 else
8849 fvptr = &float_values[4];
8850
8851 memcpy (&r, d, sizeof (REAL_VALUE_TYPE));
8852 if (REAL_VALUES_LESS (fvptr[0], r))
8853 {
8854 memcpy (d, &fvptr[0], sizeof (REAL_VALUE_TYPE));
8855 return 1;
8856 }
8857 else if (REAL_VALUES_LESS (r, fvptr[1]))
8858 {
8859 memcpy (d, &fvptr[1], sizeof (REAL_VALUE_TYPE));
8860 return 1;
8861 }
8862 else if (REAL_VALUES_LESS (dconst0, r)
8863 && REAL_VALUES_LESS (r, fvptr[2]))
8864 {
8865 memcpy (d, &dconst0, sizeof (REAL_VALUE_TYPE));
8866 return 1;
8867 }
8868 else if (REAL_VALUES_LESS (r, dconst0)
8869 && REAL_VALUES_LESS (fvptr[3], r))
8870 {
8871 memcpy (d, &dconst0, sizeof (REAL_VALUE_TYPE));
8872 return 1;
8873 }
8874 }
8875
8876 return 0;
8877 }
8878 \f
8879 #ifdef OBJECT_FORMAT_ELF
8880
8881 /* Switch to the section to which we should output X. The only thing
8882 special we do here is to honor small data. */
8883
8884 static void
8885 alpha_elf_select_rtx_section (mode, x, align)
8886 enum machine_mode mode;
8887 rtx x;
8888 unsigned HOST_WIDE_INT align;
8889 {
8890 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
8891 /* ??? Consider using mergeable sdata sections. */
8892 sdata_section ();
8893 else
8894 default_elf_select_rtx_section (mode, x, align);
8895 }
8896
8897 #endif /* OBJECT_FORMAT_ELF */
8898 \f
8899 #if TARGET_ABI_OPEN_VMS
8900
8901 /* Return the VMS argument type corresponding to MODE. */
8902
8903 enum avms_arg_type
8904 alpha_arg_type (mode)
8905 enum machine_mode mode;
8906 {
8907 switch (mode)
8908 {
8909 case SFmode:
8910 return TARGET_FLOAT_VAX ? FF : FS;
8911 case DFmode:
8912 return TARGET_FLOAT_VAX ? FD : FT;
8913 default:
8914 return I64;
8915 }
8916 }
8917
8918 /* Return an rtx for an integer representing the VMS Argument Information
8919 register value. */
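/* The AI register thus carries the argument count in bits 0-7 and
   a three-bit avms_arg_type code for each of the first six
   arguments starting at bit 8. For example, a call f(double, int)
   compiled with VAX floats yields 2 | (FD << 8) | (I64 << 11).  */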
8920
8921 rtx
8922 alpha_arg_info_reg_val (cum)
8923 CUMULATIVE_ARGS cum;
8924 {
8925 unsigned HOST_WIDE_INT regval = cum.num_args;
8926 int i;
8927
8928 for (i = 0; i < 6; i++)
8929 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
8930
8931 return GEN_INT (regval);
8932 }
8933 \f
8934 #include <splay-tree.h>
8935
8936 /* Structure to collect function names for final output
8937 in link section. */
8938
8939 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
8940
8941 struct alpha_links
8942 {
8943 rtx linkage;
8944 enum links_kind kind;
8945 };
8946
8947 static splay_tree alpha_links;
8948
8949 static int mark_alpha_links_node PARAMS ((splay_tree_node, void *));
8950 static void mark_alpha_links PARAMS ((void *));
8951 static int alpha_write_one_linkage PARAMS ((splay_tree_node, void *));
8952
8953 /* Protect alpha_links from garbage collection. */
8954
8955 static int
8956 mark_alpha_links_node (node, data)
8957 splay_tree_node node;
8958 void *data ATTRIBUTE_UNUSED;
8959 {
8960 struct alpha_links *links = (struct alpha_links *) node->value;
8961 ggc_mark_rtx (links->linkage);
8962 return 0;
8963 }
8964
8965 static void
8966 mark_alpha_links (ptr)
8967 void *ptr;
8968 {
8969 splay_tree tree = *(splay_tree *) ptr;
8970 splay_tree_foreach (tree, mark_alpha_links_node, NULL);
8971 }
8972
8973 /* Make (or fake) .linkage entry for function call.
8974
8975 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
8976
8977 Return a SYMBOL_REF rtx for the linkage. */
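/* For example, for a routine foo this returns the symbol $foo..lk;
   alpha_write_linkage below then emits either a local linkage pair

	$foo..lk:
		.quad foo..en
		.quad foo

   or a ".linkage foo" request when foo turns out to be external.  */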
8978
8979 rtx
8980 alpha_need_linkage (name, is_local)
8981 const char *name;
8982 int is_local;
8983 {
8984 splay_tree_node node;
8985 struct alpha_links *al;
8986
8987 if (name[0] == '*')
8988 name++;
8989
8990 if (alpha_links)
8991 {
8992 /* Is this name already defined? */
8993
8994 node = splay_tree_lookup (alpha_links, (splay_tree_key) name);
8995 if (node)
8996 {
8997 al = (struct alpha_links *) node->value;
8998 if (is_local)
8999 {
9000 /* Defined here but external assumed. */
9001 if (al->kind == KIND_EXTERN)
9002 al->kind = KIND_LOCAL;
9003 }
9004 else
9005 {
9006 /* Used here but unused assumed. */
9007 if (al->kind == KIND_UNUSED)
9008 al->kind = KIND_LOCAL;
9009 }
9010 return al->linkage;
9011 }
9012 }
9013 else
9014 {
9015 alpha_links = splay_tree_new ((splay_tree_compare_fn) strcmp,
9016 (splay_tree_delete_key_fn) free,
9017 (splay_tree_delete_value_fn) free);
9018 ggc_add_root (&alpha_links, 1, 1, mark_alpha_links);
9019 }
9020
9021 al = (struct alpha_links *) xmalloc (sizeof (struct alpha_links));
9022 name = xstrdup (name);
9023
9024 /* Assume external if no definition. */
9025 al->kind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9026
9027 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9028 get_identifier (name);
9029
9030 /* Construct a SYMBOL_REF for us to call. */
9031 {
9032 size_t name_len = strlen (name);
9033 char *linksym = alloca (name_len + 6);
9034 linksym[0] = '$';
9035 memcpy (linksym + 1, name, name_len);
9036 memcpy (linksym + 1 + name_len, "..lk", 5);
9037 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9038 ggc_alloc_string (linksym, name_len + 5));
9039 }
9040
9041 splay_tree_insert (alpha_links, (splay_tree_key) name,
9042 (splay_tree_value) al);
9043
9044 return al->linkage;
9045 }
9046
9047 static int
9048 alpha_write_one_linkage (node, data)
9049 splay_tree_node node;
9050 void *data;
9051 {
9052 const char *const name = (const char *) node->key;
9053 struct alpha_links *links = (struct alpha_links *) node->value;
9054 FILE *stream = (FILE *) data;
9055
9056 if (links->kind == KIND_UNUSED
9057 || ! TREE_SYMBOL_REFERENCED (get_identifier (name)))
9058 return 0;
9059
9060 fprintf (stream, "$%s..lk:\n", name);
9061 if (links->kind == KIND_LOCAL)
9062 {
9063 /* Local and used, build linkage pair. */
9064 fprintf (stream, "\t.quad %s..en\n", name);
9065 fprintf (stream, "\t.quad %s\n", name);
9066 }
9067 else
9068 {
9069 /* External and used, request linkage pair. */
9070 fprintf (stream, "\t.linkage %s\n", name);
9071 }
9072
9073 return 0;
9074 }
9075
9076 void
9077 alpha_write_linkage (stream)
9078 FILE *stream;
9079 {
9080 if (alpha_links)
9081 {
9082 readonly_data_section ();
9083 fprintf (stream, "\t.align 3\n");
9084 splay_tree_foreach (alpha_links, alpha_write_one_linkage, stream);
9085 }
9086 }
9087
9088 /* Given a decl, a section name, and whether the decl initializer
9089 has relocs, choose attributes for the section. */
9090
9091 #define SECTION_VMS_OVERLAY SECTION_FORGET
9092 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9093 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9094
9095 static unsigned int
9096 vms_section_type_flags (decl, name, reloc)
9097 tree decl;
9098 const char *name;
9099 int reloc;
9100 {
9101 unsigned int flags = default_section_type_flags (decl, name, reloc);
9102
9103 if (decl && DECL_ATTRIBUTES (decl)
9104 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9105 flags |= SECTION_VMS_OVERLAY;
9106 if (decl && DECL_ATTRIBUTES (decl)
9107 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9108 flags |= SECTION_VMS_GLOBAL;
9109 if (decl && DECL_ATTRIBUTES (decl)
9110 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9111 flags |= SECTION_VMS_INITIALIZE;
9112
9113 return flags;
9114 }
9115
9116 /* Switch to an arbitrary section NAME with attributes as specified
9117 by FLAGS. ALIGN specifies any known alignment requirements for
9118 the section; 0 if the default should be used. */
9119
9120 static void
9121 vms_asm_named_section (name, flags)
9122 const char *name;
9123 unsigned int flags;
9124 {
9125 fputc ('\n', asm_out_file);
9126 fprintf (asm_out_file, ".section\t%s", name);
9127
9128 if (flags & SECTION_VMS_OVERLAY)
9129 fprintf (asm_out_file, ",OVR");
9130 if (flags & SECTION_VMS_GLOBAL)
9131 fprintf (asm_out_file, ",GBL");
9132 if (flags & SECTION_VMS_INITIALIZE)
9133 fprintf (asm_out_file, ",NOMOD");
9134 if (flags & SECTION_DEBUG)
9135 fprintf (asm_out_file, ",NOWRT");
9136
9137 fputc ('\n', asm_out_file);
9138 }
9139
9140 /* Record an element in the table of global constructors. SYMBOL is
9141 a SYMBOL_REF of the function to be called; PRIORITY is a number
9142 between 0 and MAX_INIT_PRIORITY.
9143
9144 Differs from default_ctors_section_asm_out_constructor in that the
9145 width of the .ctors entry is always 64 bits, rather than the 32 bits
9146 used by a normal pointer. */
9147
9148 static void
9149 vms_asm_out_constructor (symbol, priority)
9150 rtx symbol;
9151 int priority ATTRIBUTE_UNUSED;
9152 {
9153 ctors_section ();
9154 assemble_align (BITS_PER_WORD);
9155 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9156 }
9157
9158 static void
9159 vms_asm_out_destructor (symbol, priority)
9160 rtx symbol;
9161 int priority ATTRIBUTE_UNUSED;
9162 {
9163 dtors_section ();
9164 assemble_align (BITS_PER_WORD);
9165 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9166 }
9167 #else
9168
9169 rtx
9170 alpha_need_linkage (name, is_local)
9171 const char *name ATTRIBUTE_UNUSED;
9172 int is_local ATTRIBUTE_UNUSED;
9173 {
9174 return NULL_RTX;
9175 }
9176
9177 #endif /* TARGET_ABI_OPEN_VMS */
9178 \f
9179 #if TARGET_ABI_UNICOSMK
9180
9181 static void unicosmk_output_module_name PARAMS ((FILE *));
9182 static void unicosmk_output_default_externs PARAMS ((FILE *));
9183 static void unicosmk_output_dex PARAMS ((FILE *));
9184 static void unicosmk_output_externs PARAMS ((FILE *));
9185 static void unicosmk_output_addr_vec PARAMS ((FILE *, rtx));
9186 static const char *unicosmk_ssib_name PARAMS ((void));
9187 static int unicosmk_special_name PARAMS ((const char *));
9188
9189 /* Define the offset between two registers, one to be eliminated, and the
9190 other its replacement, at the start of a routine. */
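/* For instance, with 16 bytes of saved registers, a 32-byte frame
   and no outgoing arguments, FIXED_SIZE below is 16 + 48 == 64
   (the register saves plus 48 bytes of fixed overhead), so
   eliminating the frame pointer into the hard frame pointer gives
   -64, while eliminating the arg pointer into the stack pointer
   gives 64 + 32 == 96.  */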
9191
9192 int
9193 unicosmk_initial_elimination_offset (from, to)
9194 int from;
9195 int to;
9196 {
9197 int fixed_size;
9198
9199 fixed_size = alpha_sa_size ();
9200 if (fixed_size != 0)
9201 fixed_size += 48;
9202
9203 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9204 return -fixed_size;
9205 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9206 return 0;
9207 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9208 return (ALPHA_ROUND (current_function_outgoing_args_size)
9209 + ALPHA_ROUND (get_frame_size ()));
9210 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9211 return (ALPHA_ROUND (fixed_size)
9212 + ALPHA_ROUND (get_frame_size ()
9213 + current_function_outgoing_args_size));
9214 else
9215 abort ();
9216 }
9217
9218 /* Output the module name for .ident and .end directives. We have to strip
9219 directories and make sure that the module name starts with a letter
9220 or '$'. */
9221
9222 static void
9223 unicosmk_output_module_name (file)
9224 FILE *file;
9225 {
9226 const char *name;
9227
9228 /* Strip directories. */
9229
9230 name = strrchr (main_input_filename, '/');
9231 if (name)
9232 ++name;
9233 else
9234 name = main_input_filename;
9235
9236 /* CAM only accepts module names that start with a letter or '$'. We
9237 prefix the module name with a '$' if necessary. */
9238
9239 if (!ISALPHA (*name))
9240 putc ('$', file);
9241 output_clean_symbol_name (file, name);
9242 }
9243
9244 /* Output text to appear at the beginning of an assembler file. */
9245
9246 void
9247 unicosmk_asm_file_start (file)
9248 FILE *file;
9249 {
9250 int i;
9251
9252 fputs ("\t.ident\t", file);
9253 unicosmk_output_module_name (file);
9254 fputs ("\n\n", file);
9255
9256 /* The Unicos/Mk assembler (CAM) uses different register names: rN
9257 for the integer register N and fN for the floating-point register
9258 N.
9259
9260 Instead of trying to use these names in alpha.md, we simply emit
9261 micro definitions of the symbols $N and $fN which refer to the
9262 appropriate registers. */
9263
9264 for (i = 0; i < 32; ++i)
9265 fprintf (file, "$%d <- r%d\n", i, i);
9266
9267 for (i = 0; i < 32; ++i)
9268 fprintf (file, "$f%d <- f%d\n", i, i);
9269
9270 putc ('\n', file);
9271
9272 /* The .align directive fills unused space with zeroes, which does not work
9273 in code sections. We define the macro 'gcc@code@align' which uses nops
9274 instead. Note that it assumes that code sections always have the
9275 biggest possible alignment since . refers to the current offset from
9276 the beginning of the section. */
9277
9278 fputs ("\t.macro gcc@code@align n\n", file);
9279 fputs ("gcc@n@bytes = 1 << n\n", file);
9280 fputs ("gcc@here = . % gcc@n@bytes\n", file);
9281 fputs ("\t.if ne, gcc@here, 0\n", file);
9282 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", file);
9283 fputs ("\tbis r31,r31,r31\n", file);
9284 fputs ("\t.endr\n", file);
9285 fputs ("\t.endif\n", file);
9286 fputs ("\t.endm gcc@code@align\n\n", file);
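/* Schematically, "gcc@code@align 4" computes the current offset
   modulo 16 and, if it is nonzero, emits (16 - offset) / 4 copies
   of the canonical nop "bis r31,r31,r31" to reach the next
   16-byte boundary.  */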
9287
9288 /* Output extern declarations which should always be visible. */
9289 unicosmk_output_default_externs (file);
9290
9291 /* Open a dummy section. We always need to be inside a section for the
9292 section-switching code to work correctly.
9293 ??? This should be a module id or something like that. I still have to
9294 figure out what the rules for those are. */
9295 fputs ("\n\t.psect\t$SG00000,data\n", file);
9296 }
9297
9298 /* Output text to appear at the end of an assembler file. This includes all
9299 pending extern declarations and DEX expressions. */
9300
9301 void
9302 unicosmk_asm_file_end (file)
9303 FILE *file;
9304 {
9305 fputs ("\t.endp\n\n", file);
9306
9307 /* Output all pending externs. */
9308
9309 unicosmk_output_externs (file);
9310
9311 /* Output dex definitions used for functions whose names conflict with
9312 register names. */
9313
9314 unicosmk_output_dex (file);
9315
9316 fputs ("\t.end\t", file);
9317 unicosmk_output_module_name (file);
9318 putc ('\n', file);
9319 }
9320
9321 /* Output the definition of a common variable. */
9322
9323 void
9324 unicosmk_output_common (file, name, size, align)
9325 FILE *file;
9326 const char *name;
9327 int size;
9328 int align;
9329 {
9330 tree name_tree;
9331 printf ("T3E__: common %s\n", name);
9332
9333 common_section ();
9334 fputs ("\t.endp\n\n\t.psect ", file);
9335 assemble_name (file, name);
9336 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9337 fprintf (file, "\t.byte\t0:%d\n", size);
9338
9339 /* Mark the symbol as defined in this module. */
9340 name_tree = get_identifier (name);
9341 TREE_ASM_WRITTEN (name_tree) = 1;
9342 }
9343
9344 #define SECTION_PUBLIC SECTION_MACH_DEP
9345 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9346 static int current_section_align;
9347
9348 static unsigned int
9349 unicosmk_section_type_flags (decl, name, reloc)
9350 tree decl;
9351 const char *name;
9352 int reloc ATTRIBUTE_UNUSED;
9353 {
9354 unsigned int flags = default_section_type_flags (decl, name, reloc);
9355
9356 if (!decl)
9357 return flags;
9358
9359 if (TREE_CODE (decl) == FUNCTION_DECL)
9360 {
9361 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9362 if (align_functions_log > current_section_align)
9363 current_section_align = align_functions_log;
9364
9365 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9366 flags |= SECTION_MAIN;
9367 }
9368 else
9369 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9370
9371 if (TREE_PUBLIC (decl))
9372 flags |= SECTION_PUBLIC;
9373
9374 return flags;
9375 }
9376
9377 /* Generate a section name for decl and associate it with the
9378 declaration. */
9379
9380 static void
9381 unicosmk_unique_section (decl, reloc)
9382 tree decl;
9383 int reloc ATTRIBUTE_UNUSED;
9384 {
9385 const char *name;
9386 int len;
9387
9388 if (!decl)
9389 abort ();
9390
9391 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9392 name = alpha_strip_name_encoding (name);
9393 len = strlen (name);
9394
9395 if (TREE_CODE (decl) == FUNCTION_DECL)
9396 {
9397 char *string;
9398
9399 /* It is essential that we prefix the section name here because
9400 otherwise the section names generated for constructors and
9401 destructors confuse collect2. */
9402
9403 string = alloca (len + 6);
9404 sprintf (string, "code@%s", name);
9405 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9406 }
9407 else if (TREE_PUBLIC (decl))
9408 DECL_SECTION_NAME (decl) = build_string (len, name);
9409 else
9410 {
9411 char *string;
9412
9413 string = alloca (len + 6);
9414 sprintf (string, "data@%s", name);
9415 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9416 }
9417 }
9418
9419 /* Switch to an arbitrary section NAME with attributes as specified
9420 by FLAGS. ALIGN specifies any known alignment requirements for
9421 the section; 0 if the default should be used. */
9422
9423 static void
9424 unicosmk_asm_named_section (name, flags)
9425 const char *name;
9426 unsigned int flags;
9427 {
9428 const char *kind;
9429
9430 /* Close the previous section. */
9431
9432 fputs ("\t.endp\n\n", asm_out_file);
9433
9434 /* Find out what kind of section we are opening. */
9435
9436 if (flags & SECTION_MAIN)
9437 fputs ("\t.start\tmain\n", asm_out_file);
9438
9439 if (flags & SECTION_CODE)
9440 kind = "code";
9441 else if (flags & SECTION_PUBLIC)
9442 kind = "common";
9443 else
9444 kind = "data";
9445
9446 if (current_section_align != 0)
9447 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9448 current_section_align, kind);
9449 else
9450 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9451 }
9452
9453 static void
9454 unicosmk_insert_attributes (decl, attr_ptr)
9455 tree decl;
9456 tree *attr_ptr ATTRIBUTE_UNUSED;
9457 {
9458 if (DECL_P (decl)
9459 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9460 unicosmk_unique_section (decl, 0);
9461 }
9462
9463 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9464 in code sections because .align fills unused space with zeroes. */
9465
9466 void
9467 unicosmk_output_align (file, align)
9468 FILE *file;
9469 int align;
9470 {
9471 if (inside_function)
9472 fprintf (file, "\tgcc@code@align\t%d\n", align);
9473 else
9474 fprintf (file, "\t.align\t%d\n", align);
9475 }
9476
9477 /* Add a case vector to the current function's list of deferred case
9478 vectors. Case vectors have to be put into a separate section because CAM
9479 does not allow data definitions in code sections. */
9480
9481 void
9482 unicosmk_defer_case_vector (lab, vec)
9483 rtx lab;
9484 rtx vec;
9485 {
9486 struct machine_function *machine = cfun->machine;
9487
9488 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9489 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9490 machine->addr_list);
9491 }
9492
9493 /* Output a case vector. */
9494
9495 static void
9496 unicosmk_output_addr_vec (file, vec)
9497 FILE *file;
9498 rtx vec;
9499 {
9500 rtx lab = XEXP (vec, 0);
9501 rtx body = XEXP (vec, 1);
9502 int vlen = XVECLEN (body, 0);
9503 int idx;
9504
9505 ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (lab));
9506
9507 for (idx = 0; idx < vlen; idx++)
9508 {
9509 ASM_OUTPUT_ADDR_VEC_ELT
9510 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9511 }
9512 }
9513
9514 /* Output current function's deferred case vectors. */
9515
9516 static void
9517 unicosmk_output_deferred_case_vectors (file)
9518 FILE *file;
9519 {
9520 struct machine_function *machine = cfun->machine;
9521 rtx t;
9522
9523 if (machine->addr_list == NULL_RTX)
9524 return;
9525
9526 data_section ();
9527 for (t = machine->addr_list; t; t = XEXP (t, 1))
9528 unicosmk_output_addr_vec (file, XEXP (t, 0));
9529 }
9530
9531 /* Set up the dynamic subprogram information block (DSIB) and update the
9532 frame pointer register ($15) for subroutines which have a frame. If the
9533 subroutine doesn't have a frame, simply increment $15. */
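/* The 64-byte DSIB built below is laid out roughly as follows,
   with offsets taken from the new stack pointer:

	56	return address
	48	old frame pointer
	32	pointer to the static SIB
	24	CIW index

   the remaining words are currently unused, and $15 is left
   pointing just past the block.  */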
9534
9535 static void
9536 unicosmk_gen_dsib (imaskP)
9537 unsigned long * imaskP;
9538 {
9539 if (alpha_procedure_type == PT_STACK)
9540 {
9541 const char *ssib_name;
9542 rtx mem;
9543
9544 /* Allocate 64 bytes for the DSIB. */
9545
9546 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
9547 GEN_INT (-64))));
9548 emit_insn (gen_blockage ());
9549
9550 /* Save the return address. */
9551
9552 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
9553 set_mem_alias_set (mem, alpha_sr_alias_set);
9554 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
9555 (*imaskP) &= ~(1L << REG_RA);
9556
9557 /* Save the old frame pointer. */
9558
9559 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
9560 set_mem_alias_set (mem, alpha_sr_alias_set);
9561 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
9562 (*imaskP) &= ~(1L << HARD_FRAME_POINTER_REGNUM);
9563
9564 emit_insn (gen_blockage ());
9565
9566 /* Store the SSIB pointer. */
9567
9568 ssib_name = ggc_strdup (unicosmk_ssib_name ());
9569 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
9570 set_mem_alias_set (mem, alpha_sr_alias_set);
9571
9572 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
9573 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
9574 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
9575
9576 /* Save the CIW index. */
9577
9578 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
9579 set_mem_alias_set (mem, alpha_sr_alias_set);
9580 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
9581
9582 emit_insn (gen_blockage ());
9583
9584 /* Set the new frame pointer. */
9585
9586 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9587 stack_pointer_rtx, GEN_INT (64))));
9588
9589 }
9590 else
9591 {
9592 /* Increment the frame pointer register to indicate that we do not
9593 have a frame. */
9594
9595 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9596 hard_frame_pointer_rtx, GEN_INT (1))));
9597 }
9598 }
9599
9600 #define SSIB_PREFIX "__SSIB_"
9601 #define SSIB_PREFIX_LEN 7
9602
9603 /* Generate the name of the SSIB section for the current function. */
9604
9605 static const char *
9606 unicosmk_ssib_name ()
9607 {
9608 /* This is ok since CAM won't be able to deal with names longer than that
9609 anyway. */
9610
9611 static char name[256];
9612
9613 rtx x;
9614 const char *fnname;
9615 int len;
9616
9617 x = DECL_RTL (cfun->decl);
9618 if (GET_CODE (x) != MEM)
9619 abort ();
9620 x = XEXP (x, 0);
9621 if (GET_CODE (x) != SYMBOL_REF)
9622 abort ();
9623 fnname = alpha_strip_name_encoding (XSTR (x, 0));
9624
9625 len = strlen (fnname);
9626 if (len + SSIB_PREFIX_LEN > 255)
9627 len = 255 - SSIB_PREFIX_LEN;
9628
9629 strcpy (name, SSIB_PREFIX);
9630 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
9631 name[len + SSIB_PREFIX_LEN] = 0;
9632
9633 return name;
9634 }
9635
9636 /* Output the static subroutine information block for the current
9637 function. */
9638
9639 static void
9640 unicosmk_output_ssib (file, fnname)
9641 FILE *file;
9642 const char *fnname;
9643 {
9644 int len;
9645 int i;
9646 rtx x;
9647 rtx ciw;
9648 struct machine_function *machine = cfun->machine;
9649
9650 ssib_section ();
9651 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
9652 unicosmk_ssib_name ());
9653
9654 /* Required header bits and the function name length. */
9655
9656 len = strlen (fnname);
9657 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
9658
9659 /* Saved registers
9660 ??? We don't do that yet. */
9661
9662 fputs ("\t.quad\t0\n", file);
9663
9664 /* Function address. */
9665
9666 fputs ("\t.quad\t", file);
9667 assemble_name (file, fnname);
9668 putc ('\n', file);
9669
9670 fputs ("\t.quad\t0\n", file);
9671 fputs ("\t.quad\t0\n", file);
9672
9673 /* Function name.
9674 ??? We do it the same way Cray CC does it but this could be
9675 simplified. */
9676
9677 for (i = 0; i < len; i++)
9678 fprintf (file, "\t.byte\t%d\n", (int) fnname[i]);
9679 if ((len % 8) == 0)
9680 fputs ("\t.quad\t0\n", file);
9681 else
9682 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8)) * 8);
9683
9684 /* All call information words used in the function. */
9685
9686 for (x = machine->first_ciw; x; x = XEXP (x, 1))
9687 {
9688 ciw = XEXP (x, 0);
9689 fprintf (file, "\t.quad\t");
9690 #if HOST_BITS_PER_WIDE_INT == 32
9691 fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
9692 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
9693 #else
9694 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (ciw));
9695 #endif
9696 fprintf (file, "\n");
9697 }
9698 }
9699
9700 /* Add a call information word (CIW) to the list of the current function's
9701 CIWs and return its index.
9702
9703 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
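/* The returned index is the CIW's quadword offset within the SSIB:
   five header words, the padded function name (strlen / 8 + 1
   words), then the CIWs in order. For example, the first CIW of a
   function with a six-character name lands at 1 + 0 + 5 == 6.  */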
9704
9705 rtx
9706 unicosmk_add_call_info_word (x)
9707 rtx x;
9708 {
9709 rtx node;
9710 struct machine_function *machine = cfun->machine;
9711
9712 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
9713 if (machine->first_ciw == NULL_RTX)
9714 machine->first_ciw = node;
9715 else
9716 XEXP (machine->last_ciw, 1) = node;
9717
9718 machine->last_ciw = node;
9719 ++machine->ciw_count;
9720
9721 return GEN_INT (machine->ciw_count
9722 + strlen (current_function_name) / 8 + 5);
9723 }
9724
9725 static char unicosmk_section_buf[100];
9726
9727 char *
9728 unicosmk_text_section ()
9729 {
9730 static int count = 0;
9731 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
9732 count++);
9733 return unicosmk_section_buf;
9734 }
9735
9736 char *
9737 unicosmk_data_section ()
9738 {
9739 static int count = 1;
9740 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
9741 count++);
9742 return unicosmk_section_buf;
9743 }
9744
9745 /* The Cray assembler doesn't accept extern declarations for symbols which
9746 are defined in the same file. We have to keep track of all global
9747 symbols which are referenced and/or defined in a source file and output
9748 extern declarations for those which are referenced but not defined at
9749 the end of the file. */
9750
9751 /* List of identifiers for which an extern declaration might have to be
9752 emitted. */
9753
9754 struct unicosmk_extern_list
9755 {
9756 struct unicosmk_extern_list *next;
9757 const char *name;
9758 };
9759
9760 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
9761
9762 /* Output extern declarations which are required for every asm file. */
9763
9764 static void
9765 unicosmk_output_default_externs (file)
9766 FILE *file;
9767 {
9768 static const char *const externs[] =
9769 { "__T3E_MISMATCH" };
9770
9771 int i;
9772 int n;
9773
9774 n = ARRAY_SIZE (externs);
9775
9776 for (i = 0; i < n; i++)
9777 fprintf (file, "\t.extern\t%s\n", externs[i]);
9778 }
9779
9780 /* Output extern declarations for global symbols which have been
9781 referenced but not defined. */
9782
9783 static void
9784 unicosmk_output_externs (file)
9785 FILE *file;
9786 {
9787 struct unicosmk_extern_list *p;
9788 const char *real_name;
9789 int len;
9790 tree name_tree;
9791
9792 len = strlen (user_label_prefix);
9793 for (p = unicosmk_extern_head; p != 0; p = p->next)
9794 {
9795 /* We have to strip the encoding and possibly remove user_label_prefix
9796 from the identifier in order to handle -fleading-underscore and
9797 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
9798 real_name = alpha_strip_name_encoding (p->name);
9799 if (len && p->name[0] == '*'
9800 && !memcmp (real_name, user_label_prefix, len))
9801 real_name += len;
9802
9803 name_tree = get_identifier (real_name);
9804 if (! TREE_ASM_WRITTEN (name_tree))
9805 {
9806 TREE_ASM_WRITTEN (name_tree) = 1;
9807 fputs ("\t.extern\t", file);
9808 assemble_name (file, p->name);
9809 putc ('\n', file);
9810 }
9811 }
9812 }
9813
9814 /* Record an extern. */
9815
9816 void
9817 unicosmk_add_extern (name)
9818 const char *name;
9819 {
9820 struct unicosmk_extern_list *p;
9821
9822 p = (struct unicosmk_extern_list *)
9823 xmalloc (sizeof (struct unicosmk_extern_list));
9824 p->next = unicosmk_extern_head;
9825 p->name = name;
9826 unicosmk_extern_head = p;
9827 }
9828
9829 /* The Cray assembler generates incorrect code if identifiers which
9830 conflict with register names are used as instruction operands. We have
9831 to replace such identifiers with DEX expressions. */
9832
9833 /* Structure to collect identifiers which have been replaced by DEX
9834 expressions. */
9835
9836 struct unicosmk_dex {
9837 struct unicosmk_dex *next;
9838 const char *name;
9839 };
9840
9841 /* List of identifiers which have been replaced by DEX expressions. The DEX
9842 number is determined by the position in the list. */
9843
9844 static struct unicosmk_dex *unicosmk_dex_list = NULL;
9845
9846 /* The number of elements in the DEX list. */
9847
9848 static int unicosmk_dex_count = 0;
9849
9850 /* Check if NAME must be replaced by a DEX expression. */
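/* E.g. "r5", "$f31" and "R10" are special while "r32", "f99" and
   "rx" are not: only the names of the 32 integer and 32 FP
   registers, in either case and with an optional '$' or '*'
   prefix, collide with CAM's register names.  */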
9851
9852 static int
9853 unicosmk_special_name (name)
9854 const char *name;
9855 {
9856 if (name[0] == '*')
9857 ++name;
9858
9859 if (name[0] == '$')
9860 ++name;
9861
9862 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
9863 return 0;
9864
9865 switch (name[1])
9866 {
9867 case '1': case '2':
9868 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
9869
9870 case '3':
9871 return (name[2] == '\0'
9872 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
9873
9874 default:
9875 return (ISDIGIT (name[1]) && name[2] == '\0');
9876 }
9877 }
9878
9879 /* Return the DEX number if X must be replaced by a DEX expression and 0
9880 otherwise. */
9881
9882 static int
9883 unicosmk_need_dex (x)
9884 rtx x;
9885 {
9886 struct unicosmk_dex *dex;
9887 const char *name;
9888 int i;
9889
9890 if (GET_CODE (x) != SYMBOL_REF)
9891 return 0;
9892
9893 name = XSTR (x,0);
9894 if (! unicosmk_special_name (name))
9895 return 0;
9896
9897 i = unicosmk_dex_count;
9898 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9899 {
9900 if (! strcmp (name, dex->name))
9901 return i;
9902 --i;
9903 }
9904
9905 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
9906 dex->name = name;
9907 dex->next = unicosmk_dex_list;
9908 unicosmk_dex_list = dex;
9909
9910 ++unicosmk_dex_count;
9911 return unicosmk_dex_count;
9912 }
9913
9914 /* Output the DEX definitions for this file. */
9915
9916 static void
9917 unicosmk_output_dex (file)
9918 FILE *file;
9919 {
9920 struct unicosmk_dex *dex;
9921 int i;
9922
9923 if (unicosmk_dex_list == NULL)
9924 return;
9925
9926 fprintf (file, "\t.dexstart\n");
9927
9928 i = unicosmk_dex_count;
9929 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9930 {
9931 fprintf (file, "\tDEX (%d) = ", i);
9932 assemble_name (file, dex->name);
9933 putc ('\n', file);
9934 --i;
9935 }
9936
9937 fprintf (file, "\t.dexend\n");
9938 }
9939
9940 #else
9941
9942 static void
9943 unicosmk_output_deferred_case_vectors (file)
9944 FILE *file ATTRIBUTE_UNUSED;
9945 {}
9946
9947 static void
9948 unicosmk_gen_dsib (imaskP)
9949 unsigned long * imaskP ATTRIBUTE_UNUSED;
9950 {}
9951
9952 static void
9953 unicosmk_output_ssib (file, fnname)
9954 FILE * file ATTRIBUTE_UNUSED;
9955 const char * fnname ATTRIBUTE_UNUSED;
9956 {}
9957
9958 rtx
9959 unicosmk_add_call_info_word (x)
9960 rtx x ATTRIBUTE_UNUSED;
9961 {
9962 return NULL_RTX;
9963 }
9964
9965 static int
9966 unicosmk_need_dex (x)
9967 rtx x ATTRIBUTE_UNUSED;
9968 {
9969 return 0;
9970 }
9971
9972 #endif /* TARGET_ABI_UNICOSMK */
9973
9974 #include "gt-alpha.h"
9975