1 /* Subroutines for insn-output.c for HPPA.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "real.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "insn-attr.h"
34 #include "flags.h"
35 #include "tree.h"
36 #include "output.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "integrate.h"
42 #include "function.h"
43 #include "toplev.h"
44 #include "ggc.h"
45 #include "recog.h"
46 #include "predict.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50
51 /* Return nonzero if there is a bypass for the output of
52 OUT_INSN and the fp store IN_INSN. */
53 int
54 hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
55 {
56 enum machine_mode store_mode;
57 enum machine_mode other_mode;
58 rtx set;
59
60 if (recog_memoized (in_insn) < 0
61 || get_attr_type (in_insn) != TYPE_FPSTORE
62 || recog_memoized (out_insn) < 0)
63 return 0;
64
65 store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));
66
67 set = single_set (out_insn);
68 if (!set)
69 return 0;
70
71 other_mode = GET_MODE (SET_SRC (set));
72
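/* Note: the bypass is modeled only when the producer and the FP store
   operate on values of the same width, e.g. a DFmode result feeding a
   DFmode store, but not an SFmode result feeding a DFmode store.  */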
73 return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
74 }
75
76
77 #ifndef DO_FRAME_NOTES
78 #ifdef INCOMING_RETURN_ADDR_RTX
79 #define DO_FRAME_NOTES 1
80 #else
81 #define DO_FRAME_NOTES 0
82 #endif
83 #endif
84
85 static void copy_reg_pointer (rtx, rtx);
86 static void fix_range (const char *);
87 static int hppa_address_cost (rtx);
88 static bool hppa_rtx_costs (rtx, int, int, int *);
89 static inline rtx force_mode (enum machine_mode, rtx);
90 static void pa_reorg (void);
91 static void pa_combine_instructions (void);
92 static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
93 static int forward_branch_p (rtx);
94 static int shadd_constant_p (int);
95 static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
96 static int compute_movmem_length (rtx);
97 static int compute_clrmem_length (rtx);
98 static bool pa_assemble_integer (rtx, unsigned int, int);
99 static void remove_useless_addtr_insns (int);
100 static void store_reg (int, HOST_WIDE_INT, int);
101 static void store_reg_modify (int, int, HOST_WIDE_INT);
102 static void load_reg (int, HOST_WIDE_INT, int);
103 static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
104 static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
105 static void update_total_code_bytes (int);
106 static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
107 static int pa_adjust_cost (rtx, rtx, rtx, int);
108 static int pa_adjust_priority (rtx, int);
109 static int pa_issue_rate (void);
110 static void pa_select_section (tree, int, unsigned HOST_WIDE_INT)
111 ATTRIBUTE_UNUSED;
112 static void pa_encode_section_info (tree, rtx, int);
113 static const char *pa_strip_name_encoding (const char *);
114 static bool pa_function_ok_for_sibcall (tree, tree);
115 static void pa_globalize_label (FILE *, const char *)
116 ATTRIBUTE_UNUSED;
117 static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
118 HOST_WIDE_INT, tree);
119 #if !defined(USE_COLLECT2)
120 static void pa_asm_out_constructor (rtx, int);
121 static void pa_asm_out_destructor (rtx, int);
122 #endif
123 static void pa_init_builtins (void);
124 static rtx hppa_builtin_saveregs (void);
125 static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
126 static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
127 static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
128 static struct deferred_plabel *get_plabel (const char *)
129 ATTRIBUTE_UNUSED;
130 static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
131 static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
132 static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
133 static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
134 static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
135 static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
136 static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
137 static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
138 static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
139 static void output_deferred_plabels (void);
140 #ifdef HPUX_LONG_DOUBLE_LIBRARY
141 static void pa_hpux_init_libfuncs (void);
142 #endif
143 static rtx pa_struct_value_rtx (tree, int);
144 static bool pa_pass_by_reference (CUMULATIVE_ARGS *ca, enum machine_mode,
145 tree, bool);
146 static struct machine_function * pa_init_machine_status (void);
147
148
149 /* Save the operands last given to a compare for use when we
150 generate a scc or bcc insn. */
151 rtx hppa_compare_op0, hppa_compare_op1;
152 enum cmp_type hppa_branch_type;
153
154 /* Which architecture we are generating code for. */
155 enum architecture_type pa_arch;
156
157 /* String to hold which architecture we are generating code for. */
158 const char *pa_arch_string;
159
160 /* String used with the -mfixed-range= option. */
161 const char *pa_fixed_range_string;
162
163 /* Which cpu we are scheduling for. */
164 enum processor_type pa_cpu;
165
166 /* String to hold which cpu we are scheduling for. */
167 const char *pa_cpu_string;
168
169 /* Counts of the callee-saved general and floating point registers
170 saved by the current function's prologue. */
171 static int gr_saved, fr_saved;
172
173 static rtx find_addr_reg (rtx);
174
175 /* Keep track of the number of bytes we have output in the CODE subspace
176 during this compilation so we'll know when to emit inline long-calls. */
177 unsigned long total_code_bytes;
178
179 /* The last address of the previous function plus the number of bytes in
180 associated thunks that have been output. This is used to determine if
181 a thunk can use an IA-relative branch to reach its target function. */
182 static int last_address;
183
184 /* Variables to handle plabels that we discover are necessary at assembly
185 output time. They are output after the current function. */
186 struct deferred_plabel GTY(())
187 {
188 rtx internal_label;
189 const char *name;
190 };
191 static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
192 deferred_plabels;
193 static size_t n_deferred_plabels = 0;
194
195 \f
196 /* Initialize the GCC target structure. */
197
198 #undef TARGET_ASM_ALIGNED_HI_OP
199 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
200 #undef TARGET_ASM_ALIGNED_SI_OP
201 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
202 #undef TARGET_ASM_ALIGNED_DI_OP
203 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
204 #undef TARGET_ASM_UNALIGNED_HI_OP
205 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
206 #undef TARGET_ASM_UNALIGNED_SI_OP
207 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
208 #undef TARGET_ASM_UNALIGNED_DI_OP
209 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
210 #undef TARGET_ASM_INTEGER
211 #define TARGET_ASM_INTEGER pa_assemble_integer
212
213 #undef TARGET_ASM_FUNCTION_PROLOGUE
214 #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
215 #undef TARGET_ASM_FUNCTION_EPILOGUE
216 #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
217
218 #undef TARGET_SCHED_ADJUST_COST
219 #define TARGET_SCHED_ADJUST_COST pa_adjust_cost
220 #undef TARGET_SCHED_ADJUST_PRIORITY
221 #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
222 #undef TARGET_SCHED_ISSUE_RATE
223 #define TARGET_SCHED_ISSUE_RATE pa_issue_rate
224
225 #undef TARGET_ENCODE_SECTION_INFO
226 #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
227 #undef TARGET_STRIP_NAME_ENCODING
228 #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding
229
230 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
231 #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall
232
233 #undef TARGET_ASM_OUTPUT_MI_THUNK
234 #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
235 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
236 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
237
238 #undef TARGET_ASM_FILE_END
239 #define TARGET_ASM_FILE_END output_deferred_plabels
240
241 #if !defined(USE_COLLECT2)
242 #undef TARGET_ASM_CONSTRUCTOR
243 #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
244 #undef TARGET_ASM_DESTRUCTOR
245 #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
246 #endif
247
248 #undef TARGET_INIT_BUILTINS
249 #define TARGET_INIT_BUILTINS pa_init_builtins
250
251 #undef TARGET_RTX_COSTS
252 #define TARGET_RTX_COSTS hppa_rtx_costs
253 #undef TARGET_ADDRESS_COST
254 #define TARGET_ADDRESS_COST hppa_address_cost
255
256 #undef TARGET_MACHINE_DEPENDENT_REORG
257 #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
258
259 #ifdef HPUX_LONG_DOUBLE_LIBRARY
260 #undef TARGET_INIT_LIBFUNCS
261 #define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
262 #endif
263
264 #undef TARGET_PROMOTE_FUNCTION_RETURN
265 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
266 #undef TARGET_PROMOTE_PROTOTYPES
267 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
268
269 #undef TARGET_STRUCT_VALUE_RTX
270 #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
271 #undef TARGET_RETURN_IN_MEMORY
272 #define TARGET_RETURN_IN_MEMORY pa_return_in_memory
273 #undef TARGET_MUST_PASS_IN_STACK
274 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
275 #undef TARGET_PASS_BY_REFERENCE
276 #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
277
278 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
279 #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
280 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
281 #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr
282
283 struct gcc_target targetm = TARGET_INITIALIZER;
284 \f
285 /* Parse the -mfixed-range= option string. */
286
287 static void
288 fix_range (const char *const_str)
289 {
290 int i, first, last;
291 char *str, *dash, *comma;
292
293 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
294 REG2 are either register names or register numbers. The effect
295 of this option is to mark the registers in the range from REG1 to
296 REG2 as ``fixed'' so they won't be used by the compiler. This is
297 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
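/* For example (illustrative; actual names must match reg_names[]),
   "-mfixed-range=%fr20-%fr23" would mark %fr20 through %fr23 as fixed,
   and "-mfixed-range=%fr20-%fr23,%fr28-%fr31" fixes two separate
   ranges.  */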
298
299 i = strlen (const_str);
300 str = (char *) alloca (i + 1);
301 memcpy (str, const_str, i + 1);
302
303 while (1)
304 {
305 dash = strchr (str, '-');
306 if (!dash)
307 {
308 warning ("value of -mfixed-range must have form REG1-REG2");
309 return;
310 }
311 *dash = '\0';
312
313 comma = strchr (dash + 1, ',');
314 if (comma)
315 *comma = '\0';
316
317 first = decode_reg_name (str);
318 if (first < 0)
319 {
320 warning ("unknown register name: %s", str);
321 return;
322 }
323
324 last = decode_reg_name (dash + 1);
325 if (last < 0)
326 {
327 warning ("unknown register name: %s", dash + 1);
328 return;
329 }
330
331 *dash = '-';
332
333 if (first > last)
334 {
335 warning ("%s-%s is an empty range", str, dash + 1);
336 return;
337 }
338
339 for (i = first; i <= last; ++i)
340 fixed_regs[i] = call_used_regs[i] = 1;
341
342 if (!comma)
343 break;
344
345 *comma = ',';
346 str = comma + 1;
347 }
348
349 /* Check if all floating point registers have been fixed. */
350 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
351 if (!fixed_regs[i])
352 break;
353
354 if (i > FP_REG_LAST)
355 target_flags |= MASK_DISABLE_FPREGS;
356 }
357
358 void
359 override_options (void)
360 {
361 if (pa_cpu_string == NULL)
362 pa_cpu_string = TARGET_SCHED_DEFAULT;
363
364 if (! strcmp (pa_cpu_string, "8000"))
365 {
366 pa_cpu_string = "8000";
367 pa_cpu = PROCESSOR_8000;
368 }
369 else if (! strcmp (pa_cpu_string, "7100"))
370 {
371 pa_cpu_string = "7100";
372 pa_cpu = PROCESSOR_7100;
373 }
374 else if (! strcmp (pa_cpu_string, "700"))
375 {
376 pa_cpu_string = "700";
377 pa_cpu = PROCESSOR_700;
378 }
379 else if (! strcmp (pa_cpu_string, "7100LC"))
380 {
381 pa_cpu_string = "7100LC";
382 pa_cpu = PROCESSOR_7100LC;
383 }
384 else if (! strcmp (pa_cpu_string, "7200"))
385 {
386 pa_cpu_string = "7200";
387 pa_cpu = PROCESSOR_7200;
388 }
389 else if (! strcmp (pa_cpu_string, "7300"))
390 {
391 pa_cpu_string = "7300";
392 pa_cpu = PROCESSOR_7300;
393 }
394 else
395 {
396 warning ("unknown -mschedule= option (%s).\nValid options are 700, 7100, 7100LC, 7200, 7300, and 8000\n", pa_cpu_string);
397 }
398
399 /* Set the instruction architecture. */
400 if (pa_arch_string && ! strcmp (pa_arch_string, "1.0"))
401 {
402 pa_arch_string = "1.0";
403 pa_arch = ARCHITECTURE_10;
404 target_flags &= ~(MASK_PA_11 | MASK_PA_20);
405 }
406 else if (pa_arch_string && ! strcmp (pa_arch_string, "1.1"))
407 {
408 pa_arch_string = "1.1";
409 pa_arch = ARCHITECTURE_11;
410 target_flags &= ~MASK_PA_20;
411 target_flags |= MASK_PA_11;
412 }
413 else if (pa_arch_string && ! strcmp (pa_arch_string, "2.0"))
414 {
415 pa_arch_string = "2.0";
416 pa_arch = ARCHITECTURE_20;
417 target_flags |= MASK_PA_11 | MASK_PA_20;
418 }
419 else if (pa_arch_string)
420 {
421 warning ("unknown -march= option (%s).\nValid options are 1.0, 1.1, and 2.0\n", pa_arch_string);
422 }
423
424 if (pa_fixed_range_string)
425 fix_range (pa_fixed_range_string);
426
427 /* Unconditional branches in the delay slot are not compatible with dwarf2
428 call frame information. There is no benefit in using this optimization
429 on PA8000 and later processors. */
430 if (pa_cpu >= PROCESSOR_8000
431 || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
432 || flag_unwind_tables)
433 target_flags &= ~MASK_JUMP_IN_DELAY;
434
435 if (flag_pic && TARGET_PORTABLE_RUNTIME)
436 {
437 warning ("PIC code generation is not supported in the portable runtime model\n");
438 }
439
440 if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
441 {
442 warning ("PIC code generation is not compatible with fast indirect calls\n");
443 }
444
445 if (! TARGET_GAS && write_symbols != NO_DEBUG)
446 {
447 warning ("-g is only supported when using GAS on this processor,");
448 warning ("-g option disabled");
449 write_symbols = NO_DEBUG;
450 }
451
452 /* We only support the "big PIC" model now. And we always generate PIC
453 code when in 64-bit mode. */
454 if (flag_pic == 1 || TARGET_64BIT)
455 flag_pic = 2;
456
457 /* We can't guarantee that .dword is available for 32-bit targets. */
458 if (UNITS_PER_WORD == 4)
459 targetm.asm_out.aligned_op.di = NULL;
460
461 /* The unaligned ops are only available when using GAS. */
462 if (!TARGET_GAS)
463 {
464 targetm.asm_out.unaligned_op.hi = NULL;
465 targetm.asm_out.unaligned_op.si = NULL;
466 targetm.asm_out.unaligned_op.di = NULL;
467 }
468
469 init_machine_status = pa_init_machine_status;
470 }
471
472 static void
473 pa_init_builtins (void)
474 {
475 #ifdef DONT_HAVE_FPUTC_UNLOCKED
476 built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
477 implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
478 #endif
479 }
480
481 /* Function to init struct machine_function.
482 This will be called, via a pointer variable,
483 from push_function_context. */
484
485 static struct machine_function *
486 pa_init_machine_status (void)
487 {
488 return ggc_alloc_cleared (sizeof (machine_function));
489 }
490
491 /* If FROM is a probable pointer register, mark TO as a probable
492 pointer register with the same pointer alignment as FROM. */
493
494 static void
495 copy_reg_pointer (rtx to, rtx from)
496 {
497 if (REG_POINTER (from))
498 mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
499 }
500
501 /* Return nonzero only if OP is a register of mode MODE,
502 or CONST0_RTX. */
503 int
504 reg_or_0_operand (rtx op, enum machine_mode mode)
505 {
506 return (op == CONST0_RTX (mode) || register_operand (op, mode));
507 }
508
509 /* Return nonzero if OP is suitable for use in a call to a named
510 function.
511
512 For 2.5 try to eliminate either call_operand_address or
513 function_label_operand; they perform very similar functions. */
514 int
515 call_operand_address (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
516 {
517 return (GET_MODE (op) == word_mode
518 && CONSTANT_P (op) && ! TARGET_PORTABLE_RUNTIME);
519 }
520
521 /* Return 1 if X contains a symbolic expression. We know these
522 expressions will have one of a few well defined forms, so
523 we need only check those forms. */
524 int
525 symbolic_expression_p (rtx x)
526 {
527
528 /* Strip off any HIGH. */
529 if (GET_CODE (x) == HIGH)
530 x = XEXP (x, 0);
531
532 return (symbolic_operand (x, VOIDmode));
533 }
534
535 int
536 symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
537 {
538 switch (GET_CODE (op))
539 {
540 case SYMBOL_REF:
541 case LABEL_REF:
542 return 1;
543 case CONST:
544 op = XEXP (op, 0);
545 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
546 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
547 && GET_CODE (XEXP (op, 1)) == CONST_INT);
548 default:
549 return 0;
550 }
551 }
552
553 /* Return truth value of statement that OP is a symbolic memory
554 operand of mode MODE. */
555
556 int
557 symbolic_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
558 {
559 if (GET_CODE (op) == SUBREG)
560 op = SUBREG_REG (op);
561 if (GET_CODE (op) != MEM)
562 return 0;
563 op = XEXP (op, 0);
564 return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
565 || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
566 }
567
568 /* Return 1 if the operand is a register, zero, or a memory operand
569 that is not symbolic. */
570
571 int
572 reg_or_0_or_nonsymb_mem_operand (rtx op, enum machine_mode mode)
573 {
574 if (register_operand (op, mode))
575 return 1;
576
577 if (op == CONST0_RTX (mode))
578 return 1;
579
580 if (GET_CODE (op) == SUBREG)
581 op = SUBREG_REG (op);
582
583 if (GET_CODE (op) != MEM)
584 return 0;
585
586 /* Until problems with management of the REG_POINTER flag are resolved,
587 we need to delay creating move insns with unscaled indexed addresses
588 until CSE is not expected. */
589 if (!TARGET_NO_SPACE_REGS
590 && !cse_not_expected
591 && GET_CODE (XEXP (op, 0)) == PLUS
592 && REG_P (XEXP (XEXP (op, 0), 0))
593 && REG_P (XEXP (XEXP (op, 0), 1)))
594 return 0;
595
596 return (!symbolic_memory_operand (op, mode)
597 && memory_address_p (mode, XEXP (op, 0)));
598 }
599
600 /* Return 1 if the operand is a register operand or a non-symbolic memory
601 operand after reload. This predicate is used for branch patterns that
602 internally handle register reloading. We need to accept non-symbolic
603 memory operands after reload to ensure that the pattern is still valid
604 if reload didn't find a hard register for the operand. */
605
606 int
607 reg_before_reload_operand (rtx op, enum machine_mode mode)
608 {
609 /* Don't accept a SUBREG since it will need a reload. */
610 if (GET_CODE (op) == SUBREG)
611 return 0;
612
613 if (register_operand (op, mode))
614 return 1;
615
616 if (reload_completed
617 && memory_operand (op, mode)
618 && !symbolic_memory_operand (op, mode))
619 return 1;
620
621 return 0;
622 }
623
624 /* Accept any constant that can be moved in one instruction into a
625 general register. */
626 int
627 cint_ok_for_move (HOST_WIDE_INT intval)
628 {
629 /* OK if ldo, ldil, or zdepi, can be used. */
630 return (CONST_OK_FOR_LETTER_P (intval, 'J')
631 || CONST_OK_FOR_LETTER_P (intval, 'N')
632 || CONST_OK_FOR_LETTER_P (intval, 'K'));
633 }
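/* Illustrative examples (assuming the usual PA constraint letters:
   'J' a 14-bit signed immediate for ldo, 'N' an ldil-style constant
   with the low 11 bits clear, 'K' a constant zdepi can synthesize):
   5000 moves with ldo, 0x12345000 is an ldil candidate, and
   0x00ff0000 can be built with zdepi.  */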
634
635 /* Return 1 iff OP is an indexed memory operand. */
636 int
637 indexed_memory_operand (rtx op, enum machine_mode mode)
638 {
639 if (GET_MODE (op) != mode)
640 return 0;
641
642 /* Before reload, a (SUBREG (MEM...)) forces reloading into a register. */
643 if (reload_completed && GET_CODE (op) == SUBREG)
644 op = SUBREG_REG (op);
645
646 if (GET_CODE (op) != MEM || symbolic_memory_operand (op, mode))
647 return 0;
648
649 op = XEXP (op, 0);
650
651 return (memory_address_p (mode, op) && IS_INDEX_ADDR_P (op));
652 }
653
654 /* Accept anything that can be used as a destination operand for a
655 move instruction. We don't accept indexed memory operands since
656 they are supported only for floating point stores. */
657 int
658 move_dest_operand (rtx op, enum machine_mode mode)
659 {
660 if (register_operand (op, mode))
661 return 1;
662
663 if (GET_MODE (op) != mode)
664 return 0;
665
666 if (GET_CODE (op) == SUBREG)
667 op = SUBREG_REG (op);
668
669 if (GET_CODE (op) != MEM || symbolic_memory_operand (op, mode))
670 return 0;
671
672 op = XEXP (op, 0);
673
674 return (memory_address_p (mode, op)
675 && !IS_INDEX_ADDR_P (op)
676 && !IS_LO_SUM_DLT_ADDR_P (op));
677 }
678
679 /* Accept anything that can be used as a source operand for a move
680 instruction. */
681 int
682 move_src_operand (rtx op, enum machine_mode mode)
683 {
684 if (register_operand (op, mode))
685 return 1;
686
687 if (GET_CODE (op) == CONST_INT)
688 return cint_ok_for_move (INTVAL (op));
689
690 if (GET_MODE (op) != mode)
691 return 0;
692
693 if (GET_CODE (op) == SUBREG)
694 op = SUBREG_REG (op);
695
696 if (GET_CODE (op) != MEM)
697 return 0;
698
699 /* Until problems with management of the REG_POINTER flag are resolved,
700 we need to delay creating move insns with unscaled indexed addresses
701 until CSE is not expected. */
702 if (!TARGET_NO_SPACE_REGS
703 && !cse_not_expected
704 && GET_CODE (XEXP (op, 0)) == PLUS
705 && REG_P (XEXP (XEXP (op, 0), 0))
706 && REG_P (XEXP (XEXP (op, 0), 1)))
707 return 0;
708
709 return memory_address_p (mode, XEXP (op, 0));
710 }
711
712 /* Accept anything that can be used as the source operand for a prefetch
713 instruction. */
714 int
715 prefetch_operand (rtx op, enum machine_mode mode)
716 {
717 if (GET_CODE (op) != MEM)
718 return 0;
719
720 /* Until problems with management of the REG_POINTER flag are resolved,
721 we need to delay creating prefetch insns with unscaled indexed addresses
722 until CSE is not expected. */
723 if (!TARGET_NO_SPACE_REGS
724 && !cse_not_expected
725 && GET_CODE (XEXP (op, 0)) == PLUS
726 && REG_P (XEXP (XEXP (op, 0), 0))
727 && REG_P (XEXP (XEXP (op, 0), 1)))
728 return 0;
729
730 return memory_address_p (mode, XEXP (op, 0));
731 }
732
733 /* Accept REG and any CONST_INT that can be moved in one instruction into a
734 general register. */
735 int
736 reg_or_cint_move_operand (rtx op, enum machine_mode mode)
737 {
738 if (register_operand (op, mode))
739 return 1;
740
741 return (GET_CODE (op) == CONST_INT && cint_ok_for_move (INTVAL (op)));
742 }
743
744 int
745 pic_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
746 {
747 if (!flag_pic)
748 return 0;
749
750 switch (GET_CODE (op))
751 {
752 case LABEL_REF:
753 return 1;
754 case CONST:
755 op = XEXP (op, 0);
756 return (GET_CODE (XEXP (op, 0)) == LABEL_REF
757 && GET_CODE (XEXP (op, 1)) == CONST_INT);
758 default:
759 return 0;
760 }
761 }
762
763 int
764 fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
765 {
766 return reg_renumber && FP_REG_P (op);
767 }
768
769 \f
770
771 /* Return truth value of whether OP can be used as an operand in a
772 three operand arithmetic insn that accepts registers of mode MODE
773 or 14-bit signed integers. */
774 int
775 arith_operand (rtx op, enum machine_mode mode)
776 {
777 return (register_operand (op, mode)
778 || (GET_CODE (op) == CONST_INT && INT_14_BITS (op)));
779 }
780
781 /* Return truth value of whether OP can be used as an operand in a
782 three operand arithmetic insn that accepts registers of mode MODE
783 or 11-bit signed integers. */
784 int
785 arith11_operand (rtx op, enum machine_mode mode)
786 {
787 return (register_operand (op, mode)
788 || (GET_CODE (op) == CONST_INT && INT_11_BITS (op)));
789 }
790
791 /* Return truth value of whether OP can be used as an operand in an
792 adddi3 insn. */
793 int
794 adddi3_operand (rtx op, enum machine_mode mode)
795 {
796 return (register_operand (op, mode)
797 || (GET_CODE (op) == CONST_INT
798 && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
799 }
800
801 /* A constant integer suitable for use in a PRE_MODIFY memory
802 reference. */
803 int
804 pre_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
805 {
806 return (GET_CODE (op) == CONST_INT
807 && INTVAL (op) >= -0x2000 && INTVAL (op) < 0x10);
808 }
809
810 /* A constant integer suitable for use in a POST_MODIFY memory
811 reference. */
812 int
813 post_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
814 {
815 return (GET_CODE (op) == CONST_INT
816 && INTVAL (op) < 0x2000 && INTVAL (op) >= -0x10);
817 }
818
819 int
820 arith_double_operand (rtx op, enum machine_mode mode)
821 {
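/* In the CONST_DOUBLE case below, the low word must be a 14-bit
   signed value and the sign of the high word must agree with bit 13
   (the sign bit of the 14-bit field), i.e. the CONST_DOUBLE is the
   canonical sign-extended form of a 14-bit constant.  */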
822 return (register_operand (op, mode)
823 || (GET_CODE (op) == CONST_DOUBLE
824 && GET_MODE (op) == mode
825 && VAL_14_BITS_P (CONST_DOUBLE_LOW (op))
826 && ((CONST_DOUBLE_HIGH (op) >= 0)
827 == ((CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
828 }
829
830 /* Return truth value of whether OP is an integer which fits the
831 range constraining immediate operands in three-address insns, or
832 is an integer register. */
833
834 int
835 ireg_or_int5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
836 {
837 return ((GET_CODE (op) == CONST_INT && INT_5_BITS (op))
838 || (GET_CODE (op) == REG && REGNO (op) > 0 && REGNO (op) < 32));
839 }
840
841 /* Return nonzero if OP is an integer register, else return zero. */
842 int
843 ireg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
844 {
845 return (GET_CODE (op) == REG && REGNO (op) > 0 && REGNO (op) < 32);
846 }
847
848 /* Return truth value of whether OP is an integer which fits the
849 range constraining immediate operands in three-address insns. */
850
851 int
852 int5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
853 {
854 return (GET_CODE (op) == CONST_INT && INT_5_BITS (op));
855 }
856
857 int
858 uint5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
859 {
860 return (GET_CODE (op) == CONST_INT && INT_U5_BITS (op));
861 }
862
863 int
864 int11_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
865 {
866 return (GET_CODE (op) == CONST_INT && INT_11_BITS (op));
867 }
868
869 int
870 uint32_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
871 {
872 #if HOST_BITS_PER_WIDE_INT > 32
873 /* All allowed constants will fit a CONST_INT. */
874 return (GET_CODE (op) == CONST_INT
875 && (INTVAL (op) >= 0 && INTVAL (op) < (HOST_WIDE_INT) 1 << 32));
876 #else
877 return (GET_CODE (op) == CONST_INT
878 || (GET_CODE (op) == CONST_DOUBLE
879 && CONST_DOUBLE_HIGH (op) == 0));
880 #endif
881 }
882
883 int
884 arith5_operand (rtx op, enum machine_mode mode)
885 {
886 return register_operand (op, mode) || int5_operand (op, mode);
887 }
888
889 /* True iff zdepi can be used to generate this CONST_INT.
890 zdepi first sign extends a 5 bit signed number to a given field
891 length, then places this field anywhere in a zero. */
892 int
893 zdepi_cint_p (unsigned HOST_WIDE_INT x)
894 {
895 unsigned HOST_WIDE_INT lsb_mask, t;
896
897 /* This might not be obvious, but it's at least fast.
898 This function is critical; we can't afford the time loops would take. */
899 lsb_mask = x & -x;
900 t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
901 /* Return true iff t is a power of two. */
902 return ((t & (t - 1)) == 0);
903 }
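/* Worked example (illustrative): for x = 0xf0, lsb_mask = 0x10 and
   t = ((0xf0 >> 4) + 0x10) & ~0xf = 0x10, a power of two, so the
   function returns 1 (zdepi can deposit the field 1111 at bit 4).
   For x = 0x1010 the two set bits are not contiguous: t = 0x110 is
   not a power of two, so the function returns 0.  */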
904
905 /* True iff depi or extru can be used to compute (reg & mask).
906 Accept bit pattern like these:
907 0....01....1
908 1....10....0
909 1..10..01..1 */
910 int
911 and_mask_p (unsigned HOST_WIDE_INT mask)
912 {
913 mask = ~mask;
914 mask += mask & -mask;
915 return (mask & (mask - 1)) == 0;
916 }
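/* Worked examples (illustrative, with a 64-bit HOST_WIDE_INT): for
   mask = 0x00ff (0...01...1), ~mask = 0xff...ff00, and adding its
   lowest set bit carries all the way out, leaving 0, so the function
   returns 1.  For mask = 0x0ff0 (0..01..10..0, not an accepted
   pattern), ~mask plus its low bit is 0xff...f010, not a power of
   two, so the function returns 0.  */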
917
918 /* True iff depi or extru can be used to compute (reg & OP). */
919 int
920 and_operand (rtx op, enum machine_mode mode)
921 {
922 return (register_operand (op, mode)
923 || (GET_CODE (op) == CONST_INT && and_mask_p (INTVAL (op))));
924 }
925
926 /* True iff depi can be used to compute (reg | MASK). */
927 int
928 ior_mask_p (unsigned HOST_WIDE_INT mask)
929 {
930 mask += mask & -mask;
931 return (mask & (mask - 1)) == 0;
932 }
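/* Worked example (illustrative): mask = 0x0ff0 has lowest set bit
   0x10, and 0x0ff0 + 0x10 = 0x1000 is a power of two, so a single
   contiguous field of ones qualifies and the function returns 1.
   mask = 0x1010 gives 0x1020, not a power of two, so it returns 0.  */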
933
934 /* True iff depi can be used to compute (reg | OP). */
935 int
936 ior_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
937 {
938 return (GET_CODE (op) == CONST_INT && ior_mask_p (INTVAL (op)));
939 }
940
941 int
942 lhs_lshift_operand (rtx op, enum machine_mode mode)
943 {
944 return register_operand (op, mode) || lhs_lshift_cint_operand (op, mode);
945 }
946
947 /* True iff OP is a CONST_INT of the forms 0...0xxxx or 0...01...1xxxx.
948 Such values can be the left hand side x in (x << r), using the zvdepi
949 instruction. */
950 int
951 lhs_lshift_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
952 {
953 unsigned HOST_WIDE_INT x;
954 if (GET_CODE (op) != CONST_INT)
955 return 0;
956 x = INTVAL (op) >> 4;
957 return (x & (x + 1)) == 0;
958 }
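/* Worked example (illustrative): op = 0x7f gives x = 0x7, and
   0x7 & 0x8 == 0, so the form 0...01...1xxxx is accepted; op = 0x50
   gives x = 0x5, and 0x5 & 0x6 != 0, so it is rejected.  */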
959
960 int
961 arith32_operand (rtx op, enum machine_mode mode)
962 {
963 return register_operand (op, mode) || GET_CODE (op) == CONST_INT;
964 }
965
966 int
967 pc_or_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
968 {
969 return (GET_CODE (op) == PC || GET_CODE (op) == LABEL_REF);
970 }
971 \f
972 /* Legitimize PIC addresses. If the address is already
973 position-independent, we return ORIG. Newly generated
974 position-independent addresses go to REG. If we need more
975 than one register, we lose. */
976
977 rtx
978 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
979 {
980 rtx pic_ref = orig;
981
982 /* Labels need special handling. */
983 if (pic_label_operand (orig, mode))
984 {
985 /* We do not want to go through the movXX expanders here since that
986 would create recursion.
987
988 Nor do we really want to call a generator for a named pattern
989 since that requires multiple patterns if we want to support
990 multiple word sizes.
991
992 So instead we just emit the raw set, which avoids the movXX
993 expanders completely. */
994 mark_reg_pointer (reg, BITS_PER_UNIT);
995 emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
996 current_function_uses_pic_offset_table = 1;
997 return reg;
998 }
999 if (GET_CODE (orig) == SYMBOL_REF)
1000 {
1001 rtx insn, tmp_reg;
1002
1003 if (reg == 0)
1004 abort ();
1005
1006 /* Before reload, allocate a temporary register for the intermediate
1007 result. This allows the sequence to be deleted when the final
1008 result is unused and the insns are trivially dead. */
1009 tmp_reg = ((reload_in_progress || reload_completed)
1010 ? reg : gen_reg_rtx (Pmode));
1011
1012 emit_move_insn (tmp_reg,
1013 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
1014 gen_rtx_HIGH (word_mode, orig)));
1015 pic_ref
1016 = gen_rtx_MEM (Pmode,
1017 gen_rtx_LO_SUM (Pmode, tmp_reg,
1018 gen_rtx_UNSPEC (Pmode,
1019 gen_rtvec (1, orig),
1020 UNSPEC_DLTIND14R)));
1021
1022 current_function_uses_pic_offset_table = 1;
1023 MEM_NOTRAP_P (pic_ref) = 1;
1024 RTX_UNCHANGING_P (pic_ref) = 1;
1025 mark_reg_pointer (reg, BITS_PER_UNIT);
1026 insn = emit_move_insn (reg, pic_ref);
1027
1028 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
1029 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));
1030
1031 return reg;
1032 }
1033 else if (GET_CODE (orig) == CONST)
1034 {
1035 rtx base;
1036
1037 if (GET_CODE (XEXP (orig, 0)) == PLUS
1038 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
1039 return orig;
1040
1041 if (reg == 0)
1042 abort ();
1043
1044 if (GET_CODE (XEXP (orig, 0)) == PLUS)
1045 {
1046 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
1047 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
1048 base == reg ? 0 : reg);
1049 }
1050 else
1051 abort ();
1052
1053 if (GET_CODE (orig) == CONST_INT)
1054 {
1055 if (INT_14_BITS (orig))
1056 return plus_constant (base, INTVAL (orig));
1057 orig = force_reg (Pmode, orig);
1058 }
1059 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
1060 /* Likewise, should we set special REG_NOTEs here? */
1061 }
1062
1063 return pic_ref;
1064 }
1065
1066 /* Try machine-dependent ways of modifying an illegitimate address
1067 to be legitimate. If we find one, return the new, valid address.
1068 This macro is used in only one place: `memory_address' in explow.c.
1069
1070 OLDX is the address as it was before break_out_memory_refs was called.
1071 In some cases it is useful to look at this to decide what needs to be done.
1072
1073 MODE and WIN are passed so that this macro can use
1074 GO_IF_LEGITIMATE_ADDRESS.
1075
1076 It is always safe for this macro to do nothing. It exists to recognize
1077 opportunities to optimize the output.
1078
1079 For the PA, transform:
1080
1081 memory(X + <large int>)
1082
1083 into:
1084
1085 if (<large int> & mask) >= 16
1086 Y = (<large int> & ~mask) + mask + 1 Round up.
1087 else
1088 Y = (<large int> & ~mask) Round down.
1089 Z = X + Y
1090 memory (Z + (<large int> - Y));
1091
1092 This is for CSE to find several similar references, and only use one Z.
1093
1094 X can either be a SYMBOL_REF or REG, but because combine cannot
1095 perform a 4->2 combination we do nothing for SYMBOL_REF + D where
1096 D will not fit in 14 bits.
1097
1098 MODE_FLOAT references allow displacements which fit in 5 bits, so use
1099 0x1f as the mask.
1100
1101 MODE_INT references allow displacements which fit in 14 bits, so use
1102 0x3fff as the mask.
1103
1104 This relies on the fact that most MODE_FLOAT references will use FP
1105 registers and most MODE_INT references will use integer registers.
1106 (In the rare case of an FP register used in an integer MODE, we depend
1107 on secondary reloads to clean things up.)
1108
1109
1110 It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
1111 manner if Y is 2, 4, or 8 (this allows more shadd insns and shifted
1112 indexed addressing modes to be used).
1113
1114 Put X and Z into registers. Then put the entire expression into
1115 a register. */
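/* Worked example (illustrative): an SFmode reference to X + 0x43
   when !TARGET_PA_20 uses mask 0x1f; 0x43 & 0x1f = 3 is less than
   half the boundary (0x10), so Y = 0x40 (round down).  Z = X + 0x40
   can be shared by CSE, and the reference becomes memory (Z + 3),
   where the displacement 3 fits the 5-bit FP load/store offset
   field.  */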
1116
1117 rtx
1118 hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1119 enum machine_mode mode)
1120 {
1121 rtx orig = x;
1122
1123 /* We need to canonicalize the order of operands in unscaled indexed
1124 addresses since the code that checks if an address is valid doesn't
1125 always try both orders. */
1126 if (!TARGET_NO_SPACE_REGS
1127 && GET_CODE (x) == PLUS
1128 && GET_MODE (x) == Pmode
1129 && REG_P (XEXP (x, 0))
1130 && REG_P (XEXP (x, 1))
1131 && REG_POINTER (XEXP (x, 0))
1132 && !REG_POINTER (XEXP (x, 1)))
1133 return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));
1134
1135 if (flag_pic)
1136 return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
1137
1138 /* Strip off CONST. */
1139 if (GET_CODE (x) == CONST)
1140 x = XEXP (x, 0);
1141
1142 /* Special case. Get the SYMBOL_REF into a register and use indexing.
1143 That should always be safe. */
1144 if (GET_CODE (x) == PLUS
1145 && GET_CODE (XEXP (x, 0)) == REG
1146 && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
1147 {
1148 rtx reg = force_reg (Pmode, XEXP (x, 1));
1149 return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
1150 }
1151
1152 /* Note we must reject symbols which represent function addresses
1153 since the assembler/linker can't handle arithmetic on plabels. */
1154 if (GET_CODE (x) == PLUS
1155 && GET_CODE (XEXP (x, 1)) == CONST_INT
1156 && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1157 && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
1158 || GET_CODE (XEXP (x, 0)) == REG))
1159 {
1160 rtx int_part, ptr_reg;
1161 int newoffset;
1162 int offset = INTVAL (XEXP (x, 1));
1163 int mask;
1164
1165 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
1166 ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);
1167
1168 /* Choose which way to round the offset. Round up if we
1169 are >= halfway to the next boundary. */
1170 if ((offset & mask) >= ((mask + 1) / 2))
1171 newoffset = (offset & ~ mask) + mask + 1;
1172 else
1173 newoffset = (offset & ~ mask);
1174
1175 /* If the newoffset will not fit in 14 bits (ldo), then
1176 handling this would take 4 or 5 instructions (2 to load
1177 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
1178 add the new offset and the SYMBOL_REF.) Combine can
1179 not handle 4->2 or 5->2 combinations, so do not create
1180 them. */
1181 if (! VAL_14_BITS_P (newoffset)
1182 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
1183 {
1184 rtx const_part = plus_constant (XEXP (x, 0), newoffset);
1185 rtx tmp_reg
1186 = force_reg (Pmode,
1187 gen_rtx_HIGH (Pmode, const_part));
1188 ptr_reg
1189 = force_reg (Pmode,
1190 gen_rtx_LO_SUM (Pmode,
1191 tmp_reg, const_part));
1192 }
1193 else
1194 {
1195 if (! VAL_14_BITS_P (newoffset))
1196 int_part = force_reg (Pmode, GEN_INT (newoffset));
1197 else
1198 int_part = GEN_INT (newoffset);
1199
1200 ptr_reg = force_reg (Pmode,
1201 gen_rtx_PLUS (Pmode,
1202 force_reg (Pmode, XEXP (x, 0)),
1203 int_part));
1204 }
1205 return plus_constant (ptr_reg, offset - newoffset);
1206 }
1207
1208 /* Handle (plus (mult (a) (shadd_constant)) (b)). */
1209
1210 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
1211 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1212 && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
1213 && (OBJECT_P (XEXP (x, 1))
1214 || GET_CODE (XEXP (x, 1)) == SUBREG)
1215 && GET_CODE (XEXP (x, 1)) != CONST)
1216 {
1217 int val = INTVAL (XEXP (XEXP (x, 0), 1));
1218 rtx reg1, reg2;
1219
1220 reg1 = XEXP (x, 1);
1221 if (GET_CODE (reg1) != REG)
1222 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1223
1224 reg2 = XEXP (XEXP (x, 0), 0);
1225 if (GET_CODE (reg2) != REG)
1226 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1227
1228 return force_reg (Pmode, gen_rtx_PLUS (Pmode,
1229 gen_rtx_MULT (Pmode,
1230 reg2,
1231 GEN_INT (val)),
1232 reg1));
1233 }
1234
1235 /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).
1236
1237 Only do so for floating point modes since this is more speculative
1238 and we lose if it's an integer store. */
1239 if (GET_CODE (x) == PLUS
1240 && GET_CODE (XEXP (x, 0)) == PLUS
1241 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
1242 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
1243 && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
1244 && (mode == SFmode || mode == DFmode))
1245 {
1246
1247 /* First, try to figure out what to use as a base register. */
1248 rtx reg1, reg2, base, idx, orig_base;
1249
1250 reg1 = XEXP (XEXP (x, 0), 1);
1251 reg2 = XEXP (x, 1);
1252 base = NULL_RTX;
1253 idx = NULL_RTX;
1254
1255 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
1256 then emit_move_sequence will turn on REG_POINTER so we'll know
1257 it's a base register below. */
1258 if (GET_CODE (reg1) != REG)
1259 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1260
1261 if (GET_CODE (reg2) != REG)
1262 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1263
1264 /* Figure out what the base and index are. */
1265
1266 if (GET_CODE (reg1) == REG
1267 && REG_POINTER (reg1))
1268 {
1269 base = reg1;
1270 orig_base = XEXP (XEXP (x, 0), 1);
1271 idx = gen_rtx_PLUS (Pmode,
1272 gen_rtx_MULT (Pmode,
1273 XEXP (XEXP (XEXP (x, 0), 0), 0),
1274 XEXP (XEXP (XEXP (x, 0), 0), 1)),
1275 XEXP (x, 1));
1276 }
1277 else if (GET_CODE (reg2) == REG
1278 && REG_POINTER (reg2))
1279 {
1280 base = reg2;
1281 orig_base = XEXP (x, 1);
1282 idx = XEXP (x, 0);
1283 }
1284
1285 if (base == 0)
1286 return orig;
1287
1288 /* If the index adds a large constant, try to scale the
1289 constant so that it can be loaded with only one insn. */
1290 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1291 && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
1292 / INTVAL (XEXP (XEXP (idx, 0), 1)))
1293 && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
1294 {
1295 /* Divide the CONST_INT by the scale factor, then add it to A. */
1296 int val = INTVAL (XEXP (idx, 1));
1297
1298 val /= INTVAL (XEXP (XEXP (idx, 0), 1));
1299 reg1 = XEXP (XEXP (idx, 0), 0);
1300 if (GET_CODE (reg1) != REG)
1301 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1302
1303 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));
1304
1305 /* We can now generate a simple scaled indexed address. */
1306 return
1307 force_reg
1308 (Pmode, gen_rtx_PLUS (Pmode,
1309 gen_rtx_MULT (Pmode, reg1,
1310 XEXP (XEXP (idx, 0), 1)),
1311 base));
1312 }
1313
1314 /* If B + C is still a valid base register, then add them. */
1315 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1316 && INTVAL (XEXP (idx, 1)) <= 4096
1317 && INTVAL (XEXP (idx, 1)) >= -4096)
1318 {
1319 int val = INTVAL (XEXP (XEXP (idx, 0), 1));
1320 rtx reg1, reg2;
1321
1322 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
1323
1324 reg2 = XEXP (XEXP (idx, 0), 0);
1325 if (GET_CODE (reg2) != CONST_INT)
1326 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1327
1328 return force_reg (Pmode, gen_rtx_PLUS (Pmode,
1329 gen_rtx_MULT (Pmode,
1330 reg2,
1331 GEN_INT (val)),
1332 reg1));
1333 }
1334
1335 /* Get the index into a register, then add the base + index and
1336 return a register holding the result. */
1337
1338 /* First get A into a register. */
1339 reg1 = XEXP (XEXP (idx, 0), 0);
1340 if (GET_CODE (reg1) != REG)
1341 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1342
1343 /* And get B into a register. */
1344 reg2 = XEXP (idx, 1);
1345 if (GET_CODE (reg2) != REG)
1346 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1347
1348 reg1 = force_reg (Pmode,
1349 gen_rtx_PLUS (Pmode,
1350 gen_rtx_MULT (Pmode, reg1,
1351 XEXP (XEXP (idx, 0), 1)),
1352 reg2));
1353
1354 /* Add the result to our base register and return. */
1355 return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
1356
1357 }
1358
1359 /* Uh-oh. We might have an address for x[n-100000]. This needs
1360 special handling to avoid creating an indexed memory address
1361 with x-100000 as the base.
1362
1363 If the constant part is small enough, then it's still safe because
1364 there is a guard page at the beginning and end of the data segment.
1365
1366 Scaled references are common enough that we want to try to rearrange
1367 the terms so that we can use indexing for these addresses too. Only
1368 do the optimization for floating point modes. */
1369
1370 if (GET_CODE (x) == PLUS
1371 && symbolic_expression_p (XEXP (x, 1)))
1372 {
1373 /* Ugly. We modify things here so that the address offset specified
1374 by the index expression is computed first, then added to x to form
1375 the entire address. */
1376
1377 rtx regx1, regx2, regy1, regy2, y;
1378
1379 /* Strip off any CONST. */
1380 y = XEXP (x, 1);
1381 if (GET_CODE (y) == CONST)
1382 y = XEXP (y, 0);
1383
1384 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1385 {
1386 /* See if this looks like
1387 (plus (mult (reg) (shadd_const))
1388 (const (plus (symbol_ref) (const_int))))
1389
1390 Where const_int is small. In that case the const
1391 expression is a valid pointer for indexing.
1392
1393 If const_int is big, but can be divided evenly by shadd_const
1394 and added to (reg). This allows more scaled indexed addresses. */
1395 if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1396 && GET_CODE (XEXP (x, 0)) == MULT
1397 && GET_CODE (XEXP (y, 1)) == CONST_INT
1398 && INTVAL (XEXP (y, 1)) >= -4096
1399 && INTVAL (XEXP (y, 1)) <= 4095
1400 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1401 && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
1402 {
1403 int val = INTVAL (XEXP (XEXP (x, 0), 1));
1404 rtx reg1, reg2;
1405
1406 reg1 = XEXP (x, 1);
1407 if (GET_CODE (reg1) != REG)
1408 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1409
1410 reg2 = XEXP (XEXP (x, 0), 0);
1411 if (GET_CODE (reg2) != REG)
1412 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1413
1414 return force_reg (Pmode,
1415 gen_rtx_PLUS (Pmode,
1416 gen_rtx_MULT (Pmode,
1417 reg2,
1418 GEN_INT (val)),
1419 reg1));
1420 }
1421 else if ((mode == DFmode || mode == SFmode)
1422 && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1423 && GET_CODE (XEXP (x, 0)) == MULT
1424 && GET_CODE (XEXP (y, 1)) == CONST_INT
1425 && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
1426 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1427 && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
1428 {
1429 regx1
1430 = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
1431 / INTVAL (XEXP (XEXP (x, 0), 1))));
1432 regx2 = XEXP (XEXP (x, 0), 0);
1433 if (GET_CODE (regx2) != REG)
1434 regx2 = force_reg (Pmode, force_operand (regx2, 0));
1435 regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1436 regx2, regx1));
1437 return
1438 force_reg (Pmode,
1439 gen_rtx_PLUS (Pmode,
1440 gen_rtx_MULT (Pmode, regx2,
1441 XEXP (XEXP (x, 0), 1)),
1442 force_reg (Pmode, XEXP (y, 0))));
1443 }
1444 else if (GET_CODE (XEXP (y, 1)) == CONST_INT
1445 && INTVAL (XEXP (y, 1)) >= -4096
1446 && INTVAL (XEXP (y, 1)) <= 4095)
1447 {
1448 /* This is safe because of the guard page at the
1449 beginning and end of the data space. Just
1450 return the original address. */
1451 return orig;
1452 }
1453 else
1454 {
1455 /* Doesn't look like one we can optimize. */
1456 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1457 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1458 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1459 regx1 = force_reg (Pmode,
1460 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1461 regx1, regy2));
1462 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1463 }
1464 }
1465 }
1466
1467 return orig;
1468 }
1469
1470 /* For the HPPA, REG, REG+CONST and LO_SUM addresses cost 1, HIGH
1471 costs 2, and anything else, including symbolic constants, costs 4.
1472
1473 PIC addresses are very expensive.
1474
1475 It is no coincidence that this has the same structure
1476 as GO_IF_LEGITIMATE_ADDRESS. */
1477
1478 static int
1479 hppa_address_cost (rtx X)
1480 {
1481 switch (GET_CODE (X))
1482 {
1483 case REG:
1484 case PLUS:
1485 case LO_SUM:
1486 return 1;
1487 case HIGH:
1488 return 2;
1489 default:
1490 return 4;
1491 }
1492 }
1493
1494 /* Compute a (partial) cost for rtx X. Return true if the complete
1495 cost has been computed, and false if subexpressions should be
1496 scanned. In either case, *TOTAL contains the cost result. */
1497
1498 static bool
1499 hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
1500 {
1501 switch (code)
1502 {
1503 case CONST_INT:
1504 if (INTVAL (x) == 0)
1505 *total = 0;
1506 else if (INT_14_BITS (x))
1507 *total = 1;
1508 else
1509 *total = 2;
1510 return true;
1511
1512 case HIGH:
1513 *total = 2;
1514 return true;
1515
1516 case CONST:
1517 case LABEL_REF:
1518 case SYMBOL_REF:
1519 *total = 4;
1520 return true;
1521
1522 case CONST_DOUBLE:
1523 if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
1524 && outer_code != SET)
1525 *total = 0;
1526 else
1527 *total = 8;
1528 return true;
1529
1530 case MULT:
1531 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1532 *total = COSTS_N_INSNS (3);
1533 else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
1534 *total = COSTS_N_INSNS (8);
1535 else
1536 *total = COSTS_N_INSNS (20);
1537 return true;
1538
1539 case DIV:
1540 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1541 {
1542 *total = COSTS_N_INSNS (14);
1543 return true;
1544 }
1545 /* FALLTHRU */
1546
1547 case UDIV:
1548 case MOD:
1549 case UMOD:
1550 *total = COSTS_N_INSNS (60);
1551 return true;
1552
1553 case PLUS: /* this includes shNadd insns */
1554 case MINUS:
1555 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1556 *total = COSTS_N_INSNS (3);
1557 else
1558 *total = COSTS_N_INSNS (1);
1559 return true;
1560
1561 case ASHIFT:
1562 case ASHIFTRT:
1563 case LSHIFTRT:
1564 *total = COSTS_N_INSNS (1);
1565 return true;
1566
1567 default:
1568 return false;
1569 }
1570 }
1571
1572 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1573 new rtx with the correct mode. */
1574 static inline rtx
1575 force_mode (enum machine_mode mode, rtx orig)
1576 {
1577 if (mode == GET_MODE (orig))
1578 return orig;
1579
1580 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
1581 abort ();
1582
1583 return gen_rtx_REG (mode, REGNO (orig));
1584 }
1585
1586 /* Emit insns to move operands[1] into operands[0].
1587
1588 Return 1 if we have written out everything that needs to be done to
1589 do the move. Otherwise, return 0 and the caller will emit the move
1590 normally.
1591
1592 Note SCRATCH_REG may not be in the proper mode depending on how it
1593 will be used. This routine is responsible for creating a new copy
1594 of SCRATCH_REG in the proper mode. */
1595
1596 int
1597 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
1598 {
1599 register rtx operand0 = operands[0];
1600 register rtx operand1 = operands[1];
1601 register rtx tem;
1602
1603 /* We can only handle indexed addresses in the destination operand
1604 of floating point stores. Thus, we need to break out indexed
1605 addresses from the destination operand. */
1606 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1607 {
1608 /* This is only safe up to the beginning of life analysis. */
1609 if (no_new_pseudos)
1610 abort ();
1611
1612 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1613 operand0 = replace_equiv_address (operand0, tem);
1614 }
1615
1616 /* On targets with non-equivalent space registers, break out unscaled
1617 indexed addresses from the source operand before the final CSE.
1618 We have to do this because the REG_POINTER flag is not correctly
1619 carried through various optimization passes and CSE may substitute
1620 a pseudo without the pointer set for one with the pointer set. As
1621 a result, we lose various opportunities to create insns with
1622 unscaled indexed addresses. */
1623 if (!TARGET_NO_SPACE_REGS
1624 && !cse_not_expected
1625 && GET_CODE (operand1) == MEM
1626 && GET_CODE (XEXP (operand1, 0)) == PLUS
1627 && REG_P (XEXP (XEXP (operand1, 0), 0))
1628 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1629 operand1
1630 = replace_equiv_address (operand1,
1631 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1632
1633 if (scratch_reg
1634 && reload_in_progress && GET_CODE (operand0) == REG
1635 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1636 operand0 = reg_equiv_mem[REGNO (operand0)];
1637 else if (scratch_reg
1638 && reload_in_progress && GET_CODE (operand0) == SUBREG
1639 && GET_CODE (SUBREG_REG (operand0)) == REG
1640 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1641 {
1642 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1643 the code which tracks sets/uses for delete_output_reload. */
1644 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1645 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
1646 SUBREG_BYTE (operand0));
1647 operand0 = alter_subreg (&temp);
1648 }
1649
1650 if (scratch_reg
1651 && reload_in_progress && GET_CODE (operand1) == REG
1652 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1653 operand1 = reg_equiv_mem[REGNO (operand1)];
1654 else if (scratch_reg
1655 && reload_in_progress && GET_CODE (operand1) == SUBREG
1656 && GET_CODE (SUBREG_REG (operand1)) == REG
1657 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1658 {
1659 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1660 the code which tracks sets/uses for delete_output_reload. */
1661 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1662 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
1663 SUBREG_BYTE (operand1));
1664 operand1 = alter_subreg (&temp);
1665 }
1666
1667 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1668 && ((tem = find_replacement (&XEXP (operand0, 0)))
1669 != XEXP (operand0, 0)))
1670 operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
1671
1672 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1673 && ((tem = find_replacement (&XEXP (operand1, 0)))
1674 != XEXP (operand1, 0)))
1675 operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
1676
1677 /* Handle secondary reloads for loads/stores of FP registers from
1678 REG+D addresses where D does not fit in 5 or 14 bits, including
1679 (subreg (mem (addr))) cases. */
1680 if (scratch_reg
1681 && fp_reg_operand (operand0, mode)
1682 && ((GET_CODE (operand1) == MEM
1683 && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
1684 XEXP (operand1, 0)))
1685 || ((GET_CODE (operand1) == SUBREG
1686 && GET_CODE (XEXP (operand1, 0)) == MEM
1687 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1688 ? SFmode : DFmode),
1689 XEXP (XEXP (operand1, 0), 0))))))
1690 {
1691 if (GET_CODE (operand1) == SUBREG)
1692 operand1 = XEXP (operand1, 0);
1693
1694 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1695 it in WORD_MODE regardless of what mode it was originally given
1696 to us. */
1697 scratch_reg = force_mode (word_mode, scratch_reg);
1698
1699 /* D might not fit in 14 bits either; for such cases load D into
1700 scratch reg. */
1701 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
1702 {
1703 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1704 emit_move_insn (scratch_reg,
1705 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1706 Pmode,
1707 XEXP (XEXP (operand1, 0), 0),
1708 scratch_reg));
1709 }
1710 else
1711 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1712 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1713 gen_rtx_MEM (mode, scratch_reg)));
1714 return 1;
1715 }
1716 else if (scratch_reg
1717 && fp_reg_operand (operand1, mode)
1718 && ((GET_CODE (operand0) == MEM
1719 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1720 ? SFmode : DFmode),
1721 XEXP (operand0, 0)))
1722 || ((GET_CODE (operand0) == SUBREG)
1723 && GET_CODE (XEXP (operand0, 0)) == MEM
1724 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1725 ? SFmode : DFmode),
1726 XEXP (XEXP (operand0, 0), 0)))))
1727 {
1728 if (GET_CODE (operand0) == SUBREG)
1729 operand0 = XEXP (operand0, 0);
1730
1731 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1732 it in WORD_MODE regardless of what mode it was originally given
1733 to us. */
1734 scratch_reg = force_mode (word_mode, scratch_reg);
1735
1736 /* D might not fit in 14 bits either; for such cases load D into
1737 scratch reg. */
1738 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
1739 {
1740 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1741 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1742 0)),
1743 Pmode,
1744 XEXP (XEXP (operand0, 0),
1745 0),
1746 scratch_reg));
1747 }
1748 else
1749 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1750 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
1751 operand1));
1752 return 1;
1753 }
1754 /* Handle secondary reloads for loads of FP registers from constant
1755 expressions by forcing the constant into memory.
1756
1757 Use scratch_reg to hold the address of the memory location.
1758
1759 The proper fix is to change PREFERRED_RELOAD_CLASS to return
1760 NO_REGS when presented with a const_int and a register class
1761 containing only FP registers. Doing so unfortunately creates
1762 more problems than it solves. Fix this for 2.5. */
1763 else if (scratch_reg
1764 && CONSTANT_P (operand1)
1765 && fp_reg_operand (operand0, mode))
1766 {
1767 rtx xoperands[2];
1768
1769 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1770 it in WORD_MODE regardless of what mode it was originally given
1771 to us. */
1772 scratch_reg = force_mode (word_mode, scratch_reg);
1773
1774 /* Force the constant into memory and put the address of the
1775 memory location into scratch_reg. */
1776 xoperands[0] = scratch_reg;
1777 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
1778 emit_move_sequence (xoperands, Pmode, 0);
1779
1780 /* Now load the destination register. */
1781 emit_insn (gen_rtx_SET (mode, operand0,
1782 gen_rtx_MEM (mode, scratch_reg)));
1783 return 1;
1784 }
1785 /* Handle secondary reloads for SAR. These occur when trying to load
1786 the SAR from memory, FP register, or with a constant. */
1787 else if (scratch_reg
1788 && GET_CODE (operand0) == REG
1789 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1790 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1791 && (GET_CODE (operand1) == MEM
1792 || GET_CODE (operand1) == CONST_INT
1793 || (GET_CODE (operand1) == REG
1794 && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
1795 {
1796 /* D might not fit in 14 bits either; for such cases load D into
1797 scratch reg. */
1798 if (GET_CODE (operand1) == MEM
1799 && !memory_address_p (Pmode, XEXP (operand1, 0)))
1800 {
1801 /* We are reloading the address into the scratch register, so we
1802 want to make sure the scratch register is a full register. */
1803 scratch_reg = force_mode (word_mode, scratch_reg);
1804
1805 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1806 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1807 0)),
1808 Pmode,
1809 XEXP (XEXP (operand1, 0),
1810 0),
1811 scratch_reg));
1812
1813 /* Now we are going to load the scratch register from memory;
1814 we want to load it in the same width as the original MEM,
1815 which must be the same as the width of the ultimate destination,
1816 OPERAND0. */
1817 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1818
1819 emit_move_insn (scratch_reg, gen_rtx_MEM (GET_MODE (operand0),
1820 scratch_reg));
1821 }
1822 else
1823 {
1824 /* We want to load the scratch register using the same mode as
1825 the ultimate destination. */
1826 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1827
1828 emit_move_insn (scratch_reg, operand1);
1829 }
1830
1831 /* And emit the insn to set the ultimate destination. We know that
1832 the scratch register has the same mode as the destination at this
1833 point. */
1834 emit_move_insn (operand0, scratch_reg);
1835 return 1;
1836 }
1837 /* Handle the most common case: storing into a register. */
1838 else if (register_operand (operand0, mode))
1839 {
1840 if (register_operand (operand1, mode)
1841 || (GET_CODE (operand1) == CONST_INT
1842 && cint_ok_for_move (INTVAL (operand1)))
1843 || (operand1 == CONST0_RTX (mode))
1844 || (GET_CODE (operand1) == HIGH
1845 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1846 /* Only `general_operands' can come here, so MEM is ok. */
1847 || GET_CODE (operand1) == MEM)
1848 {
1849 /* Various sets are created during RTL generation which don't
1850 have the REG_POINTER flag correctly set. After the CSE pass,
1851 instruction recognition can fail if we don't consistently
1852 set this flag when performing register copies. This should
1853 also improve the opportunities for creating insns that use
1854 unscaled indexing. */
1855 if (REG_P (operand0) && REG_P (operand1))
1856 {
1857 if (REG_POINTER (operand1)
1858 && !REG_POINTER (operand0)
1859 && !HARD_REGISTER_P (operand0))
1860 copy_reg_pointer (operand0, operand1);
1861 else if (REG_POINTER (operand0)
1862 && !REG_POINTER (operand1)
1863 && !HARD_REGISTER_P (operand1))
1864 copy_reg_pointer (operand1, operand0);
1865 }
1866
1867 /* When MEMs are broken out, the REG_POINTER flag doesn't
1868 get set. In some cases, we can set the REG_POINTER flag
1869 from the declaration for the MEM. */
1870 if (REG_P (operand0)
1871 && GET_CODE (operand1) == MEM
1872 && !REG_POINTER (operand0))
1873 {
1874 tree decl = MEM_EXPR (operand1);
1875
1876 /* Set the register pointer flag and register alignment
1877 if the declaration for this memory reference is a
1878 pointer type. Fortran indirect argument references
1879 are ignored. */
1880 if (decl
1881 && !(flag_argument_noalias > 1
1882 && TREE_CODE (decl) == INDIRECT_REF
1883 && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
1884 {
1885 tree type;
1886
1887 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1888 tree operand 1. */
1889 if (TREE_CODE (decl) == COMPONENT_REF)
1890 decl = TREE_OPERAND (decl, 1);
1891
1892 type = TREE_TYPE (decl);
1893 if (TREE_CODE (type) == ARRAY_TYPE)
1894 type = get_inner_array_type (type);
1895
1896 if (POINTER_TYPE_P (type))
1897 {
1898 int align;
1899
1900 type = TREE_TYPE (type);
1901 /* Using TYPE_ALIGN_OK is rather conservative as
1902 only the Ada front end actually sets it. */
1903 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1904 : BITS_PER_UNIT);
1905 mark_reg_pointer (operand0, align);
1906 }
1907 }
1908 }
1909
1910 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1911 return 1;
1912 }
1913 }
1914 else if (GET_CODE (operand0) == MEM)
1915 {
1916 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1917 && !(reload_in_progress || reload_completed))
1918 {
1919 rtx temp = gen_reg_rtx (DFmode);
1920
1921 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1922 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1923 return 1;
1924 }
1925 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1926 {
1927 /* Run this case quickly. */
1928 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1929 return 1;
1930 }
1931 if (! (reload_in_progress || reload_completed))
1932 {
1933 operands[0] = validize_mem (operand0);
1934 operands[1] = operand1 = force_reg (mode, operand1);
1935 }
1936 }
1937
1938 /* Simplify the source if we need to.
1939 Note we do have to handle function labels here, even though we do
1940 not consider them legitimate constants. Loop optimizations can
1941 call the emit_move_xxx routines with one as a source. */
1942 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1943 || function_label_operand (operand1, mode)
1944 || (GET_CODE (operand1) == HIGH
1945 && symbolic_operand (XEXP (operand1, 0), mode)))
1946 {
1947 int ishighonly = 0;
1948
1949 if (GET_CODE (operand1) == HIGH)
1950 {
1951 ishighonly = 1;
1952 operand1 = XEXP (operand1, 0);
1953 }
1954 if (symbolic_operand (operand1, mode))
1955 {
1956 /* Argh. The assembler and linker can't handle arithmetic
1957 involving plabels.
1958
1959 So we force the plabel into memory, load operand0 from
1960 the memory location, then add in the constant part. */
1961 if ((GET_CODE (operand1) == CONST
1962 && GET_CODE (XEXP (operand1, 0)) == PLUS
1963 && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
1964 || function_label_operand (operand1, mode))
1965 {
1966 rtx temp, const_part;
1967
1968 /* Figure out what (if any) scratch register to use. */
1969 if (reload_in_progress || reload_completed)
1970 {
1971 scratch_reg = scratch_reg ? scratch_reg : operand0;
1972 /* SCRATCH_REG will hold an address and maybe the actual
1973 data. We want it in WORD_MODE regardless of what mode it
1974 was originally given to us. */
1975 scratch_reg = force_mode (word_mode, scratch_reg);
1976 }
1977 else if (flag_pic)
1978 scratch_reg = gen_reg_rtx (Pmode);
1979
1980 if (GET_CODE (operand1) == CONST)
1981 {
1982 /* Save away the constant part of the expression. */
1983 const_part = XEXP (XEXP (operand1, 0), 1);
1984 if (GET_CODE (const_part) != CONST_INT)
1985 abort ();
1986
1987 /* Force the function label into memory. */
1988 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1989 }
1990 else
1991 {
1992 /* No constant part. */
1993 const_part = NULL_RTX;
1994
1995 /* Force the function label into memory. */
1996 temp = force_const_mem (mode, operand1);
1997 }
1998
1999
2000 /* Get the address of the memory location. PIC-ify it if
2001 necessary. */
2002 temp = XEXP (temp, 0);
2003 if (flag_pic)
2004 temp = legitimize_pic_address (temp, mode, scratch_reg);
2005
2006 /* Put the address of the memory location into our destination
2007 register. */
2008 operands[1] = temp;
2009 emit_move_sequence (operands, mode, scratch_reg);
2010
2011 /* Now load from the memory location into our destination
2012 register. */
2013 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2014 emit_move_sequence (operands, mode, scratch_reg);
2015
2016 /* And add back in the constant part. */
2017 if (const_part != NULL_RTX)
2018 expand_inc (operand0, const_part);
2019
2020 return 1;
2021 }
2022
2023 if (flag_pic)
2024 {
2025 rtx temp;
2026
2027 if (reload_in_progress || reload_completed)
2028 {
2029 temp = scratch_reg ? scratch_reg : operand0;
2030 /* TEMP will hold an address and maybe the actual
2031 data. We want it in WORD_MODE regardless of what mode it
2032 was originally given to us. */
2033 temp = force_mode (word_mode, temp);
2034 }
2035 else
2036 temp = gen_reg_rtx (Pmode);
2037
2038 /* (const (plus (symbol) (const_int))) must be forced to
2039 memory during/after reload if the const_int will not fit
2040 in 14 bits. */
2041 if (GET_CODE (operand1) == CONST
2042 && GET_CODE (XEXP (operand1, 0)) == PLUS
2043 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2044 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2045 && (reload_completed || reload_in_progress)
2046 && flag_pic)
2047 {
2048 operands[1] = force_const_mem (mode, operand1);
2049 operands[1] = legitimize_pic_address (XEXP (operands[1], 0),
2050 mode, temp);
2051 operands[1] = gen_rtx_MEM (mode, operands[1]);
2052 emit_move_sequence (operands, mode, temp);
2053 }
2054 else
2055 {
2056 operands[1] = legitimize_pic_address (operand1, mode, temp);
2057 if (REG_P (operand0) && REG_P (operands[1]))
2058 copy_reg_pointer (operand0, operands[1]);
2059 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2060 }
2061 }
2062 /* On the HPPA, references to data space are supposed to use dp,
2063 register 27, but showing it in the RTL inhibits various cse
2064 and loop optimizations. */
2065 else
2066 {
2067 rtx temp, set;
2068
2069 if (reload_in_progress || reload_completed)
2070 {
2071 temp = scratch_reg ? scratch_reg : operand0;
2072 /* TEMP will hold an address and maybe the actual
2073 data. We want it in WORD_MODE regardless of what mode it
2074 was originally given to us. */
2075 temp = force_mode (word_mode, temp);
2076 }
2077 else
2078 temp = gen_reg_rtx (mode);
2079
2080 /* Loading a SYMBOL_REF into a register makes that register
2081 safe to be used as the base in an indexed address.
2082
2083 Don't mark hard registers though. That loses. */
2084 if (GET_CODE (operand0) == REG
2085 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2086 mark_reg_pointer (operand0, BITS_PER_UNIT);
2087 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2088 mark_reg_pointer (temp, BITS_PER_UNIT);
2089
2090 if (ishighonly)
2091 set = gen_rtx_SET (mode, operand0, temp);
2092 else
2093 set = gen_rtx_SET (VOIDmode,
2094 operand0,
2095 gen_rtx_LO_SUM (mode, temp, operand1));
2096
2097 emit_insn (gen_rtx_SET (VOIDmode,
2098 temp,
2099 gen_rtx_HIGH (mode, operand1)));
2100 emit_insn (set);
2101
2102 }
2103 return 1;
2104 }
2105 else if (GET_CODE (operand1) != CONST_INT
2106 || !cint_ok_for_move (INTVAL (operand1)))
2107 {
2108 rtx insn, temp;
2109 rtx op1 = operand1;
2110 HOST_WIDE_INT value = 0;
2111 HOST_WIDE_INT insv = 0;
2112 int insert = 0;
2113
2114 if (GET_CODE (operand1) == CONST_INT)
2115 value = INTVAL (operand1);
2116
2117 if (TARGET_64BIT
2118 && GET_CODE (operand1) == CONST_INT
2119 && HOST_BITS_PER_WIDE_INT > 32
2120 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2121 {
2122 HOST_WIDE_INT nval;
2123
2124 /* Extract the low order 32 bits of the value and sign extend.
2125 If the new value is the same as the original value, we can
2126 use the original value as-is. If the new value is
2127 different, we use it and insert the most-significant 32-bits
2128 of the original value into the final result. */
2129 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2130 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
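/* Worked example (illustrative): for value 0x123456789, the masked
   low half is 0x23456789; since its bit 31 is clear, the XOR/subtract
   pair leaves it unchanged, so nval == 0x23456789 != value.  The high
   32 bits (insv == 0x1) are then inserted separately below.  */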
2131 if (value != nval)
2132 {
2133 #if HOST_BITS_PER_WIDE_INT > 32
2134 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2135 #endif
2136 insert = 1;
2137 value = nval;
2138 operand1 = GEN_INT (nval);
2139 }
2140 }
2141
2142 if (reload_in_progress || reload_completed)
2143 temp = scratch_reg ? scratch_reg : operand0;
2144 else
2145 temp = gen_reg_rtx (mode);
2146
2147 /* We don't directly split DImode constants on 32-bit targets
2148 because PLUS uses an 11-bit immediate and the insn sequence
2149 generated is not as efficient as the one using HIGH/LO_SUM. */
2150 if (GET_CODE (operand1) == CONST_INT
2151 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2152 && !insert)
2153 {
2154 /* Directly break constant into high and low parts. This
2155 provides better optimization opportunities because various
2156 passes recognize constants split with PLUS but not LO_SUM.
2157 We use a 14-bit signed low part except when the addition
2158 of 0x4000 to the high part might change the sign of the
2159 high part. */
2160 HOST_WIDE_INT low = value & 0x3fff;
2161 HOST_WIDE_INT high = value & ~ 0x3fff;
2162
2163 if (low >= 0x2000)
2164 {
2165 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2166 high += 0x2000;
2167 else
2168 high += 0x4000;
2169 }
2170
2171 low = value - high;
2172
2173 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2174 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2175 }
2176 else
2177 {
2178 emit_insn (gen_rtx_SET (VOIDmode, temp,
2179 gen_rtx_HIGH (mode, operand1)));
2180 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2181 }
2182
2183 insn = emit_move_insn (operands[0], operands[1]);
2184
2185 /* Now insert the most significant 32 bits of the value
2186 into the register. When we don't have a second register
2187 available, it could take up to nine instructions to load
2188 a 64-bit integer constant. Prior to reload, we force
2189 constants that would take more than three instructions
2190 to load to the constant pool. During and after reload,
2191 we have to handle all possible values. */
2192 if (insert)
2193 {
2194 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2195 register and the value to be inserted is outside the
2196 range that can be loaded with three depdi instructions. */
2197 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2198 {
2199 operand1 = GEN_INT (insv);
2200
2201 emit_insn (gen_rtx_SET (VOIDmode, temp,
2202 gen_rtx_HIGH (mode, operand1)));
2203 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2204 emit_insn (gen_insv (operand0, GEN_INT (32),
2205 const0_rtx, temp));
2206 }
2207 else
2208 {
2209 int len = 5, pos = 27;
2210
2211 /* Insert the bits using the depdi instruction. */
2212 while (pos >= 0)
2213 {
2214 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2215 HOST_WIDE_INT sign = v5 < 0;
2216
2217 /* Left extend the insertion. */
2218 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2219 while (pos > 0 && (insv & 1) == sign)
2220 {
2221 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2222 len += 1;
2223 pos -= 1;
2224 }
2225
2226 emit_insn (gen_insv (operand0, GEN_INT (len),
2227 GEN_INT (pos), GEN_INT (v5)));
2228
2229 len = pos > 0 && pos < 5 ? pos : 5;
2230 pos -= len;
2231 }
2232 }
2233 }
2234
2235 REG_NOTES (insn)
2236 = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));
2237
2238 return 1;
2239 }
2240 }
2241 /* Now have insn-emit do whatever it normally does. */
2242 return 0;
2243 }
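/* A minimal standalone sketch (illustrative, not part of the build) of
   the high/low constant split performed in emit_move_sequence above.
   The low part is a 14-bit signed immediate, so when it is 0x2000 or
   more we round the high part up by 0x4000 and let the low part go
   negative, keeping high + low == value exact.  */
static void
sketch_split_14bit (HOST_WIDE_INT value, HOST_WIDE_INT *hi, HOST_WIDE_INT *lo)
{
  HOST_WIDE_INT high = value & ~ 0x3fff;

  if ((value & 0x3fff) >= 0x2000)
    high += 0x4000;	/* Simplified: ignores the 0x7fffc000/HImode
			   overflow adjustments made above.  */
  *hi = high;
  *lo = value - high;	/* Always fits in 14 signed bits.  */
}

/* For example, value 0x12345 splits as high == 0x14000 and
   low == -0x1cbb, and 0x14000 + (-0x1cbb) == 0x12345.  */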
2244
2245 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2246 it will need a link/runtime reloc). */
2247
2248 int
2249 reloc_needed (tree exp)
2250 {
2251 int reloc = 0;
2252
2253 switch (TREE_CODE (exp))
2254 {
2255 case ADDR_EXPR:
2256 return 1;
2257
2258 case PLUS_EXPR:
2259 case MINUS_EXPR:
2260 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2261 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2262 break;
2263
2264 case NOP_EXPR:
2265 case CONVERT_EXPR:
2266 case NON_LVALUE_EXPR:
2267 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2268 break;
2269
2270 case CONSTRUCTOR:
2271 {
2272 register tree link;
2273 for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
2274 if (TREE_VALUE (link) != 0)
2275 reloc |= reloc_needed (TREE_VALUE (link));
2276 }
2277 break;
2278
2279 case ERROR_MARK:
2280 break;
2281
2282 default:
2283 break;
2284 }
2285 return reloc;
2286 }
2287
2288 /* Does operand (which is a symbolic_operand) live in text space?
2289 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2290 will be true. */
2291
2292 int
2293 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2294 {
2295 if (GET_CODE (operand) == CONST)
2296 operand = XEXP (XEXP (operand, 0), 0);
2297 if (flag_pic)
2298 {
2299 if (GET_CODE (operand) == SYMBOL_REF)
2300 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2301 }
2302 else
2303 {
2304 if (GET_CODE (operand) == SYMBOL_REF)
2305 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2306 }
2307 return 1;
2308 }
2309
2310 \f
2311 /* Return the best assembler insn template
2312 for moving operands[1] into operands[0] as a fullword. */
2313 const char *
2314 singlemove_string (rtx *operands)
2315 {
2316 HOST_WIDE_INT intval;
2317
2318 if (GET_CODE (operands[0]) == MEM)
2319 return "stw %r1,%0";
2320 if (GET_CODE (operands[1]) == MEM)
2321 return "ldw %1,%0";
2322 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2323 {
2324 long i;
2325 REAL_VALUE_TYPE d;
2326
2327 if (GET_MODE (operands[1]) != SFmode)
2328 abort ();
2329
2330 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2331 bit pattern. */
2332 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2333 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2334
2335 operands[1] = GEN_INT (i);
2336 /* Fall through to CONST_INT case. */
2337 }
2338 if (GET_CODE (operands[1]) == CONST_INT)
2339 {
2340 intval = INTVAL (operands[1]);
2341
2342 if (VAL_14_BITS_P (intval))
2343 return "ldi %1,%0";
2344 else if ((intval & 0x7ff) == 0)
2345 return "ldil L'%1,%0";
2346 else if (zdepi_cint_p (intval))
2347 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2348 else
2349 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2350 }
2351 return "copy %1,%0";
2352 }
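/* An illustrative sketch (assumption: the same classification as the
   branches of singlemove_string above) of how a CONST_INT maps to
   insns: a 14-bit immediate needs one "ldi"; a constant whose low 11
   bits are zero needs one "ldil" (which loads the upper 21 bits); a
   contiguous bit string needs one "zdepi"; anything else needs the
   two-insn ldil/ldo pair.  */
static const char *
sketch_classify_cint (HOST_WIDE_INT ival)
{
  if (VAL_14_BITS_P (ival))
    return "ldi";			/* e.g. 42 or -100 */
  if ((ival & 0x7ff) == 0)
    return "ldil";			/* e.g. 0x12345000 */
  if (zdepi_cint_p (ival))
    return "zdepi";			/* e.g. 0x000ff000 */
  return "ldil + ldo";			/* e.g. 0x12345 */
}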
2353 \f
2354
2355 /* Compute position (in OP[1]) and width (in OP[2])
2356 useful for copying IMM to a register using the zdepi
2357 instructions. Store the immediate value to insert in OP[0]. */
2358 static void
2359 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2360 {
2361 int lsb, len;
2362
2363 /* Find the least significant set bit in IMM. */
2364 for (lsb = 0; lsb < 32; lsb++)
2365 {
2366 if ((imm & 1) != 0)
2367 break;
2368 imm >>= 1;
2369 }
2370
2371 /* Choose variants based on *sign* of the 5-bit field. */
2372 if ((imm & 0x10) == 0)
2373 len = (lsb <= 28) ? 4 : 32 - lsb;
2374 else
2375 {
2376 /* Find the width of the bitstring in IMM. */
2377 for (len = 5; len < 32; len++)
2378 {
2379 if ((imm & (1 << len)) == 0)
2380 break;
2381 }
2382
2383 /* Sign extend IMM as a 5-bit value. */
2384 imm = (imm & 0xf) - 0x10;
2385 }
2386
2387 op[0] = imm;
2388 op[1] = 31 - lsb;
2389 op[2] = len;
2390 }
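/* A minimal usage sketch (illustrative) of compute_zdepwi_operands.
   For IMM == 0x3e0 the ones occupy bits 5..9: lsb == 5, the field is
   five ones, and the 5-bit field sign-extends to -1.  */
static void
sketch_zdepwi_example (void)
{
  unsigned op[3];

  compute_zdepwi_operands (0x3e0, op);
  /* Now op[0] == (unsigned) -1, op[1] == 26, op[2] == 5, i.e.
     "zdepi -1,26,5": five one bits deposited with the lowest landing
     in bit 5, reconstructing 0x3e0.  */
}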
2391
2392 /* Compute position (in OP[1]) and width (in OP[2])
2393 useful for copying IMM to a register using the depdi,z
2394 instructions. Store the immediate value to insert in OP[0]. */
2395 void
2396 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2397 {
2398 HOST_WIDE_INT lsb, len;
2399
2400 /* Find the least significant set bit in IMM. */
2401 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2402 {
2403 if ((imm & 1) != 0)
2404 break;
2405 imm >>= 1;
2406 }
2407
2408 /* Choose variants based on *sign* of the 5-bit field. */
2409 if ((imm & 0x10) == 0)
2410 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2411 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2412 else
2413 {
2414 /* Find the width of the bitstring in IMM. */
2415 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2416 {
2417 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2418 break;
2419 }
2420
2421 /* Sign extend IMM as a 5-bit value. */
2422 imm = (imm & 0xf) - 0x10;
2423 }
2424
2425 op[0] = imm;
2426 op[1] = 63 - lsb;
2427 op[2] = len;
2428 }
2429
2430 /* Output assembler code to perform a doubleword move insn
2431 with operands OPERANDS. */
2432
2433 const char *
2434 output_move_double (rtx *operands)
2435 {
2436 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2437 rtx latehalf[2];
2438 rtx addreg0 = 0, addreg1 = 0;
2439
2440 /* First classify both operands. */
2441
2442 if (REG_P (operands[0]))
2443 optype0 = REGOP;
2444 else if (offsettable_memref_p (operands[0]))
2445 optype0 = OFFSOP;
2446 else if (GET_CODE (operands[0]) == MEM)
2447 optype0 = MEMOP;
2448 else
2449 optype0 = RNDOP;
2450
2451 if (REG_P (operands[1]))
2452 optype1 = REGOP;
2453 else if (CONSTANT_P (operands[1]))
2454 optype1 = CNSTOP;
2455 else if (offsettable_memref_p (operands[1]))
2456 optype1 = OFFSOP;
2457 else if (GET_CODE (operands[1]) == MEM)
2458 optype1 = MEMOP;
2459 else
2460 optype1 = RNDOP;
2461
2462 /* Check for the cases that the operand constraints aren't
2463 supposed to allow. Abort if we get one,
2464 because generating code for these cases is painful. */
2465
2466 if (optype0 != REGOP && optype1 != REGOP)
2467 abort ();
2468
2469 /* Handle auto decrementing and incrementing loads and stores
2470 specifically, since the structure of the function doesn't work
2471 for them without major modification. Do it better when we teach
2472 this port about the general inc/dec addressing of the PA.
2473 (This was written by tege. Chide him if it doesn't work.) */
2474
2475 if (optype0 == MEMOP)
2476 {
2477 /* We have to output the address syntax ourselves, since print_operand
2478 doesn't deal with the addresses we want to use. Fix this later. */
2479
2480 rtx addr = XEXP (operands[0], 0);
2481 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2482 {
2483 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2484
2485 operands[0] = XEXP (addr, 0);
2486 if (GET_CODE (operands[1]) != REG || GET_CODE (operands[0]) != REG)
2487 abort ();
2488
2489 if (!reg_overlap_mentioned_p (high_reg, addr))
2490 {
2491 /* No overlap between high target register and address
2492 register. (We do this in a non-obvious way to
2493 save a register file writeback) */
2494 if (GET_CODE (addr) == POST_INC)
2495 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2496 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2497 }
2498 else
2499 abort ();
2500 }
2501 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2502 {
2503 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2504
2505 operands[0] = XEXP (addr, 0);
2506 if (GET_CODE (operands[1]) != REG || GET_CODE (operands[0]) != REG)
2507 abort ();
2508
2509 if (!reg_overlap_mentioned_p (high_reg, addr))
2510 {
2511 /* No overlap between high target register and address
2512 register. (We do this in a non-obvious way to
2513 save a register file writeback) */
2514 if (GET_CODE (addr) == PRE_INC)
2515 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2516 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2517 }
2518 else
2519 abort ();
2520 }
2521 }
2522 if (optype1 == MEMOP)
2523 {
2524 /* We have to output the address syntax ourselves, since print_operand
2525 doesn't deal with the addresses we want to use. Fix this later. */
2526
2527 rtx addr = XEXP (operands[1], 0);
2528 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2529 {
2530 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2531
2532 operands[1] = XEXP (addr, 0);
2533 if (GET_CODE (operands[0]) != REG || GET_CODE (operands[1]) != REG)
2534 abort ();
2535
2536 if (!reg_overlap_mentioned_p (high_reg, addr))
2537 {
2538 /* No overlap between high target register and address
2539 register. (We do this in a non-obvious way to
2540 save a register file writeback) */
2541 if (GET_CODE (addr) == POST_INC)
2542 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2543 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2544 }
2545 else
2546 {
2547 /* This is an undefined situation. We should load into the
2548 address register *and* update that register. Probably
2549 we don't need to handle this at all. */
2550 if (GET_CODE (addr) == POST_INC)
2551 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2552 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2553 }
2554 }
2555 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2556 {
2557 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2558
2559 operands[1] = XEXP (addr, 0);
2560 if (GET_CODE (operands[0]) != REG || GET_CODE (operands[1]) != REG)
2561 abort ();
2562
2563 if (!reg_overlap_mentioned_p (high_reg, addr))
2564 {
2565 /* No overlap between high target register and address
2566 register. (We do this in a non-obvious way to
2567 save a register file writeback) */
2568 if (GET_CODE (addr) == PRE_INC)
2569 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2570 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2571 }
2572 else
2573 {
2574 /* This is an undefined situation. We should load into the
2575 address register *and* update that register. Probably
2576 we don't need to handle this at all. */
2577 if (GET_CODE (addr) == PRE_INC)
2578 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2579 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2580 }
2581 }
2582 else if (GET_CODE (addr) == PLUS
2583 && GET_CODE (XEXP (addr, 0)) == MULT)
2584 {
2585 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2586
2587 if (!reg_overlap_mentioned_p (high_reg, addr))
2588 {
2589 rtx xoperands[4];
2590
2591 xoperands[0] = high_reg;
2592 xoperands[1] = XEXP (addr, 1);
2593 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2594 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2595 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2596 xoperands);
2597 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2598 }
2599 else
2600 {
2601 rtx xoperands[4];
2602
2603 xoperands[0] = high_reg;
2604 xoperands[1] = XEXP (addr, 1);
2605 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2606 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2607 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2608 xoperands);
2609 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2610 }
2611 }
2612 }
2613
2614 /* If an operand is an unoffsettable memory ref, find a register
2615 we can increment temporarily to make it refer to the second word. */
2616
2617 if (optype0 == MEMOP)
2618 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2619
2620 if (optype1 == MEMOP)
2621 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2622
2623 /* Ok, we can do one word at a time.
2624 Normally we do the low-numbered word first.
2625
2626 In any case, set up in LATEHALF the operands to use
2627 for the high-numbered word and in some cases alter the
2628 operands in OPERANDS to be suitable for the low-numbered word. */
2629
2630 if (optype0 == REGOP)
2631 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2632 else if (optype0 == OFFSOP)
2633 latehalf[0] = adjust_address (operands[0], SImode, 4);
2634 else
2635 latehalf[0] = operands[0];
2636
2637 if (optype1 == REGOP)
2638 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2639 else if (optype1 == OFFSOP)
2640 latehalf[1] = adjust_address (operands[1], SImode, 4);
2641 else if (optype1 == CNSTOP)
2642 split_double (operands[1], &operands[1], &latehalf[1]);
2643 else
2644 latehalf[1] = operands[1];
2645
2646 /* If the first move would clobber the source of the second one,
2647 do them in the other order.
2648
2649 This can happen in two cases:
2650
2651 mem -> register where the first half of the destination register
2652 is the same register used in the memory's address. Reload
2653 can create such insns.
2654
2655 mem in this case will be either register indirect or register
2656 indirect plus a valid offset.
2657
2658 register -> register move where REGNO(dst) == REGNO(src + 1)
2659 someone (Tim/Tege?) claimed this can happen for parameter loads.
2660
2661 Handle mem -> register case first. */
2662 if (optype0 == REGOP
2663 && (optype1 == MEMOP || optype1 == OFFSOP)
2664 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2665 operands[1], 0))
2666 {
2667 /* Do the late half first. */
2668 if (addreg1)
2669 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2670 output_asm_insn (singlemove_string (latehalf), latehalf);
2671
2672 /* Then clobber. */
2673 if (addreg1)
2674 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2675 return singlemove_string (operands);
2676 }
2677
2678 /* Now handle register -> register case. */
2679 if (optype0 == REGOP && optype1 == REGOP
2680 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2681 {
2682 output_asm_insn (singlemove_string (latehalf), latehalf);
2683 return singlemove_string (operands);
2684 }
2685
2686 /* Normal case: do the two words, low-numbered first. */
2687
2688 output_asm_insn (singlemove_string (operands), operands);
2689
2690 /* Make any unoffsettable addresses point at high-numbered word. */
2691 if (addreg0)
2692 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2693 if (addreg1)
2694 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2695
2696 /* Do that word. */
2697 output_asm_insn (singlemove_string (latehalf), latehalf);
2698
2699 /* Undo the adds we just did. */
2700 if (addreg0)
2701 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2702 if (addreg1)
2703 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2704
2705 return "";
2706 }
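/* A standalone sketch (plain C, illustrative only) of the overlap
   hazard handled above.  When REGNO (dst) == REGNO (src) + 1, the low
   word of the destination aliases the high word of the source, so the
   late (high) half must be copied first.  */
static void
sketch_pair_overlap (void)
{
  int regs[4] = { 0, 10, 11, 0 };	/* source pair in regs[1],regs[2] */

  /* Destination pair is regs[2],regs[3].  Copy the late half first,
     as output_move_double does, so regs[2] is read before it is
     overwritten.  */
  regs[3] = regs[2];			/* late half first */
  regs[2] = regs[1];			/* then the low half */
  /* regs[2],regs[3] now hold { 10, 11 } as intended.  */
}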
2707 \f
2708 const char *
2709 output_fp_move_double (rtx *operands)
2710 {
2711 if (FP_REG_P (operands[0]))
2712 {
2713 if (FP_REG_P (operands[1])
2714 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2715 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2716 else
2717 output_asm_insn ("fldd%F1 %1,%0", operands);
2718 }
2719 else if (FP_REG_P (operands[1]))
2720 {
2721 output_asm_insn ("fstd%F0 %1,%0", operands);
2722 }
2723 else if (operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2724 {
2725 if (GET_CODE (operands[0]) == REG)
2726 {
2727 rtx xoperands[2];
2728 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2729 xoperands[0] = operands[0];
2730 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2731 }
2732 /* This is a pain. You have to be prepared to deal with an
2733 arbitrary address here including pre/post increment/decrement.
2734
2735 So avoid this in the MD. */
2736 else
2737 abort ();
2738 }
2739 else abort ();
2740 return "";
2741 }
2742 \f
2743 /* Return a REG that occurs in ADDR with coefficient 1.
2744 ADDR can be effectively incremented by incrementing REG. */
2745
2746 static rtx
2747 find_addr_reg (rtx addr)
2748 {
2749 while (GET_CODE (addr) == PLUS)
2750 {
2751 if (GET_CODE (XEXP (addr, 0)) == REG)
2752 addr = XEXP (addr, 0);
2753 else if (GET_CODE (XEXP (addr, 1)) == REG)
2754 addr = XEXP (addr, 1);
2755 else if (CONSTANT_P (XEXP (addr, 0)))
2756 addr = XEXP (addr, 1);
2757 else if (CONSTANT_P (XEXP (addr, 1)))
2758 addr = XEXP (addr, 0);
2759 else
2760 abort ();
2761 }
2762 if (GET_CODE (addr) == REG)
2763 return addr;
2764 abort ();
2765 }
2766
2767 /* Emit code to perform a block move.
2768
2769 OPERANDS[0] is the destination pointer as a REG, clobbered.
2770 OPERANDS[1] is the source pointer as a REG, clobbered.
2771 OPERANDS[2] is a register for temporary storage.
2772 OPERANDS[3] is a register for temporary storage.
2773 OPERANDS[4] is the size as a CONST_INT
2774 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2775 OPERANDS[6] is another temporary register. */
2776
2777 const char *
2778 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2779 {
2780 int align = INTVAL (operands[5]);
2781 unsigned long n_bytes = INTVAL (operands[4]);
2782
2783 /* We can't move more than a word at a time because the PA
2784 has no integer move insns longer than a word. (Could use fp mem ops?) */
2785 if (align > (TARGET_64BIT ? 8 : 4))
2786 align = (TARGET_64BIT ? 8 : 4);
2787
2788 /* Note that we know each loop below will execute at least twice
2789 (else we would have open-coded the copy). */
2790 switch (align)
2791 {
2792 case 8:
2793 /* Pre-adjust the loop counter. */
2794 operands[4] = GEN_INT (n_bytes - 16);
2795 output_asm_insn ("ldi %4,%2", operands);
2796
2797 /* Copying loop. */
2798 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2799 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2800 output_asm_insn ("std,ma %3,8(%0)", operands);
2801 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2802 output_asm_insn ("std,ma %6,8(%0)", operands);
2803
2804 /* Handle the residual. There could be up to 15 bytes of
2805 residual to copy! */
2806 if (n_bytes % 16 != 0)
2807 {
2808 operands[4] = GEN_INT (n_bytes % 8);
2809 if (n_bytes % 16 >= 8)
2810 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2811 if (n_bytes % 8 != 0)
2812 output_asm_insn ("ldd 0(%1),%6", operands);
2813 if (n_bytes % 16 >= 8)
2814 output_asm_insn ("std,ma %3,8(%0)", operands);
2815 if (n_bytes % 8 != 0)
2816 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2817 }
2818 return "";
2819
2820 case 4:
2821 /* Pre-adjust the loop counter. */
2822 operands[4] = GEN_INT (n_bytes - 8);
2823 output_asm_insn ("ldi %4,%2", operands);
2824
2825 /* Copying loop. */
2826 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2827 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2828 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2829 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2830 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2831
2832 /* Handle the residual. There could be up to 7 bytes of
2833 residual to copy! */
2834 if (n_bytes % 8 != 0)
2835 {
2836 operands[4] = GEN_INT (n_bytes % 4);
2837 if (n_bytes % 8 >= 4)
2838 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2839 if (n_bytes % 4 != 0)
2840 output_asm_insn ("ldw 0(%1),%6", operands);
2841 if (n_bytes % 8 >= 4)
2842 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2843 if (n_bytes % 4 != 0)
2844 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2845 }
2846 return "";
2847
2848 case 2:
2849 /* Pre-adjust the loop counter. */
2850 operands[4] = GEN_INT (n_bytes - 4);
2851 output_asm_insn ("ldi %4,%2", operands);
2852
2853 /* Copying loop. */
2854 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2855 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2856 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2857 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2858 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2859
2860 /* Handle the residual. */
2861 if (n_bytes % 4 != 0)
2862 {
2863 if (n_bytes % 4 >= 2)
2864 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2865 if (n_bytes % 2 != 0)
2866 output_asm_insn ("ldb 0(%1),%6", operands);
2867 if (n_bytes % 4 >= 2)
2868 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2869 if (n_bytes % 2 != 0)
2870 output_asm_insn ("stb %6,0(%0)", operands);
2871 }
2872 return "";
2873
2874 case 1:
2875 /* Pre-adjust the loop counter. */
2876 operands[4] = GEN_INT (n_bytes - 2);
2877 output_asm_insn ("ldi %4,%2", operands);
2878
2879 /* Copying loop. */
2880 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2881 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2882 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2883 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2884 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2885
2886 /* Handle the residual. */
2887 if (n_bytes % 2 != 0)
2888 {
2889 output_asm_insn ("ldb 0(%1),%3", operands);
2890 output_asm_insn ("stb %3,0(%0)", operands);
2891 }
2892 return "";
2893
2894 default:
2895 abort ();
2896 }
2897 }
2898
2899 /* Count the number of insns necessary to handle this block move.
2900
2901 Basic structure is the same as output_block_move, except that we
2902 count insns rather than emit them. */
2903
2904 static int
2905 compute_movmem_length (rtx insn)
2906 {
2907 rtx pat = PATTERN (insn);
2908 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2909 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2910 unsigned int n_insns = 0;
2911
2912 /* We can't move more than a word at a time because the PA
2913 has no integer move insns longer than a word. (Could use fp mem ops?) */
2914 if (align > (TARGET_64BIT ? 8 : 4))
2915 align = (TARGET_64BIT ? 8 : 4);
2916
2917 /* The basic copying loop. */
2918 n_insns = 6;
2919
2920 /* Residuals. */
2921 if (n_bytes % (2 * align) != 0)
2922 {
2923 if ((n_bytes % (2 * align)) >= align)
2924 n_insns += 2;
2925
2926 if ((n_bytes % align) != 0)
2927 n_insns += 2;
2928 }
2929
2930 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2931 return n_insns * 4;
2932 }
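/* A minimal sketch of the length computation above, parameterized
   directly on the byte count and alignment instead of on the insn
   pattern (illustrative only).  */
static int
sketch_movmem_length (unsigned long n_bytes, unsigned int align)
{
  unsigned int n_insns = 6;		/* the basic copying loop */

  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns += 2;			/* one full-width load/store pair */
      if ((n_bytes % align) != 0)
	n_insns += 2;			/* one sub-word load/store pair */
    }
  return n_insns * 4;			/* four bytes per insn */
}

/* For example, an 11-byte copy with 4-byte alignment has a residual of
   11 % 8 == 3 bytes, all sub-word, so the length is (6 + 2) * 4 == 32.  */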
2933
2934 /* Emit code to perform a block clear.
2935
2936 OPERANDS[0] is the destination pointer as a REG, clobbered.
2937 OPERANDS[1] is a register for temporary storage.
2938 OPERANDS[2] is the size as a CONST_INT
2939 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2940
2941 const char *
2942 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2943 {
2944 int align = INTVAL (operands[3]);
2945 unsigned long n_bytes = INTVAL (operands[2]);
2946
2947 /* We can't clear more than a word at a time because the PA
2948 has no integer move insns longer than a word. */
2949 if (align > (TARGET_64BIT ? 8 : 4))
2950 align = (TARGET_64BIT ? 8 : 4);
2951
2952 /* Note that we know each loop below will execute at least twice
2953 (else we would have open-coded the copy). */
2954 switch (align)
2955 {
2956 case 8:
2957 /* Pre-adjust the loop counter. */
2958 operands[2] = GEN_INT (n_bytes - 16);
2959 output_asm_insn ("ldi %2,%1", operands);
2960
2961 /* Loop. */
2962 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2963 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2964 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2965
2966 /* Handle the residual. There could be up to 15 bytes of
2967 residual to clear! */
2968 if (n_bytes % 16 != 0)
2969 {
2970 operands[2] = GEN_INT (n_bytes % 8);
2971 if (n_bytes % 16 >= 8)
2972 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2973 if (n_bytes % 8 != 0)
2974 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2975 }
2976 return "";
2977
2978 case 4:
2979 /* Pre-adjust the loop counter. */
2980 operands[2] = GEN_INT (n_bytes - 8);
2981 output_asm_insn ("ldi %2,%1", operands);
2982
2983 /* Loop. */
2984 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2985 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2986 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2987
2988 /* Handle the residual. There could be up to 7 bytes of
2989 residual to clear! */
2990 if (n_bytes % 8 != 0)
2991 {
2992 operands[2] = GEN_INT (n_bytes % 4);
2993 if (n_bytes % 8 >= 4)
2994 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2995 if (n_bytes % 4 != 0)
2996 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2997 }
2998 return "";
2999
3000 case 2:
3001 /* Pre-adjust the loop counter. */
3002 operands[2] = GEN_INT (n_bytes - 4);
3003 output_asm_insn ("ldi %2,%1", operands);
3004
3005 /* Loop. */
3006 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3007 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3008 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3009
3010 /* Handle the residual. */
3011 if (n_bytes % 4 != 0)
3012 {
3013 if (n_bytes % 4 >= 2)
3014 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3015 if (n_bytes % 2 != 0)
3016 output_asm_insn ("stb %%r0,0(%0)", operands);
3017 }
3018 return "";
3019
3020 case 1:
3021 /* Pre-adjust the loop counter. */
3022 operands[2] = GEN_INT (n_bytes - 2);
3023 output_asm_insn ("ldi %2,%1", operands);
3024
3025 /* Loop. */
3026 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3027 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3028 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3029
3030 /* Handle the residual. */
3031 if (n_bytes % 2 != 0)
3032 output_asm_insn ("stb %%r0,0(%0)", operands);
3033
3034 return "";
3035
3036 default:
3037 abort ();
3038 }
3039 }
3040
3041 /* Count the number of insns necessary to handle this block clear.
3042
3043 Basic structure is the same as output_block_clear, except that we
3044 count insns rather than emit them. */
3045
3046 static int
3047 compute_clrmem_length (rtx insn)
3048 {
3049 rtx pat = PATTERN (insn);
3050 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3051 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3052 unsigned int n_insns = 0;
3053
3054 /* We can't clear more than a word at a time because the PA
3055 has no integer move insns longer than a word. */
3056 if (align > (TARGET_64BIT ? 8 : 4))
3057 align = (TARGET_64BIT ? 8 : 4);
3058
3059 /* The basic loop. */
3060 n_insns = 4;
3061
3062 /* Residuals. */
3063 if (n_bytes % (2 * align) != 0)
3064 {
3065 if ((n_bytes % (2 * align)) >= align)
3066 n_insns++;
3067
3068 if ((n_bytes % align) != 0)
3069 n_insns++;
3070 }
3071
3072 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3073 return n_insns * 4;
3074 }
3075 \f
3076
3077 const char *
3078 output_and (rtx *operands)
3079 {
3080 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3081 {
3082 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3083 int ls0, ls1, ms0, p, len;
3084
3085 for (ls0 = 0; ls0 < 32; ls0++)
3086 if ((mask & (1 << ls0)) == 0)
3087 break;
3088
3089 for (ls1 = ls0; ls1 < 32; ls1++)
3090 if ((mask & (1 << ls1)) != 0)
3091 break;
3092
3093 for (ms0 = ls1; ms0 < 32; ms0++)
3094 if ((mask & (1 << ms0)) == 0)
3095 break;
3096
3097 if (ms0 != 32)
3098 abort ();
3099
3100 if (ls1 == 32)
3101 {
3102 len = ls0;
3103
3104 if (len == 0)
3105 abort ();
3106
3107 operands[2] = GEN_INT (len);
3108 return "{extru|extrw,u} %1,31,%2,%0";
3109 }
3110 else
3111 {
3112 /* We could use this `depi' for the case above as well, but `depi'
3113 requires one more register file access than an `extru'. */
3114
3115 p = 31 - ls0;
3116 len = ls1 - ls0;
3117
3118 operands[2] = GEN_INT (p);
3119 operands[3] = GEN_INT (len);
3120 return "{depi|depwi} 0,%2,%3,%0";
3121 }
3122 }
3123 else
3124 return "and %1,%2,%0";
3125 }
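/* Worked examples (illustrative) of the mask analysis in output_and.
   The zero bits of the mask must form one contiguous block; when that
   block extends through bit 31 the AND is an extract of the low bits,
   otherwise it is a deposit of zeros over the block.  */
static void
sketch_and_examples (void)
{
  unsigned int x = 0xdeadbeef;
  unsigned int extracted, deposited;

  /* mask 0x000000ff: ls0 == 8, ls1 == 32, so len == 8 and the insn
     is "{extru|extrw,u} %1,31,8,%0" (extract the low 8 bits).  */
  extracted = x & 0x000000ff;		/* 0x000000ef */

  /* mask 0xffff00ff: ls0 == 8, ls1 == 16, ms0 == 32, so p == 23,
     len == 8 and the insn is "{depi|depwi} 0,23,8,%0" (deposit eight
     zero bits over bits 8..15).  */
  deposited = x & 0xffff00ff;		/* 0xdead00ef */

  (void) extracted;
  (void) deposited;
}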
3126
3127 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3128 storing the result in operands[0]. */
3129 const char *
3130 output_64bit_and (rtx *operands)
3131 {
3132 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3133 {
3134 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3135 int ls0, ls1, ms0, p, len;
3136
3137 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3138 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3139 break;
3140
3141 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3142 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3143 break;
3144
3145 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3146 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3147 break;
3148
3149 if (ms0 != HOST_BITS_PER_WIDE_INT)
3150 abort ();
3151
3152 if (ls1 == HOST_BITS_PER_WIDE_INT)
3153 {
3154 len = ls0;
3155
3156 if (len == 0)
3157 abort ();
3158
3159 operands[2] = GEN_INT (len);
3160 return "extrd,u %1,63,%2,%0";
3161 }
3162 else
3163 {
3164 /* We could use this `depdi' for the case above as well, but `depdi'
3165 requires one more register file access than an `extrd,u'. */
3166
3167 p = 63 - ls0;
3168 len = ls1 - ls0;
3169
3170 operands[2] = GEN_INT (p);
3171 operands[3] = GEN_INT (len);
3172 return "depdi 0,%2,%3,%0";
3173 }
3174 }
3175 else
3176 return "and %1,%2,%0";
3177 }
3178
3179 const char *
3180 output_ior (rtx *operands)
3181 {
3182 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3183 int bs0, bs1, p, len;
3184
3185 if (INTVAL (operands[2]) == 0)
3186 return "copy %1,%0";
3187
3188 for (bs0 = 0; bs0 < 32; bs0++)
3189 if ((mask & (1 << bs0)) != 0)
3190 break;
3191
3192 for (bs1 = bs0; bs1 < 32; bs1++)
3193 if ((mask & (1 << bs1)) == 0)
3194 break;
3195
3196 if (bs1 != 32 && ((unsigned HOST_WIDE_INT) 1 << bs1) <= mask)
3197 abort ();
3198
3199 p = 31 - bs0;
3200 len = bs1 - bs0;
3201
3202 operands[2] = GEN_INT (p);
3203 operands[3] = GEN_INT (len);
3204 return "{depi|depwi} -1,%2,%3,%0";
3205 }
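/* Worked example (illustrative) of output_ior.  */
static void
sketch_ior_example (void)
{
  unsigned int x = 0x0000000f;

  /* mask 0x00000ff0: bs0 == 4, bs1 == 12, so p == 27 and len == 8;
     the insn is "{depi|depwi} -1,27,8,%0", setting bits 4..11.  */
  x |= 0x00000ff0;			/* x == 0x00000fff */
  (void) x;
}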
3206
3207 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3208 storing the result in operands[0]. */
3209 const char *
3210 output_64bit_ior (rtx *operands)
3211 {
3212 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3213 int bs0, bs1, p, len;
3214
3215 if (INTVAL (operands[2]) == 0)
3216 return "copy %1,%0";
3217
3218 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3219 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3220 break;
3221
3222 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3223 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3224 break;
3225
3226 if (bs1 != HOST_BITS_PER_WIDE_INT
3227 && ((unsigned HOST_WIDE_INT) 1 << bs1) <= mask)
3228 abort ();
3229
3230 p = 63 - bs0;
3231 len = bs1 - bs0;
3232
3233 operands[2] = GEN_INT (p);
3234 operands[3] = GEN_INT (len);
3235 return "depdi -1,%2,%3,%0";
3236 }
3237 \f
3238 /* Target hook for assembling integer objects. This code handles
3239 aligned SI and DI integers specially, since function references must
3240 be preceded by P%. */
3241
3242 static bool
3243 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3244 {
3245 if (size == UNITS_PER_WORD && aligned_p
3246 && function_label_operand (x, VOIDmode))
3247 {
3248 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3249 output_addr_const (asm_out_file, x);
3250 fputc ('\n', asm_out_file);
3251 return true;
3252 }
3253 return default_assemble_integer (x, size, aligned_p);
3254 }
3255 \f
3256 /* Output an ascii string. */
3257 void
3258 output_ascii (FILE *file, const char *p, int size)
3259 {
3260 int i;
3261 int chars_output;
3262 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3263
3264 /* The HP assembler can only take strings of 256 characters at one
3265 time. This is a limitation on input line length, *not* the
3266 length of the string. Sigh. Even worse, it seems that the
3267 restriction is in number of input characters (see \xnn &
3268 \whatever). So we have to do this very carefully. */
3269
3270 fputs ("\t.STRING \"", file);
3271
3272 chars_output = 0;
3273 for (i = 0; i < size; i += 4)
3274 {
3275 int co = 0;
3276 int io = 0;
3277 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3278 {
3279 register unsigned int c = (unsigned char) p[i + io];
3280
3281 if (c == '\"' || c == '\\')
3282 partial_output[co++] = '\\';
3283 if (c >= ' ' && c < 0177)
3284 partial_output[co++] = c;
3285 else
3286 {
3287 unsigned int hexd;
3288 partial_output[co++] = '\\';
3289 partial_output[co++] = 'x';
3290 hexd = c / 16 + '0';
3291 if (hexd > '9')
3292 hexd -= '9' - 'a' + 1;
3293 partial_output[co++] = hexd;
3294 hexd = c % 16 + '0';
3295 if (hexd > '9')
3296 hexd -= '9' - 'a' + 1;
3297 partial_output[co++] = hexd;
3298 }
3299 }
3300 if (chars_output + co > 243)
3301 {
3302 fputs ("\"\n\t.STRING \"", file);
3303 chars_output = 0;
3304 }
3305 fwrite (partial_output, 1, (size_t) co, file);
3306 chars_output += co;
3307 co = 0;
3308 }
3309 fputs ("\"\n", file);
3310 }
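/* A standalone sketch of the nibble-to-hex conversion in output_ascii:
   start from '0' + nibble and, when the result runs past '9', slide it
   up into 'a'..'f' ('9' - 'a' + 1 is negative, so subtracting it adds
   39 in ASCII).  */
static char
sketch_hex_digit (unsigned int nibble)
{
  unsigned int hexd = nibble + '0';

  if (hexd > '9')
    hexd -= '9' - 'a' + 1;	/* e.g. nibble 10: ':' (58) becomes 'a' (97) */
  return (char) hexd;
}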
3311
3312 /* Try to rewrite floating point comparisons & branches to avoid
3313 useless add,tr insns.
3314
3315 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3316 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3317 first attempt to remove useless add,tr insns. It is zero
3318 for the second pass as reorg sometimes leaves bogus REG_DEAD
3319 notes lying around.
3320
3321 When CHECK_NOTES is zero we can only eliminate add,tr insns
3322 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3323 instructions. */
3324 static void
3325 remove_useless_addtr_insns (int check_notes)
3326 {
3327 rtx insn;
3328 static int pass = 0;
3329
3330 /* This is fairly cheap, so always run it when optimizing. */
3331 if (optimize > 0)
3332 {
3333 int fcmp_count = 0;
3334 int fbranch_count = 0;
3335
3336 /* Walk all the insns in this function looking for fcmp & fbranch
3337 instructions. Keep track of how many of each we find. */
3338 for (insn = get_insns (); insn; insn = next_insn (insn))
3339 {
3340 rtx tmp;
3341
3342 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3343 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3344 continue;
3345
3346 tmp = PATTERN (insn);
3347
3348 /* It must be a set. */
3349 if (GET_CODE (tmp) != SET)
3350 continue;
3351
3352 /* If the destination is CCFP, then we've found an fcmp insn. */
3353 tmp = SET_DEST (tmp);
3354 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3355 {
3356 fcmp_count++;
3357 continue;
3358 }
3359
3360 tmp = PATTERN (insn);
3361 /* If this is an fbranch instruction, bump the fbranch counter. */
3362 if (GET_CODE (tmp) == SET
3363 && SET_DEST (tmp) == pc_rtx
3364 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3365 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3366 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3367 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3368 {
3369 fbranch_count++;
3370 continue;
3371 }
3372 }
3373
3374
3375 /* Find all floating point compare + branch insns. If possible,
3376 reverse the comparison & the branch to avoid add,tr insns. */
3377 for (insn = get_insns (); insn; insn = next_insn (insn))
3378 {
3379 rtx tmp, next;
3380
3381 /* Ignore anything that isn't an INSN. */
3382 if (GET_CODE (insn) != INSN)
3383 continue;
3384
3385 tmp = PATTERN (insn);
3386
3387 /* It must be a set. */
3388 if (GET_CODE (tmp) != SET)
3389 continue;
3390
3391 /* The destination must be CCFP, which is register zero. */
3392 tmp = SET_DEST (tmp);
3393 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3394 continue;
3395
3396 /* INSN should be a set of CCFP.
3397
3398 See if the result of this insn is used in a reversed FP
3399 conditional branch. If so, reverse our condition and
3400 the branch. Doing so avoids useless add,tr insns. */
3401 next = next_insn (insn);
3402 while (next)
3403 {
3404 /* Jumps, calls and labels stop our search. */
3405 if (GET_CODE (next) == JUMP_INSN
3406 || GET_CODE (next) == CALL_INSN
3407 || GET_CODE (next) == CODE_LABEL)
3408 break;
3409
3410 /* As does another fcmp insn. */
3411 if (GET_CODE (next) == INSN
3412 && GET_CODE (PATTERN (next)) == SET
3413 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3414 && REGNO (SET_DEST (PATTERN (next))) == 0)
3415 break;
3416
3417 next = next_insn (next);
3418 }
3419
3420 /* Is NEXT_INSN a branch? */
3421 if (next
3422 && GET_CODE (next) == JUMP_INSN)
3423 {
3424 rtx pattern = PATTERN (next);
3425
3426 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3427 and CCFP dies, then reverse our conditional and the branch
3428 to avoid the add,tr. */
3429 if (GET_CODE (pattern) == SET
3430 && SET_DEST (pattern) == pc_rtx
3431 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3432 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3433 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3434 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3435 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3436 && (fcmp_count == fbranch_count
3437 || (check_notes
3438 && find_regno_note (next, REG_DEAD, 0))))
3439 {
3440 /* Reverse the branch. */
3441 tmp = XEXP (SET_SRC (pattern), 1);
3442 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3443 XEXP (SET_SRC (pattern), 2) = tmp;
3444 INSN_CODE (next) = -1;
3445
3446 /* Reverse our condition. */
3447 tmp = PATTERN (insn);
3448 PUT_CODE (XEXP (tmp, 1),
3449 (reverse_condition_maybe_unordered
3450 (GET_CODE (XEXP (tmp, 1)))));
3451 }
3452 }
3453 }
3454 }
3455
3456 pass = !pass;
3457
3458 }
3459 \f
3460 /* You may have trouble believing this, but this is the 32 bit HP-PA
3461 stack layout. Wow.
3462
3463 Offset Contents
3464
3465 Variable arguments (optional; any number may be allocated)
3466
3467 SP-(4*(N+9)) arg word N
3468 : :
3469 SP-56 arg word 5
3470 SP-52 arg word 4
3471
3472 Fixed arguments (must be allocated; may remain unused)
3473
3474 SP-48 arg word 3
3475 SP-44 arg word 2
3476 SP-40 arg word 1
3477 SP-36 arg word 0
3478
3479 Frame Marker
3480
3481 SP-32 External Data Pointer (DP)
3482 SP-28 External sr4
3483 SP-24 External/stub RP (RP')
3484 SP-20 Current RP
3485 SP-16 Static Link
3486 SP-12 Clean up
3487 SP-8 Calling Stub RP (RP'')
3488 SP-4 Previous SP
3489
3490 Top of Frame
3491
3492 SP-0 Stack Pointer (points to next available address)
3493
3494 */
3495
3496 /* This function saves registers as follows. Registers marked with ' are
3497 this function's registers (as opposed to the previous function's).
3498 If a frame_pointer isn't needed, r4 is saved as a general register;
3499 the space for the frame pointer is still allocated, though, to keep
3500 things simple.
3501
3502
3503 Top of Frame
3504
3505 SP (FP') Previous FP
3506 SP + 4 Alignment filler (sigh)
3507 SP + 8 Space for locals reserved here.
3508 .
3509 .
3510 .
3511 SP + n All call saved registers used.
3512 .
3513 .
3514 .
3515 SP + o All call saved fp registers used.
3516 .
3517 .
3518 .
3519 SP + p (SP') points to next available address.
3520
3521 */
3522
3523 /* Global variables set by output_function_prologue(). */
3524 /* Size of frame. Need to know this to emit return insns from
3525 leaf procedures. */
3526 static HOST_WIDE_INT actual_fsize, local_fsize;
3527 static int save_fregs;
3528
3529 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3530 Handle case where DISP > 8k by using the add_high_const patterns.
3531
3532 Note that in the DISP > 8k case, we will leave the high part of the
3533 address in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
3534
3535 static void
3536 store_reg (int reg, HOST_WIDE_INT disp, int base)
3537 {
3538 rtx insn, dest, src, basereg;
3539
3540 src = gen_rtx_REG (word_mode, reg);
3541 basereg = gen_rtx_REG (Pmode, base);
3542 if (VAL_14_BITS_P (disp))
3543 {
3544 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3545 insn = emit_move_insn (dest, src);
3546 }
3547 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3548 {
3549 rtx delta = GEN_INT (disp);
3550 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3551
3552 emit_move_insn (tmpreg, delta);
3553 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3554 dest = gen_rtx_MEM (word_mode, tmpreg);
3555 insn = emit_move_insn (dest, src);
3556 if (DO_FRAME_NOTES)
3557 {
3558 REG_NOTES (insn)
3559 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3560 gen_rtx_SET (VOIDmode,
3561 gen_rtx_MEM (word_mode,
3562 gen_rtx_PLUS (word_mode, basereg,
3563 delta)),
3564 src),
3565 REG_NOTES (insn));
3566 }
3567 }
3568 else
3569 {
3570 rtx delta = GEN_INT (disp);
3571 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3572 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3573
3574 emit_move_insn (tmpreg, high);
3575 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3576 insn = emit_move_insn (dest, src);
3577 if (DO_FRAME_NOTES)
3578 {
3579 REG_NOTES (insn)
3580 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3581 gen_rtx_SET (VOIDmode,
3582 gen_rtx_MEM (word_mode,
3583 gen_rtx_PLUS (word_mode, basereg,
3584 delta)),
3585 src),
3586 REG_NOTES (insn));
3587 }
3588 }
3589
3590 if (DO_FRAME_NOTES)
3591 RTX_FRAME_RELATED_P (insn) = 1;
3592 }
3593
3594 /* Emit RTL to store REG at the memory location specified by BASE and then
3595 add MOD to BASE. MOD must be <= 8k. */
3596
3597 static void
3598 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3599 {
3600 rtx insn, basereg, srcreg, delta;
3601
3602 if (!VAL_14_BITS_P (mod))
3603 abort ();
3604
3605 basereg = gen_rtx_REG (Pmode, base);
3606 srcreg = gen_rtx_REG (word_mode, reg);
3607 delta = GEN_INT (mod);
3608
3609 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3610 if (DO_FRAME_NOTES)
3611 {
3612 RTX_FRAME_RELATED_P (insn) = 1;
3613
3614 /* RTX_FRAME_RELATED_P must be set on each frame related set
3615 in a parallel with more than one element. Don't set
3616 RTX_FRAME_RELATED_P in the first set if reg is temporary
3617 register 1. The effect of this operation is recorded in
3618 the initial copy. */
3619 if (reg != 1)
3620 {
3621 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3622 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3623 }
3624 else
3625 {
3626 /* The first element of a PARALLEL is always processed if it is
3627 a SET. Thus, we need an expression list for this case. */
3628 REG_NOTES (insn)
3629 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3630 gen_rtx_SET (VOIDmode, basereg,
3631 gen_rtx_PLUS (word_mode, basereg, delta)),
3632 REG_NOTES (insn));
3633 }
3634 }
3635 }
3636
3637 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3638 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3639 whether to add a frame note or not.
3640
3641 In the DISP > 8k case, we leave the high part of the address in %r1.
3642 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3643
3644 static void
3645 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3646 {
3647 rtx insn;
3648
3649 if (VAL_14_BITS_P (disp))
3650 {
3651 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3652 plus_constant (gen_rtx_REG (Pmode, base), disp));
3653 }
3654 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3655 {
3656 rtx basereg = gen_rtx_REG (Pmode, base);
3657 rtx delta = GEN_INT (disp);
3658 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3659
3660 emit_move_insn (tmpreg, delta);
3661 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3662 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3663 }
3664 else
3665 {
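      /* A sketch of the split performed below, assuming the usual
         addil/ldo encoding: HIGH contributes the left (upper 21) bits
         of the displacement and LO_SUM the right (low 11) bits.  E.g.,
         for disp == 0x12345, HIGH yields 0x12000 and LO_SUM adds 0x345.  */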
3666 rtx basereg = gen_rtx_REG (Pmode, base);
3667 rtx delta = GEN_INT (disp);
3668 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3669
3670 emit_move_insn (tmpreg,
3671 gen_rtx_PLUS (Pmode, basereg,
3672 gen_rtx_HIGH (Pmode, delta)));
3673 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3674 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3675 }
3676
3677 if (DO_FRAME_NOTES && note)
3678 RTX_FRAME_RELATED_P (insn) = 1;
3679 }
3680
3681 HOST_WIDE_INT
3682 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3683 {
3684 int freg_saved = 0;
3685 int i, j;
3686
3687 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3688 be consistent with the rounding and size calculation done here.
3689 Change them at the same time. */
3690
3691 /* We do our own stack alignment. First, round the size of the
3692 stack locals up to a word boundary. */
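  /* E.g., with UNITS_PER_WORD == 4, a size of 13 becomes
     (13 + 3) & ~3 == 16; an already word-aligned size is unchanged.  */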
3693 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3694
3695 /* Space for previous frame pointer + filler. If any frame is
3696 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3697 waste some space here for the sake of HP compatibility. The
3698 first slot is only used when the frame pointer is needed. */
3699 if (size || frame_pointer_needed)
3700 size += STARTING_FRAME_OFFSET;
3701
3702 /* If the current function calls __builtin_eh_return, then we need
3703 to allocate stack space for registers that will hold data for
3704 the exception handler. */
3705 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3706 {
3707 unsigned int i;
3708
3709 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3710 continue;
3711 size += i * UNITS_PER_WORD;
3712 }
3713
3714 /* Account for space used by the callee general register saves. */
3715 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3716 if (regs_ever_live[i])
3717 size += UNITS_PER_WORD;
3718
3719 /* Account for space used by the callee floating point register saves. */
3720 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3721 if (regs_ever_live[i]
3722 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3723 {
3724 freg_saved = 1;
3725
3726 /* We always save both halves of the FP register, so always
3727 increment the frame size by 8 bytes. */
3728 size += 8;
3729 }
3730
3731 /* If any of the floating registers are saved, account for the
3732 alignment needed for the floating point register save block. */
3733 if (freg_saved)
3734 {
3735 size = (size + 7) & ~7;
3736 if (fregs_live)
3737 *fregs_live = 1;
3738 }
3739
3740 /* The various ABIs include space for the outgoing parameters in the
3741 size of the current function's stack frame. We don't need to align
3742 for the outgoing arguments as their alignment is set by the final
3743 rounding for the frame as a whole. */
3744 size += current_function_outgoing_args_size;
3745
3746 /* Allocate space for the fixed frame marker. This space must be
3747 allocated for any function that makes calls or allocates
3748 stack space. */
3749 if (!current_function_is_leaf || size)
3750 size += TARGET_64BIT ? 48 : 32;
3751
3752 /* Finally, round to the preferred stack boundary. */
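  /* With the PA's 512-bit (64-byte) preferred stack boundary, this
     rounds, e.g., a size of 100 up to 128.  */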
3753 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3754 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3755 }
3756
3757 /* Generate the assembly code for function entry. FILE is a stdio
3758 stream to output the code to. SIZE is an int: how many units of
3759 temporary storage to allocate.
3760
3761 Refer to the array `regs_ever_live' to determine which registers to
3762 save; `regs_ever_live[I]' is nonzero if register number I is ever
3763 used in the function. This function is responsible for knowing
3764 which registers should not be saved even if used. */
3765
3766 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3767 of memory. If any fpu reg is used in the function, we allocate
3768 such a block here, at the bottom of the frame, just in case it's needed.
3769
3770 If this function is a leaf procedure, then we may choose not
3771 to do a "save" insn. The decision about whether or not
3772 to do this is made in regclass.c. */
3773
3774 static void
3775 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3776 {
3777 /* The function's label and associated .PROC must never be
3778 separated and must be output *after* any profiling declarations
3779 to avoid changing spaces/subspaces within a procedure. */
3780 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3781 fputs ("\t.PROC\n", file);
3782
3783 /* hppa_expand_prologue does the dirty work now. We just need
3784 to output the assembler directives which denote the start
3785 of a function. */
3786 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3787 if (regs_ever_live[2])
3788 fputs (",CALLS,SAVE_RP", file);
3789 else
3790 fputs (",NO_CALLS", file);
3791
3792 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3793 at the beginning of the frame and that it is used as the frame
3794 pointer for the frame. We do this because our current frame
3795      layout doesn't conform to that specified in the HP runtime
3796 documentation and we need a way to indicate to programs such as
3797 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3798 isn't used by HP compilers but is supported by the assembler.
3799 However, SAVE_SP is supposed to indicate that the previous stack
3800 pointer has been saved in the frame marker. */
3801 if (frame_pointer_needed)
3802 fputs (",SAVE_SP", file);
3803
3804 /* Pass on information about the number of callee register saves
3805 performed in the prologue.
3806
3807 The compiler is supposed to pass the highest register number
3808 saved, the assembler then has to adjust that number before
3809 entering it into the unwind descriptor (to account for any
3810 caller saved registers with lower register numbers than the
3811 first callee saved register). */
3812 if (gr_saved)
3813 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3814
3815 if (fr_saved)
3816 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3817
3818 fputs ("\n\t.ENTRY\n", file);
3819
3820 remove_useless_addtr_insns (0);
3821 }
3822
3823 void
3824 hppa_expand_prologue (void)
3825 {
3826 int merge_sp_adjust_with_store = 0;
3827 HOST_WIDE_INT size = get_frame_size ();
3828 HOST_WIDE_INT offset;
3829 int i;
3830 rtx insn, tmpreg;
3831
3832 gr_saved = 0;
3833 fr_saved = 0;
3834 save_fregs = 0;
3835
3836 /* Compute total size for frame pointer, filler, locals and rounding to
3837 the next word boundary. Similar code appears in compute_frame_size
3838 and must be changed in tandem with this code. */
3839 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3840 if (local_fsize || frame_pointer_needed)
3841 local_fsize += STARTING_FRAME_OFFSET;
3842
3843 actual_fsize = compute_frame_size (size, &save_fregs);
3844
3845 /* Compute a few things we will use often. */
3846 tmpreg = gen_rtx_REG (word_mode, 1);
3847
3848 /* Save RP first. The calling conventions manual states RP will
3849 always be stored into the caller's frame at sp - 20 or sp - 16
3850 depending on which ABI is in use. */
3851 if (regs_ever_live[2] || current_function_calls_eh_return)
3852 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3853
3854 /* Allocate the local frame and set up the frame pointer if needed. */
3855 if (actual_fsize != 0)
3856 {
3857 if (frame_pointer_needed)
3858 {
3859 /* Copy the old frame pointer temporarily into %r1. Set up the
3860 new stack pointer, then store away the saved old frame pointer
3861 into the stack at sp and at the same time update the stack
3862                 pointer by actual_fsize bytes.  Two versions: the first
3863                 handles small (<8k) frames, the second handles large (>=8k)
3864                 frames. */
3865 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3866 if (DO_FRAME_NOTES)
3867 {
3868 /* We need to record the frame pointer save here since the
3869 new frame pointer is set in the following insn. */
3870 RTX_FRAME_RELATED_P (insn) = 1;
3871 REG_NOTES (insn)
3872 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3873 gen_rtx_SET (VOIDmode,
3874 gen_rtx_MEM (word_mode, stack_pointer_rtx),
3875 frame_pointer_rtx),
3876 REG_NOTES (insn));
3877 }
3878
3879 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3880 if (DO_FRAME_NOTES)
3881 RTX_FRAME_RELATED_P (insn) = 1;
3882
3883 if (VAL_14_BITS_P (actual_fsize))
3884 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3885 else
3886 {
3887 /* It is incorrect to store the saved frame pointer at *sp,
3888 then increment sp (writes beyond the current stack boundary).
3889
3890 So instead use stwm to store at *sp and post-increment the
3891 stack pointer as an atomic operation. Then increment sp to
3892 finish allocating the new frame. */
3893 HOST_WIDE_INT adjust1 = 8192 - 64;
3894 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
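              /* E.g., for actual_fsize == 16384 this gives adjust1 == 8128
                 (the largest 14-bit-encodable adjustment that preserves
                 64-byte stack alignment) and adjust2 == 8256, which
                 set_reg_plus_d handles via its addil path.  */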
3895
3896 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3897 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3898 adjust2, 1);
3899 }
3900
3901 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3902 we need to store the previous stack pointer (frame pointer)
3903 into the frame marker on targets that use the HP unwind
3904 library. This allows the HP unwind library to be used to
3905 unwind GCC frames. However, we are not fully compatible
3906 with the HP library because our frame layout differs from
3907 that specified in the HP runtime specification.
3908
3909 We don't want a frame note on this instruction as the frame
3910 marker moves during dynamic stack allocation.
3911
3912 This instruction also serves as a blockage to prevent
3913 register spills from being scheduled before the stack
3914 pointer is raised. This is necessary as we store
3915 registers using the frame pointer as a base register,
3916 and the frame pointer is set before sp is raised. */
3917 if (TARGET_HPUX_UNWIND_LIBRARY)
3918 {
3919 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3920 GEN_INT (TARGET_64BIT ? -8 : -4));
3921
3922 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3923 frame_pointer_rtx);
3924 }
3925 else
3926 emit_insn (gen_blockage ());
3927 }
3928       /* No frame pointer needed. */
3929 else
3930 {
3931 /* In some cases we can perform the first callee register save
3932 	     and allocate the stack frame at the same time.  If so, just
3933 make a note of it and defer allocating the frame until saving
3934 the callee registers. */
3935 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3936 merge_sp_adjust_with_store = 1;
3937 	  /* Cannot optimize.  Adjust the stack frame by actual_fsize
3938 bytes. */
3939 else
3940 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3941 actual_fsize, 1);
3942 }
3943 }
3944
3945 /* Normal register save.
3946
3947 Do not save the frame pointer in the frame_pointer_needed case. It
3948 was done earlier. */
3949 if (frame_pointer_needed)
3950 {
3951 offset = local_fsize;
3952
3953 /* Saving the EH return data registers in the frame is the simplest
3954 way to get the frame unwind information emitted. We put them
3955 just before the general registers. */
3956 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3957 {
3958 unsigned int i, regno;
3959
3960 for (i = 0; ; ++i)
3961 {
3962 regno = EH_RETURN_DATA_REGNO (i);
3963 if (regno == INVALID_REGNUM)
3964 break;
3965
3966 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3967 offset += UNITS_PER_WORD;
3968 }
3969 }
3970
3971 for (i = 18; i >= 4; i--)
3972 if (regs_ever_live[i] && ! call_used_regs[i])
3973 {
3974 store_reg (i, offset, FRAME_POINTER_REGNUM);
3975 offset += UNITS_PER_WORD;
3976 gr_saved++;
3977 }
3978 /* Account for %r3 which is saved in a special place. */
3979 gr_saved++;
3980 }
3981 /* No frame pointer needed. */
3982 else
3983 {
3984 offset = local_fsize - actual_fsize;
3985
3986 /* Saving the EH return data registers in the frame is the simplest
3987 way to get the frame unwind information emitted. */
3988 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3989 {
3990 unsigned int i, regno;
3991
3992 for (i = 0; ; ++i)
3993 {
3994 regno = EH_RETURN_DATA_REGNO (i);
3995 if (regno == INVALID_REGNUM)
3996 break;
3997
3998 /* If merge_sp_adjust_with_store is nonzero, then we can
3999 optimize the first save. */
4000 if (merge_sp_adjust_with_store)
4001 {
4002 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4003 merge_sp_adjust_with_store = 0;
4004 }
4005 else
4006 store_reg (regno, offset, STACK_POINTER_REGNUM);
4007 offset += UNITS_PER_WORD;
4008 }
4009 }
4010
4011 for (i = 18; i >= 3; i--)
4012 if (regs_ever_live[i] && ! call_used_regs[i])
4013 {
4014 /* If merge_sp_adjust_with_store is nonzero, then we can
4015 optimize the first GR save. */
4016 if (merge_sp_adjust_with_store)
4017 {
4018 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4019 merge_sp_adjust_with_store = 0;
4020 }
4021 else
4022 store_reg (i, offset, STACK_POINTER_REGNUM);
4023 offset += UNITS_PER_WORD;
4024 gr_saved++;
4025 }
4026
4027 /* If we wanted to merge the SP adjustment with a GR save, but we never
4028 did any GR saves, then just emit the adjustment here. */
4029 if (merge_sp_adjust_with_store)
4030 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4031 actual_fsize, 1);
4032 }
4033
4034 /* The hppa calling conventions say that %r19, the pic offset
4035 register, is saved at sp - 32 (in this function's frame)
4036 when generating PIC code. FIXME: What is the correct thing
4037 to do for functions which make no calls and allocate no
4038 frame? Do we need to allocate a frame, or can we just omit
4039 the save? For now we'll just omit the save.
4040
4041 We don't want a note on this insn as the frame marker can
4042 move if there is a dynamic stack allocation. */
4043 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4044 {
4045 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4046
4047 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4048
4049 }
4050
4051 /* Align pointer properly (doubleword boundary). */
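  /* The FP save slots below are 8-byte DFmode slots; e.g., an offset
     of 20 rounds up to 24 while 24 is unchanged.  */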
4052 offset = (offset + 7) & ~7;
4053
4054 /* Floating point register store. */
4055 if (save_fregs)
4056 {
4057 rtx base;
4058
4059 /* First get the frame or stack pointer to the start of the FP register
4060 save area. */
4061 if (frame_pointer_needed)
4062 {
4063 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4064 base = frame_pointer_rtx;
4065 }
4066 else
4067 {
4068 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4069 base = stack_pointer_rtx;
4070 }
4071
4072 /* Now actually save the FP registers. */
4073 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4074 {
4075 if (regs_ever_live[i]
4076 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4077 {
4078 rtx addr, insn, reg;
4079 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4080 reg = gen_rtx_REG (DFmode, i);
4081 insn = emit_move_insn (addr, reg);
4082 if (DO_FRAME_NOTES)
4083 {
4084 RTX_FRAME_RELATED_P (insn) = 1;
4085 if (TARGET_64BIT)
4086 {
4087 rtx mem = gen_rtx_MEM (DFmode,
4088 plus_constant (base, offset));
4089 REG_NOTES (insn)
4090 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4091 gen_rtx_SET (VOIDmode, mem, reg),
4092 REG_NOTES (insn));
4093 }
4094 else
4095 {
4096 rtx meml = gen_rtx_MEM (SFmode,
4097 plus_constant (base, offset));
4098 rtx memr = gen_rtx_MEM (SFmode,
4099 plus_constant (base, offset + 4));
4100 rtx regl = gen_rtx_REG (SFmode, i);
4101 rtx regr = gen_rtx_REG (SFmode, i + 1);
4102 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4103 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4104 rtvec vec;
4105
4106 RTX_FRAME_RELATED_P (setl) = 1;
4107 RTX_FRAME_RELATED_P (setr) = 1;
4108 vec = gen_rtvec (2, setl, setr);
4109 REG_NOTES (insn)
4110 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4111 gen_rtx_SEQUENCE (VOIDmode, vec),
4112 REG_NOTES (insn));
4113 }
4114 }
4115 offset += GET_MODE_SIZE (DFmode);
4116 fr_saved++;
4117 }
4118 }
4119 }
4120 }
4121
4122 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4123 Handle case where DISP > 8k by using the add_high_const patterns. */
4124
4125 static void
4126 load_reg (int reg, HOST_WIDE_INT disp, int base)
4127 {
4128 rtx dest = gen_rtx_REG (word_mode, reg);
4129 rtx basereg = gen_rtx_REG (Pmode, base);
4130 rtx src;
4131
4132 if (VAL_14_BITS_P (disp))
4133 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
4134 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4135 {
4136 rtx delta = GEN_INT (disp);
4137 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4138
4139 emit_move_insn (tmpreg, delta);
4140 if (TARGET_DISABLE_INDEXING)
4141 {
4142 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4143 src = gen_rtx_MEM (word_mode, tmpreg);
4144 }
4145 else
4146 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4147 }
4148 else
4149 {
4150 rtx delta = GEN_INT (disp);
4151 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4152 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4153
4154 emit_move_insn (tmpreg, high);
4155 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4156 }
4157
4158 emit_move_insn (dest, src);
4159 }
4160
4161 /* Update the total code bytes output to the text section. */
4162
4163 static void
4164 update_total_code_bytes (int nbytes)
4165 {
4166 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4167 && !IN_NAMED_SECTION_P (cfun->decl))
4168 {
4169 if (INSN_ADDRESSES_SET_P ())
4170 {
4171 unsigned long old_total = total_code_bytes;
4172
4173 total_code_bytes += nbytes;
4174
4175 /* Be prepared to handle overflows. */
4176 if (old_total > total_code_bytes)
4177 total_code_bytes = -1;
4178 }
4179 else
4180 total_code_bytes = -1;
4181 }
4182 }
4183
4184 /* This function generates the assembly code for function exit.
4185 Args are as for output_function_prologue ().
4186
4187 The function epilogue should not depend on the current stack
4188 pointer! It should use the frame pointer only. This is mandatory
4189 because of alloca; we also take advantage of it to omit stack
4190 adjustments before returning. */
4191
4192 static void
4193 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4194 {
4195 rtx insn = get_last_insn ();
4196
4197 last_address = 0;
4198
4199 /* hppa_expand_epilogue does the dirty work now. We just need
4200 to output the assembler directives which denote the end
4201 of a function.
4202
4203 To make debuggers happy, emit a nop if the epilogue was completely
4204 eliminated due to a volatile call as the last insn in the
4205 current function. That way the return address (in %r2) will
4206 always point to a valid instruction in the current function. */
4207
4208 /* Get the last real insn. */
4209 if (GET_CODE (insn) == NOTE)
4210 insn = prev_real_insn (insn);
4211
4212 /* If it is a sequence, then look inside. */
4213 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4214 insn = XVECEXP (PATTERN (insn), 0, 0);
4215
4216 /* If insn is a CALL_INSN, then it must be a call to a volatile
4217 function (otherwise there would be epilogue insns). */
4218 if (insn && GET_CODE (insn) == CALL_INSN)
4219 {
4220 fputs ("\tnop\n", file);
4221 last_address += 4;
4222 }
4223
4224 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4225
4226 if (TARGET_SOM && TARGET_GAS)
4227 {
4228       /* We're done with this subspace except possibly for some additional
4229 debug information. Forget that we are in this subspace to ensure
4230 that the next function is output in its own subspace. */
4231 forget_section ();
4232 }
4233
4234 if (INSN_ADDRESSES_SET_P ())
4235 {
4236 insn = get_last_nonnote_insn ();
4237 last_address += INSN_ADDRESSES (INSN_UID (insn));
4238 if (INSN_P (insn))
4239 last_address += insn_default_length (insn);
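      /* Round up to the function boundary; e.g., with a FUNCTION_BOUNDARY
         of 32 bits (4 bytes), last_address becomes a multiple of 4.  */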
4240 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4241 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4242 }
4243
4244 /* Finally, update the total number of code bytes output so far. */
4245 update_total_code_bytes (last_address);
4246 }
4247
4248 void
4249 hppa_expand_epilogue (void)
4250 {
4251 rtx tmpreg;
4252 HOST_WIDE_INT offset;
4253 HOST_WIDE_INT ret_off = 0;
4254 int i;
4255 int merge_sp_adjust_with_load = 0;
4256
4257 /* We will use this often. */
4258 tmpreg = gen_rtx_REG (word_mode, 1);
4259
4260 /* Try to restore RP early to avoid load/use interlocks when
4261 RP gets used in the return (bv) instruction. This appears to still
4262 be necessary even when we schedule the prologue and epilogue. */
4263 if (regs_ever_live [2] || current_function_calls_eh_return)
4264 {
4265 ret_off = TARGET_64BIT ? -16 : -20;
4266 if (frame_pointer_needed)
4267 {
4268 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4269 ret_off = 0;
4270 }
4271 else
4272 {
4273 /* No frame pointer, and stack is smaller than 8k. */
4274 if (VAL_14_BITS_P (ret_off - actual_fsize))
4275 {
4276 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4277 ret_off = 0;
4278 }
4279 }
4280 }
4281
4282 /* General register restores. */
4283 if (frame_pointer_needed)
4284 {
4285 offset = local_fsize;
4286
4287 /* If the current function calls __builtin_eh_return, then we need
4288 to restore the saved EH data registers. */
4289 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4290 {
4291 unsigned int i, regno;
4292
4293 for (i = 0; ; ++i)
4294 {
4295 regno = EH_RETURN_DATA_REGNO (i);
4296 if (regno == INVALID_REGNUM)
4297 break;
4298
4299 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4300 offset += UNITS_PER_WORD;
4301 }
4302 }
4303
4304 for (i = 18; i >= 4; i--)
4305 if (regs_ever_live[i] && ! call_used_regs[i])
4306 {
4307 load_reg (i, offset, FRAME_POINTER_REGNUM);
4308 offset += UNITS_PER_WORD;
4309 }
4310 }
4311 else
4312 {
4313 offset = local_fsize - actual_fsize;
4314
4315 /* If the current function calls __builtin_eh_return, then we need
4316 to restore the saved EH data registers. */
4317 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4318 {
4319 unsigned int i, regno;
4320
4321 for (i = 0; ; ++i)
4322 {
4323 regno = EH_RETURN_DATA_REGNO (i);
4324 if (regno == INVALID_REGNUM)
4325 break;
4326
4327 /* Only for the first load.
4328 merge_sp_adjust_with_load holds the register load
4329 with which we will merge the sp adjustment. */
4330 if (merge_sp_adjust_with_load == 0
4331 && local_fsize == 0
4332 && VAL_14_BITS_P (-actual_fsize))
4333 merge_sp_adjust_with_load = regno;
4334 else
4335 load_reg (regno, offset, STACK_POINTER_REGNUM);
4336 offset += UNITS_PER_WORD;
4337 }
4338 }
4339
4340 for (i = 18; i >= 3; i--)
4341 {
4342 if (regs_ever_live[i] && ! call_used_regs[i])
4343 {
4344 /* Only for the first load.
4345 merge_sp_adjust_with_load holds the register load
4346 with which we will merge the sp adjustment. */
4347 if (merge_sp_adjust_with_load == 0
4348 && local_fsize == 0
4349 && VAL_14_BITS_P (-actual_fsize))
4350 merge_sp_adjust_with_load = i;
4351 else
4352 load_reg (i, offset, STACK_POINTER_REGNUM);
4353 offset += UNITS_PER_WORD;
4354 }
4355 }
4356 }
4357
4358 /* Align pointer properly (doubleword boundary). */
4359 offset = (offset + 7) & ~7;
4360
4361 /* FP register restores. */
4362 if (save_fregs)
4363 {
4364 /* Adjust the register to index off of. */
4365 if (frame_pointer_needed)
4366 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4367 else
4368 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4369
4370 /* Actually do the restores now. */
4371 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4372 if (regs_ever_live[i]
4373 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4374 {
4375 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4376 rtx dest = gen_rtx_REG (DFmode, i);
4377 emit_move_insn (dest, src);
4378 }
4379 }
4380
4381 /* Emit a blockage insn here to keep these insns from being moved to
4382 an earlier spot in the epilogue, or into the main instruction stream.
4383
4384 This is necessary as we must not cut the stack back before all the
4385 restores are finished. */
4386 emit_insn (gen_blockage ());
4387
4388 /* Reset stack pointer (and possibly frame pointer). The stack
4389 pointer is initially set to fp + 64 to avoid a race condition. */
4390 if (frame_pointer_needed)
4391 {
4392 rtx delta = GEN_INT (-64);
4393
4394 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4395 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4396 }
4397 /* If we were deferring a callee register restore, do it now. */
4398 else if (merge_sp_adjust_with_load)
4399 {
4400 rtx delta = GEN_INT (-actual_fsize);
4401 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4402
4403 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4404 }
4405 else if (actual_fsize != 0)
4406 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4407 - actual_fsize, 0);
4408
4409 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4410 frame greater than 8k), do so now. */
4411 if (ret_off != 0)
4412 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4413
4414 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4415 {
4416 rtx sa = EH_RETURN_STACKADJ_RTX;
4417
4418 emit_insn (gen_blockage ());
4419 emit_insn (TARGET_64BIT
4420 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4421 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4422 }
4423 }
4424
4425 rtx
4426 hppa_pic_save_rtx (void)
4427 {
4428 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4429 }
4430
4431 void
4432 hppa_profile_hook (int label_no)
4433 {
4434 /* We use SImode for the address of the function in both 32 and
4435 64-bit code to avoid having to provide DImode versions of the
4436 lcla2 and load_offset_label_address insn patterns. */
4437 rtx reg = gen_reg_rtx (SImode);
4438 rtx label_rtx = gen_label_rtx ();
4439 rtx begin_label_rtx, call_insn;
4440 char begin_label_name[16];
4441
4442 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4443 label_no);
4444 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4445
4446 if (TARGET_64BIT)
4447 emit_move_insn (arg_pointer_rtx,
4448 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4449 GEN_INT (64)));
4450
4451 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4452
4453   /* The address of the function is loaded into %r25 with an instruction-
4454 relative sequence that avoids the use of relocations. The sequence
4455 is split so that the load_offset_label_address instruction can
4456 occupy the delay slot of the call to _mcount. */
4457 if (TARGET_PA_20)
4458 emit_insn (gen_lcla2 (reg, label_rtx));
4459 else
4460 emit_insn (gen_lcla1 (reg, label_rtx));
4461
4462 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4463 reg, begin_label_rtx, label_rtx));
4464
4465 #ifndef NO_PROFILE_COUNTERS
4466 {
4467 rtx count_label_rtx, addr, r24;
4468 char count_label_name[16];
4469
4470 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4471 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4472
4473 addr = force_reg (Pmode, count_label_rtx);
4474 r24 = gen_rtx_REG (Pmode, 24);
4475 emit_move_insn (r24, addr);
4476
4477 call_insn =
4478 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4479 gen_rtx_SYMBOL_REF (Pmode,
4480 "_mcount")),
4481 GEN_INT (TARGET_64BIT ? 24 : 12)));
4482
4483 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4484 }
4485 #else
4486
4487 call_insn =
4488 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4489 gen_rtx_SYMBOL_REF (Pmode,
4490 "_mcount")),
4491 GEN_INT (TARGET_64BIT ? 16 : 8)));
4492
4493 #endif
4494
4495 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4496 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4497
4498 /* Indicate the _mcount call cannot throw, nor will it execute a
4499 non-local goto. */
4500 REG_NOTES (call_insn)
4501 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4502 }
4503
4504 /* Fetch the return address for the frame COUNT steps up from
4505 the current frame, after the prologue. FRAMEADDR is the
4506 frame pointer of the COUNT frame.
4507
4508 We want to ignore any export stub remnants here. To handle this,
4509 we examine the code at the return address, and if it is an export
4510 stub, we return a memory rtx for the stub return address stored
4511 at frame-24.
4512
4513 The value returned is used in two different ways:
4514
4515 1. To find a function's caller.
4516
4517 2. To change the return address for a function.
4518
4519 This function handles most instances of case 1; however, it will
4520 fail if there are two levels of stubs to execute on the return
4521 path. The only way I believe that can happen is if the return value
4522 needs a parameter relocation, which never happens for C code.
4523
4524 This function handles most instances of case 2; however, it will
4525 fail if we did not originally have stub code on the return path
4526 but will need stub code on the new return path. This can happen if
4527 the caller & callee are both in the main program, but the new
4528 return location is in a shared library. */
4529
4530 rtx
4531 return_addr_rtx (int count, rtx frameaddr)
4532 {
4533 rtx label;
4534 rtx rp;
4535 rtx saved_rp;
4536 rtx ins;
4537
4538 if (count != 0)
4539 return NULL_RTX;
4540
4541 rp = get_hard_reg_initial_val (Pmode, 2);
4542
4543 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4544 return rp;
4545
4546 saved_rp = gen_reg_rtx (Pmode);
4547 emit_move_insn (saved_rp, rp);
4548
4549 /* Get pointer to the instruction stream. We have to mask out the
4550 privilege level from the two low order bits of the return address
4551 pointer here so that ins will point to the start of the first
4552 instruction that would have been executed if we returned. */
4553 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4554 label = gen_label_rtx ();
4555
4556 /* Check the instruction stream at the normal return address for the
4557 export stub:
4558
4559 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4560 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4561 0x00011820 | stub+16: mtsp r1,sr0
4562 0xe0400002 | stub+20: be,n 0(sr0,rp)
4563
4564      If it is an export stub, then our return address is really in
4565 -24[frameaddr]. */
4566
4567 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4568 NULL_RTX, SImode, 1);
4569 emit_jump_insn (gen_bne (label));
4570
4571 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4572 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4573 emit_jump_insn (gen_bne (label));
4574
4575 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4576 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4577 emit_jump_insn (gen_bne (label));
4578
4579 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4580 GEN_INT (0xe0400002), NE, NULL_RTX, SImode, 1);
4581
4582 /* If there is no export stub then just use the value saved from
4583 the return pointer register. */
4584
4585 emit_jump_insn (gen_bne (label));
4586
4587 /* Here we know that our return address points to an export
4588 stub. We don't want to return the address of the export stub,
4589 but rather the return address of the export stub. That return
4590 address is stored at -24[frameaddr]. */
4591
4592 emit_move_insn (saved_rp,
4593 gen_rtx_MEM (Pmode,
4594 memory_address (Pmode,
4595 plus_constant (frameaddr,
4596 -24))));
4597
4598 emit_label (label);
4599 return saved_rp;
4600 }
4601
4602 /* This is only valid once reload has completed because it depends on
4603 knowing exactly how much (if any) frame there is and...
4604
4605 It's only valid if there is no frame marker to de-allocate and...
4606
4607 It's only valid if %r2 hasn't been saved into the caller's frame
4608 (we're not profiling and %r2 isn't live anywhere). */
4609 int
4610 hppa_can_use_return_insn_p (void)
4611 {
4612 return (reload_completed
4613 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4614 && ! regs_ever_live[2]
4615 && ! frame_pointer_needed);
4616 }
4617
4618 void
4619 emit_bcond_fp (enum rtx_code code, rtx operand0)
4620 {
4621 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4622 gen_rtx_IF_THEN_ELSE (VOIDmode,
4623 gen_rtx_fmt_ee (code,
4624 VOIDmode,
4625 gen_rtx_REG (CCFPmode, 0),
4626 const0_rtx),
4627 gen_rtx_LABEL_REF (VOIDmode, operand0),
4628 pc_rtx)));
4629
4630 }
4631
4632 rtx
4633 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4634 {
4635 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4636 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4637 }
4638
4639 /* Adjust the cost of a scheduling dependency. Return the new cost of
4640 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4641
4642 static int
4643 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4644 {
4645 enum attr_type attr_type;
4646
4647   /* Don't adjust costs for a pa8000 chip; also, do not adjust any
4648      true dependencies, as they are described with bypasses now.  */
4649 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4650 return cost;
4651
4652 if (! recog_memoized (insn))
4653 return 0;
4654
4655 attr_type = get_attr_type (insn);
4656
4657 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
4658 {
4659 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4660 cycles later. */
4661
4662 if (attr_type == TYPE_FPLOAD)
4663 {
4664 rtx pat = PATTERN (insn);
4665 rtx dep_pat = PATTERN (dep_insn);
4666 if (GET_CODE (pat) == PARALLEL)
4667 {
4668 /* This happens for the fldXs,mb patterns. */
4669 pat = XVECEXP (pat, 0, 0);
4670 }
4671 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4672 /* If this happens, we have to extend this to schedule
4673 optimally. Return 0 for now. */
4674 return 0;
4675
4676 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4677 {
4678 if (! recog_memoized (dep_insn))
4679 return 0;
4680 switch (get_attr_type (dep_insn))
4681 {
4682 case TYPE_FPALU:
4683 case TYPE_FPMULSGL:
4684 case TYPE_FPMULDBL:
4685 case TYPE_FPDIVSGL:
4686 case TYPE_FPDIVDBL:
4687 case TYPE_FPSQRTSGL:
4688 case TYPE_FPSQRTDBL:
4689 		  /* An fpload can't be issued until one cycle before a
4690 preceding arithmetic operation has finished if
4691 the target of the fpload is any of the sources
4692 (or destination) of the arithmetic operation. */
4693 return insn_default_latency (dep_insn) - 1;
4694
4695 default:
4696 return 0;
4697 }
4698 }
4699 }
4700 else if (attr_type == TYPE_FPALU)
4701 {
4702 rtx pat = PATTERN (insn);
4703 rtx dep_pat = PATTERN (dep_insn);
4704 if (GET_CODE (pat) == PARALLEL)
4705 {
4706 /* This happens for the fldXs,mb patterns. */
4707 pat = XVECEXP (pat, 0, 0);
4708 }
4709 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4710 /* If this happens, we have to extend this to schedule
4711 optimally. Return 0 for now. */
4712 return 0;
4713
4714 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4715 {
4716 if (! recog_memoized (dep_insn))
4717 return 0;
4718 switch (get_attr_type (dep_insn))
4719 {
4720 case TYPE_FPDIVSGL:
4721 case TYPE_FPDIVDBL:
4722 case TYPE_FPSQRTSGL:
4723 case TYPE_FPSQRTDBL:
4724 /* An ALU flop can't be issued until two cycles before a
4725 preceding divide or sqrt operation has finished if
4726 the target of the ALU flop is any of the sources
4727 (or destination) of the divide or sqrt operation. */
4728 return insn_default_latency (dep_insn) - 2;
4729
4730 default:
4731 return 0;
4732 }
4733 }
4734 }
4735
4736 /* For other anti dependencies, the cost is 0. */
4737 return 0;
4738 }
4739 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4740 {
4741 /* Output dependency; DEP_INSN writes a register that INSN writes some
4742 cycles later. */
4743 if (attr_type == TYPE_FPLOAD)
4744 {
4745 rtx pat = PATTERN (insn);
4746 rtx dep_pat = PATTERN (dep_insn);
4747 if (GET_CODE (pat) == PARALLEL)
4748 {
4749 /* This happens for the fldXs,mb patterns. */
4750 pat = XVECEXP (pat, 0, 0);
4751 }
4752 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4753 /* If this happens, we have to extend this to schedule
4754 optimally. Return 0 for now. */
4755 return 0;
4756
4757 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4758 {
4759 if (! recog_memoized (dep_insn))
4760 return 0;
4761 switch (get_attr_type (dep_insn))
4762 {
4763 case TYPE_FPALU:
4764 case TYPE_FPMULSGL:
4765 case TYPE_FPMULDBL:
4766 case TYPE_FPDIVSGL:
4767 case TYPE_FPDIVDBL:
4768 case TYPE_FPSQRTSGL:
4769 case TYPE_FPSQRTDBL:
4770 		  /* An fpload can't be issued until one cycle before a
4771 preceding arithmetic operation has finished if
4772 the target of the fpload is the destination of the
4773 arithmetic operation.
4774
4775 Exception: For PA7100LC, PA7200 and PA7300, the cost
4776 is 3 cycles, unless they bundle together. We also
4777 		     pay the penalty if the second insn is an fpload.  */
4778 return insn_default_latency (dep_insn) - 1;
4779
4780 default:
4781 return 0;
4782 }
4783 }
4784 }
4785 else if (attr_type == TYPE_FPALU)
4786 {
4787 rtx pat = PATTERN (insn);
4788 rtx dep_pat = PATTERN (dep_insn);
4789 if (GET_CODE (pat) == PARALLEL)
4790 {
4791 /* This happens for the fldXs,mb patterns. */
4792 pat = XVECEXP (pat, 0, 0);
4793 }
4794 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4795 /* If this happens, we have to extend this to schedule
4796 optimally. Return 0 for now. */
4797 return 0;
4798
4799 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4800 {
4801 if (! recog_memoized (dep_insn))
4802 return 0;
4803 switch (get_attr_type (dep_insn))
4804 {
4805 case TYPE_FPDIVSGL:
4806 case TYPE_FPDIVDBL:
4807 case TYPE_FPSQRTSGL:
4808 case TYPE_FPSQRTDBL:
4809 /* An ALU flop can't be issued until two cycles before a
4810 preceding divide or sqrt operation has finished if
4811 the target of the ALU flop is also the target of
4812 the divide or sqrt operation. */
4813 return insn_default_latency (dep_insn) - 2;
4814
4815 default:
4816 return 0;
4817 }
4818 }
4819 }
4820
4821 /* For other output dependencies, the cost is 0. */
4822 return 0;
4823 }
4824 else
4825 abort ();
4826 }
4827
4828 /* Adjust scheduling priorities. We use this to try and keep addil
4829 and the next use of %r1 close together. */
4830 static int
4831 pa_adjust_priority (rtx insn, int priority)
4832 {
4833 rtx set = single_set (insn);
4834 rtx src, dest;
4835 if (set)
4836 {
4837 src = SET_SRC (set);
4838 dest = SET_DEST (set);
4839 if (GET_CODE (src) == LO_SUM
4840 && symbolic_operand (XEXP (src, 1), VOIDmode)
4841 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4842 priority >>= 3;
4843
4844 else if (GET_CODE (src) == MEM
4845 && GET_CODE (XEXP (src, 0)) == LO_SUM
4846 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4847 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4848 priority >>= 1;
4849
4850 else if (GET_CODE (dest) == MEM
4851 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4852 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4853 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4854 priority >>= 3;
4855 }
4856 return priority;
4857 }
4858
4859 /* The 700 can only issue a single insn at a time.
4860 The 7XXX processors can issue two insns at a time.
4861 The 8000 can issue 4 insns at a time. */
4862 static int
4863 pa_issue_rate (void)
4864 {
4865 switch (pa_cpu)
4866 {
4867 case PROCESSOR_700: return 1;
4868 case PROCESSOR_7100: return 2;
4869 case PROCESSOR_7100LC: return 2;
4870 case PROCESSOR_7200: return 2;
4871 case PROCESSOR_7300: return 2;
4872 case PROCESSOR_8000: return 4;
4873
4874 default:
4875 abort ();
4876 }
4877 }
4878
4879
4880
4881 /* Return any length adjustment needed by INSN which already has its length
4882 computed as LENGTH. Return zero if no adjustment is necessary.
4883
4884 For the PA: function calls, millicode calls, and backwards short
4885    conditional branches with unfilled delay slots need a 4-byte adjustment
4886    (one instruction, to account for the NOP inserted into the instruction stream).
4887
4888 Also compute the length of an inline block move here as it is too
4889 complicated to express as a length attribute in pa.md. */
4890 int
4891 pa_adjust_insn_length (rtx insn, int length)
4892 {
4893 rtx pat = PATTERN (insn);
4894
4895 /* Jumps inside switch tables which have unfilled delay slots need
4896 adjustment. */
4897 if (GET_CODE (insn) == JUMP_INSN
4898 && GET_CODE (pat) == PARALLEL
4899 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4900 return 4;
4901 /* Millicode insn with an unfilled delay slot. */
4902 else if (GET_CODE (insn) == INSN
4903 && GET_CODE (pat) != SEQUENCE
4904 && GET_CODE (pat) != USE
4905 && GET_CODE (pat) != CLOBBER
4906 && get_attr_type (insn) == TYPE_MILLI)
4907 return 4;
4908 /* Block move pattern. */
4909 else if (GET_CODE (insn) == INSN
4910 && GET_CODE (pat) == PARALLEL
4911 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4912 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4913 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4914 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4915 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4916 return compute_movmem_length (insn) - 4;
4917 /* Block clear pattern. */
4918 else if (GET_CODE (insn) == INSN
4919 && GET_CODE (pat) == PARALLEL
4920 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4921 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4922 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4923 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4924 return compute_clrmem_length (insn) - 4;
4925 /* Conditional branch with an unfilled delay slot. */
4926 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4927 {
4928 /* Adjust a short backwards conditional with an unfilled delay slot. */
4929 if (GET_CODE (pat) == SET
4930 && length == 4
4931 && ! forward_branch_p (insn))
4932 return 4;
4933 else if (GET_CODE (pat) == PARALLEL
4934 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4935 && length == 4)
4936 return 4;
4937 /* Adjust dbra insn with short backwards conditional branch with
4938 	 unfilled delay slot -- only for the case where the counter is in a
4939 	 general register. */
4940 else if (GET_CODE (pat) == PARALLEL
4941 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4942 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4943 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4944 && length == 4
4945 && ! forward_branch_p (insn))
4946 return 4;
4947 else
4948 return 0;
4949 }
4950 return 0;
4951 }
4952
4953 /* Print operand X (an rtx) in assembler syntax to file FILE.
4954 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4955 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4956
4957 void
4958 print_operand (FILE *file, rtx x, int code)
4959 {
4960 switch (code)
4961 {
4962 case '#':
4963 /* Output a 'nop' if there's nothing for the delay slot. */
4964 if (dbr_sequence_length () == 0)
4965 fputs ("\n\tnop", file);
4966 return;
4967 case '*':
4968       /* Output a nullification completer if there's nothing for the
4969          delay slot or nullification is requested.  */
4970 if (dbr_sequence_length () == 0 ||
4971 (final_sequence &&
4972 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4973 fputs (",n", file);
4974 return;
4975 case 'R':
4976 /* Print out the second register name of a register pair.
4977 I.e., R (6) => 7. */
4978 fputs (reg_names[REGNO (x) + 1], file);
4979 return;
4980 case 'r':
4981 /* A register or zero. */
4982 if (x == const0_rtx
4983 || (x == CONST0_RTX (DFmode))
4984 || (x == CONST0_RTX (SFmode)))
4985 {
4986 fputs ("%r0", file);
4987 return;
4988 }
4989 else
4990 break;
4991 case 'f':
4992 /* A register or zero (floating point). */
4993 if (x == const0_rtx
4994 || (x == CONST0_RTX (DFmode))
4995 || (x == CONST0_RTX (SFmode)))
4996 {
4997 fputs ("%fr0", file);
4998 return;
4999 }
5000 else
5001 break;
5002 case 'A':
5003 {
5004 rtx xoperands[2];
5005
5006 xoperands[0] = XEXP (XEXP (x, 0), 0);
5007 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5008 output_global_address (file, xoperands[1], 0);
5009 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5010 return;
5011 }
5012
5013 case 'C': /* Plain (C)ondition */
5014 case 'X':
5015 switch (GET_CODE (x))
5016 {
5017 case EQ:
5018 fputs ("=", file); break;
5019 case NE:
5020 fputs ("<>", file); break;
5021 case GT:
5022 fputs (">", file); break;
5023 case GE:
5024 fputs (">=", file); break;
5025 case GEU:
5026 fputs (">>=", file); break;
5027 case GTU:
5028 fputs (">>", file); break;
5029 case LT:
5030 fputs ("<", file); break;
5031 case LE:
5032 fputs ("<=", file); break;
5033 case LEU:
5034 fputs ("<<=", file); break;
5035 case LTU:
5036 fputs ("<<", file); break;
5037 default:
5038 abort ();
5039 }
5040 return;
5041 case 'N': /* Condition, (N)egated */
5042 switch (GET_CODE (x))
5043 {
5044 case EQ:
5045 fputs ("<>", file); break;
5046 case NE:
5047 fputs ("=", file); break;
5048 case GT:
5049 fputs ("<=", file); break;
5050 case GE:
5051 fputs ("<", file); break;
5052 case GEU:
5053 fputs ("<<", file); break;
5054 case GTU:
5055 fputs ("<<=", file); break;
5056 case LT:
5057 fputs (">=", file); break;
5058 case LE:
5059 fputs (">", file); break;
5060 case LEU:
5061 fputs (">>", file); break;
5062 case LTU:
5063 fputs (">>=", file); break;
5064 default:
5065 abort ();
5066 }
5067 return;
5068 /* For floating point comparisons. Note that the output
5069        predicates are the complement of the desired condition.  */
5070 case 'Y':
5071 switch (GET_CODE (x))
5072 {
5073 case EQ:
5074 fputs ("!=", file); break;
5075 case NE:
5076 fputs ("=", file); break;
5077 case GT:
5078 fputs ("!>", file); break;
5079 case GE:
5080 fputs ("!>=", file); break;
5081 case LT:
5082 fputs ("!<", file); break;
5083 case LE:
5084 fputs ("!<=", file); break;
5085 case LTGT:
5086 fputs ("!<>", file); break;
5087 case UNLE:
5088 fputs (">", file); break;
5089 case UNLT:
5090 fputs (">=", file); break;
5091 case UNGE:
5092 fputs ("<", file); break;
5093 case UNGT:
5094 fputs ("<=", file); break;
5095 case UNEQ:
5096 fputs ("<>", file); break;
5097 case UNORDERED:
5098 fputs ("<=>", file); break;
5099 case ORDERED:
5100 fputs ("!<=>", file); break;
5101 default:
5102 abort ();
5103 }
5104 return;
5105 case 'S': /* Condition, operands are (S)wapped. */
5106 switch (GET_CODE (x))
5107 {
5108 case EQ:
5109 fputs ("=", file); break;
5110 case NE:
5111 fputs ("<>", file); break;
5112 case GT:
5113 fputs ("<", file); break;
5114 case GE:
5115 fputs ("<=", file); break;
5116 case GEU:
5117 fputs ("<<=", file); break;
5118 case GTU:
5119 fputs ("<<", file); break;
5120 case LT:
5121 fputs (">", file); break;
5122 case LE:
5123 fputs (">=", file); break;
5124 case LEU:
5125 fputs (">>=", file); break;
5126 case LTU:
5127 fputs (">>", file); break;
5128 default:
5129 abort ();
5130 }
5131 return;
5132 case 'B': /* Condition, (B)oth swapped and negate. */
5133 switch (GET_CODE (x))
5134 {
5135 case EQ:
5136 fputs ("<>", file); break;
5137 case NE:
5138 fputs ("=", file); break;
5139 case GT:
5140 fputs (">=", file); break;
5141 case GE:
5142 fputs (">", file); break;
5143 case GEU:
5144 fputs (">>", file); break;
5145 case GTU:
5146 fputs (">>=", file); break;
5147 case LT:
5148 fputs ("<=", file); break;
5149 case LE:
5150 fputs ("<", file); break;
5151 case LEU:
5152 fputs ("<<", file); break;
5153 case LTU:
5154 fputs ("<<=", file); break;
5155 default:
5156 abort ();
5157 }
5158 return;
5159 case 'k':
5160 if (GET_CODE (x) == CONST_INT)
5161 {
5162 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5163 return;
5164 }
5165 abort ();
5166 case 'Q':
5167 if (GET_CODE (x) == CONST_INT)
5168 {
5169 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5170 return;
5171 }
5172 abort ();
5173 case 'L':
5174 if (GET_CODE (x) == CONST_INT)
5175 {
5176 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5177 return;
5178 }
5179 abort ();
5180 case 'O':
5181 if (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0)
5182 {
5183 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5184 return;
5185 }
5186 abort ();
5187 case 'p':
5188 if (GET_CODE (x) == CONST_INT)
5189 {
5190 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5191 return;
5192 }
5193 abort ();
5194 case 'P':
5195 if (GET_CODE (x) == CONST_INT)
5196 {
5197 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5198 return;
5199 }
5200 abort ();
5201 case 'I':
5202 if (GET_CODE (x) == CONST_INT)
5203 fputs ("i", file);
5204 return;
5205 case 'M':
5206 case 'F':
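      /* 'M' and 'F' print a memory-operand completer.  ASSEMBLER_DIALECT 0
         is taken here to be the older PA syntax, in which an extra width
         suffix such as "s" is emitted (e.g., "fldws,mb"), while dialect 1
         emits only the completer itself.  */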
5207 switch (GET_CODE (XEXP (x, 0)))
5208 {
5209 case PRE_DEC:
5210 case PRE_INC:
5211 if (ASSEMBLER_DIALECT == 0)
5212 fputs ("s,mb", file);
5213 else
5214 fputs (",mb", file);
5215 break;
5216 case POST_DEC:
5217 case POST_INC:
5218 if (ASSEMBLER_DIALECT == 0)
5219 fputs ("s,ma", file);
5220 else
5221 fputs (",ma", file);
5222 break;
5223 case PLUS:
5224 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5225 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5226 {
5227 if (ASSEMBLER_DIALECT == 0)
5228 fputs ("x", file);
5229 }
5230 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5231 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5232 {
5233 if (ASSEMBLER_DIALECT == 0)
5234 fputs ("x,s", file);
5235 else
5236 fputs (",s", file);
5237 }
5238 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5239 fputs ("s", file);
5240 break;
5241 default:
5242 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5243 fputs ("s", file);
5244 break;
5245 }
5246 return;
5247 case 'G':
5248 output_global_address (file, x, 0);
5249 return;
5250 case 'H':
5251 output_global_address (file, x, 1);
5252 return;
5253 case 0: /* Don't do anything special */
5254 break;
5255 case 'Z':
5256 {
5257 unsigned op[3];
5258 compute_zdepwi_operands (INTVAL (x), op);
5259 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5260 return;
5261 }
5262 case 'z':
5263 {
5264 unsigned op[3];
5265 compute_zdepdi_operands (INTVAL (x), op);
5266 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5267 return;
5268 }
5269 case 'c':
5270 /* We can get here from a .vtable_inherit due to our
5271 CONSTANT_ADDRESS_P rejecting perfectly good constant
5272 addresses. */
5273 break;
5274 default:
5275 abort ();
5276 }
5277 if (GET_CODE (x) == REG)
5278 {
5279 fputs (reg_names [REGNO (x)], file);
5280 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5281 {
5282 fputs ("R", file);
5283 return;
5284 }
5285 if (FP_REG_P (x)
5286 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5287 && (REGNO (x) & 1) == 0)
5288 fputs ("L", file);
5289 }
5290 else if (GET_CODE (x) == MEM)
5291 {
5292 int size = GET_MODE_SIZE (GET_MODE (x));
5293 rtx base = NULL_RTX;
5294 switch (GET_CODE (XEXP (x, 0)))
5295 {
5296 case PRE_DEC:
5297 case POST_DEC:
5298 base = XEXP (XEXP (x, 0), 0);
5299 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5300 break;
5301 case PRE_INC:
5302 case POST_INC:
5303 base = XEXP (XEXP (x, 0), 0);
5304 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5305 break;
5306 case PLUS:
5307 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5308 fprintf (file, "%s(%s)",
5309 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5310 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5311 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5312 fprintf (file, "%s(%s)",
5313 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5314 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5315 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5316 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5317 {
5318 /* Because the REG_POINTER flag can get lost during reload,
5319 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5320 index and base registers in the combined move patterns. */
5321 rtx base = XEXP (XEXP (x, 0), 1);
5322 rtx index = XEXP (XEXP (x, 0), 0);
5323
5324 fprintf (file, "%s(%s)",
5325 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5326 }
5327 else
5328 output_address (XEXP (x, 0));
5329 break;
5330 default:
5331 output_address (XEXP (x, 0));
5332 break;
5333 }
5334 }
5335 else
5336 output_addr_const (file, x);
5337 }
5338
5339 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5340
5341 void
5342 output_global_address (FILE *file, rtx x, int round_constant)
5343 {
5344
5345 /* Imagine (high (const (plus ...))). */
5346 if (GET_CODE (x) == HIGH)
5347 x = XEXP (x, 0);
5348
5349 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5350 assemble_name (file, XSTR (x, 0));
5351 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5352 {
5353 assemble_name (file, XSTR (x, 0));
5354 fputs ("-$global$", file);
5355 }
5356 else if (GET_CODE (x) == CONST)
5357 {
5358 const char *sep = "";
5359 int offset = 0; /* assembler wants -$global$ at end */
5360 rtx base = NULL_RTX;
5361
5362 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
5363 {
5364 base = XEXP (XEXP (x, 0), 0);
5365 output_addr_const (file, base);
5366 }
5367 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == CONST_INT)
5368 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5369 else abort ();
5370
5371 if (GET_CODE (XEXP (XEXP (x, 0), 1)) == SYMBOL_REF)
5372 {
5373 base = XEXP (XEXP (x, 0), 1);
5374 output_addr_const (file, base);
5375 }
5376 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
5377 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5378 else abort ();
5379
5380 /* How bogus. The compiler is apparently responsible for
5381 rounding the constant if it uses an LR field selector.
5382
5383 The linker and/or assembler seem a better place since
5384 they have to do this kind of thing already.
5385
5386 If we fail to do this, HP's optimizing linker may eliminate
5387 an addil, but not update the ldw/stw/ldo instruction that
5388 uses the result of the addil. */
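      /* The rounding below snaps OFFSET to the nearest multiple of
         0x2000 (8k); e.g., 0x1801 -> 0x2000 and 0x0fff -> 0x0000.  */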
5389 if (round_constant)
5390 offset = ((offset + 0x1000) & ~0x1fff);
5391
5392 if (GET_CODE (XEXP (x, 0)) == PLUS)
5393 {
5394 if (offset < 0)
5395 {
5396 offset = -offset;
5397 sep = "-";
5398 }
5399 else
5400 sep = "+";
5401 }
5402 else if (GET_CODE (XEXP (x, 0)) == MINUS
5403 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
5404 sep = "-";
5405 else abort ();
5406
5407 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5408 fputs ("-$global$", file);
5409 if (offset)
5410 fprintf (file, "%s%d", sep, offset);
5411 }
5412 else
5413 output_addr_const (file, x);
5414 }
5415
5416 /* Output boilerplate text to appear at the beginning of the file.
5417 There are several possible versions. */
5418 #define aputs(x) fputs(x, asm_out_file)
5419 static inline void
5420 pa_file_start_level (void)
5421 {
5422 if (TARGET_64BIT)
5423 aputs ("\t.LEVEL 2.0w\n");
5424 else if (TARGET_PA_20)
5425 aputs ("\t.LEVEL 2.0\n");
5426 else if (TARGET_PA_11)
5427 aputs ("\t.LEVEL 1.1\n");
5428 else
5429 aputs ("\t.LEVEL 1.0\n");
5430 }
5431
5432 static inline void
5433 pa_file_start_space (int sortspace)
5434 {
5435 aputs ("\t.SPACE $PRIVATE$");
5436 if (sortspace)
5437 aputs (",SORT=16");
5438 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5439 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5440 "\n\t.SPACE $TEXT$");
5441 if (sortspace)
5442 aputs (",SORT=8");
5443 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5444 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5445 }
5446
5447 static inline void
5448 pa_file_start_file (int want_version)
5449 {
5450 if (write_symbols != NO_DEBUG)
5451 {
5452 output_file_directive (asm_out_file, main_input_filename);
5453 if (want_version)
5454 aputs ("\t.version\t\"01.01\"\n");
5455 }
5456 }
5457
5458 static inline void
5459 pa_file_start_mcount (const char *aswhat)
5460 {
5461 if (profile_flag)
5462 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5463 }
5464
5465 static void
5466 pa_elf_file_start (void)
5467 {
5468 pa_file_start_level ();
5469 pa_file_start_mcount ("ENTRY");
5470 pa_file_start_file (0);
5471 }
5472
5473 static void
5474 pa_som_file_start (void)
5475 {
5476 pa_file_start_level ();
5477 pa_file_start_space (0);
5478 aputs ("\t.IMPORT $global$,DATA\n"
5479 "\t.IMPORT $$dyncall,MILLICODE\n");
5480 pa_file_start_mcount ("CODE");
5481 pa_file_start_file (0);
5482 }
5483
5484 static void
5485 pa_linux_file_start (void)
5486 {
5487 pa_file_start_file (1);
5488 pa_file_start_level ();
5489 pa_file_start_mcount ("CODE");
5490 }
5491
5492 static void
5493 pa_hpux64_gas_file_start (void)
5494 {
5495 pa_file_start_level ();
5496 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5497 if (profile_flag)
5498 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5499 #endif
5500 pa_file_start_file (1);
5501 }
5502
5503 static void
5504 pa_hpux64_hpas_file_start (void)
5505 {
5506 pa_file_start_level ();
5507 pa_file_start_space (1);
5508 pa_file_start_mcount ("CODE");
5509 pa_file_start_file (0);
5510 }
5511 #undef aputs
5512
5513 static struct deferred_plabel *
5514 get_plabel (const char *fname)
5515 {
5516 size_t i;
5517
5518 /* See if we have already put this function on the list of deferred
5519 plabels. This list is generally small, so a linear search is not
5520 too ugly. If it proves too slow, replace it with something faster. */
5521 for (i = 0; i < n_deferred_plabels; i++)
5522 if (strcmp (fname, deferred_plabels[i].name) == 0)
5523 break;
5524
5525 /* If the deferred plabel list is empty, or this entry was not found
5526 on the list, create a new entry on the list. */
5527 if (deferred_plabels == NULL || i == n_deferred_plabels)
5528 {
5529 const char *real_name;
5530
5531 if (deferred_plabels == 0)
5532 deferred_plabels = (struct deferred_plabel *)
5533 ggc_alloc (sizeof (struct deferred_plabel));
5534 else
5535 deferred_plabels = (struct deferred_plabel *)
5536 ggc_realloc (deferred_plabels,
5537 ((n_deferred_plabels + 1)
5538 * sizeof (struct deferred_plabel)));
5539
5540 i = n_deferred_plabels++;
5541 deferred_plabels[i].internal_label = gen_label_rtx ();
5542 deferred_plabels[i].name = ggc_strdup (fname);
5543
5544 /* Gross. We have just implicitly taken the address of this function,
5545 so mark it as such. */
5546 real_name = (*targetm.strip_name_encoding) (fname);
5547 TREE_SYMBOL_REFERENCED (get_identifier (real_name)) = 1;
5548 }
5549
5550 return &deferred_plabels[i];
5551 }
5552
5553 static void
5554 output_deferred_plabels (void)
5555 {
5556 size_t i;
5557 /* If we have deferred plabels, then we need to switch into the data
5558 section and align it to a 4 byte boundary before we output the
5559 deferred plabels. */
5560 if (n_deferred_plabels)
5561 {
5562 data_section ();
5563 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5564 }
5565
5566 /* Now output the deferred plabels. */
5567 for (i = 0; i < n_deferred_plabels; i++)
5568 {
5569 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5570 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5571 assemble_integer (gen_rtx_SYMBOL_REF (Pmode, deferred_plabels[i].name),
5572 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5573 }
5574 }
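/* Sketch of the output for one 32-bit entry (the label spelling and
   the integer directive depend on the target's internal_label and
   assemble_integer hooks, so this is only indicative; "foo" is a
   hypothetical function name):

	L$0042
	.word	foo  */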
5575
5576 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5577 /* Initialize optabs to point to HPUX long double emulation routines. */
5578 static void
5579 pa_hpux_init_libfuncs (void)
5580 {
5581 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5582 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5583 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5584 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5585 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
5586 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5587 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5588 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5589 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5590
5591 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5592 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5593 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5594 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5595 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5596 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5597 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5598
5599 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5600 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5601 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5602 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5603
5604 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5605 ? "__U_Qfcnvfxt_quad_to_sgl"
5606 : "_U_Qfcnvfxt_quad_to_sgl");
5607 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5608 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5609 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5610
5611 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5612 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5613 }
5614 #endif
5615
5616 /* HP's millicode routines mean something special to the assembler.
5617 Keep track of which ones we have used. */
5618
5619 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5620 static void import_milli (enum millicodes);
5621 static char imported[(int) end1000];
5622 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5623 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5624 #define MILLI_START 10
5625
5626 static void
5627 import_milli (enum millicodes code)
5628 {
5629 char str[sizeof (import_string)];
5630
5631 if (!imported[(int) code])
5632 {
5633 imported[(int) code] = 1;
5634 strcpy (str, import_string);
5635 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5636 output_asm_insn (str, 0);
5637 }
5638 }
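/* MILLI_START (10) is the index of the first '.' in "$$...." above,
   and every millicode name in milli_names is exactly four characters,
   so e.g. import_milli (mulI) rewrites the template in place and
   emits:

	.IMPORT $$mulI,MILLICODE  */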
5639
5640 /* The register constraints have put the operands and return value in
5641 the proper registers. */
5642
5643 const char *
5644 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5645 {
5646 import_milli (mulI);
5647 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5648 }
5649
5650 /* Emit the rtl for doing a division by a constant. */
5651
5652 /* Do magic division millicodes exist for this value? */
5653 static const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,
5654 1, 1};
5655
5656 /* We'll use an array to keep track of the magic millicodes and
5657 whether or not we've used them already. [n][0] is signed, [n][1] is
5658 unsigned. */
5659
5660 static int div_milli[16][2];
5661
5662 int
5663 div_operand (rtx op, enum machine_mode mode)
5664 {
5665 return (mode == SImode
5666 && ((GET_CODE (op) == REG && REGNO (op) == 25)
5667 || (GET_CODE (op) == CONST_INT && INTVAL (op) > 0
5668 && INTVAL (op) < 16 && magic_milli[INTVAL (op)])));
5669 }
5670
5671 int
5672 emit_hpdiv_const (rtx *operands, int unsignedp)
5673 {
5674 if (GET_CODE (operands[2]) == CONST_INT
5675 && INTVAL (operands[2]) > 0
5676 && INTVAL (operands[2]) < 16
5677 && magic_milli[INTVAL (operands[2])])
5678 {
5679 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5680
5681 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5682 emit
5683 (gen_rtx_PARALLEL
5684 (VOIDmode,
5685 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5686 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5687 SImode,
5688 gen_rtx_REG (SImode, 26),
5689 operands[2])),
5690 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5691 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5692 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5693 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5694 gen_rtx_CLOBBER (VOIDmode, ret))));
5695 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5696 return 1;
5697 }
5698 return 0;
5699 }
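/* As a sketch of the RTL this produces: for a division by 3
   (magic_milli[3] is nonzero), the dividend is first copied into
   %r26, the PARALLEL represents the millicode call and clobbers the
   two scratch operands, %r25/%r26 and the return pointer (%r31, or
   %r2 in the 64-bit runtime), and the quotient is finally copied out
   of %r29.  */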
5700
5701 const char *
5702 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5703 {
5704 int divisor;
5705
5706 /* If the divisor is a constant, try to use one of the special
5707 opcodes. */
5708 if (GET_CODE (operands[0]) == CONST_INT)
5709 {
5710 static char buf[100];
5711 divisor = INTVAL (operands[0]);
5712 if (!div_milli[divisor][unsignedp])
5713 {
5714 div_milli[divisor][unsignedp] = 1;
5715 if (unsignedp)
5716 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5717 else
5718 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5719 }
5720 if (unsignedp)
5721 {
5722 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5723 INTVAL (operands[0]));
5724 return output_millicode_call (insn,
5725 gen_rtx_SYMBOL_REF (SImode, buf));
5726 }
5727 else
5728 {
5729 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5730 INTVAL (operands[0]));
5731 return output_millicode_call (insn,
5732 gen_rtx_SYMBOL_REF (SImode, buf));
5733 }
5734 }
5735 /* Divisor isn't a special constant. */
5736 else
5737 {
5738 if (unsignedp)
5739 {
5740 import_milli (divU);
5741 return output_millicode_call (insn,
5742 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5743 }
5744 else
5745 {
5746 import_milli (divI);
5747 return output_millicode_call (insn,
5748 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5749 }
5750 }
5751 }
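/* For instance, the first signed division by 10 in a translation
   unit emits

	.IMPORT $$divI_10,MILLICODE

   followed by a millicode call to $$divI_10; subsequent divisions by
   10 skip the .IMPORT because div_milli[10][0] is already set.  */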
5752
5753 /* Output a $$rem millicode to do mod. */
5754
5755 const char *
5756 output_mod_insn (int unsignedp, rtx insn)
5757 {
5758 if (unsignedp)
5759 {
5760 import_milli (remU);
5761 return output_millicode_call (insn,
5762 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5763 }
5764 else
5765 {
5766 import_milli (remI);
5767 return output_millicode_call (insn,
5768 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5769 }
5770 }
5771
5772 void
5773 output_arg_descriptor (rtx call_insn)
5774 {
5775 const char *arg_regs[4];
5776 enum machine_mode arg_mode;
5777 rtx link;
5778 int i, output_flag = 0;
5779 int regno;
5780
5781 /* We neither need nor want argument location descriptors for the
5782 64-bit runtime environment or the ELF32 environment. */
5783 if (TARGET_64BIT || TARGET_ELF32)
5784 return;
5785
5786 for (i = 0; i < 4; i++)
5787 arg_regs[i] = 0;
5788
5789 /* Specify explicitly that no argument relocations should take place
5790 if using the portable runtime calling conventions. */
5791 if (TARGET_PORTABLE_RUNTIME)
5792 {
5793 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5794 asm_out_file);
5795 return;
5796 }
5797
5798 if (GET_CODE (call_insn) != CALL_INSN)
5799 abort ();
5800 for (link = CALL_INSN_FUNCTION_USAGE (call_insn); link; link = XEXP (link, 1))
5801 {
5802 rtx use = XEXP (link, 0);
5803
5804 if (! (GET_CODE (use) == USE
5805 && GET_CODE (XEXP (use, 0)) == REG
5806 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5807 continue;
5808
5809 arg_mode = GET_MODE (XEXP (use, 0));
5810 regno = REGNO (XEXP (use, 0));
5811 if (regno >= 23 && regno <= 26)
5812 {
5813 arg_regs[26 - regno] = "GR";
5814 if (arg_mode == DImode)
5815 arg_regs[25 - regno] = "GR";
5816 }
5817 else if (regno >= 32 && regno <= 39)
5818 {
5819 if (arg_mode == SFmode)
5820 arg_regs[(regno - 32) / 2] = "FR";
5821 else
5822 {
5823 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5824 arg_regs[(regno - 34) / 2] = "FR";
5825 arg_regs[(regno - 34) / 2 + 1] = "FU";
5826 #else
5827 arg_regs[(regno - 34) / 2] = "FU";
5828 arg_regs[(regno - 34) / 2 + 1] = "FR";
5829 #endif
5830 }
5831 }
5832 }
5833 fputs ("\t.CALL ", asm_out_file);
5834 for (i = 0; i < 4; i++)
5835 {
5836 if (arg_regs[i])
5837 {
5838 if (output_flag++)
5839 fputc (',', asm_out_file);
5840 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5841 }
5842 }
5843 fputc ('\n', asm_out_file);
5844 }
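/* Illustrative descriptors (register assignment as in the 32-bit SOM
   runtime): a call passing two ints in %r26/%r25 yields

	.CALL ARGW0=GR,ARGW1=GR

   while a call passing one double in an FP register pair yields
   ARGW0=FR,ARGW1=FU, or the reverse when
   HP_FP_ARG_DESCRIPTOR_REVERSED is defined.  */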
5845 \f
5846 /* Return the class of any secondary reload register that is needed to
5847 move IN into a register in class CLASS using mode MODE.
5848
5849 Profiling has shown that this routine and its descendants account for
5850 a significant amount of compile time (~7%). So it has been
5851 optimized to reduce redundant computations and eliminate useless
5852 function calls.
5853
5854 It might be worthwhile to try and make this a leaf function too. */
5855
5856 enum reg_class
5857 secondary_reload_class (enum reg_class class, enum machine_mode mode, rtx in)
5858 {
5859 int regno, is_symbolic;
5860
5861 /* Trying to load a constant into a FP register during PIC code
5862 generation will require %r1 as a scratch register. */
5863 if (flag_pic
5864 && GET_MODE_CLASS (mode) == MODE_INT
5865 && FP_REG_CLASS_P (class)
5866 && (GET_CODE (in) == CONST_INT || GET_CODE (in) == CONST_DOUBLE))
5867 return R1_REGS;
5868
5869 /* Profiling showed the PA port spends about 1.3% of its compilation
5870 time in true_regnum from calls inside secondary_reload_class. */
5871
5872 if (GET_CODE (in) == REG)
5873 {
5874 regno = REGNO (in);
5875 if (regno >= FIRST_PSEUDO_REGISTER)
5876 regno = true_regnum (in);
5877 }
5878 else if (GET_CODE (in) == SUBREG)
5879 regno = true_regnum (in);
5880 else
5881 regno = -1;
5882
5883 /* If we have something like (mem (mem (...))), we can safely assume the
5884 inner MEM will end up in a general register after reloading, so there's
5885 no need for a secondary reload. */
5886 if (GET_CODE (in) == MEM
5887 && GET_CODE (XEXP (in, 0)) == MEM)
5888 return NO_REGS;
5889
5890 /* Handle out of range displacement for integer mode loads/stores of
5891 FP registers. */
5892 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5893 && GET_MODE_CLASS (mode) == MODE_INT
5894 && FP_REG_CLASS_P (class))
5895 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5896 return GENERAL_REGS;
5897
5898 /* A SAR<->FP register copy requires a secondary register (GPR) as
5899 well as secondary memory. */
5900 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5901 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5902 || (class == SHIFT_REGS && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5903 return GENERAL_REGS;
5904
5905 if (GET_CODE (in) == HIGH)
5906 in = XEXP (in, 0);
5907
5908 /* Profiling has shown that GCC spends about 2.6% of its compilation
5909 time in symbolic_operand from calls inside secondary_reload_class.
5910
5911 We use an inline copy and only compute its return value once to avoid
5912 useless work. */
5913 switch (GET_CODE (in))
5914 {
5915 rtx tmp;
5916
5917 case SYMBOL_REF:
5918 case LABEL_REF:
5919 is_symbolic = 1;
5920 break;
5921 case CONST:
5922 tmp = XEXP (in, 0);
5923 is_symbolic = ((GET_CODE (XEXP (tmp, 0)) == SYMBOL_REF
5924 || GET_CODE (XEXP (tmp, 0)) == LABEL_REF)
5925 && GET_CODE (XEXP (tmp, 1)) == CONST_INT);
5926 break;
5927
5928 default:
5929 is_symbolic = 0;
5930 break;
5931 }
5932
5933 if (!flag_pic
5934 && is_symbolic
5935 && read_only_operand (in, VOIDmode))
5936 return NO_REGS;
5937
5938 if (class != R1_REGS && is_symbolic)
5939 return R1_REGS;
5940
5941 return NO_REGS;
5942 }
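/* For example, a SYMBOL_REF (or a HIGH of one) being loaded into any
   class other than R1_REGS returns R1_REGS, since %r1 is needed for
   the addil; the same symbol returns NO_REGS when it is read-only
   and we are not generating PIC, as no scratch is required then.  */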
5943
5944 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5945 by invisible reference. As a GCC extension, we also pass anything
5946 with a zero or variable size by reference.
5947
5948 The 64-bit runtime does not describe passing any types by invisible
5949 reference. The internals of GCC can't currently handle passing
5950 empty structures and zero or variable length arrays when they are
5951 not passed entirely on the stack or by reference. Thus, as a GCC
5952 extension, we pass these types by reference. The HP compiler doesn't
5953 support these types, so hopefully there shouldn't be any compatibility
5954 issues. This may have to be revisited when HP releases a C99 compiler
5955 or updates the ABI. */
5956
5957 static bool
5958 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5959 enum machine_mode mode, tree type,
5960 bool named ATTRIBUTE_UNUSED)
5961 {
5962 HOST_WIDE_INT size;
5963
5964 if (type)
5965 size = int_size_in_bytes (type);
5966 else
5967 size = GET_MODE_SIZE (mode);
5968
5969 if (TARGET_64BIT)
5970 return size <= 0;
5971 else
5972 return size <= 0 || size > 8;
5973 }
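/* Concretely: in the 32-bit runtime a 12-byte struct (size > 8) and
   a variable-length array (int_size_in_bytes returns -1, so
   size <= 0) are both passed by reference, while an 8-byte struct is
   passed by value; the 64-bit runtime passes the 12-byte struct by
   value as well.  */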
5974
5975 enum direction
5976 function_arg_padding (enum machine_mode mode, tree type)
5977 {
5978 if (mode == BLKmode
5979 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5980 {
5981 /* Return none if justification is not required. */
5982 if (type
5983 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5984 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5985 return none;
5986
5987 /* The directions set here are ignored when a BLKmode argument larger
5988 than a word is placed in a register. Different code is used for
5989 the stack and registers. This makes it difficult to have a
5990 consistent data representation for both the stack and registers.
5991 For both runtimes, the justification and padding for arguments on
5992 the stack and in registers should be identical. */
5993 if (TARGET_64BIT)
5994 /* The 64-bit runtime specifies left justification for aggregates. */
5995 return upward;
5996 else
5997 /* The 32-bit runtime architecture specifies right justification.
5998 When the argument is passed on the stack, the argument is padded
5999 with garbage on the left. The HP compiler pads with zeros. */
6000 return downward;
6001 }
6002
6003 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6004 return downward;
6005 else
6006 return none;
6007 }
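/* For example, a 3-byte BLKmode argument is right justified (padded
   downward) in its word by the 32-bit runtime, whereas the 64-bit
   runtime left justifies (pads upward) aggregates; a 2-byte HImode
   scalar pads downward on both, since its bitsize is smaller than
   PARM_BOUNDARY.  */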
6008
6009 \f
6010 /* Do what is necessary for `va_start'. We look at the current function
6011 to determine if stdargs or varargs is used and fill in an initial
6012 va_list. A pointer to this constructor is returned. */
6013
6014 static rtx
6015 hppa_builtin_saveregs (void)
6016 {
6017 rtx offset, dest;
6018 tree fntype = TREE_TYPE (current_function_decl);
6019 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
6020 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
6021 != void_type_node)))
6022 ? UNITS_PER_WORD : 0);
6023
6024 if (argadj)
6025 offset = plus_constant (current_function_arg_offset_rtx, argadj);
6026 else
6027 offset = current_function_arg_offset_rtx;
6028
6029 if (TARGET_64BIT)
6030 {
6031 int i, off;
6032
6033 /* Adjust for varargs/stdarg differences. */
6034 if (argadj)
6035 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
6036 else
6037 offset = current_function_arg_offset_rtx;
6038
6039 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6040 from the incoming arg pointer and growing to larger addresses. */
6041 for (i = 26, off = -64; i >= 19; i--, off += 8)
6042 emit_move_insn (gen_rtx_MEM (word_mode,
6043 plus_constant (arg_pointer_rtx, off)),
6044 gen_rtx_REG (word_mode, i));
6045
6046 /* The incoming args pointer points just beyond the flushback area;
6047 normally this is not a serious concern. However, when we are doing
6048 varargs/stdargs we want to make the arg pointer point to the start
6049 of the incoming argument area. */
6050 emit_move_insn (virtual_incoming_args_rtx,
6051 plus_constant (arg_pointer_rtx, -64));
6052
6053 /* Now return a pointer to the first anonymous argument. */
6054 return copy_to_reg (expand_binop (Pmode, add_optab,
6055 virtual_incoming_args_rtx,
6056 offset, 0, 0, OPTAB_LIB_WIDEN));
6057 }
6058
6059 /* Store general registers on the stack. */
6060 dest = gen_rtx_MEM (BLKmode,
6061 plus_constant (current_function_internal_arg_pointer,
6062 -16));
6063 set_mem_alias_set (dest, get_varargs_alias_set ());
6064 set_mem_align (dest, BITS_PER_WORD);
6065 move_block_from_reg (23, dest, 4);
6066
6067 /* move_block_from_reg will emit code to store the argument registers
6068 individually as scalar stores.
6069
6070 However, other insns may later load from the same addresses for
6071 a structure load (passing a struct to a varargs routine).
6072
6073 The alias code assumes that such aliasing can never happen, so we
6074 have to keep memory referencing insns from moving up beyond the
6075 last argument register store. So we emit a blockage insn here. */
6076 emit_insn (gen_blockage ());
6077
6078 return copy_to_reg (expand_binop (Pmode, add_optab,
6079 current_function_internal_arg_pointer,
6080 offset, 0, 0, OPTAB_LIB_WIDEN));
6081 }
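/* In the 64-bit path above, the loop flushes %r26 to -64(ap), %r25
   to -56(ap), and so on through %r19 at -8(ap), after which the
   virtual incoming args pointer is backed up by 64 bytes so that it
   addresses the start of this register save area.  */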
6082
6083 void
6084 hppa_va_start (tree valist, rtx nextarg)
6085 {
6086 nextarg = expand_builtin_saveregs ();
6087 std_expand_builtin_va_start (valist, nextarg);
6088 }
6089
6090 static tree
6091 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
6092 {
6093 if (TARGET_64BIT)
6094 {
6095 /* Args grow upward. We can use the generic routines. */
6096 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6097 }
6098 else /* !TARGET_64BIT */
6099 {
6100 tree ptr = build_pointer_type (type);
6101 tree valist_type;
6102 tree t, u;
6103 unsigned int size, ofs;
6104 bool indirect;
6105
6106 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6107 if (indirect)
6108 {
6109 type = ptr;
6110 ptr = build_pointer_type (type);
6111 }
6112 size = int_size_in_bytes (type);
6113 valist_type = TREE_TYPE (valist);
6114
6115 /* Args grow down. Not handled by generic routines. */
6116
6117 u = fold_convert (valist_type, size_in_bytes (type));
6118 t = build (MINUS_EXPR, valist_type, valist, u);
6119
6120 /* Copied from va-pa.h, but we probably don't need to align to
6121 word size, since we generate and preserve that invariant. */
6122 u = build_int_2 ((size > 4 ? -8 : -4), -1);
6123 u = fold_convert (valist_type, u);
6124 t = build (BIT_AND_EXPR, valist_type, t, u);
6125
6126 t = build (MODIFY_EXPR, valist_type, valist, t);
6127
6128 ofs = (8 - size) % 4;
6129 if (ofs != 0)
6130 {
6131 u = fold_convert (valist_type, size_int (ofs));
6132 t = build (PLUS_EXPR, valist_type, t, u);
6133 }
6134
6135 t = fold_convert (ptr, t);
6136 t = build_fold_indirect_ref (t);
6137
6138 if (indirect)
6139 t = build_fold_indirect_ref (t);
6140
6141 return t;
6142 }
6143 }
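/* Worked example for the 32-bit case: fetching a 2-byte short
   updates the va_list pointer AP to (AP - 2) & ~3 and reads the
   value at that address plus 2, since ofs == (8 - 2) % 4 == 2;
   fetching an 8-byte double updates AP to (AP - 8) & ~7 and reads
   at AP directly (ofs == 0).  */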
6144
6145 /* This routine handles all the normal conditional branch sequences we
6146 might need to generate. It handles compare immediate vs compare
6147 register, nullification of delay slots, varying length branches,
6148 negated branches, and all combinations of the above. It returns the
6149 output appropriate to emit the branch corresponding to all given
6150 parameters. */
6151
6152 const char *
6153 output_cbranch (rtx *operands, int nullify, int length, int negated, rtx insn)
6154 {
6155 static char buf[100];
6156 int useskip = 0;
6157 rtx xoperands[5];
6158
6159 /* A conditional branch to the following instruction (e.g., the delay slot)
6160 is asking for a disaster. This can happen when not optimizing and
6161 when jump optimization fails.
6162
6163 While it is usually safe to emit nothing, this can fail if the
6164 preceding instruction is a nullified branch with an empty delay
6165 slot and the same branch target as this branch. We could check
6166 for this but jump optimization should eliminate nop jumps. It
6167 is always safe to emit a nop. */
6168 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6169 return "nop";
6170
6171 /* The doubleword form of the cmpib instruction doesn't have the LEU
6172 and GTU conditions while the cmpb instruction does. Since we accept
6173 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6174 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6175 operands[2] = gen_rtx_REG (DImode, 0);
6176
6177 /* If this is a long branch with its delay slot unfilled, set `nullify'
6178 as it can nullify the delay slot and save a nop. */
6179 if (length == 8 && dbr_sequence_length () == 0)
6180 nullify = 1;
6181
6182 /* If this is a short forward conditional branch which did not get
6183 its delay slot filled, the delay slot can still be nullified. */
6184 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6185 nullify = forward_branch_p (insn);
6186
6187 /* A forward branch over a single nullified insn can be done with a
6188 comclr instruction. This avoids a single cycle penalty due to
6189 mis-predicted branch if we fall through (branch not taken). */
6190 if (length == 4
6191 && next_real_insn (insn) != 0
6192 && get_attr_length (next_real_insn (insn)) == 4
6193 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6194 && nullify)
6195 useskip = 1;
6196
6197 switch (length)
6198 {
6199 /* All short conditional branches except backwards with an unfilled
6200 delay slot. */
6201 case 4:
6202 if (useskip)
6203 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6204 else
6205 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6206 if (GET_MODE (operands[1]) == DImode)
6207 strcat (buf, "*");
6208 if (negated)
6209 strcat (buf, "%B3");
6210 else
6211 strcat (buf, "%S3");
6212 if (useskip)
6213 strcat (buf, " %2,%r1,%%r0");
6214 else if (nullify)
6215 strcat (buf, ",n %2,%r1,%0");
6216 else
6217 strcat (buf, " %2,%r1,%0");
6218 break;
6219
6220 /* All long conditionals. Note a short backward branch with an
6221 unfilled delay slot is treated just like a long backward branch
6222 with an unfilled delay slot. */
6223 case 8:
6224 /* Handle weird backwards branch with a filled delay slot
6225 which is nullified. */
6226 if (dbr_sequence_length () != 0
6227 && ! forward_branch_p (insn)
6228 && nullify)
6229 {
6230 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6231 if (GET_MODE (operands[1]) == DImode)
6232 strcat (buf, "*");
6233 if (negated)
6234 strcat (buf, "%S3");
6235 else
6236 strcat (buf, "%B3");
6237 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6238 }
6239 /* Handle short backwards branch with an unfilled delay slot.
6240 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6241 taken and untaken branches. */
6242 else if (dbr_sequence_length () == 0
6243 && ! forward_branch_p (insn)
6244 && INSN_ADDRESSES_SET_P ()
6245 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6246 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6247 {
6248 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6249 if (GET_MODE (operands[1]) == DImode)
6250 strcat (buf, "*");
6251 if (negated)
6252 strcat (buf, "%B3 %2,%r1,%0%#");
6253 else
6254 strcat (buf, "%S3 %2,%r1,%0%#");
6255 }
6256 else
6257 {
6258 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6259 if (GET_MODE (operands[1]) == DImode)
6260 strcat (buf, "*");
6261 if (negated)
6262 strcat (buf, "%S3");
6263 else
6264 strcat (buf, "%B3");
6265 if (nullify)
6266 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6267 else
6268 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6269 }
6270 break;
6271
6272 case 20:
6273 case 28:
6274 xoperands[0] = operands[0];
6275 xoperands[1] = operands[1];
6276 xoperands[2] = operands[2];
6277 xoperands[3] = operands[3];
6278
6279 /* The reversed conditional branch must branch over one additional
6280 instruction if the delay slot is filled. If the delay slot
6281 is empty, the instruction after the reversed condition branch
6282 must be nullified. */
6283 nullify = dbr_sequence_length () == 0;
6284 xoperands[4] = nullify ? GEN_INT (length) : GEN_INT (length + 4);
6285
6286 /* Create a reversed conditional branch which branches around
6287 the following insns. */
6288 if (GET_MODE (operands[1]) != DImode)
6289 {
6290 if (nullify)
6291 {
6292 if (negated)
6293 strcpy (buf,
6294 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6295 else
6296 strcpy (buf,
6297 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6298 }
6299 else
6300 {
6301 if (negated)
6302 strcpy (buf,
6303 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6304 else
6305 strcpy (buf,
6306 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6307 }
6308 }
6309 else
6310 {
6311 if (nullify)
6312 {
6313 if (negated)
6314 strcpy (buf,
6315 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6316 else
6317 strcpy (buf,
6318 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6319 }
6320 else
6321 {
6322 if (negated)
6323 strcpy (buf,
6324 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6325 else
6326 strcpy (buf,
6327 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6328 }
6329 }
6330
6331 output_asm_insn (buf, xoperands);
6332 return output_lbranch (operands[0], insn);
6333
6334 default:
6335 abort ();
6336 }
6337 return buf;
6338 }
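/* Illustrative expansions with PA 2.0 spellings and hypothetical
   operands: the short form of an equality branch comes out as
   "cmpb,= %r5,%r4,L$0002", while the length 8 fallback with a
   nullified delay slot comes out as "cmpclr,<> %r5,%r4,%r0" followed
   by "b,n L$0002" (the clr form tests the reversed condition so the
   branch is skipped when the original condition fails).  */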
6339
6340 /* This routine handles long unconditional branches that exceed the
6341 maximum range of a simple branch instruction. */
6342
6343 const char *
6344 output_lbranch (rtx dest, rtx insn)
6345 {
6346 rtx xoperands[2];
6347
6348 xoperands[0] = dest;
6349
6350 /* First, free up the delay slot. */
6351 if (dbr_sequence_length () != 0)
6352 {
6353 /* We can't handle a jump in the delay slot. */
6354 if (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN)
6355 abort ();
6356
6357 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6358 optimize, 0, 0, NULL);
6359
6360 /* Now delete the delay insn. */
6361 PUT_CODE (NEXT_INSN (insn), NOTE);
6362 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6363 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6364 }
6365
6366 /* Output an insn to save %r1. The runtime documentation doesn't
6367 specify whether the "Clean Up" slot in the caller's frame can
6368 be clobbered by the callee. It isn't copied by HP's builtin
6369 alloca, so this suggests that it can be clobbered if necessary.
6370 The "Static Link" location is copied by HP builtin alloca, so
6371 we avoid using it. Using the cleanup slot might be a problem
6372 if we have to interoperate with languages that pass cleanup
6373 information. However, it should be possible to handle these
6374 situations with GCC's asm feature.
6375
6376 The "Current RP" slot is reserved for the called procedure, so
6377 we try to use it when we don't have a frame of our own. It's
6378 rather unlikely that we won't have a frame when we need to emit
6379 a very long branch.
6380
6381 Really the way to go long term is a register scavenger; go to
6382 the target of the jump and find a register which we can use
6383 as a scratch to hold the value in %r1. Then, we wouldn't have
6384 to free up the delay slot or clobber a slot that may be needed
6385 for other purposes. */
6386 if (TARGET_64BIT)
6387 {
6388 if (actual_fsize == 0 && !regs_ever_live[2])
6389 /* Use the return pointer slot in the frame marker. */
6390 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6391 else
6392 /* Use the slot at -40 in the frame marker since HP builtin
6393 alloca doesn't copy it. */
6394 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6395 }
6396 else
6397 {
6398 if (actual_fsize == 0 && !regs_ever_live[2])
6399 /* Use the return pointer slot in the frame marker. */
6400 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6401 else
6402 /* Use the "Clean Up" slot in the frame marker. In GCC,
6403 the only other use of this location is for copying a
6404 floating point double argument from a floating-point
6405 register to two general registers. The copy is done
6406 as an "atomic" operation when outputting a call, so it
6407 won't interfere with our using the location here. */
6408 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6409 }
6410
6411 if (TARGET_PORTABLE_RUNTIME)
6412 {
6413 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6414 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6415 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6416 }
6417 else if (flag_pic)
6418 {
6419 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6420 if (TARGET_SOM || !TARGET_GAS)
6421 {
6422 xoperands[1] = gen_label_rtx ();
6423 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6424 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6425 CODE_LABEL_NUMBER (xoperands[1]));
6426 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6427 }
6428 else
6429 {
6430 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6431 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6432 }
6433 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6434 }
6435 else
6436 /* Now output a very long branch to the original target. */
6437 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6438
6439 /* Now restore the value of %r1 in the delay slot. */
6440 if (TARGET_64BIT)
6441 {
6442 if (actual_fsize == 0 && !regs_ever_live[2])
6443 return "ldd -16(%%r30),%%r1";
6444 else
6445 return "ldd -40(%%r30),%%r1";
6446 }
6447 else
6448 {
6449 if (actual_fsize == 0 && !regs_ever_live[2])
6450 return "ldw -20(%%r30),%%r1";
6451 else
6452 return "ldw -12(%%r30),%%r1";
6453 }
6454 }
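/* Putting the pieces together, a 32-bit non-PIC long branch in a
   function with no frame looks like this (target label hypothetical):

	stw %r1,-20(%r30)
	ldil L'L$0099,%r1
	be R'L$0099(%sr4,%r1)
	ldw -20(%r30),%r1

   with the %r1 restore executing in the be's delay slot.  */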
6455
6456 /* This routine handles all the branch-on-bit conditional branch sequences we
6457 might need to generate. It handles nullification of delay slots,
6458 varying length branches, negated branches and all combinations of the
6459 above. It returns the appropriate output template to emit the branch. */
6460
6461 const char *
6462 output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6463 int negated, rtx insn, int which)
6464 {
6465 static char buf[100];
6466 int useskip = 0;
6467
6468 /* A conditional branch to the following instruction (e.g., the delay slot) is
6469 asking for a disaster. I do not think this can happen as this pattern
6470 is only used when optimizing; jump optimization should eliminate the
6471 jump. But be prepared just in case. */
6472
6473 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6474 return "nop";
6475
6476 /* If this is a long branch with its delay slot unfilled, set `nullify'
6477 as it can nullify the delay slot and save a nop. */
6478 if (length == 8 && dbr_sequence_length () == 0)
6479 nullify = 1;
6480
6481 /* If this is a short forward conditional branch which did not get
6482 its delay slot filled, the delay slot can still be nullified. */
6483 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6484 nullify = forward_branch_p (insn);
6485
6486 /* A forward branch over a single nullified insn can be done with an
6487 extrs instruction. This avoids a single cycle penalty due to
6488 mis-predicted branch if we fall through (branch not taken). */
6489
6490 if (length == 4
6491 && next_real_insn (insn) != 0
6492 && get_attr_length (next_real_insn (insn)) == 4
6493 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6494 && nullify)
6495 useskip = 1;
6496
6497 switch (length)
6498 {
6499
6500 /* All short conditional branches except backwards with an unfilled
6501 delay slot. */
6502 case 4:
6503 if (useskip)
6504 strcpy (buf, "{extrs,|extrw,s,}");
6505 else
6506 strcpy (buf, "bb,");
6507 if (useskip && GET_MODE (operands[0]) == DImode)
6508 strcpy (buf, "extrd,s,*");
6509 else if (GET_MODE (operands[0]) == DImode)
6510 strcpy (buf, "bb,*");
6511 if ((which == 0 && negated)
6512 || (which == 1 && ! negated))
6513 strcat (buf, ">=");
6514 else
6515 strcat (buf, "<");
6516 if (useskip)
6517 strcat (buf, " %0,%1,1,%%r0");
6518 else if (nullify && negated)
6519 strcat (buf, ",n %0,%1,%3");
6520 else if (nullify && ! negated)
6521 strcat (buf, ",n %0,%1,%2");
6522 else if (! nullify && negated)
6523 strcat (buf, "%0,%1,%3");
6524 else if (! nullify && ! negated)
6525 strcat (buf, " %0,%1,%2");
6526 break;
6527
6528 /* All long conditionals. Note a short backward branch with an
6529 unfilled delay slot is treated just like a long backward branch
6530 with an unfilled delay slot. */
6531 case 8:
6532 /* Handle weird backwards branch with a filled delay slot
6533 which is nullified. */
6534 if (dbr_sequence_length () != 0
6535 && ! forward_branch_p (insn)
6536 && nullify)
6537 {
6538 strcpy (buf, "bb,");
6539 if (GET_MODE (operands[0]) == DImode)
6540 strcat (buf, "*");
6541 if ((which == 0 && negated)
6542 || (which == 1 && ! negated))
6543 strcat (buf, "<");
6544 else
6545 strcat (buf, ">=");
6546 if (negated)
6547 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6548 else
6549 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6550 }
6551 /* Handle short backwards branch with an unfilled delay slot.
6552 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6553 taken and untaken branches. */
6554 else if (dbr_sequence_length () == 0
6555 && ! forward_branch_p (insn)
6556 && INSN_ADDRESSES_SET_P ()
6557 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6558 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6559 {
6560 strcpy (buf, "bb,");
6561 if (GET_MODE (operands[0]) == DImode)
6562 strcat (buf, "*");
6563 if ((which == 0 && negated)
6564 || (which == 1 && ! negated))
6565 strcat (buf, ">=");
6566 else
6567 strcat (buf, "<");
6568 if (negated)
6569 strcat (buf, " %0,%1,%3%#");
6570 else
6571 strcat (buf, " %0,%1,%2%#");
6572 }
6573 else
6574 {
6575 strcpy (buf, "{extrs,|extrw,s,}");
6576 if (GET_MODE (operands[0]) == DImode)
6577 strcpy (buf, "extrd,s,*");
6578 if ((which == 0 && negated)
6579 || (which == 1 && ! negated))
6580 strcat (buf, "<");
6581 else
6582 strcat (buf, ">=");
6583 if (nullify && negated)
6584 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6585 else if (nullify && ! negated)
6586 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6587 else if (negated)
6588 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6589 else
6590 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6591 }
6592 break;
6593
6594 default:
6595 abort ();
6596 }
6597 return buf;
6598 }
6599
6600 /* This routine handles all the branch-on-variable-bit conditional branch
6601 sequences we might need to generate. It handles nullification of delay
6602 slots, varying length branches, negated branches and all combinations
6603 of the above. It returns the appropriate output template to emit the
6604 branch. */
6605
6606 const char *
6607 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6608 int negated, rtx insn, int which)
6609 {
6610 static char buf[100];
6611 int useskip = 0;
6612
6613 /* A conditional branch to the following instruction (e.g., the delay slot) is
6614 asking for a disaster. I do not think this can happen as this pattern
6615 is only used when optimizing; jump optimization should eliminate the
6616 jump. But be prepared just in case. */
6617
6618 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6619 return "nop";
6620
6621 /* If this is a long branch with its delay slot unfilled, set `nullify'
6622 as it can nullify the delay slot and save a nop. */
6623 if (length == 8 && dbr_sequence_length () == 0)
6624 nullify = 1;
6625
6626 /* If this is a short forward conditional branch which did not get
6627 its delay slot filled, the delay slot can still be nullified. */
6628 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6629 nullify = forward_branch_p (insn);
6630
6631 /* A forward branch over a single nullified insn can be done with an
6632 extrs instruction. This avoids a single cycle penalty due to
6633 mis-predicted branch if we fall through (branch not taken). */
6634
6635 if (length == 4
6636 && next_real_insn (insn) != 0
6637 && get_attr_length (next_real_insn (insn)) == 4
6638 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6639 && nullify)
6640 useskip = 1;
6641
6642 switch (length)
6643 {
6644
6645 /* All short conditional branches except backwards with an unfilled
6646 delay slot. */
6647 case 4:
6648 if (useskip)
6649 strcpy (buf, "{vextrs,|extrw,s,}");
6650 else
6651 strcpy (buf, "{bvb,|bb,}");
6652 if (useskip && GET_MODE (operands[0]) == DImode)
6653 strcpy (buf, "extrd,s,*");
6654 else if (GET_MODE (operands[0]) == DImode)
6655 strcpy (buf, "bb,*");
6656 if ((which == 0 && negated)
6657 || (which == 1 && ! negated))
6658 strcat (buf, ">=");
6659 else
6660 strcat (buf, "<");
6661 if (useskip)
6662 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6663 else if (nullify && negated)
6664 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6665 else if (nullify && ! negated)
6666 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6667 else if (! nullify && negated)
6668 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6669 else if (! nullify && ! negated)
6670 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6671 break;
6672
6673 /* All long conditionals. Note a short backward branch with an
6674 unfilled delay slot is treated just like a long backward branch
6675 with an unfilled delay slot. */
6676 case 8:
6677 /* Handle weird backwards branch with a filled delay slot
6678 which is nullified. */
6679 if (dbr_sequence_length () != 0
6680 && ! forward_branch_p (insn)
6681 && nullify)
6682 {
6683 strcpy (buf, "{bvb,|bb,}");
6684 if (GET_MODE (operands[0]) == DImode)
6685 strcat (buf, "*");
6686 if ((which == 0 && negated)
6687 || (which == 1 && ! negated))
6688 strcat (buf, "<");
6689 else
6690 strcat (buf, ">=");
6691 if (negated)
6692 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6693 else
6694 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6695 }
6696 /* Handle short backwards branch with an unfilled delay slot.
6697 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6698 taken and untaken branches. */
6699 else if (dbr_sequence_length () == 0
6700 && ! forward_branch_p (insn)
6701 && INSN_ADDRESSES_SET_P ()
6702 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6703 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6704 {
6705 strcpy (buf, "{bvb,|bb,}");
6706 if (GET_MODE (operands[0]) == DImode)
6707 strcat (buf, "*");
6708 if ((which == 0 && negated)
6709 || (which == 1 && ! negated))
6710 strcat (buf, ">=");
6711 else
6712 strcat (buf, "<");
6713 if (negated)
6714 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6715 else
6716 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6717 }
6718 else
6719 {
6720 strcpy (buf, "{vextrs,|extrw,s,}");
6721 if (GET_MODE (operands[0]) == DImode)
6722 strcpy (buf, "extrd,s,*");
6723 if ((which == 0 && negated)
6724 || (which == 1 && ! negated))
6725 strcat (buf, "<");
6726 else
6727 strcat (buf, ">=");
6728 if (nullify && negated)
6729 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6730 else if (nullify && ! negated)
6731 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6732 else if (negated)
6733 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6734 else
6735 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6736 }
6737 break;
6738
6739 default:
6740 abort ();
6741 }
6742 return buf;
6743 }
6744
6745 /* Return the output template for emitting a dbra type insn.
6746
6747 Note it may perform some output operations on its own before
6748 returning the final output string. */
6749 const char *
6750 output_dbra (rtx *operands, rtx insn, int which_alternative)
6751 {
6752
6753 /* A conditional branch to the following instruction (e.g., the delay slot) is
6754 asking for a disaster. Be prepared! */
6755
6756 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6757 {
6758 if (which_alternative == 0)
6759 return "ldo %1(%0),%0";
6760 else if (which_alternative == 1)
6761 {
6762 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6763 output_asm_insn ("ldw -16(%%r30),%4", operands);
6764 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6765 return "{fldws|fldw} -16(%%r30),%0";
6766 }
6767 else
6768 {
6769 output_asm_insn ("ldw %0,%4", operands);
6770 return "ldo %1(%4),%4\n\tstw %4,%0";
6771 }
6772 }
6773
6774 if (which_alternative == 0)
6775 {
6776 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6777 int length = get_attr_length (insn);
6778
6779 /* If this is a long branch with its delay slot unfilled, set `nullify'
6780 as it can nullify the delay slot and save a nop. */
6781 if (length == 8 && dbr_sequence_length () == 0)
6782 nullify = 1;
6783
6784 /* If this is a short forward conditional branch which did not get
6785 its delay slot filled, the delay slot can still be nullified. */
6786 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6787 nullify = forward_branch_p (insn);
6788
6789 /* Handle short versions first. */
6790 if (length == 4 && nullify)
6791 return "addib,%C2,n %1,%0,%3";
6792 else if (length == 4 && ! nullify)
6793 return "addib,%C2 %1,%0,%3";
6794 else if (length == 8)
6795 {
6796 /* Handle weird backwards branch with a filled delay slot
6797 which is nullified. */
6798 if (dbr_sequence_length () != 0
6799 && ! forward_branch_p (insn)
6800 && nullify)
6801 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6802 /* Handle short backwards branch with an unfilled delay slot.
6803 Using an addb;nop rather than addi;bl saves 1 cycle for both
6804 taken and untaken branches. */
6805 else if (dbr_sequence_length () == 0
6806 && ! forward_branch_p (insn)
6807 && INSN_ADDRESSES_SET_P ()
6808 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6809 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6810 return "addib,%C2 %1,%0,%3%#";
6811
6812 /* Handle normal cases. */
6813 if (nullify)
6814 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6815 else
6816 return "addi,%N2 %1,%0,%0\n\tb %3";
6817 }
6818 else
6819 abort ();
6820 }
6821 /* Deal with gross reload from FP register case. */
6822 else if (which_alternative == 1)
6823 {
6824 /* Move loop counter from FP register to MEM then into a GR,
6825 increment the GR, store the GR into MEM, and finally reload
6826 the FP register from MEM from within the branch's delay slot. */
6827 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6828 operands);
6829 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6830 if (get_attr_length (insn) == 24)
6831 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6832 else
6833 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6834 }
6835 /* Deal with gross reload from memory case. */
6836 else
6837 {
6838 /* Reload loop counter from memory, the store back to memory
6839 happens in the branch's delay slot. */
6840 output_asm_insn ("ldw %0,%4", operands);
6841 if (get_attr_length (insn) == 12)
6842 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6843 else
6844 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6845 }
6846 }
6847
6848 /* Return the output template for emitting a movb type insn.
6849
6850 Note it may perform some output operations on its own before
6851 returning the final output string. */
6852 const char *
6853 output_movb (rtx *operands, rtx insn, int which_alternative,
6854 int reverse_comparison)
6855 {
6856
6857 /* A conditional branch to the following instruction (e.g., the delay slot) is
6858 asking for a disaster. Be prepared! */
6859
6860 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6861 {
6862 if (which_alternative == 0)
6863 return "copy %1,%0";
6864 else if (which_alternative == 1)
6865 {
6866 output_asm_insn ("stw %1,-16(%%r30)", operands);
6867 return "{fldws|fldw} -16(%%r30),%0";
6868 }
6869 else if (which_alternative == 2)
6870 return "stw %1,%0";
6871 else
6872 return "mtsar %r1";
6873 }
6874
6875 /* Support the second variant. */
6876 if (reverse_comparison)
6877 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6878
6879 if (which_alternative == 0)
6880 {
6881 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6882 int length = get_attr_length (insn);
6883
6884 /* If this is a long branch with its delay slot unfilled, set `nullify'
6885 as it can nullify the delay slot and save a nop. */
6886 if (length == 8 && dbr_sequence_length () == 0)
6887 nullify = 1;
6888
6889 /* If this is a short forward conditional branch which did not get
6890 its delay slot filled, the delay slot can still be nullified. */
6891 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6892 nullify = forward_branch_p (insn);
6893
6894 /* Handle short versions first. */
6895 if (length == 4 && nullify)
6896 return "movb,%C2,n %1,%0,%3";
6897 else if (length == 4 && ! nullify)
6898 return "movb,%C2 %1,%0,%3";
6899 else if (length == 8)
6900 {
6901 /* Handle weird backwards branch with a filled delay slot
6902 which is nullified. */
6903 if (dbr_sequence_length () != 0
6904 && ! forward_branch_p (insn)
6905 && nullify)
6906 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6907
6908 /* Handle short backwards branch with an unfilled delay slot.
6909 Using a movb;nop rather than or;bl saves 1 cycle for both
6910 taken and untaken branches. */
6911 else if (dbr_sequence_length () == 0
6912 && ! forward_branch_p (insn)
6913 && INSN_ADDRESSES_SET_P ()
6914 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6915 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6916 return "movb,%C2 %1,%0,%3%#";
6917 /* Handle normal cases. */
6918 if (nullify)
6919 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6920 else
6921 return "or,%N2 %1,%%r0,%0\n\tb %3";
6922 }
6923 else
6924 abort ();
6925 }
6926 /* Deal with gross reload from FP register case. */
6927 else if (which_alternative == 1)
6928 {
6929 /* Move loop counter from FP register to MEM then into a GR,
6930 increment the GR, store the GR into MEM, and finally reload
6931 the FP register from MEM from within the branch's delay slot. */
6932 output_asm_insn ("stw %1,-16(%%r30)", operands);
6933 if (get_attr_length (insn) == 12)
6934 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6935 else
6936 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6937 }
6938 /* Deal with gross reload from memory case. */
6939 else if (which_alternative == 2)
6940 {
6941 /* Reload loop counter from memory, the store back to memory
6942 happens in the branch's delay slot. */
6943 if (get_attr_length (insn) == 8)
6944 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6945 else
6946 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6947 }
6948 /* Handle SAR as a destination. */
6949 else
6950 {
6951 if (get_attr_length (insn) == 8)
6952 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6953 else
6954 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tbl %3\n\tmtsar %r1";
6955 }
6956 }
6957
6958 /* Copy any FP arguments in INSN into integer registers. */
6959 static void
6960 copy_fp_args (rtx insn)
6961 {
6962 rtx link;
6963 rtx xoperands[2];
6964
6965 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6966 {
6967 int arg_mode, regno;
6968 rtx use = XEXP (link, 0);
6969
6970 if (! (GET_CODE (use) == USE
6971 && GET_CODE (XEXP (use, 0)) == REG
6972 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6973 continue;
6974
6975 arg_mode = GET_MODE (XEXP (use, 0));
6976 regno = REGNO (XEXP (use, 0));
6977
6978 /* Is it a floating point register? */
6979 if (regno >= 32 && regno <= 39)
6980 {
6981 /* Copy the FP register into an integer register via memory. */
6982 if (arg_mode == SFmode)
6983 {
6984 xoperands[0] = XEXP (use, 0);
6985 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6986 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6987 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6988 }
6989 else
6990 {
6991 xoperands[0] = XEXP (use, 0);
6992 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6993 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6994 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6995 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6996 }
6997 }
6998 }
6999 }
7000
7001 /* Compute length of the FP argument copy sequence for INSN. */
7002 static int
7003 length_fp_args (rtx insn)
7004 {
7005 int length = 0;
7006 rtx link;
7007
7008 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7009 {
7010 int arg_mode, regno;
7011 rtx use = XEXP (link, 0);
7012
7013 if (! (GET_CODE (use) == USE
7014 && GET_CODE (XEXP (use, 0)) == REG
7015 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7016 continue;
7017
7018 arg_mode = GET_MODE (XEXP (use, 0));
7019 regno = REGNO (XEXP (use, 0));
7020
7021 /* Is it a floating point register? */
7022 if (regno >= 32 && regno <= 39)
7023 {
7024 if (arg_mode == SFmode)
7025 length += 8;
7026 else
7027 length += 12;
7028 }
7029 }
7030
7031 return length;
7032 }
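/* E.g., a call passing one float and one double in FP registers gets
   a copy sequence of 8 + 12 = 20 bytes: two instructions for the
   SFmode store/load and three for the DFmode case.  */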
7033
7034 /* Return the attribute length for the millicode call instruction INSN.
7035 The length must match the code generated by output_millicode_call.
7036 We include the delay slot in the returned length as it is better to
7037 overestimate the length than to underestimate it. */
7038
7039 int
7040 attr_length_millicode_call (rtx insn)
7041 {
7042 unsigned long distance = -1;
7043 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7044
7045 if (INSN_ADDRESSES_SET_P ())
7046 {
7047 distance = (total + insn_current_reference_address (insn));
7048 if (distance < total)
7049 distance = -1;
7050 }
7051
7052 if (TARGET_64BIT)
7053 {
7054 if (!TARGET_LONG_CALLS && distance < 7600000)
7055 return 8;
7056
7057 return 20;
7058 }
7059 else if (TARGET_PORTABLE_RUNTIME)
7060 return 24;
7061 else
7062 {
7063 if (!TARGET_LONG_CALLS && distance < 240000)
7064 return 8;
7065
7066 if (TARGET_LONG_ABS_CALL && !flag_pic)
7067 return 12;
7068
7069 return 24;
7070 }
7071 }
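/* For instance, a 32-bit millicode call known to be within reach is
   just the branch plus its delay slot (8 bytes); with
   TARGET_LONG_ABS_CALL and no PIC it is ldil/ble plus the slot
   (12 bytes); otherwise the conservative 24-byte estimate applies.  */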
7072
7073 /* INSN is a function call. It may have an unconditional jump
7074 in its delay slot.
7075
7076 CALL_DEST is the routine we are calling. */
7077
7078 const char *
7079 output_millicode_call (rtx insn, rtx call_dest)
7080 {
7081 int attr_length = get_attr_length (insn);
7082 int seq_length = dbr_sequence_length ();
7083 int distance;
7084 rtx seq_insn;
7085 rtx xoperands[3];
7086
7087 xoperands[0] = call_dest;
7088 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7089
7090 /* Handle the common case where we are sure that the branch will
7091 reach the beginning of the $CODE$ subspace. The within-reach
7092 form of the $$sh_func_adrs call has a length of 28. Because
7093 it has an attribute type of multi, it never has a nonzero
7094 sequence length. The length of the $$sh_func_adrs is the same
7095 as certain out-of-reach PIC calls to other routines. */
7096 if (!TARGET_LONG_CALLS
7097 && ((seq_length == 0
7098 && (attr_length == 12
7099 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7100 || (seq_length != 0 && attr_length == 8)))
7101 {
7102 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7103 }
7104 else
7105 {
7106 if (TARGET_64BIT)
7107 {
7108 /* It might seem that one insn could be saved by accessing
7109 the millicode function using the linkage table. However,
7110 this doesn't work in shared libraries and other dynamically
7111 loaded objects. Using a pc-relative sequence also avoids
7112 problems related to the implicit use of the gp register. */
7113 output_asm_insn ("b,l .+8,%%r1", xoperands);
7114
7115 if (TARGET_GAS)
7116 {
7117 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7118 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7119 }
7120 else
7121 {
7122 xoperands[1] = gen_label_rtx ();
7123 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7124 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7125 CODE_LABEL_NUMBER (xoperands[1]));
7126 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7127 }
7128
7129 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7130 }
7131 else if (TARGET_PORTABLE_RUNTIME)
7132 {
7133 /* Pure portable runtime doesn't allow be/ble; we also don't
7134 have PIC support in the assembler/linker, so this sequence
7135 is needed. */
7136
7137 /* Get the address of our target into %r1. */
7138 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7139 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7140
7141 /* Get our return address into %r31. */
7142 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7143 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7144
7145 /* Jump to our target address in %r1. */
7146 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7147 }
7148 else if (!flag_pic)
7149 {
7150 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7151 if (TARGET_PA_20)
7152 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7153 else
7154 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7155 }
7156 else
7157 {
7158 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7159 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7160
7161 if (TARGET_SOM || !TARGET_GAS)
7162 {
7163 /* The HP assembler can generate relocations for the
7164 difference of two symbols. GAS can do this for a
7165 millicode symbol but not an arbitrary external
7166 symbol when generating SOM output. */
7167 xoperands[1] = gen_label_rtx ();
7168 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7169 CODE_LABEL_NUMBER (xoperands[1]));
7170 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7171 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7172 }
7173 else
7174 {
7175 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7176 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7177 xoperands);
7178 }
7179
7180 /* Jump to our target address in %r1. */
7181 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7182 }
7183 }
7184
7185 if (seq_length == 0)
7186 output_asm_insn ("nop", xoperands);
7187
7188 /* We are done if there isn't a jump in the delay slot. */
7189 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7190 return "";
7191
7192 /* This call has an unconditional jump in its delay slot. */
7193 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7194
7195 /* See if the return address can be adjusted. Use the containing
7196 sequence insn's address. */
7197 if (INSN_ADDRESSES_SET_P ())
7198 {
7199 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7200 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7201 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7202
7203 if (VAL_14_BITS_P (distance))
7204 {
7205 xoperands[1] = gen_label_rtx ();
7206 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7207 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7208 CODE_LABEL_NUMBER (xoperands[1]));
7209 }
7210 else
7211 /* ??? This branch may not reach its target. */
7212 output_asm_insn ("nop\n\tb,n %0", xoperands);
7213 }
7214 else
7215 /* ??? This branch may not reach its target. */
7216 output_asm_insn ("nop\n\tb,n %0", xoperands);
7217
7218 /* Delete the jump. */
7219 PUT_CODE (NEXT_INSN (insn), NOTE);
7220 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7221 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7222
7223 return "";
7224 }
7225
7226 /* Return the attribute length of the call instruction INSN. The SIBCALL
7227 flag indicates whether INSN is a regular call or a sibling call. The
7228 length returned must be longer than the code actually generated by
7229 output_call. Since branch shortening is done before delay branch
7230 sequencing, there is no way to determine whether or not the delay
7231 slot will be filled during branch shortening. Even when the delay
7232 slot is filled, we may have to add a nop if the delay slot contains
7233 a branch that can't reach its target. Thus, we always have to include
7234 the delay slot in the length estimate. This used to be done in
7235 pa_adjust_insn_length but we do it here now as some sequences always
7236 fill the delay slot and we can save four bytes in the estimate for
7237 these sequences. */
7238
7239 int
7240 attr_length_call (rtx insn, int sibcall)
7241 {
7242 int local_call;
7243 rtx call_dest;
7244 tree call_decl;
7245 int length = 0;
7246 rtx pat = PATTERN (insn);
7247 unsigned long distance = -1;
7248
7249 if (INSN_ADDRESSES_SET_P ())
7250 {
7251 unsigned long total;
7252
7253 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7254 distance = (total + insn_current_reference_address (insn));
7255 if (distance < total)
7256 distance = -1;
7257 }
7258
7259 /* Determine if this is a local call. */
7260 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7261 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7262 else
7263 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7264
7265 call_decl = SYMBOL_REF_DECL (call_dest);
7266 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7267
7268 /* pc-relative branch. */
7269 if (!TARGET_LONG_CALLS
7270 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7271 || distance < 240000))
7272 length += 8;
7273
7274 /* 64-bit plabel sequence. */
7275 else if (TARGET_64BIT && !local_call)
7276 length += sibcall ? 28 : 24;
7277
7278 /* non-pic long absolute branch sequence. */
7279 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7280 length += 12;
7281
7282 /* long pc-relative branch sequence. */
7283 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7284 || (TARGET_64BIT && !TARGET_GAS)
7285 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7286 {
7287 length += 20;
7288
7289 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7290 length += 8;
7291 }
7292
7293 /* 32-bit plabel sequence. */
7294 else
7295 {
7296 length += 32;
7297
7298 if (TARGET_SOM)
7299 length += length_fp_args (insn);
7300
7301 if (flag_pic)
7302 length += 4;
7303
7304 if (!TARGET_PA_20)
7305 {
7306 if (!sibcall)
7307 length += 8;
7308
7309 if (!TARGET_NO_SPACE_REGS)
7310 length += 8;
7311 }
7312 }
7313
7314 return length;
7315 }
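
/* A worked example of the estimate above (purely illustrative):
   a 32-bit plabel call on SOM with -fPIC, on PA 1.x with space
   registers enabled, and not a sibcall, costs
   32 + length_fp_args (insn) + 4 + 8 + 8 bytes.  With no FP
   arguments that is 52 bytes, i.e., thirteen instruction words
   including the delay slot.  */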
7316
7317 /* INSN is a function call. It may have an unconditional jump
7318 in its delay slot.
7319
7320 CALL_DEST is the routine we are calling. */
7321
7322 const char *
7323 output_call (rtx insn, rtx call_dest, int sibcall)
7324 {
7325 int delay_insn_deleted = 0;
7326 int delay_slot_filled = 0;
7327 int seq_length = dbr_sequence_length ();
7328 tree call_decl = SYMBOL_REF_DECL (call_dest);
7329 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7330 rtx xoperands[2];
7331
7332 xoperands[0] = call_dest;
7333
7334 /* Handle the common case where we're sure that the branch will reach
7335 the beginning of the "$CODE$" subspace. This is the beginning of
7336 the current function if we are in a named section. */
7337 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7338 {
7339 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7340 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7341 }
7342 else
7343 {
7344 if (TARGET_64BIT && !local_call)
7345 {
7346 /* ??? As far as I can tell, the HP linker doesn't support the
7347 long pc-relative sequence described in the 64-bit runtime
7348 architecture. So, we use a slightly longer indirect call. */
7349 struct deferred_plabel *p = get_plabel (XSTR (call_dest, 0));
7350
7351 xoperands[0] = p->internal_label;
7352 xoperands[1] = gen_label_rtx ();
7353
7354 /* If this isn't a sibcall, we put the load of %r27 into the
7355 delay slot. We can't do this in a sibcall as we don't
7356 have a second call-clobbered scratch register available. */
7357 if (seq_length != 0
7358 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7359 && !sibcall)
7360 {
7361 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7362 optimize, 0, 0, NULL);
7363
7364 /* Now delete the delay insn. */
7365 PUT_CODE (NEXT_INSN (insn), NOTE);
7366 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7367 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7368 delay_insn_deleted = 1;
7369 }
7370
7371 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7372 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7373 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7374
7375 if (sibcall)
7376 {
7377 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7378 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7379 output_asm_insn ("bve (%%r1)", xoperands);
7380 }
7381 else
7382 {
7383 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7384 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7385 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7386 delay_slot_filled = 1;
7387 }
7388 }
7389 else
7390 {
7391 int indirect_call = 0;
7392
7393 /* Emit a long call. There are several different sequences
7394 of increasing length and complexity. In most cases,
7395 they don't allow an instruction in the delay slot. */
7396 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7397 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7398 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7399 && !TARGET_64BIT)
7400 indirect_call = 1;
7401
7402 if (seq_length != 0
7403 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7404 && !sibcall
7405 && (!TARGET_PA_20 || indirect_call))
7406 {
7407 /* A non-jump insn in the delay slot. By definition we can
7408 emit this insn before the call (and in fact before argument
7409 relocation).  */
7410 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0, 0,
7411 NULL);
7412
7413 /* Now delete the delay insn. */
7414 PUT_CODE (NEXT_INSN (insn), NOTE);
7415 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7416 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7417 delay_insn_deleted = 1;
7418 }
7419
7420 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7421 {
7422 /* This is the best sequence for making long calls in
7423 non-pic code. Unfortunately, GNU ld doesn't provide
7424 the stub needed for external calls, and GAS's support
7425 for this with the SOM linker is buggy. It is safe
7426 to use this for local calls. */
7427 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7428 if (sibcall)
7429 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7430 else
7431 {
7432 if (TARGET_PA_20)
7433 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7434 xoperands);
7435 else
7436 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7437
7438 output_asm_insn ("copy %%r31,%%r2", xoperands);
7439 delay_slot_filled = 1;
7440 }
7441 }
7442 else
7443 {
7444 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7445 || (TARGET_64BIT && !TARGET_GAS))
7446 {
7447 /* The HP assembler and linker can handle relocations
7448 for the difference of two symbols. GAS and the HP
7449 linker can't do this when one of the symbols is
7450 external. */
7451 xoperands[1] = gen_label_rtx ();
7452 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7453 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7454 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7455 CODE_LABEL_NUMBER (xoperands[1]));
7456 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7457 }
7458 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7459 {
7460 /* GAS currently can't generate the relocations that
7461 are needed for the SOM linker under HP-UX using this
7462 sequence. The GNU linker doesn't generate the stubs
7463 that are needed for external calls on TARGET_ELF32
7464 with this sequence. For now, we have to use a
7465 longer plabel sequence when using GAS. */
7466 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7467 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7468 xoperands);
7469 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7470 xoperands);
7471 }
7472 else
7473 {
7474 /* Emit a long plabel-based call sequence. This is
7475 essentially an inline implementation of $$dyncall.
7476 We don't actually try to call $$dyncall as this is
7477 as difficult as calling the function itself. */
7478 struct deferred_plabel *p = get_plabel (XSTR (call_dest, 0));
7479
7480 xoperands[0] = p->internal_label;
7481 xoperands[1] = gen_label_rtx ();
7482
7483 /* Since the call is indirect, FP arguments in registers
7484 need to be copied to the general registers. Then, the
7485 argument relocation stub will copy them back. */
7486 if (TARGET_SOM)
7487 copy_fp_args (insn);
7488
7489 if (flag_pic)
7490 {
7491 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7492 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7493 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7494 }
7495 else
7496 {
7497 output_asm_insn ("addil LR'%0-$global$,%%r27",
7498 xoperands);
7499 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7500 xoperands);
7501 }
7502
7503 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7504 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7505 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7506 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7507
7508 if (!sibcall && !TARGET_PA_20)
7509 {
7510 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7511 if (TARGET_NO_SPACE_REGS)
7512 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7513 else
7514 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7515 }
7516 }
7517
7518 if (TARGET_PA_20)
7519 {
7520 if (sibcall)
7521 output_asm_insn ("bve (%%r1)", xoperands);
7522 else
7523 {
7524 if (indirect_call)
7525 {
7526 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7527 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7528 delay_slot_filled = 1;
7529 }
7530 else
7531 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7532 }
7533 }
7534 else
7535 {
7536 if (!TARGET_NO_SPACE_REGS)
7537 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7538 xoperands);
7539
7540 if (sibcall)
7541 {
7542 if (TARGET_NO_SPACE_REGS)
7543 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7544 else
7545 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7546 }
7547 else
7548 {
7549 if (TARGET_NO_SPACE_REGS)
7550 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7551 else
7552 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7553
7554 if (indirect_call)
7555 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7556 else
7557 output_asm_insn ("copy %%r31,%%r2", xoperands);
7558 delay_slot_filled = 1;
7559 }
7560 }
7561 }
7562 }
7563 }
7564
7565 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7566 output_asm_insn ("nop", xoperands);
7567
7568 /* We are done if there isn't a jump in the delay slot. */
7569 if (seq_length == 0
7570 || delay_insn_deleted
7571 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7572 return "";
7573
7574 /* A sibcall should never have a branch in the delay slot. */
7575 if (sibcall)
7576 abort ();
7577
7578 /* This call has an unconditional jump in its delay slot. */
7579 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7580
7581 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7582 {
7583 /* See if the return address can be adjusted. Use the containing
7584 sequence insn's address. */
7585 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7586 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7587 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7588
7589 if (VAL_14_BITS_P (distance))
7590 {
7591 xoperands[1] = gen_label_rtx ();
7592 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7593 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7594 CODE_LABEL_NUMBER (xoperands[1]));
7595 }
7596 else
7597 output_asm_insn ("nop\n\tb,n %0", xoperands);
7598 }
7599 else
7600 output_asm_insn ("b,n %0", xoperands);
7601
7602 /* Delete the jump. */
7603 PUT_CODE (NEXT_INSN (insn), NOTE);
7604 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7605 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7606
7607 return "";
7608 }
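
/* For illustration, the (TARGET_LONG_ABS_CALL && !flag_pic) case
   above emits, for a non-sibcall on PA 2.0 ("foo" is a stand-in
   target):

	ldil L'foo,%r1
	be,l R'foo(%sr4,%r1),%sr0,%r31
	copy %r31,%r2

   which matches the 12-byte long absolute case counted in
   attr_length_call.  */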
7609
7610 /* Return the attribute length of the indirect call instruction INSN.
7611 The length must match the code generated by output_indirect_call.
7612 The returned length includes the delay slot. Currently, the delay
7613 slot of an indirect call sequence is not exposed and it is used by
7614 the sequence itself. */
7615
7616 int
7617 attr_length_indirect_call (rtx insn)
7618 {
7619 unsigned long distance = -1;
7620 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7621
7622 if (INSN_ADDRESSES_SET_P ())
7623 {
7624 distance = (total + insn_current_reference_address (insn));
7625 if (distance < total)
7626 distance = -1;
7627 }
7628
7629 if (TARGET_64BIT)
7630 return 12;
7631
7632 if (TARGET_FAST_INDIRECT_CALLS
7633 || (!TARGET_PORTABLE_RUNTIME
7634 && ((TARGET_PA_20 && distance < 7600000) || distance < 240000)))
7635 return 8;
7636
7637 if (flag_pic)
7638 return 24;
7639
7640 if (TARGET_PORTABLE_RUNTIME)
7641 return 20;
7642
7643 /* Out of reach, can use ble. */
7644 return 12;
7645 }
7646
7647 const char *
7648 output_indirect_call (rtx insn, rtx call_dest)
7649 {
7650 rtx xoperands[1];
7651
7652 if (TARGET_64BIT)
7653 {
7654 xoperands[0] = call_dest;
7655 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7656 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7657 return "";
7658 }
7659
7660 /* First the special case for kernels, level 0 systems, etc. */
7661 if (TARGET_FAST_INDIRECT_CALLS)
7662 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7663
7664 /* Now the normal case -- we can reach $$dyncall directly or
7665 we're sure that we can get there via a long-branch stub.
7666
7667 No need to check target flags as the length uniquely identifies
7668 the remaining cases. */
7669 if (attr_length_indirect_call (insn) == 8)
7670 {
7671 /* The HP linker substitutes a BLE for millicode calls using
7672 the short PIC PCREL form. Thus, we must use %r31 as the
7673 link register when generating PA 1.x code. */
7674 if (TARGET_PA_20)
7675 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7676 else
7677 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7678 }
7679
7680 /* Long millicode call, but we are not generating PIC or portable runtime
7681 code. */
7682 if (attr_length_indirect_call (insn) == 12)
7683 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7684
7685 /* Long millicode call for portable runtime. */
7686 if (attr_length_indirect_call (insn) == 20)
7687 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7688
7689 /* We need a long PIC call to $$dyncall. */
7690 xoperands[0] = NULL_RTX;
7691 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7692 if (TARGET_SOM || !TARGET_GAS)
7693 {
7694 xoperands[0] = gen_label_rtx ();
7695 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7696 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7697 CODE_LABEL_NUMBER (xoperands[0]));
7698 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7699 }
7700 else
7701 {
7702 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7703 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7704 xoperands);
7705 }
7706 output_asm_insn ("blr %%r0,%%r2", xoperands);
7707 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7708 return "";
7709 }
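
/* A sketch of how the lengths returned by attr_length_indirect_call
   map onto the sequences above (each PA instruction is 4 bytes):

	 8   bl/b,l $$dyncall + copy			direct reach
	12   ldil + ble + copy				long millicode call
	20   ldil + ldo + blr + bv,n + nop		portable runtime
	24   bl + addil + ldo + blr + bv,n + nop	long PIC call  */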
7710
7711 /* Return the total length of the save and restore instructions needed for
7712 the data linkage table pointer (i.e., the PIC register) across the call
7713 instruction INSN. No-return calls do not require a save and restore.
7714 In addition, we may be able to avoid the save and restore for calls
7715 within the same translation unit. */
7716
7717 int
7718 attr_length_save_restore_dltp (rtx insn)
7719 {
7720 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7721 return 0;
7722
7723 return 8;
7724 }
7725
7726 /* In HPUX 8.0's shared library scheme, special relocations are needed
7727 for function labels if they might be passed to a function
7728 in a shared library (because shared libraries don't live in code
7729 space), and special magic is needed to construct their address. */
7730
7731 void
7732 hppa_encode_label (rtx sym)
7733 {
7734 const char *str = XSTR (sym, 0);
7735 int len = strlen (str) + 1;
7736 char *newstr, *p;
7737
7738 p = newstr = alloca (len + 1);
7739 *p++ = '@';
7740 strcpy (p, str);
7741
7742 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7743 }
7744
7745 static void
7746 pa_encode_section_info (tree decl, rtx rtl, int first)
7747 {
7748 if (first && TEXT_SPACE_P (decl))
7749 {
7750 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7751 if (TREE_CODE (decl) == FUNCTION_DECL)
7752 hppa_encode_label (XEXP (rtl, 0));
7753 }
7754 }
7755
7756 /* This is, in a sense, the inverse of pa_encode_section_info.  */
7757
7758 static const char *
7759 pa_strip_name_encoding (const char *str)
7760 {
7761 str += (*str == '@');
7762 str += (*str == '*');
7763 return str;
7764 }
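
/* A round-trip sketch (the name is a placeholder):

     XSTR (sym, 0) == "foo"
     hppa_encode_label (sym);		XSTR (sym, 0) == "@foo"
     pa_strip_name_encoding ("@foo")	returns "foo"

   The '*' case strips the generic user-label prefix.  */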
7765
7766 int
7767 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7768 {
7769 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7770 }
7771
7772 /* Returns 1 if OP is a function label involved in a simple addition
7773 with a constant. Used to keep certain patterns from matching
7774 during instruction combination. */
7775 int
7776 is_function_label_plus_const (rtx op)
7777 {
7778 /* Strip off any CONST. */
7779 if (GET_CODE (op) == CONST)
7780 op = XEXP (op, 0);
7781
7782 return (GET_CODE (op) == PLUS
7783 && function_label_operand (XEXP (op, 0), Pmode)
7784 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7785 }
7786
7787 /* Output assembly code for a thunk to FUNCTION. */
7788
7789 static void
7790 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7791 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7792 tree function)
7793 {
7794 const char *fname = XSTR (XEXP (DECL_RTL (function), 0), 0);
7795 const char *tname = XSTR (XEXP (DECL_RTL (thunk_fndecl), 0), 0);
7796 int val_14 = VAL_14_BITS_P (delta);
7797 int nbytes = 0;
7798 static unsigned int current_thunk_number;
7799 char label[16];
7800
7801 ASM_OUTPUT_LABEL (file, tname);
7802 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7803
7804 fname = (*targetm.strip_name_encoding) (fname);
7805 tname = (*targetm.strip_name_encoding) (tname);
7806
7807 /* Output the thunk. We know that the function is in the same
7808 translation unit (i.e., the same space) as the thunk, and that
7809 thunks are output after their method. Thus, we don't need an
7810 external branch to reach the function. With SOM and GAS,
7811 functions and thunks are effectively in different sections.
7812 Thus, we can always use an IA-relative branch and the linker
7813 will add a long branch stub if necessary.
7814
7815 However, we have to be careful when generating PIC code on the
7816 SOM port to ensure that the sequence does not transfer to an
7817 import stub for the target function as this could clobber the
7818 return value saved at SP-24. This would also apply to the
7819 32-bit linux port if the multi-space model is implemented. */
7820 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7821 && !(flag_pic && TREE_PUBLIC (function))
7822 && (TARGET_GAS || last_address < 262132))
7823 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7824 && ((targetm.have_named_sections
7825 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7826 /* The GNU 64-bit linker has rather poor stub management.
7827 So, we use a long branch from thunks that aren't in
7828 the same section as the target function. */
7829 && ((!TARGET_64BIT
7830 && (DECL_SECTION_NAME (thunk_fndecl)
7831 != DECL_SECTION_NAME (function)))
7832 || ((DECL_SECTION_NAME (thunk_fndecl)
7833 == DECL_SECTION_NAME (function))
7834 && last_address < 262132)))
7835 || (!targetm.have_named_sections && last_address < 262132))))
7836 {
7837 if (val_14)
7838 {
7839 fprintf (file, "\tb %s\n\tldo " HOST_WIDE_INT_PRINT_DEC
7840 "(%%r26),%%r26\n", fname, delta);
7841 nbytes += 8;
7842 }
7843 else
7844 {
7845 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7846 ",%%r26\n", delta);
7847 fprintf (file, "\tb %s\n\tldo R'" HOST_WIDE_INT_PRINT_DEC
7848 "(%%r1),%%r26\n", fname, delta);
7849 nbytes += 12;
7850 }
7851 }
7852 else if (TARGET_64BIT)
7853 {
7854 /* We only have one call-clobbered scratch register, so we can't
7855 make use of the delay slot if delta doesn't fit in 14 bits. */
7856 if (!val_14)
7857 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7858 ",%%r26\n\tldo R'" HOST_WIDE_INT_PRINT_DEC
7859 "(%%r1),%%r26\n", delta, delta);
7860
7861 fprintf (file, "\tb,l .+8,%%r1\n");
7862
7863 if (TARGET_GAS)
7864 {
7865 fprintf (file, "\taddil L'%s-$PIC_pcrel$0+4,%%r1\n", fname);
7866 fprintf (file, "\tldo R'%s-$PIC_pcrel$0+8(%%r1),%%r1\n", fname);
7867 }
7868 else
7869 {
7870 int off = val_14 ? 8 : 16;
7871 fprintf (file, "\taddil L'%s-%s-%d,%%r1\n", fname, tname, off);
7872 fprintf (file, "\tldo R'%s-%s-%d(%%r1),%%r1\n", fname, tname, off);
7873 }
7874
7875 if (val_14)
7876 {
7877 fprintf (file, "\tbv %%r0(%%r1)\n\tldo ");
7878 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7879 nbytes += 20;
7880 }
7881 else
7882 {
7883 fprintf (file, "\tbv,n %%r0(%%r1)\n");
7884 nbytes += 24;
7885 }
7886 }
7887 else if (TARGET_PORTABLE_RUNTIME)
7888 {
7889 fprintf (file, "\tldil L'%s,%%r1\n", fname);
7890 fprintf (file, "\tldo R'%s(%%r1),%%r22\n", fname);
7891
7892 if (val_14)
7893 {
7894 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7895 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7896 nbytes += 16;
7897 }
7898 else
7899 {
7900 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7901 ",%%r26\n", delta);
7902 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7903 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7904 nbytes += 20;
7905 }
7906 }
7907 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7908 {
7909 /* The function is accessible from outside this module. The only
7910 way to avoid an import stub between the thunk and function is to
7911 call the function directly with an indirect sequence similar to
7912 that used by $$dyncall. This is possible because $$dyncall acts
7913 as the import stub in an indirect call. */
7914 const char *lab;
7915
7916 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7917 lab = (*targetm.strip_name_encoding) (label);
7918
7919 fprintf (file, "\taddil LT'%s,%%r19\n", lab);
7920 fprintf (file, "\tldw RT'%s(%%r1),%%r22\n", lab);
7921 fprintf (file, "\tldw 0(%%sr0,%%r22),%%r22\n");
7922 fprintf (file, "\tbb,>=,n %%r22,30,.+16\n");
7923 fprintf (file, "\tdepi 0,31,2,%%r22\n");
7924 fprintf (file, "\tldw 4(%%sr0,%%r22),%%r19\n");
7925 fprintf (file, "\tldw 0(%%sr0,%%r22),%%r22\n");
7926 if (!val_14)
7927 {
7928 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7929 ",%%r26\n", delta);
7930 nbytes += 4;
7931 }
7932 if (TARGET_PA_20)
7933 {
7934 fprintf (file, "\tbve (%%r22)\n\tldo ");
7935 nbytes += 36;
7936 }
7937 else
7938 {
7939 if (TARGET_NO_SPACE_REGS)
7940 {
7941 fprintf (file, "\tbe 0(%%sr4,%%r22)\n\tldo ");
7942 nbytes += 36;
7943 }
7944 else
7945 {
7946 fprintf (file, "\tldsid (%%sr0,%%r22),%%r21\n");
7947 fprintf (file, "\tmtsp %%r21,%%sr0\n");
7948 fprintf (file, "\tbe 0(%%sr0,%%r22)\n\tldo ");
7949 nbytes += 44;
7950 }
7951 }
7952
7953 if (val_14)
7954 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7955 else
7956 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7957 }
7958 else if (flag_pic)
7959 {
7960 if (TARGET_PA_20)
7961 fprintf (file, "\tb,l .+8,%%r1\n");
7962 else
7963 fprintf (file, "\tbl .+8,%%r1\n");
7964
7965 if (TARGET_SOM || !TARGET_GAS)
7966 {
7967 fprintf (file, "\taddil L'%s-%s-8,%%r1\n", fname, tname);
7968 fprintf (file, "\tldo R'%s-%s-8(%%r1),%%r22\n", fname, tname);
7969 }
7970 else
7971 {
7972 fprintf (file, "\taddil L'%s-$PIC_pcrel$0+4,%%r1\n", fname);
7973 fprintf (file, "\tldo R'%s-$PIC_pcrel$0+8(%%r1),%%r22\n", fname);
7974 }
7975
7976 if (val_14)
7977 {
7978 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7979 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7980 nbytes += 20;
7981 }
7982 else
7983 {
7984 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7985 ",%%r26\n", delta);
7986 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7987 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7988 nbytes += 24;
7989 }
7990 }
7991 else
7992 {
7993 if (!val_14)
7994 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC ",%%r26\n", delta);
7995
7996 fprintf (file, "\tldil L'%s,%%r22\n", fname);
7997 fprintf (file, "\tbe R'%s(%%sr4,%%r22)\n\tldo ", fname);
7998
7999 if (val_14)
8000 {
8001 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
8002 nbytes += 12;
8003 }
8004 else
8005 {
8006 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
8007 nbytes += 16;
8008 }
8009 }
8010
8011 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8012
8013 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8014 {
8015 data_section ();
8016 fprintf (file, "\t.align 4\n");
8017 ASM_OUTPUT_LABEL (file, label);
8018 fprintf (file, "\t.word P'%s\n", fname);
8019 }
8020 else if (TARGET_SOM && TARGET_GAS)
8021 forget_section ();
8022
8023 current_thunk_number++;
8024 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8025 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8026 last_address += nbytes;
8027 update_total_code_bytes (nbytes);
8028 }
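
/* For illustration, the short-branch case above yields a thunk of
   the form ("f" stands in for the target function, with DELTA in
   the signed 14-bit range -8192 .. 8191 accepted by VAL_14_BITS_P):

   thunk:
	.PROC
	.CALLINFO FRAME=0,NO_CALLS
	.ENTRY
	b f
	ldo DELTA(%r26),%r26
	.EXIT
	.PROCEND

   The this pointer in %r26 is adjusted in the delay slot of the
   branch to the real method.  */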
8029
8030 /* Only direct calls to static functions are allowed to be sibling (tail)
8031 call optimized.
8032
8033 This restriction is necessary because some linker-generated stubs will
8034 store return pointers into rp' in some cases which might clobber a
8035 live value already in rp'.
8036
8037 In a sibcall the current function and the target function share stack
8038 space. Thus if the path to the current function and the path to the
8039 target function save a value in rp', they save the value into the
8040 same stack slot, which has undesirable consequences.
8041
8042 Because of the deferred binding nature of shared libraries any function
8043 with external scope could be in a different load module and thus require
8044 rp' to be saved when calling that function. So sibcall optimizations
8045 can only be safe for static functions.
8046
8047 Note that GCC never needs return value relocations, so we don't have to
8048 worry about static calls with return value relocations (which require
8049 saving rp').
8050
8051 It is safe to perform a sibcall optimization when the target function
8052 will never return. */
8053 static bool
8054 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8055 {
8056 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8057 single subspace mode and the call is not indirect. As far as I know,
8058 there is no operating system support for the multiple subspace mode.
8059 It might be possible to support indirect calls if we didn't use
8060 $$dyncall (see the indirect sequence generated in output_call). */
8061 if (TARGET_ELF32)
8062 return (decl != NULL_TREE);
8063
8064 /* Sibcalls are not ok because the arg pointer register is not a fixed
8065 register. This prevents the sibcall optimization from occurring. In
8066 addition, there are problems with stub placement using GNU ld. This
8067 is because a normal sibcall branch uses a 17-bit relocation while
8068 a regular call branch uses a 22-bit relocation. As a result, more
8069 care needs to be taken in the placement of long-branch stubs. */
8070 if (TARGET_64BIT)
8071 return false;
8072
8073 return (decl
8074 && !TARGET_PORTABLE_RUNTIME
8075 && !TREE_PUBLIC (decl));
8076 }
8077
8078 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8079 use in fmpyadd instructions. */
8080 int
8081 fmpyaddoperands (rtx *operands)
8082 {
8083 enum machine_mode mode = GET_MODE (operands[0]);
8084
8085 /* Must be a floating point mode. */
8086 if (mode != SFmode && mode != DFmode)
8087 return 0;
8088
8089 /* All modes must be the same. */
8090 if (! (mode == GET_MODE (operands[1])
8091 && mode == GET_MODE (operands[2])
8092 && mode == GET_MODE (operands[3])
8093 && mode == GET_MODE (operands[4])
8094 && mode == GET_MODE (operands[5])))
8095 return 0;
8096
8097 /* All operands must be registers. */
8098 if (! (GET_CODE (operands[1]) == REG
8099 && GET_CODE (operands[2]) == REG
8100 && GET_CODE (operands[3]) == REG
8101 && GET_CODE (operands[4]) == REG
8102 && GET_CODE (operands[5]) == REG))
8103 return 0;
8104
8105 /* Only 2 real operands to the addition. One of the input operands must
8106 be the same as the output operand. */
8107 if (! rtx_equal_p (operands[3], operands[4])
8108 && ! rtx_equal_p (operands[3], operands[5]))
8109 return 0;
8110
8111 /* The inout operand of the add cannot conflict with any operand of the multiply.  */
8112 if (rtx_equal_p (operands[3], operands[0])
8113 || rtx_equal_p (operands[3], operands[1])
8114 || rtx_equal_p (operands[3], operands[2]))
8115 return 0;
8116
8117 /* The multiply result cannot feed into the addition operands.  */
8118 if (rtx_equal_p (operands[4], operands[0])
8119 || rtx_equal_p (operands[5], operands[0]))
8120 return 0;
8121
8122 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs.  */
8123 if (mode == SFmode
8124 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8125 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8126 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8127 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8128 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8129 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8130 return 0;
8131
8132 /* Passed. Operands are suitable for fmpyadd. */
8133 return 1;
8134 }
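
/* A sketch of the operand layout checked above; the candidate pair
   being fused is

     operands[0] = operands[1] * operands[2]	(the fmpy)
     operands[3] = operands[4] + operands[5]	(the fadd)

   where operands[3] doubles as an input, so it must match
   operands[4] or operands[5] and stay disjoint from the multiply's
   operands.  */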
8135
8136 #if !defined(USE_COLLECT2)
8137 static void
8138 pa_asm_out_constructor (rtx symbol, int priority)
8139 {
8140 if (!function_label_operand (symbol, VOIDmode))
8141 hppa_encode_label (symbol);
8142
8143 #ifdef CTORS_SECTION_ASM_OP
8144 default_ctor_section_asm_out_constructor (symbol, priority);
8145 #else
8146 # ifdef TARGET_ASM_NAMED_SECTION
8147 default_named_section_asm_out_constructor (symbol, priority);
8148 # else
8149 default_stabs_asm_out_constructor (symbol, priority);
8150 # endif
8151 #endif
8152 }
8153
8154 static void
8155 pa_asm_out_destructor (rtx symbol, int priority)
8156 {
8157 if (!function_label_operand (symbol, VOIDmode))
8158 hppa_encode_label (symbol);
8159
8160 #ifdef DTORS_SECTION_ASM_OP
8161 default_dtor_section_asm_out_destructor (symbol, priority);
8162 #else
8163 # ifdef TARGET_ASM_NAMED_SECTION
8164 default_named_section_asm_out_destructor (symbol, priority);
8165 # else
8166 default_stabs_asm_out_destructor (symbol, priority);
8167 # endif
8168 #endif
8169 }
8170 #endif
8171
8172 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8173 use in fmpysub instructions. */
8174 int
8175 fmpysuboperands (rtx *operands)
8176 {
8177 enum machine_mode mode = GET_MODE (operands[0]);
8178
8179 /* Must be a floating point mode. */
8180 if (mode != SFmode && mode != DFmode)
8181 return 0;
8182
8183 /* All modes must be the same. */
8184 if (! (mode == GET_MODE (operands[1])
8185 && mode == GET_MODE (operands[2])
8186 && mode == GET_MODE (operands[3])
8187 && mode == GET_MODE (operands[4])
8188 && mode == GET_MODE (operands[5])))
8189 return 0;
8190
8191 /* All operands must be registers. */
8192 if (! (GET_CODE (operands[1]) == REG
8193 && GET_CODE (operands[2]) == REG
8194 && GET_CODE (operands[3]) == REG
8195 && GET_CODE (operands[4]) == REG
8196 && GET_CODE (operands[5]) == REG))
8197 return 0;
8198
8199 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8200 operation, so operands[4] must be the same as operand[3]. */
8201 if (! rtx_equal_p (operands[3], operands[4]))
8202 return 0;
8203
8204 /* The multiply result cannot feed into the subtraction.  */
8205 if (rtx_equal_p (operands[5], operands[0]))
8206 return 0;
8207
8208 /* The inout operand of the sub cannot conflict with any operand of the multiply.  */
8209 if (rtx_equal_p (operands[3], operands[0])
8210 || rtx_equal_p (operands[3], operands[1])
8211 || rtx_equal_p (operands[3], operands[2]))
8212 return 0;
8213
8214 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs.  */
8215 if (mode == SFmode
8216 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8217 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8218 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8219 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8220 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8221 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8222 return 0;
8223
8224 /* Passed. Operands are suitable for fmpysub. */
8225 return 1;
8226 }
8227
8228 int
8229 plus_xor_ior_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8230 {
8231 return (GET_CODE (op) == PLUS || GET_CODE (op) == XOR
8232 || GET_CODE (op) == IOR);
8233 }
8234
8235 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8236 constants for shadd instructions. */
8237 static int
8238 shadd_constant_p (int val)
8239 {
8240 if (val == 2 || val == 4 || val == 8)
8241 return 1;
8242 else
8243 return 0;
8244 }
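
/* The constants 2, 4 and 8 correspond to shift counts of 1, 2 and
   3, i.e., the sh1add, sh2add and sh3add forms.  For example
   (registers are placeholders), x * 8 + y can be done as

	sh3add %r4,%r5,%r6	; %r6 = (%r4 << 3) + %r5  */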
8245
8246 /* Return 1 if OP is a CONST_INT with the value 2, 4, or 8. These are
8247 the valid constants for shadd instructions.  */
8248 int
8249 shadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8250 {
8251 return (GET_CODE (op) == CONST_INT && shadd_constant_p (INTVAL (op)));
8252 }
8253
8254 /* Return 1 if OP is valid as a base or index register in a
8255 REG+REG address. */
8256
8257 int
8258 borx_reg_operand (rtx op, enum machine_mode mode)
8259 {
8260 if (GET_CODE (op) != REG)
8261 return 0;
8262
8263 /* We must reject virtual registers as the only expressions that
8264 can be instantiated are REG and REG+CONST. */
8265 if (op == virtual_incoming_args_rtx
8266 || op == virtual_stack_vars_rtx
8267 || op == virtual_stack_dynamic_rtx
8268 || op == virtual_outgoing_args_rtx
8269 || op == virtual_cfa_rtx)
8270 return 0;
8271
8272 /* While it's always safe to index off the frame pointer, it's not
8273 profitable to do so when the frame pointer is being eliminated. */
8274 if (!reload_completed
8275 && flag_omit_frame_pointer
8276 && !current_function_calls_alloca
8277 && op == frame_pointer_rtx)
8278 return 0;
8279
8280 return register_operand (op, mode);
8281 }
8282
8283 /* Return 1 if this operand is anything other than a hard register. */
8284
8285 int
8286 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8287 {
8288 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8289 }
8290
8291 /* Return 1 if INSN branches forward.  This should use insn_addresses
8292 to avoid walking through all the insns.  */
8293 static int
8294 forward_branch_p (rtx insn)
8295 {
8296 rtx label = JUMP_LABEL (insn);
8297
8298 while (insn)
8299 {
8300 if (insn == label)
8301 break;
8302 else
8303 insn = NEXT_INSN (insn);
8304 }
8305
8306 return (insn == label);
8307 }
8308
8309 /* Return 1 if OP is an equality comparison, else return 0. */
8310 int
8311 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8312 {
8313 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8314 }
8315
8316 /* Return 1 if OP is an operator suitable for use in a movb instruction. */
8317 int
8318 movb_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8319 {
8320 return (GET_CODE (op) == EQ || GET_CODE (op) == NE
8321 || GET_CODE (op) == LT || GET_CODE (op) == GE);
8322 }
8323
8324 /* Return 1 if INSN is in the delay slot of a call instruction. */
8325 int
8326 jump_in_call_delay (rtx insn)
8327 {
8328
8329 if (GET_CODE (insn) != JUMP_INSN)
8330 return 0;
8331
8332 if (PREV_INSN (insn)
8333 && PREV_INSN (PREV_INSN (insn))
8334 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8335 {
8336 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8337
8338 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8339 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8340
8341 }
8342 else
8343 return 0;
8344 }
8345
8346 /* Output an unconditional move and branch insn. */
8347
8348 const char *
8349 output_parallel_movb (rtx *operands, int length)
8350 {
8351 /* These are the cases in which we win. */
8352 if (length == 4)
8353 return "mov%I1b,tr %1,%0,%2";
8354
8355 /* None of these cases wins, but they don't lose either. */
8356 if (dbr_sequence_length () == 0)
8357 {
8358 /* Nothing in the delay slot, fake it by putting the combined
8359 insn (the copy or add) in the delay slot of a bl. */
8360 if (GET_CODE (operands[1]) == CONST_INT)
8361 return "b %2\n\tldi %1,%0";
8362 else
8363 return "b %2\n\tcopy %1,%0";
8364 }
8365 else
8366 {
8367 /* Something in the delay slot, but we've got a long branch. */
8368 if (GET_CODE (operands[1]) == CONST_INT)
8369 return "ldi %1,%0\n\tb %2";
8370 else
8371 return "copy %1,%0\n\tb %2";
8372 }
8373 }
8374
8375 /* Output an unconditional add and branch insn. */
8376
8377 const char *
8378 output_parallel_addb (rtx *operands, int length)
8379 {
8380 /* To make life easy we want operand0 to be the shared input/output
8381 operand and operand1 to be the readonly operand. */
8382 if (operands[0] == operands[1])
8383 operands[1] = operands[2];
8384
8385 /* These are the cases in which we win. */
8386 if (length == 4)
8387 return "add%I1b,tr %1,%0,%3";
8388
8389 /* None of these cases wins, but they don't lose either. */
8390 if (dbr_sequence_length () == 0)
8391 {
8392 /* Nothing in the delay slot, fake it by putting the combined
8393 insn (the copy or add) in the delay slot of a bl. */
8394 return "b %3\n\tadd%I1 %1,%0,%0";
8395 }
8396 else
8397 {
8398 /* Something in the delay slot, but we've got a long branch. */
8399 return "add%I1 %1,%0,%0\n\tb %3";
8400 }
8401 }
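
/* As an illustrative example (label invented), the winning
   four-byte case above turns the pair

	add %r5,%r4,%r4
	b L$17

   into the single insn

	addb,tr %r5,%r4,L$17

   using the always-true condition to keep the branch unconditional;
   the immediate form becomes addib,tr in the same way.  */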
8402
8403 /* Return nonzero if INSN (a jump insn) immediately follows a call
8404 to a named function. This is used to avoid filling the delay slot
8405 of the jump since it can usually be eliminated by modifying RP in
8406 the delay slot of the call. */
8407
8408 int
8409 following_call (rtx insn)
8410 {
8411 if (! TARGET_JUMP_IN_DELAY)
8412 return 0;
8413
8414 /* Find the previous real insn, skipping NOTEs. */
8415 insn = PREV_INSN (insn);
8416 while (insn && GET_CODE (insn) == NOTE)
8417 insn = PREV_INSN (insn);
8418
8419 /* Check for CALL_INSNs and millicode calls. */
8420 if (insn
8421 && ((GET_CODE (insn) == CALL_INSN
8422 && get_attr_type (insn) != TYPE_DYNCALL)
8423 || (GET_CODE (insn) == INSN
8424 && GET_CODE (PATTERN (insn)) != SEQUENCE
8425 && GET_CODE (PATTERN (insn)) != USE
8426 && GET_CODE (PATTERN (insn)) != CLOBBER
8427 && get_attr_type (insn) == TYPE_MILLI)))
8428 return 1;
8429
8430 return 0;
8431 }
8432
8433 /* We use this hook to perform a PA specific optimization which is difficult
8434 to do in earlier passes.
8435
8436 We want the delay slots of branches within jump tables to be filled.
8437 None of the compiler passes at the moment even has the notion that a
8438 PA jump table doesn't contain addresses, but instead contains actual
8439 instructions!
8440
8441 Because we actually jump into the table, the addresses of each entry
8442 must stay constant in relation to the beginning of the table (which
8443 itself must stay constant relative to the instruction to jump into
8444 it). I don't believe we can guarantee earlier passes of the compiler
8445 will adhere to those rules.
8446
8447 So, late in the compilation process we find all the jump tables, and
8448 expand them into real code -- eg each entry in the jump table vector
8449 will get an appropriate label followed by a jump to the final target.
8450
8451 Reorg and the final jump pass can then optimize these branches and
8452 fill their delay slots. We end up with smaller, more efficient code.
8453
8454 The jump instructions within the table are special; we must be able
8455 to identify them during assembly output (if the jumps don't get filled
8456 we need to emit a nop rather than nullifying the delay slot).  We
8457 identify jumps in switch tables by using insns with the attribute
8458 type TYPE_BTABLE_BRANCH.
8459
8460 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8461 insns. This serves two purposes, first it prevents jump.c from
8462 noticing that the last N entries in the table jump to the instruction
8463 immediately after the table and deleting the jumps. Second, those
8464 insns mark where we should emit .begin_brtab and .end_brtab directives
8465 when using GAS (allows for better link time optimizations). */
8466
8467 static void
8468 pa_reorg (void)
8469 {
8470 rtx insn;
8471
8472 remove_useless_addtr_insns (1);
8473
8474 if (pa_cpu < PROCESSOR_8000)
8475 pa_combine_instructions ();
8476
8477
8478 /* This is fairly cheap, so always run it if optimizing. */
8479 if (optimize > 0 && !TARGET_BIG_SWITCH)
8480 {
8481 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8482 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8483 {
8484 rtx pattern, tmp, location, label;
8485 unsigned int length, i;
8486
8487 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8488 if (GET_CODE (insn) != JUMP_INSN
8489 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8490 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8491 continue;
8492
8493 /* Emit marker for the beginning of the branch table. */
8494 emit_insn_before (gen_begin_brtab (), insn);
8495
8496 pattern = PATTERN (insn);
8497 location = PREV_INSN (insn);
8498 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8499
8500 for (i = 0; i < length; i++)
8501 {
8502 /* Emit a label before each jump to keep jump.c from
8503 removing this code. */
8504 tmp = gen_label_rtx ();
8505 LABEL_NUSES (tmp) = 1;
8506 emit_label_after (tmp, location);
8507 location = NEXT_INSN (location);
8508
8509 if (GET_CODE (pattern) == ADDR_VEC)
8510 label = XEXP (XVECEXP (pattern, 0, i), 0);
8511 else
8512 label = XEXP (XVECEXP (pattern, 1, i), 0);
8513
8514 tmp = gen_short_jump (label);
8515
8516 /* Emit the jump itself. */
8517 tmp = emit_jump_insn_after (tmp, location);
8518 JUMP_LABEL (tmp) = label;
8519 LABEL_NUSES (label)++;
8520 location = NEXT_INSN (location);
8521
8522 /* Emit a BARRIER after the jump. */
8523 emit_barrier_after (location);
8524 location = NEXT_INSN (location);
8525 }
8526
8527 /* Emit marker for the end of the branch table. */
8528 emit_insn_before (gen_end_brtab (), location);
8529 location = NEXT_INSN (location);
8530 emit_barrier_after (location);
8531
8532 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8533 delete_insn (insn);
8534 }
8535 }
8536 else
8537 {
8538 /* Still need brtab marker insns. FIXME: the presence of these
8539 markers disables output of the branch table to readonly memory,
8540 and any alignment directives that might be needed. Possibly,
8541 the begin_brtab insn should be output before the label for the
8542 table. This doesn't matter at the moment since the tables are
8543 always output in the text section. */
8544 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8545 {
8546 /* Find an ADDR_VEC insn. */
8547 if (GET_CODE (insn) != JUMP_INSN
8548 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8549 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8550 continue;
8551
8552 /* Now generate markers for the beginning and end of the
8553 branch table. */
8554 emit_insn_before (gen_begin_brtab (), insn);
8555 emit_insn_after (gen_end_brtab (), insn);
8556 }
8557 }
8558 }
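
/* A sketch of the expansion performed above (labels invented): a
   two-entry ADDR_VEC that would have been output as

	.word L$3
	.word L$4

   becomes fixed-size branch instructions that can be jumped into,

   L$10:	b L$3	; delay slot fillable by reorg
   L$11:	b L$4

   bracketed by begin_brtab/end_brtab marker insns.  */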
8559
8560 /* The PA has a number of odd instructions which can perform multiple
8561 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8562 it may be profitable to combine two instructions into one instruction
8563 with two outputs. It's not profitable on PA2.0 machines because the
8564 two outputs would take two slots in the reorder buffers.
8565
8566 This routine finds instructions which can be combined and combines
8567 them. We only support some of the potential combinations, and we
8568 only try common ways to find suitable instructions.
8569
8570 * addb can add two registers or a register and a small integer
8571 and jump to a nearby (+-8k) location. Normally the jump to the
8572 nearby location is conditional on the result of the add, but by
8573 using the "true" condition we can make the jump unconditional.
8574 Thus addb can perform two independent operations in one insn.
8575
8576 * movb is similar to addb in that it can perform a reg->reg
8577 or small immediate->reg copy and jump to a nearby (+-8k) location.
8578
8579 * fmpyadd and fmpysub can perform a FP multiply and either an
8580 FP add or FP sub if the operands of the multiply and add/sub are
8581 independent (there are other minor restrictions). Note both
8582 the fmpy and fadd/fsub can in theory move to better spots according
8583 to data dependencies, but for now we require the fmpy stay at a
8584 fixed location.
8585
8586 * Many of the memory operations can perform pre & post updates
8587 of index registers. GCC's pre/post increment/decrement addressing
8588 is far too simple to take advantage of all the possibilities. This
8589 pass may not be suitable since those insns may not be independent.
8590
8591 * comclr can compare two ints or an int and a register, nullify
8592 the following instruction and zero some other register. This
8593 is more difficult to use as it's harder to find an insn which
8594 will generate a comclr than finding something like an unconditional
8595 branch. (conditional moves & long branches create comclr insns).
8596
8597 * Most arithmetic operations can conditionally skip the next
8598 instruction. They can be viewed as "perform this operation
8599 and conditionally jump to this nearby location" (where nearby
8600 is an insn away).  These are difficult to use due to the
8601 branch length restrictions. */
8602
8603 static void
8604 pa_combine_instructions (void)
8605 {
8606 rtx anchor, new;
8607
8608 /* This can get expensive since the basic algorithm is on the
8609 order of O(n^2) (or worse). Only do it for -O2 or higher
8610 levels of optimization. */
8611 if (optimize < 2)
8612 return;
8613
8614 /* Walk down the list of insns looking for "anchor" insns which
8615 may be combined with "floating" insns. As the name implies,
8616 "anchor" instructions don't move, while "floating" insns may
8617 move around. */
8618 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8619 new = make_insn_raw (new);
8620
8621 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8622 {
8623 enum attr_pa_combine_type anchor_attr;
8624 enum attr_pa_combine_type floater_attr;
8625
8626 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8627 Also ignore any special USE insns. */
8628 if ((GET_CODE (anchor) != INSN
8629 && GET_CODE (anchor) != JUMP_INSN
8630 && GET_CODE (anchor) != CALL_INSN)
8631 || GET_CODE (PATTERN (anchor)) == USE
8632 || GET_CODE (PATTERN (anchor)) == CLOBBER
8633 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8634 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8635 continue;
8636
8637 anchor_attr = get_attr_pa_combine_type (anchor);
8638 /* See if anchor is an insn suitable for combination. */
8639 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8640 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8641 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8642 && ! forward_branch_p (anchor)))
8643 {
8644 rtx floater;
8645
8646 for (floater = PREV_INSN (anchor);
8647 floater;
8648 floater = PREV_INSN (floater))
8649 {
8650 if (GET_CODE (floater) == NOTE
8651 || (GET_CODE (floater) == INSN
8652 && (GET_CODE (PATTERN (floater)) == USE
8653 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8654 continue;
8655
8656 /* Anything except a regular INSN will stop our search. */
8657 if (GET_CODE (floater) != INSN
8658 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8659 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8660 {
8661 floater = NULL_RTX;
8662 break;
8663 }
8664
8665 /* See if FLOATER is suitable for combination with the
8666 anchor. */
8667 floater_attr = get_attr_pa_combine_type (floater);
8668 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8669 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8670 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8671 && floater_attr == PA_COMBINE_TYPE_FMPY))
8672 {
8673 /* If ANCHOR and FLOATER can be combined, then we're
8674 done with this pass. */
8675 if (pa_can_combine_p (new, anchor, floater, 0,
8676 SET_DEST (PATTERN (floater)),
8677 XEXP (SET_SRC (PATTERN (floater)), 0),
8678 XEXP (SET_SRC (PATTERN (floater)), 1)))
8679 break;
8680 }
8681
8682 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8683 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8684 {
8685 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8686 {
8687 if (pa_can_combine_p (new, anchor, floater, 0,
8688 SET_DEST (PATTERN (floater)),
8689 XEXP (SET_SRC (PATTERN (floater)), 0),
8690 XEXP (SET_SRC (PATTERN (floater)), 1)))
8691 break;
8692 }
8693 else
8694 {
8695 if (pa_can_combine_p (new, anchor, floater, 0,
8696 SET_DEST (PATTERN (floater)),
8697 SET_SRC (PATTERN (floater)),
8698 SET_SRC (PATTERN (floater))))
8699 break;
8700 }
8701 }
8702 }
8703
8704 /* If we didn't find anything on the backwards scan try forwards. */
8705 if (!floater
8706 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8707 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8708 {
8709 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8710 {
8711 if (GET_CODE (floater) == NOTE
8712 || (GET_CODE (floater) == INSN
8713 && (GET_CODE (PATTERN (floater)) == USE
8714 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8715
8716 continue;
8717
8718 /* Anything except a regular INSN will stop our search. */
8719 if (GET_CODE (floater) != INSN
8720 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8721 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8722 {
8723 floater = NULL_RTX;
8724 break;
8725 }
8726
8727 /* See if FLOATER is suitable for combination with the
8728 anchor. */
8729 floater_attr = get_attr_pa_combine_type (floater);
8730 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8731 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8732 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8733 && floater_attr == PA_COMBINE_TYPE_FMPY))
8734 {
8735 /* If ANCHOR and FLOATER can be combined, then we're
8736 done with this pass. */
8737 if (pa_can_combine_p (new, anchor, floater, 1,
8738 SET_DEST (PATTERN (floater)),
8739 XEXP (SET_SRC (PATTERN (floater)),
8740 0),
8741 XEXP (SET_SRC (PATTERN (floater)),
8742 1)))
8743 break;
8744 }
8745 }
8746 }
8747
8748 /* FLOATER will be nonzero if we found a suitable floating
8749 insn for combination with ANCHOR. */
8750 if (floater
8751 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8752 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8753 {
8754 /* Emit the new instruction and delete the old anchor. */
8755 emit_insn_before (gen_rtx_PARALLEL
8756 (VOIDmode,
8757 gen_rtvec (2, PATTERN (anchor),
8758 PATTERN (floater))),
8759 anchor);
8760
8761 PUT_CODE (anchor, NOTE);
8762 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8763 NOTE_SOURCE_FILE (anchor) = 0;
8764
8765 /* Emit a special USE insn for FLOATER, then delete
8766 the floating insn. */
8767 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8768 delete_insn (floater);
8769
8770 continue;
8771 }
8772 else if (floater
8773 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8774 {
8775 rtx temp;
8776 /* Emit the new_jump instruction and delete the old anchor. */
8777 temp
8778 = emit_jump_insn_before (gen_rtx_PARALLEL
8779 (VOIDmode,
8780 gen_rtvec (2, PATTERN (anchor),
8781 PATTERN (floater))),
8782 anchor);
8783
8784 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8785 PUT_CODE (anchor, NOTE);
8786 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8787 NOTE_SOURCE_FILE (anchor) = 0;
8788
8789 /* Emit a special USE insn for FLOATER, then delete
8790 the floating insn. */
8791 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8792 delete_insn (floater);
8793 continue;
8794 }
8795 }
8796 }
8797 }
8798
8799 static int
8800 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8801 rtx src1, rtx src2)
8802 {
8803 int insn_code_number;
8804 rtx start, end;
8805
8806 /* Create a PARALLEL with the patterns of ANCHOR and
8807 FLOATER, try to recognize it, then test constraints
8808 for the resulting pattern.
8809
8810 If the pattern doesn't match or the constraints
8811 aren't met keep searching for a suitable floater
8812 insn. */
8813 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8814 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8815 INSN_CODE (new) = -1;
8816 insn_code_number = recog_memoized (new);
8817 if (insn_code_number < 0
8818 || (extract_insn (new), ! constrain_operands (1)))
8819 return 0;
8820
8821 if (reversed)
8822 {
8823 start = anchor;
8824 end = floater;
8825 }
8826 else
8827 {
8828 start = floater;
8829 end = anchor;
8830 }
8831
8832 /* There are up to three operands to consider: one
8833 output and two inputs.
8834
8835 The output must not be used between FLOATER & ANCHOR
8836 exclusive. The inputs must not be set between
8837 FLOATER and ANCHOR exclusive. */
8838
8839 if (reg_used_between_p (dest, start, end))
8840 return 0;
8841
8842 if (reg_set_between_p (src1, start, end))
8843 return 0;
8844
8845 if (reg_set_between_p (src2, start, end))
8846 return 0;
8847
8848 /* If we get here, then everything is good. */
8849 return 1;
8850 }
8851
8852 /* Return nonzero if references for INSN are delayed.
8853
8854 Millicode insns are actually function calls with some special
8855 constraints on arguments and register usage.
8856
8857 Millicode calls always expect their arguments in the integer argument
8858 registers, and always return their result in %r29 (ret1). They
8859 are expected to clobber their arguments, %r1, %r29, and the return
8860 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8861
8862 This function tells reorg that the references to arguments and
8863 millicode calls do not appear to happen until after the millicode call.
8864 This allows reorg to put insns which set the argument registers into the
8865 delay slot of the millicode call -- thus they act more like traditional
8866 CALL_INSNs.
8867
8868 Note we cannot consider side effects of the insn to be delayed because
8869 the branch and link insn will clobber the return pointer. If we happened
8870 to use the return pointer in the delay slot of the call, then we lose.
8871
8872 get_attr_type will try to recognize the given insn, so make sure to
8873 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8874 in particular. */
8875 int
8876 insn_refs_are_delayed (rtx insn)
8877 {
8878 return ((GET_CODE (insn) == INSN
8879 && GET_CODE (PATTERN (insn)) != SEQUENCE
8880 && GET_CODE (PATTERN (insn)) != USE
8881 && GET_CODE (PATTERN (insn)) != CLOBBER
8882 && get_attr_type (insn) == TYPE_MILLI));
8883 }
8884
8885 /* On the HP-PA the value is found in register(s) 28(-29), unless
8886 the mode is SF or DF. Then the value is returned in fr4 (32).
8887
8888 This must perform the same promotions as PROMOTE_MODE, else
8889 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8890
8891 Small structures must be returned in a PARALLEL on PA64 in order
8892 to match the HP Compiler ABI. */
8893
8894 rtx
8895 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8896 {
8897 enum machine_mode valmode;
8898
8899 /* Aggregates with a size less than or equal to 128 bits are returned
8900 in GR 28(-29). They are left justified. The pad bits are undefined.
8901 Larger aggregates are returned in memory. */
8902 if (TARGET_64BIT && AGGREGATE_TYPE_P (valtype))
8903 {
8904 rtx loc[2];
8905 int i, offset = 0;
8906 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8907
8908 for (i = 0; i < ub; i++)
8909 {
8910 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8911 gen_rtx_REG (DImode, 28 + i),
8912 GEN_INT (offset));
8913 offset += 8;
8914 }
8915
8916 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
8917 }
8918
8919 if ((INTEGRAL_TYPE_P (valtype)
8920 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
8921 || POINTER_TYPE_P (valtype))
8922 valmode = word_mode;
8923 else
8924 valmode = TYPE_MODE (valtype);
8925
8926 if (TREE_CODE (valtype) == REAL_TYPE
8927 && TYPE_MODE (valtype) != TFmode
8928 && !TARGET_SOFT_FLOAT)
8929 return gen_rtx_REG (valmode, 32);
8930
8931 return gen_rtx_REG (valmode, 28);
8932 }
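
/* Examples of the mapping above (illustrative): a plain int is
   returned promoted to word_mode in %r28; a double comes back in
   %fr4 (hard register 32) unless soft float is in effect; and on
   PA64 a 16-byte struct is returned as a PARALLEL of %r28 and
   %r29.  */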
8933
8934 /* Return the location of a parameter that is passed in a register or NULL
8935 if the parameter has any component that is passed in memory.
8936
8937 This is new code and will be pushed into the net sources after
8938 further testing.
8939
8940 ??? We might want to restructure this so that it looks more like other
8941 ports. */
8942 rtx
8943 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8944 int named ATTRIBUTE_UNUSED)
8945 {
8946 int max_arg_words = (TARGET_64BIT ? 8 : 4);
8947 int alignment = 0;
8948 int arg_size;
8949 int fpr_reg_base;
8950 int gpr_reg_base;
8951 rtx retval;
8952
8953 if (mode == VOIDmode)
8954 return NULL_RTX;
8955
8956 arg_size = FUNCTION_ARG_SIZE (mode, type);
8957
8958 /* If this arg would be passed partially or totally on the stack, then
8959 this routine should return NULL_RTX. FUNCTION_ARG_PARTIAL_NREGS will
8960 handle arguments which are split between regs and stack slots if
8961 the ABI mandates split arguments. */
8962 if (! TARGET_64BIT)
8963 {
8964 /* The 32-bit ABI does not split arguments. */
8965 if (cum->words + arg_size > max_arg_words)
8966 return NULL_RTX;
8967 }
8968 else
8969 {
8970 if (arg_size > 1)
8971 alignment = cum->words & 1;
8972 if (cum->words + alignment >= max_arg_words)
8973 return NULL_RTX;
8974 }
8975
8976 /* The 32-bit and 64-bit ABIs are rather different,
8977 particularly in their handling of FP registers. We might
8978 be able to cleverly share code between them, but I'm not
8979 going to bother in the hope that splitting them up results
8980 in code that is more easily understood. */
8981
8982 if (TARGET_64BIT)
8983 {
8984 /* Advance the base registers to their current locations.
8985
8986 Remember, gprs grow towards smaller register numbers while
8987 fprs grow to higher register numbers. Also remember that
8988 although FP regs are 32-bit addressable, we pretend that
8989 the registers are 64-bits wide. */
8990 gpr_reg_base = 26 - cum->words;
8991 fpr_reg_base = 32 + cum->words;
8992
8993 /* Arguments wider than one word and small aggregates need special
8994 treatment. */
8995 if (arg_size > 1
8996 || mode == BLKmode
8997 || (type && AGGREGATE_TYPE_P (type)))
8998 {
8999 /* Double-extended precision (80-bit), quad-precision (128-bit)
9000 and aggregates including complex numbers are aligned on
9001 128-bit boundaries. The first eight 64-bit argument slots
9002 are associated one-to-one with general registers r26
9003 through r19, and also with floating-point registers fr4
9004 through fr11. Arguments larger than one word are always
9005 passed in general registers.
9006
9007 Using a PARALLEL with a word-mode register results in
9008 left-justified data on a big-endian target. */
9009
9010 rtx loc[8];
9011 int i, offset = 0, ub = arg_size;
9012
9013 /* Align the base register. */
9014 gpr_reg_base -= alignment;
9015
9016 ub = MIN (ub, max_arg_words - cum->words - alignment);
9017 for (i = 0; i < ub; i++)
9018 {
9019 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9020 gen_rtx_REG (DImode, gpr_reg_base),
9021 GEN_INT (offset));
9022 gpr_reg_base -= 1;
9023 offset += 8;
9024 }
9025
9026 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9027 }
9028 }
9029 else
9030 {
9031 /* If the argument is larger than a word, then we know precisely
9032 which registers we must use. */
9033 if (arg_size > 1)
9034 {
9035 if (cum->words)
9036 {
9037 gpr_reg_base = 23;
9038 fpr_reg_base = 38;
9039 }
9040 else
9041 {
9042 gpr_reg_base = 25;
9043 fpr_reg_base = 34;
9044 }
9045
9046 /* Structures 5 to 8 bytes in size are passed in the general
9047 registers in the same manner as other non floating-point
9048 objects. The data is right-justified and zero-extended
9049 to 64 bits. This is the opposite of the normal justification
9050 used on big-endian targets and requires special treatment.
9051 We now define BLOCK_REG_PADDING to pad these objects. */
9052 if (mode == BLKmode)
9053 {
9054 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9055 gen_rtx_REG (DImode, gpr_reg_base),
9056 const0_rtx);
9057 return gen_rtx_PARALLEL (mode, gen_rtvec (1, loc));
9058 }
9059 }
9060 else
9061 {
9062 /* We have a single word (32 bits). A simple computation
9063 will get us the register #s we need. */
9064 gpr_reg_base = 26 - cum->words;
9065 fpr_reg_base = 32 + 2 * cum->words;
9066 }
9067 }
9068
9069 /* Determine if the argument needs to be passed in both general and
9070 floating point registers. */
9071 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9072 /* If we are doing soft-float with portable runtime, then there
9073 is no need to worry about FP regs. */
9074 && !TARGET_SOFT_FLOAT
9075 /* The parameter must be some kind of float, else we can just
9076 pass it in integer registers. */
9077 && FLOAT_MODE_P (mode)
9078 /* The target function must not have a prototype. */
9079 && cum->nargs_prototype <= 0
9080 /* libcalls do not need to pass items in both FP and general
9081 registers. */
9082 && type != NULL_TREE
9083 /* All this hair applies to "outgoing" args only. This includes
9084 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9085 && !cum->incoming)
9086 /* Also pass outgoing floating arguments in both registers in indirect
9087 calls with the 32-bit ABI and the HP assembler, since there is no
9088 way to specify the argument locations in static functions.
9089 || (!TARGET_64BIT
9090 && !TARGET_GAS
9091 && !cum->incoming
9092 && cum->indirect
9093 && FLOAT_MODE_P (mode)))
9094 {
9095 retval
9096 = gen_rtx_PARALLEL
9097 (mode,
9098 gen_rtvec (2,
9099 gen_rtx_EXPR_LIST (VOIDmode,
9100 gen_rtx_REG (mode, fpr_reg_base),
9101 const0_rtx),
9102 gen_rtx_EXPR_LIST (VOIDmode,
9103 gen_rtx_REG (mode, gpr_reg_base),
9104 const0_rtx)));
9105 }
9106 else
9107 {
9108 /* See if we should pass this parameter in a general register. */
9109 if (TARGET_SOFT_FLOAT
9110 /* Indirect calls in the normal 32-bit ABI require all arguments
9111 to be passed in general registers. */
9112 || (!TARGET_PORTABLE_RUNTIME
9113 && !TARGET_64BIT
9114 && !TARGET_ELF32
9115 && cum->indirect)
9116 /* If the parameter is not a floating point parameter, then
9117 it belongs in GPRs. */
9118 || !FLOAT_MODE_P (mode))
9119 retval = gen_rtx_REG (mode, gpr_reg_base);
9120 else
9121 retval = gen_rtx_REG (mode, fpr_reg_base);
9122 }
9123 return retval;
9124 }
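/* Worked examples, added for illustration and derived from the code
   above (GCC register numbers, not hardware names).  32-bit ABI,
   DFmode argument with cum->words nonzero: arg_size is 2, so
   gpr_reg_base is 23 and fpr_reg_base is 38; with hardware FP and a
   prototype in scope the result is (reg:DF 38), while an indirect
   call assembled without GAS instead gets a two-element PARALLEL
   naming both (reg:DF 38) and (reg:DF 23), so the callee can find the
   value in either register file.  64-bit ABI, the same argument at
   cum->words == 0 with a prototype: arg_size is 1 and the result is
   simply (reg:DF 32), the first FP argument register.  */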
9125
9126
9127 /* If this arg would be passed totally in registers or totally on the stack,
9128 then this routine should return zero. It is currently called only for
9129 the 64-bit target. */
9130 int
9131 function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9132 tree type, int named ATTRIBUTE_UNUSED)
9133 {
9134 unsigned int max_arg_words = 8;
9135 unsigned int offset = 0;
9136
9137 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9138 offset = 1;
9139
9140 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9141 /* Arg fits fully into registers. */
9142 return 0;
9143 else if (cum->words + offset >= max_arg_words)
9144 /* Arg fully on the stack. */
9145 return 0;
9146 else
9147 /* Arg is split. */
9148 return max_arg_words - cum->words - offset;
9149 }
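/* Worked example, added for illustration: a four-word argument
   starting at cum->words == 6 on an even boundary gets offset 0;
   6 + 0 + 4 > 8 and 6 + 0 < 8, so the argument is split and
   8 - 6 - 0 == 2 words travel in registers, with the remaining two
   words going to the stack.  */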
9150
9151
9152 /* Return 1 if this is a comparison operator. This allows the use of
9153 MATCH_OPERATOR to recognize all the branch insns. */
9154
9155 int
9156 cmpib_comparison_operator (rtx op, enum machine_mode mode)
9157 {
9158 return ((mode == VOIDmode || GET_MODE (op) == mode)
9159 && (GET_CODE (op) == EQ
9160 || GET_CODE (op) == NE
9161 || GET_CODE (op) == GT
9162 || GET_CODE (op) == GTU
9163 || GET_CODE (op) == GE
9164 || GET_CODE (op) == LT
9165 || GET_CODE (op) == LE
9166 || GET_CODE (op) == LEU));
9167 }
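/* Usage sketch, added for illustration: the predicate accepts exactly
   the codes listed above, so

	cmpib_comparison_operator
	  (gen_rtx_GT (VOIDmode, gen_rtx_REG (SImode, 26), const0_rtx),
	   VOIDmode)

   returns 1, while an LTU or GEU comparison returns 0 because those
   codes do not appear in the list.  */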
9168
9169 #ifndef ONE_ONLY_TEXT_SECTION_ASM_OP
9170 #define ONE_ONLY_TEXT_SECTION_ASM_OP ""
9171 #endif
9172
9173 #ifndef NEW_TEXT_SECTION_ASM_OP
9174 #define NEW_TEXT_SECTION_ASM_OP ""
9175 #endif
9176
9177 #ifndef DEFAULT_TEXT_SECTION_ASM_OP
9178 #define DEFAULT_TEXT_SECTION_ASM_OP ""
9179 #endif
9180
9181 /* Select and return a TEXT_SECTION_ASM_OP for the current function.
9182
9183 This function is only used with SOM. Because we don't support
9184 named subspaces, we can only create a new subspace or switch back
9185 into the default text subspace. */
9186 const char *
9187 som_text_section_asm_op (void)
9188 {
9189 if (TARGET_SOM && TARGET_GAS)
9190 {
9191 if (cfun && !cfun->machine->in_nsubspa)
9192 {
9193 /* We only want to emit a .nsubspa directive once at the
9194 start of the function. */
9195 cfun->machine->in_nsubspa = 1;
9196
9197 /* Create a new subspace for the text. This provides
9198 better stub placement and one-only functions. */
9199 if (cfun->decl
9200 && DECL_ONE_ONLY (cfun->decl)
9201 && !DECL_WEAK (cfun->decl))
9202 return ONE_ONLY_TEXT_SECTION_ASM_OP;
9203
9204 return NEW_TEXT_SECTION_ASM_OP;
9205 }
9206 else
9207 {
9208 /* Either there isn't a current function, or the body of the
9209 current function has been completed. So we are changing to the
9210 text section to output debugging information. Do this in
9211 the default text section. We need to forget that we are
9212 in the text section so that text_section will call us the
9213 next time around. */
9214 forget_section ();
9215 }
9216 }
9217
9218 return DEFAULT_TEXT_SECTION_ASM_OP;
9219 }
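/* Flow summary, added for illustration: with SOM and GAS, the first
   text-section switch inside a function returns
   NEW_TEXT_SECTION_ASM_OP (or ONE_ONLY_TEXT_SECTION_ASM_OP for a
   non-weak one-only function) and latches in_nsubspa so the .NSUBSPA
   directive is emitted only once per function; once the body is done,
   the forget_section call above guarantees the next switch re-enters
   this hook and falls through to DEFAULT_TEXT_SECTION_ASM_OP.  */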
9220
9221 /* On hpux10, the linker will give an error if we have a reference
9222 in the read-only data section to a symbol defined in a shared
9223 library. Therefore, expressions that might require a reloc
9224 cannot be placed in the read-only data section. */
9225
9226 static void
9227 pa_select_section (tree exp, int reloc,
9228 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9229 {
9230 if (TREE_CODE (exp) == VAR_DECL
9231 && TREE_READONLY (exp)
9232 && !TREE_THIS_VOLATILE (exp)
9233 && DECL_INITIAL (exp)
9234 && (DECL_INITIAL (exp) == error_mark_node
9235 || TREE_CONSTANT (DECL_INITIAL (exp)))
9236 && !reloc)
9237 {
9238 if (TARGET_SOM
9239 && DECL_ONE_ONLY (exp)
9240 && !DECL_WEAK (exp))
9241 one_only_readonly_data_section ();
9242 else
9243 readonly_data_section ();
9244 }
9245 else if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
9246 && !reloc)
9247 readonly_data_section ();
9248 else if (TARGET_SOM
9249 && TREE_CODE (exp) == VAR_DECL
9250 && DECL_ONE_ONLY (exp)
9251 && !DECL_WEAK (exp)
9252 && DECL_INITIAL (exp))
9253 one_only_data_section ();
9254 else
9255 data_section ();
9256 }
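/* Worked example, added for illustration: a file-scope
   "static const int x = 42;" needing no relocation lands in
   readonly_data_section (); the same constant as a non-weak one-only
   symbol under SOM goes to one_only_readonly_data_section (); a
   writable, initialized, non-weak one-only variable under SOM goes to
   one_only_data_section (); everything else falls through to
   data_section ().  */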
9257
9258 static void
9259 pa_globalize_label (FILE *stream, const char *name)
9260 {
9261 /* We only handle DATA objects here, functions are globalized in
9262 ASM_DECLARE_FUNCTION_NAME. */
9263 if (! FUNCTION_NAME_P (name))
9264 {
9265 fputs ("\t.EXPORT ", stream);
9266 assemble_name (stream, name);
9267 fputs (",DATA\n", stream);
9268 }
9269 }
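/* Example output, added for illustration: for a global data symbol
   "foo" the fputs calls above emit

	.EXPORT foo,DATA

   Function names are recognized by FUNCTION_NAME_P and skipped here;
   as the comment above notes, they are exported from
   ASM_DECLARE_FUNCTION_NAME instead.  */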
9270
9271 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9272
9273 static rtx
9274 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9275 int incoming ATTRIBUTE_UNUSED)
9276 {
9277 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9278 }
9279
9280 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9281
9282 bool
9283 pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
9284 {
9285 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9286 PA64 ABI says that objects larger than 128 bits are returned in memory.
9287 Note, int_size_in_bytes can return -1 if the size of the object is
9288 variable or larger than the maximum value that can be expressed as
9289 a HOST_WIDE_INT. It can also return zero for an empty type. The
9290 simplest way to handle variable and empty types is to pass them in
9291 memory. This avoids problems in defining the boundaries of argument
9292 slots, allocating registers, etc. */
9293 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9294 || int_size_in_bytes (type) <= 0);
9295 }
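/* Worked example, added for illustration: under the 32-bit SOM ABI
   the threshold is 8 bytes, so a 12-byte structure is returned in
   memory while an 8-byte one comes back in GR28(-29); under PA64 the
   cutoff is 16 bytes.  A variable-sized or empty type makes
   int_size_in_bytes return a value <= 0 and is likewise forced into
   memory, per the comment above.  */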
9296
9297 #include "gt-pa.h"