]>
Commit | Line | Data |
---|---|---|
188538df | 1 | /* Subroutines for insn-output.c for HPPA. |
8f949e7e | 2 | Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, |
66647d44 JJ |
3 | 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 |
4 | Free Software Foundation, Inc. | |
188538df TG |
5 | Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c |
6 | ||
b7849684 | 7 | This file is part of GCC. |
188538df | 8 | |
b7849684 | 9 | GCC is free software; you can redistribute it and/or modify |
188538df | 10 | it under the terms of the GNU General Public License as published by |
2f83c7d6 | 11 | the Free Software Foundation; either version 3, or (at your option) |
188538df TG |
12 | any later version. |
13 | ||
b7849684 | 14 | GCC is distributed in the hope that it will be useful, |
188538df TG |
15 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | GNU General Public License for more details. | |
18 | ||
19 | You should have received a copy of the GNU General Public License | |
2f83c7d6 NC |
20 | along with GCC; see the file COPYING3. If not see |
21 | <http://www.gnu.org/licenses/>. */ | |
188538df | 22 | |
188538df | 23 | #include "config.h" |
0b17dd98 | 24 | #include "system.h" |
4977bab6 ZW |
25 | #include "coretypes.h" |
26 | #include "tm.h" | |
188538df TG |
27 | #include "rtl.h" |
28 | #include "regs.h" | |
29 | #include "hard-reg-set.h" | |
30 | #include "real.h" | |
31 | #include "insn-config.h" | |
32 | #include "conditions.h" | |
188538df TG |
33 | #include "insn-attr.h" |
34 | #include "flags.h" | |
35 | #include "tree.h" | |
d499455b | 36 | #include "output.h" |
823fbbce | 37 | #include "except.h" |
becf1647 | 38 | #include "expr.h" |
e78d8e51 | 39 | #include "optabs.h" |
e78d8e51 | 40 | #include "reload.h" |
d777856d | 41 | #include "integrate.h" |
49ad7cfa | 42 | #include "function.h" |
0b17dd98 | 43 | #include "toplev.h" |
d07d525a | 44 | #include "ggc.h" |
519104fe | 45 | #include "recog.h" |
823fbbce | 46 | #include "predict.h" |
519104fe | 47 | #include "tm_p.h" |
672a6f42 NB |
48 | #include "target.h" |
49 | #include "target-def.h" | |
62a53968 | 50 | #include "df.h" |
188538df | 51 | |
5d50fab3 JL |
52 | /* Return nonzero if there is a bypass for the output of |
53 | OUT_INSN and the fp store IN_INSN. */ | |
54 | int | |
b7849684 | 55 | hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn) |
5d50fab3 JL |
56 | { |
57 | enum machine_mode store_mode; | |
58 | enum machine_mode other_mode; | |
59 | rtx set; | |
60 | ||
61 | if (recog_memoized (in_insn) < 0 | |
d4f2728a JDA |
62 | || (get_attr_type (in_insn) != TYPE_FPSTORE |
63 | && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD) | |
5d50fab3 JL |
64 | || recog_memoized (out_insn) < 0) |
65 | return 0; | |
66 | ||
67 | store_mode = GET_MODE (SET_SRC (PATTERN (in_insn))); | |
68 | ||
69 | set = single_set (out_insn); | |
70 | if (!set) | |
71 | return 0; | |
72 | ||
73 | other_mode = GET_MODE (SET_SRC (set)); | |
74 | ||
75 | return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode)); | |
76 | } | |
77 | ||
78 | ||
19ec6a36 AM |
79 | #ifndef DO_FRAME_NOTES |
80 | #ifdef INCOMING_RETURN_ADDR_RTX | |
81 | #define DO_FRAME_NOTES 1 | |
82 | #else | |
83 | #define DO_FRAME_NOTES 0 | |
84 | #endif | |
85 | #endif | |
86 | ||
d8f95bed | 87 | static void copy_reg_pointer (rtx, rtx); |
a2017852 | 88 | static void fix_range (const char *); |
66617831 | 89 | static bool pa_handle_option (size_t, const char *, int); |
f40751dd JH |
90 | static int hppa_address_cost (rtx, bool); |
91 | static bool hppa_rtx_costs (rtx, int, int, int *, bool); | |
b7849684 JE |
92 | static inline rtx force_mode (enum machine_mode, rtx); |
93 | static void pa_reorg (void); | |
94 | static void pa_combine_instructions (void); | |
95 | static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx); | |
96 | static int forward_branch_p (rtx); | |
b7849684 | 97 | static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *); |
70128ad9 AO |
98 | static int compute_movmem_length (rtx); |
99 | static int compute_clrmem_length (rtx); | |
b7849684 JE |
100 | static bool pa_assemble_integer (rtx, unsigned int, int); |
101 | static void remove_useless_addtr_insns (int); | |
a4295210 JDA |
102 | static void store_reg (int, HOST_WIDE_INT, int); |
103 | static void store_reg_modify (int, int, HOST_WIDE_INT); | |
104 | static void load_reg (int, HOST_WIDE_INT, int); | |
105 | static void set_reg_plus_d (int, int, HOST_WIDE_INT, int); | |
b7849684 | 106 | static void pa_output_function_prologue (FILE *, HOST_WIDE_INT); |
67b846fa | 107 | static void update_total_code_bytes (unsigned int); |
b7849684 JE |
108 | static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT); |
109 | static int pa_adjust_cost (rtx, rtx, rtx, int); | |
110 | static int pa_adjust_priority (rtx, int); | |
111 | static int pa_issue_rate (void); | |
d6b5193b RS |
112 | static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED; |
113 | static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT) | |
ae46c4e0 | 114 | ATTRIBUTE_UNUSED; |
b7849684 JE |
115 | static void pa_encode_section_info (tree, rtx, int); |
116 | static const char *pa_strip_name_encoding (const char *); | |
117 | static bool pa_function_ok_for_sibcall (tree, tree); | |
118 | static void pa_globalize_label (FILE *, const char *) | |
a5f3f0ab | 119 | ATTRIBUTE_UNUSED; |
b7849684 JE |
120 | static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, |
121 | HOST_WIDE_INT, tree); | |
35d434ed | 122 | #if !defined(USE_COLLECT2) |
b7849684 JE |
123 | static void pa_asm_out_constructor (rtx, int); |
124 | static void pa_asm_out_destructor (rtx, int); | |
35d434ed | 125 | #endif |
b7849684 | 126 | static void pa_init_builtins (void); |
3f12cd9b | 127 | static rtx hppa_builtin_saveregs (void); |
d7bd8aeb | 128 | static void hppa_va_start (tree, rtx); |
726a989a | 129 | static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *); |
83c32f2e | 130 | static bool pa_scalar_mode_supported_p (enum machine_mode); |
3101faab | 131 | static bool pa_commutative_p (const_rtx x, int outer_code); |
b7849684 JE |
132 | static void copy_fp_args (rtx) ATTRIBUTE_UNUSED; |
133 | static int length_fp_args (rtx) ATTRIBUTE_UNUSED; | |
b7849684 JE |
134 | static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED; |
135 | static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED; | |
136 | static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED; | |
137 | static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED; | |
138 | static void pa_elf_file_start (void) ATTRIBUTE_UNUSED; | |
139 | static void pa_som_file_start (void) ATTRIBUTE_UNUSED; | |
140 | static void pa_linux_file_start (void) ATTRIBUTE_UNUSED; | |
141 | static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED; | |
142 | static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED; | |
143 | static void output_deferred_plabels (void); | |
3674b34d | 144 | static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED; |
744b2d61 JDA |
145 | #ifdef ASM_OUTPUT_EXTERNAL_REAL |
146 | static void pa_hpux_file_end (void); | |
147 | #endif | |
c15c90bb ZW |
148 | #ifdef HPUX_LONG_DOUBLE_LIBRARY |
149 | static void pa_hpux_init_libfuncs (void); | |
150 | #endif | |
3f12cd9b | 151 | static rtx pa_struct_value_rtx (tree, int); |
78a52f11 | 152 | static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode, |
586de218 | 153 | const_tree, bool); |
78a52f11 RH |
154 | static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode, |
155 | tree, bool); | |
9a55eab3 | 156 | static struct machine_function * pa_init_machine_status (void); |
ec963611 JDA |
157 | static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class, |
158 | enum machine_mode, | |
159 | secondary_reload_info *); | |
16c16a24 | 160 | static void pa_extra_live_on_entry (bitmap); |
2eddfed1 | 161 | |
d6b5193b RS |
162 | /* The following extra sections are only used for SOM. */ |
163 | static GTY(()) section *som_readonly_data_section; | |
164 | static GTY(()) section *som_one_only_readonly_data_section; | |
165 | static GTY(()) section *som_one_only_data_section; | |
166 | ||
188538df TG |
167 | /* Save the operands last given to a compare for use when we |
168 | generate a scc or bcc insn. */ | |
188538df TG |
169 | rtx hppa_compare_op0, hppa_compare_op1; |
170 | enum cmp_type hppa_branch_type; | |
171 | ||
a2017852 | 172 | /* Which cpu we are scheduling for. */ |
66617831 | 173 | enum processor_type pa_cpu = TARGET_SCHED_DEFAULT; |
d711cf67 JDA |
174 | |
175 | /* The UNIX standard to use for predefines and linking. */ | |
66617831 | 176 | int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993; |
d711cf67 | 177 | |
68386e1e JL |
178 | /* Counts for the number of callee-saved general and floating point |
179 | registers which were saved by the current function's prologue. */ | |
180 | static int gr_saved, fr_saved; | |
181 | ||
16c16a24 JDA |
182 | /* Boolean indicating whether the return pointer was saved by the |
183 | current function's prologue. */ | |
184 | static bool rp_saved; | |
185 | ||
b7849684 | 186 | static rtx find_addr_reg (rtx); |
188538df | 187 | |
5fad1c24 | 188 | /* Keep track of the number of bytes we have output in the CODE subspace |
279c9bde | 189 | during this compilation so we'll know when to emit inline long-calls. */ |
a02aa5b0 | 190 | unsigned long total_code_bytes; |
279c9bde | 191 | |
5fad1c24 JDA |
192 | /* The last address of the previous function plus the number of bytes in |
193 | associated thunks that have been output. This is used to determine if | |
194 | a thunk can use an IA-relative branch to reach its target function. */ | |
67b846fa | 195 | static unsigned int last_address; |
5fad1c24 | 196 | |
93ae92c1 | 197 | /* Variables to handle plabels that we discover are necessary at assembly |
ddd5a7c1 | 198 | output time. They are output after the current function. */ |
d1b38208 | 199 | struct GTY(()) deferred_plabel |
93ae92c1 JL |
200 | { |
201 | rtx internal_label; | |
744b2d61 | 202 | rtx symbol; |
e2500fed GK |
203 | }; |
204 | static GTY((length ("n_deferred_plabels"))) struct deferred_plabel * | |
205 | deferred_plabels; | |
0f8e3849 | 206 | static size_t n_deferred_plabels = 0; |
a5fe455b | 207 | |
672a6f42 NB |
208 | \f |
209 | /* Initialize the GCC target structure. */ | |
301d03af RS |
210 | |
211 | #undef TARGET_ASM_ALIGNED_HI_OP | |
212 | #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" | |
213 | #undef TARGET_ASM_ALIGNED_SI_OP | |
214 | #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t" | |
215 | #undef TARGET_ASM_ALIGNED_DI_OP | |
216 | #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t" | |
217 | #undef TARGET_ASM_UNALIGNED_HI_OP | |
218 | #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP | |
219 | #undef TARGET_ASM_UNALIGNED_SI_OP | |
220 | #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP | |
221 | #undef TARGET_ASM_UNALIGNED_DI_OP | |
222 | #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP | |
223 | #undef TARGET_ASM_INTEGER | |
224 | #define TARGET_ASM_INTEGER pa_assemble_integer | |
225 | ||
08c148a8 NB |
226 | #undef TARGET_ASM_FUNCTION_PROLOGUE |
227 | #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue | |
228 | #undef TARGET_ASM_FUNCTION_EPILOGUE | |
229 | #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue | |
93ae92c1 | 230 | |
c237e94a ZW |
231 | #undef TARGET_SCHED_ADJUST_COST |
232 | #define TARGET_SCHED_ADJUST_COST pa_adjust_cost | |
233 | #undef TARGET_SCHED_ADJUST_PRIORITY | |
234 | #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority | |
235 | #undef TARGET_SCHED_ISSUE_RATE | |
236 | #define TARGET_SCHED_ISSUE_RATE pa_issue_rate | |
237 | ||
fb49053f RH |
238 | #undef TARGET_ENCODE_SECTION_INFO |
239 | #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info | |
772c5265 RH |
240 | #undef TARGET_STRIP_NAME_ENCODING |
241 | #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding | |
fb49053f | 242 | |
4977bab6 ZW |
243 | #undef TARGET_FUNCTION_OK_FOR_SIBCALL |
244 | #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall | |
245 | ||
8ddf681a R |
246 | #undef TARGET_COMMUTATIVE_P |
247 | #define TARGET_COMMUTATIVE_P pa_commutative_p | |
248 | ||
c590b625 RH |
249 | #undef TARGET_ASM_OUTPUT_MI_THUNK |
250 | #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk | |
3961e8fe RH |
251 | #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK |
252 | #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall | |
c590b625 | 253 | |
a5fe455b | 254 | #undef TARGET_ASM_FILE_END |
744b2d61 JDA |
255 | #ifdef ASM_OUTPUT_EXTERNAL_REAL |
256 | #define TARGET_ASM_FILE_END pa_hpux_file_end | |
257 | #else | |
a5fe455b | 258 | #define TARGET_ASM_FILE_END output_deferred_plabels |
744b2d61 | 259 | #endif |
a5fe455b | 260 | |
35d434ed JDA |
261 | #if !defined(USE_COLLECT2) |
262 | #undef TARGET_ASM_CONSTRUCTOR | |
263 | #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor | |
264 | #undef TARGET_ASM_DESTRUCTOR | |
265 | #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor | |
266 | #endif | |
267 | ||
66617831 RS |
268 | #undef TARGET_DEFAULT_TARGET_FLAGS |
269 | #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT) | |
270 | #undef TARGET_HANDLE_OPTION | |
271 | #define TARGET_HANDLE_OPTION pa_handle_option | |
272 | ||
4677862a JDA |
273 | #undef TARGET_INIT_BUILTINS |
274 | #define TARGET_INIT_BUILTINS pa_init_builtins | |
275 | ||
3c50106f RH |
276 | #undef TARGET_RTX_COSTS |
277 | #define TARGET_RTX_COSTS hppa_rtx_costs | |
dcefdf67 RH |
278 | #undef TARGET_ADDRESS_COST |
279 | #define TARGET_ADDRESS_COST hppa_address_cost | |
3c50106f | 280 | |
18dbd950 RS |
281 | #undef TARGET_MACHINE_DEPENDENT_REORG |
282 | #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg | |
283 | ||
c15c90bb ZW |
284 | #ifdef HPUX_LONG_DOUBLE_LIBRARY |
285 | #undef TARGET_INIT_LIBFUNCS | |
286 | #define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs | |
287 | #endif | |
288 | ||
3f12cd9b | 289 | #undef TARGET_PROMOTE_FUNCTION_RETURN |
586de218 | 290 | #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true |
3f12cd9b | 291 | #undef TARGET_PROMOTE_PROTOTYPES |
586de218 | 292 | #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true |
3f12cd9b KH |
293 | |
294 | #undef TARGET_STRUCT_VALUE_RTX | |
295 | #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx | |
296 | #undef TARGET_RETURN_IN_MEMORY | |
297 | #define TARGET_RETURN_IN_MEMORY pa_return_in_memory | |
fe984136 RH |
298 | #undef TARGET_MUST_PASS_IN_STACK |
299 | #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size | |
8cd5a4e0 RH |
300 | #undef TARGET_PASS_BY_REFERENCE |
301 | #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference | |
6cdd5672 RH |
302 | #undef TARGET_CALLEE_COPIES |
303 | #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true | |
78a52f11 RH |
304 | #undef TARGET_ARG_PARTIAL_BYTES |
305 | #define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes | |
3f12cd9b KH |
306 | |
307 | #undef TARGET_EXPAND_BUILTIN_SAVEREGS | |
308 | #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs | |
d7bd8aeb JJ |
309 | #undef TARGET_EXPAND_BUILTIN_VA_START |
310 | #define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start | |
8101c928 RH |
311 | #undef TARGET_GIMPLIFY_VA_ARG_EXPR |
312 | #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr | |
3f12cd9b | 313 | |
83c32f2e JDA |
314 | #undef TARGET_SCALAR_MODE_SUPPORTED_P |
315 | #define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p | |
316 | ||
51076f96 RC |
317 | #undef TARGET_CANNOT_FORCE_CONST_MEM |
318 | #define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p | |
319 | ||
ec963611 JDA |
320 | #undef TARGET_SECONDARY_RELOAD |
321 | #define TARGET_SECONDARY_RELOAD pa_secondary_reload | |
322 | ||
16c16a24 JDA |
323 | #undef TARGET_EXTRA_LIVE_ON_ENTRY |
324 | #define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry | |
325 | ||
f6897b10 | 326 | struct gcc_target targetm = TARGET_INITIALIZER; |
672a6f42 | 327 | \f |
a2017852 JDA |
328 | /* Parse the -mfixed-range= option string. */ |
329 | ||
330 | static void | |
331 | fix_range (const char *const_str) | |
332 | { | |
333 | int i, first, last; | |
334 | char *str, *dash, *comma; | |
335 | ||
336 | /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and | |
337 | REG2 are either register names or register numbers. The effect | |
338 | of this option is to mark the registers in the range from REG1 to | |
339 | REG2 as ``fixed'' so they won't be used by the compiler. This is | |
419df6a2 | 340 | used, e.g., to ensure that kernel mode code doesn't use fr4-fr31. */ |
a2017852 JDA |
341 | |
342 | i = strlen (const_str); | |
343 | str = (char *) alloca (i + 1); | |
344 | memcpy (str, const_str, i + 1); | |
345 | ||
346 | while (1) | |
347 | { | |
348 | dash = strchr (str, '-'); | |
349 | if (!dash) | |
350 | { | |
d4ee4d25 | 351 | warning (0, "value of -mfixed-range must have form REG1-REG2"); |
a2017852 JDA |
352 | return; |
353 | } | |
354 | *dash = '\0'; | |
355 | ||
356 | comma = strchr (dash + 1, ','); | |
357 | if (comma) | |
358 | *comma = '\0'; | |
359 | ||
360 | first = decode_reg_name (str); | |
361 | if (first < 0) | |
362 | { | |
d4ee4d25 | 363 | warning (0, "unknown register name: %s", str); |
a2017852 JDA |
364 | return; |
365 | } | |
366 | ||
367 | last = decode_reg_name (dash + 1); | |
368 | if (last < 0) | |
369 | { | |
d4ee4d25 | 370 | warning (0, "unknown register name: %s", dash + 1); |
a2017852 JDA |
371 | return; |
372 | } | |
373 | ||
374 | *dash = '-'; | |
375 | ||
376 | if (first > last) | |
377 | { | |
d4ee4d25 | 378 | warning (0, "%s-%s is an empty range", str, dash + 1); |
a2017852 JDA |
379 | return; |
380 | } | |
381 | ||
382 | for (i = first; i <= last; ++i) | |
383 | fixed_regs[i] = call_used_regs[i] = 1; | |
384 | ||
385 | if (!comma) | |
386 | break; | |
387 | ||
388 | *comma = ','; | |
389 | str = comma + 1; | |
390 | } | |
391 | ||
392 | /* Check if all floating point registers have been fixed. */ | |
393 | for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++) | |
394 | if (!fixed_regs[i]) | |
395 | break; | |
396 | ||
397 | if (i > FP_REG_LAST) | |
398 | target_flags |= MASK_DISABLE_FPREGS; | |
399 | } | |
400 | ||
66617831 | 401 | /* Implement TARGET_HANDLE_OPTION. */ |
6a73009d | 402 | |
66617831 RS |
403 | static bool |
404 | pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED) | |
405 | { | |
406 | switch (code) | |
ea3bfbfe | 407 | { |
66617831 RS |
408 | case OPT_mnosnake: |
409 | case OPT_mpa_risc_1_0: | |
410 | case OPT_march_1_0: | |
ea3bfbfe | 411 | target_flags &= ~(MASK_PA_11 | MASK_PA_20); |
66617831 RS |
412 | return true; |
413 | ||
414 | case OPT_msnake: | |
415 | case OPT_mpa_risc_1_1: | |
416 | case OPT_march_1_1: | |
ea3bfbfe JQ |
417 | target_flags &= ~MASK_PA_20; |
418 | target_flags |= MASK_PA_11; | |
66617831 | 419 | return true; |
ea3bfbfe | 420 | |
66617831 RS |
421 | case OPT_mpa_risc_2_0: |
422 | case OPT_march_2_0: | |
423 | target_flags |= MASK_PA_11 | MASK_PA_20; | |
424 | return true; | |
425 | ||
426 | case OPT_mschedule_: | |
427 | if (strcmp (arg, "8000") == 0) | |
428 | pa_cpu = PROCESSOR_8000; | |
429 | else if (strcmp (arg, "7100") == 0) | |
430 | pa_cpu = PROCESSOR_7100; | |
431 | else if (strcmp (arg, "700") == 0) | |
432 | pa_cpu = PROCESSOR_700; | |
433 | else if (strcmp (arg, "7100LC") == 0) | |
434 | pa_cpu = PROCESSOR_7100LC; | |
435 | else if (strcmp (arg, "7200") == 0) | |
436 | pa_cpu = PROCESSOR_7200; | |
437 | else if (strcmp (arg, "7300") == 0) | |
438 | pa_cpu = PROCESSOR_7300; | |
d711cf67 | 439 | else |
66617831 RS |
440 | return false; |
441 | return true; | |
d711cf67 | 442 | |
66617831 RS |
443 | case OPT_mfixed_range_: |
444 | fix_range (arg); | |
445 | return true; | |
a2017852 | 446 | |
66617831 RS |
447 | #if TARGET_HPUX |
448 | case OPT_munix_93: | |
449 | flag_pa_unix = 1993; | |
450 | return true; | |
451 | #endif | |
452 | ||
453 | #if TARGET_HPUX_10_10 | |
454 | case OPT_munix_95: | |
455 | flag_pa_unix = 1995; | |
456 | return true; | |
457 | #endif | |
458 | ||
459 | #if TARGET_HPUX_11_11 | |
460 | case OPT_munix_98: | |
461 | flag_pa_unix = 1998; | |
462 | return true; | |
463 | #endif | |
464 | ||
465 | default: | |
466 | return true; | |
467 | } | |
468 | } | |
469 | ||
470 | void | |
471 | override_options (void) | |
472 | { | |
1c31ecf6 JDA |
473 | /* Unconditional branches in the delay slot are not compatible with dwarf2 |
474 | call frame information. There is no benefit in using this optimization | |
475 | on PA8000 and later processors. */ | |
476 | if (pa_cpu >= PROCESSOR_8000 | |
477 | || (! USING_SJLJ_EXCEPTIONS && flag_exceptions) | |
478 | || flag_unwind_tables) | |
479 | target_flags &= ~MASK_JUMP_IN_DELAY; | |
480 | ||
6a73009d JL |
481 | if (flag_pic && TARGET_PORTABLE_RUNTIME) |
482 | { | |
ab532386 | 483 | warning (0, "PIC code generation is not supported in the portable runtime model"); |
6a73009d JL |
484 | } |
485 | ||
a7721dc0 | 486 | if (flag_pic && TARGET_FAST_INDIRECT_CALLS) |
6a73009d | 487 | { |
ab532386 | 488 | warning (0, "PIC code generation is not compatible with fast indirect calls"); |
6a73009d | 489 | } |
0eba3d30 | 490 | |
54eef932 JL |
491 | if (! TARGET_GAS && write_symbols != NO_DEBUG) |
492 | { | |
d4ee4d25 DD |
493 | warning (0, "-g is only supported when using GAS on this processor,"); |
494 | warning (0, "-g option disabled"); | |
54eef932 JL |
495 | write_symbols = NO_DEBUG; |
496 | } | |
d07d525a | 497 | |
7ee72796 JL |
498 | /* We only support the "big PIC" model now. And we always generate PIC |
499 | code when in 64bit mode. */ | |
500 | if (flag_pic == 1 || TARGET_64BIT) | |
520babc7 JL |
501 | flag_pic = 2; |
502 | ||
301d03af RS |
503 | /* We can't guarantee that .dword is available for 32-bit targets. */ |
504 | if (UNITS_PER_WORD == 4) | |
505 | targetm.asm_out.aligned_op.di = NULL; | |
506 | ||
507 | /* The unaligned ops are only available when using GAS. */ | |
508 | if (!TARGET_GAS) | |
509 | { | |
510 | targetm.asm_out.unaligned_op.hi = NULL; | |
511 | targetm.asm_out.unaligned_op.si = NULL; | |
512 | targetm.asm_out.unaligned_op.di = NULL; | |
513 | } | |
9a55eab3 JDA |
514 | |
515 | init_machine_status = pa_init_machine_status; | |
c47decad JL |
516 | } |
517 | ||
eab9e742 | 518 | static void |
b7849684 | 519 | pa_init_builtins (void) |
4677862a JDA |
520 | { |
521 | #ifdef DONT_HAVE_FPUTC_UNLOCKED | |
b53b5aa5 KG |
522 | built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = |
523 | built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED]; | |
524 | implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] | |
525 | = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED]; | |
4677862a | 526 | #endif |
dfcb2b51 | 527 | #if TARGET_HPUX_11 |
7d522000 SE |
528 | if (built_in_decls [BUILT_IN_FINITE]) |
529 | set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite"); | |
530 | if (built_in_decls [BUILT_IN_FINITEF]) | |
531 | set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef"); | |
532 | #endif | |
4677862a JDA |
533 | } |
534 | ||
9a55eab3 JDA |
535 | /* Function to init struct machine_function. |
536 | This will be called, via a pointer variable, | |
537 | from push_function_context. */ | |
538 | ||
539 | static struct machine_function * | |
540 | pa_init_machine_status (void) | |
541 | { | |
5ead67f6 | 542 | return GGC_CNEW (machine_function); |
9a55eab3 JDA |
543 | } |
544 | ||
d8f95bed JDA |
545 | /* If FROM is a probable pointer register, mark TO as a probable |
546 | pointer register with the same pointer alignment as FROM. */ | |
547 | ||
548 | static void | |
549 | copy_reg_pointer (rtx to, rtx from) | |
550 | { | |
551 | if (REG_POINTER (from)) | |
552 | mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from))); | |
553 | } | |
554 | ||
23f6f34f TG |
555 | /* Return 1 if X contains a symbolic expression. We know these |
556 | expressions will have one of a few well defined forms, so | |
c1d1b3f0 JL |
557 | we need only check those forms. */ |
558 | int | |
b7849684 | 559 | symbolic_expression_p (rtx x) |
c1d1b3f0 JL |
560 | { |
561 | ||
fe19a83d | 562 | /* Strip off any HIGH. */ |
c1d1b3f0 JL |
563 | if (GET_CODE (x) == HIGH) |
564 | x = XEXP (x, 0); | |
565 | ||
566 | return (symbolic_operand (x, VOIDmode)); | |
567 | } | |
568 | ||
47abc309 | 569 | /* Accept any constant that can be moved in one instruction into a |
6746a52e | 570 | general register. */ |
23f6f34f | 571 | int |
5b281141 | 572 | cint_ok_for_move (HOST_WIDE_INT ival) |
6746a52e JL |
573 | { |
574 | /* OK if ldo, ldil, or zdepi, can be used. */ | |
5b281141 JDA |
575 | return (VAL_14_BITS_P (ival) |
576 | || ldil_cint_p (ival) | |
577 | || zdepi_cint_p (ival)); | |
6746a52e | 578 | } |
188538df | 579 | \f |
a18c2c5f JDA |
580 | /* Return truth value of whether OP can be used as an operand in a |
581 | adddi3 insn. */ | |
582 | int | |
b7849684 | 583 | adddi3_operand (rtx op, enum machine_mode mode) |
a18c2c5f JDA |
584 | { |
585 | return (register_operand (op, mode) | |
586 | || (GET_CODE (op) == CONST_INT | |
587 | && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op)))); | |
588 | } | |
589 | ||
5b281141 JDA |
590 | /* True iff the operand OP can be used as the destination operand of |
591 | an integer store. This also implies the operand could be used as | |
592 | the source operand of an integer load. Symbolic, lo_sum and indexed | |
593 | memory operands are not allowed. We accept reloading pseudos and | |
594 | other memory operands. */ | |
595 | int | |
596 | integer_store_memory_operand (rtx op, enum machine_mode mode) | |
597 | { | |
598 | return ((reload_in_progress | |
599 | && REG_P (op) | |
600 | && REGNO (op) >= FIRST_PSEUDO_REGISTER | |
601 | && reg_renumber [REGNO (op)] < 0) | |
602 | || (GET_CODE (op) == MEM | |
603 | && (reload_in_progress || memory_address_p (mode, XEXP (op, 0))) | |
604 | && !symbolic_memory_operand (op, VOIDmode) | |
605 | && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0)) | |
606 | && !IS_INDEX_ADDR_P (XEXP (op, 0)))); | |
607 | } | |
608 | ||
609 | /* True iff ldil can be used to load this CONST_INT. The least | |
610 | significant 11 bits of the value must be zero and the value must | |
611 | not change sign when extended from 32 to 64 bits. */ | |
612 | int | |
613 | ldil_cint_p (HOST_WIDE_INT ival) | |
614 | { | |
615 | HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff); | |
616 | ||
617 | return x == 0 || x == ((HOST_WIDE_INT) -1 << 31); | |
618 | } | |
619 | ||
831c1763 | 620 | /* True iff zdepi can be used to generate this CONST_INT. |
a7b376ee | 621 | zdepi first sign extends a 5-bit signed number to a given field |
831c1763 | 622 | length, then places this field anywhere in a zero. */ |
0e7f4c19 | 623 | int |
b7849684 | 624 | zdepi_cint_p (unsigned HOST_WIDE_INT x) |
3a5babac | 625 | { |
0c235d7e | 626 | unsigned HOST_WIDE_INT lsb_mask, t; |
3a5babac TG |
627 | |
628 | /* This might not be obvious, but it's at least fast. | |
ddd5a7c1 | 629 | This function is critical; we don't have the time loops would take. */ |
a1747d2c TG |
630 | lsb_mask = x & -x; |
631 | t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1); | |
632 | /* Return true iff t is a power of two. */ | |
3a5babac TG |
633 | return ((t & (t - 1)) == 0); |
634 | } | |
635 | ||
23f6f34f TG |
636 | /* True iff depi or extru can be used to compute (reg & mask). |
637 | Accept bit pattern like these: | |
638 | 0....01....1 | |
639 | 1....10....0 | |
640 | 1..10..01..1 */ | |
0e7f4c19 | 641 | int |
b7849684 | 642 | and_mask_p (unsigned HOST_WIDE_INT mask) |
0e7f4c19 TG |
643 | { |
644 | mask = ~mask; | |
645 | mask += mask & -mask; | |
646 | return (mask & (mask - 1)) == 0; | |
647 | } | |
648 | ||
0e7f4c19 TG |
649 | /* True iff depi can be used to compute (reg | MASK). */ |
650 | int | |
b7849684 | 651 | ior_mask_p (unsigned HOST_WIDE_INT mask) |
0e7f4c19 TG |
652 | { |
653 | mask += mask & -mask; | |
654 | return (mask & (mask - 1)) == 0; | |
655 | } | |
188538df TG |
656 | \f |
657 | /* Legitimize PIC addresses. If the address is already | |
658 | position-independent, we return ORIG. Newly generated | |
659 | position-independent addresses go to REG. If we need more | |
660 | than one register, we lose. */ | |
661 | ||
662 | rtx | |
b7849684 | 663 | legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg) |
188538df TG |
664 | { |
665 | rtx pic_ref = orig; | |
666 | ||
06ae7eb1 | 667 | gcc_assert (!PA_SYMBOL_REF_TLS_P (orig)); |
51076f96 | 668 | |
abc95ed3 | 669 | /* Labels need special handling. */ |
519104fe | 670 | if (pic_label_operand (orig, mode)) |
6bb36601 | 671 | { |
4d811a05 JDA |
672 | rtx insn; |
673 | ||
b3d9ecf0 JL |
674 | /* We do not want to go through the movXX expanders here since that |
675 | would create recursion. | |
676 | ||
677 | Nor do we really want to call a generator for a named pattern | |
678 | since that requires multiple patterns if we want to support | |
679 | multiple word sizes. | |
680 | ||
681 | So instead we just emit the raw set, which avoids the movXX | |
682 | expanders completely. */ | |
d8f95bed | 683 | mark_reg_pointer (reg, BITS_PER_UNIT); |
4d811a05 JDA |
684 | insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig)); |
685 | ||
686 | /* Put a REG_EQUAL note on this insn, so that it can be optimized. */ | |
bbbbb16a | 687 | add_reg_note (insn, REG_EQUAL, orig); |
4d811a05 JDA |
688 | |
689 | /* During and after reload, we need to generate a REG_LABEL_OPERAND note | |
690 | and update LABEL_NUSES because this is not done automatically. */ | |
691 | if (reload_in_progress || reload_completed) | |
692 | { | |
693 | /* Extract LABEL_REF. */ | |
694 | if (GET_CODE (orig) == CONST) | |
695 | orig = XEXP (XEXP (orig, 0), 0); | |
696 | /* Extract CODE_LABEL. */ | |
697 | orig = XEXP (orig, 0); | |
65c5f2a6 | 698 | add_reg_note (insn, REG_LABEL_OPERAND, orig); |
4d811a05 JDA |
699 | LABEL_NUSES (orig)++; |
700 | } | |
e3b5732b | 701 | crtl->uses_pic_offset_table = 1; |
6bb36601 JL |
702 | return reg; |
703 | } | |
188538df TG |
704 | if (GET_CODE (orig) == SYMBOL_REF) |
705 | { | |
9ab81df2 JDA |
706 | rtx insn, tmp_reg; |
707 | ||
144d51f9 | 708 | gcc_assert (reg); |
188538df | 709 | |
9ab81df2 JDA |
710 | /* Before reload, allocate a temporary register for the intermediate |
711 | result. This allows the sequence to be deleted when the final | |
712 | result is unused and the insns are trivially dead. */ | |
713 | tmp_reg = ((reload_in_progress || reload_completed) | |
714 | ? reg : gen_reg_rtx (Pmode)); | |
715 | ||
f946206c | 716 | if (function_label_operand (orig, mode)) |
7813231b | 717 | { |
0b076fea JDA |
718 | /* Force function label into memory in word mode. */ |
719 | orig = XEXP (force_const_mem (word_mode, orig), 0); | |
7813231b JDA |
720 | /* Load plabel address from DLT. */ |
721 | emit_move_insn (tmp_reg, | |
722 | gen_rtx_PLUS (word_mode, pic_offset_table_rtx, | |
723 | gen_rtx_HIGH (word_mode, orig))); | |
724 | pic_ref | |
725 | = gen_const_mem (Pmode, | |
726 | gen_rtx_LO_SUM (Pmode, tmp_reg, | |
727 | gen_rtx_UNSPEC (Pmode, | |
542a8afa RH |
728 | gen_rtvec (1, orig), |
729 | UNSPEC_DLTIND14R))); | |
7813231b JDA |
730 | emit_move_insn (reg, pic_ref); |
731 | /* Now load address of function descriptor. */ | |
732 | pic_ref = gen_rtx_MEM (Pmode, reg); | |
733 | } | |
734 | else | |
735 | { | |
736 | /* Load symbol reference from DLT. */ | |
737 | emit_move_insn (tmp_reg, | |
738 | gen_rtx_PLUS (word_mode, pic_offset_table_rtx, | |
739 | gen_rtx_HIGH (word_mode, orig))); | |
740 | pic_ref | |
741 | = gen_const_mem (Pmode, | |
742 | gen_rtx_LO_SUM (Pmode, tmp_reg, | |
743 | gen_rtx_UNSPEC (Pmode, | |
744 | gen_rtvec (1, orig), | |
745 | UNSPEC_DLTIND14R))); | |
746 | } | |
c5c76735 | 747 | |
e3b5732b | 748 | crtl->uses_pic_offset_table = 1; |
d8f95bed | 749 | mark_reg_pointer (reg, BITS_PER_UNIT); |
9ab81df2 JDA |
750 | insn = emit_move_insn (reg, pic_ref); |
751 | ||
752 | /* Put a REG_EQUAL note on this insn, so that it can be optimized. */ | |
bd94cb6e | 753 | set_unique_reg_note (insn, REG_EQUAL, orig); |
9ab81df2 | 754 | |
188538df TG |
755 | return reg; |
756 | } | |
757 | else if (GET_CODE (orig) == CONST) | |
758 | { | |
f1c7ce82 | 759 | rtx base; |
188538df TG |
760 | |
761 | if (GET_CODE (XEXP (orig, 0)) == PLUS | |
762 | && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx) | |
763 | return orig; | |
764 | ||
144d51f9 NS |
765 | gcc_assert (reg); |
766 | gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS); | |
767 | ||
768 | base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg); | |
769 | orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode, | |
770 | base == reg ? 0 : reg); | |
d8f95bed | 771 | |
188538df TG |
772 | if (GET_CODE (orig) == CONST_INT) |
773 | { | |
a1747d2c | 774 | if (INT_14_BITS (orig)) |
ed8908e7 | 775 | return plus_constant (base, INTVAL (orig)); |
188538df TG |
776 | orig = force_reg (Pmode, orig); |
777 | } | |
ad2c71b7 | 778 | pic_ref = gen_rtx_PLUS (Pmode, base, orig); |
188538df TG |
779 | /* Likewise, should we set special REG_NOTEs here? */ |
780 | } | |
d8f95bed | 781 | |
188538df TG |
782 | return pic_ref; |
783 | } | |
784 | ||
51076f96 RC |
785 | static GTY(()) rtx gen_tls_tga; |
786 | ||
787 | static rtx | |
788 | gen_tls_get_addr (void) | |
789 | { | |
790 | if (!gen_tls_tga) | |
791 | gen_tls_tga = init_one_libfunc ("__tls_get_addr"); | |
792 | return gen_tls_tga; | |
793 | } | |
794 | ||
795 | static rtx | |
796 | hppa_tls_call (rtx arg) | |
797 | { | |
798 | rtx ret; | |
799 | ||
800 | ret = gen_reg_rtx (Pmode); | |
801 | emit_library_call_value (gen_tls_get_addr (), ret, | |
802 | LCT_CONST, Pmode, 1, arg, Pmode); | |
803 | ||
804 | return ret; | |
805 | } | |
806 | ||
807 | static rtx | |
808 | legitimize_tls_address (rtx addr) | |
809 | { | |
810 | rtx ret, insn, tmp, t1, t2, tp; | |
811 | enum tls_model model = SYMBOL_REF_TLS_MODEL (addr); | |
812 | ||
813 | switch (model) | |
814 | { | |
815 | case TLS_MODEL_GLOBAL_DYNAMIC: | |
816 | tmp = gen_reg_rtx (Pmode); | |
a758fa89 AJ |
817 | if (flag_pic) |
818 | emit_insn (gen_tgd_load_pic (tmp, addr)); | |
819 | else | |
820 | emit_insn (gen_tgd_load (tmp, addr)); | |
51076f96 RC |
821 | ret = hppa_tls_call (tmp); |
822 | break; | |
823 | ||
824 | case TLS_MODEL_LOCAL_DYNAMIC: | |
825 | ret = gen_reg_rtx (Pmode); | |
826 | tmp = gen_reg_rtx (Pmode); | |
827 | start_sequence (); | |
a758fa89 AJ |
828 | if (flag_pic) |
829 | emit_insn (gen_tld_load_pic (tmp, addr)); | |
830 | else | |
831 | emit_insn (gen_tld_load (tmp, addr)); | |
51076f96 RC |
832 | t1 = hppa_tls_call (tmp); |
833 | insn = get_insns (); | |
834 | end_sequence (); | |
835 | t2 = gen_reg_rtx (Pmode); | |
836 | emit_libcall_block (insn, t2, t1, | |
837 | gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), | |
838 | UNSPEC_TLSLDBASE)); | |
839 | emit_insn (gen_tld_offset_load (ret, addr, t2)); | |
840 | break; | |
841 | ||
842 | case TLS_MODEL_INITIAL_EXEC: | |
843 | tp = gen_reg_rtx (Pmode); | |
844 | tmp = gen_reg_rtx (Pmode); | |
845 | ret = gen_reg_rtx (Pmode); | |
846 | emit_insn (gen_tp_load (tp)); | |
a758fa89 AJ |
847 | if (flag_pic) |
848 | emit_insn (gen_tie_load_pic (tmp, addr)); | |
849 | else | |
850 | emit_insn (gen_tie_load (tmp, addr)); | |
51076f96 RC |
851 | emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp)); |
852 | break; | |
853 | ||
854 | case TLS_MODEL_LOCAL_EXEC: | |
855 | tp = gen_reg_rtx (Pmode); | |
856 | ret = gen_reg_rtx (Pmode); | |
857 | emit_insn (gen_tp_load (tp)); | |
858 | emit_insn (gen_tle_load (ret, addr, tp)); | |
859 | break; | |
860 | ||
861 | default: | |
06ae7eb1 | 862 | gcc_unreachable (); |
51076f96 RC |
863 | } |
864 | ||
865 | return ret; | |
866 | } | |
867 | ||
c1d1b3f0 JL |
868 | /* Try machine-dependent ways of modifying an illegitimate address |
869 | to be legitimate. If we find one, return the new, valid address. | |
870 | This macro is used in only one place: `memory_address' in explow.c. | |
871 | ||
872 | OLDX is the address as it was before break_out_memory_refs was called. | |
873 | In some cases it is useful to look at this to decide what needs to be done. | |
874 | ||
875 | MODE and WIN are passed so that this macro can use | |
876 | GO_IF_LEGITIMATE_ADDRESS. | |
877 | ||
878 | It is always safe for this macro to do nothing. It exists to recognize | |
23f6f34f | 879 | opportunities to optimize the output. |
c1d1b3f0 JL |
880 | |
881 | For the PA, transform: | |
882 | ||
883 | memory(X + <large int>) | |
884 | ||
885 | into: | |
886 | ||
887 | if (<large int> & mask) >= 16 | |
888 | Y = (<large int> & ~mask) + mask + 1 Round up. | |
889 | else | |
890 | Y = (<large int> & ~mask) Round down. | |
891 | Z = X + Y | |
892 | memory (Z + (<large int> - Y)); | |
893 | ||
23f6f34f | 894 | This is for CSE to find several similar references, and only use one Z. |
c1d1b3f0 | 895 | |
1e5f1716 | 896 | X can either be a SYMBOL_REF or REG, but because combine cannot |
c1d1b3f0 JL |
897 | perform a 4->2 combination we do nothing for SYMBOL_REF + D where |
898 | D will not fit in 14 bits. | |
899 | ||
900 | MODE_FLOAT references allow displacements which fit in 5 bits, so use | |
23f6f34f | 901 | 0x1f as the mask. |
c1d1b3f0 JL |
902 | |
903 | MODE_INT references allow displacements which fit in 14 bits, so use | |
23f6f34f | 904 | 0x3fff as the mask. |
c1d1b3f0 JL |
905 | |
906 | This relies on the fact that most mode MODE_FLOAT references will use FP | |
907 | registers and most mode MODE_INT references will use integer registers. | |
908 | (In the rare case of an FP register used in an integer MODE, we depend | |
909 | on secondary reloads to clean things up.) | |
910 | ||
911 | ||
912 | It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special | |
913 | manner if Y is 2, 4, or 8. (allows more shadd insns and shifted indexed | |
ddd5a7c1 | 914 | addressing modes to be used). |
c1d1b3f0 JL |
915 | |
916 | Put X and Z into registers. Then put the entire expression into | |
917 | a register. */ | |
918 | ||
919 | rtx | |
b7849684 JE |
920 | hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, |
921 | enum machine_mode mode) | |
c1d1b3f0 | 922 | { |
c1d1b3f0 JL |
923 | rtx orig = x; |
924 | ||
d8f95bed JDA |
925 | /* We need to canonicalize the order of operands in unscaled indexed |
926 | addresses since the code that checks if an address is valid doesn't | |
927 | always try both orders. */ | |
928 | if (!TARGET_NO_SPACE_REGS | |
929 | && GET_CODE (x) == PLUS | |
930 | && GET_MODE (x) == Pmode | |
931 | && REG_P (XEXP (x, 0)) | |
932 | && REG_P (XEXP (x, 1)) | |
933 | && REG_POINTER (XEXP (x, 0)) | |
934 | && !REG_POINTER (XEXP (x, 1))) | |
935 | return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0)); | |
936 | ||
51076f96 RC |
937 | if (PA_SYMBOL_REF_TLS_P (x)) |
938 | return legitimize_tls_address (x); | |
939 | else if (flag_pic) | |
6bb36601 JL |
940 | return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode)); |
941 | ||
fe19a83d | 942 | /* Strip off CONST. */ |
c1d1b3f0 JL |
943 | if (GET_CODE (x) == CONST) |
944 | x = XEXP (x, 0); | |
945 | ||
68944452 JL |
946 | /* Special case. Get the SYMBOL_REF into a register and use indexing. |
947 | That should always be safe. */ | |
948 | if (GET_CODE (x) == PLUS | |
949 | && GET_CODE (XEXP (x, 0)) == REG | |
950 | && GET_CODE (XEXP (x, 1)) == SYMBOL_REF) | |
951 | { | |
690d4228 JL |
952 | rtx reg = force_reg (Pmode, XEXP (x, 1)); |
953 | return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0))); | |
68944452 JL |
954 | } |
955 | ||
326bc2de JL |
956 | /* Note we must reject symbols which represent function addresses |
957 | since the assembler/linker can't handle arithmetic on plabels. */ | |
c1d1b3f0 JL |
958 | if (GET_CODE (x) == PLUS |
959 | && GET_CODE (XEXP (x, 1)) == CONST_INT | |
326bc2de JL |
960 | && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF |
961 | && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0))) | |
c1d1b3f0 JL |
962 | || GET_CODE (XEXP (x, 0)) == REG)) |
963 | { | |
964 | rtx int_part, ptr_reg; | |
965 | int newoffset; | |
966 | int offset = INTVAL (XEXP (x, 1)); | |
f9bd8d8e JL |
967 | int mask; |
968 | ||
969 | mask = (GET_MODE_CLASS (mode) == MODE_FLOAT | |
94919bd2 | 970 | ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff); |
c1d1b3f0 | 971 | |
23f6f34f | 972 | /* Choose which way to round the offset. Round up if we |
c1d1b3f0 JL |
973 | are >= halfway to the next boundary. */ |
974 | if ((offset & mask) >= ((mask + 1) / 2)) | |
975 | newoffset = (offset & ~ mask) + mask + 1; | |
976 | else | |
977 | newoffset = (offset & ~ mask); | |
978 | ||
979 | /* If the newoffset will not fit in 14 bits (ldo), then | |
980 | handling this would take 4 or 5 instructions (2 to load | |
981 | the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to | |
982 | add the new offset and the SYMBOL_REF.) Combine can | |
983 | not handle 4->2 or 5->2 combinations, so do not create | |
984 | them. */ | |
985 | if (! VAL_14_BITS_P (newoffset) | |
986 | && GET_CODE (XEXP (x, 0)) == SYMBOL_REF) | |
987 | { | |
c5c76735 | 988 | rtx const_part = plus_constant (XEXP (x, 0), newoffset); |
c1d1b3f0 | 989 | rtx tmp_reg |
e5e28962 | 990 | = force_reg (Pmode, |
ad2c71b7 | 991 | gen_rtx_HIGH (Pmode, const_part)); |
c1d1b3f0 | 992 | ptr_reg |
e5e28962 | 993 | = force_reg (Pmode, |
c5c76735 JL |
994 | gen_rtx_LO_SUM (Pmode, |
995 | tmp_reg, const_part)); | |
c1d1b3f0 JL |
996 | } |
997 | else | |
998 | { | |
999 | if (! VAL_14_BITS_P (newoffset)) | |
e5e28962 | 1000 | int_part = force_reg (Pmode, GEN_INT (newoffset)); |
c1d1b3f0 JL |
1001 | else |
1002 | int_part = GEN_INT (newoffset); | |
1003 | ||
e5e28962 | 1004 | ptr_reg = force_reg (Pmode, |
ad2c71b7 JL |
1005 | gen_rtx_PLUS (Pmode, |
1006 | force_reg (Pmode, XEXP (x, 0)), | |
1007 | int_part)); | |
c1d1b3f0 JL |
1008 | } |
1009 | return plus_constant (ptr_reg, offset - newoffset); | |
1010 | } | |
7426c959 | 1011 | |
78c0acfd | 1012 | /* Handle (plus (mult (a) (shadd_constant)) (b)). */ |
7426c959 | 1013 | |
c1d1b3f0 JL |
1014 | if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT |
1015 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT | |
7426c959 | 1016 | && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))) |
ec8e098d | 1017 | && (OBJECT_P (XEXP (x, 1)) |
7426c959 JL |
1018 | || GET_CODE (XEXP (x, 1)) == SUBREG) |
1019 | && GET_CODE (XEXP (x, 1)) != CONST) | |
c1d1b3f0 JL |
1020 | { |
1021 | int val = INTVAL (XEXP (XEXP (x, 0), 1)); | |
1022 | rtx reg1, reg2; | |
78c0acfd JL |
1023 | |
1024 | reg1 = XEXP (x, 1); | |
1025 | if (GET_CODE (reg1) != REG) | |
1026 | reg1 = force_reg (Pmode, force_operand (reg1, 0)); | |
1027 | ||
1028 | reg2 = XEXP (XEXP (x, 0), 0); | |
1029 | if (GET_CODE (reg2) != REG) | |
1030 | reg2 = force_reg (Pmode, force_operand (reg2, 0)); | |
1031 | ||
ad2c71b7 | 1032 | return force_reg (Pmode, gen_rtx_PLUS (Pmode, |
c5c76735 JL |
1033 | gen_rtx_MULT (Pmode, |
1034 | reg2, | |
1035 | GEN_INT (val)), | |
ad2c71b7 | 1036 | reg1)); |
c1d1b3f0 | 1037 | } |
7426c959 | 1038 | |
305123ba JL |
1039 | /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)). |
1040 | ||
1041 | Only do so for floating point modes since this is more speculative | |
1042 | and we lose if it's an integer store. */ | |
78c0acfd | 1043 | if (GET_CODE (x) == PLUS |
305123ba JL |
1044 | && GET_CODE (XEXP (x, 0)) == PLUS |
1045 | && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT | |
1046 | && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT | |
78c0acfd JL |
1047 | && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))) |
1048 | && (mode == SFmode || mode == DFmode)) | |
305123ba | 1049 | { |
78c0acfd JL |
1050 | |
1051 | /* First, try and figure out what to use as a base register. */ | |
1052 | rtx reg1, reg2, base, idx, orig_base; | |
1053 | ||
1054 | reg1 = XEXP (XEXP (x, 0), 1); | |
1055 | reg2 = XEXP (x, 1); | |
1056 | base = NULL_RTX; | |
1057 | idx = NULL_RTX; | |
1058 | ||
1059 | /* Make sure they're both regs. If one was a SYMBOL_REF [+ const], | |
3502dc9c JDA |
1060 | then emit_move_sequence will turn on REG_POINTER so we'll know |
1061 | it's a base register below. */ | |
78c0acfd JL |
1062 | if (GET_CODE (reg1) != REG) |
1063 | reg1 = force_reg (Pmode, force_operand (reg1, 0)); | |
1064 | ||
1065 | if (GET_CODE (reg2) != REG) | |
1066 | reg2 = force_reg (Pmode, force_operand (reg2, 0)); | |
1067 | ||
1068 | /* Figure out what the base and index are. */ | |
6619e96c | 1069 | |
78c0acfd | 1070 | if (GET_CODE (reg1) == REG |
3502dc9c | 1071 | && REG_POINTER (reg1)) |
78c0acfd JL |
1072 | { |
1073 | base = reg1; | |
1074 | orig_base = XEXP (XEXP (x, 0), 1); | |
ad2c71b7 JL |
1075 | idx = gen_rtx_PLUS (Pmode, |
1076 | gen_rtx_MULT (Pmode, | |
1077 | XEXP (XEXP (XEXP (x, 0), 0), 0), | |
1078 | XEXP (XEXP (XEXP (x, 0), 0), 1)), | |
1079 | XEXP (x, 1)); | |
78c0acfd JL |
1080 | } |
1081 | else if (GET_CODE (reg2) == REG | |
3502dc9c | 1082 | && REG_POINTER (reg2)) |
78c0acfd JL |
1083 | { |
1084 | base = reg2; | |
1085 | orig_base = XEXP (x, 1); | |
1086 | idx = XEXP (x, 0); | |
1087 | } | |
1088 | ||
1089 | if (base == 0) | |
31d4f31f | 1090 | return orig; |
78c0acfd JL |
1091 | |
1092 | /* If the index adds a large constant, try to scale the | |
1093 | constant so that it can be loaded with only one insn. */ | |
1094 | if (GET_CODE (XEXP (idx, 1)) == CONST_INT | |
1095 | && VAL_14_BITS_P (INTVAL (XEXP (idx, 1)) | |
1096 | / INTVAL (XEXP (XEXP (idx, 0), 1))) | |
1097 | && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0) | |
1098 | { | |
1099 | /* Divide the CONST_INT by the scale factor, then add it to A. */ | |
1100 | int val = INTVAL (XEXP (idx, 1)); | |
1101 | ||
1102 | val /= INTVAL (XEXP (XEXP (idx, 0), 1)); | |
1103 | reg1 = XEXP (XEXP (idx, 0), 0); | |
1104 | if (GET_CODE (reg1) != REG) | |
1105 | reg1 = force_reg (Pmode, force_operand (reg1, 0)); | |
1106 | ||
ad2c71b7 | 1107 | reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val))); |
78c0acfd JL |
1108 | |
1109 | /* We can now generate a simple scaled indexed address. */ | |
c5c76735 JL |
1110 | return |
1111 | force_reg | |
1112 | (Pmode, gen_rtx_PLUS (Pmode, | |
1113 | gen_rtx_MULT (Pmode, reg1, | |
1114 | XEXP (XEXP (idx, 0), 1)), | |
1115 | base)); | |
78c0acfd JL |
1116 | } |
1117 | ||
1118 | /* If B + C is still a valid base register, then add them. */ | |
1119 | if (GET_CODE (XEXP (idx, 1)) == CONST_INT | |
1120 | && INTVAL (XEXP (idx, 1)) <= 4096 | |
1121 | && INTVAL (XEXP (idx, 1)) >= -4096) | |
1122 | { | |
1123 | int val = INTVAL (XEXP (XEXP (idx, 0), 1)); | |
1124 | rtx reg1, reg2; | |
1125 | ||
ad2c71b7 | 1126 | reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1))); |
78c0acfd JL |
1127 | |
1128 | reg2 = XEXP (XEXP (idx, 0), 0); | |
1129 | if (GET_CODE (reg2) != CONST_INT) | |
1130 | reg2 = force_reg (Pmode, force_operand (reg2, 0)); | |
1131 | ||
ad2c71b7 | 1132 | return force_reg (Pmode, gen_rtx_PLUS (Pmode, |
c5c76735 JL |
1133 | gen_rtx_MULT (Pmode, |
1134 | reg2, | |
ad2c71b7 JL |
1135 | GEN_INT (val)), |
1136 | reg1)); | |
78c0acfd JL |
1137 | } |
1138 | ||
1139 | /* Get the index into a register, then add the base + index and | |
1140 | return a register holding the result. */ | |
1141 | ||
1142 | /* First get A into a register. */ | |
1143 | reg1 = XEXP (XEXP (idx, 0), 0); | |
1144 | if (GET_CODE (reg1) != REG) | |
1145 | reg1 = force_reg (Pmode, force_operand (reg1, 0)); | |
1146 | ||
1147 | /* And get B into a register. */ | |
1148 | reg2 = XEXP (idx, 1); | |
1149 | if (GET_CODE (reg2) != REG) | |
1150 | reg2 = force_reg (Pmode, force_operand (reg2, 0)); | |
1151 | ||
ad2c71b7 JL |
1152 | reg1 = force_reg (Pmode, |
1153 | gen_rtx_PLUS (Pmode, | |
1154 | gen_rtx_MULT (Pmode, reg1, | |
1155 | XEXP (XEXP (idx, 0), 1)), | |
1156 | reg2)); | |
78c0acfd JL |
1157 | |
1158 | /* Add the result to our base register and return. */ | |
ad2c71b7 | 1159 | return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1)); |
6619e96c | 1160 | |
305123ba JL |
1161 | } |
1162 | ||
23f6f34f | 1163 | /* Uh-oh. We might have an address for x[n-100000]. This needs |
c2827c50 JL |
1164 | special handling to avoid creating an indexed memory address |
1165 | with x-100000 as the base. | |
6619e96c | 1166 | |
c2827c50 JL |
1167 | If the constant part is small enough, then it's still safe because |
1168 | there is a guard page at the beginning and end of the data segment. | |
1169 | ||
1170 | Scaled references are common enough that we want to try and rearrange the | |
1171 | terms so that we can use indexing for these addresses too. Only | |
305123ba | 1172 | do the optimization for floatint point modes. */ |
7426c959 | 1173 | |
c2827c50 JL |
1174 | if (GET_CODE (x) == PLUS |
1175 | && symbolic_expression_p (XEXP (x, 1))) | |
7426c959 JL |
1176 | { |
1177 | /* Ugly. We modify things here so that the address offset specified | |
1178 | by the index expression is computed first, then added to x to form | |
c2827c50 | 1179 | the entire address. */ |
7426c959 | 1180 | |
305123ba | 1181 | rtx regx1, regx2, regy1, regy2, y; |
7426c959 JL |
1182 | |
1183 | /* Strip off any CONST. */ | |
1184 | y = XEXP (x, 1); | |
1185 | if (GET_CODE (y) == CONST) | |
1186 | y = XEXP (y, 0); | |
1187 | ||
77fc9313 RK |
1188 | if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS) |
1189 | { | |
305123ba JL |
1190 | /* See if this looks like |
1191 | (plus (mult (reg) (shadd_const)) | |
1192 | (const (plus (symbol_ref) (const_int)))) | |
1193 | ||
78c0acfd | 1194 | Where const_int is small. In that case the const |
6619e96c | 1195 | expression is a valid pointer for indexing. |
78c0acfd JL |
1196 | |
1197 | If const_int is big, but can be divided evenly by shadd_const | |
1198 | and added to (reg). This allows more scaled indexed addresses. */ | |
1199 | if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF | |
1200 | && GET_CODE (XEXP (x, 0)) == MULT | |
305123ba | 1201 | && GET_CODE (XEXP (y, 1)) == CONST_INT |
78c0acfd JL |
1202 | && INTVAL (XEXP (y, 1)) >= -4096 |
1203 | && INTVAL (XEXP (y, 1)) <= 4095 | |
1204 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT | |
1205 | && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))) | |
1206 | { | |
1207 | int val = INTVAL (XEXP (XEXP (x, 0), 1)); | |
1208 | rtx reg1, reg2; | |
1209 | ||
1210 | reg1 = XEXP (x, 1); | |
1211 | if (GET_CODE (reg1) != REG) | |
1212 | reg1 = force_reg (Pmode, force_operand (reg1, 0)); | |
1213 | ||
1214 | reg2 = XEXP (XEXP (x, 0), 0); | |
1215 | if (GET_CODE (reg2) != REG) | |
1216 | reg2 = force_reg (Pmode, force_operand (reg2, 0)); | |
1217 | ||
ad2c71b7 JL |
1218 | return force_reg (Pmode, |
1219 | gen_rtx_PLUS (Pmode, | |
c5c76735 JL |
1220 | gen_rtx_MULT (Pmode, |
1221 | reg2, | |
ad2c71b7 | 1222 | GEN_INT (val)), |
c5c76735 | 1223 | reg1)); |
78c0acfd JL |
1224 | } |
1225 | else if ((mode == DFmode || mode == SFmode) | |
1226 | && GET_CODE (XEXP (y, 0)) == SYMBOL_REF | |
1227 | && GET_CODE (XEXP (x, 0)) == MULT | |
1228 | && GET_CODE (XEXP (y, 1)) == CONST_INT | |
1229 | && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0 | |
1230 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT | |
1231 | && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))) | |
305123ba JL |
1232 | { |
1233 | regx1 | |
1234 | = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1)) | |
1235 | / INTVAL (XEXP (XEXP (x, 0), 1)))); | |
1236 | regx2 = XEXP (XEXP (x, 0), 0); | |
1237 | if (GET_CODE (regx2) != REG) | |
1238 | regx2 = force_reg (Pmode, force_operand (regx2, 0)); | |
ad2c71b7 JL |
1239 | regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode, |
1240 | regx2, regx1)); | |
c5c76735 JL |
1241 | return |
1242 | force_reg (Pmode, | |
1243 | gen_rtx_PLUS (Pmode, | |
1244 | gen_rtx_MULT (Pmode, regx2, | |
1245 | XEXP (XEXP (x, 0), 1)), | |
1246 | force_reg (Pmode, XEXP (y, 0)))); | |
305123ba | 1247 | } |
c2827c50 JL |
1248 | else if (GET_CODE (XEXP (y, 1)) == CONST_INT |
1249 | && INTVAL (XEXP (y, 1)) >= -4096 | |
1250 | && INTVAL (XEXP (y, 1)) <= 4095) | |
1251 | { | |
1252 | /* This is safe because of the guard page at the | |
1253 | beginning and end of the data space. Just | |
1254 | return the original address. */ | |
1255 | return orig; | |
1256 | } | |
305123ba JL |
1257 | else |
1258 | { | |
1259 | /* Doesn't look like one we can optimize. */ | |
1260 | regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0)); | |
1261 | regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0)); | |
1262 | regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0)); | |
1263 | regx1 = force_reg (Pmode, | |
ad2c71b7 JL |
1264 | gen_rtx_fmt_ee (GET_CODE (y), Pmode, |
1265 | regx1, regy2)); | |
1266 | return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1)); | |
305123ba | 1267 | } |
77fc9313 | 1268 | } |
7426c959 JL |
1269 | } |
1270 | ||
c1d1b3f0 JL |
1271 | return orig; |
1272 | } | |
1273 | ||
188538df TG |
1274 | /* For the HPPA, REG and REG+CONST is cost 0 |
1275 | and addresses involving symbolic constants are cost 2. | |
1276 | ||
1277 | PIC addresses are very expensive. | |
1278 | ||
1279 | It is no coincidence that this has the same structure | |
1280 | as GO_IF_LEGITIMATE_ADDRESS. */ | |
dcefdf67 RH |
1281 | |
1282 | static int | |
f40751dd JH |
1283 | hppa_address_cost (rtx X, |
1284 | bool speed ATTRIBUTE_UNUSED) | |
188538df | 1285 | { |
dcefdf67 RH |
1286 | switch (GET_CODE (X)) |
1287 | { | |
1288 | case REG: | |
1289 | case PLUS: | |
1290 | case LO_SUM: | |
188538df | 1291 | return 1; |
dcefdf67 RH |
1292 | case HIGH: |
1293 | return 2; | |
1294 | default: | |
1295 | return 4; | |
1296 | } | |
188538df TG |
1297 | } |
1298 | ||
3c50106f RH |
1299 | /* Compute a (partial) cost for rtx X. Return true if the complete |
1300 | cost has been computed, and false if subexpressions should be | |
1301 | scanned. In either case, *TOTAL contains the cost result. */ | |
1302 | ||
1303 | static bool | |
f40751dd JH |
1304 | hppa_rtx_costs (rtx x, int code, int outer_code, int *total, |
1305 | bool speed ATTRIBUTE_UNUSED) | |
3c50106f RH |
1306 | { |
1307 | switch (code) | |
1308 | { | |
1309 | case CONST_INT: | |
1310 | if (INTVAL (x) == 0) | |
1311 | *total = 0; | |
1312 | else if (INT_14_BITS (x)) | |
1313 | *total = 1; | |
1314 | else | |
1315 | *total = 2; | |
1316 | return true; | |
1317 | ||
1318 | case HIGH: | |
1319 | *total = 2; | |
1320 | return true; | |
1321 | ||
1322 | case CONST: | |
1323 | case LABEL_REF: | |
1324 | case SYMBOL_REF: | |
1325 | *total = 4; | |
1326 | return true; | |
1327 | ||
1328 | case CONST_DOUBLE: | |
1329 | if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode)) | |
1330 | && outer_code != SET) | |
1331 | *total = 0; | |
1332 | else | |
1333 | *total = 8; | |
1334 | return true; | |
1335 | ||
1336 | case MULT: | |
1337 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT) | |
1338 | *total = COSTS_N_INSNS (3); | |
1339 | else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT) | |
1340 | *total = COSTS_N_INSNS (8); | |
1341 | else | |
1342 | *total = COSTS_N_INSNS (20); | |
1343 | return true; | |
1344 | ||
1345 | case DIV: | |
1346 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT) | |
1347 | { | |
1348 | *total = COSTS_N_INSNS (14); | |
1349 | return true; | |
1350 | } | |
5efb1046 | 1351 | /* FALLTHRU */ |
3c50106f RH |
1352 | |
1353 | case UDIV: | |
1354 | case MOD: | |
1355 | case UMOD: | |
1356 | *total = COSTS_N_INSNS (60); | |
1357 | return true; | |
1358 | ||
1359 | case PLUS: /* this includes shNadd insns */ | |
1360 | case MINUS: | |
1361 | if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT) | |
1362 | *total = COSTS_N_INSNS (3); | |
1363 | else | |
1364 | *total = COSTS_N_INSNS (1); | |
1365 | return true; | |
1366 | ||
1367 | case ASHIFT: | |
1368 | case ASHIFTRT: | |
1369 | case LSHIFTRT: | |
1370 | *total = COSTS_N_INSNS (1); | |
1371 | return true; | |
1372 | ||
1373 | default: | |
1374 | return false; | |
1375 | } | |
1376 | } | |
1377 | ||
6619e96c AM |
1378 | /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a |
1379 | new rtx with the correct mode. */ | |
1380 | static inline rtx | |
b7849684 | 1381 | force_mode (enum machine_mode mode, rtx orig) |
6619e96c AM |
1382 | { |
1383 | if (mode == GET_MODE (orig)) | |
1384 | return orig; | |
1385 | ||
144d51f9 | 1386 | gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER); |
6619e96c AM |
1387 | |
1388 | return gen_rtx_REG (mode, REGNO (orig)); | |
1389 | } | |
1390 | ||
51076f96 RC |
1391 | /* Return 1 if *X is a thread-local symbol. */ |
1392 | ||
1393 | static int | |
1394 | pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED) | |
1395 | { | |
1396 | return PA_SYMBOL_REF_TLS_P (*x); | |
1397 | } | |
1398 | ||
1399 | /* Return 1 if X contains a thread-local symbol. */ | |
1400 | ||
1401 | bool | |
1402 | pa_tls_referenced_p (rtx x) | |
1403 | { | |
1404 | if (!TARGET_HAVE_TLS) | |
1405 | return false; | |
1406 | ||
1407 | return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0); | |
1408 | } | |
1409 | ||
188538df TG |
1410 | /* Emit insns to move operands[1] into operands[0]. |
1411 | ||
1412 | Return 1 if we have written out everything that needs to be done to | |
1413 | do the move. Otherwise, return 0 and the caller will emit the move | |
6619e96c | 1414 | normally. |
1b8ad134 JL |
1415 | |
1416 | Note SCRATCH_REG may not be in the proper mode depending on how it | |
c1207243 | 1417 | will be used. This routine is responsible for creating a new copy |
1b8ad134 | 1418 | of SCRATCH_REG in the proper mode. */ |
188538df TG |
1419 | |
1420 | int | |
b7849684 | 1421 | emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg) |
188538df TG |
1422 | { |
1423 | register rtx operand0 = operands[0]; | |
1424 | register rtx operand1 = operands[1]; | |
428be702 | 1425 | register rtx tem; |
188538df | 1426 | |
d8f95bed JDA |
1427 | /* We can only handle indexed addresses in the destination operand |
1428 | of floating point stores. Thus, we need to break out indexed | |
1429 | addresses from the destination operand. */ | |
1430 | if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0))) | |
1431 | { | |
b3a13419 | 1432 | gcc_assert (can_create_pseudo_p ()); |
d8f95bed JDA |
1433 | |
1434 | tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0)); | |
1435 | operand0 = replace_equiv_address (operand0, tem); | |
1436 | } | |
1437 | ||
1438 | /* On targets with non-equivalent space registers, break out unscaled | |
1439 | indexed addresses from the source operand before the final CSE. | |
1440 | We have to do this because the REG_POINTER flag is not correctly | |
1441 | carried through various optimization passes and CSE may substitute | |
1442 | a pseudo without the pointer set for one with the pointer set. As | |
71cc389b | 1443 | a result, we loose various opportunities to create insns with |
d8f95bed JDA |
1444 | unscaled indexed addresses. */ |
1445 | if (!TARGET_NO_SPACE_REGS | |
1446 | && !cse_not_expected | |
1447 | && GET_CODE (operand1) == MEM | |
1448 | && GET_CODE (XEXP (operand1, 0)) == PLUS | |
1449 | && REG_P (XEXP (XEXP (operand1, 0), 0)) | |
1450 | && REG_P (XEXP (XEXP (operand1, 0), 1))) | |
1451 | operand1 | |
1452 | = replace_equiv_address (operand1, | |
1453 | copy_to_mode_reg (Pmode, XEXP (operand1, 0))); | |
1454 | ||
54d65918 JL |
1455 | if (scratch_reg |
1456 | && reload_in_progress && GET_CODE (operand0) == REG | |
8a642d97 RK |
1457 | && REGNO (operand0) >= FIRST_PSEUDO_REGISTER) |
1458 | operand0 = reg_equiv_mem[REGNO (operand0)]; | |
54d65918 JL |
1459 | else if (scratch_reg |
1460 | && reload_in_progress && GET_CODE (operand0) == SUBREG | |
8a642d97 RK |
1461 | && GET_CODE (SUBREG_REG (operand0)) == REG |
1462 | && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER) | |
27a2c2b5 | 1463 | { |
ddef6bc7 | 1464 | /* We must not alter SUBREG_BYTE (operand0) since that would confuse |
71443006 JL |
1465 | the code which tracks sets/uses for delete_output_reload. */ |
1466 | rtx temp = gen_rtx_SUBREG (GET_MODE (operand0), | |
1467 | reg_equiv_mem [REGNO (SUBREG_REG (operand0))], | |
ddef6bc7 | 1468 | SUBREG_BYTE (operand0)); |
847898f6 | 1469 | operand0 = alter_subreg (&temp); |
27a2c2b5 | 1470 | } |
8a642d97 | 1471 | |
54d65918 JL |
1472 | if (scratch_reg |
1473 | && reload_in_progress && GET_CODE (operand1) == REG | |
8a642d97 RK |
1474 | && REGNO (operand1) >= FIRST_PSEUDO_REGISTER) |
1475 | operand1 = reg_equiv_mem[REGNO (operand1)]; | |
54d65918 JL |
1476 | else if (scratch_reg |
1477 | && reload_in_progress && GET_CODE (operand1) == SUBREG | |
8a642d97 RK |
1478 | && GET_CODE (SUBREG_REG (operand1)) == REG |
1479 | && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER) | |
27a2c2b5 | 1480 | { |
ddef6bc7 | 1481 | /* We must not alter SUBREG_BYTE (operand0) since that would confuse |
71443006 JL |
1482 | the code which tracks sets/uses for delete_output_reload. */ |
1483 | rtx temp = gen_rtx_SUBREG (GET_MODE (operand1), | |
1484 | reg_equiv_mem [REGNO (SUBREG_REG (operand1))], | |
ddef6bc7 | 1485 | SUBREG_BYTE (operand1)); |
847898f6 | 1486 | operand1 = alter_subreg (&temp); |
27a2c2b5 | 1487 | } |
8a642d97 | 1488 | |
54d65918 | 1489 | if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM |
428be702 RK |
1490 | && ((tem = find_replacement (&XEXP (operand0, 0))) |
1491 | != XEXP (operand0, 0))) | |
7c95bbfb | 1492 | operand0 = replace_equiv_address (operand0, tem); |
d8f95bed | 1493 | |
54d65918 | 1494 | if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM |
428be702 RK |
1495 | && ((tem = find_replacement (&XEXP (operand1, 0))) |
1496 | != XEXP (operand1, 0))) | |
7c95bbfb | 1497 | operand1 = replace_equiv_address (operand1, tem); |
428be702 | 1498 | |
4d3cea21 | 1499 | /* Handle secondary reloads for loads/stores of FP registers from |
cae80939 | 1500 | REG+D addresses where D does not fit in 5 or 14 bits, including |
68944452 | 1501 | (subreg (mem (addr))) cases. */ |
a4295210 JDA |
1502 | if (scratch_reg |
1503 | && fp_reg_operand (operand0, mode) | |
42fbe27f | 1504 | && ((GET_CODE (operand1) == MEM |
cae80939 JDA |
1505 | && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode), |
1506 | XEXP (operand1, 0))) | |
42fbe27f JL |
1507 | || ((GET_CODE (operand1) == SUBREG |
1508 | && GET_CODE (XEXP (operand1, 0)) == MEM | |
cae80939 JDA |
1509 | && !memory_address_p ((GET_MODE_SIZE (mode) == 4 |
1510 | ? SFmode : DFmode), | |
1511 | XEXP (XEXP (operand1, 0), 0)))))) | |
d2a94ec0 | 1512 | { |
42fbe27f JL |
1513 | if (GET_CODE (operand1) == SUBREG) |
1514 | operand1 = XEXP (operand1, 0); | |
1515 | ||
1b8ad134 JL |
1516 | /* SCRATCH_REG will hold an address and maybe the actual data. We want |
1517 | it in WORD_MODE regardless of what mode it was originally given | |
1518 | to us. */ | |
6619e96c | 1519 | scratch_reg = force_mode (word_mode, scratch_reg); |
2d7b2c36 JL |
1520 | |
1521 | /* D might not fit in 14 bits either; for such cases load D into | |
1522 | scratch reg. */ | |
690d4228 | 1523 | if (!memory_address_p (Pmode, XEXP (operand1, 0))) |
2d7b2c36 JL |
1524 | { |
1525 | emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1)); | |
d8f95bed JDA |
1526 | emit_move_insn (scratch_reg, |
1527 | gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)), | |
1528 | Pmode, | |
1529 | XEXP (XEXP (operand1, 0), 0), | |
1530 | scratch_reg)); | |
2d7b2c36 JL |
1531 | } |
1532 | else | |
1533 | emit_move_insn (scratch_reg, XEXP (operand1, 0)); | |
c5c76735 | 1534 | emit_insn (gen_rtx_SET (VOIDmode, operand0, |
7c95bbfb | 1535 | replace_equiv_address (operand1, scratch_reg))); |
d2a94ec0 TM |
1536 | return 1; |
1537 | } | |
a4295210 JDA |
1538 | else if (scratch_reg |
1539 | && fp_reg_operand (operand1, mode) | |
42fbe27f | 1540 | && ((GET_CODE (operand0) == MEM |
cae80939 JDA |
1541 | && !memory_address_p ((GET_MODE_SIZE (mode) == 4 |
1542 | ? SFmode : DFmode), | |
1543 | XEXP (operand0, 0))) | |
42fbe27f JL |
1544 | || ((GET_CODE (operand0) == SUBREG) |
1545 | && GET_CODE (XEXP (operand0, 0)) == MEM | |
cae80939 JDA |
1546 | && !memory_address_p ((GET_MODE_SIZE (mode) == 4 |
1547 | ? SFmode : DFmode), | |
a4295210 | 1548 | XEXP (XEXP (operand0, 0), 0))))) |
d2a94ec0 | 1549 | { |
42fbe27f JL |
1550 | if (GET_CODE (operand0) == SUBREG) |
1551 | operand0 = XEXP (operand0, 0); | |
1552 | ||
1b8ad134 JL |
1553 | /* SCRATCH_REG will hold an address and maybe the actual data. We want |
1554 | it in WORD_MODE regardless of what mode it was originally given | |
1555 | to us. */ | |
6619e96c | 1556 | scratch_reg = force_mode (word_mode, scratch_reg); |
1b8ad134 | 1557 | |
2d7b2c36 JL |
1558 | /* D might not fit in 14 bits either; for such cases load D into |
1559 | scratch reg. */ | |
690d4228 | 1560 | if (!memory_address_p (Pmode, XEXP (operand0, 0))) |
2d7b2c36 JL |
1561 | { |
1562 | emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1)); | |
ad2c71b7 JL |
1563 | emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, |
1564 | 0)), | |
690d4228 | 1565 | Pmode, |
ad2c71b7 JL |
1566 | XEXP (XEXP (operand0, 0), |
1567 | 0), | |
1568 | scratch_reg)); | |
2d7b2c36 JL |
1569 | } |
1570 | else | |
1571 | emit_move_insn (scratch_reg, XEXP (operand0, 0)); | |
7c95bbfb RH |
1572 | emit_insn (gen_rtx_SET (VOIDmode, |
1573 | replace_equiv_address (operand0, scratch_reg), | |
ad2c71b7 | 1574 | operand1)); |
d2a94ec0 TM |
1575 | return 1; |
1576 | } | |
c063ad75 JL |
1577 | /* Handle secondary reloads for loads of FP registers from constant |
1578 | expressions by forcing the constant into memory. | |
1579 | ||
a4295210 | 1580 | Use scratch_reg to hold the address of the memory location. |
c063ad75 | 1581 | |
8c417c25 | 1582 | The proper fix is to change PREFERRED_RELOAD_CLASS to return |
5bdc5878 | 1583 | NO_REGS when presented with a const_int and a register class |
c063ad75 JL |
1584 | containing only FP registers. Doing so unfortunately creates |
1585 | more problems than it solves. Fix this for 2.5. */ | |
a4295210 | 1586 | else if (scratch_reg |
c063ad75 | 1587 | && CONSTANT_P (operand1) |
a4295210 | 1588 | && fp_reg_operand (operand0, mode)) |
c063ad75 | 1589 | { |
7c95bbfb | 1590 | rtx const_mem, xoperands[2]; |
c063ad75 | 1591 | |
1b8ad134 JL |
1592 | /* SCRATCH_REG will hold an address and maybe the actual data. We want |
1593 | it in WORD_MODE regardless of what mode it was originally given | |
1594 | to us. */ | |
6619e96c | 1595 | scratch_reg = force_mode (word_mode, scratch_reg); |
1b8ad134 | 1596 | |
c063ad75 JL |
1597 | /* Force the constant into memory and put the address of the |
1598 | memory location into scratch_reg. */ | |
7c95bbfb | 1599 | const_mem = force_const_mem (mode, operand1); |
c063ad75 | 1600 | xoperands[0] = scratch_reg; |
7c95bbfb | 1601 | xoperands[1] = XEXP (const_mem, 0); |
669054c1 | 1602 | emit_move_sequence (xoperands, Pmode, 0); |
c063ad75 JL |
1603 | |
1604 | /* Now load the destination register. */ | |
c5c76735 | 1605 | emit_insn (gen_rtx_SET (mode, operand0, |
7c95bbfb | 1606 | replace_equiv_address (const_mem, scratch_reg))); |
c063ad75 JL |
1607 | return 1; |
1608 | } | |
4d3cea21 | 1609 | /* Handle secondary reloads for SAR. These occur when trying to load |
9c1eed37 | 1610 | the SAR from memory, FP register, or with a constant. */ |
a4295210 JDA |
1611 | else if (scratch_reg |
1612 | && GET_CODE (operand0) == REG | |
9c1eed37 | 1613 | && REGNO (operand0) < FIRST_PSEUDO_REGISTER |
4d3cea21 JL |
1614 | && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS |
1615 | && (GET_CODE (operand1) == MEM | |
2c51d187 | 1616 | || GET_CODE (operand1) == CONST_INT |
4d3cea21 | 1617 | || (GET_CODE (operand1) == REG |
a4295210 | 1618 | && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1)))))) |
4d3cea21 | 1619 | { |
09ece7b5 JL |
1620 | /* D might not fit in 14 bits either; for such cases load D into |
1621 | scratch reg. */ | |
1622 | if (GET_CODE (operand1) == MEM | |
690d4228 | 1623 | && !memory_address_p (Pmode, XEXP (operand1, 0))) |
09ece7b5 | 1624 | { |
dd8c13e3 JL |
1625 | /* We are reloading the address into the scratch register, so we |
1626 | want to make sure the scratch register is a full register. */ | |
6619e96c | 1627 | scratch_reg = force_mode (word_mode, scratch_reg); |
dd8c13e3 | 1628 | |
6619e96c | 1629 | emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1)); |
ad2c71b7 JL |
1630 | emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, |
1631 | 0)), | |
690d4228 | 1632 | Pmode, |
ad2c71b7 JL |
1633 | XEXP (XEXP (operand1, 0), |
1634 | 0), | |
1635 | scratch_reg)); | |
dd8c13e3 JL |
1636 | |
1637 | /* Now we are going to load the scratch register from memory, | |
1638 | we want to load it in the same width as the original MEM, | |
1639 | which must be the same as the width of the ultimate destination, | |
1640 | OPERAND0. */ | |
6619e96c AM |
1641 | scratch_reg = force_mode (GET_MODE (operand0), scratch_reg); |
1642 | ||
7c95bbfb RH |
1643 | emit_move_insn (scratch_reg, |
1644 | replace_equiv_address (operand1, scratch_reg)); | |
09ece7b5 JL |
1645 | } |
1646 | else | |
dd8c13e3 JL |
1647 | { |
1648 | /* We want to load the scratch register using the same mode as | |
1649 | the ultimate destination. */ | |
6619e96c AM |
1650 | scratch_reg = force_mode (GET_MODE (operand0), scratch_reg); |
1651 | ||
dd8c13e3 JL |
1652 | emit_move_insn (scratch_reg, operand1); |
1653 | } | |
1654 | ||
1655 | /* And emit the insn to set the ultimate destination. We know that | |
1656 | the scratch register has the same mode as the destination at this | |
1657 | point. */ | |
4d3cea21 JL |
1658 | emit_move_insn (operand0, scratch_reg); |
1659 | return 1; | |
1660 | } | |
d8f95bed | 1661 | /* Handle the most common case: storing into a register. */ |
d2a94ec0 | 1662 | else if (register_operand (operand0, mode)) |
188538df TG |
1663 | { |
1664 | if (register_operand (operand1, mode) | |
b8e42321 JDA |
1665 | || (GET_CODE (operand1) == CONST_INT |
1666 | && cint_ok_for_move (INTVAL (operand1))) | |
f048ca47 | 1667 | || (operand1 == CONST0_RTX (mode)) |
188538df | 1668 | || (GET_CODE (operand1) == HIGH |
80225b66 | 1669 | && !symbolic_operand (XEXP (operand1, 0), VOIDmode)) |
188538df TG |
1670 | /* Only `general_operands' can come here, so MEM is ok. */ |
1671 | || GET_CODE (operand1) == MEM) | |
1672 | { | |
d8f95bed JDA |
1673 | /* Various sets are created during RTL generation which don't |
1674 | have the REG_POINTER flag correctly set. After the CSE pass, | |
1675 | instruction recognition can fail if we don't consistently | |
1676 | set this flag when performing register copies. This should | |
1677 | also improve the opportunities for creating insns that use | |
1678 | unscaled indexing. */ | |
1679 | if (REG_P (operand0) && REG_P (operand1)) | |
1680 | { | |
1681 | if (REG_POINTER (operand1) | |
1682 | && !REG_POINTER (operand0) | |
1683 | && !HARD_REGISTER_P (operand0)) | |
1684 | copy_reg_pointer (operand0, operand1); | |
1685 | else if (REG_POINTER (operand0) | |
1686 | && !REG_POINTER (operand1) | |
1687 | && !HARD_REGISTER_P (operand1)) | |
1688 | copy_reg_pointer (operand1, operand0); | |
1689 | } | |
1690 | ||
1691 | /* When MEMs are broken out, the REG_POINTER flag doesn't | |
1692 | get set. In some cases, we can set the REG_POINTER flag | |
1693 | from the declaration for the MEM. */ | |
1694 | if (REG_P (operand0) | |
1695 | && GET_CODE (operand1) == MEM | |
1696 | && !REG_POINTER (operand0)) | |
1697 | { | |
1698 | tree decl = MEM_EXPR (operand1); | |
1699 | ||
1700 | /* Set the register pointer flag and register alignment | |
1701 | if the declaration for this memory reference is a | |
1702 | pointer type. Fortran indirect argument references | |
1703 | are ignored. */ | |
1704 | if (decl | |
1705 | && !(flag_argument_noalias > 1 | |
1706 | && TREE_CODE (decl) == INDIRECT_REF | |
1707 | && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL)) | |
1708 | { | |
1709 | tree type; | |
1710 | ||
1711 | /* If this is a COMPONENT_REF, use the FIELD_DECL from | |
1712 | tree operand 1. */ | |
1713 | if (TREE_CODE (decl) == COMPONENT_REF) | |
1714 | decl = TREE_OPERAND (decl, 1); | |
1715 | ||
1716 | type = TREE_TYPE (decl); | |
dd25a747 | 1717 | type = strip_array_types (type); |
d8f95bed JDA |
1718 | |
1719 | if (POINTER_TYPE_P (type)) | |
1720 | { | |
1721 | int align; | |
1722 | ||
1723 | type = TREE_TYPE (type); | |
1724 | /* Using TYPE_ALIGN_OK is rather conservative as | |
1725 | only the ada frontend actually sets it. */ | |
1726 | align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type) | |
1727 | : BITS_PER_UNIT); | |
1728 | mark_reg_pointer (operand0, align); | |
1729 | } | |
1730 | } | |
1731 | } | |
1732 | ||
ad2c71b7 | 1733 | emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); |
188538df TG |
1734 | return 1; |
1735 | } | |
1736 | } | |
1737 | else if (GET_CODE (operand0) == MEM) | |
1738 | { | |
d66dec28 JL |
1739 | if (mode == DFmode && operand1 == CONST0_RTX (mode) |
1740 | && !(reload_in_progress || reload_completed)) | |
1741 | { | |
1742 | rtx temp = gen_reg_rtx (DFmode); | |
1743 | ||
ad2c71b7 JL |
1744 | emit_insn (gen_rtx_SET (VOIDmode, temp, operand1)); |
1745 | emit_insn (gen_rtx_SET (VOIDmode, operand0, temp)); | |
d66dec28 JL |
1746 | return 1; |
1747 | } | |
f048ca47 | 1748 | if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode)) |
188538df TG |
1749 | { |
1750 | /* Run this case quickly. */ | |
ad2c71b7 | 1751 | emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); |
188538df TG |
1752 | return 1; |
1753 | } | |
1bc695cd | 1754 | if (! (reload_in_progress || reload_completed)) |
188538df TG |
1755 | { |
1756 | operands[0] = validize_mem (operand0); | |
1757 | operands[1] = operand1 = force_reg (mode, operand1); | |
1758 | } | |
1759 | } | |
1760 | ||
44201dba JL |
1761 | /* Simplify the source if we need to. |
1762 | Note we do have to handle function labels here, even though we do | |
1763 | not consider them legitimate constants. Loop optimizations can | |
06387d7c | 1764 | call the emit_move_xxx with one as a source. */ |
f1c7ce82 | 1765 | if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode)) |
44201dba | 1766 | || function_label_operand (operand1, mode) |
43940f6b | 1767 | || (GET_CODE (operand1) == HIGH |
ba365a19 | 1768 | && symbolic_operand (XEXP (operand1, 0), mode))) |
188538df | 1769 | { |
43940f6b JL |
1770 | int ishighonly = 0; |
1771 | ||
1772 | if (GET_CODE (operand1) == HIGH) | |
1773 | { | |
1774 | ishighonly = 1; | |
1775 | operand1 = XEXP (operand1, 0); | |
1776 | } | |
188538df TG |
1777 | if (symbolic_operand (operand1, mode)) |
1778 | { | |
5eceed92 | 1779 | /* Argh. The assembler and linker can't handle arithmetic |
b0fabad3 | 1780 | involving plabels. |
5eceed92 | 1781 | |
b0fabad3 JL |
1782 | So we force the plabel into memory, load operand0 from |
1783 | the memory location, then add in the constant part. */ | |
44201dba JL |
1784 | if ((GET_CODE (operand1) == CONST |
1785 | && GET_CODE (XEXP (operand1, 0)) == PLUS | |
1786 | && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode)) | |
1787 | || function_label_operand (operand1, mode)) | |
5eceed92 | 1788 | { |
8e64b41a | 1789 | rtx temp, const_part; |
b0fabad3 JL |
1790 | |
1791 | /* Figure out what (if any) scratch register to use. */ | |
1792 | if (reload_in_progress || reload_completed) | |
1b8ad134 JL |
1793 | { |
1794 | scratch_reg = scratch_reg ? scratch_reg : operand0; | |
1795 | /* SCRATCH_REG will hold an address and maybe the actual | |
1796 | data. We want it in WORD_MODE regardless of what mode it | |
1797 | was originally given to us. */ | |
6619e96c | 1798 | scratch_reg = force_mode (word_mode, scratch_reg); |
1b8ad134 | 1799 | } |
b0fabad3 JL |
1800 | else if (flag_pic) |
1801 | scratch_reg = gen_reg_rtx (Pmode); | |
1802 | ||
44201dba JL |
1803 | if (GET_CODE (operand1) == CONST) |
1804 | { | |
1805 | /* Save away the constant part of the expression. */ | |
1806 | const_part = XEXP (XEXP (operand1, 0), 1); | |
144d51f9 | 1807 | gcc_assert (GET_CODE (const_part) == CONST_INT); |
44201dba JL |
1808 | |
1809 | /* Force the function label into memory. */ | |
1810 | temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0)); | |
1811 | } | |
1812 | else | |
1813 | { | |
1814 | /* No constant part. */ | |
1815 | const_part = NULL_RTX; | |
5eceed92 | 1816 | |
44201dba JL |
1817 | /* Force the function label into memory. */ |
1818 | temp = force_const_mem (mode, operand1); | |
1819 | } | |
6619e96c | 1820 | |
b0fabad3 JL |
1821 | |
1822 | /* Get the address of the memory location. PIC-ify it if | |
1823 | necessary. */ | |
1824 | temp = XEXP (temp, 0); | |
1825 | if (flag_pic) | |
1826 | temp = legitimize_pic_address (temp, mode, scratch_reg); | |
1827 | ||
1828 | /* Put the address of the memory location into our destination | |
1829 | register. */ | |
1830 | operands[1] = temp; | |
1831 | emit_move_sequence (operands, mode, scratch_reg); | |
1832 | ||
1833 | /* Now load from the memory location into our destination | |
1834 | register. */ | |
ad2c71b7 | 1835 | operands[1] = gen_rtx_MEM (Pmode, operands[0]); |
b0fabad3 JL |
1836 | emit_move_sequence (operands, mode, scratch_reg); |
1837 | ||
1838 | /* And add back in the constant part. */ | |
44201dba JL |
1839 | if (const_part != NULL_RTX) |
1840 | expand_inc (operand0, const_part); | |
b0fabad3 JL |
1841 | |
1842 | return 1; | |
5eceed92 JL |
1843 | } |
1844 | ||
188538df TG |
1845 | if (flag_pic) |
1846 | { | |
1bc695cd JL |
1847 | rtx temp; |
1848 | ||
1849 | if (reload_in_progress || reload_completed) | |
1b8ad134 JL |
1850 | { |
1851 | temp = scratch_reg ? scratch_reg : operand0; | |
1852 | /* TEMP will hold an address and maybe the actual | |
1853 | data. We want it in WORD_MODE regardless of what mode it | |
1854 | was originally given to us. */ | |
6619e96c | 1855 | temp = force_mode (word_mode, temp); |
1b8ad134 | 1856 | } |
1bc695cd JL |
1857 | else |
1858 | temp = gen_reg_rtx (Pmode); | |
23f6f34f | 1859 | |
b0fabad3 JL |
1860 | /* (const (plus (symbol) (const_int))) must be forced to |
1861 | memory during/after reload if the const_int will not fit | |
1862 | in 14 bits. */ | |
1863 | if (GET_CODE (operand1) == CONST | |
bc4a9f17 JL |
1864 | && GET_CODE (XEXP (operand1, 0)) == PLUS |
1865 | && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT | |
1866 | && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)) | |
1867 | && (reload_completed || reload_in_progress) | |
1868 | && flag_pic) | |
1869 | { | |
7c95bbfb | 1870 | rtx const_mem = force_const_mem (mode, operand1); |
1c9ef36d | 1871 | operands[1] = legitimize_pic_address (XEXP (const_mem, 0), |
bc4a9f17 | 1872 | mode, temp); |
7c95bbfb | 1873 | operands[1] = replace_equiv_address (const_mem, operands[1]); |
bc4a9f17 JL |
1874 | emit_move_sequence (operands, mode, temp); |
1875 | } | |
5eceed92 JL |
1876 | else |
1877 | { | |
1878 | operands[1] = legitimize_pic_address (operand1, mode, temp); | |
d8f95bed JDA |
1879 | if (REG_P (operand0) && REG_P (operands[1])) |
1880 | copy_reg_pointer (operand0, operands[1]); | |
ad2c71b7 | 1881 | emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1])); |
5eceed92 | 1882 | } |
188538df | 1883 | } |
6bb36601 JL |
1884 | /* On the HPPA, references to data space are supposed to use dp, |
1885 | register 27, but showing it in the RTL inhibits various cse | |
1886 | and loop optimizations. */ | |
23f6f34f | 1887 | else |
188538df | 1888 | { |
5eceed92 | 1889 | rtx temp, set; |
43940f6b | 1890 | |
23f6f34f | 1891 | if (reload_in_progress || reload_completed) |
1b8ad134 JL |
1892 | { |
1893 | temp = scratch_reg ? scratch_reg : operand0; | |
1894 | /* TEMP will hold an address and maybe the actual | |
1895 | data. We want it in WORD_MODE regardless of what mode it | |
1896 | was originally given to us. */ | |
6619e96c | 1897 | temp = force_mode (word_mode, temp); |
1b8ad134 | 1898 | } |
43940f6b JL |
1899 | else |
1900 | temp = gen_reg_rtx (mode); | |
1901 | ||
68944452 | 1902 | /* Loading a SYMBOL_REF into a register makes that register |
6619e96c | 1903 | safe to be used as the base in an indexed address. |
68944452 JL |
1904 | |
1905 | Don't mark hard registers though. That loses. */ | |
c34d858f RK |
1906 | if (GET_CODE (operand0) == REG |
1907 | && REGNO (operand0) >= FIRST_PSEUDO_REGISTER) | |
d8f95bed | 1908 | mark_reg_pointer (operand0, BITS_PER_UNIT); |
68944452 | 1909 | if (REGNO (temp) >= FIRST_PSEUDO_REGISTER) |
d8f95bed JDA |
1910 | mark_reg_pointer (temp, BITS_PER_UNIT); |
1911 | ||
43940f6b | 1912 | if (ishighonly) |
ad2c71b7 | 1913 | set = gen_rtx_SET (mode, operand0, temp); |
43940f6b | 1914 | else |
c5c76735 JL |
1915 | set = gen_rtx_SET (VOIDmode, |
1916 | operand0, | |
ad2c71b7 | 1917 | gen_rtx_LO_SUM (mode, temp, operand1)); |
23f6f34f | 1918 | |
ad2c71b7 JL |
1919 | emit_insn (gen_rtx_SET (VOIDmode, |
1920 | temp, | |
1921 | gen_rtx_HIGH (mode, operand1))); | |
b0ce651a | 1922 | emit_insn (set); |
326bc2de | 1923 | |
188538df | 1924 | } |
43940f6b | 1925 | return 1; |
188538df | 1926 | } |
51076f96 RC |
1927 | else if (pa_tls_referenced_p (operand1)) |
1928 | { | |
1929 | rtx tmp = operand1; | |
1930 | rtx addend = NULL; | |
1931 | ||
1932 | if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS) | |
1933 | { | |
1934 | addend = XEXP (XEXP (tmp, 0), 1); | |
1935 | tmp = XEXP (XEXP (tmp, 0), 0); | |
1936 | } | |
1937 | ||
1938 | gcc_assert (GET_CODE (tmp) == SYMBOL_REF); | |
1939 | tmp = legitimize_tls_address (tmp); | |
1940 | if (addend) | |
1941 | { | |
1942 | tmp = gen_rtx_PLUS (mode, tmp, addend); | |
1943 | tmp = force_operand (tmp, operands[0]); | |
1944 | } | |
1945 | operands[1] = tmp; | |
1946 | } | |
a1747d2c | 1947 | else if (GET_CODE (operand1) != CONST_INT |
a4295210 | 1948 | || !cint_ok_for_move (INTVAL (operand1))) |
188538df | 1949 | { |
a4295210 JDA |
1950 | rtx insn, temp; |
1951 | rtx op1 = operand1; | |
4cce9dd8 | 1952 | HOST_WIDE_INT value = 0; |
a4295210 JDA |
1953 | HOST_WIDE_INT insv = 0; |
1954 | int insert = 0; | |
1955 | ||
4cce9dd8 RS |
1956 | if (GET_CODE (operand1) == CONST_INT) |
1957 | value = INTVAL (operand1); | |
1958 | ||
a4295210 JDA |
1959 | if (TARGET_64BIT |
1960 | && GET_CODE (operand1) == CONST_INT | |
e0c556d3 | 1961 | && HOST_BITS_PER_WIDE_INT > 32 |
520babc7 JL |
1962 | && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32) |
1963 | { | |
e0c556d3 | 1964 | HOST_WIDE_INT nval; |
520babc7 | 1965 | |
b8e42321 JDA |
1966 | /* Extract the low order 32 bits of the value and sign extend. |
1967 | If the new value is the same as the original value, we can | |
1968 | can use the original value as-is. If the new value is | |
1969 | different, we use it and insert the most-significant 32-bits | |
1970 | of the original value into the final result. */ | |
a4295210 | 1971 | nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1)) |
e0c556d3 | 1972 | ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31); |
a4295210 | 1973 | if (value != nval) |
520babc7 | 1974 | { |
b8e42321 | 1975 | #if HOST_BITS_PER_WIDE_INT > 32 |
a4295210 | 1976 | insv = value >= 0 ? value >> 32 : ~(~value >> 32); |
b8e42321 | 1977 | #endif |
a4295210 JDA |
1978 | insert = 1; |
1979 | value = nval; | |
520babc7 JL |
1980 | operand1 = GEN_INT (nval); |
1981 | } | |
1982 | } | |
1bc695cd JL |
1983 | |
1984 | if (reload_in_progress || reload_completed) | |
a4295210 | 1985 | temp = scratch_reg ? scratch_reg : operand0; |
1bc695cd JL |
1986 | else |
1987 | temp = gen_reg_rtx (mode); | |
1988 | ||
47abc309 JDA |
1989 | /* We don't directly split DImode constants on 32-bit targets |
1990 | because PLUS uses an 11-bit immediate and the insn sequence | |
1991 | generated is not as efficient as the one using HIGH/LO_SUM. */ | |
1992 | if (GET_CODE (operand1) == CONST_INT | |
0eab7815 | 1993 | && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD |
a4295210 JDA |
1994 | && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT |
1995 | && !insert) | |
b8e42321 | 1996 | { |
47abc309 | 1997 | /* Directly break constant into high and low parts. This |
b8e42321 JDA |
1998 | provides better optimization opportunities because various |
1999 | passes recognize constants split with PLUS but not LO_SUM. | |
2000 | We use a 14-bit signed low part except when the addition | |
2001 | of 0x4000 to the high part might change the sign of the | |
2002 | high part. */ | |
b8e42321 JDA |
2003 | HOST_WIDE_INT low = value & 0x3fff; |
2004 | HOST_WIDE_INT high = value & ~ 0x3fff; | |
2005 | ||
2006 | if (low >= 0x2000) | |
2007 | { | |
2008 | if (high == 0x7fffc000 || (mode == HImode && high == 0x4000)) | |
2009 | high += 0x2000; | |
2010 | else | |
2011 | high += 0x4000; | |
2012 | } | |
2013 | ||
2014 | low = value - high; | |
520babc7 | 2015 | |
b8e42321 JDA |
2016 | emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high))); |
2017 | operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low)); | |
2018 | } | |
2019 | else | |
520babc7 | 2020 | { |
b8e42321 JDA |
2021 | emit_insn (gen_rtx_SET (VOIDmode, temp, |
2022 | gen_rtx_HIGH (mode, operand1))); | |
2023 | operands[1] = gen_rtx_LO_SUM (mode, temp, operand1); | |
520babc7 | 2024 | } |
6619e96c | 2025 | |
a4295210 JDA |
2026 | insn = emit_move_insn (operands[0], operands[1]); |
2027 | ||
2028 | /* Now insert the most significant 32 bits of the value | |
2029 | into the register. When we don't have a second register | |
2030 | available, it could take up to nine instructions to load | |
2031 | a 64-bit integer constant. Prior to reload, we force | |
2032 | constants that would take more than three instructions | |
2033 | to load to the constant pool. During and after reload, | |
2034 | we have to handle all possible values. */ | |
2035 | if (insert) | |
2036 | { | |
2037 | /* Use a HIGH/LO_SUM/INSV sequence if we have a second | |
2038 | register and the value to be inserted is outside the | |
2039 | range that can be loaded with three depdi instructions. */ | |
2040 | if (temp != operand0 && (insv >= 16384 || insv < -16384)) | |
2041 | { | |
2042 | operand1 = GEN_INT (insv); | |
2043 | ||
2044 | emit_insn (gen_rtx_SET (VOIDmode, temp, | |
2045 | gen_rtx_HIGH (mode, operand1))); | |
2046 | emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1)); | |
2047 | emit_insn (gen_insv (operand0, GEN_INT (32), | |
2048 | const0_rtx, temp)); | |
2049 | } | |
2050 | else | |
2051 | { | |
2052 | int len = 5, pos = 27; | |
2053 | ||
2054 | /* Insert the bits using the depdi instruction. */ | |
2055 | while (pos >= 0) | |
2056 | { | |
2057 | HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16; | |
2058 | HOST_WIDE_INT sign = v5 < 0; | |
2059 | ||
2060 | /* Left extend the insertion. */ | |
2061 | insv = (insv >= 0 ? insv >> len : ~(~insv >> len)); | |
2062 | while (pos > 0 && (insv & 1) == sign) | |
2063 | { | |
2064 | insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1)); | |
2065 | len += 1; | |
2066 | pos -= 1; | |
2067 | } | |
2068 | ||
2069 | emit_insn (gen_insv (operand0, GEN_INT (len), | |
2070 | GEN_INT (pos), GEN_INT (v5))); | |
2071 | ||
2072 | len = pos > 0 && pos < 5 ? pos : 5; | |
2073 | pos -= len; | |
2074 | } | |
2075 | } | |
2076 | } | |
b8e42321 | 2077 | |
bd94cb6e | 2078 | set_unique_reg_note (insn, REG_EQUAL, op1); |
b8e42321 | 2079 | |
520babc7 | 2080 | return 1; |
188538df TG |
2081 | } |
2082 | } | |
2083 | /* Now have insn-emit do whatever it normally does. */ | |
2084 | return 0; | |
2085 | } | |
2086 | ||
c77c286a | 2087 | /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning |
c4bb6b38 | 2088 | it will need a link/runtime reloc). */ |
c77c286a JL |
2089 | |
2090 | int | |
b7849684 | 2091 | reloc_needed (tree exp) |
c77c286a JL |
2092 | { |
2093 | int reloc = 0; | |
2094 | ||
2095 | switch (TREE_CODE (exp)) | |
2096 | { | |
2097 | case ADDR_EXPR: | |
2098 | return 1; | |
2099 | ||
5be014d5 | 2100 | case POINTER_PLUS_EXPR: |
c77c286a JL |
2101 | case PLUS_EXPR: |
2102 | case MINUS_EXPR: | |
2103 | reloc = reloc_needed (TREE_OPERAND (exp, 0)); | |
2104 | reloc |= reloc_needed (TREE_OPERAND (exp, 1)); | |
2105 | break; | |
2106 | ||
1043771b | 2107 | CASE_CONVERT: |
c77c286a JL |
2108 | case NON_LVALUE_EXPR: |
2109 | reloc = reloc_needed (TREE_OPERAND (exp, 0)); | |
2110 | break; | |
2111 | ||
2112 | case CONSTRUCTOR: | |
2113 | { | |
28f155be GB |
2114 | tree value; |
2115 | unsigned HOST_WIDE_INT ix; | |
2116 | ||
2117 | FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value) | |
2118 | if (value) | |
2119 | reloc |= reloc_needed (value); | |
c77c286a JL |
2120 | } |
2121 | break; | |
2122 | ||
2123 | case ERROR_MARK: | |
2124 | break; | |
51723711 KG |
2125 | |
2126 | default: | |
2127 | break; | |
c77c286a JL |
2128 | } |
2129 | return reloc; | |
2130 | } | |
2131 | ||
fb49053f RH |
2132 | /* Does operand (which is a symbolic_operand) live in text space? |
2133 | If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info, | |
2134 | will be true. */ | |
188538df TG |
2135 | |
2136 | int | |
b7849684 | 2137 | read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED) |
188538df TG |
2138 | { |
2139 | if (GET_CODE (operand) == CONST) | |
2140 | operand = XEXP (XEXP (operand, 0), 0); | |
6bb36601 JL |
2141 | if (flag_pic) |
2142 | { | |
2143 | if (GET_CODE (operand) == SYMBOL_REF) | |
2144 | return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand); | |
2145 | } | |
2146 | else | |
2147 | { | |
2148 | if (GET_CODE (operand) == SYMBOL_REF) | |
2149 | return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand); | |
2150 | } | |
188538df TG |
2151 | return 1; |
2152 | } | |
23f6f34f | 2153 | |
188538df TG |
2154 | \f |
2155 | /* Return the best assembler insn template | |
71cc389b | 2156 | for moving operands[1] into operands[0] as a fullword. */ |
519104fe | 2157 | const char * |
b7849684 | 2158 | singlemove_string (rtx *operands) |
188538df | 2159 | { |
0c235d7e TG |
2160 | HOST_WIDE_INT intval; |
2161 | ||
188538df TG |
2162 | if (GET_CODE (operands[0]) == MEM) |
2163 | return "stw %r1,%0"; | |
0c235d7e | 2164 | if (GET_CODE (operands[1]) == MEM) |
188538df | 2165 | return "ldw %1,%0"; |
0c235d7e | 2166 | if (GET_CODE (operands[1]) == CONST_DOUBLE) |
e5c2baa1 | 2167 | { |
0c235d7e TG |
2168 | long i; |
2169 | REAL_VALUE_TYPE d; | |
e5c2baa1 | 2170 | |
144d51f9 | 2171 | gcc_assert (GET_MODE (operands[1]) == SFmode); |
e5c2baa1 | 2172 | |
0c235d7e TG |
2173 | /* Translate the CONST_DOUBLE to a CONST_INT with the same target |
2174 | bit pattern. */ | |
2175 | REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]); | |
2176 | REAL_VALUE_TO_TARGET_SINGLE (d, i); | |
e5c2baa1 | 2177 | |
0c235d7e TG |
2178 | operands[1] = GEN_INT (i); |
2179 | /* Fall through to CONST_INT case. */ | |
2180 | } | |
2181 | if (GET_CODE (operands[1]) == CONST_INT) | |
e5c2baa1 | 2182 | { |
0c235d7e TG |
2183 | intval = INTVAL (operands[1]); |
2184 | ||
2185 | if (VAL_14_BITS_P (intval)) | |
2186 | return "ldi %1,%0"; | |
2187 | else if ((intval & 0x7ff) == 0) | |
2188 | return "ldil L'%1,%0"; | |
2189 | else if (zdepi_cint_p (intval)) | |
f38b27c7 | 2190 | return "{zdepi %Z1,%0|depwi,z %Z1,%0}"; |
e5c2baa1 RS |
2191 | else |
2192 | return "ldil L'%1,%0\n\tldo R'%1(%0),%0"; | |
2193 | } | |
188538df TG |
2194 | return "copy %1,%0"; |
2195 | } | |
2196 | \f | |
2197 | ||
f133af4c TG |
2198 | /* Compute position (in OP[1]) and width (in OP[2]) |
2199 | useful for copying IMM to a register using the zdepi | |
2200 | instructions. Store the immediate value to insert in OP[0]. */ | |
519104fe | 2201 | static void |
b7849684 | 2202 | compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op) |
c819adf2 | 2203 | { |
0e7f4c19 | 2204 | int lsb, len; |
c819adf2 | 2205 | |
0e7f4c19 TG |
2206 | /* Find the least significant set bit in IMM. */ |
2207 | for (lsb = 0; lsb < 32; lsb++) | |
c819adf2 | 2208 | { |
0e7f4c19 | 2209 | if ((imm & 1) != 0) |
c819adf2 | 2210 | break; |
0e7f4c19 | 2211 | imm >>= 1; |
c819adf2 TG |
2212 | } |
2213 | ||
0e7f4c19 TG |
2214 | /* Choose variants based on *sign* of the 5-bit field. */ |
2215 | if ((imm & 0x10) == 0) | |
2216 | len = (lsb <= 28) ? 4 : 32 - lsb; | |
c819adf2 TG |
2217 | else |
2218 | { | |
0e7f4c19 TG |
2219 | /* Find the width of the bitstring in IMM. */ |
2220 | for (len = 5; len < 32; len++) | |
c819adf2 | 2221 | { |
0e7f4c19 | 2222 | if ((imm & (1 << len)) == 0) |
c819adf2 | 2223 | break; |
c819adf2 TG |
2224 | } |
2225 | ||
0e7f4c19 TG |
2226 | /* Sign extend IMM as a 5-bit value. */ |
2227 | imm = (imm & 0xf) - 0x10; | |
c819adf2 TG |
2228 | } |
2229 | ||
a1747d2c TG |
2230 | op[0] = imm; |
2231 | op[1] = 31 - lsb; | |
2232 | op[2] = len; | |
c819adf2 TG |
2233 | } |
2234 | ||
520babc7 JL |
2235 | /* Compute position (in OP[1]) and width (in OP[2]) |
2236 | useful for copying IMM to a register using the depdi,z | |
2237 | instructions. Store the immediate value to insert in OP[0]. */ | |
2238 | void | |
b7849684 | 2239 | compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op) |
520babc7 JL |
2240 | { |
2241 | HOST_WIDE_INT lsb, len; | |
2242 | ||
2243 | /* Find the least significant set bit in IMM. */ | |
2244 | for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++) | |
2245 | { | |
2246 | if ((imm & 1) != 0) | |
2247 | break; | |
2248 | imm >>= 1; | |
2249 | } | |
2250 | ||
2251 | /* Choose variants based on *sign* of the 5-bit field. */ | |
2252 | if ((imm & 0x10) == 0) | |
2253 | len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4) | |
2254 | ? 4 : HOST_BITS_PER_WIDE_INT - lsb); | |
2255 | else | |
2256 | { | |
2257 | /* Find the width of the bitstring in IMM. */ | |
2258 | for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++) | |
2259 | { | |
831c1763 | 2260 | if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0) |
520babc7 JL |
2261 | break; |
2262 | } | |
2263 | ||
2264 | /* Sign extend IMM as a 5-bit value. */ | |
2265 | imm = (imm & 0xf) - 0x10; | |
2266 | } | |
2267 | ||
2268 | op[0] = imm; | |
2269 | op[1] = 63 - lsb; | |
2270 | op[2] = len; | |
2271 | } | |
2272 | ||
188538df TG |
2273 | /* Output assembler code to perform a doubleword move insn |
2274 | with operands OPERANDS. */ | |
2275 | ||
519104fe | 2276 | const char * |
b7849684 | 2277 | output_move_double (rtx *operands) |
188538df TG |
2278 | { |
2279 | enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1; | |
2280 | rtx latehalf[2]; | |
2281 | rtx addreg0 = 0, addreg1 = 0; | |
2282 | ||
2283 | /* First classify both operands. */ | |
2284 | ||
2285 | if (REG_P (operands[0])) | |
2286 | optype0 = REGOP; | |
2287 | else if (offsettable_memref_p (operands[0])) | |
2288 | optype0 = OFFSOP; | |
2289 | else if (GET_CODE (operands[0]) == MEM) | |
2290 | optype0 = MEMOP; | |
2291 | else | |
2292 | optype0 = RNDOP; | |
2293 | ||
2294 | if (REG_P (operands[1])) | |
2295 | optype1 = REGOP; | |
2296 | else if (CONSTANT_P (operands[1])) | |
2297 | optype1 = CNSTOP; | |
2298 | else if (offsettable_memref_p (operands[1])) | |
2299 | optype1 = OFFSOP; | |
2300 | else if (GET_CODE (operands[1]) == MEM) | |
2301 | optype1 = MEMOP; | |
2302 | else | |
2303 | optype1 = RNDOP; | |
2304 | ||
2305 | /* Check for the cases that the operand constraints are not | |
144d51f9 NS |
2306 | supposed to allow to happen. */ |
2307 | gcc_assert (optype0 == REGOP || optype1 == REGOP); | |
188538df | 2308 | |
5401050b JDA |
2309 | /* Handle copies between general and floating registers. */ |
2310 | ||
2311 | if (optype0 == REGOP && optype1 == REGOP | |
2312 | && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1])) | |
2313 | { | |
2314 | if (FP_REG_P (operands[0])) | |
2315 | { | |
2316 | output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands); | |
2317 | output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands); | |
2318 | return "{fldds|fldd} -16(%%sp),%0"; | |
2319 | } | |
2320 | else | |
2321 | { | |
2322 | output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands); | |
2323 | output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands); | |
2324 | return "{ldws|ldw} -12(%%sp),%R0"; | |
2325 | } | |
2326 | } | |
2327 | ||
188538df TG |
2328 | /* Handle auto decrementing and incrementing loads and stores |
2329 | specifically, since the structure of the function doesn't work | |
2330 | for them without major modification. Do it better when we learn | |
2331 | this port about the general inc/dec addressing of PA. | |
2332 | (This was written by tege. Chide him if it doesn't work.) */ | |
2333 | ||
2334 | if (optype0 == MEMOP) | |
2335 | { | |
e37ce5f6 JL |
2336 | /* We have to output the address syntax ourselves, since print_operand |
2337 | doesn't deal with the addresses we want to use. Fix this later. */ | |
2338 | ||
188538df | 2339 | rtx addr = XEXP (operands[0], 0); |
e37ce5f6 | 2340 | if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC) |
188538df | 2341 | { |
ad2c71b7 | 2342 | rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0); |
e37ce5f6 JL |
2343 | |
2344 | operands[0] = XEXP (addr, 0); | |
144d51f9 NS |
2345 | gcc_assert (GET_CODE (operands[1]) == REG |
2346 | && GET_CODE (operands[0]) == REG); | |
e37ce5f6 | 2347 | |
144d51f9 NS |
2348 | gcc_assert (!reg_overlap_mentioned_p (high_reg, addr)); |
2349 | ||
2350 | /* No overlap between high target register and address | |
2351 | register. (We do this in a non-obvious way to | |
2352 | save a register file writeback) */ | |
2353 | if (GET_CODE (addr) == POST_INC) | |
2354 | return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)"; | |
2355 | return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)"; | |
9682683d | 2356 | } |
e37ce5f6 | 2357 | else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC) |
9682683d | 2358 | { |
ad2c71b7 | 2359 | rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0); |
e37ce5f6 JL |
2360 | |
2361 | operands[0] = XEXP (addr, 0); | |
144d51f9 NS |
2362 | gcc_assert (GET_CODE (operands[1]) == REG |
2363 | && GET_CODE (operands[0]) == REG); | |
2364 | ||
2365 | gcc_assert (!reg_overlap_mentioned_p (high_reg, addr)); | |
2366 | /* No overlap between high target register and address | |
2367 | register. (We do this in a non-obvious way to save a | |
2368 | register file writeback) */ | |
2369 | if (GET_CODE (addr) == PRE_INC) | |
2370 | return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)"; | |
2371 | return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)"; | |
188538df TG |
2372 | } |
2373 | } | |
2374 | if (optype1 == MEMOP) | |
2375 | { | |
2376 | /* We have to output the address syntax ourselves, since print_operand | |
2377 | doesn't deal with the addresses we want to use. Fix this later. */ | |
2378 | ||
2379 | rtx addr = XEXP (operands[1], 0); | |
2380 | if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC) | |
2381 | { | |
ad2c71b7 | 2382 | rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0); |
188538df TG |
2383 | |
2384 | operands[1] = XEXP (addr, 0); | |
144d51f9 NS |
2385 | gcc_assert (GET_CODE (operands[0]) == REG |
2386 | && GET_CODE (operands[1]) == REG); | |
188538df TG |
2387 | |
2388 | if (!reg_overlap_mentioned_p (high_reg, addr)) | |
2389 | { | |
2390 | /* No overlap between high target register and address | |
dd605bb4 | 2391 | register. (We do this in a non-obvious way to |
188538df TG |
2392 | save a register file writeback) */ |
2393 | if (GET_CODE (addr) == POST_INC) | |
f38b27c7 | 2394 | return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0"; |
6126a380 | 2395 | return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0"; |
188538df TG |
2396 | } |
2397 | else | |
2398 | { | |
2399 | /* This is an undefined situation. We should load into the | |
2400 | address register *and* update that register. Probably | |
2401 | we don't need to handle this at all. */ | |
2402 | if (GET_CODE (addr) == POST_INC) | |
f38b27c7 JL |
2403 | return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0"; |
2404 | return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0"; | |
188538df TG |
2405 | } |
2406 | } | |
2407 | else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC) | |
2408 | { | |
ad2c71b7 | 2409 | rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0); |
188538df TG |
2410 | |
2411 | operands[1] = XEXP (addr, 0); | |
144d51f9 NS |
2412 | gcc_assert (GET_CODE (operands[0]) == REG |
2413 | && GET_CODE (operands[1]) == REG); | |
188538df TG |
2414 | |
2415 | if (!reg_overlap_mentioned_p (high_reg, addr)) | |
2416 | { | |
2417 | /* No overlap between high target register and address | |
dd605bb4 | 2418 | register. (We do this in a non-obvious way to |
188538df TG |
2419 | save a register file writeback) */ |
2420 | if (GET_CODE (addr) == PRE_INC) | |
f38b27c7 JL |
2421 | return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0"; |
2422 | return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0"; | |
188538df TG |
2423 | } |
2424 | else | |
2425 | { | |
2426 | /* This is an undefined situation. We should load into the | |
2427 | address register *and* update that register. Probably | |
2428 | we don't need to handle this at all. */ | |
2429 | if (GET_CODE (addr) == PRE_INC) | |
f38b27c7 JL |
2430 | return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0"; |
2431 | return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0"; | |
188538df TG |
2432 | } |
2433 | } | |
a89974a2 JL |
2434 | else if (GET_CODE (addr) == PLUS |
2435 | && GET_CODE (XEXP (addr, 0)) == MULT) | |
2436 | { | |
4c6d8726 | 2437 | rtx xoperands[4]; |
ad2c71b7 | 2438 | rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0); |
a89974a2 JL |
2439 | |
2440 | if (!reg_overlap_mentioned_p (high_reg, addr)) | |
2441 | { | |
a89974a2 JL |
2442 | xoperands[0] = high_reg; |
2443 | xoperands[1] = XEXP (addr, 1); | |
2444 | xoperands[2] = XEXP (XEXP (addr, 0), 0); | |
2445 | xoperands[3] = XEXP (XEXP (addr, 0), 1); | |
f38b27c7 JL |
2446 | output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}", |
2447 | xoperands); | |
d2d28085 | 2448 | return "ldw 4(%0),%R0\n\tldw 0(%0),%0"; |
a89974a2 JL |
2449 | } |
2450 | else | |
2451 | { | |
a89974a2 JL |
2452 | xoperands[0] = high_reg; |
2453 | xoperands[1] = XEXP (addr, 1); | |
2454 | xoperands[2] = XEXP (XEXP (addr, 0), 0); | |
2455 | xoperands[3] = XEXP (XEXP (addr, 0), 1); | |
f38b27c7 JL |
2456 | output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}", |
2457 | xoperands); | |
d2d28085 | 2458 | return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0"; |
a89974a2 | 2459 | } |
a89974a2 | 2460 | } |
188538df TG |
2461 | } |
2462 | ||
2463 | /* If an operand is an unoffsettable memory ref, find a register | |
2464 | we can increment temporarily to make it refer to the second word. */ | |
2465 | ||
2466 | if (optype0 == MEMOP) | |
2467 | addreg0 = find_addr_reg (XEXP (operands[0], 0)); | |
2468 | ||
2469 | if (optype1 == MEMOP) | |
2470 | addreg1 = find_addr_reg (XEXP (operands[1], 0)); | |
2471 | ||
2472 | /* Ok, we can do one word at a time. | |
2473 | Normally we do the low-numbered word first. | |
2474 | ||
2475 | In either case, set up in LATEHALF the operands to use | |
2476 | for the high-numbered word and in some cases alter the | |
2477 | operands in OPERANDS to be suitable for the low-numbered word. */ | |
2478 | ||
2479 | if (optype0 == REGOP) | |
ad2c71b7 | 2480 | latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); |
188538df | 2481 | else if (optype0 == OFFSOP) |
b72f00af | 2482 | latehalf[0] = adjust_address (operands[0], SImode, 4); |
188538df TG |
2483 | else |
2484 | latehalf[0] = operands[0]; | |
2485 | ||
2486 | if (optype1 == REGOP) | |
ad2c71b7 | 2487 | latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1); |
188538df | 2488 | else if (optype1 == OFFSOP) |
b72f00af | 2489 | latehalf[1] = adjust_address (operands[1], SImode, 4); |
188538df TG |
2490 | else if (optype1 == CNSTOP) |
2491 | split_double (operands[1], &operands[1], &latehalf[1]); | |
2492 | else | |
2493 | latehalf[1] = operands[1]; | |
2494 | ||
2495 | /* If the first move would clobber the source of the second one, | |
2496 | do them in the other order. | |
2497 | ||
bad883f8 | 2498 | This can happen in two cases: |
188538df | 2499 | |
bad883f8 JL |
2500 | mem -> register where the first half of the destination register |
2501 | is the same register used in the memory's address. Reload | |
2502 | can create such insns. | |
188538df | 2503 | |
bad883f8 | 2504 | mem in this case will be either register indirect or register |
6619e96c | 2505 | indirect plus a valid offset. |
bad883f8 JL |
2506 | |
2507 | register -> register move where REGNO(dst) == REGNO(src + 1) | |
6619e96c | 2508 | someone (Tim/Tege?) claimed this can happen for parameter loads. |
bad883f8 JL |
2509 | |
2510 | Handle mem -> register case first. */ | |
2511 | if (optype0 == REGOP | |
2512 | && (optype1 == MEMOP || optype1 == OFFSOP) | |
2513 | && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1, | |
2514 | operands[1], 0)) | |
188538df | 2515 | { |
188538df TG |
2516 | /* Do the late half first. */ |
2517 | if (addreg1) | |
498ee10c | 2518 | output_asm_insn ("ldo 4(%0),%0", &addreg1); |
188538df | 2519 | output_asm_insn (singlemove_string (latehalf), latehalf); |
bad883f8 JL |
2520 | |
2521 | /* Then clobber. */ | |
188538df | 2522 | if (addreg1) |
498ee10c | 2523 | output_asm_insn ("ldo -4(%0),%0", &addreg1); |
188538df TG |
2524 | return singlemove_string (operands); |
2525 | } | |
2526 | ||
bad883f8 | 2527 | /* Now handle register -> register case. */ |
63a1f834 TG |
2528 | if (optype0 == REGOP && optype1 == REGOP |
2529 | && REGNO (operands[0]) == REGNO (operands[1]) + 1) | |
2530 | { | |
2531 | output_asm_insn (singlemove_string (latehalf), latehalf); | |
2532 | return singlemove_string (operands); | |
2533 | } | |
2534 | ||
188538df TG |
2535 | /* Normal case: do the two words, low-numbered first. */ |
2536 | ||
2537 | output_asm_insn (singlemove_string (operands), operands); | |
2538 | ||
2539 | /* Make any unoffsettable addresses point at high-numbered word. */ | |
2540 | if (addreg0) | |
498ee10c | 2541 | output_asm_insn ("ldo 4(%0),%0", &addreg0); |
188538df | 2542 | if (addreg1) |
498ee10c | 2543 | output_asm_insn ("ldo 4(%0),%0", &addreg1); |
188538df TG |
2544 | |
2545 | /* Do that word. */ | |
2546 | output_asm_insn (singlemove_string (latehalf), latehalf); | |
2547 | ||
2548 | /* Undo the adds we just did. */ | |
2549 | if (addreg0) | |
498ee10c | 2550 | output_asm_insn ("ldo -4(%0),%0", &addreg0); |
188538df | 2551 | if (addreg1) |
498ee10c | 2552 | output_asm_insn ("ldo -4(%0),%0", &addreg1); |
188538df TG |
2553 | |
2554 | return ""; | |
2555 | } | |
2556 | \f | |
519104fe | 2557 | const char * |
b7849684 | 2558 | output_fp_move_double (rtx *operands) |
188538df TG |
2559 | { |
2560 | if (FP_REG_P (operands[0])) | |
2561 | { | |
23f6f34f | 2562 | if (FP_REG_P (operands[1]) |
f048ca47 | 2563 | || operands[1] == CONST0_RTX (GET_MODE (operands[0]))) |
55abf18a | 2564 | output_asm_insn ("fcpy,dbl %f1,%0", operands); |
23f6f34f | 2565 | else |
2414e0e2 | 2566 | output_asm_insn ("fldd%F1 %1,%0", operands); |
188538df TG |
2567 | } |
2568 | else if (FP_REG_P (operands[1])) | |
2569 | { | |
2414e0e2 | 2570 | output_asm_insn ("fstd%F0 %1,%0", operands); |
188538df | 2571 | } |
144d51f9 | 2572 | else |
f048ca47 | 2573 | { |
144d51f9 NS |
2574 | rtx xoperands[2]; |
2575 | ||
2576 | gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0]))); | |
2577 | ||
23f6f34f | 2578 | /* This is a pain. You have to be prepared to deal with an |
ddd5a7c1 | 2579 | arbitrary address here including pre/post increment/decrement. |
f048ca47 JL |
2580 | |
2581 | so avoid this in the MD. */ | |
144d51f9 NS |
2582 | gcc_assert (GET_CODE (operands[0]) == REG); |
2583 | ||
2584 | xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1); | |
2585 | xoperands[0] = operands[0]; | |
2586 | output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands); | |
f048ca47 | 2587 | } |
188538df TG |
2588 | return ""; |
2589 | } | |
2590 | \f | |
2591 | /* Return a REG that occurs in ADDR with coefficient 1. | |
2592 | ADDR can be effectively incremented by incrementing REG. */ | |
2593 | ||
2594 | static rtx | |
b7849684 | 2595 | find_addr_reg (rtx addr) |
188538df TG |
2596 | { |
2597 | while (GET_CODE (addr) == PLUS) | |
2598 | { | |
2599 | if (GET_CODE (XEXP (addr, 0)) == REG) | |
2600 | addr = XEXP (addr, 0); | |
2601 | else if (GET_CODE (XEXP (addr, 1)) == REG) | |
2602 | addr = XEXP (addr, 1); | |
2603 | else if (CONSTANT_P (XEXP (addr, 0))) | |
2604 | addr = XEXP (addr, 1); | |
2605 | else if (CONSTANT_P (XEXP (addr, 1))) | |
2606 | addr = XEXP (addr, 0); | |
2607 | else | |
144d51f9 | 2608 | gcc_unreachable (); |
188538df | 2609 | } |
144d51f9 NS |
2610 | gcc_assert (GET_CODE (addr) == REG); |
2611 | return addr; | |
188538df TG |
2612 | } |
2613 | ||
188538df TG |
2614 | /* Emit code to perform a block move. |
2615 | ||
188538df TG |
2616 | OPERANDS[0] is the destination pointer as a REG, clobbered. |
2617 | OPERANDS[1] is the source pointer as a REG, clobbered. | |
68944452 | 2618 | OPERANDS[2] is a register for temporary storage. |
188538df | 2619 | OPERANDS[3] is a register for temporary storage. |
cdc9103c | 2620 | OPERANDS[4] is the size as a CONST_INT |
6619e96c | 2621 | OPERANDS[5] is the alignment safe to use, as a CONST_INT. |
71cc389b | 2622 | OPERANDS[6] is another temporary register. */ |
188538df | 2623 | |
519104fe | 2624 | const char * |
b7849684 | 2625 | output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED) |
188538df TG |
2626 | { |
2627 | int align = INTVAL (operands[5]); | |
68944452 | 2628 | unsigned long n_bytes = INTVAL (operands[4]); |
188538df | 2629 | |
cdc9103c | 2630 | /* We can't move more than a word at a time because the PA |
188538df | 2631 | has no longer integer move insns. (Could use fp mem ops?) */ |
cdc9103c JDA |
2632 | if (align > (TARGET_64BIT ? 8 : 4)) |
2633 | align = (TARGET_64BIT ? 8 : 4); | |
188538df | 2634 | |
68944452 JL |
2635 | /* Note that we know each loop below will execute at least twice |
2636 | (else we would have open-coded the copy). */ | |
2637 | switch (align) | |
188538df | 2638 | { |
cdc9103c JDA |
2639 | case 8: |
2640 | /* Pre-adjust the loop counter. */ | |
2641 | operands[4] = GEN_INT (n_bytes - 16); | |
2642 | output_asm_insn ("ldi %4,%2", operands); | |
2643 | ||
2644 | /* Copying loop. */ | |
2645 | output_asm_insn ("ldd,ma 8(%1),%3", operands); | |
2646 | output_asm_insn ("ldd,ma 8(%1),%6", operands); | |
2647 | output_asm_insn ("std,ma %3,8(%0)", operands); | |
2648 | output_asm_insn ("addib,>= -16,%2,.-12", operands); | |
2649 | output_asm_insn ("std,ma %6,8(%0)", operands); | |
2650 | ||
2651 | /* Handle the residual. There could be up to 7 bytes of | |
2652 | residual to copy! */ | |
2653 | if (n_bytes % 16 != 0) | |
2654 | { | |
2655 | operands[4] = GEN_INT (n_bytes % 8); | |
2656 | if (n_bytes % 16 >= 8) | |
2657 | output_asm_insn ("ldd,ma 8(%1),%3", operands); | |
2658 | if (n_bytes % 8 != 0) | |
2659 | output_asm_insn ("ldd 0(%1),%6", operands); | |
2660 | if (n_bytes % 16 >= 8) | |
2661 | output_asm_insn ("std,ma %3,8(%0)", operands); | |
2662 | if (n_bytes % 8 != 0) | |
2663 | output_asm_insn ("stdby,e %6,%4(%0)", operands); | |
2664 | } | |
2665 | return ""; | |
2666 | ||
68944452 JL |
2667 | case 4: |
2668 | /* Pre-adjust the loop counter. */ | |
2669 | operands[4] = GEN_INT (n_bytes - 8); | |
2670 | output_asm_insn ("ldi %4,%2", operands); | |
2671 | ||
2672 | /* Copying loop. */ | |
f38b27c7 JL |
2673 | output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands); |
2674 | output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands); | |
2675 | output_asm_insn ("{stws|stw},ma %3,4(%0)", operands); | |
68944452 | 2676 | output_asm_insn ("addib,>= -8,%2,.-12", operands); |
f38b27c7 | 2677 | output_asm_insn ("{stws|stw},ma %6,4(%0)", operands); |
68944452 JL |
2678 | |
2679 | /* Handle the residual. There could be up to 7 bytes of | |
2680 | residual to copy! */ | |
2681 | if (n_bytes % 8 != 0) | |
2682 | { | |
2683 | operands[4] = GEN_INT (n_bytes % 4); | |
2684 | if (n_bytes % 8 >= 4) | |
f38b27c7 | 2685 | output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands); |
68944452 | 2686 | if (n_bytes % 4 != 0) |
d2d28085 | 2687 | output_asm_insn ("ldw 0(%1),%6", operands); |
68944452 | 2688 | if (n_bytes % 8 >= 4) |
f38b27c7 | 2689 | output_asm_insn ("{stws|stw},ma %3,4(%0)", operands); |
68944452 | 2690 | if (n_bytes % 4 != 0) |
f38b27c7 | 2691 | output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands); |
68944452 JL |
2692 | } |
2693 | return ""; | |
188538df | 2694 | |
68944452 JL |
2695 | case 2: |
2696 | /* Pre-adjust the loop counter. */ | |
2697 | operands[4] = GEN_INT (n_bytes - 4); | |
2698 | output_asm_insn ("ldi %4,%2", operands); | |
188538df | 2699 | |
68944452 | 2700 | /* Copying loop. */ |
f38b27c7 JL |
2701 | output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands); |
2702 | output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands); | |
2703 | output_asm_insn ("{sths|sth},ma %3,2(%0)", operands); | |
68944452 | 2704 | output_asm_insn ("addib,>= -4,%2,.-12", operands); |
f38b27c7 | 2705 | output_asm_insn ("{sths|sth},ma %6,2(%0)", operands); |
188538df | 2706 | |
68944452 JL |
2707 | /* Handle the residual. */ |
2708 | if (n_bytes % 4 != 0) | |
2709 | { | |
2710 | if (n_bytes % 4 >= 2) | |
f38b27c7 | 2711 | output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands); |
68944452 | 2712 | if (n_bytes % 2 != 0) |
d2d28085 | 2713 | output_asm_insn ("ldb 0(%1),%6", operands); |
68944452 | 2714 | if (n_bytes % 4 >= 2) |
f38b27c7 | 2715 | output_asm_insn ("{sths|sth},ma %3,2(%0)", operands); |
68944452 | 2716 | if (n_bytes % 2 != 0) |
d2d28085 | 2717 | output_asm_insn ("stb %6,0(%0)", operands); |
68944452 JL |
2718 | } |
2719 | return ""; | |
188538df | 2720 | |
68944452 JL |
2721 | case 1: |
2722 | /* Pre-adjust the loop counter. */ | |
2723 | operands[4] = GEN_INT (n_bytes - 2); | |
2724 | output_asm_insn ("ldi %4,%2", operands); | |
188538df | 2725 | |
68944452 | 2726 | /* Copying loop. */ |
f38b27c7 JL |
2727 | output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands); |
2728 | output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands); | |
2729 | output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands); | |
68944452 | 2730 | output_asm_insn ("addib,>= -2,%2,.-12", operands); |
f38b27c7 | 2731 | output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands); |
188538df | 2732 | |
68944452 JL |
2733 | /* Handle the residual. */ |
2734 | if (n_bytes % 2 != 0) | |
2735 | { | |
d2d28085 JL |
2736 | output_asm_insn ("ldb 0(%1),%3", operands); |
2737 | output_asm_insn ("stb %3,0(%0)", operands); | |
68944452 JL |
2738 | } |
2739 | return ""; | |
188538df | 2740 | |
68944452 | 2741 | default: |
144d51f9 | 2742 | gcc_unreachable (); |
188538df | 2743 | } |
188538df | 2744 | } |
3673e996 RS |
2745 | |
2746 | /* Count the number of insns necessary to handle this block move. | |
2747 | ||
2748 | Basic structure is the same as emit_block_move, except that we | |
2749 | count insns rather than emit them. */ | |
2750 | ||
519104fe | 2751 | static int |
70128ad9 | 2752 | compute_movmem_length (rtx insn) |
3673e996 RS |
2753 | { |
2754 | rtx pat = PATTERN (insn); | |
a36a47ad GS |
2755 | unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0)); |
2756 | unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0)); | |
68944452 | 2757 | unsigned int n_insns = 0; |
3673e996 RS |
2758 | |
2759 | /* We can't move more than four bytes at a time because the PA | |
2760 | has no longer integer move insns. (Could use fp mem ops?) */ | |
cdc9103c JDA |
2761 | if (align > (TARGET_64BIT ? 8 : 4)) |
2762 | align = (TARGET_64BIT ? 8 : 4); | |
3673e996 | 2763 | |
90304f64 | 2764 | /* The basic copying loop. */ |
68944452 | 2765 | n_insns = 6; |
3673e996 | 2766 | |
68944452 JL |
2767 | /* Residuals. */ |
2768 | if (n_bytes % (2 * align) != 0) | |
3673e996 | 2769 | { |
90304f64 JL |
2770 | if ((n_bytes % (2 * align)) >= align) |
2771 | n_insns += 2; | |
2772 | ||
2773 | if ((n_bytes % align) != 0) | |
2774 | n_insns += 2; | |
3673e996 | 2775 | } |
68944452 JL |
2776 | |
2777 | /* Lengths are expressed in bytes now; each insn is 4 bytes. */ | |
2778 | return n_insns * 4; | |
3673e996 | 2779 | } |
cdc9103c JDA |
2780 | |
2781 | /* Emit code to perform a block clear. | |
2782 | ||
2783 | OPERANDS[0] is the destination pointer as a REG, clobbered. | |
2784 | OPERANDS[1] is a register for temporary storage. | |
2785 | OPERANDS[2] is the size as a CONST_INT | |
2786 | OPERANDS[3] is the alignment safe to use, as a CONST_INT. */ | |
2787 | ||
2788 | const char * | |
2789 | output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED) | |
2790 | { | |
2791 | int align = INTVAL (operands[3]); | |
2792 | unsigned long n_bytes = INTVAL (operands[2]); | |
2793 | ||
2794 | /* We can't clear more than a word at a time because the PA | |
2795 | has no longer integer move insns. */ | |
2796 | if (align > (TARGET_64BIT ? 8 : 4)) | |
2797 | align = (TARGET_64BIT ? 8 : 4); | |
2798 | ||
2799 | /* Note that we know each loop below will execute at least twice | |
2800 | (else we would have open-coded the copy). */ | |
2801 | switch (align) | |
2802 | { | |
2803 | case 8: | |
2804 | /* Pre-adjust the loop counter. */ | |
2805 | operands[2] = GEN_INT (n_bytes - 16); | |
2806 | output_asm_insn ("ldi %2,%1", operands); | |
2807 | ||
2808 | /* Loop. */ | |
2809 | output_asm_insn ("std,ma %%r0,8(%0)", operands); | |
2810 | output_asm_insn ("addib,>= -16,%1,.-4", operands); | |
2811 | output_asm_insn ("std,ma %%r0,8(%0)", operands); | |
2812 | ||
2813 | /* Handle the residual. There could be up to 7 bytes of | |
2814 | residual to copy! */ | |
2815 | if (n_bytes % 16 != 0) | |
2816 | { | |
2817 | operands[2] = GEN_INT (n_bytes % 8); | |
2818 | if (n_bytes % 16 >= 8) | |
2819 | output_asm_insn ("std,ma %%r0,8(%0)", operands); | |
2820 | if (n_bytes % 8 != 0) | |
2821 | output_asm_insn ("stdby,e %%r0,%2(%0)", operands); | |
2822 | } | |
2823 | return ""; | |
2824 | ||
2825 | case 4: | |
2826 | /* Pre-adjust the loop counter. */ | |
2827 | operands[2] = GEN_INT (n_bytes - 8); | |
2828 | output_asm_insn ("ldi %2,%1", operands); | |
2829 | ||
2830 | /* Loop. */ | |
2831 | output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands); | |
2832 | output_asm_insn ("addib,>= -8,%1,.-4", operands); | |
2833 | output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands); | |
2834 | ||
2835 | /* Handle the residual. There could be up to 7 bytes of | |
2836 | residual to copy! */ | |
2837 | if (n_bytes % 8 != 0) | |
2838 | { | |
2839 | operands[2] = GEN_INT (n_bytes % 4); | |
2840 | if (n_bytes % 8 >= 4) | |
2841 | output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands); | |
2842 | if (n_bytes % 4 != 0) | |
2843 | output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands); | |
2844 | } | |
2845 | return ""; | |
2846 | ||
2847 | case 2: | |
2848 | /* Pre-adjust the loop counter. */ | |
2849 | operands[2] = GEN_INT (n_bytes - 4); | |
2850 | output_asm_insn ("ldi %2,%1", operands); | |
2851 | ||
2852 | /* Loop. */ | |
2853 | output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands); | |
2854 | output_asm_insn ("addib,>= -4,%1,.-4", operands); | |
2855 | output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands); | |
2856 | ||
2857 | /* Handle the residual. */ | |
2858 | if (n_bytes % 4 != 0) | |
2859 | { | |
2860 | if (n_bytes % 4 >= 2) | |
2861 | output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands); | |
2862 | if (n_bytes % 2 != 0) | |
2863 | output_asm_insn ("stb %%r0,0(%0)", operands); | |
2864 | } | |
2865 | return ""; | |
2866 | ||
2867 | case 1: | |
2868 | /* Pre-adjust the loop counter. */ | |
2869 | operands[2] = GEN_INT (n_bytes - 2); | |
2870 | output_asm_insn ("ldi %2,%1", operands); | |
2871 | ||
2872 | /* Loop. */ | |
2873 | output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands); | |
2874 | output_asm_insn ("addib,>= -2,%1,.-4", operands); | |
2875 | output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands); | |
2876 | ||
2877 | /* Handle the residual. */ | |
2878 | if (n_bytes % 2 != 0) | |
2879 | output_asm_insn ("stb %%r0,0(%0)", operands); | |
2880 | ||
2881 | return ""; | |
2882 | ||
2883 | default: | |
144d51f9 | 2884 | gcc_unreachable (); |
cdc9103c JDA |
2885 | } |
2886 | } | |
2887 | ||
2888 | /* Count the number of insns necessary to handle this block move. | |
2889 | ||
2890 | Basic structure is the same as emit_block_move, except that we | |
2891 | count insns rather than emit them. */ | |
2892 | ||
2893 | static int | |
70128ad9 | 2894 | compute_clrmem_length (rtx insn) |
cdc9103c JDA |
2895 | { |
2896 | rtx pat = PATTERN (insn); | |
2897 | unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0)); | |
2898 | unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0)); | |
2899 | unsigned int n_insns = 0; | |
2900 | ||
2901 | /* We can't clear more than a word at a time because the PA | |
2902 | has no longer integer move insns. */ | |
2903 | if (align > (TARGET_64BIT ? 8 : 4)) | |
2904 | align = (TARGET_64BIT ? 8 : 4); | |
2905 | ||
2906 | /* The basic loop. */ | |
2907 | n_insns = 4; | |
2908 | ||
2909 | /* Residuals. */ | |
2910 | if (n_bytes % (2 * align) != 0) | |
2911 | { | |
2912 | if ((n_bytes % (2 * align)) >= align) | |
2913 | n_insns++; | |
2914 | ||
2915 | if ((n_bytes % align) != 0) | |
2916 | n_insns++; | |
2917 | } | |
2918 | ||
2919 | /* Lengths are expressed in bytes now; each insn is 4 bytes. */ | |
2920 | return n_insns * 4; | |
2921 | } | |
188538df TG |
2922 | \f |
2923 | ||
519104fe | 2924 | const char * |
b7849684 | 2925 | output_and (rtx *operands) |
0e7f4c19 | 2926 | { |
d2a94ec0 | 2927 | if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0) |
0e7f4c19 | 2928 | { |
0c235d7e | 2929 | unsigned HOST_WIDE_INT mask = INTVAL (operands[2]); |
0e7f4c19 TG |
2930 | int ls0, ls1, ms0, p, len; |
2931 | ||
2932 | for (ls0 = 0; ls0 < 32; ls0++) | |
2933 | if ((mask & (1 << ls0)) == 0) | |
2934 | break; | |
2935 | ||
2936 | for (ls1 = ls0; ls1 < 32; ls1++) | |
2937 | if ((mask & (1 << ls1)) != 0) | |
2938 | break; | |
2939 | ||
2940 | for (ms0 = ls1; ms0 < 32; ms0++) | |
2941 | if ((mask & (1 << ms0)) == 0) | |
2942 | break; | |
2943 | ||
144d51f9 | 2944 | gcc_assert (ms0 == 32); |
0e7f4c19 TG |
2945 | |
2946 | if (ls1 == 32) | |
2947 | { | |
2948 | len = ls0; | |
2949 | ||
144d51f9 | 2950 | gcc_assert (len); |
0e7f4c19 | 2951 | |
8919037c | 2952 | operands[2] = GEN_INT (len); |
f38b27c7 | 2953 | return "{extru|extrw,u} %1,31,%2,%0"; |
0e7f4c19 TG |
2954 | } |
2955 | else | |
2956 | { | |
2957 | /* We could use this `depi' for the case above as well, but `depi' | |
2958 | requires one more register file access than an `extru'. */ | |
2959 | ||
2960 | p = 31 - ls0; | |
2961 | len = ls1 - ls0; | |
2962 | ||
8919037c TG |
2963 | operands[2] = GEN_INT (p); |
2964 | operands[3] = GEN_INT (len); | |
f38b27c7 | 2965 | return "{depi|depwi} 0,%2,%3,%0"; |
0e7f4c19 TG |
2966 | } |
2967 | } | |
2968 | else | |
2969 | return "and %1,%2,%0"; | |
2970 | } | |
2971 | ||
520babc7 JL |
2972 | /* Return a string to perform a bitwise-and of operands[1] with operands[2] |
2973 | storing the result in operands[0]. */ | |
0952f89b | 2974 | const char * |
b7849684 | 2975 | output_64bit_and (rtx *operands) |
520babc7 JL |
2976 | { |
2977 | if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0) | |
2978 | { | |
2979 | unsigned HOST_WIDE_INT mask = INTVAL (operands[2]); | |
e0c556d3 | 2980 | int ls0, ls1, ms0, p, len; |
520babc7 JL |
2981 | |
2982 | for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++) | |
e0c556d3 | 2983 | if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0) |
520babc7 JL |
2984 | break; |
2985 | ||
2986 | for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++) | |
e0c556d3 | 2987 | if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0) |
520babc7 JL |
2988 | break; |
2989 | ||
2990 | for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++) | |
e0c556d3 | 2991 | if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0) |
520babc7 JL |
2992 | break; |
2993 | ||
144d51f9 | 2994 | gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT); |
520babc7 JL |
2995 | |
2996 | if (ls1 == HOST_BITS_PER_WIDE_INT) | |
2997 | { | |
2998 | len = ls0; | |
2999 | ||
144d51f9 | 3000 | gcc_assert (len); |
520babc7 JL |
3001 | |
3002 | operands[2] = GEN_INT (len); | |
3003 | return "extrd,u %1,63,%2,%0"; | |
3004 | } | |
3005 | else | |
3006 | { | |
3007 | /* We could use this `depi' for the case above as well, but `depi' | |
3008 | requires one more register file access than an `extru'. */ | |
3009 | ||
3010 | p = 63 - ls0; | |
3011 | len = ls1 - ls0; | |
3012 | ||
3013 | operands[2] = GEN_INT (p); | |
3014 | operands[3] = GEN_INT (len); | |
3015 | return "depdi 0,%2,%3,%0"; | |
3016 | } | |
3017 | } | |
3018 | else | |
3019 | return "and %1,%2,%0"; | |
3020 | } | |
3021 | ||
519104fe | 3022 | const char * |
b7849684 | 3023 | output_ior (rtx *operands) |
0e7f4c19 | 3024 | { |
0c235d7e | 3025 | unsigned HOST_WIDE_INT mask = INTVAL (operands[2]); |
f1c7ce82 | 3026 | int bs0, bs1, p, len; |
23f6f34f | 3027 | |
8365d59b TG |
3028 | if (INTVAL (operands[2]) == 0) |
3029 | return "copy %1,%0"; | |
0e7f4c19 | 3030 | |
8365d59b TG |
3031 | for (bs0 = 0; bs0 < 32; bs0++) |
3032 | if ((mask & (1 << bs0)) != 0) | |
3033 | break; | |
0e7f4c19 | 3034 | |
8365d59b TG |
3035 | for (bs1 = bs0; bs1 < 32; bs1++) |
3036 | if ((mask & (1 << bs1)) == 0) | |
3037 | break; | |
0e7f4c19 | 3038 | |
144d51f9 | 3039 | gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask); |
0e7f4c19 | 3040 | |
8365d59b TG |
3041 | p = 31 - bs0; |
3042 | len = bs1 - bs0; | |
0e7f4c19 | 3043 | |
8919037c TG |
3044 | operands[2] = GEN_INT (p); |
3045 | operands[3] = GEN_INT (len); | |
f38b27c7 | 3046 | return "{depi|depwi} -1,%2,%3,%0"; |
0e7f4c19 | 3047 | } |
520babc7 JL |
3048 | |
3049 | /* Return a string to perform a bitwise-and of operands[1] with operands[2] | |
3050 | storing the result in operands[0]. */ | |
0952f89b | 3051 | const char * |
b7849684 | 3052 | output_64bit_ior (rtx *operands) |
520babc7 JL |
3053 | { |
3054 | unsigned HOST_WIDE_INT mask = INTVAL (operands[2]); | |
e0c556d3 | 3055 | int bs0, bs1, p, len; |
520babc7 JL |
3056 | |
3057 | if (INTVAL (operands[2]) == 0) | |
3058 | return "copy %1,%0"; | |
3059 | ||
3060 | for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++) | |
e0c556d3 | 3061 | if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0) |
520babc7 JL |
3062 | break; |
3063 | ||
3064 | for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++) | |
e0c556d3 | 3065 | if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0) |
520babc7 JL |
3066 | break; |
3067 | ||
144d51f9 NS |
3068 | gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT |
3069 | || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask); | |
520babc7 JL |
3070 | |
3071 | p = 63 - bs0; | |
3072 | len = bs1 - bs0; | |
3073 | ||
3074 | operands[2] = GEN_INT (p); | |
3075 | operands[3] = GEN_INT (len); | |
3076 | return "depdi -1,%2,%3,%0"; | |
3077 | } | |
0e7f4c19 | 3078 | \f |
301d03af | 3079 | /* Target hook for assembling integer objects. This code handles |
cdcb88d7 JDA |
3080 | aligned SI and DI integers specially since function references |
3081 | must be preceded by P%. */ | |
301d03af RS |
3082 | |
3083 | static bool | |
b7849684 | 3084 | pa_assemble_integer (rtx x, unsigned int size, int aligned_p) |
301d03af | 3085 | { |
cdcb88d7 JDA |
3086 | if (size == UNITS_PER_WORD |
3087 | && aligned_p | |
301d03af RS |
3088 | && function_label_operand (x, VOIDmode)) |
3089 | { | |
3090 | fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file); | |
3091 | output_addr_const (asm_out_file, x); | |
3092 | fputc ('\n', asm_out_file); | |
3093 | return true; | |
3094 | } | |
3095 | return default_assemble_integer (x, size, aligned_p); | |
3096 | } | |
3097 | \f | |
188538df | 3098 | /* Output an ascii string. */ |
f1c7ce82 | 3099 | void |
b7849684 | 3100 | output_ascii (FILE *file, const char *p, int size) |
188538df TG |
3101 | { |
3102 | int i; | |
3103 | int chars_output; | |
71cc389b | 3104 | unsigned char partial_output[16]; /* Max space 4 chars can occupy. */ |
188538df TG |
3105 | |
3106 | /* The HP assembler can only take strings of 256 characters at one | |
3107 | time. This is a limitation on input line length, *not* the | |
3108 | length of the string. Sigh. Even worse, it seems that the | |
3109 | restriction is in number of input characters (see \xnn & | |
3110 | \whatever). So we have to do this very carefully. */ | |
3111 | ||
e236a9ff | 3112 | fputs ("\t.STRING \"", file); |
188538df TG |
3113 | |
3114 | chars_output = 0; | |
3115 | for (i = 0; i < size; i += 4) | |
3116 | { | |
3117 | int co = 0; | |
3118 | int io = 0; | |
3119 | for (io = 0, co = 0; io < MIN (4, size - i); io++) | |
3120 | { | |
6b5ffd4e | 3121 | register unsigned int c = (unsigned char) p[i + io]; |
188538df TG |
3122 | |
3123 | if (c == '\"' || c == '\\') | |
3124 | partial_output[co++] = '\\'; | |
3125 | if (c >= ' ' && c < 0177) | |
3126 | partial_output[co++] = c; | |
3127 | else | |
3128 | { | |
3129 | unsigned int hexd; | |
3130 | partial_output[co++] = '\\'; | |
3131 | partial_output[co++] = 'x'; | |
3132 | hexd = c / 16 - 0 + '0'; | |
3133 | if (hexd > '9') | |
3134 | hexd -= '9' - 'a' + 1; | |
3135 | partial_output[co++] = hexd; | |
3136 | hexd = c % 16 - 0 + '0'; | |
3137 | if (hexd > '9') | |
3138 | hexd -= '9' - 'a' + 1; | |
3139 | partial_output[co++] = hexd; | |
3140 | } | |
3141 | } | |
3142 | if (chars_output + co > 243) | |
3143 | { | |
e236a9ff | 3144 | fputs ("\"\n\t.STRING \"", file); |
188538df TG |
3145 | chars_output = 0; |
3146 | } | |
823fbbce | 3147 | fwrite (partial_output, 1, (size_t) co, file); |
188538df TG |
3148 | chars_output += co; |
3149 | co = 0; | |
3150 | } | |
e236a9ff | 3151 | fputs ("\"\n", file); |
188538df | 3152 | } |
5621d717 JL |
3153 | |
3154 | /* Try to rewrite floating point comparisons & branches to avoid | |
3155 | useless add,tr insns. | |
3156 | ||
3157 | CHECK_NOTES is nonzero if we should examine REG_DEAD notes | |
3158 | to see if FPCC is dead. CHECK_NOTES is nonzero for the | |
3159 | first attempt to remove useless add,tr insns. It is zero | |
3160 | for the second pass as reorg sometimes leaves bogus REG_DEAD | |
3161 | notes lying around. | |
3162 | ||
3163 | When CHECK_NOTES is zero we can only eliminate add,tr insns | |
3164 | when there's a 1:1 correspondence between fcmp and ftest/fbranch | |
3165 | instructions. */ | |
519104fe | 3166 | static void |
b7849684 | 3167 | remove_useless_addtr_insns (int check_notes) |
5621d717 JL |
3168 | { |
3169 | rtx insn; | |
5621d717 JL |
3170 | static int pass = 0; |
3171 | ||
3172 | /* This is fairly cheap, so always run it when optimizing. */ | |
3173 | if (optimize > 0) | |
3174 | { | |
3175 | int fcmp_count = 0; | |
3176 | int fbranch_count = 0; | |
3177 | ||
3178 | /* Walk all the insns in this function looking for fcmp & fbranch | |
3179 | instructions. Keep track of how many of each we find. */ | |
18dbd950 | 3180 | for (insn = get_insns (); insn; insn = next_insn (insn)) |
5621d717 JL |
3181 | { |
3182 | rtx tmp; | |
3183 | ||
3184 | /* Ignore anything that isn't an INSN or a JUMP_INSN. */ | |
3185 | if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN) | |
3186 | continue; | |
3187 | ||
3188 | tmp = PATTERN (insn); | |
3189 | ||
3190 | /* It must be a set. */ | |
3191 | if (GET_CODE (tmp) != SET) | |
3192 | continue; | |
3193 | ||
3194 | /* If the destination is CCFP, then we've found an fcmp insn. */ | |
3195 | tmp = SET_DEST (tmp); | |
3196 | if (GET_CODE (tmp) == REG && REGNO (tmp) == 0) | |
3197 | { | |
3198 | fcmp_count++; | |
3199 | continue; | |
3200 | } | |
6619e96c | 3201 | |
5621d717 JL |
3202 | tmp = PATTERN (insn); |
3203 | /* If this is an fbranch instruction, bump the fbranch counter. */ | |
3204 | if (GET_CODE (tmp) == SET | |
3205 | && SET_DEST (tmp) == pc_rtx | |
3206 | && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE | |
3207 | && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE | |
3208 | && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG | |
3209 | && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0) | |
3210 | { | |
3211 | fbranch_count++; | |
3212 | continue; | |
3213 | } | |
3214 | } | |
3215 | ||
3216 | ||
3217 | /* Find all floating point compare + branch insns. If possible, | |
3218 | reverse the comparison & the branch to avoid add,tr insns. */ | |
18dbd950 | 3219 | for (insn = get_insns (); insn; insn = next_insn (insn)) |
5621d717 JL |
3220 | { |
3221 | rtx tmp, next; | |
3222 | ||
3223 | /* Ignore anything that isn't an INSN. */ | |
3224 | if (GET_CODE (insn) != INSN) | |
3225 | continue; | |
3226 | ||
3227 | tmp = PATTERN (insn); | |
3228 | ||
3229 | /* It must be a set. */ | |
3230 | if (GET_CODE (tmp) != SET) | |
3231 | continue; | |
3232 | ||
3233 | /* The destination must be CCFP, which is register zero. */ | |
3234 | tmp = SET_DEST (tmp); | |
3235 | if (GET_CODE (tmp) != REG || REGNO (tmp) != 0) | |
3236 | continue; | |
3237 | ||
3238 | /* INSN should be a set of CCFP. | |
3239 | ||
3240 | See if the result of this insn is used in a reversed FP | |
3241 | conditional branch. If so, reverse our condition and | |
3242 | the branch. Doing so avoids useless add,tr insns. */ | |
3243 | next = next_insn (insn); | |
3244 | while (next) | |
3245 | { | |
3246 | /* Jumps, calls and labels stop our search. */ | |
3247 | if (GET_CODE (next) == JUMP_INSN | |
3248 | || GET_CODE (next) == CALL_INSN | |
3249 | || GET_CODE (next) == CODE_LABEL) | |
3250 | break; | |
3251 | ||
3252 | /* As does another fcmp insn. */ | |
3253 | if (GET_CODE (next) == INSN | |
3254 | && GET_CODE (PATTERN (next)) == SET | |
3255 | && GET_CODE (SET_DEST (PATTERN (next))) == REG | |
3256 | && REGNO (SET_DEST (PATTERN (next))) == 0) | |
3257 | break; | |
3258 | ||
3259 | next = next_insn (next); | |
3260 | } | |
3261 | ||
3262 | /* Is NEXT_INSN a branch? */ | |
3263 | if (next | |
3264 | && GET_CODE (next) == JUMP_INSN) | |
3265 | { | |
3266 | rtx pattern = PATTERN (next); | |
3267 | ||
112cdef5 | 3268 | /* If it a reversed fp conditional branch (e.g. uses add,tr) |
5621d717 JL |
3269 | and CCFP dies, then reverse our conditional and the branch |
3270 | to avoid the add,tr. */ | |
3271 | if (GET_CODE (pattern) == SET | |
3272 | && SET_DEST (pattern) == pc_rtx | |
3273 | && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE | |
3274 | && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE | |
3275 | && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG | |
3276 | && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0 | |
3277 | && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC | |
3278 | && (fcmp_count == fbranch_count | |
3279 | || (check_notes | |
3280 | && find_regno_note (next, REG_DEAD, 0)))) | |
3281 | { | |
3282 | /* Reverse the branch. */ | |
3283 | tmp = XEXP (SET_SRC (pattern), 1); | |
3284 | XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2); | |
3285 | XEXP (SET_SRC (pattern), 2) = tmp; | |
3286 | INSN_CODE (next) = -1; | |
3287 | ||
3288 | /* Reverse our condition. */ | |
3289 | tmp = PATTERN (insn); | |
3290 | PUT_CODE (XEXP (tmp, 1), | |
831c1763 AM |
3291 | (reverse_condition_maybe_unordered |
3292 | (GET_CODE (XEXP (tmp, 1))))); | |
5621d717 JL |
3293 | } |
3294 | } | |
3295 | } | |
3296 | } | |
3297 | ||
3298 | pass = !pass; | |
3299 | ||
3300 | } | |
188538df | 3301 | \f |
831c1763 AM |
3302 | /* You may have trouble believing this, but this is the 32 bit HP-PA |
3303 | stack layout. Wow. | |
188538df TG |
3304 | |
3305 | Offset Contents | |
3306 | ||
3307 | Variable arguments (optional; any number may be allocated) | |
3308 | ||
3309 | SP-(4*(N+9)) arg word N | |
3310 | : : | |
3311 | SP-56 arg word 5 | |
3312 | SP-52 arg word 4 | |
3313 | ||
3314 | Fixed arguments (must be allocated; may remain unused) | |
3315 | ||
3316 | SP-48 arg word 3 | |
3317 | SP-44 arg word 2 | |
3318 | SP-40 arg word 1 | |
3319 | SP-36 arg word 0 | |
3320 | ||
3321 | Frame Marker | |
3322 | ||
3323 | SP-32 External Data Pointer (DP) | |
3324 | SP-28 External sr4 | |
3325 | SP-24 External/stub RP (RP') | |
3326 | SP-20 Current RP | |
3327 | SP-16 Static Link | |
3328 | SP-12 Clean up | |
3329 | SP-8 Calling Stub RP (RP'') | |
3330 | SP-4 Previous SP | |
3331 | ||
3332 | Top of Frame | |
3333 | ||
3334 | SP-0 Stack Pointer (points to next available address) | |
3335 | ||
3336 | */ | |
3337 | ||
3338 | /* This function saves registers as follows. Registers marked with ' are | |
3339 | this function's registers (as opposed to the previous function's). | |
3340 | If a frame_pointer isn't needed, r4 is saved as a general register; | |
3341 | the space for the frame pointer is still allocated, though, to keep | |
3342 | things simple. | |
3343 | ||
3344 | ||
3345 | Top of Frame | |
3346 | ||
3347 | SP (FP') Previous FP | |
3348 | SP + 4 Alignment filler (sigh) | |
3349 | SP + 8 Space for locals reserved here. | |
3350 | . | |
3351 | . | |
3352 | . | |
3353 | SP + n All call saved register used. | |
3354 | . | |
3355 | . | |
3356 | . | |
3357 | SP + o All call saved fp registers used. | |
3358 | . | |
3359 | . | |
3360 | . | |
3361 | SP + p (SP') points to next available address. | |
23f6f34f | 3362 | |
188538df TG |
3363 | */ |
3364 | ||
08c148a8 | 3365 | /* Global variables set by output_function_prologue(). */ |
19ec6a36 AM |
3366 | /* Size of frame. Need to know this to emit return insns from |
3367 | leaf procedures. */ | |
a4295210 JDA |
3368 | static HOST_WIDE_INT actual_fsize, local_fsize; |
3369 | static int save_fregs; | |
19ec6a36 | 3370 | |
aadcdb45 | 3371 | /* Emit RTL to store REG at the memory location specified by BASE+DISP. |
fc82f2f1 | 3372 | Handle case where DISP > 8k by using the add_high_const patterns. |
aadcdb45 JL |
3373 | |
3374 | Note in DISP > 8k case, we will leave the high part of the address | |
3375 | in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this.*/ | |
c5c76735 | 3376 | |
f6bcf44c | 3377 | static void |
a4295210 | 3378 | store_reg (int reg, HOST_WIDE_INT disp, int base) |
188538df | 3379 | { |
f6bcf44c | 3380 | rtx insn, dest, src, basereg; |
19ec6a36 AM |
3381 | |
3382 | src = gen_rtx_REG (word_mode, reg); | |
3383 | basereg = gen_rtx_REG (Pmode, base); | |
188538df | 3384 | if (VAL_14_BITS_P (disp)) |
aadcdb45 | 3385 | { |
19ec6a36 | 3386 | dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp)); |
f6bcf44c | 3387 | insn = emit_move_insn (dest, src); |
aadcdb45 | 3388 | } |
a4295210 JDA |
3389 | else if (TARGET_64BIT && !VAL_32_BITS_P (disp)) |
3390 | { | |
3391 | rtx delta = GEN_INT (disp); | |
3392 | rtx tmpreg = gen_rtx_REG (Pmode, 1); | |
3393 | ||
3394 | emit_move_insn (tmpreg, delta); | |
5dcc9605 | 3395 | insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg)); |
a4295210 JDA |
3396 | if (DO_FRAME_NOTES) |
3397 | { | |
bbbbb16a ILT |
3398 | add_reg_note (insn, REG_FRAME_RELATED_EXPR, |
3399 | gen_rtx_SET (VOIDmode, tmpreg, | |
3400 | gen_rtx_PLUS (Pmode, basereg, delta))); | |
5dcc9605 | 3401 | RTX_FRAME_RELATED_P (insn) = 1; |
a4295210 | 3402 | } |
5dcc9605 JDA |
3403 | dest = gen_rtx_MEM (word_mode, tmpreg); |
3404 | insn = emit_move_insn (dest, src); | |
a4295210 | 3405 | } |
aadcdb45 JL |
3406 | else |
3407 | { | |
19ec6a36 AM |
3408 | rtx delta = GEN_INT (disp); |
3409 | rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta)); | |
3410 | rtx tmpreg = gen_rtx_REG (Pmode, 1); | |
a4295210 | 3411 | |
19ec6a36 AM |
3412 | emit_move_insn (tmpreg, high); |
3413 | dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta)); | |
f6bcf44c JDA |
3414 | insn = emit_move_insn (dest, src); |
3415 | if (DO_FRAME_NOTES) | |
bbbbb16a ILT |
3416 | add_reg_note (insn, REG_FRAME_RELATED_EXPR, |
3417 | gen_rtx_SET (VOIDmode, | |
3418 | gen_rtx_MEM (word_mode, | |
3419 | gen_rtx_PLUS (word_mode, | |
3420 | basereg, | |
3421 | delta)), | |
3422 | src)); | |
aadcdb45 | 3423 | } |
f6bcf44c JDA |
3424 | |
3425 | if (DO_FRAME_NOTES) | |
3426 | RTX_FRAME_RELATED_P (insn) = 1; | |
aadcdb45 JL |
3427 | } |
3428 | ||
823fbbce JDA |
3429 | /* Emit RTL to store REG at the memory location specified by BASE and then |
3430 | add MOD to BASE. MOD must be <= 8k. */ | |
aadcdb45 | 3431 | |
823fbbce | 3432 | static void |
a4295210 | 3433 | store_reg_modify (int base, int reg, HOST_WIDE_INT mod) |
823fbbce JDA |
3434 | { |
3435 | rtx insn, basereg, srcreg, delta; | |
3436 | ||
144d51f9 | 3437 | gcc_assert (VAL_14_BITS_P (mod)); |
823fbbce JDA |
3438 | |
3439 | basereg = gen_rtx_REG (Pmode, base); | |
3440 | srcreg = gen_rtx_REG (word_mode, reg); | |
3441 | delta = GEN_INT (mod); | |
3442 | ||
3443 | insn = emit_insn (gen_post_store (basereg, srcreg, delta)); | |
3444 | if (DO_FRAME_NOTES) | |
3445 | { | |
3446 | RTX_FRAME_RELATED_P (insn) = 1; | |
3447 | ||
3448 | /* RTX_FRAME_RELATED_P must be set on each frame related set | |
77c4f044 RH |
3449 | in a parallel with more than one element. */ |
3450 | RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1; | |
3451 | RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1; | |
823fbbce JDA |
3452 | } |
3453 | } | |
3454 | ||
3455 | /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case | |
3456 | where DISP > 8k by using the add_high_const patterns. NOTE indicates | |
3457 | whether to add a frame note or not. | |
3458 | ||
3459 | In the DISP > 8k case, we leave the high part of the address in %r1. | |
3460 | There is code in expand_hppa_{prologue,epilogue} that knows about this. */ | |
c5c76735 | 3461 | |
f6bcf44c | 3462 | static void |
a4295210 | 3463 | set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note) |
188538df | 3464 | { |
f6bcf44c | 3465 | rtx insn; |
19ec6a36 | 3466 | |
188538df | 3467 | if (VAL_14_BITS_P (disp)) |
19ec6a36 | 3468 | { |
f6bcf44c JDA |
3469 | insn = emit_move_insn (gen_rtx_REG (Pmode, reg), |
3470 | plus_constant (gen_rtx_REG (Pmode, base), disp)); | |
19ec6a36 | 3471 | } |
a4295210 JDA |
3472 | else if (TARGET_64BIT && !VAL_32_BITS_P (disp)) |
3473 | { | |
3474 | rtx basereg = gen_rtx_REG (Pmode, base); | |
3475 | rtx delta = GEN_INT (disp); | |
3476 | rtx tmpreg = gen_rtx_REG (Pmode, 1); | |
3477 | ||
3478 | emit_move_insn (tmpreg, delta); | |
3479 | insn = emit_move_insn (gen_rtx_REG (Pmode, reg), | |
3480 | gen_rtx_PLUS (Pmode, tmpreg, basereg)); | |
5dcc9605 | 3481 | if (DO_FRAME_NOTES) |
bbbbb16a ILT |
3482 | add_reg_note (insn, REG_FRAME_RELATED_EXPR, |
3483 | gen_rtx_SET (VOIDmode, tmpreg, | |
3484 | gen_rtx_PLUS (Pmode, basereg, delta))); | |
a4295210 | 3485 | } |
188538df | 3486 | else |
aadcdb45 | 3487 | { |
f6bcf44c | 3488 | rtx basereg = gen_rtx_REG (Pmode, base); |
19ec6a36 | 3489 | rtx delta = GEN_INT (disp); |
a4295210 | 3490 | rtx tmpreg = gen_rtx_REG (Pmode, 1); |
f6bcf44c | 3491 | |
a4295210 | 3492 | emit_move_insn (tmpreg, |
f6bcf44c | 3493 | gen_rtx_PLUS (Pmode, basereg, |
19ec6a36 | 3494 | gen_rtx_HIGH (Pmode, delta))); |
f6bcf44c | 3495 | insn = emit_move_insn (gen_rtx_REG (Pmode, reg), |
a4295210 | 3496 | gen_rtx_LO_SUM (Pmode, tmpreg, delta)); |
aadcdb45 | 3497 | } |
f6bcf44c | 3498 | |
823fbbce | 3499 | if (DO_FRAME_NOTES && note) |
f6bcf44c | 3500 | RTX_FRAME_RELATED_P (insn) = 1; |
188538df TG |
3501 | } |
3502 | ||
a4295210 JDA |
3503 | HOST_WIDE_INT |
3504 | compute_frame_size (HOST_WIDE_INT size, int *fregs_live) | |
188538df | 3505 | { |
95f3f59e JDA |
3506 | int freg_saved = 0; |
3507 | int i, j; | |
3508 | ||
3509 | /* The code in hppa_expand_prologue and hppa_expand_epilogue must | |
3510 | be consistent with the rounding and size calculation done here. | |
3511 | Change them at the same time. */ | |
3512 | ||
3513 | /* We do our own stack alignment. First, round the size of the | |
3514 | stack locals up to a word boundary. */ | |
3515 | size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1); | |
3516 | ||
3517 | /* Space for previous frame pointer + filler. If any frame is | |
3518 | allocated, we need to add in the STARTING_FRAME_OFFSET. We | |
3519 | waste some space here for the sake of HP compatibility. The | |
3520 | first slot is only used when the frame pointer is needed. */ | |
3521 | if (size || frame_pointer_needed) | |
3522 | size += STARTING_FRAME_OFFSET; | |
3523 | ||
823fbbce JDA |
3524 | /* If the current function calls __builtin_eh_return, then we need |
3525 | to allocate stack space for registers that will hold data for | |
3526 | the exception handler. */ | |
e3b5732b | 3527 | if (DO_FRAME_NOTES && crtl->calls_eh_return) |
823fbbce JDA |
3528 | { |
3529 | unsigned int i; | |
3530 | ||
3531 | for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i) | |
3532 | continue; | |
95f3f59e | 3533 | size += i * UNITS_PER_WORD; |
823fbbce JDA |
3534 | } |
3535 | ||
6261ede7 | 3536 | /* Account for space used by the callee general register saves. */ |
95f3f59e | 3537 | for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--) |
6fb5fa3c | 3538 | if (df_regs_ever_live_p (i)) |
95f3f59e | 3539 | size += UNITS_PER_WORD; |
80225b66 | 3540 | |
6261ede7 | 3541 | /* Account for space used by the callee floating point register saves. */ |
88624c0e | 3542 | for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP) |
6fb5fa3c DB |
3543 | if (df_regs_ever_live_p (i) |
3544 | || (!TARGET_64BIT && df_regs_ever_live_p (i + 1))) | |
80225b66 | 3545 | { |
95f3f59e | 3546 | freg_saved = 1; |
9e18f575 | 3547 | |
6261ede7 JL |
3548 | /* We always save both halves of the FP register, so always |
3549 | increment the frame size by 8 bytes. */ | |
95f3f59e | 3550 | size += 8; |
80225b66 TG |
3551 | } |
3552 | ||
95f3f59e JDA |
3553 | /* If any of the floating registers are saved, account for the |
3554 | alignment needed for the floating point register save block. */ | |
3555 | if (freg_saved) | |
3556 | { | |
3557 | size = (size + 7) & ~7; | |
3558 | if (fregs_live) | |
3559 | *fregs_live = 1; | |
3560 | } | |
3561 | ||
6261ede7 | 3562 | /* The various ABIs include space for the outgoing parameters in the |
95f3f59e JDA |
3563 | size of the current function's stack frame. We don't need to align |
3564 | for the outgoing arguments as their alignment is set by the final | |
3565 | rounding for the frame as a whole. */ | |
38173d38 | 3566 | size += crtl->outgoing_args_size; |
6261ede7 JL |
3567 | |
3568 | /* Allocate space for the fixed frame marker. This space must be | |
685d0e07 | 3569 | allocated for any function that makes calls or allocates |
6261ede7 | 3570 | stack space. */ |
95f3f59e | 3571 | if (!current_function_is_leaf || size) |
685d0e07 | 3572 | size += TARGET_64BIT ? 48 : 32; |
520babc7 | 3573 | |
95f3f59e | 3574 | /* Finally, round to the preferred stack boundary. */ |
5fad1c24 JDA |
3575 | return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1) |
3576 | & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)); | |
188538df | 3577 | } |
23f6f34f | 3578 | |
08c148a8 NB |
3579 | /* Generate the assembly code for function entry. FILE is a stdio |
3580 | stream to output the code to. SIZE is an int: how many units of | |
3581 | temporary storage to allocate. | |
3582 | ||
3583 | Refer to the array `regs_ever_live' to determine which registers to | |
3584 | save; `regs_ever_live[I]' is nonzero if register number I is ever | |
3585 | used in the function. This function is responsible for knowing | |
3586 | which registers should not be saved even if used. */ | |
3587 | ||
3588 | /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block | |
3589 | of memory. If any fpu reg is used in the function, we allocate | |
3590 | such a block here, at the bottom of the frame, just in case it's needed. | |
3591 | ||
3592 | If this function is a leaf procedure, then we may choose not | |
3593 | to do a "save" insn. The decision about whether or not | |
3594 | to do this is made in regclass.c. */ | |
3595 | ||
c590b625 | 3596 | static void |
b7849684 | 3597 | pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED) |
188538df | 3598 | { |
ba0bfdac JL |
3599 | /* The function's label and associated .PROC must never be |
3600 | separated and must be output *after* any profiling declarations | |
3601 | to avoid changing spaces/subspaces within a procedure. */ | |
3602 | ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); | |
3603 | fputs ("\t.PROC\n", file); | |
3604 | ||
aadcdb45 JL |
3605 | /* hppa_expand_prologue does the dirty work now. We just need |
3606 | to output the assembler directives which denote the start | |
3607 | of a function. */ | |
a4295210 | 3608 | fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize); |
16c16a24 | 3609 | if (current_function_is_leaf) |
e236a9ff | 3610 | fputs (",NO_CALLS", file); |
16c16a24 JDA |
3611 | else |
3612 | fputs (",CALLS", file); | |
3613 | if (rp_saved) | |
3614 | fputs (",SAVE_RP", file); | |
da3c3336 | 3615 | |
685d0e07 JDA |
3616 | /* The SAVE_SP flag is used to indicate that register %r3 is stored |
3617 | at the beginning of the frame and that it is used as the frame | |
3618 | pointer for the frame. We do this because our current frame | |
a4d05547 | 3619 | layout doesn't conform to that specified in the HP runtime |
685d0e07 JDA |
3620 | documentation and we need a way to indicate to programs such as |
3621 | GDB where %r3 is saved. The SAVE_SP flag was chosen because it | |
3622 | isn't used by HP compilers but is supported by the assembler. | |
3623 | However, SAVE_SP is supposed to indicate that the previous stack | |
3624 | pointer has been saved in the frame marker. */ | |
da3c3336 | 3625 | if (frame_pointer_needed) |
e236a9ff | 3626 | fputs (",SAVE_SP", file); |
da3c3336 | 3627 | |
68386e1e | 3628 | /* Pass on information about the number of callee register saves |
e8cfae5c JL |
3629 | performed in the prologue. |
3630 | ||
3631 | The compiler is supposed to pass the highest register number | |
23f6f34f | 3632 | saved, the assembler then has to adjust that number before |
e8cfae5c | 3633 | entering it into the unwind descriptor (to account for any |
23f6f34f | 3634 | caller saved registers with lower register numbers than the |
e8cfae5c JL |
3635 | first callee saved register). */ |
3636 | if (gr_saved) | |
3637 | fprintf (file, ",ENTRY_GR=%d", gr_saved + 2); | |
3638 | ||
3639 | if (fr_saved) | |
3640 | fprintf (file, ",ENTRY_FR=%d", fr_saved + 11); | |
68386e1e | 3641 | |
e236a9ff | 3642 | fputs ("\n\t.ENTRY\n", file); |
aadcdb45 | 3643 | |
18dbd950 | 3644 | remove_useless_addtr_insns (0); |
aadcdb45 JL |
3645 | } |
3646 | ||
f1c7ce82 | 3647 | void |
b7849684 | 3648 | hppa_expand_prologue (void) |
aadcdb45 | 3649 | { |
4971c587 | 3650 | int merge_sp_adjust_with_store = 0; |
a4295210 JDA |
3651 | HOST_WIDE_INT size = get_frame_size (); |
3652 | HOST_WIDE_INT offset; | |
3653 | int i; | |
823fbbce | 3654 | rtx insn, tmpreg; |
aadcdb45 | 3655 | |
68386e1e JL |
3656 | gr_saved = 0; |
3657 | fr_saved = 0; | |
8a9c76f3 | 3658 | save_fregs = 0; |
6261ede7 | 3659 | |
95f3f59e JDA |
3660 | /* Compute total size for frame pointer, filler, locals and rounding to |
3661 | the next word boundary. Similar code appears in compute_frame_size | |
3662 | and must be changed in tandem with this code. */ | |
3663 | local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1); | |
3664 | if (local_fsize || frame_pointer_needed) | |
3665 | local_fsize += STARTING_FRAME_OFFSET; | |
6261ede7 | 3666 | |
2b41935c | 3667 | actual_fsize = compute_frame_size (size, &save_fregs); |
188538df | 3668 | |
aadcdb45 | 3669 | /* Compute a few things we will use often. */ |
690d4228 | 3670 | tmpreg = gen_rtx_REG (word_mode, 1); |
188538df | 3671 | |
23f6f34f | 3672 | /* Save RP first. The calling conventions manual states RP will |
19ec6a36 | 3673 | always be stored into the caller's frame at sp - 20 or sp - 16 |
520babc7 | 3674 | depending on which ABI is in use. */ |
e3b5732b | 3675 | if (df_regs_ever_live_p (2) || crtl->calls_eh_return) |
16c16a24 JDA |
3676 | { |
3677 | store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM); | |
3678 | rp_saved = true; | |
3679 | } | |
3680 | else | |
3681 | rp_saved = false; | |
23f6f34f | 3682 | |
aadcdb45 | 3683 | /* Allocate the local frame and set up the frame pointer if needed. */ |
31d68947 AM |
3684 | if (actual_fsize != 0) |
3685 | { | |
3686 | if (frame_pointer_needed) | |
3687 | { | |
3688 | /* Copy the old frame pointer temporarily into %r1. Set up the | |
3689 | new stack pointer, then store away the saved old frame pointer | |
823fbbce JDA |
3690 | into the stack at sp and at the same time update the stack |
3691 | pointer by actual_fsize bytes. Two versions, first | |
31d68947 AM |
3692 | handles small (<8k) frames. The second handles large (>=8k) |
3693 | frames. */ | |
823fbbce JDA |
3694 | insn = emit_move_insn (tmpreg, frame_pointer_rtx); |
3695 | if (DO_FRAME_NOTES) | |
77c4f044 | 3696 | RTX_FRAME_RELATED_P (insn) = 1; |
823fbbce JDA |
3697 | |
3698 | insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx); | |
3699 | if (DO_FRAME_NOTES) | |
3700 | RTX_FRAME_RELATED_P (insn) = 1; | |
3701 | ||
3702 | if (VAL_14_BITS_P (actual_fsize)) | |
3703 | store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize); | |
31d68947 AM |
3704 | else |
3705 | { | |
3706 | /* It is incorrect to store the saved frame pointer at *sp, | |
3707 | then increment sp (writes beyond the current stack boundary). | |
3708 | ||
3709 | So instead use stwm to store at *sp and post-increment the | |
3710 | stack pointer as an atomic operation. Then increment sp to | |
3711 | finish allocating the new frame. */ | |
a4295210 JDA |
3712 | HOST_WIDE_INT adjust1 = 8192 - 64; |
3713 | HOST_WIDE_INT adjust2 = actual_fsize - adjust1; | |
19ec6a36 | 3714 | |
823fbbce | 3715 | store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1); |
f6bcf44c | 3716 | set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM, |
823fbbce | 3717 | adjust2, 1); |
31d68947 | 3718 | } |
823fbbce | 3719 | |
685d0e07 JDA |
3720 | /* We set SAVE_SP in frames that need a frame pointer. Thus, |
3721 | we need to store the previous stack pointer (frame pointer) | |
3722 | into the frame marker on targets that use the HP unwind | |
3723 | library. This allows the HP unwind library to be used to | |
3724 | unwind GCC frames. However, we are not fully compatible | |
3725 | with the HP library because our frame layout differs from | |
3726 | that specified in the HP runtime specification. | |
3727 | ||
3728 | We don't want a frame note on this instruction as the frame | |
3729 | marker moves during dynamic stack allocation. | |
3730 | ||
3731 | This instruction also serves as a blockage to prevent | |
3732 | register spills from being scheduled before the stack | |
3733 | pointer is raised. This is necessary as we store | |
3734 | registers using the frame pointer as a base register, | |
3735 | and the frame pointer is set before sp is raised. */ | |
3736 | if (TARGET_HPUX_UNWIND_LIBRARY) | |
3737 | { | |
3738 | rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, | |
3739 | GEN_INT (TARGET_64BIT ? -8 : -4)); | |
3740 | ||
3741 | emit_move_insn (gen_rtx_MEM (word_mode, addr), | |
3742 | frame_pointer_rtx); | |
3743 | } | |
3744 | else | |
3745 | emit_insn (gen_blockage ()); | |
31d68947 AM |
3746 | } |
3747 | /* no frame pointer needed. */ | |
3748 | else | |
3749 | { | |
3750 | /* In some cases we can perform the first callee register save | |
3751 | and allocating the stack frame at the same time. If so, just | |
3752 | make a note of it and defer allocating the frame until saving | |
3753 | the callee registers. */ | |
1c7a8112 | 3754 | if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0) |
31d68947 AM |
3755 | merge_sp_adjust_with_store = 1; |
3756 | /* Can not optimize. Adjust the stack frame by actual_fsize | |
3757 | bytes. */ | |
3758 | else | |
f6bcf44c | 3759 | set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM, |
823fbbce | 3760 | actual_fsize, 1); |
31d68947 | 3761 | } |
a9d91d6f RS |
3762 | } |
3763 | ||
23f6f34f | 3764 | /* Normal register save. |
aadcdb45 JL |
3765 | |
3766 | Do not save the frame pointer in the frame_pointer_needed case. It | |
3767 | was done earlier. */ | |
188538df TG |
3768 | if (frame_pointer_needed) |
3769 | { | |
823fbbce JDA |
3770 | offset = local_fsize; |
3771 | ||
3772 | /* Saving the EH return data registers in the frame is the simplest | |
3773 | way to get the frame unwind information emitted. We put them | |
3774 | just before the general registers. */ | |
e3b5732b | 3775 | if (DO_FRAME_NOTES && crtl->calls_eh_return) |
823fbbce JDA |
3776 | { |
3777 | unsigned int i, regno; | |
3778 | ||
3779 | for (i = 0; ; ++i) | |
3780 | { | |
3781 | regno = EH_RETURN_DATA_REGNO (i); | |
3782 | if (regno == INVALID_REGNUM) | |
3783 | break; | |
3784 | ||
3785 | store_reg (regno, offset, FRAME_POINTER_REGNUM); | |
3786 | offset += UNITS_PER_WORD; | |
3787 | } | |
3788 | } | |
3789 | ||
3790 | for (i = 18; i >= 4; i--) | |
6fb5fa3c | 3791 | if (df_regs_ever_live_p (i) && ! call_used_regs[i]) |
188538df | 3792 | { |
f6bcf44c | 3793 | store_reg (i, offset, FRAME_POINTER_REGNUM); |
d7735a07 | 3794 | offset += UNITS_PER_WORD; |
68386e1e | 3795 | gr_saved++; |
188538df | 3796 | } |
e63ffc38 | 3797 | /* Account for %r3 which is saved in a special place. */ |
e8cfae5c | 3798 | gr_saved++; |
188538df | 3799 | } |
aadcdb45 | 3800 | /* No frame pointer needed. */ |
188538df TG |
3801 | else |
3802 | { | |
823fbbce JDA |
3803 | offset = local_fsize - actual_fsize; |
3804 | ||
3805 | /* Saving the EH return data registers in the frame is the simplest | |
3806 | way to get the frame unwind information emitted. */ | |
e3b5732b | 3807 | if (DO_FRAME_NOTES && crtl->calls_eh_return) |
823fbbce JDA |
3808 | { |
3809 | unsigned int i, regno; | |
3810 | ||
3811 | for (i = 0; ; ++i) | |
3812 | { | |
3813 | regno = EH_RETURN_DATA_REGNO (i); | |
3814 | if (regno == INVALID_REGNUM) | |
3815 | break; | |
3816 | ||
3817 | /* If merge_sp_adjust_with_store is nonzero, then we can | |
3818 | optimize the first save. */ | |
3819 | if (merge_sp_adjust_with_store) | |
3820 | { | |
3821 | store_reg_modify (STACK_POINTER_REGNUM, regno, -offset); | |
3822 | merge_sp_adjust_with_store = 0; | |
3823 | } | |
3824 | else | |
3825 | store_reg (regno, offset, STACK_POINTER_REGNUM); | |
3826 | offset += UNITS_PER_WORD; | |
3827 | } | |
3828 | } | |
3829 | ||
3830 | for (i = 18; i >= 3; i--) | |
6fb5fa3c | 3831 | if (df_regs_ever_live_p (i) && ! call_used_regs[i]) |
188538df | 3832 | { |
23f6f34f | 3833 | /* If merge_sp_adjust_with_store is nonzero, then we can |
4971c587 | 3834 | optimize the first GR save. */ |
f133af4c | 3835 | if (merge_sp_adjust_with_store) |
4971c587 | 3836 | { |
823fbbce | 3837 | store_reg_modify (STACK_POINTER_REGNUM, i, -offset); |
4971c587 | 3838 | merge_sp_adjust_with_store = 0; |
4971c587 JL |
3839 | } |
3840 | else | |
f6bcf44c | 3841 | store_reg (i, offset, STACK_POINTER_REGNUM); |
d7735a07 | 3842 | offset += UNITS_PER_WORD; |
68386e1e | 3843 | gr_saved++; |
188538df | 3844 | } |
aadcdb45 | 3845 | |
4971c587 | 3846 | /* If we wanted to merge the SP adjustment with a GR save, but we never |
aadcdb45 | 3847 | did any GR saves, then just emit the adjustment here. */ |
f133af4c | 3848 | if (merge_sp_adjust_with_store) |
f6bcf44c | 3849 | set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM, |
823fbbce | 3850 | actual_fsize, 1); |
188538df | 3851 | } |
23f6f34f | 3852 | |
1c7a8112 AM |
3853 | /* The hppa calling conventions say that %r19, the pic offset |
3854 | register, is saved at sp - 32 (in this function's frame) | |
3855 | when generating PIC code. FIXME: What is the correct thing | |
3856 | to do for functions which make no calls and allocate no | |
3857 | frame? Do we need to allocate a frame, or can we just omit | |
3ffa9dc1 JDA |
3858 | the save? For now we'll just omit the save. |
3859 | ||
3860 | We don't want a note on this insn as the frame marker can | |
3861 | move if there is a dynamic stack allocation. */ | |
1c7a8112 | 3862 | if (flag_pic && actual_fsize != 0 && !TARGET_64BIT) |
3ffa9dc1 JDA |
3863 | { |
3864 | rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32)); | |
3865 | ||
3866 | emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx); | |
3867 | ||
3868 | } | |
1c7a8112 | 3869 | |
188538df TG |
3870 | /* Align pointer properly (doubleword boundary). */ |
3871 | offset = (offset + 7) & ~7; | |
3872 | ||
3873 | /* Floating point register store. */ | |
3874 | if (save_fregs) | |
188538df | 3875 | { |
823fbbce JDA |
3876 | rtx base; |
3877 | ||
aadcdb45 JL |
3878 | /* First get the frame or stack pointer to the start of the FP register |
3879 | save area. */ | |
2b41935c | 3880 | if (frame_pointer_needed) |
823fbbce JDA |
3881 | { |
3882 | set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0); | |
3883 | base = frame_pointer_rtx; | |
3884 | } | |
2b41935c | 3885 | else |
823fbbce JDA |
3886 | { |
3887 | set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0); | |
3888 | base = stack_pointer_rtx; | |
3889 | } | |
aadcdb45 JL |
3890 | |
3891 | /* Now actually save the FP registers. */ | |
88624c0e | 3892 | for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP) |
e63ffc38 | 3893 | { |
6fb5fa3c DB |
3894 | if (df_regs_ever_live_p (i) |
3895 | || (! TARGET_64BIT && df_regs_ever_live_p (i + 1))) | |
e63ffc38 | 3896 | { |
f6bcf44c | 3897 | rtx addr, insn, reg; |
19ec6a36 AM |
3898 | addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg)); |
3899 | reg = gen_rtx_REG (DFmode, i); | |
f6bcf44c JDA |
3900 | insn = emit_move_insn (addr, reg); |
3901 | if (DO_FRAME_NOTES) | |
3902 | { | |
3903 | RTX_FRAME_RELATED_P (insn) = 1; | |
823fbbce JDA |
3904 | if (TARGET_64BIT) |
3905 | { | |
3906 | rtx mem = gen_rtx_MEM (DFmode, | |
3907 | plus_constant (base, offset)); | |
bbbbb16a ILT |
3908 | add_reg_note (insn, REG_FRAME_RELATED_EXPR, |
3909 | gen_rtx_SET (VOIDmode, mem, reg)); | |
823fbbce JDA |
3910 | } |
3911 | else | |
3912 | { | |
3913 | rtx meml = gen_rtx_MEM (SFmode, | |
3914 | plus_constant (base, offset)); | |
3915 | rtx memr = gen_rtx_MEM (SFmode, | |
3916 | plus_constant (base, offset + 4)); | |
3917 | rtx regl = gen_rtx_REG (SFmode, i); | |
3918 | rtx regr = gen_rtx_REG (SFmode, i + 1); | |
3919 | rtx setl = gen_rtx_SET (VOIDmode, meml, regl); | |
3920 | rtx setr = gen_rtx_SET (VOIDmode, memr, regr); | |
3921 | rtvec vec; | |
3922 | ||
3923 | RTX_FRAME_RELATED_P (setl) = 1; | |
3924 | RTX_FRAME_RELATED_P (setr) = 1; | |
3925 | vec = gen_rtvec (2, setl, setr); | |
bbbbb16a ILT |
3926 | add_reg_note (insn, REG_FRAME_RELATED_EXPR, |
3927 | gen_rtx_SEQUENCE (VOIDmode, vec)); | |
823fbbce | 3928 | } |
f6bcf44c JDA |
3929 | } |
3930 | offset += GET_MODE_SIZE (DFmode); | |
e63ffc38 JL |
3931 | fr_saved++; |
3932 | } | |
3933 | } | |
188538df TG |
3934 | } |
3935 | } | |
3936 | ||
19ec6a36 AM |
3937 | /* Emit RTL to load REG from the memory location specified by BASE+DISP. |
3938 | Handle case where DISP > 8k by using the add_high_const patterns. */ | |
3939 | ||
f6bcf44c | 3940 | static void |
a4295210 | 3941 | load_reg (int reg, HOST_WIDE_INT disp, int base) |
19ec6a36 | 3942 | { |
a4295210 JDA |
3943 | rtx dest = gen_rtx_REG (word_mode, reg); |
3944 | rtx basereg = gen_rtx_REG (Pmode, base); | |
3945 | rtx src; | |
19ec6a36 | 3946 | |
19ec6a36 | 3947 | if (VAL_14_BITS_P (disp)) |
a4295210 JDA |
3948 | src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp)); |
3949 | else if (TARGET_64BIT && !VAL_32_BITS_P (disp)) | |
19ec6a36 | 3950 | { |
a4295210 JDA |
3951 | rtx delta = GEN_INT (disp); |
3952 | rtx tmpreg = gen_rtx_REG (Pmode, 1); | |
3953 | ||
3954 | emit_move_insn (tmpreg, delta); | |
3955 | if (TARGET_DISABLE_INDEXING) | |
3956 | { | |
3957 | emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg)); | |
3958 | src = gen_rtx_MEM (word_mode, tmpreg); | |
3959 | } | |
3960 | else | |
3961 | src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg)); | |
19ec6a36 AM |
3962 | } |
3963 | else | |
3964 | { | |
3965 | rtx delta = GEN_INT (disp); | |
3966 | rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta)); | |
3967 | rtx tmpreg = gen_rtx_REG (Pmode, 1); | |
a4295210 | 3968 | |
19ec6a36 AM |
3969 | emit_move_insn (tmpreg, high); |
3970 | src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta)); | |
19ec6a36 | 3971 | } |
a4295210 JDA |
3972 | |
3973 | emit_move_insn (dest, src); | |
19ec6a36 | 3974 | } |
aadcdb45 | 3975 | |
5fad1c24 JDA |
3976 | /* Update the total code bytes output to the text section. */ |
3977 | ||
3978 | static void | |
67b846fa | 3979 | update_total_code_bytes (unsigned int nbytes) |
5fad1c24 JDA |
3980 | { |
3981 | if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM) | |
62910663 | 3982 | && !IN_NAMED_SECTION_P (cfun->decl)) |
5fad1c24 | 3983 | { |
67b846fa | 3984 | unsigned int old_total = total_code_bytes; |
5fad1c24 | 3985 | |
67b846fa | 3986 | total_code_bytes += nbytes; |
5fad1c24 | 3987 | |
67b846fa JDA |
3988 | /* Be prepared to handle overflows. */ |
3989 | if (old_total > total_code_bytes) | |
3990 | total_code_bytes = UINT_MAX; | |
5fad1c24 JDA |
3991 | } |
3992 | } | |
3993 | ||
08c148a8 NB |
3994 | /* This function generates the assembly code for function exit. |
3995 | Args are as for output_function_prologue (). | |
3996 | ||
3997 | The function epilogue should not depend on the current stack | |
3998 | pointer! It should use the frame pointer only. This is mandatory | |
3999 | because of alloca; we also take advantage of it to omit stack | |
fe19a83d | 4000 | adjustments before returning. */ |
08c148a8 NB |
4001 | |
4002 | static void | |
b7849684 | 4003 | pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED) |
188538df | 4004 | { |
08a2b118 RS |
4005 | rtx insn = get_last_insn (); |
4006 | ||
5fad1c24 JDA |
4007 | last_address = 0; |
4008 | ||
aadcdb45 JL |
4009 | /* hppa_expand_epilogue does the dirty work now. We just need |
4010 | to output the assembler directives which denote the end | |
08a2b118 RS |
4011 | of a function. |
4012 | ||
4013 | To make debuggers happy, emit a nop if the epilogue was completely | |
4014 | eliminated due to a volatile call as the last insn in the | |
23f6f34f | 4015 | current function. That way the return address (in %r2) will |
08a2b118 RS |
4016 | always point to a valid instruction in the current function. */ |
4017 | ||
4018 | /* Get the last real insn. */ | |
4019 | if (GET_CODE (insn) == NOTE) | |
4020 | insn = prev_real_insn (insn); | |
4021 | ||
4022 | /* If it is a sequence, then look inside. */ | |
4023 | if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) | |
4024 | insn = XVECEXP (PATTERN (insn), 0, 0); | |
4025 | ||
23f6f34f | 4026 | /* If insn is a CALL_INSN, then it must be a call to a volatile |
08a2b118 RS |
4027 | function (otherwise there would be epilogue insns). */ |
4028 | if (insn && GET_CODE (insn) == CALL_INSN) | |
17e6098e JDA |
4029 | { |
4030 | fputs ("\tnop\n", file); | |
4031 | last_address += 4; | |
4032 | } | |
23f6f34f | 4033 | |
e236a9ff | 4034 | fputs ("\t.EXIT\n\t.PROCEND\n", file); |
17e6098e | 4035 | |
9a55eab3 JDA |
4036 | if (TARGET_SOM && TARGET_GAS) |
4037 | { | |
4038 | /* We done with this subspace except possibly for some additional | |
4039 | debug information. Forget that we are in this subspace to ensure | |
4040 | that the next function is output in its own subspace. */ | |
d6b5193b | 4041 | in_section = NULL; |
1a83bfc3 | 4042 | cfun->machine->in_nsubspa = 2; |
9a55eab3 JDA |
4043 | } |
4044 | ||
5fad1c24 | 4045 | if (INSN_ADDRESSES_SET_P ()) |
17e6098e | 4046 | { |
5fad1c24 JDA |
4047 | insn = get_last_nonnote_insn (); |
4048 | last_address += INSN_ADDRESSES (INSN_UID (insn)); | |
4049 | if (INSN_P (insn)) | |
4050 | last_address += insn_default_length (insn); | |
4051 | last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1) | |
4052 | & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)); | |
17e6098e | 4053 | } |
67b846fa JDA |
4054 | else |
4055 | last_address = UINT_MAX; | |
5fad1c24 JDA |
4056 | |
4057 | /* Finally, update the total number of code bytes output so far. */ | |
4058 | update_total_code_bytes (last_address); | |
aadcdb45 | 4059 | } |
4971c587 | 4060 | |
aadcdb45 | 4061 | void |
b7849684 | 4062 | hppa_expand_epilogue (void) |
aadcdb45 | 4063 | { |
23f6f34f | 4064 | rtx tmpreg; |
a4295210 JDA |
4065 | HOST_WIDE_INT offset; |
4066 | HOST_WIDE_INT ret_off = 0; | |
4067 | int i; | |
31d68947 | 4068 | int merge_sp_adjust_with_load = 0; |
aadcdb45 JL |
4069 | |
4070 | /* We will use this often. */ | |
690d4228 | 4071 | tmpreg = gen_rtx_REG (word_mode, 1); |
aadcdb45 JL |
4072 | |
4073 | /* Try to restore RP early to avoid load/use interlocks when | |
4074 | RP gets used in the return (bv) instruction. This appears to still | |
fe19a83d | 4075 | be necessary even when we schedule the prologue and epilogue. */ |
16c16a24 | 4076 | if (rp_saved) |
31d68947 AM |
4077 | { |
4078 | ret_off = TARGET_64BIT ? -16 : -20; | |
4079 | if (frame_pointer_needed) | |
4080 | { | |
f6bcf44c | 4081 | load_reg (2, ret_off, FRAME_POINTER_REGNUM); |
31d68947 AM |
4082 | ret_off = 0; |
4083 | } | |
4084 | else | |
4085 | { | |
4086 | /* No frame pointer, and stack is smaller than 8k. */ | |
4087 | if (VAL_14_BITS_P (ret_off - actual_fsize)) | |
4088 | { | |
f6bcf44c | 4089 | load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM); |
31d68947 AM |
4090 | ret_off = 0; |
4091 | } | |
4092 | } | |
4093 | } | |
aadcdb45 JL |
4094 | |
4095 | /* General register restores. */ | |
188538df TG |
4096 | if (frame_pointer_needed) |
4097 | { | |
823fbbce JDA |
4098 | offset = local_fsize; |
4099 | ||
4100 | /* If the current function calls __builtin_eh_return, then we need | |
4101 | to restore the saved EH data registers. */ | |
e3b5732b | 4102 | if (DO_FRAME_NOTES && crtl->calls_eh_return) |
823fbbce JDA |
4103 | { |
4104 | unsigned int i, regno; | |
4105 | ||
4106 | for (i = 0; ; ++i) | |
4107 | { | |
4108 | regno = EH_RETURN_DATA_REGNO (i); | |
4109 | if (regno == INVALID_REGNUM) | |
4110 | break; | |
4111 | ||
4112 | load_reg (regno, offset, FRAME_POINTER_REGNUM); | |
4113 | offset += UNITS_PER_WORD; | |
4114 | } | |
4115 | } | |
4116 | ||
4117 | for (i = 18; i >= 4; i--) | |
6fb5fa3c | 4118 | if (df_regs_ever_live_p (i) && ! call_used_regs[i]) |
188538df | 4119 | { |
f6bcf44c | 4120 | load_reg (i, offset, FRAME_POINTER_REGNUM); |
d7735a07 | 4121 | offset += UNITS_PER_WORD; |
188538df | 4122 | } |
188538df TG |
4123 | } |
4124 | else | |
4125 | { | |
823fbbce JDA |
4126 | offset = local_fsize - actual_fsize; |
4127 | ||
4128 | /* If the current function calls __builtin_eh_return, then we need | |
4129 | to restore the saved EH data registers. */ | |
e3b5732b | 4130 | if (DO_FRAME_NOTES && crtl->calls_eh_return) |
823fbbce JDA |
4131 | { |
4132 | unsigned int i, regno; | |
4133 | ||
4134 | for (i = 0; ; ++i) | |
4135 | { | |
4136 | regno = EH_RETURN_DATA_REGNO (i); | |
4137 | if (regno == INVALID_REGNUM) | |
4138 | break; | |
4139 | ||
4140 | /* Only for the first load. | |
4141 | merge_sp_adjust_with_load holds the register load | |
4142 | with which we will merge the sp adjustment. */ | |
4143 | if (merge_sp_adjust_with_load == 0 | |
4144 | && local_fsize == 0 | |
4145 | && VAL_14_BITS_P (-actual_fsize)) | |
4146 | merge_sp_adjust_with_load = regno; | |
4147 | else | |
4148 | load_reg (regno, offset, STACK_POINTER_REGNUM); | |
4149 | offset += UNITS_PER_WORD; | |
4150 | } | |
4151 | } | |
4152 | ||
4153 | for (i = 18; i >= 3; i--) | |
e63ffc38 | 4154 | { |
6fb5fa3c | 4155 | if (df_regs_ever_live_p (i) && ! call_used_regs[i]) |
e63ffc38 | 4156 | { |
e63ffc38 JL |
4157 | /* Only for the first load. |
4158 | merge_sp_adjust_with_load holds the register load | |
4159 | with which we will merge the sp adjustment. */ | |
31d68947 | 4160 | if (merge_sp_adjust_with_load == 0 |
e63ffc38 | 4161 | && local_fsize == 0 |
31d68947 | 4162 | && VAL_14_BITS_P (-actual_fsize)) |
e63ffc38 JL |
4163 | merge_sp_adjust_with_load = i; |
4164 | else | |
f6bcf44c | 4165 | load_reg (i, offset, STACK_POINTER_REGNUM); |
d7735a07 | 4166 | offset += UNITS_PER_WORD; |
e63ffc38 JL |
4167 | } |
4168 | } | |
188538df | 4169 | } |
aadcdb45 | 4170 | |
188538df TG |
4171 | /* Align pointer properly (doubleword boundary). */ |
4172 | offset = (offset + 7) & ~7; | |
4173 | ||
aadcdb45 | 4174 | /* FP register restores. */ |
188538df | 4175 | if (save_fregs) |
188538df | 4176 | { |
aadcdb45 | 4177 | /* Adjust the register to index off of. */ |
2b41935c | 4178 | if (frame_pointer_needed) |
823fbbce | 4179 | set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0); |
2b41935c | 4180 | else |
823fbbce | 4181 | set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0); |
aadcdb45 JL |
4182 | |
4183 | /* Actually do the restores now. */ | |
88624c0e | 4184 | for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP) |
6fb5fa3c DB |
4185 | if (df_regs_ever_live_p (i) |
4186 | || (! TARGET_64BIT && df_regs_ever_live_p (i + 1))) | |
19ec6a36 AM |
4187 | { |
4188 | rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg)); | |
4189 | rtx dest = gen_rtx_REG (DFmode, i); | |
f6bcf44c | 4190 | emit_move_insn (dest, src); |
19ec6a36 | 4191 | } |
188538df | 4192 | } |
aadcdb45 | 4193 | |
1144563f JL |
4194 | /* Emit a blockage insn here to keep these insns from being moved to |
4195 | an earlier spot in the epilogue, or into the main instruction stream. | |
4196 | ||
4197 | This is necessary as we must not cut the stack back before all the | |
4198 | restores are finished. */ | |
4199 | emit_insn (gen_blockage ()); | |
aadcdb45 | 4200 | |
6619e96c | 4201 | /* Reset stack pointer (and possibly frame pointer). The stack |
68944452 | 4202 | pointer is initially set to fp + 64 to avoid a race condition. */ |
31d68947 | 4203 | if (frame_pointer_needed) |
188538df | 4204 | { |
19ec6a36 | 4205 | rtx delta = GEN_INT (-64); |
823fbbce JDA |
4206 | |
4207 | set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0); | |
4208 | emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta)); | |
188538df | 4209 | } |
aadcdb45 | 4210 | /* If we were deferring a callee register restore, do it now. */ |
31d68947 AM |
4211 | else if (merge_sp_adjust_with_load) |
4212 | { | |
4213 | rtx delta = GEN_INT (-actual_fsize); | |
19ec6a36 | 4214 | rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load); |
823fbbce JDA |
4215 | |
4216 | emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta)); | |
31d68947 | 4217 | } |
aadcdb45 | 4218 | else if (actual_fsize != 0) |
823fbbce JDA |
4219 | set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM, |
4220 | - actual_fsize, 0); | |
31d68947 AM |
4221 | |
4222 | /* If we haven't restored %r2 yet (no frame pointer, and a stack | |
4223 | frame greater than 8k), do so now. */ | |
4224 | if (ret_off != 0) | |
f6bcf44c | 4225 | load_reg (2, ret_off, STACK_POINTER_REGNUM); |
823fbbce | 4226 | |
e3b5732b | 4227 | if (DO_FRAME_NOTES && crtl->calls_eh_return) |
823fbbce JDA |
4228 | { |
4229 | rtx sa = EH_RETURN_STACKADJ_RTX; | |
4230 | ||
4231 | emit_insn (gen_blockage ()); | |
4232 | emit_insn (TARGET_64BIT | |
4233 | ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa) | |
4234 | : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa)); | |
4235 | } | |
188538df TG |
4236 | } |
4237 | ||
d777856d | 4238 | rtx |
b7849684 | 4239 | hppa_pic_save_rtx (void) |
824e7605 | 4240 | { |
d777856d | 4241 | return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM); |
1c7a8112 AM |
4242 | } |
4243 | ||
3674b34d JDA |
4244 | #ifndef NO_DEFERRED_PROFILE_COUNTERS |
4245 | #define NO_DEFERRED_PROFILE_COUNTERS 0 | |
4246 | #endif | |
4247 | ||
3674b34d JDA |
4248 | |
4249 | /* Vector of funcdef numbers. */ | |
4250 | static VEC(int,heap) *funcdef_nos; | |
4251 | ||
4252 | /* Output deferred profile counters. */ | |
4253 | static void | |
4254 | output_deferred_profile_counters (void) | |
4255 | { | |
4256 | unsigned int i; | |
4257 | int align, n; | |
4258 | ||
4259 | if (VEC_empty (int, funcdef_nos)) | |
4260 | return; | |
4261 | ||
d6b5193b | 4262 | switch_to_section (data_section); |
3674b34d JDA |
4263 | align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE); |
4264 | ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT)); | |
4265 | ||
4266 | for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++) | |
4267 | { | |
4268 | targetm.asm_out.internal_label (asm_out_file, "LP", n); | |
4269 | assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1); | |
4270 | } | |
4271 | ||
4272 | VEC_free (int, heap, funcdef_nos); | |
4273 | } | |
4274 | ||
1c7a8112 | 4275 | void |
b7849684 | 4276 | hppa_profile_hook (int label_no) |
1c7a8112 | 4277 | { |
a3d4c92f RC |
4278 | /* We use SImode for the address of the function in both 32 and |
4279 | 64-bit code to avoid having to provide DImode versions of the | |
4280 | lcla2 and load_offset_label_address insn patterns. */ | |
4281 | rtx reg = gen_reg_rtx (SImode); | |
4282 | rtx label_rtx = gen_label_rtx (); | |
8f949e7e JDA |
4283 | rtx begin_label_rtx, call_insn; |
4284 | char begin_label_name[16]; | |
1c7a8112 | 4285 | |
8f949e7e | 4286 | ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL, |
f6f315fe | 4287 | label_no); |
a3d4c92f | 4288 | begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name)); |
1c7a8112 AM |
4289 | |
4290 | if (TARGET_64BIT) | |
4291 | emit_move_insn (arg_pointer_rtx, | |
4292 | gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx, | |
4293 | GEN_INT (64))); | |
4294 | ||
1c7a8112 AM |
4295 | emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2)); |
4296 | ||
110abdbc | 4297 | /* The address of the function is loaded into %r25 with an instruction- |
a3d4c92f RC |
4298 | relative sequence that avoids the use of relocations. The sequence |
4299 | is split so that the load_offset_label_address instruction can | |
4300 | occupy the delay slot of the call to _mcount. */ | |
4301 | if (TARGET_PA_20) | |
4302 | emit_insn (gen_lcla2 (reg, label_rtx)); | |
4303 | else | |
4304 | emit_insn (gen_lcla1 (reg, label_rtx)); | |
4305 | ||
4306 | emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25), | |
4307 | reg, begin_label_rtx, label_rtx)); | |
4308 | ||
3674b34d | 4309 | #if !NO_DEFERRED_PROFILE_COUNTERS |
1c7a8112 AM |
4310 | { |
4311 | rtx count_label_rtx, addr, r24; | |
8f949e7e | 4312 | char count_label_name[16]; |
1c7a8112 | 4313 | |
3674b34d | 4314 | VEC_safe_push (int, heap, funcdef_nos, label_no); |
8f949e7e JDA |
4315 | ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no); |
4316 | count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name)); | |
1c7a8112 | 4317 | |
bdad4be5 | 4318 | addr = force_reg (Pmode, count_label_rtx); |
1c7a8112 AM |
4319 | r24 = gen_rtx_REG (Pmode, 24); |
4320 | emit_move_insn (r24, addr); | |
4321 | ||
1c7a8112 | 4322 | call_insn = |
a3d4c92f RC |
4323 | emit_call_insn (gen_call (gen_rtx_MEM (Pmode, |
4324 | gen_rtx_SYMBOL_REF (Pmode, | |
4325 | "_mcount")), | |
4326 | GEN_INT (TARGET_64BIT ? 24 : 12))); | |
1c7a8112 AM |
4327 | |
4328 | use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24); | |
4329 | } | |
4330 | #else | |
a3d4c92f | 4331 | |
1c7a8112 | 4332 | call_insn = |
a3d4c92f RC |
4333 | emit_call_insn (gen_call (gen_rtx_MEM (Pmode, |
4334 | gen_rtx_SYMBOL_REF (Pmode, | |
4335 | "_mcount")), | |
4336 | GEN_INT (TARGET_64BIT ? 16 : 8))); | |
4337 | ||
1c7a8112 AM |
4338 | #endif |
4339 | ||
a3d4c92f RC |
4340 | use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25)); |
4341 | use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26)); | |
4342 | ||
1c7a8112 AM |
4343 | /* Indicate the _mcount call cannot throw, nor will it execute a |
4344 | non-local goto. */ | |
bbbbb16a | 4345 | add_reg_note (call_insn, REG_EH_REGION, constm1_rtx); |
824e7605 AM |
4346 | } |
4347 | ||
e99d6592 MS |
4348 | /* Fetch the return address for the frame COUNT steps up from |
4349 | the current frame, after the prologue. FRAMEADDR is the | |
4350 | frame pointer of the COUNT frame. | |
4351 | ||
cf3735b8 JDA |
4352 | We want to ignore any export stub remnants here. To handle this, |
4353 | we examine the code at the return address, and if it is an export | |
4354 | stub, we return a memory rtx for the stub return address stored | |
4355 | at frame-24. | |
c28eb6c2 JL |
4356 | |
4357 | The value returned is used in two different ways: | |
4358 | ||
4359 | 1. To find a function's caller. | |
4360 | ||
4361 | 2. To change the return address for a function. | |
4362 | ||
4363 | This function handles most instances of case 1; however, it will | |
4364 | fail if there are two levels of stubs to execute on the return | |
4365 | path. The only way I believe that can happen is if the return value | |
4366 | needs a parameter relocation, which never happens for C code. | |
4367 | ||
4368 | This function handles most instances of case 2; however, it will | |
4369 | fail if we did not originally have stub code on the return path | |
cf3735b8 | 4370 | but will need stub code on the new return path. This can happen if |
c28eb6c2 | 4371 | the caller & callee are both in the main program, but the new |
cf3735b8 | 4372 | return location is in a shared library. */ |
e99d6592 MS |
4373 | |
4374 | rtx | |
b7849684 | 4375 | return_addr_rtx (int count, rtx frameaddr) |
e99d6592 MS |
4376 | { |
4377 | rtx label; | |
cf3735b8 | 4378 | rtx rp; |
e99d6592 MS |
4379 | rtx saved_rp; |
4380 | rtx ins; | |
4381 | ||
cf3735b8 JDA |
4382 | if (count != 0) |
4383 | return NULL_RTX; | |
a7721dc0 | 4384 | |
cf3735b8 | 4385 | rp = get_hard_reg_initial_val (Pmode, 2); |
e99d6592 | 4386 | |
cf3735b8 JDA |
4387 | if (TARGET_64BIT || TARGET_NO_SPACE_REGS) |
4388 | return rp; | |
e99d6592 | 4389 | |
a7721dc0 | 4390 | saved_rp = gen_reg_rtx (Pmode); |
cf3735b8 | 4391 | emit_move_insn (saved_rp, rp); |
e99d6592 MS |
4392 | |
4393 | /* Get pointer to the instruction stream. We have to mask out the | |
4394 | privilege level from the two low order bits of the return address | |
4395 | pointer here so that ins will point to the start of the first | |
4396 | instruction that would have been executed if we returned. */ | |
cf3735b8 | 4397 | ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR)); |
e99d6592 MS |
4398 | label = gen_label_rtx (); |
4399 | ||
4400 | /* Check the instruction stream at the normal return address for the | |
4401 | export stub: | |
4402 | ||
4403 | 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp | |
4404 | 0x004010a1 | stub+12: ldsid (sr0,rp),r1 | |
4405 | 0x00011820 | stub+16: mtsp r1,sr0 | |
4406 | 0xe0400002 | stub+20: be,n 0(sr0,rp) | |
4407 | ||
4408 | If it is an export stub, than our return address is really in | |
4409 | -24[frameaddr]. */ | |
4410 | ||
847898f6 RK |
4411 | emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE, |
4412 | NULL_RTX, SImode, 1); | |
e99d6592 MS |
4413 | emit_jump_insn (gen_bne (label)); |
4414 | ||
ad2c71b7 | 4415 | emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)), |
847898f6 | 4416 | GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1); |
e99d6592 MS |
4417 | emit_jump_insn (gen_bne (label)); |
4418 | ||
ad2c71b7 | 4419 | emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)), |
847898f6 | 4420 | GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1); |
e99d6592 MS |
4421 | emit_jump_insn (gen_bne (label)); |
4422 | ||
af1e323e JDA |
4423 | /* 0xe0400002 must be specified as -532676606 so that it won't be |
4424 | rejected as an invalid immediate operand on 64-bit hosts. */ | |
ad2c71b7 | 4425 | emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)), |
af1e323e | 4426 | GEN_INT (-532676606), NE, NULL_RTX, SImode, 1); |
e99d6592 | 4427 | |
cf3735b8 JDA |
4428 | /* If there is no export stub then just use the value saved from |
4429 | the return pointer register. */ | |
e99d6592 MS |
4430 | |
4431 | emit_jump_insn (gen_bne (label)); | |
4432 | ||
cf3735b8 | 4433 | /* Here we know that our return address points to an export |
e99d6592 | 4434 | stub. We don't want to return the address of the export stub, |
cf3735b8 JDA |
4435 | but rather the return address of the export stub. That return |
4436 | address is stored at -24[frameaddr]. */ | |
e99d6592 | 4437 | |
cf3735b8 JDA |
4438 | emit_move_insn (saved_rp, |
4439 | gen_rtx_MEM (Pmode, | |
4440 | memory_address (Pmode, | |
4441 | plus_constant (frameaddr, | |
4442 | -24)))); | |
e99d6592 MS |
4443 | |
4444 | emit_label (label); | |
cf3735b8 | 4445 | return saved_rp; |
e99d6592 MS |
4446 | } |
4447 | ||
188538df | 4448 | void |
b7849684 | 4449 | emit_bcond_fp (enum rtx_code code, rtx operand0) |
188538df | 4450 | { |
ad2c71b7 JL |
4451 | emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, |
4452 | gen_rtx_IF_THEN_ELSE (VOIDmode, | |
4453 | gen_rtx_fmt_ee (code, | |
4454 | VOIDmode, | |
4455 | gen_rtx_REG (CCFPmode, 0), | |
4456 | const0_rtx), | |
4457 | gen_rtx_LABEL_REF (VOIDmode, operand0), | |
4458 | pc_rtx))); | |
188538df TG |
4459 | |
4460 | } | |
4461 | ||
4462 | rtx | |
b7849684 | 4463 | gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1) |
188538df | 4464 | { |
ad2c71b7 JL |
4465 | return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0), |
4466 | gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)); | |
188538df TG |
4467 | } |
4468 | ||
780f491f TG |
4469 | /* Adjust the cost of a scheduling dependency. Return the new cost of |
4470 | a dependency LINK or INSN on DEP_INSN. COST is the current cost. */ | |
4471 | ||
c237e94a | 4472 | static int |
b7849684 | 4473 | pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost) |
780f491f | 4474 | { |
b09fa787 JL |
4475 | enum attr_type attr_type; |
4476 | ||
5d50fab3 JL |
4477 | /* Don't adjust costs for a pa8000 chip, also do not adjust any |
4478 | true dependencies as they are described with bypasses now. */ | |
4479 | if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0) | |
86001391 JQ |
4480 | return cost; |
4481 | ||
e150ae4f TG |
4482 | if (! recog_memoized (insn)) |
4483 | return 0; | |
780f491f | 4484 | |
b09fa787 JL |
4485 | attr_type = get_attr_type (insn); |
4486 | ||
144d51f9 | 4487 | switch (REG_NOTE_KIND (link)) |
780f491f | 4488 | { |
144d51f9 | 4489 | case REG_DEP_ANTI: |
780f491f TG |
4490 | /* Anti dependency; DEP_INSN reads a register that INSN writes some |
4491 | cycles later. */ | |
4492 | ||
b09fa787 | 4493 | if (attr_type == TYPE_FPLOAD) |
780f491f | 4494 | { |
e150ae4f TG |
4495 | rtx pat = PATTERN (insn); |
4496 | rtx dep_pat = PATTERN (dep_insn); | |
4497 | if (GET_CODE (pat) == PARALLEL) | |
4498 | { | |
4499 | /* This happens for the fldXs,mb patterns. */ | |
4500 | pat = XVECEXP (pat, 0, 0); | |
4501 | } | |
4502 | if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET) | |
780f491f | 4503 | /* If this happens, we have to extend this to schedule |
e150ae4f TG |
4504 | optimally. Return 0 for now. */ |
4505 | return 0; | |
780f491f | 4506 | |
e150ae4f | 4507 | if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat))) |
780f491f | 4508 | { |
e150ae4f TG |
4509 | if (! recog_memoized (dep_insn)) |
4510 | return 0; | |
780f491f TG |
4511 | switch (get_attr_type (dep_insn)) |
4512 | { | |
4513 | case TYPE_FPALU: | |
c47decad JL |
4514 | case TYPE_FPMULSGL: |
4515 | case TYPE_FPMULDBL: | |
780f491f TG |
4516 | case TYPE_FPDIVSGL: |
4517 | case TYPE_FPDIVDBL: | |
4518 | case TYPE_FPSQRTSGL: | |
4519 | case TYPE_FPSQRTDBL: | |
e150ae4f | 4520 | /* A fpload can't be issued until one cycle before a |
ddd5a7c1 | 4521 | preceding arithmetic operation has finished if |
e150ae4f TG |
4522 | the target of the fpload is any of the sources |
4523 | (or destination) of the arithmetic operation. */ | |
5d50fab3 | 4524 | return insn_default_latency (dep_insn) - 1; |
c47decad JL |
4525 | |
4526 | default: | |
4527 | return 0; | |
4528 | } | |
4529 | } | |
4530 | } | |
b09fa787 | 4531 | else if (attr_type == TYPE_FPALU) |
c47decad JL |
4532 | { |
4533 | rtx pat = PATTERN (insn); | |
4534 | rtx dep_pat = PATTERN (dep_insn); | |
4535 | if (GET_CODE (pat) == PARALLEL) | |
4536 | { | |
4537 | /* This happens for the fldXs,mb patterns. */ | |
4538 | pat = XVECEXP (pat, 0, 0); | |
4539 | } | |
4540 | if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET) | |
4541 | /* If this happens, we have to extend this to schedule | |
4542 | optimally. Return 0 for now. */ | |
4543 | return 0; | |
4544 | ||
4545 | if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat))) | |
4546 | { | |
4547 | if (! recog_memoized (dep_insn)) | |
4548 | return 0; | |
4549 | switch (get_attr_type (dep_insn)) | |
4550 | { | |
4551 | case TYPE_FPDIVSGL: | |
4552 | case TYPE_FPDIVDBL: | |
4553 | case TYPE_FPSQRTSGL: | |
4554 | case TYPE_FPSQRTDBL: | |
4555 | /* An ALU flop can't be issued until two cycles before a | |
ddd5a7c1 | 4556 | preceding divide or sqrt operation has finished if |
c47decad JL |
4557 | the target of the ALU flop is any of the sources |
4558 | (or destination) of the divide or sqrt operation. */ | |
5d50fab3 | 4559 | return insn_default_latency (dep_insn) - 2; |
780f491f TG |
4560 | |
4561 | default: | |
4562 | return 0; | |
4563 | } | |
4564 | } | |
4565 | } | |
4566 | ||
4567 | /* For other anti dependencies, the cost is 0. */ | |
4568 | return 0; | |
144d51f9 NS |
4569 | |
4570 | case REG_DEP_OUTPUT: | |
c47decad JL |
4571 | /* Output dependency; DEP_INSN writes a register that INSN writes some |
4572 | cycles later. */ | |
b09fa787 | 4573 | if (attr_type == TYPE_FPLOAD) |
c47decad JL |
4574 | { |
4575 | rtx pat = PATTERN (insn); | |
4576 | rtx dep_pat = PATTERN (dep_insn); | |
4577 | if (GET_CODE (pat) == PARALLEL) | |
4578 | { | |
4579 | /* This happens for the fldXs,mb patterns. */ | |
4580 | pat = XVECEXP (pat, 0, 0); | |
4581 | } | |
4582 | if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET) | |
4583 | /* If this happens, we have to extend this to schedule | |
4584 | optimally. Return 0 for now. */ | |
4585 | return 0; | |
4586 | ||
4587 | if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat))) | |
4588 | { | |
4589 | if (! recog_memoized (dep_insn)) | |
4590 | return 0; | |
4591 | switch (get_attr_type (dep_insn)) | |
4592 | { | |
4593 | case TYPE_FPALU: | |
4594 | case TYPE_FPMULSGL: | |
4595 | case TYPE_FPMULDBL: | |
4596 | case TYPE_FPDIVSGL: | |
4597 | case TYPE_FPDIVDBL: | |
4598 | case TYPE_FPSQRTSGL: | |
4599 | case TYPE_FPSQRTDBL: | |
4600 | /* A fpload can't be issued until one cycle before a | |
ddd5a7c1 | 4601 | preceding arithmetic operation has finished if |
c47decad | 4602 | the target of the fpload is the destination of the |
fae15c93 VM |
4603 | arithmetic operation. |
4604 | ||
4605 | Exception: For PA7100LC, PA7200 and PA7300, the cost | |
4606 | is 3 cycles, unless they bundle together. We also | |
4607 | pay the penalty if the second insn is a fpload. */ | |
5d50fab3 | 4608 | return insn_default_latency (dep_insn) - 1; |
780f491f | 4609 | |
c47decad JL |
4610 | default: |
4611 | return 0; | |
4612 | } | |
4613 | } | |
4614 | } | |
b09fa787 | 4615 | else if (attr_type == TYPE_FPALU) |
c47decad JL |
4616 | { |
4617 | rtx pat = PATTERN (insn); | |
4618 | rtx dep_pat = PATTERN (dep_insn); | |
4619 | if (GET_CODE (pat) == PARALLEL) | |
4620 | { | |
4621 | /* This happens for the fldXs,mb patterns. */ | |
4622 | pat = XVECEXP (pat, 0, 0); | |
4623 | } | |
4624 | if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET) | |
4625 | /* If this happens, we have to extend this to schedule | |
4626 | optimally. Return 0 for now. */ | |
4627 | return 0; | |
4628 | ||
4629 | if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat))) | |
4630 | { | |
4631 | if (! recog_memoized (dep_insn)) | |
4632 | return 0; | |
4633 | switch (get_attr_type (dep_insn)) | |
4634 | { | |
4635 | case TYPE_FPDIVSGL: | |
4636 | case TYPE_FPDIVDBL: | |
4637 | case TYPE_FPSQRTSGL: | |
4638 | case TYPE_FPSQRTDBL: | |
4639 | /* An ALU flop can't be issued until two cycles before a | |
ddd5a7c1 | 4640 | preceding divide or sqrt operation has finished if |
c47decad | 4641 | the target of the ALU flop is also the target of |
38e01259 | 4642 | the divide or sqrt operation. */ |
5d50fab3 | 4643 | return insn_default_latency (dep_insn) - 2; |
c47decad JL |
4644 | |
4645 | default: | |
4646 | return 0; | |
4647 | } | |
4648 | } | |
4649 | } | |
4650 | ||
4651 | /* For other output dependencies, the cost is 0. */ | |
4652 | return 0; | |
144d51f9 NS |
4653 | |
4654 | default: | |
4655 | gcc_unreachable (); | |
c47decad | 4656 | } |
780f491f | 4657 | } |
188538df | 4658 | |
c237e94a ZW |
4659 | /* Adjust scheduling priorities. We use this to try and keep addil |
4660 | and the next use of %r1 close together. */ | |
4661 | static int | |
b7849684 | 4662 | pa_adjust_priority (rtx insn, int priority) |
c237e94a ZW |
4663 | { |
4664 | rtx set = single_set (insn); | |
4665 | rtx src, dest; | |
4666 | if (set) | |
4667 | { | |
4668 | src = SET_SRC (set); | |
4669 | dest = SET_DEST (set); | |
4670 | if (GET_CODE (src) == LO_SUM | |
4671 | && symbolic_operand (XEXP (src, 1), VOIDmode) | |
4672 | && ! read_only_operand (XEXP (src, 1), VOIDmode)) | |
4673 | priority >>= 3; | |
4674 | ||
4675 | else if (GET_CODE (src) == MEM | |
4676 | && GET_CODE (XEXP (src, 0)) == LO_SUM | |
4677 | && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode) | |
4678 | && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode)) | |
4679 | priority >>= 1; | |
4680 | ||
4681 | else if (GET_CODE (dest) == MEM | |
4682 | && GET_CODE (XEXP (dest, 0)) == LO_SUM | |
4683 | && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode) | |
4684 | && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)) | |
4685 | priority >>= 3; | |
4686 | } | |
4687 | return priority; | |
4688 | } | |
4689 | ||
4690 | /* The 700 can only issue a single insn at a time. | |
4691 | The 7XXX processors can issue two insns at a time. | |
4692 | The 8000 can issue 4 insns at a time. */ | |
4693 | static int | |
b7849684 | 4694 | pa_issue_rate (void) |
c237e94a ZW |
4695 | { |
4696 | switch (pa_cpu) | |
4697 | { | |
4698 | case PROCESSOR_700: return 1; | |
4699 | case PROCESSOR_7100: return 2; | |
4700 | case PROCESSOR_7100LC: return 2; | |
4701 | case PROCESSOR_7200: return 2; | |
fae15c93 | 4702 | case PROCESSOR_7300: return 2; |
c237e94a ZW |
4703 | case PROCESSOR_8000: return 4; |
4704 | ||
4705 | default: | |
144d51f9 | 4706 | gcc_unreachable (); |
c237e94a ZW |
4707 | } |
4708 | } | |
4709 | ||
4710 | ||
4711 | ||
3673e996 | 4712 | /* Return any length adjustment needed by INSN which already has its length |
23f6f34f | 4713 | computed as LENGTH. Return zero if no adjustment is necessary. |
3673e996 | 4714 | |
b9821af8 | 4715 | For the PA: function calls, millicode calls, and backwards short |
23f6f34f | 4716 | conditional branches with unfilled delay slots need an adjustment by +1 |
b9821af8 | 4717 | (to account for the NOP which will be inserted into the instruction stream). |
3673e996 RS |
4718 | |
4719 | Also compute the length of an inline block move here as it is too | |
b9821af8 | 4720 | complicated to express as a length attribute in pa.md. */ |
3673e996 | 4721 | int |
b7849684 | 4722 | pa_adjust_insn_length (rtx insn, int length) |
3673e996 RS |
4723 | { |
4724 | rtx pat = PATTERN (insn); | |
4725 | ||
32562302 JDA |
4726 | /* Jumps inside switch tables which have unfilled delay slots need |
4727 | adjustment. */ | |
4728 | if (GET_CODE (insn) == JUMP_INSN | |
cb4d476c JDA |
4729 | && GET_CODE (pat) == PARALLEL |
4730 | && get_attr_type (insn) == TYPE_BTABLE_BRANCH) | |
746a9efa | 4731 | return 4; |
3673e996 RS |
4732 | /* Millicode insn with an unfilled delay slot. */ |
4733 | else if (GET_CODE (insn) == INSN | |
4734 | && GET_CODE (pat) != SEQUENCE | |
4735 | && GET_CODE (pat) != USE | |
4736 | && GET_CODE (pat) != CLOBBER | |
4737 | && get_attr_type (insn) == TYPE_MILLI) | |
a1b36964 | 4738 | return 4; |
3673e996 RS |
4739 | /* Block move pattern. */ |
4740 | else if (GET_CODE (insn) == INSN | |
4741 | && GET_CODE (pat) == PARALLEL | |
4096479e | 4742 | && GET_CODE (XVECEXP (pat, 0, 0)) == SET |
3673e996 RS |
4743 | && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM |
4744 | && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM | |
4745 | && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode | |
4746 | && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode) | |
70128ad9 | 4747 | return compute_movmem_length (insn) - 4; |
cdc9103c JDA |
4748 | /* Block clear pattern. */ |
4749 | else if (GET_CODE (insn) == INSN | |
4750 | && GET_CODE (pat) == PARALLEL | |
4751 | && GET_CODE (XVECEXP (pat, 0, 0)) == SET | |
4752 | && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM | |
4753 | && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx | |
4754 | && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode) | |
70128ad9 | 4755 | return compute_clrmem_length (insn) - 4; |
3673e996 | 4756 | /* Conditional branch with an unfilled delay slot. */ |
b9821af8 JL |
4757 | else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn)) |
4758 | { | |
4759 | /* Adjust a short backwards conditional with an unfilled delay slot. */ | |
4760 | if (GET_CODE (pat) == SET | |
a1b36964 | 4761 | && length == 4 |
b9821af8 | 4762 | && ! forward_branch_p (insn)) |
a1b36964 | 4763 | return 4; |
b1092901 JL |
4764 | else if (GET_CODE (pat) == PARALLEL |
4765 | && get_attr_type (insn) == TYPE_PARALLEL_BRANCH | |
4766 | && length == 4) | |
4767 | return 4; | |
b9821af8 | 4768 | /* Adjust dbra insn with short backwards conditional branch with |
23f6f34f | 4769 | unfilled delay slot -- only for case where counter is in a |
fe19a83d | 4770 | general register register. */ |
b9821af8 JL |
4771 | else if (GET_CODE (pat) == PARALLEL |
4772 | && GET_CODE (XVECEXP (pat, 0, 1)) == SET | |
4773 | && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG | |
23f6f34f | 4774 | && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0)) |
a1b36964 | 4775 | && length == 4 |
b9821af8 | 4776 | && ! forward_branch_p (insn)) |
a1b36964 | 4777 | return 4; |
b9821af8 JL |
4778 | else |
4779 | return 0; | |
4780 | } | |
b1092901 | 4781 | return 0; |
3673e996 RS |
4782 | } |
4783 | ||
188538df TG |
4784 | /* Print operand X (an rtx) in assembler syntax to file FILE. |
4785 | CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified. | |
4786 | For `%' followed by punctuation, CODE is the punctuation and X is null. */ | |
4787 | ||
4788 | void | |
b7849684 | 4789 | print_operand (FILE *file, rtx x, int code) |
188538df TG |
4790 | { |
4791 | switch (code) | |
4792 | { | |
4793 | case '#': | |
4794 | /* Output a 'nop' if there's nothing for the delay slot. */ | |
4795 | if (dbr_sequence_length () == 0) | |
4796 | fputs ("\n\tnop", file); | |
4797 | return; | |
4798 | case '*': | |
5bdc5878 | 4799 | /* Output a nullification completer if there's nothing for the */ |
23f6f34f | 4800 | /* delay slot or nullification is requested. */ |
188538df TG |
4801 | if (dbr_sequence_length () == 0 || |
4802 | (final_sequence && | |
4803 | INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0)))) | |
4804 | fputs (",n", file); | |
4805 | return; | |
4806 | case 'R': | |
4807 | /* Print out the second register name of a register pair. | |
4808 | I.e., R (6) => 7. */ | |
831c1763 | 4809 | fputs (reg_names[REGNO (x) + 1], file); |
188538df TG |
4810 | return; |
4811 | case 'r': | |
fe19a83d | 4812 | /* A register or zero. */ |
f048ca47 JL |
4813 | if (x == const0_rtx |
4814 | || (x == CONST0_RTX (DFmode)) | |
4815 | || (x == CONST0_RTX (SFmode))) | |
188538df | 4816 | { |
55abf18a JL |
4817 | fputs ("%r0", file); |
4818 | return; | |
4819 | } | |
4820 | else | |
4821 | break; | |
4822 | case 'f': | |
fe19a83d | 4823 | /* A register or zero (floating point). */ |
55abf18a JL |
4824 | if (x == const0_rtx |
4825 | || (x == CONST0_RTX (DFmode)) | |
4826 | || (x == CONST0_RTX (SFmode))) | |
4827 | { | |
4828 | fputs ("%fr0", file); | |
188538df TG |
4829 | return; |
4830 | } | |
4831 | else | |
4832 | break; | |
f8eb41cc JL |
4833 | case 'A': |
4834 | { | |
4835 | rtx xoperands[2]; | |
4836 | ||
4837 | xoperands[0] = XEXP (XEXP (x, 0), 0); | |
4838 | xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0); | |
4839 | output_global_address (file, xoperands[1], 0); | |
4840 | fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]); | |
4841 | return; | |
4842 | } | |
4843 | ||
c85b8963 | 4844 | case 'C': /* Plain (C)ondition */ |
188538df TG |
4845 | case 'X': |
4846 | switch (GET_CODE (x)) | |
23f6f34f | 4847 | { |
188538df | 4848 | case EQ: |
e236a9ff | 4849 | fputs ("=", file); break; |
188538df | 4850 | case NE: |
e236a9ff | 4851 | fputs ("<>", file); break; |
188538df | 4852 | case GT: |
e236a9ff | 4853 | fputs (">", file); break; |
188538df | 4854 | case GE: |
e236a9ff | 4855 | fputs (">=", file); break; |
188538df | 4856 | case GEU: |
e236a9ff | 4857 | fputs (">>=", file); break; |
188538df | 4858 | case GTU: |
e236a9ff | 4859 | fputs (">>", file); break; |
188538df | 4860 | case LT: |
e236a9ff | 4861 | fputs ("<", file); break; |
188538df | 4862 | case LE: |
e236a9ff | 4863 | fputs ("<=", file); break; |
188538df | 4864 | case LEU: |
e236a9ff | 4865 | fputs ("<<=", file); break; |
188538df | 4866 | case LTU: |
e236a9ff | 4867 | fputs ("<<", file); break; |
188538df | 4868 | default: |
144d51f9 | 4869 | gcc_unreachable (); |
188538df TG |
4870 | } |
4871 | return; | |
c85b8963 | 4872 | case 'N': /* Condition, (N)egated */ |
188538df TG |
4873 | switch (GET_CODE (x)) |
4874 | { | |
4875 | case EQ: | |
e236a9ff | 4876 | fputs ("<>", file); break; |
188538df | 4877 | case NE: |
e236a9ff | 4878 | fputs ("=", file); break; |
188538df | 4879 | case GT: |
e236a9ff | 4880 | fputs ("<=", file); break; |
188538df | 4881 | case GE: |
e236a9ff | 4882 | fputs ("<", file); break; |
188538df | 4883 | case GEU: |
e236a9ff | 4884 | fputs ("<<", file); break; |
188538df | 4885 | case GTU: |
e236a9ff | 4886 | fputs ("<<=", file); break; |
188538df | 4887 | case LT: |
e236a9ff | 4888 | fputs (">=", file); break; |
188538df | 4889 | case LE: |
e236a9ff | 4890 | fputs (">", file); break; |
188538df | 4891 | case LEU: |
e236a9ff | 4892 | fputs (">>", file); break; |
188538df | 4893 | case LTU: |
e236a9ff | 4894 | fputs (">>=", file); break; |
188538df | 4895 | default: |
144d51f9 | 4896 | gcc_unreachable (); |
188538df TG |
4897 | } |
4898 | return; | |
831c1763 | 4899 | /* For floating point comparisons. Note that the output |
69049ba0 JDA |
4900 | predicates are the complement of the desired mode. The |
4901 | conditions for GT, GE, LT, LE and LTGT cause an invalid | |
4902 | operation exception if the result is unordered and this | |
4903 | exception is enabled in the floating-point status register. */ | |
d6c0d377 JL |
4904 | case 'Y': |
4905 | switch (GET_CODE (x)) | |
4906 | { | |
4907 | case EQ: | |
e236a9ff | 4908 | fputs ("!=", file); break; |
d6c0d377 | 4909 | case NE: |
e236a9ff | 4910 | fputs ("=", file); break; |
d6c0d377 | 4911 | case GT: |
becf1647 | 4912 | fputs ("!>", file); break; |
d6c0d377 | 4913 | case GE: |
becf1647 | 4914 | fputs ("!>=", file); break; |
d6c0d377 | 4915 | case LT: |
becf1647 | 4916 | fputs ("!<", file); break; |
d6c0d377 | 4917 | case LE: |
becf1647 DA |
4918 | fputs ("!<=", file); break; |
4919 | case LTGT: | |
4920 | fputs ("!<>", file); break; | |
4921 | case UNLE: | |
69049ba0 | 4922 | fputs ("!?<=", file); break; |
becf1647 | 4923 | case UNLT: |
69049ba0 | 4924 | fputs ("!?<", file); break; |
becf1647 | 4925 | case UNGE: |
69049ba0 | 4926 | fputs ("!?>=", file); break; |
becf1647 | 4927 | case UNGT: |
69049ba0 | 4928 | fputs ("!?>", file); break; |
becf1647 | 4929 | case UNEQ: |
69049ba0 | 4930 | fputs ("!?=", file); break; |
becf1647 | 4931 | case UNORDERED: |
69049ba0 | 4932 | fputs ("!?", file); break; |
becf1647 | 4933 | case ORDERED: |
69049ba0 | 4934 | fputs ("?", file); break; |
d6c0d377 | 4935 | default: |
144d51f9 | 4936 | gcc_unreachable (); |
d6c0d377 JL |
4937 | } |
4938 | return; | |
c85b8963 TG |
4939 | case 'S': /* Condition, operands are (S)wapped. */ |
4940 | switch (GET_CODE (x)) | |
4941 | { | |
4942 | case EQ: | |
e236a9ff | 4943 | fputs ("=", file); break; |
c85b8963 | 4944 | case NE: |
e236a9ff | 4945 | fputs ("<>", file); break; |
c85b8963 | 4946 | case GT: |
e236a9ff | 4947 | fputs ("<", file); break; |
c85b8963 | 4948 | case GE: |
e236a9ff | 4949 | fputs ("<=", file); break; |
c85b8963 | 4950 | case GEU: |
e236a9ff | 4951 | fputs ("<<=", file); break; |
c85b8963 | 4952 | case GTU: |
e236a9ff | 4953 | fputs ("<<", file); break; |
c85b8963 | 4954 | case LT: |
e236a9ff | 4955 | fputs (">", file); break; |
c85b8963 | 4956 | case LE: |
e236a9ff | 4957 | fputs (">=", file); break; |
c85b8963 | 4958 | case LEU: |
e236a9ff | 4959 | fputs (">>=", file); break; |
c85b8963 | 4960 | case LTU: |
e236a9ff | 4961 | fputs (">>", file); break; |
c85b8963 | 4962 | default: |
144d51f9 | 4963 | gcc_unreachable (); |
23f6f34f | 4964 | } |
c85b8963 TG |
4965 | return; |
4966 | case 'B': /* Condition, (B)oth swapped and negate. */ | |
4967 | switch (GET_CODE (x)) | |
4968 | { | |
4969 | case EQ: | |
e236a9ff | 4970 | fputs ("<>", file); break; |
c85b8963 | 4971 | case NE: |
e236a9ff | 4972 | fputs ("=", file); break; |
c85b8963 | 4973 | case GT: |
e236a9ff | 4974 | fputs (">=", file); break; |
c85b8963 | 4975 | case GE: |
e236a9ff | 4976 | fputs (">", file); break; |
c85b8963 | 4977 | case GEU: |
e236a9ff | 4978 | fputs (">>", file); break; |
c85b8963 | 4979 | case GTU: |
e236a9ff | 4980 | fputs (">>=", file); break; |
c85b8963 | 4981 | case LT: |
e236a9ff | 4982 | fputs ("<=", file); break; |
c85b8963 | 4983 | case LE: |
e236a9ff | 4984 | fputs ("<", file); break; |
c85b8963 | 4985 | case LEU: |
e236a9ff | 4986 | fputs ("<<", file); break; |
c85b8963 | 4987 | case LTU: |
e236a9ff | 4988 | fputs ("<<=", file); break; |
c85b8963 | 4989 | default: |
144d51f9 | 4990 | gcc_unreachable (); |
23f6f34f | 4991 | } |
c85b8963 TG |
4992 | return; |
4993 | case 'k': | |
144d51f9 NS |
4994 | gcc_assert (GET_CODE (x) == CONST_INT); |
4995 | fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x)); | |
4996 | return; | |
520babc7 | 4997 | case 'Q': |
144d51f9 NS |
4998 | gcc_assert (GET_CODE (x) == CONST_INT); |
4999 | fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63)); | |
5000 | return; | |
c8d6697c | 5001 | case 'L': |
144d51f9 NS |
5002 | gcc_assert (GET_CODE (x) == CONST_INT); |
5003 | fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31)); | |
5004 | return; | |
4802a0d6 | 5005 | case 'O': |
144d51f9 NS |
5006 | gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0); |
5007 | fprintf (file, "%d", exact_log2 (INTVAL (x))); | |
5008 | return; | |
520babc7 | 5009 | case 'p': |
144d51f9 NS |
5010 | gcc_assert (GET_CODE (x) == CONST_INT); |
5011 | fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63)); | |
5012 | return; | |
c8d6697c | 5013 | case 'P': |
144d51f9 NS |
5014 | gcc_assert (GET_CODE (x) == CONST_INT); |
5015 | fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31)); | |
5016 | return; | |
c85b8963 TG |
5017 | case 'I': |
5018 | if (GET_CODE (x) == CONST_INT) | |
5019 | fputs ("i", file); | |
5020 | return; | |
188538df | 5021 | case 'M': |
2414e0e2 | 5022 | case 'F': |
188538df TG |
5023 | switch (GET_CODE (XEXP (x, 0))) |
5024 | { | |
5025 | case PRE_DEC: | |
5026 | case PRE_INC: | |
f38b27c7 JL |
5027 | if (ASSEMBLER_DIALECT == 0) |
5028 | fputs ("s,mb", file); | |
5029 | else | |
5030 | fputs (",mb", file); | |
188538df TG |
5031 | break; |
5032 | case POST_DEC: | |
5033 | case POST_INC: | |
f38b27c7 JL |
5034 | if (ASSEMBLER_DIALECT == 0) |
5035 | fputs ("s,ma", file); | |
5036 | else | |
5037 | fputs (",ma", file); | |
188538df | 5038 | break; |
2414e0e2 | 5039 | case PLUS: |
d8f95bed JDA |
5040 | if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG |
5041 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG) | |
5042 | { | |
5043 | if (ASSEMBLER_DIALECT == 0) | |
5044 | fputs ("x", file); | |
5045 | } | |
5046 | else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT | |
5047 | || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT) | |
f38b27c7 JL |
5048 | { |
5049 | if (ASSEMBLER_DIALECT == 0) | |
5050 | fputs ("x,s", file); | |
5051 | else | |
5052 | fputs (",s", file); | |
5053 | } | |
5054 | else if (code == 'F' && ASSEMBLER_DIALECT == 0) | |
2414e0e2 | 5055 | fputs ("s", file); |
188538df TG |
5056 | break; |
5057 | default: | |
f38b27c7 | 5058 | if (code == 'F' && ASSEMBLER_DIALECT == 0) |
2414e0e2 | 5059 | fputs ("s", file); |
188538df TG |
5060 | break; |
5061 | } | |
5062 | return; | |
5063 | case 'G': | |
ad238e4b JL |
5064 | output_global_address (file, x, 0); |
5065 | return; | |
5066 | case 'H': | |
5067 | output_global_address (file, x, 1); | |
188538df TG |
5068 | return; |
5069 | case 0: /* Don't do anything special */ | |
5070 | break; | |
a1747d2c TG |
5071 | case 'Z': |
5072 | { | |
5073 | unsigned op[3]; | |
6fda0f5b | 5074 | compute_zdepwi_operands (INTVAL (x), op); |
a1747d2c TG |
5075 | fprintf (file, "%d,%d,%d", op[0], op[1], op[2]); |
5076 | return; | |
5077 | } | |
520babc7 JL |
5078 | case 'z': |
5079 | { | |
5080 | unsigned op[3]; | |
5081 | compute_zdepdi_operands (INTVAL (x), op); | |
5082 | fprintf (file, "%d,%d,%d", op[0], op[1], op[2]); | |
5083 | return; | |
5084 | } | |
11881f37 AM |
5085 | case 'c': |
5086 | /* We can get here from a .vtable_inherit due to our | |
5087 | CONSTANT_ADDRESS_P rejecting perfectly good constant | |
5088 | addresses. */ | |
5089 | break; | |
188538df | 5090 | default: |
144d51f9 | 5091 | gcc_unreachable (); |
188538df TG |
5092 | } |
5093 | if (GET_CODE (x) == REG) | |
80225b66 | 5094 | { |
3ba1236f | 5095 | fputs (reg_names [REGNO (x)], file); |
520babc7 JL |
5096 | if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4) |
5097 | { | |
5098 | fputs ("R", file); | |
5099 | return; | |
5100 | } | |
5101 | if (FP_REG_P (x) | |
5102 | && GET_MODE_SIZE (GET_MODE (x)) <= 4 | |
5103 | && (REGNO (x) & 1) == 0) | |
3ba1236f | 5104 | fputs ("L", file); |
80225b66 | 5105 | } |
188538df TG |
5106 | else if (GET_CODE (x) == MEM) |
5107 | { | |
5108 | int size = GET_MODE_SIZE (GET_MODE (x)); | |
478a4495 | 5109 | rtx base = NULL_RTX; |
188538df TG |
5110 | switch (GET_CODE (XEXP (x, 0))) |
5111 | { | |
5112 | case PRE_DEC: | |
5113 | case POST_DEC: | |
520babc7 | 5114 | base = XEXP (XEXP (x, 0), 0); |
d2d28085 | 5115 | fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]); |
188538df TG |
5116 | break; |
5117 | case PRE_INC: | |
5118 | case POST_INC: | |
520babc7 | 5119 | base = XEXP (XEXP (x, 0), 0); |
d2d28085 | 5120 | fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]); |
188538df | 5121 | break; |
d8f95bed JDA |
5122 | case PLUS: |
5123 | if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT) | |
d2d28085 | 5124 | fprintf (file, "%s(%s)", |
2414e0e2 JL |
5125 | reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))], |
5126 | reg_names [REGNO (XEXP (XEXP (x, 0), 1))]); | |
d8f95bed | 5127 | else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT) |
d2d28085 | 5128 | fprintf (file, "%s(%s)", |
2414e0e2 JL |
5129 | reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))], |
5130 | reg_names [REGNO (XEXP (XEXP (x, 0), 0))]); | |
d8f95bed JDA |
5131 | else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG |
5132 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG) | |
5133 | { | |
5134 | /* Because the REG_POINTER flag can get lost during reload, | |
5135 | GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the | |
5136 | index and base registers in the combined move patterns. */ | |
5137 | rtx base = XEXP (XEXP (x, 0), 1); | |
5138 | rtx index = XEXP (XEXP (x, 0), 0); | |
5139 | ||
5140 | fprintf (file, "%s(%s)", | |
5141 | reg_names [REGNO (index)], reg_names [REGNO (base)]); | |
5142 | } | |
2414e0e2 JL |
5143 | else |
5144 | output_address (XEXP (x, 0)); | |
188538df | 5145 | break; |
d8f95bed JDA |
5146 | default: |
5147 | output_address (XEXP (x, 0)); | |
5148 | break; | |
188538df TG |
5149 | } |
5150 | } | |
188538df TG |
5151 | else |
5152 | output_addr_const (file, x); | |
5153 | } | |
5154 | ||
fe19a83d | 5155 | /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */ |
188538df TG |
5156 | |
5157 | void | |
b7849684 | 5158 | output_global_address (FILE *file, rtx x, int round_constant) |
188538df | 5159 | { |
43940f6b JL |
5160 | |
5161 | /* Imagine (high (const (plus ...))). */ | |
5162 | if (GET_CODE (x) == HIGH) | |
5163 | x = XEXP (x, 0); | |
5164 | ||
519104fe | 5165 | if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode)) |
744b2d61 | 5166 | output_addr_const (file, x); |
6bb36601 | 5167 | else if (GET_CODE (x) == SYMBOL_REF && !flag_pic) |
188538df | 5168 | { |
744b2d61 | 5169 | output_addr_const (file, x); |
e236a9ff | 5170 | fputs ("-$global$", file); |
188538df TG |
5171 | } |
5172 | else if (GET_CODE (x) == CONST) | |
5173 | { | |
519104fe | 5174 | const char *sep = ""; |
188538df | 5175 | int offset = 0; /* assembler wants -$global$ at end */ |
516c2342 | 5176 | rtx base = NULL_RTX; |
23f6f34f | 5177 | |
144d51f9 | 5178 | switch (GET_CODE (XEXP (XEXP (x, 0), 0))) |
188538df | 5179 | { |
144d51f9 | 5180 | case SYMBOL_REF: |
188538df TG |
5181 | base = XEXP (XEXP (x, 0), 0); |
5182 | output_addr_const (file, base); | |
144d51f9 NS |
5183 | break; |
5184 | case CONST_INT: | |
5185 | offset = INTVAL (XEXP (XEXP (x, 0), 0)); | |
5186 | break; | |
5187 | default: | |
5188 | gcc_unreachable (); | |
188538df | 5189 | } |
188538df | 5190 | |
144d51f9 | 5191 | switch (GET_CODE (XEXP (XEXP (x, 0), 1))) |
188538df | 5192 | { |
144d51f9 | 5193 | case SYMBOL_REF: |
188538df TG |
5194 | base = XEXP (XEXP (x, 0), 1); |
5195 | output_addr_const (file, base); | |
144d51f9 NS |
5196 | break; |
5197 | case CONST_INT: | |
5198 | offset = INTVAL (XEXP (XEXP (x, 0), 1)); | |
5199 | break; | |
5200 | default: | |
5201 | gcc_unreachable (); | |
188538df | 5202 | } |
188538df | 5203 | |
ad238e4b JL |
5204 | /* How bogus. The compiler is apparently responsible for |
5205 | rounding the constant if it uses an LR field selector. | |
5206 | ||
5207 | The linker and/or assembler seem a better place since | |
5208 | they have to do this kind of thing already. | |
5209 | ||
5210 | If we fail to do this, HP's optimizing linker may eliminate | |
5211 | an addil, but not update the ldw/stw/ldo instruction that | |
5212 | uses the result of the addil. */ | |
5213 | if (round_constant) | |
5214 | offset = ((offset + 0x1000) & ~0x1fff); | |
5215 | ||
144d51f9 | 5216 | switch (GET_CODE (XEXP (x, 0))) |
188538df | 5217 | { |
144d51f9 | 5218 | case PLUS: |
188538df TG |
5219 | if (offset < 0) |
5220 | { | |
5221 | offset = -offset; | |
5222 | sep = "-"; | |
5223 | } | |
5224 | else | |
5225 | sep = "+"; | |
144d51f9 NS |
5226 | break; |
5227 | ||
5228 | case MINUS: | |
5229 | gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF); | |
5230 | sep = "-"; | |
5231 | break; | |
188538df | 5232 | |
144d51f9 NS |
5233 | default: |
5234 | gcc_unreachable (); | |
5235 | } | |
5236 | ||
519104fe | 5237 | if (!read_only_operand (base, VOIDmode) && !flag_pic) |
e236a9ff | 5238 | fputs ("-$global$", file); |
ad238e4b | 5239 | if (offset) |
831c1763 | 5240 | fprintf (file, "%s%d", sep, offset); |
188538df TG |
5241 | } |
5242 | else | |
5243 | output_addr_const (file, x); | |
5244 | } | |
5245 | ||
1bc7c5b6 ZW |
5246 | /* Output boilerplate text to appear at the beginning of the file. |
5247 | There are several possible versions. */ | |
5248 | #define aputs(x) fputs(x, asm_out_file) | |
5249 | static inline void | |
b7849684 | 5250 | pa_file_start_level (void) |
1bc7c5b6 ZW |
5251 | { |
5252 | if (TARGET_64BIT) | |
5253 | aputs ("\t.LEVEL 2.0w\n"); | |
5254 | else if (TARGET_PA_20) | |
5255 | aputs ("\t.LEVEL 2.0\n"); | |
5256 | else if (TARGET_PA_11) | |
5257 | aputs ("\t.LEVEL 1.1\n"); | |
5258 | else | |
5259 | aputs ("\t.LEVEL 1.0\n"); | |
5260 | } | |
5261 | ||
5262 | static inline void | |
b7849684 | 5263 | pa_file_start_space (int sortspace) |
1bc7c5b6 ZW |
5264 | { |
5265 | aputs ("\t.SPACE $PRIVATE$"); | |
5266 | if (sortspace) | |
5267 | aputs (",SORT=16"); | |
5268 | aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31" | |
5269 | "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82" | |
5270 | "\n\t.SPACE $TEXT$"); | |
5271 | if (sortspace) | |
5272 | aputs (",SORT=8"); | |
5273 | aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44" | |
5274 | "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n"); | |
5275 | } | |
5276 | ||
5277 | static inline void | |
b7849684 | 5278 | pa_file_start_file (int want_version) |
1bc7c5b6 ZW |
5279 | { |
5280 | if (write_symbols != NO_DEBUG) | |
5281 | { | |
5282 | output_file_directive (asm_out_file, main_input_filename); | |
5283 | if (want_version) | |
5284 | aputs ("\t.version\t\"01.01\"\n"); | |
5285 | } | |
5286 | } | |
5287 | ||
5288 | static inline void | |
b7849684 | 5289 | pa_file_start_mcount (const char *aswhat) |
1bc7c5b6 ZW |
5290 | { |
5291 | if (profile_flag) | |
5292 | fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat); | |
5293 | } | |
5294 | ||
5295 | static void | |
b7849684 | 5296 | pa_elf_file_start (void) |
1bc7c5b6 ZW |
5297 | { |
5298 | pa_file_start_level (); | |
5299 | pa_file_start_mcount ("ENTRY"); | |
5300 | pa_file_start_file (0); | |
5301 | } | |
5302 | ||
5303 | static void | |
b7849684 | 5304 | pa_som_file_start (void) |
1bc7c5b6 ZW |
5305 | { |
5306 | pa_file_start_level (); | |
5307 | pa_file_start_space (0); | |
5308 | aputs ("\t.IMPORT $global$,DATA\n" | |
5309 | "\t.IMPORT $$dyncall,MILLICODE\n"); | |
5310 | pa_file_start_mcount ("CODE"); | |
5311 | pa_file_start_file (0); | |
5312 | } | |
5313 | ||
5314 | static void | |
b7849684 | 5315 | pa_linux_file_start (void) |
1bc7c5b6 ZW |
5316 | { |
5317 | pa_file_start_file (1); | |
5318 | pa_file_start_level (); | |
5319 | pa_file_start_mcount ("CODE"); | |
5320 | } | |
5321 | ||
5322 | static void | |
b7849684 | 5323 | pa_hpux64_gas_file_start (void) |
1bc7c5b6 ZW |
5324 | { |
5325 | pa_file_start_level (); | |
5326 | #ifdef ASM_OUTPUT_TYPE_DIRECTIVE | |
5327 | if (profile_flag) | |
5328 | ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function"); | |
5329 | #endif | |
5330 | pa_file_start_file (1); | |
5331 | } | |
5332 | ||
5333 | static void | |
b7849684 | 5334 | pa_hpux64_hpas_file_start (void) |
1bc7c5b6 ZW |
5335 | { |
5336 | pa_file_start_level (); | |
5337 | pa_file_start_space (1); | |
5338 | pa_file_start_mcount ("CODE"); | |
5339 | pa_file_start_file (0); | |
5340 | } | |
5341 | #undef aputs | |
5342 | ||
7aaf280e JDA |
5343 | /* Search the deferred plabel list for SYMBOL and return its internal |
5344 | label. If an entry for SYMBOL is not found, a new entry is created. */ | |
5345 | ||
5346 | rtx | |
5347 | get_deferred_plabel (rtx symbol) | |
a02aa5b0 | 5348 | { |
744b2d61 | 5349 | const char *fname = XSTR (symbol, 0); |
a02aa5b0 JDA |
5350 | size_t i; |
5351 | ||
5352 | /* See if we have already put this function on the list of deferred | |
5353 | plabels. This list is generally small, so a liner search is not | |
5354 | too ugly. If it proves too slow replace it with something faster. */ | |
5355 | for (i = 0; i < n_deferred_plabels; i++) | |
744b2d61 | 5356 | if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0) |
a02aa5b0 JDA |
5357 | break; |
5358 | ||
5359 | /* If the deferred plabel list is empty, or this entry was not found | |
5360 | on the list, create a new entry on the list. */ | |
5361 | if (deferred_plabels == NULL || i == n_deferred_plabels) | |
5362 | { | |
744b2d61 JDA |
5363 | tree id; |
5364 | ||
a02aa5b0 JDA |
5365 | if (deferred_plabels == 0) |
5366 | deferred_plabels = (struct deferred_plabel *) | |
5367 | ggc_alloc (sizeof (struct deferred_plabel)); | |
5368 | else | |
5369 | deferred_plabels = (struct deferred_plabel *) | |
5370 | ggc_realloc (deferred_plabels, | |
5371 | ((n_deferred_plabels + 1) | |
5372 | * sizeof (struct deferred_plabel))); | |
5373 | ||
5374 | i = n_deferred_plabels++; | |
5375 | deferred_plabels[i].internal_label = gen_label_rtx (); | |
744b2d61 | 5376 | deferred_plabels[i].symbol = symbol; |
a02aa5b0 | 5377 | |
744b2d61 JDA |
5378 | /* Gross. We have just implicitly taken the address of this |
5379 | function. Mark it in the same manner as assemble_name. */ | |
5380 | id = maybe_get_identifier (targetm.strip_name_encoding (fname)); | |
5381 | if (id) | |
5382 | mark_referenced (id); | |
a02aa5b0 JDA |
5383 | } |
5384 | ||
7aaf280e | 5385 | return deferred_plabels[i].internal_label; |
a02aa5b0 JDA |
5386 | } |
5387 | ||
a5fe455b | 5388 | static void |
b7849684 | 5389 | output_deferred_plabels (void) |
359255a9 | 5390 | { |
0f8e3849 | 5391 | size_t i; |
1a83bfc3 JDA |
5392 | |
5393 | /* If we have some deferred plabels, then we need to switch into the | |
5394 | data or readonly data section, and align it to a 4 byte boundary | |
6416ae7f | 5395 | before outputting the deferred plabels. */ |
359255a9 JL |
5396 | if (n_deferred_plabels) |
5397 | { | |
1a83bfc3 | 5398 | switch_to_section (flag_pic ? data_section : readonly_data_section); |
a5fe455b | 5399 | ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2); |
359255a9 JL |
5400 | } |
5401 | ||
5402 | /* Now output the deferred plabels. */ | |
5403 | for (i = 0; i < n_deferred_plabels; i++) | |
5404 | { | |
ecc418c4 | 5405 | targetm.asm_out.internal_label (asm_out_file, "L", |
a5fe455b | 5406 | CODE_LABEL_NUMBER (deferred_plabels[i].internal_label)); |
744b2d61 | 5407 | assemble_integer (deferred_plabels[i].symbol, |
3d9268b6 | 5408 | TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1); |
359255a9 JL |
5409 | } |
5410 | } | |
5411 | ||
c15c90bb ZW |
5412 | #ifdef HPUX_LONG_DOUBLE_LIBRARY |
5413 | /* Initialize optabs to point to HPUX long double emulation routines. */ | |
5414 | static void | |
5415 | pa_hpux_init_libfuncs (void) | |
5416 | { | |
5417 | set_optab_libfunc (add_optab, TFmode, "_U_Qfadd"); | |
5418 | set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub"); | |
5419 | set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy"); | |
5420 | set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv"); | |
5421 | set_optab_libfunc (smin_optab, TFmode, "_U_Qmin"); | |
5422 | set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax"); | |
5423 | set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt"); | |
5424 | set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs"); | |
5425 | set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg"); | |
5426 | ||
c9034561 ZW |
5427 | set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq"); |
5428 | set_optab_libfunc (ne_optab, TFmode, "_U_Qfne"); | |
5429 | set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt"); | |
5430 | set_optab_libfunc (ge_optab, TFmode, "_U_Qfge"); | |
5431 | set_optab_libfunc (lt_optab, TFmode, "_U_Qflt"); | |
5432 | set_optab_libfunc (le_optab, TFmode, "_U_Qfle"); | |
e2ddd6ca | 5433 | set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord"); |
c15c90bb | 5434 | |
85363ca0 ZW |
5435 | set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad"); |
5436 | set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad"); | |
5437 | set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl"); | |
5438 | set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl"); | |
5439 | ||
5440 | set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT | |
5441 | ? "__U_Qfcnvfxt_quad_to_sgl" | |
5442 | : "_U_Qfcnvfxt_quad_to_sgl"); | |
5443 | set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl"); | |
5444 | set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl"); | |
5445 | set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl"); | |
5446 | ||
5447 | set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad"); | |
5448 | set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad"); | |
7db0cc7e JDA |
5449 | set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad"); |
5450 | set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad"); | |
c15c90bb ZW |
5451 | } |
5452 | #endif | |
5453 | ||
188538df TG |
5454 | /* HP's millicode routines mean something special to the assembler. |
5455 | Keep track of which ones we have used. */ | |
5456 | ||
f3a4e54e | 5457 | enum millicodes { remI, remU, divI, divU, mulI, end1000 }; |
b7849684 | 5458 | static void import_milli (enum millicodes); |
831c1763 | 5459 | static char imported[(int) end1000]; |
f3a4e54e | 5460 | static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"}; |
8b60264b | 5461 | static const char import_string[] = ".IMPORT $$....,MILLICODE"; |
188538df TG |
5462 | #define MILLI_START 10 |
5463 | ||
f1c7ce82 | 5464 | static void |
b7849684 | 5465 | import_milli (enum millicodes code) |
188538df TG |
5466 | { |
5467 | char str[sizeof (import_string)]; | |
23f6f34f | 5468 | |
831c1763 | 5469 | if (!imported[(int) code]) |
188538df | 5470 | { |
831c1763 | 5471 | imported[(int) code] = 1; |
188538df | 5472 | strcpy (str, import_string); |
831c1763 | 5473 | strncpy (str + MILLI_START, milli_names[(int) code], 4); |
188538df TG |
5474 | output_asm_insn (str, 0); |
5475 | } | |
5476 | } | |
5477 | ||
23f6f34f | 5478 | /* The register constraints have put the operands and return value in |
fe19a83d | 5479 | the proper registers. */ |
188538df | 5480 | |
519104fe | 5481 | const char * |
b7849684 | 5482 | output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn) |
188538df | 5483 | { |
9b38c2fa | 5484 | import_milli (mulI); |
690d4228 | 5485 | return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI")); |
188538df TG |
5486 | } |
5487 | ||
fe19a83d | 5488 | /* Emit the rtl for doing a division by a constant. */ |
188538df | 5489 | |
9b38c2fa | 5490 | /* Do magic division millicodes exist for this value? */ |
c9a88190 | 5491 | const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1}; |
188538df | 5492 | |
23f6f34f | 5493 | /* We'll use an array to keep track of the magic millicodes and |
188538df | 5494 | whether or not we've used them already. [n][0] is signed, [n][1] is |
fe19a83d | 5495 | unsigned. */ |
188538df | 5496 | |
188538df TG |
5497 | static int div_milli[16][2]; |
5498 | ||
188538df | 5499 | int |
b7849684 | 5500 | emit_hpdiv_const (rtx *operands, int unsignedp) |
188538df TG |
5501 | { |
5502 | if (GET_CODE (operands[2]) == CONST_INT | |
5503 | && INTVAL (operands[2]) > 0 | |
5504 | && INTVAL (operands[2]) < 16 | |
5505 | && magic_milli[INTVAL (operands[2])]) | |
5506 | { | |
7d8b1412 AM |
5507 | rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31); |
5508 | ||
ad2c71b7 | 5509 | emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]); |
188538df | 5510 | emit |
92fd5e41 KH |
5511 | (gen_rtx_PARALLEL |
5512 | (VOIDmode, | |
bd83f9a5 | 5513 | gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29), |
ad2c71b7 JL |
5514 | gen_rtx_fmt_ee (unsignedp ? UDIV : DIV, |
5515 | SImode, | |
5516 | gen_rtx_REG (SImode, 26), | |
5517 | operands[2])), | |
bd83f9a5 | 5518 | gen_rtx_CLOBBER (VOIDmode, operands[4]), |
ad2c71b7 JL |
5519 | gen_rtx_CLOBBER (VOIDmode, operands[3]), |
5520 | gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)), | |
5521 | gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)), | |
7d8b1412 | 5522 | gen_rtx_CLOBBER (VOIDmode, ret)))); |
ad2c71b7 | 5523 | emit_move_insn (operands[0], gen_rtx_REG (SImode, 29)); |
188538df TG |
5524 | return 1; |
5525 | } | |
5526 | return 0; | |
5527 | } | |
5528 | ||
519104fe | 5529 | const char * |
b7849684 | 5530 | output_div_insn (rtx *operands, int unsignedp, rtx insn) |
188538df TG |
5531 | { |
5532 | int divisor; | |
23f6f34f TG |
5533 | |
5534 | /* If the divisor is a constant, try to use one of the special | |
188538df TG |
5535 | opcodes .*/ |
5536 | if (GET_CODE (operands[0]) == CONST_INT) | |
5537 | { | |
2c4ff308 | 5538 | static char buf[100]; |
188538df TG |
5539 | divisor = INTVAL (operands[0]); |
5540 | if (!div_milli[divisor][unsignedp]) | |
5541 | { | |
2c4ff308 | 5542 | div_milli[divisor][unsignedp] = 1; |
188538df TG |
5543 | if (unsignedp) |
5544 | output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands); | |
5545 | else | |
5546 | output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands); | |
188538df TG |
5547 | } |
5548 | if (unsignedp) | |
2c4ff308 | 5549 | { |
4a0a75dd KG |
5550 | sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC, |
5551 | INTVAL (operands[0])); | |
6a73009d | 5552 | return output_millicode_call (insn, |
ad2c71b7 | 5553 | gen_rtx_SYMBOL_REF (SImode, buf)); |
2c4ff308 JL |
5554 | } |
5555 | else | |
5556 | { | |
4a0a75dd KG |
5557 | sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC, |
5558 | INTVAL (operands[0])); | |
6a73009d | 5559 | return output_millicode_call (insn, |
ad2c71b7 | 5560 | gen_rtx_SYMBOL_REF (SImode, buf)); |
2c4ff308 | 5561 | } |
188538df | 5562 | } |
fe19a83d | 5563 | /* Divisor isn't a special constant. */ |
188538df TG |
5564 | else |
5565 | { | |
5566 | if (unsignedp) | |
5567 | { | |
5568 | import_milli (divU); | |
6a73009d | 5569 | return output_millicode_call (insn, |
ad2c71b7 | 5570 | gen_rtx_SYMBOL_REF (SImode, "$$divU")); |
188538df TG |
5571 | } |
5572 | else | |
5573 | { | |
5574 | import_milli (divI); | |
6a73009d | 5575 | return output_millicode_call (insn, |
ad2c71b7 | 5576 | gen_rtx_SYMBOL_REF (SImode, "$$divI")); |
188538df TG |
5577 | } |
5578 | } | |
5579 | } | |
5580 | ||
fe19a83d | 5581 | /* Output a $$rem millicode to do mod. */ |
188538df | 5582 | |
519104fe | 5583 | const char * |
b7849684 | 5584 | output_mod_insn (int unsignedp, rtx insn) |
188538df TG |
5585 | { |
5586 | if (unsignedp) | |
5587 | { | |
5588 | import_milli (remU); | |
6a73009d | 5589 | return output_millicode_call (insn, |
ad2c71b7 | 5590 | gen_rtx_SYMBOL_REF (SImode, "$$remU")); |
188538df TG |
5591 | } |
5592 | else | |
5593 | { | |
5594 | import_milli (remI); | |
6a73009d | 5595 | return output_millicode_call (insn, |
ad2c71b7 | 5596 | gen_rtx_SYMBOL_REF (SImode, "$$remI")); |
188538df TG |
5597 | } |
5598 | } | |
5599 | ||
5600 | void | |
b7849684 | 5601 | output_arg_descriptor (rtx call_insn) |
188538df | 5602 | { |
519104fe | 5603 | const char *arg_regs[4]; |
188538df | 5604 | enum machine_mode arg_mode; |
80225b66 | 5605 | rtx link; |
188538df TG |
5606 | int i, output_flag = 0; |
5607 | int regno; | |
23f6f34f | 5608 | |
520babc7 | 5609 | /* We neither need nor want argument location descriptors for the |
e25724d8 AM |
5610 | 64bit runtime environment or the ELF32 environment. */ |
5611 | if (TARGET_64BIT || TARGET_ELF32) | |
520babc7 JL |
5612 | return; |
5613 | ||
188538df TG |
5614 | for (i = 0; i < 4; i++) |
5615 | arg_regs[i] = 0; | |
5616 | ||
2822d96e JL |
5617 | /* Specify explicitly that no argument relocations should take place |
5618 | if using the portable runtime calling conventions. */ | |
5619 | if (TARGET_PORTABLE_RUNTIME) | |
5620 | { | |
e236a9ff JL |
5621 | fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n", |
5622 | asm_out_file); | |
2822d96e JL |
5623 | return; |
5624 | } | |
5625 | ||
144d51f9 NS |
5626 | gcc_assert (GET_CODE (call_insn) == CALL_INSN); |
5627 | for (link = CALL_INSN_FUNCTION_USAGE (call_insn); | |
5628 | link; link = XEXP (link, 1)) | |
188538df | 5629 | { |
80225b66 | 5630 | rtx use = XEXP (link, 0); |
3529be83 | 5631 | |
80225b66 TG |
5632 | if (! (GET_CODE (use) == USE |
5633 | && GET_CODE (XEXP (use, 0)) == REG | |
5634 | && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0))))) | |
3529be83 RS |
5635 | continue; |
5636 | ||
80225b66 TG |
5637 | arg_mode = GET_MODE (XEXP (use, 0)); |
5638 | regno = REGNO (XEXP (use, 0)); | |
188538df | 5639 | if (regno >= 23 && regno <= 26) |
a9d91d6f RS |
5640 | { |
5641 | arg_regs[26 - regno] = "GR"; | |
5642 | if (arg_mode == DImode) | |
5643 | arg_regs[25 - regno] = "GR"; | |
5644 | } | |
80225b66 | 5645 | else if (regno >= 32 && regno <= 39) |
188538df TG |
5646 | { |
5647 | if (arg_mode == SFmode) | |
80225b66 | 5648 | arg_regs[(regno - 32) / 2] = "FR"; |
d0616842 | 5649 | else |
188538df | 5650 | { |
22d6e660 | 5651 | #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED |
80225b66 TG |
5652 | arg_regs[(regno - 34) / 2] = "FR"; |
5653 | arg_regs[(regno - 34) / 2 + 1] = "FU"; | |
188538df | 5654 | #else |
80225b66 TG |
5655 | arg_regs[(regno - 34) / 2] = "FU"; |
5656 | arg_regs[(regno - 34) / 2 + 1] = "FR"; | |
188538df TG |
5657 | #endif |
5658 | } | |
188538df TG |
5659 | } |
5660 | } | |
5661 | fputs ("\t.CALL ", asm_out_file); | |
5662 | for (i = 0; i < 4; i++) | |
5663 | { | |
5664 | if (arg_regs[i]) | |
5665 | { | |
5666 | if (output_flag++) | |
5667 | fputc (',', asm_out_file); | |
5668 | fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]); | |
5669 | } | |
5670 | } | |
5671 | fputc ('\n', asm_out_file); | |
5672 | } | |
5673 | \f | |
ec963611 | 5674 | static enum reg_class |
0a2aaacc | 5675 | pa_secondary_reload (bool in_p, rtx x, enum reg_class rclass, |
ec963611 JDA |
5676 | enum machine_mode mode, secondary_reload_info *sri) |
5677 | { | |
69f8a2d6 | 5678 | int is_symbolic, regno; |
e236a9ff | 5679 | |
ec963611 | 5680 | /* Handle the easy stuff first. */ |
0a2aaacc | 5681 | if (rclass == R1_REGS) |
ec963611 | 5682 | return NO_REGS; |
e236a9ff | 5683 | |
ec963611 JDA |
5684 | if (REG_P (x)) |
5685 | { | |
5686 | regno = REGNO (x); | |
0a2aaacc | 5687 | if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER) |
ec963611 JDA |
5688 | return NO_REGS; |
5689 | } | |
69f8a2d6 JDA |
5690 | else |
5691 | regno = -1; | |
188538df | 5692 | |
ec963611 JDA |
5693 | /* If we have something like (mem (mem (...)), we can safely assume the |
5694 | inner MEM will end up in a general register after reloading, so there's | |
5695 | no need for a secondary reload. */ | |
5696 | if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM) | |
5697 | return NO_REGS; | |
188538df | 5698 | |
6bb36601 | 5699 | /* Trying to load a constant into a FP register during PIC code |
ec963611 | 5700 | generation requires %r1 as a scratch register. */ |
7ee72796 | 5701 | if (flag_pic |
7e646101 | 5702 | && (mode == SImode || mode == DImode) |
0a2aaacc | 5703 | && FP_REG_CLASS_P (rclass) |
ec963611 | 5704 | && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)) |
e236a9ff | 5705 | { |
ec963611 JDA |
5706 | sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1 |
5707 | : CODE_FOR_reload_indi_r1); | |
5708 | return NO_REGS; | |
e236a9ff | 5709 | } |
e236a9ff | 5710 | |
ec963611 JDA |
5711 | /* Profiling showed the PA port spends about 1.3% of its compilation |
5712 | time in true_regnum from calls inside pa_secondary_reload_class. */ | |
5713 | if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG) | |
5714 | regno = true_regnum (x); | |
39dfb55a | 5715 | |
6982c5d4 JDA |
5716 | /* In order to allow 14-bit displacements in integer loads and stores, |
5717 | we need to prevent reload from generating out of range integer mode | |
5718 | loads and stores to the floating point registers. Previously, we | |
5719 | used to call for a secondary reload and have emit_move_sequence() | |
5720 | fix the instruction sequence. However, reload occasionally wouldn't | |
5721 | generate the reload and we would end up with an invalid REG+D memory | |
5722 | address. So, now we use an intermediate general register for most | |
5723 | memory loads and stores. */ | |
5724 | if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1) | |
5725 | && GET_MODE_CLASS (mode) == MODE_INT | |
0a2aaacc | 5726 | && FP_REG_CLASS_P (rclass)) |
6982c5d4 JDA |
5727 | { |
5728 | /* Reload passes (mem:SI (reg/f:DI 30 %r30) when it wants to check | |
5729 | the secondary reload needed for a pseudo. It never passes a | |
5730 | REG+D address. */ | |
5731 | if (GET_CODE (x) == MEM) | |
5732 | { | |
5733 | x = XEXP (x, 0); | |
5734 | ||
5735 | /* We don't need an intermediate for indexed and LO_SUM DLT | |
5736 | memory addresses. When INT14_OK_STRICT is true, it might | |
5737 | appear that we could directly allow register indirect | |
5738 | memory addresses. However, this doesn't work because we | |
5739 | don't support SUBREGs in floating-point register copies | |
5740 | and reload doesn't tell us when it's going to use a SUBREG. */ | |
5741 | if (IS_INDEX_ADDR_P (x) | |
5742 | || IS_LO_SUM_DLT_ADDR_P (x)) | |
5743 | return NO_REGS; | |
5744 | ||
5745 | /* Otherwise, we need an intermediate general register. */ | |
5746 | return GENERAL_REGS; | |
5747 | } | |
5748 | ||
5749 | /* Request a secondary reload with a general scratch register | |
5750 | for everthing else. ??? Could symbolic operands be handled | |
5751 | directly when generating non-pic PA 2.0 code? */ | |
5752 | sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode]; | |
5753 | return NO_REGS; | |
5754 | } | |
5755 | ||
5756 | /* We need a secondary register (GPR) for copies between the SAR | |
5757 | and anything other than a general register. */ | |
0a2aaacc | 5758 | if (rclass == SHIFT_REGS && (regno <= 0 || regno >= 32)) |
ec963611 JDA |
5759 | { |
5760 | sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode]; | |
5761 | return NO_REGS; | |
5762 | } | |
fa5e5c1e | 5763 | |
26ee120d JL |
5764 | /* A SAR<->FP register copy requires a secondary register (GPR) as |
5765 | well as secondary memory. */ | |
5766 | if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER | |
6982c5d4 | 5767 | && (REGNO_REG_CLASS (regno) == SHIFT_REGS |
0a2aaacc | 5768 | && FP_REG_CLASS_P (rclass))) |
ec963611 JDA |
5769 | { |
5770 | sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode]; | |
5771 | return NO_REGS; | |
5772 | } | |
26ee120d | 5773 | |
ec963611 | 5774 | /* Secondary reloads of symbolic operands require %r1 as a scratch |
6982c5d4 | 5775 | register when we're generating PIC code and when the operand isn't |
ec963611 JDA |
5776 | readonly. */ |
5777 | if (GET_CODE (x) == HIGH) | |
5778 | x = XEXP (x, 0); | |
43940f6b | 5779 | |
e236a9ff | 5780 | /* Profiling has showed GCC spends about 2.6% of its compilation |
3c4774e0 | 5781 | time in symbolic_operand from calls inside pa_secondary_reload_class. |
ec963611 JDA |
5782 | So, we use an inline copy to avoid useless work. */ |
5783 | switch (GET_CODE (x)) | |
e236a9ff | 5784 | { |
ec963611 | 5785 | rtx op; |
e236a9ff JL |
5786 | |
5787 | case SYMBOL_REF: | |
ec963611 JDA |
5788 | is_symbolic = !SYMBOL_REF_TLS_MODEL (x); |
5789 | break; | |
e236a9ff JL |
5790 | case LABEL_REF: |
5791 | is_symbolic = 1; | |
5792 | break; | |
5793 | case CONST: | |
ec963611 JDA |
5794 | op = XEXP (x, 0); |
5795 | is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF | |
5796 | && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0))) | |
5797 | || GET_CODE (XEXP (op, 0)) == LABEL_REF) | |
5798 | && GET_CODE (XEXP (op, 1)) == CONST_INT); | |
e236a9ff JL |
5799 | break; |
5800 | default: | |
5801 | is_symbolic = 0; | |
5802 | break; | |
5803 | } | |
6619e96c | 5804 | |
ec963611 JDA |
5805 | if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode))) |
5806 | { | |
5807 | gcc_assert (mode == SImode || mode == DImode); | |
5808 | sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1 | |
5809 | : CODE_FOR_reload_indi_r1); | |
5810 | } | |
43940f6b | 5811 | |
fa5e5c1e | 5812 | return NO_REGS; |
188538df TG |
5813 | } |
5814 | ||
16c16a24 JDA |
5815 | /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer |
5816 | is only marked as live on entry by df-scan when it is a fixed | |
5817 | register. It isn't a fixed register in the 64-bit runtime, | |
5818 | so we need to mark it here. */ | |
5819 | ||
5820 | static void | |
5821 | pa_extra_live_on_entry (bitmap regs) | |
5822 | { | |
5823 | if (TARGET_64BIT) | |
5824 | bitmap_set_bit (regs, ARG_POINTER_REGNUM); | |
5825 | } | |
5826 | ||
5827 | /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile | |
5828 | to prevent it from being deleted. */ | |
5829 | ||
5830 | rtx | |
5831 | pa_eh_return_handler_rtx (void) | |
5832 | { | |
5833 | rtx tmp; | |
5834 | ||
5835 | tmp = gen_rtx_PLUS (word_mode, frame_pointer_rtx, | |
5836 | TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20)); | |
5837 | tmp = gen_rtx_MEM (word_mode, tmp); | |
5838 | tmp->volatil = 1; | |
5839 | return tmp; | |
5840 | } | |
5841 | ||
8cd5a4e0 RH |
5842 | /* In the 32-bit runtime, arguments larger than eight bytes are passed |
5843 | by invisible reference. As a GCC extension, we also pass anything | |
5844 | with a zero or variable size by reference. | |
5845 | ||
5846 | The 64-bit runtime does not describe passing any types by invisible | |
5847 | reference. The internals of GCC can't currently handle passing | |
5848 | empty structures, and zero or variable length arrays when they are | |
5849 | not passed entirely on the stack or by reference. Thus, as a GCC | |
5850 | extension, we pass these types by reference. The HP compiler doesn't | |
5851 | support these types, so hopefully there shouldn't be any compatibility | |
5852 | issues. This may have to be revisited when HP releases a C99 compiler | |
5853 | or updates the ABI. */ | |
5854 | ||
5855 | static bool | |
5856 | pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED, | |
586de218 | 5857 | enum machine_mode mode, const_tree type, |
8cd5a4e0 RH |
5858 | bool named ATTRIBUTE_UNUSED) |
5859 | { | |
5860 | HOST_WIDE_INT size; | |
5861 | ||
5862 | if (type) | |
5863 | size = int_size_in_bytes (type); | |
5864 | else | |
5865 | size = GET_MODE_SIZE (mode); | |
5866 | ||
5867 | if (TARGET_64BIT) | |
5868 | return size <= 0; | |
5869 | else | |
5870 | return size <= 0 || size > 8; | |
5871 | } | |
5872 | ||
188538df | 5873 | enum direction |
586de218 | 5874 | function_arg_padding (enum machine_mode mode, const_tree type) |
188538df | 5875 | { |
9dff28ab | 5876 | if (mode == BLKmode |
c3e39a47 JDA |
5877 | || (TARGET_64BIT |
5878 | && type | |
5879 | && (AGGREGATE_TYPE_P (type) | |
5880 | || TREE_CODE (type) == COMPLEX_TYPE | |
5881 | || TREE_CODE (type) == VECTOR_TYPE))) | |
9dff28ab JDA |
5882 | { |
5883 | /* Return none if justification is not required. */ | |
5884 | if (type | |
5885 | && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST | |
5886 | && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0) | |
5887 | return none; | |
5888 | ||
5889 | /* The directions set here are ignored when a BLKmode argument larger | |
5890 | than a word is placed in a register. Different code is used for | |
5891 | the stack and registers. This makes it difficult to have a | |
5892 | consistent data representation for both the stack and registers. | |
5893 | For both runtimes, the justification and padding for arguments on | |
5894 | the stack and in registers should be identical. */ | |
5895 | if (TARGET_64BIT) | |
5896 | /* The 64-bit runtime specifies left justification for aggregates. */ | |
5897 | return upward; | |
188538df | 5898 | else |
9dff28ab JDA |
5899 | /* The 32-bit runtime architecture specifies right justification. |
5900 | When the argument is passed on the stack, the argument is padded | |
5901 | with garbage on the left. The HP compiler pads with zeros. */ | |
5902 | return downward; | |
188538df | 5903 | } |
9dff28ab JDA |
5904 | |
5905 | if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY) | |
188538df | 5906 | return downward; |
188538df TG |
5907 | else |
5908 | return none; | |
5909 | } | |
5910 | ||
188538df | 5911 | \f |
648d2ffc RH |
5912 | /* Do what is necessary for `va_start'. We look at the current function |
5913 | to determine if stdargs or varargs is used and fill in an initial | |
5914 | va_list. A pointer to this constructor is returned. */ | |
188538df | 5915 | |
3f12cd9b | 5916 | static rtx |
b7849684 | 5917 | hppa_builtin_saveregs (void) |
188538df | 5918 | { |
5e32727c | 5919 | rtx offset, dest; |
188538df TG |
5920 | tree fntype = TREE_TYPE (current_function_decl); |
5921 | int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0 | |
5922 | && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype))) | |
5923 | != void_type_node))) | |
5924 | ? UNITS_PER_WORD : 0); | |
5925 | ||
5926 | if (argadj) | |
38173d38 | 5927 | offset = plus_constant (crtl->args.arg_offset_rtx, argadj); |
188538df | 5928 | else |
38173d38 | 5929 | offset = crtl->args.arg_offset_rtx; |
17e1dfa2 | 5930 | |
520babc7 JL |
5931 | if (TARGET_64BIT) |
5932 | { | |
5933 | int i, off; | |
6619e96c | 5934 | |
520babc7 JL |
5935 | /* Adjust for varargs/stdarg differences. */ |
5936 | if (argadj) | |
38173d38 | 5937 | offset = plus_constant (crtl->args.arg_offset_rtx, -argadj); |
520babc7 | 5938 | else |
38173d38 | 5939 | offset = crtl->args.arg_offset_rtx; |
520babc7 JL |
5940 | |
5941 | /* We need to save %r26 .. %r19 inclusive starting at offset -64 | |
5942 | from the incoming arg pointer and growing to larger addresses. */ | |
5943 | for (i = 26, off = -64; i >= 19; i--, off += 8) | |
5944 | emit_move_insn (gen_rtx_MEM (word_mode, | |
5945 | plus_constant (arg_pointer_rtx, off)), | |
5946 | gen_rtx_REG (word_mode, i)); | |
5947 | ||
5948 | /* The incoming args pointer points just beyond the flushback area; | |
f710504c | 5949 | normally this is not a serious concern. However, when we are doing |
520babc7 JL |
5950 | varargs/stdargs we want to make the arg pointer point to the start |
5951 | of the incoming argument area. */ | |
5952 | emit_move_insn (virtual_incoming_args_rtx, | |
5953 | plus_constant (arg_pointer_rtx, -64)); | |
5954 | ||
5955 | /* Now return a pointer to the first anonymous argument. */ | |
5956 | return copy_to_reg (expand_binop (Pmode, add_optab, | |
5957 | virtual_incoming_args_rtx, | |
5958 | offset, 0, 0, OPTAB_LIB_WIDEN)); | |
5959 | } | |
5960 | ||
fe19a83d | 5961 | /* Store general registers on the stack. */ |
ad2c71b7 | 5962 | dest = gen_rtx_MEM (BLKmode, |
38173d38 | 5963 | plus_constant (crtl->args.internal_arg_pointer, |
ad2c71b7 | 5964 | -16)); |
ba4828e0 | 5965 | set_mem_alias_set (dest, get_varargs_alias_set ()); |
8ac61af7 | 5966 | set_mem_align (dest, BITS_PER_WORD); |
c6b97fac | 5967 | move_block_from_reg (23, dest, 4); |
5e32727c | 5968 | |
39dfb55a JL |
5969 | /* move_block_from_reg will emit code to store the argument registers |
5970 | individually as scalar stores. | |
5971 | ||
5972 | However, other insns may later load from the same addresses for | |
956d6950 | 5973 | a structure load (passing a struct to a varargs routine). |
39dfb55a JL |
5974 | |
5975 | The alias code assumes that such aliasing can never happen, so we | |
5976 | have to keep memory referencing insns from moving up beyond the | |
5977 | last argument register store. So we emit a blockage insn here. */ | |
5978 | emit_insn (gen_blockage ()); | |
5979 | ||
17e1dfa2 | 5980 | return copy_to_reg (expand_binop (Pmode, add_optab, |
38173d38 | 5981 | crtl->args.internal_arg_pointer, |
17e1dfa2 | 5982 | offset, 0, 0, OPTAB_LIB_WIDEN)); |
188538df | 5983 | } |
d2a94ec0 | 5984 | |
d7bd8aeb | 5985 | static void |
b7849684 | 5986 | hppa_va_start (tree valist, rtx nextarg) |
ca5f4364 RH |
5987 | { |
5988 | nextarg = expand_builtin_saveregs (); | |
e5faf155 | 5989 | std_expand_builtin_va_start (valist, nextarg); |
ca5f4364 RH |
5990 | } |
5991 | ||
8101c928 | 5992 | static tree |
726a989a RB |
5993 | hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p, |
5994 | gimple_seq *post_p) | |
ca5f4364 | 5995 | { |
520babc7 JL |
5996 | if (TARGET_64BIT) |
5997 | { | |
8101c928 | 5998 | /* Args grow upward. We can use the generic routines. */ |
af064de5 | 5999 | return std_gimplify_va_arg_expr (valist, type, pre_p, post_p); |
ca5f4364 | 6000 | } |
9dff28ab | 6001 | else /* !TARGET_64BIT */ |
ca5f4364 | 6002 | { |
8101c928 RH |
6003 | tree ptr = build_pointer_type (type); |
6004 | tree valist_type; | |
6005 | tree t, u; | |
6006 | unsigned int size, ofs; | |
af064de5 | 6007 | bool indirect; |
ca5f4364 | 6008 | |
af064de5 | 6009 | indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0); |
8101c928 | 6010 | if (indirect) |
9dff28ab | 6011 | { |
8101c928 RH |
6012 | type = ptr; |
6013 | ptr = build_pointer_type (type); | |
ca5f4364 | 6014 | } |
8101c928 RH |
6015 | size = int_size_in_bytes (type); |
6016 | valist_type = TREE_TYPE (valist); | |
9dff28ab | 6017 | |
8101c928 | 6018 | /* Args grow down. Not handled by generic routines. */ |
9dff28ab | 6019 | |
5be014d5 AP |
6020 | u = fold_convert (sizetype, size_in_bytes (type)); |
6021 | u = fold_build1 (NEGATE_EXPR, sizetype, u); | |
6022 | t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u); | |
9dff28ab | 6023 | |
8101c928 RH |
6024 | /* Copied from va-pa.h, but we probably don't need to align to |
6025 | word size, since we generate and preserve that invariant. */ | |
5be014d5 AP |
6026 | u = size_int (size > 4 ? -8 : -4); |
6027 | t = fold_convert (sizetype, t); | |
6028 | t = build2 (BIT_AND_EXPR, sizetype, t, u); | |
6029 | t = fold_convert (valist_type, t); | |
8101c928 | 6030 | |
66863d89 | 6031 | t = build2 (MODIFY_EXPR, valist_type, valist, t); |
ca5f4364 | 6032 | |
8101c928 RH |
6033 | ofs = (8 - size) % 4; |
6034 | if (ofs != 0) | |
6035 | { | |
5be014d5 AP |
6036 | u = size_int (ofs); |
6037 | t = build2 (POINTER_PLUS_EXPR, valist_type, t, u); | |
9dff28ab | 6038 | } |
ca5f4364 | 6039 | |
8101c928 | 6040 | t = fold_convert (ptr, t); |
d6e9821f | 6041 | t = build_va_arg_indirect_ref (t); |
ca5f4364 | 6042 | |
8101c928 | 6043 | if (indirect) |
d6e9821f | 6044 | t = build_va_arg_indirect_ref (t); |
ca5f4364 | 6045 | |
8101c928 RH |
6046 | return t; |
6047 | } | |
6048 | } | |
ca5f4364 | 6049 | |
83c32f2e JDA |
6050 | /* True if MODE is valid for the target. By "valid", we mean able to |
6051 | be manipulated in non-trivial ways. In particular, this means all | |
6052 | the arithmetic is supported. | |
6053 | ||
6054 | Currently, TImode is not valid as the HP 64-bit runtime documentation | |
6055 | doesn't document the alignment and calling conventions for this type. | |
6056 | Thus, we return false when PRECISION is 2 * BITS_PER_WORD and | |
6057 | 2 * BITS_PER_WORD isn't equal LONG_LONG_TYPE_SIZE. */ | |
6058 | ||
6059 | static bool | |
6060 | pa_scalar_mode_supported_p (enum machine_mode mode) | |
6061 | { | |
6062 | int precision = GET_MODE_PRECISION (mode); | |
6063 | ||
6064 | switch (GET_MODE_CLASS (mode)) | |
6065 | { | |
6066 | case MODE_PARTIAL_INT: | |
6067 | case MODE_INT: | |
6068 | if (precision == CHAR_TYPE_SIZE) | |
6069 | return true; | |
6070 | if (precision == SHORT_TYPE_SIZE) | |
6071 | return true; | |
6072 | if (precision == INT_TYPE_SIZE) | |
6073 | return true; | |
6074 | if (precision == LONG_TYPE_SIZE) | |
6075 | return true; | |
6076 | if (precision == LONG_LONG_TYPE_SIZE) | |
6077 | return true; | |
6078 | return false; | |
6079 | ||
6080 | case MODE_FLOAT: | |
6081 | if (precision == FLOAT_TYPE_SIZE) | |
6082 | return true; | |
6083 | if (precision == DOUBLE_TYPE_SIZE) | |
6084 | return true; | |
6085 | if (precision == LONG_DOUBLE_TYPE_SIZE) | |
6086 | return true; | |
6087 | return false; | |
6088 | ||
70c1d012 JDA |
6089 | case MODE_DECIMAL_FLOAT: |
6090 | return false; | |
6091 | ||
83c32f2e JDA |
6092 | default: |
6093 | gcc_unreachable (); | |
6094 | } | |
6095 | } | |
6096 | ||
23f6f34f TG |
6097 | /* This routine handles all the normal conditional branch sequences we |
6098 | might need to generate. It handles compare immediate vs compare | |
6099 | register, nullification of delay slots, varying length branches, | |
d2364a74 | 6100 | negated branches, and all combinations of the above. It returns the |
23f6f34f | 6101 | output appropriate to emit the branch corresponding to all given |
d2364a74 JL |
6102 | parameters. */ |
6103 | ||
519104fe | 6104 | const char * |
16d74a3c | 6105 | output_cbranch (rtx *operands, int negated, rtx insn) |
b1a275e1 | 6106 | { |
d2364a74 JL |
6107 | static char buf[100]; |
6108 | int useskip = 0; | |
16d74a3c JDA |
6109 | int nullify = INSN_ANNULLED_BRANCH_P (insn); |
6110 | int length = get_attr_length (insn); | |
6111 | int xdelay; | |
d2364a74 | 6112 | |
112cdef5 | 6113 | /* A conditional branch to the following instruction (e.g. the delay slot) |
02a57c73 JDA |
6114 | is asking for a disaster. This can happen when not optimizing and |
6115 | when jump optimization fails. | |
b1a275e1 | 6116 | |
7772f0a9 JDA |
6117 | While it is usually safe to emit nothing, this can fail if the |
6118 | preceding instruction is a nullified branch with an empty delay | |
6119 | slot and the same branch target as this branch. We could check | |
6120 | for this but jump optimization should eliminate nop jumps. It | |
6121 | is always safe to emit a nop. */ | |
02a57c73 JDA |
6122 | if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn)) |
6123 | return "nop"; | |
23f6f34f | 6124 | |
ae2ea719 JDA |
6125 | /* The doubleword form of the cmpib instruction doesn't have the LEU |
6126 | and GTU conditions while the cmpb instruction does. Since we accept | |
6127 | zero for cmpb, we must ensure that we use cmpb for the comparison. */ | |
6128 | if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx) | |
6129 | operands[2] = gen_rtx_REG (DImode, 0); | |
9972f30d SE |
6130 | if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx) |
6131 | operands[1] = gen_rtx_REG (DImode, 0); | |
ae2ea719 | 6132 | |
b9821af8 JL |
6133 | /* If this is a long branch with its delay slot unfilled, set `nullify' |
6134 | as it can nullify the delay slot and save a nop. */ | |
a1b36964 | 6135 | if (length == 8 && dbr_sequence_length () == 0) |
b9821af8 JL |
6136 | nullify = 1; |
6137 | ||
6138 | /* If this is a short forward conditional branch which did not get | |
6139 | its delay slot filled, the delay slot can still be nullified. */ | |
a1b36964 | 6140 | if (! nullify && length == 4 && dbr_sequence_length () == 0) |
b9821af8 JL |
6141 | nullify = forward_branch_p (insn); |
6142 | ||
23f6f34f | 6143 | /* A forward branch over a single nullified insn can be done with a |
d2364a74 JL |
6144 | comclr instruction. This avoids a single cycle penalty due to |
6145 | mis-predicted branch if we fall through (branch not taken). */ | |
a1b36964 | 6146 | if (length == 4 |
b9821af8 | 6147 | && next_real_insn (insn) != 0 |
a1b36964 | 6148 | && get_attr_length (next_real_insn (insn)) == 4 |
b9821af8 | 6149 | && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn)) |
d2364a74 JL |
6150 | && nullify) |
6151 | useskip = 1; | |
6152 | ||
6153 | switch (length) | |
6154 | { | |
b9821af8 JL |
6155 | /* All short conditional branches except backwards with an unfilled |
6156 | delay slot. */ | |
a1b36964 | 6157 | case 4: |
d2364a74 | 6158 | if (useskip) |
f38b27c7 | 6159 | strcpy (buf, "{com%I2clr,|cmp%I2clr,}"); |
d2364a74 | 6160 | else |
f38b27c7 | 6161 | strcpy (buf, "{com%I2b,|cmp%I2b,}"); |
520babc7 JL |
6162 | if (GET_MODE (operands[1]) == DImode) |
6163 | strcat (buf, "*"); | |
d2364a74 JL |
6164 | if (negated) |
6165 | strcat (buf, "%B3"); | |
6166 | else | |
6167 | strcat (buf, "%S3"); | |
6168 | if (useskip) | |
3b5e5fb3 | 6169 | strcat (buf, " %2,%r1,%%r0"); |
d2364a74 | 6170 | else if (nullify) |
dcaeffef | 6171 | strcat (buf, ",n %2,%r1,%0"); |
23f6f34f | 6172 | else |
dcaeffef | 6173 | strcat (buf, " %2,%r1,%0"); |
d2364a74 JL |
6174 | break; |
6175 | ||
5bdc5878 | 6176 | /* All long conditionals. Note a short backward branch with an |
b9821af8 JL |
6177 | unfilled delay slot is treated just like a long backward branch |
6178 | with an unfilled delay slot. */ | |
a1b36964 | 6179 | case 8: |
b9821af8 | 6180 | /* Handle weird backwards branch with a filled delay slot |
16d74a3c | 6181 | which is nullified. */ |
b9821af8 JL |
6182 | if (dbr_sequence_length () != 0 |
6183 | && ! forward_branch_p (insn) | |
6184 | && nullify) | |
6185 | { | |
f38b27c7 | 6186 | strcpy (buf, "{com%I2b,|cmp%I2b,}"); |
520babc7 JL |
6187 | if (GET_MODE (operands[1]) == DImode) |
6188 | strcat (buf, "*"); | |
b9821af8 JL |
6189 | if (negated) |
6190 | strcat (buf, "%S3"); | |
6191 | else | |
6192 | strcat (buf, "%B3"); | |
3b5e5fb3 | 6193 | strcat (buf, ",n %2,%r1,.+12\n\tb %0"); |
b9821af8 | 6194 | } |
923f781d JL |
6195 | /* Handle short backwards branch with an unfilled delay slot. |
6196 | Using a comb;nop rather than comiclr;bl saves 1 cycle for both | |
6197 | taken and untaken branches. */ | |
6198 | else if (dbr_sequence_length () == 0 | |
6199 | && ! forward_branch_p (insn) | |
9d98a694 AO |
6200 | && INSN_ADDRESSES_SET_P () |
6201 | && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn))) | |
6202 | - INSN_ADDRESSES (INSN_UID (insn)) - 8)) | |
923f781d | 6203 | { |
f38b27c7 | 6204 | strcpy (buf, "{com%I2b,|cmp%I2b,}"); |
520babc7 JL |
6205 | if (GET_MODE (operands[1]) == DImode) |
6206 | strcat (buf, "*"); | |
923f781d | 6207 | if (negated) |
dcaeffef | 6208 | strcat (buf, "%B3 %2,%r1,%0%#"); |
923f781d | 6209 | else |
dcaeffef | 6210 | strcat (buf, "%S3 %2,%r1,%0%#"); |
923f781d | 6211 | } |
d2364a74 | 6212 | else |
b9821af8 | 6213 | { |
f38b27c7 | 6214 | strcpy (buf, "{com%I2clr,|cmp%I2clr,}"); |
520babc7 JL |
6215 | if (GET_MODE (operands[1]) == DImode) |
6216 | strcat (buf, "*"); | |
b9821af8 JL |
6217 | if (negated) |
6218 | strcat (buf, "%S3"); | |
6219 | else | |
6220 | strcat (buf, "%B3"); | |
6221 | if (nullify) | |
3b5e5fb3 | 6222 | strcat (buf, " %2,%r1,%%r0\n\tb,n %0"); |
b9821af8 | 6223 | else |
3b5e5fb3 | 6224 | strcat (buf, " %2,%r1,%%r0\n\tb %0"); |
b9821af8 | 6225 | } |
d2364a74 JL |
6226 | break; |
6227 | ||
16d74a3c | 6228 | default: |
685d0e07 | 6229 | /* The reversed conditional branch must branch over one additional |
16d74a3c JDA |
6230 | instruction if the delay slot is filled and needs to be extracted |
6231 | by output_lbranch. If the delay slot is empty or this is a | |
6232 | nullified forward branch, the instruction after the reversed | |
6233 | condition branch must be nullified. */ | |
6234 | if (dbr_sequence_length () == 0 | |
6235 | || (nullify && forward_branch_p (insn))) | |
6236 | { | |
6237 | nullify = 1; | |
6238 | xdelay = 0; | |
6239 | operands[4] = GEN_INT (length); | |
6240 | } | |
6241 | else | |
6242 | { | |
6243 | xdelay = 1; | |
6244 | operands[4] = GEN_INT (length + 4); | |
6245 | } | |
4bcb9e3f JL |
6246 | |
6247 | /* Create a reversed conditional branch which branches around | |
6248 | the following insns. */ | |
685d0e07 JDA |
6249 | if (GET_MODE (operands[1]) != DImode) |
6250 | { | |
6251 | if (nullify) | |
6252 | { | |
6253 | if (negated) | |
6254 | strcpy (buf, | |
6255 | "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}"); | |
6256 | else | |
6257 | strcpy (buf, | |
6258 | "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}"); | |
6259 | } | |
6260 | else | |
6261 | { | |
6262 | if (negated) | |
6263 | strcpy (buf, | |
6264 | "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}"); | |
6265 | else | |
6266 | strcpy (buf, | |
6267 | "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}"); | |
6268 | } | |
6269 | } | |
4bcb9e3f | 6270 | else |
520babc7 | 6271 | { |
685d0e07 JDA |
6272 | if (nullify) |
6273 | { | |
6274 | if (negated) | |
6275 | strcpy (buf, | |
6276 | "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}"); | |
6277 | else | |
6278 | strcpy (buf, | |
6279 | "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}"); | |
6280 | } | |
520babc7 | 6281 | else |
685d0e07 JDA |
6282 | { |
6283 | if (negated) | |
6284 | strcpy (buf, | |
6285 | "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}"); | |
6286 | else | |
6287 | strcpy (buf, | |
6288 | "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}"); | |
6289 | } | |
520babc7 | 6290 | } |
4bcb9e3f | 6291 | |
16d74a3c JDA |
6292 | output_asm_insn (buf, operands); |
6293 | return output_lbranch (operands[0], insn, xdelay); | |
685d0e07 JDA |
6294 | } |
6295 | return buf; | |
6296 | } | |
4bcb9e3f | 6297 | |
16d74a3c JDA |
6298 | /* This routine handles output of long unconditional branches that |
6299 | exceed the maximum range of a simple branch instruction. Since | |
6300 | we don't have a register available for the branch, we save register | |
6301 | %r1 in the frame marker, load the branch destination DEST into %r1, | |
6302 | execute the branch, and restore %r1 in the delay slot of the branch. | |
6303 | ||
6304 | Since long branches may have an insn in the delay slot and the | |
6305 | delay slot is used to restore %r1, we in general need to extract | |
6306 | this insn and execute it before the branch. However, to facilitate | |
6307 | use of this function by conditional branches, we also provide an | |
6308 | option to not extract the delay insn so that it will be emitted | |
6309 | after the long branch. So, if there is an insn in the delay slot, | |
6310 | it is extracted if XDELAY is nonzero. | |
6311 | ||
6312 | The lengths of the various long-branch sequences are 20, 16 and 24 | |
6313 | bytes for the portable runtime, non-PIC and PIC cases, respectively. */ | |
4bcb9e3f | 6314 | |
685d0e07 | 6315 | const char * |
16d74a3c | 6316 | output_lbranch (rtx dest, rtx insn, int xdelay) |
685d0e07 JDA |
6317 | { |
6318 | rtx xoperands[2]; | |
6319 | ||
6320 | xoperands[0] = dest; | |
4bcb9e3f | 6321 | |
685d0e07 | 6322 | /* First, free up the delay slot. */ |
16d74a3c | 6323 | if (xdelay && dbr_sequence_length () != 0) |
685d0e07 JDA |
6324 | { |
6325 | /* We can't handle a jump in the delay slot. */ | |
144d51f9 | 6326 | gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN); |
4bcb9e3f | 6327 | |
685d0e07 | 6328 | final_scan_insn (NEXT_INSN (insn), asm_out_file, |
c9d691e9 | 6329 | optimize, 0, NULL); |
4bcb9e3f | 6330 | |
685d0e07 | 6331 | /* Now delete the delay insn. */ |
a38e7aa5 | 6332 | SET_INSN_DELETED (NEXT_INSN (insn)); |
685d0e07 | 6333 | } |
4bcb9e3f | 6334 | |
685d0e07 JDA |
6335 | /* Output an insn to save %r1. The runtime documentation doesn't |
6336 | specify whether the "Clean Up" slot in the callers frame can | |
6337 | be clobbered by the callee. It isn't copied by HP's builtin | |
6338 | alloca, so this suggests that it can be clobbered if necessary. | |
6339 | The "Static Link" location is copied by HP builtin alloca, so | |
6340 | we avoid using it. Using the cleanup slot might be a problem | |
6341 | if we have to interoperate with languages that pass cleanup | |
6342 | information. However, it should be possible to handle these | |
6343 | situations with GCC's asm feature. | |
6344 | ||
6345 | The "Current RP" slot is reserved for the called procedure, so | |
6346 | we try to use it when we don't have a frame of our own. It's | |
6347 | rather unlikely that we won't have a frame when we need to emit | |
6348 | a very long branch. | |
6349 | ||
6350 | Really the way to go long term is a register scavenger; goto | |
6351 | the target of the jump and find a register which we can use | |
6352 | as a scratch to hold the value in %r1. Then, we wouldn't have | |
6353 | to free up the delay slot or clobber a slot that may be needed | |
6354 | for other purposes. */ | |
6355 | if (TARGET_64BIT) | |
6356 | { | |
6fb5fa3c | 6357 | if (actual_fsize == 0 && !df_regs_ever_live_p (2)) |
685d0e07 JDA |
6358 | /* Use the return pointer slot in the frame marker. */ |
6359 | output_asm_insn ("std %%r1,-16(%%r30)", xoperands); | |
6360 | else | |
6361 | /* Use the slot at -40 in the frame marker since HP builtin | |
6362 | alloca doesn't copy it. */ | |
6363 | output_asm_insn ("std %%r1,-40(%%r30)", xoperands); | |
6364 | } | |
6365 | else | |
6366 | { | |
6fb5fa3c | 6367 | if (actual_fsize == 0 && !df_regs_ever_live_p (2)) |
685d0e07 JDA |
6368 | /* Use the return pointer slot in the frame marker. */ |
6369 | output_asm_insn ("stw %%r1,-20(%%r30)", xoperands); | |
6370 | else | |
6371 | /* Use the "Clean Up" slot in the frame marker. In GCC, | |
6372 | the only other use of this location is for copying a | |
6373 | floating point double argument from a floating-point | |
6374 | register to two general registers. The copy is done | |
aa7f1eb1 | 6375 | as an "atomic" operation when outputting a call, so it |
685d0e07 JDA |
6376 | won't interfere with our using the location here. */ |
6377 | output_asm_insn ("stw %%r1,-12(%%r30)", xoperands); | |
6378 | } | |
3d9268b6 | 6379 | |
5fad1c24 JDA |
6380 | if (TARGET_PORTABLE_RUNTIME) |
6381 | { | |
6382 | output_asm_insn ("ldil L'%0,%%r1", xoperands); | |
6383 | output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands); | |
6384 | output_asm_insn ("bv %%r0(%%r1)", xoperands); | |
6385 | } | |
6386 | else if (flag_pic) | |
685d0e07 JDA |
6387 | { |
6388 | output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands); | |
6389 | if (TARGET_SOM || !TARGET_GAS) | |
6390 | { | |
6391 | xoperands[1] = gen_label_rtx (); | |
6392 | output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands); | |
ecc418c4 JDA |
6393 | targetm.asm_out.internal_label (asm_out_file, "L", |
6394 | CODE_LABEL_NUMBER (xoperands[1])); | |
685d0e07 | 6395 | output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands); |
4bcb9e3f | 6396 | } |
685d0e07 JDA |
6397 | else |
6398 | { | |
6399 | output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands); | |
6400 | output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands); | |
6401 | } | |
6402 | output_asm_insn ("bv %%r0(%%r1)", xoperands); | |
6403 | } | |
6404 | else | |
6405 | /* Now output a very long branch to the original target. */ | |
6406 | output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands); | |
4bcb9e3f | 6407 | |
685d0e07 JDA |
6408 | /* Now restore the value of %r1 in the delay slot. */ |
6409 | if (TARGET_64BIT) | |
6410 | { | |
6fb5fa3c | 6411 | if (actual_fsize == 0 && !df_regs_ever_live_p (2)) |
685d0e07 JDA |
6412 | return "ldd -16(%%r30),%%r1"; |
6413 | else | |
6414 | return "ldd -40(%%r30),%%r1"; | |
6415 | } | |
6416 | else | |
6417 | { | |
6fb5fa3c | 6418 | if (actual_fsize == 0 && !df_regs_ever_live_p (2)) |
685d0e07 JDA |
6419 | return "ldw -20(%%r30),%%r1"; |
6420 | else | |
6421 | return "ldw -12(%%r30),%%r1"; | |
b9821af8 | 6422 | } |
d2364a74 JL |
6423 | } |
6424 | ||
23f6f34f | 6425 | /* This routine handles all the branch-on-bit conditional branch sequences we |
d2364a74 JL |
6426 | might need to generate. It handles nullification of delay slots, |
6427 | varying length branches, negated branches and all combinations of the | |
6428 | above. it returns the appropriate output template to emit the branch. */ | |
6429 | ||
519104fe | 6430 | const char * |
16d74a3c | 6431 | output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which) |
b1a275e1 | 6432 | { |
d2364a74 JL |
6433 | static char buf[100]; |
6434 | int useskip = 0; | |
16d74a3c JDA |
6435 | int nullify = INSN_ANNULLED_BRANCH_P (insn); |
6436 | int length = get_attr_length (insn); | |
6437 | int xdelay; | |
d2364a74 | 6438 | |
112cdef5 | 6439 | /* A conditional branch to the following instruction (e.g. the delay slot) is |
b1a275e1 | 6440 | asking for a disaster. I do not think this can happen as this pattern |
23f6f34f | 6441 | is only used when optimizing; jump optimization should eliminate the |
b1a275e1 | 6442 | jump. But be prepared just in case. */ |
23f6f34f | 6443 | |
02a57c73 JDA |
6444 | if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn)) |
6445 | return "nop"; | |
23f6f34f | 6446 | |
b9821af8 JL |
6447 | /* If this is a long branch with its delay slot unfilled, set `nullify' |
6448 | as it can nullify the delay slot and save a nop. */ | |
a1b36964 | 6449 | if (length == 8 && dbr_sequence_length () == 0) |
b9821af8 JL |
6450 | nullify = 1; |
6451 | ||
6452 | /* If this is a short forward conditional branch which did not get | |
6453 | its delay slot filled, the delay slot can still be nullified. */ | |
a1b36964 | 6454 | if (! nullify && length == 4 && dbr_sequence_length () == 0) |
b9821af8 JL |
6455 | nullify = forward_branch_p (insn); |
6456 | ||
23f6f34f | 6457 | /* A forward branch over a single nullified insn can be done with a |
d2364a74 JL |
6458 | extrs instruction. This avoids a single cycle penalty due to |
6459 | mis-predicted branch if we fall through (branch not taken). */ | |
6460 | ||
a1b36964 | 6461 | if (length == 4 |
b9821af8 | 6462 | && next_real_insn (insn) != 0 |
a1b36964 | 6463 | && get_attr_length (next_real_insn (insn)) == 4 |
b9821af8 | 6464 | && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn)) |
d2364a74 JL |
6465 | && nullify) |
6466 | useskip = 1; | |
6467 | ||
6468 | switch (length) | |
6469 | { | |
6470 | ||
b9821af8 JL |
6471 | /* All short conditional branches except backwards with an unfilled |
6472 | delay slot. */ | |
a1b36964 | 6473 | case 4: |
d2364a74 | 6474 | if (useskip) |
f38b27c7 | 6475 | strcpy (buf, "{extrs,|extrw,s,}"); |
23f6f34f | 6476 | else |
d2364a74 | 6477 | strcpy (buf, "bb,"); |
520babc7 JL |
6478 | if (useskip && GET_MODE (operands[0]) == DImode) |
6479 | strcpy (buf, "extrd,s,*"); | |
6480 | else if (GET_MODE (operands[0]) == DImode) | |
6481 | strcpy (buf, "bb,*"); | |
d2364a74 JL |
6482 | if ((which == 0 && negated) |
6483 | || (which == 1 && ! negated)) | |
6484 | strcat (buf, ">="); | |
6485 | else | |
6486 | strcat (buf, "<"); | |
6487 | if (useskip) | |
3b5e5fb3 | 6488 | strcat (buf, " %0,%1,1,%%r0"); |
d2364a74 JL |
6489 | else if (nullify && negated) |
6490 | strcat (buf, ",n %0,%1,%3"); | |
6491 | else if (nullify && ! negated) | |
6492 | strcat (buf, ",n %0,%1,%2"); | |
6493 | else if (! nullify && negated) | |
b9821af8 | 6494 | strcat (buf, "%0,%1,%3"); |
d2364a74 | 6495 | else if (! nullify && ! negated) |
b9821af8 | 6496 | strcat (buf, " %0,%1,%2"); |
d2364a74 JL |
6497 | break; |
6498 | ||
5bdc5878 | 6499 | /* All long conditionals. Note a short backward branch with an |
b9821af8 JL |
6500 | unfilled delay slot is treated just like a long backward branch |
6501 | with an unfilled delay slot. */ | |
a1b36964 | 6502 | case 8: |
b9821af8 | 6503 | /* Handle weird backwards branch with a filled delay slot |
16d74a3c | 6504 | which is nullified. */ |
b9821af8 JL |
6505 | if (dbr_sequence_length () != 0 |
6506 | && ! forward_branch_p (insn) | |
6507 | && nullify) | |
6508 | { | |
6509 | strcpy (buf, "bb,"); | |
520babc7 JL |
6510 | if (GET_MODE (operands[0]) == DImode) |
6511 | strcat (buf, "*"); | |
b9821af8 JL |
6512 | if ((which == 0 && negated) |
6513 | || (which == 1 && ! negated)) | |
6514 | strcat (buf, "<"); | |
6515 | else | |
6516 | strcat (buf, ">="); | |
6517 | if (negated) | |
3b5e5fb3 | 6518 | strcat (buf, ",n %0,%1,.+12\n\tb %3"); |
b9821af8 | 6519 | else |
3b5e5fb3 | 6520 | strcat (buf, ",n %0,%1,.+12\n\tb %2"); |
b9821af8 | 6521 | } |
923f781d JL |
6522 | /* Handle short backwards branch with an unfilled delay slot. |
6523 | Using a bb;nop rather than extrs;bl saves 1 cycle for both | |
6524 | taken and untaken branches. */ | |
6525 | else if (dbr_sequence_length () == 0 | |
6526 | && ! forward_branch_p (insn) | |
9d98a694 AO |
6527 | && INSN_ADDRESSES_SET_P () |
6528 | && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn))) | |
6529 | - INSN_ADDRESSES (INSN_UID (insn)) - 8)) | |
923f781d JL |
6530 | { |
6531 | strcpy (buf, "bb,"); | |
520babc7 JL |
6532 | if (GET_MODE (operands[0]) == DImode) |
6533 | strcat (buf, "*"); | |
923f781d JL |
6534 | if ((which == 0 && negated) |
6535 | || (which == 1 && ! negated)) | |
6536 | strcat (buf, ">="); | |
6537 | else | |
6538 | strcat (buf, "<"); | |
6539 | if (negated) | |
6540 | strcat (buf, " %0,%1,%3%#"); | |
6541 | else | |
6542 | strcat (buf, " %0,%1,%2%#"); | |
6543 | } | |
d2364a74 | 6544 | else |
b9821af8 | 6545 | { |
520babc7 JL |
6546 | if (GET_MODE (operands[0]) == DImode) |
6547 | strcpy (buf, "extrd,s,*"); | |
16d74a3c JDA |
6548 | else |
6549 | strcpy (buf, "{extrs,|extrw,s,}"); | |
b9821af8 JL |
6550 | if ((which == 0 && negated) |
6551 | || (which == 1 && ! negated)) | |
6552 | strcat (buf, "<"); | |
6553 | else | |
6554 | strcat (buf, ">="); | |
6555 | if (nullify && negated) | |
55abf18a | 6556 | strcat (buf, " %0,%1,1,%%r0\n\tb,n %3"); |
b9821af8 | 6557 | else if (nullify && ! negated) |
55abf18a | 6558 | strcat (buf, " %0,%1,1,%%r0\n\tb,n %2"); |
b9821af8 | 6559 | else if (negated) |
3b5e5fb3 | 6560 | strcat (buf, " %0,%1,1,%%r0\n\tb %3"); |
23f6f34f | 6561 | else |
3b5e5fb3 | 6562 | strcat (buf, " %0,%1,1,%%r0\n\tb %2"); |
b9821af8 | 6563 | } |
d2364a74 JL |
6564 | break; |
6565 | ||
6566 | default: | |
16d74a3c JDA |
6567 | /* The reversed conditional branch must branch over one additional |
6568 | instruction if the delay slot is filled and needs to be extracted | |
6569 | by output_lbranch. If the delay slot is empty or this is a | |
6570 | nullified forward branch, the instruction after the reversed | |
6571 | condition branch must be nullified. */ | |
6572 | if (dbr_sequence_length () == 0 | |
6573 | || (nullify && forward_branch_p (insn))) | |
6574 | { | |
6575 | nullify = 1; | |
6576 | xdelay = 0; | |
8370f6fa | 6577 | operands[4] = GEN_INT (length); |
16d74a3c JDA |
6578 | } |
6579 | else | |
6580 | { | |
6581 | xdelay = 1; | |
8370f6fa | 6582 | operands[4] = GEN_INT (length + 4); |
16d74a3c JDA |
6583 | } |
6584 | ||
6585 | if (GET_MODE (operands[0]) == DImode) | |
8370f6fa | 6586 | strcpy (buf, "bb,*"); |
16d74a3c | 6587 | else |
8370f6fa | 6588 | strcpy (buf, "bb,"); |
16d74a3c JDA |
6589 | if ((which == 0 && negated) |
6590 | || (which == 1 && !negated)) | |
8370f6fa | 6591 | strcat (buf, "<"); |
16d74a3c | 6592 | else |
8370f6fa | 6593 | strcat (buf, ">="); |
16d74a3c | 6594 | if (nullify) |
8370f6fa | 6595 | strcat (buf, ",n %0,%1,.+%4"); |
16d74a3c | 6596 | else |
8370f6fa | 6597 | strcat (buf, " %0,%1,.+%4"); |
16d74a3c JDA |
6598 | output_asm_insn (buf, operands); |
6599 | return output_lbranch (negated ? operands[3] : operands[2], | |
6600 | insn, xdelay); | |
b9821af8 | 6601 | } |
d2364a74 JL |
6602 | return buf; |
6603 | } | |
6604 | ||
6a73009d JL |
6605 | /* This routine handles all the branch-on-variable-bit conditional branch |
6606 | sequences we might need to generate. It handles nullification of delay | |
6607 | slots, varying length branches, negated branches and all combinations | |
6608 | of the above. it returns the appropriate output template to emit the | |
6609 | branch. */ | |
6610 | ||
519104fe | 6611 | const char * |
16d74a3c | 6612 | output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which) |
6a73009d JL |
6613 | { |
6614 | static char buf[100]; | |
6615 | int useskip = 0; | |
16d74a3c JDA |
6616 | int nullify = INSN_ANNULLED_BRANCH_P (insn); |
6617 | int length = get_attr_length (insn); | |
6618 | int xdelay; | |
6a73009d | 6619 | |
112cdef5 | 6620 | /* A conditional branch to the following instruction (e.g. the delay slot) is |
6a73009d JL |
6621 | asking for a disaster. I do not think this can happen as this pattern |
6622 | is only used when optimizing; jump optimization should eliminate the | |
6623 | jump. But be prepared just in case. */ | |
6624 | ||
02a57c73 JDA |
6625 | if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn)) |
6626 | return "nop"; | |
6a73009d JL |
6627 | |
6628 | /* If this is a long branch with its delay slot unfilled, set `nullify' | |
6629 | as it can nullify the delay slot and save a nop. */ | |
6630 | if (length == 8 && dbr_sequence_length () == 0) | |
6631 | nullify = 1; | |
6632 | ||
6633 | /* If this is a short forward conditional branch which did not get | |
6634 | its delay slot filled, the delay slot can still be nullified. */ | |
6635 | if (! nullify && length == 4 && dbr_sequence_length () == 0) | |
6636 | nullify = forward_branch_p (insn); | |
6637 | ||
6638 | /* A forward branch over a single nullified insn can be done with a | |
6639 | extrs instruction. This avoids a single cycle penalty due to | |
6640 | mis-predicted branch if we fall through (branch not taken). */ | |
6641 | ||
6642 | if (length == 4 | |
6643 | && next_real_insn (insn) != 0 | |
6644 | && get_attr_length (next_real_insn (insn)) == 4 | |
6645 | && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn)) | |
6646 | && nullify) | |
6647 | useskip = 1; | |
6648 | ||
6649 | switch (length) | |
6650 | { | |
6651 | ||
6652 | /* All short conditional branches except backwards with an unfilled | |
6653 | delay slot. */ | |
6654 | case 4: | |
6655 | if (useskip) | |
f38b27c7 | 6656 | strcpy (buf, "{vextrs,|extrw,s,}"); |
6a73009d | 6657 | else |
f38b27c7 | 6658 | strcpy (buf, "{bvb,|bb,}"); |
520babc7 | 6659 | if (useskip && GET_MODE (operands[0]) == DImode) |
e72ed000 | 6660 | strcpy (buf, "extrd,s,*"); |
520babc7 JL |
6661 | else if (GET_MODE (operands[0]) == DImode) |
6662 | strcpy (buf, "bb,*"); | |
6a73009d JL |
6663 | if ((which == 0 && negated) |
6664 | || (which == 1 && ! negated)) | |
6665 | strcat (buf, ">="); | |
6666 | else | |
6667 | strcat (buf, "<"); | |
6668 | if (useskip) | |
f38b27c7 | 6669 | strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}"); |
6a73009d | 6670 | else if (nullify && negated) |
f38b27c7 | 6671 | strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}"); |
6a73009d | 6672 | else if (nullify && ! negated) |
f38b27c7 | 6673 | strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}"); |
6a73009d | 6674 | else if (! nullify && negated) |
f38b27c7 | 6675 | strcat (buf, "{%0,%3|%0,%%sar,%3}"); |
6a73009d | 6676 | else if (! nullify && ! negated) |
f38b27c7 | 6677 | strcat (buf, "{ %0,%2| %0,%%sar,%2}"); |
6a73009d JL |
6678 | break; |
6679 | ||
5bdc5878 | 6680 | /* All long conditionals. Note a short backward branch with an |
6a73009d JL |
6681 | unfilled delay slot is treated just like a long backward branch |
6682 | with an unfilled delay slot. */ | |
6683 | case 8: | |
6684 | /* Handle weird backwards branch with a filled delay slot | |
16d74a3c | 6685 | which is nullified. */ |
6a73009d JL |
6686 | if (dbr_sequence_length () != 0 |
6687 | && ! forward_branch_p (insn) | |
6688 | && nullify) | |
6689 | { | |
f38b27c7 | 6690 | strcpy (buf, "{bvb,|bb,}"); |
520babc7 JL |
6691 | if (GET_MODE (operands[0]) == DImode) |
6692 | strcat (buf, "*"); | |
6a73009d JL |
6693 | if ((which == 0 && negated) |
6694 | || (which == 1 && ! negated)) | |
6695 | strcat (buf, "<"); | |
6696 | else | |
6697 | strcat (buf, ">="); | |
6698 | if (negated) | |
f38b27c7 | 6699 | strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}"); |
6a73009d | 6700 | else |
f38b27c7 | 6701 | strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}"); |
6a73009d JL |
6702 | } |
6703 | /* Handle short backwards branch with an unfilled delay slot. | |
6704 | Using a bb;nop rather than extrs;bl saves 1 cycle for both | |
6705 | taken and untaken branches. */ | |
6706 | else if (dbr_sequence_length () == 0 | |
6707 | && ! forward_branch_p (insn) | |
9d98a694 AO |
6708 | && INSN_ADDRESSES_SET_P () |
6709 | && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn))) | |
6710 | - INSN_ADDRESSES (INSN_UID (insn)) - 8)) | |
6a73009d | 6711 | { |
f38b27c7 | 6712 | strcpy (buf, "{bvb,|bb,}"); |
520babc7 JL |
6713 | if (GET_MODE (operands[0]) == DImode) |
6714 | strcat (buf, "*"); | |
6a73009d JL |
6715 | if ((which == 0 && negated) |
6716 | || (which == 1 && ! negated)) | |
6717 | strcat (buf, ">="); | |
6718 | else | |
6719 | strcat (buf, "<"); | |
6720 | if (negated) | |
f38b27c7 | 6721 | strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}"); |
6a73009d | 6722 | else |
f38b27c7 | 6723 | strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}"); |
6a73009d JL |
6724 | } |
6725 | else | |
6726 | { | |
f38b27c7 | 6727 | strcpy (buf, "{vextrs,|extrw,s,}"); |
520babc7 JL |
6728 | if (GET_MODE (operands[0]) == DImode) |
6729 | strcpy (buf, "extrd,s,*"); | |
6a73009d JL |
6730 | if ((which == 0 && negated) |
6731 | || (which == 1 && ! negated)) | |
6732 | strcat (buf, "<"); | |
6733 | else | |
6734 | strcat (buf, ">="); | |
6735 | if (nullify && negated) | |
f38b27c7 | 6736 | strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}"); |
6a73009d | 6737 | else if (nullify && ! negated) |
f38b27c7 | 6738 | strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}"); |
6a73009d | 6739 | else if (negated) |
f38b27c7 | 6740 | strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}"); |
6a73009d | 6741 | else |
f38b27c7 | 6742 | strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}"); |
6a73009d JL |
6743 | } |
6744 | break; | |
6745 | ||
6746 | default: | |
16d74a3c JDA |
6747 | /* The reversed conditional branch must branch over one additional |
6748 | instruction if the delay slot is filled and needs to be extracted | |
6749 | by output_lbranch. If the delay slot is empty or this is a | |
6750 | nullified forward branch, the instruction after the reversed | |
6751 | condition branch must be nullified. */ | |
6752 | if (dbr_sequence_length () == 0 | |
6753 | || (nullify && forward_branch_p (insn))) | |
6754 | { | |
6755 | nullify = 1; | |
6756 | xdelay = 0; | |
8370f6fa | 6757 | operands[4] = GEN_INT (length); |
16d74a3c JDA |
6758 | } |
6759 | else | |
6760 | { | |
6761 | xdelay = 1; | |
8370f6fa | 6762 | operands[4] = GEN_INT (length + 4); |
16d74a3c JDA |
6763 | } |
6764 | ||
6765 | if (GET_MODE (operands[0]) == DImode) | |
8370f6fa | 6766 | strcpy (buf, "bb,*"); |
16d74a3c | 6767 | else |
8370f6fa | 6768 | strcpy (buf, "{bvb,|bb,}"); |
16d74a3c JDA |
6769 | if ((which == 0 && negated) |
6770 | || (which == 1 && !negated)) | |
8370f6fa | 6771 | strcat (buf, "<"); |
16d74a3c | 6772 | else |
8370f6fa | 6773 | strcat (buf, ">="); |
16d74a3c | 6774 | if (nullify) |
8370f6fa | 6775 | strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}"); |
16d74a3c | 6776 | else |
8370f6fa | 6777 | strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}"); |
16d74a3c JDA |
6778 | output_asm_insn (buf, operands); |
6779 | return output_lbranch (negated ? operands[3] : operands[2], | |
6780 | insn, xdelay); | |
6a73009d JL |
6781 | } |
6782 | return buf; | |
6783 | } | |
6784 | ||
b1a275e1 JL |
6785 | /* Return the output template for emitting a dbra type insn. |
6786 | ||
6787 | Note it may perform some output operations on its own before | |
6788 | returning the final output string. */ | |
519104fe | 6789 | const char * |
b7849684 | 6790 | output_dbra (rtx *operands, rtx insn, int which_alternative) |
b1a275e1 | 6791 | { |
16d74a3c | 6792 | int length = get_attr_length (insn); |
b1a275e1 | 6793 | |
112cdef5 | 6794 | /* A conditional branch to the following instruction (e.g. the delay slot) is |
b1a275e1 JL |
6795 | asking for a disaster. Be prepared! */ |
6796 | ||
02a57c73 | 6797 | if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn)) |
b1a275e1 JL |
6798 | { |
6799 | if (which_alternative == 0) | |
6800 | return "ldo %1(%0),%0"; | |
6801 | else if (which_alternative == 1) | |
6802 | { | |
831c1763 AM |
6803 | output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands); |
6804 | output_asm_insn ("ldw -16(%%r30),%4", operands); | |
d2d28085 | 6805 | output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands); |
f38b27c7 | 6806 | return "{fldws|fldw} -16(%%r30),%0"; |
b1a275e1 JL |
6807 | } |
6808 | else | |
6809 | { | |
6810 | output_asm_insn ("ldw %0,%4", operands); | |
6811 | return "ldo %1(%4),%4\n\tstw %4,%0"; | |
6812 | } | |
6813 | } | |
6814 | ||
6815 | if (which_alternative == 0) | |
6816 | { | |
6817 | int nullify = INSN_ANNULLED_BRANCH_P (insn); | |
16d74a3c | 6818 | int xdelay; |
b1a275e1 JL |
6819 | |
6820 | /* If this is a long branch with its delay slot unfilled, set `nullify' | |
6821 | as it can nullify the delay slot and save a nop. */ | |
a1b36964 | 6822 | if (length == 8 && dbr_sequence_length () == 0) |
b1a275e1 JL |
6823 | nullify = 1; |
6824 | ||
6825 | /* If this is a short forward conditional branch which did not get | |
6826 | its delay slot filled, the delay slot can still be nullified. */ | |
a1b36964 | 6827 | if (! nullify && length == 4 && dbr_sequence_length () == 0) |
b1a275e1 JL |
6828 | nullify = forward_branch_p (insn); |
6829 | ||
144d51f9 | 6830 | switch (length) |
b1a275e1 | 6831 | { |
144d51f9 NS |
6832 | case 4: |
6833 | if (nullify) | |
6834 | return "addib,%C2,n %1,%0,%3"; | |
6835 | else | |
6836 | return "addib,%C2 %1,%0,%3"; | |
6837 | ||
6838 | case 8: | |
23f6f34f | 6839 | /* Handle weird backwards branch with a fulled delay slot |
b1a275e1 JL |
6840 | which is nullified. */ |
6841 | if (dbr_sequence_length () != 0 | |
6842 | && ! forward_branch_p (insn) | |
6843 | && nullify) | |
3b5e5fb3 | 6844 | return "addib,%N2,n %1,%0,.+12\n\tb %3"; |
923f781d JL |
6845 | /* Handle short backwards branch with an unfilled delay slot. |
6846 | Using a addb;nop rather than addi;bl saves 1 cycle for both | |
6847 | taken and untaken branches. */ | |
6848 | else if (dbr_sequence_length () == 0 | |
6849 | && ! forward_branch_p (insn) | |
9d98a694 AO |
6850 | && INSN_ADDRESSES_SET_P () |
6851 | && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn))) | |
6852 | - INSN_ADDRESSES (INSN_UID (insn)) - 8)) | |
923f781d | 6853 | return "addib,%C2 %1,%0,%3%#"; |
23f6f34f TG |
6854 | |
6855 | /* Handle normal cases. */ | |
b1a275e1 | 6856 | if (nullify) |
3b5e5fb3 | 6857 | return "addi,%N2 %1,%0,%0\n\tb,n %3"; |
b1a275e1 | 6858 | else |
3b5e5fb3 | 6859 | return "addi,%N2 %1,%0,%0\n\tb %3"; |
144d51f9 NS |
6860 | |
6861 | default: | |
16d74a3c JDA |
6862 | /* The reversed conditional branch must branch over one additional |
6863 | instruction if the delay slot is filled and needs to be extracted | |
6864 | by output_lbranch. If the delay slot is empty or this is a | |
6865 | nullified forward branch, the instruction after the reversed | |
6866 | condition branch must be nullified. */ | |
6867 | if (dbr_sequence_length () == 0 | |
6868 | || (nullify && forward_branch_p (insn))) | |
6869 | { | |
6870 | nullify = 1; | |
6871 | xdelay = 0; | |
6872 | operands[4] = GEN_INT (length); | |
6873 | } | |
6874 | else | |
6875 | { | |
6876 | xdelay = 1; | |
6877 | operands[4] = GEN_INT (length + 4); | |
6878 | } | |
6879 | ||
6880 | if (nullify) | |
6881 | output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands); | |
6882 | else | |
6883 | output_asm_insn ("addib,%N2 %1,%0,.+%4", operands); | |
6884 | ||
6885 | return output_lbranch (operands[3], insn, xdelay); | |
b1a275e1 | 6886 | } |
144d51f9 | 6887 | |
b1a275e1 JL |
6888 | } |
6889 | /* Deal with gross reload from FP register case. */ | |
6890 | else if (which_alternative == 1) | |
6891 | { | |
6892 | /* Move loop counter from FP register to MEM then into a GR, | |
6893 | increment the GR, store the GR into MEM, and finally reload | |
23f6f34f | 6894 | the FP register from MEM from within the branch's delay slot. */ |
831c1763 AM |
6895 | output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4", |
6896 | operands); | |
d2d28085 | 6897 | output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands); |
16d74a3c | 6898 | if (length == 24) |
f38b27c7 | 6899 | return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0"; |
16d74a3c | 6900 | else if (length == 28) |
f38b27c7 | 6901 | return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0"; |
16d74a3c JDA |
6902 | else |
6903 | { | |
8370f6fa JDA |
6904 | operands[5] = GEN_INT (length - 16); |
6905 | output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands); | |
16d74a3c JDA |
6906 | output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands); |
6907 | return output_lbranch (operands[3], insn, 0); | |
6908 | } | |
b1a275e1 JL |
6909 | } |
6910 | /* Deal with gross reload from memory case. */ | |
6911 | else | |
6912 | { | |
6913 | /* Reload loop counter from memory, the store back to memory | |
71cc389b | 6914 | happens in the branch's delay slot. */ |
b1a275e1 | 6915 | output_asm_insn ("ldw %0,%4", operands); |
16d74a3c | 6916 | if (length == 12) |
b1a275e1 | 6917 | return "addib,%C2 %1,%4,%3\n\tstw %4,%0"; |
16d74a3c | 6918 | else if (length == 16) |
3b5e5fb3 | 6919 | return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0"; |
16d74a3c JDA |
6920 | else |
6921 | { | |
8370f6fa JDA |
6922 | operands[5] = GEN_INT (length - 4); |
6923 | output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands); | |
16d74a3c JDA |
6924 | return output_lbranch (operands[3], insn, 0); |
6925 | } | |
b1a275e1 JL |
6926 | } |
6927 | } | |
6928 | ||
16d74a3c | 6929 | /* Return the output template for emitting a movb type insn. |
b1a275e1 JL |
6930 | |
6931 | Note it may perform some output operations on its own before | |
6932 | returning the final output string. */ | |
519104fe | 6933 | const char * |
b7849684 JE |
6934 | output_movb (rtx *operands, rtx insn, int which_alternative, |
6935 | int reverse_comparison) | |
b1a275e1 | 6936 | { |
16d74a3c | 6937 | int length = get_attr_length (insn); |
b1a275e1 | 6938 | |
112cdef5 | 6939 | /* A conditional branch to the following instruction (e.g. the delay slot) is |
b1a275e1 JL |
6940 | asking for a disaster. Be prepared! */ |
6941 | ||
02a57c73 | 6942 | if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn)) |
b1a275e1 JL |
6943 | { |
6944 | if (which_alternative == 0) | |
6945 | return "copy %1,%0"; | |
6946 | else if (which_alternative == 1) | |
6947 | { | |
831c1763 | 6948 | output_asm_insn ("stw %1,-16(%%r30)", operands); |
f38b27c7 | 6949 | return "{fldws|fldw} -16(%%r30),%0"; |
b1a275e1 | 6950 | } |
b1092901 | 6951 | else if (which_alternative == 2) |
b1a275e1 | 6952 | return "stw %1,%0"; |
b1092901 JL |
6953 | else |
6954 | return "mtsar %r1"; | |
b1a275e1 JL |
6955 | } |
6956 | ||
6957 | /* Support the second variant. */ | |
6958 | if (reverse_comparison) | |
6959 | PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2]))); | |
6960 | ||
6961 | if (which_alternative == 0) | |
6962 | { | |
6963 | int nullify = INSN_ANNULLED_BRANCH_P (insn); | |
16d74a3c | 6964 | int xdelay; |
b1a275e1 JL |
6965 | |
6966 | /* If this is a long branch with its delay slot unfilled, set `nullify' | |
6967 | as it can nullify the delay slot and save a nop. */ | |
a1b36964 | 6968 | if (length == 8 && dbr_sequence_length () == 0) |
b1a275e1 JL |
6969 | nullify = 1; |
6970 | ||
6971 | /* If this is a short forward conditional branch which did not get | |
6972 | its delay slot filled, the delay slot can still be nullified. */ | |
a1b36964 | 6973 | if (! nullify && length == 4 && dbr_sequence_length () == 0) |
b1a275e1 JL |
6974 | nullify = forward_branch_p (insn); |
6975 | ||
144d51f9 | 6976 | switch (length) |
b1a275e1 | 6977 | { |
144d51f9 NS |
6978 | case 4: |
6979 | if (nullify) | |
6980 | return "movb,%C2,n %1,%0,%3"; | |
6981 | else | |
6982 | return "movb,%C2 %1,%0,%3"; | |
6983 | ||
6984 | case 8: | |
23f6f34f | 6985 | /* Handle weird backwards branch with a filled delay slot |
b1a275e1 JL |
6986 | which is nullified. */ |
6987 | if (dbr_sequence_length () != 0 | |
6988 | && ! forward_branch_p (insn) | |
6989 | && nullify) | |
3b5e5fb3 | 6990 | return "movb,%N2,n %1,%0,.+12\n\tb %3"; |
23f6f34f | 6991 | |
923f781d JL |
6992 | /* Handle short backwards branch with an unfilled delay slot. |
6993 | Using a movb;nop rather than or;bl saves 1 cycle for both | |
6994 | taken and untaken branches. */ | |
6995 | else if (dbr_sequence_length () == 0 | |
6996 | && ! forward_branch_p (insn) | |
9d98a694 AO |
6997 | && INSN_ADDRESSES_SET_P () |
6998 | && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn))) | |
6999 | - INSN_ADDRESSES (INSN_UID (insn)) - 8)) | |
923f781d | 7000 | return "movb,%C2 %1,%0,%3%#"; |
23f6f34f | 7001 | /* Handle normal cases. */ |
b1a275e1 | 7002 | if (nullify) |
3b5e5fb3 | 7003 | return "or,%N2 %1,%%r0,%0\n\tb,n %3"; |
b1a275e1 | 7004 | else |
3b5e5fb3 | 7005 | return "or,%N2 %1,%%r0,%0\n\tb %3"; |
144d51f9 NS |
7006 | |
7007 | default: | |
16d74a3c JDA |
7008 | /* The reversed conditional branch must branch over one additional |
7009 | instruction if the delay slot is filled and needs to be extracted | |
7010 | by output_lbranch. If the delay slot is empty or this is a | |
7011 | nullified forward branch, the instruction after the reversed | |
7012 | condition branch must be nullified. */ | |
7013 | if (dbr_sequence_length () == 0 | |
7014 | || (nullify && forward_branch_p (insn))) | |
7015 | { | |
7016 | nullify = 1; | |
7017 | xdelay = 0; | |
7018 | operands[4] = GEN_INT (length); | |
7019 | } | |
7020 | else | |
7021 | { | |
7022 | xdelay = 1; | |
7023 | operands[4] = GEN_INT (length + 4); | |
7024 | } | |
7025 | ||
7026 | if (nullify) | |
7027 | output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands); | |
7028 | else | |
7029 | output_asm_insn ("movb,%N2 %1,%0,.+%4", operands); | |
7030 | ||
7031 | return output_lbranch (operands[3], insn, xdelay); | |
b1a275e1 | 7032 | } |
b1a275e1 | 7033 | } |
16d74a3c | 7034 | /* Deal with gross reload for FP destination register case. */ |
b1a275e1 JL |
7035 | else if (which_alternative == 1) |
7036 | { | |
16d74a3c JDA |
7037 | /* Move source register to MEM, perform the branch test, then |
7038 | finally load the FP register from MEM from within the branch's | |
7039 | delay slot. */ | |
831c1763 | 7040 | output_asm_insn ("stw %1,-16(%%r30)", operands); |
16d74a3c | 7041 | if (length == 12) |
f38b27c7 | 7042 | return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0"; |
16d74a3c | 7043 | else if (length == 16) |
f38b27c7 | 7044 | return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0"; |
16d74a3c JDA |
7045 | else |
7046 | { | |
8370f6fa JDA |
7047 | operands[4] = GEN_INT (length - 4); |
7048 | output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands); | |
16d74a3c JDA |
7049 | output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands); |
7050 | return output_lbranch (operands[3], insn, 0); | |
7051 | } | |
b1a275e1 JL |
7052 | } |
7053 | /* Deal with gross reload from memory case. */ | |
b1092901 | 7054 | else if (which_alternative == 2) |
b1a275e1 JL |
7055 | { |
7056 | /* Reload loop counter from memory, the store back to memory | |
71cc389b | 7057 | happens in the branch's delay slot. */ |
16d74a3c | 7058 | if (length == 8) |
f38b27c7 | 7059 | return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0"; |
16d74a3c | 7060 | else if (length == 12) |
f38b27c7 | 7061 | return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0"; |
16d74a3c JDA |
7062 | else |
7063 | { | |
8370f6fa JDA |
7064 | operands[4] = GEN_INT (length); |
7065 | output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0", | |
7066 | operands); | |
16d74a3c JDA |
7067 | return output_lbranch (operands[3], insn, 0); |
7068 | } | |
b1a275e1 | 7069 | } |
b1092901 JL |
7070 | /* Handle SAR as a destination. */ |
7071 | else | |
7072 | { | |
16d74a3c | 7073 | if (length == 8) |
f38b27c7 | 7074 | return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1"; |
16d74a3c | 7075 | else if (length == 12) |
715ab8c3 | 7076 | return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1"; |
16d74a3c JDA |
7077 | else |
7078 | { | |
8370f6fa JDA |
7079 | operands[4] = GEN_INT (length); |
7080 | output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1", | |
7081 | operands); | |
16d74a3c JDA |
7082 | return output_lbranch (operands[3], insn, 0); |
7083 | } | |
b1092901 | 7084 | } |
b1a275e1 JL |
7085 | } |
7086 | ||
a02aa5b0 JDA |
7087 | /* Copy any FP arguments in INSN into integer registers. */ |
7088 | static void | |
b7849684 | 7089 | copy_fp_args (rtx insn) |
a02aa5b0 JDA |
7090 | { |
7091 | rtx link; | |
7092 | rtx xoperands[2]; | |
b1a275e1 | 7093 | |
a02aa5b0 JDA |
7094 | for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) |
7095 | { | |
7096 | int arg_mode, regno; | |
7097 | rtx use = XEXP (link, 0); | |
f726ea7d | 7098 | |
a02aa5b0 JDA |
7099 | if (! (GET_CODE (use) == USE |
7100 | && GET_CODE (XEXP (use, 0)) == REG | |
7101 | && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0))))) | |
7102 | continue; | |
2c4ff308 | 7103 | |
a02aa5b0 JDA |
7104 | arg_mode = GET_MODE (XEXP (use, 0)); |
7105 | regno = REGNO (XEXP (use, 0)); | |
520babc7 | 7106 | |
a02aa5b0 JDA |
7107 | /* Is it a floating point register? */ |
7108 | if (regno >= 32 && regno <= 39) | |
7109 | { | |
7110 | /* Copy the FP register into an integer register via memory. */ | |
7111 | if (arg_mode == SFmode) | |
7112 | { | |
7113 | xoperands[0] = XEXP (use, 0); | |
7114 | xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2); | |
7115 | output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands); | |
7116 | output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands); | |
7117 | } | |
7118 | else | |
7119 | { | |
7120 | xoperands[0] = XEXP (use, 0); | |
7121 | xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2); | |
7122 | output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands); | |
7123 | output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands); | |
7124 | output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands); | |
7125 | } | |
7126 | } | |
279c9bde | 7127 | } |
a02aa5b0 JDA |
7128 | } |
7129 | ||
7130 | /* Compute length of the FP argument copy sequence for INSN. */ | |
7131 | static int | |
b7849684 | 7132 | length_fp_args (rtx insn) |
a02aa5b0 JDA |
7133 | { |
7134 | int length = 0; | |
7135 | rtx link; | |
279c9bde | 7136 | |
a02aa5b0 | 7137 | for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) |
6a73009d | 7138 | { |
a02aa5b0 JDA |
7139 | int arg_mode, regno; |
7140 | rtx use = XEXP (link, 0); | |
7141 | ||
7142 | if (! (GET_CODE (use) == USE | |
7143 | && GET_CODE (XEXP (use, 0)) == REG | |
7144 | && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0))))) | |
7145 | continue; | |
6a73009d | 7146 | |
a02aa5b0 JDA |
7147 | arg_mode = GET_MODE (XEXP (use, 0)); |
7148 | regno = REGNO (XEXP (use, 0)); | |
7149 | ||
7150 | /* Is it a floating point register? */ | |
7151 | if (regno >= 32 && regno <= 39) | |
6a73009d | 7152 | { |
a02aa5b0 JDA |
7153 | if (arg_mode == SFmode) |
7154 | length += 8; | |
7155 | else | |
7156 | length += 12; | |
6a73009d | 7157 | } |
a02aa5b0 | 7158 | } |
6a73009d | 7159 | |
a02aa5b0 JDA |
7160 | return length; |
7161 | } | |
3d9268b6 | 7162 | |
611ad29e JDA |
7163 | /* Return the attribute length for the millicode call instruction INSN. |
7164 | The length must match the code generated by output_millicode_call. | |
7165 | We include the delay slot in the returned length as it is better to | |
a02aa5b0 | 7166 | over estimate the length than to under estimate it. */ |
a7721dc0 | 7167 | |
a02aa5b0 | 7168 | int |
b7849684 | 7169 | attr_length_millicode_call (rtx insn) |
a02aa5b0 | 7170 | { |
611ad29e | 7171 | unsigned long distance = -1; |
62910663 | 7172 | unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes; |
a02aa5b0 | 7173 | |
611ad29e JDA |
7174 | if (INSN_ADDRESSES_SET_P ()) |
7175 | { | |
5fad1c24 JDA |
7176 | distance = (total + insn_current_reference_address (insn)); |
7177 | if (distance < total) | |
611ad29e JDA |
7178 | distance = -1; |
7179 | } | |
a02aa5b0 JDA |
7180 | |
7181 | if (TARGET_64BIT) | |
7182 | { | |
7183 | if (!TARGET_LONG_CALLS && distance < 7600000) | |
611ad29e | 7184 | return 8; |
a02aa5b0 | 7185 | |
611ad29e | 7186 | return 20; |
a02aa5b0 JDA |
7187 | } |
7188 | else if (TARGET_PORTABLE_RUNTIME) | |
611ad29e | 7189 | return 24; |
a02aa5b0 JDA |
7190 | else |
7191 | { | |
7192 | if (!TARGET_LONG_CALLS && distance < 240000) | |
611ad29e | 7193 | return 8; |
a02aa5b0 JDA |
7194 | |
7195 | if (TARGET_LONG_ABS_CALL && !flag_pic) | |
611ad29e | 7196 | return 12; |
a02aa5b0 | 7197 | |
611ad29e | 7198 | return 24; |
a02aa5b0 JDA |
7199 | } |
7200 | } | |
7201 | ||
7202 | /* INSN is a function call. It may have an unconditional jump | |
7203 | in its delay slot. | |
a7721dc0 | 7204 | |
a02aa5b0 | 7205 | CALL_DEST is the routine we are calling. */ |
a7721dc0 | 7206 | |
a02aa5b0 | 7207 | const char * |
b7849684 | 7208 | output_millicode_call (rtx insn, rtx call_dest) |
a02aa5b0 JDA |
7209 | { |
7210 | int attr_length = get_attr_length (insn); | |
7211 | int seq_length = dbr_sequence_length (); | |
7212 | int distance; | |
7213 | rtx seq_insn; | |
7214 | rtx xoperands[3]; | |
a7721dc0 | 7215 | |
a02aa5b0 JDA |
7216 | xoperands[0] = call_dest; |
7217 | xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31); | |
7218 | ||
7219 | /* Handle the common case where we are sure that the branch will | |
7220 | reach the beginning of the $CODE$ subspace. The within reach | |
7221 | form of the $$sh_func_adrs call has a length of 28. Because | |
272d0bee | 7222 | it has an attribute type of multi, it never has a nonzero |
a02aa5b0 JDA |
7223 | sequence length. The length of the $$sh_func_adrs is the same |
7224 | as certain out of reach PIC calls to other routines. */ | |
7225 | if (!TARGET_LONG_CALLS | |
7226 | && ((seq_length == 0 | |
7227 | && (attr_length == 12 | |
7228 | || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI))) | |
7229 | || (seq_length != 0 && attr_length == 8))) | |
7230 | { | |
7231 | output_asm_insn ("{bl|b,l} %0,%2", xoperands); | |
7232 | } | |
7233 | else | |
7234 | { | |
7235 | if (TARGET_64BIT) | |
7236 | { | |
7237 | /* It might seem that one insn could be saved by accessing | |
7238 | the millicode function using the linkage table. However, | |
7239 | this doesn't work in shared libraries and other dynamically | |
7240 | loaded objects. Using a pc-relative sequence also avoids | |
7241 | problems related to the implicit use of the gp register. */ | |
7242 | output_asm_insn ("b,l .+8,%%r1", xoperands); | |
581d9404 JDA |
7243 | |
7244 | if (TARGET_GAS) | |
7245 | { | |
7246 | output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands); | |
7247 | output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands); | |
7248 | } | |
7249 | else | |
7250 | { | |
7251 | xoperands[1] = gen_label_rtx (); | |
7252 | output_asm_insn ("addil L'%0-%l1,%%r1", xoperands); | |
ecc418c4 | 7253 | targetm.asm_out.internal_label (asm_out_file, "L", |
581d9404 JDA |
7254 | CODE_LABEL_NUMBER (xoperands[1])); |
7255 | output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands); | |
7256 | } | |
7257 | ||
a02aa5b0 | 7258 | output_asm_insn ("bve,l (%%r1),%%r2", xoperands); |
6a73009d | 7259 | } |
6a73009d JL |
7260 | else if (TARGET_PORTABLE_RUNTIME) |
7261 | { | |
a02aa5b0 JDA |
7262 | /* Pure portable runtime doesn't allow be/ble; we also don't |
7263 | have PIC support in the assembler/linker, so this sequence | |
7264 | is needed. */ | |
6a73009d | 7265 | |
a02aa5b0 JDA |
7266 | /* Get the address of our target into %r1. */ |
7267 | output_asm_insn ("ldil L'%0,%%r1", xoperands); | |
7268 | output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands); | |
6a73009d | 7269 | |
a02aa5b0 JDA |
7270 | /* Get our return address into %r31. */ |
7271 | output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands); | |
7272 | output_asm_insn ("addi 8,%%r31,%%r31", xoperands); | |
6a73009d | 7273 | |
a02aa5b0 JDA |
7274 | /* Jump to our target address in %r1. */ |
7275 | output_asm_insn ("bv %%r0(%%r1)", xoperands); | |
6a73009d | 7276 | } |
a02aa5b0 | 7277 | else if (!flag_pic) |
6a73009d | 7278 | { |
a02aa5b0 | 7279 | output_asm_insn ("ldil L'%0,%%r1", xoperands); |
6248c4dd | 7280 | if (TARGET_PA_20) |
a02aa5b0 | 7281 | output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands); |
6248c4dd | 7282 | else |
a02aa5b0 | 7283 | output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands); |
6a73009d | 7284 | } |
a02aa5b0 | 7285 | else |
6a73009d | 7286 | { |
581d9404 JDA |
7287 | output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands); |
7288 | output_asm_insn ("addi 16,%%r1,%%r31", xoperands); | |
7289 | ||
a02aa5b0 JDA |
7290 | if (TARGET_SOM || !TARGET_GAS) |
7291 | { | |
7292 | /* The HP assembler can generate relocations for the | |
7293 | difference of two symbols. GAS can do this for a | |
7294 | millicode symbol but not an arbitrary external | |
7295 | symbol when generating SOM output. */ | |
7296 | xoperands[1] = gen_label_rtx (); | |
ecc418c4 | 7297 | targetm.asm_out.internal_label (asm_out_file, "L", |
a02aa5b0 JDA |
7298 | CODE_LABEL_NUMBER (xoperands[1])); |
7299 | output_asm_insn ("addil L'%0-%l1,%%r1", xoperands); | |
7300 | output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands); | |
7301 | } | |
7302 | else | |
7303 | { | |
a02aa5b0 JDA |
7304 | output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands); |
7305 | output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1", | |
7306 | xoperands); | |
7307 | } | |
6a73009d | 7308 | |
a02aa5b0 JDA |
7309 | /* Jump to our target address in %r1. */ |
7310 | output_asm_insn ("bv %%r0(%%r1)", xoperands); | |
6a73009d | 7311 | } |
6a73009d JL |
7312 | } |
7313 | ||
a02aa5b0 JDA |
7314 | if (seq_length == 0) |
7315 | output_asm_insn ("nop", xoperands); | |
6a73009d | 7316 | |
a02aa5b0 JDA |
7317 | /* We are done if there isn't a jump in the delay slot. */ |
7318 | if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN) | |
7319 | return ""; | |
6a73009d | 7320 | |
a02aa5b0 JDA |
7321 | /* This call has an unconditional jump in its delay slot. */ |
7322 | xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1); | |
6a73009d | 7323 | |
a02aa5b0 JDA |
7324 | /* See if the return address can be adjusted. Use the containing |
7325 | sequence insn's address. */ | |
611ad29e | 7326 | if (INSN_ADDRESSES_SET_P ()) |
6a73009d | 7327 | { |
611ad29e JDA |
7328 | seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0))); |
7329 | distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn)))) | |
7330 | - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8); | |
7331 | ||
7332 | if (VAL_14_BITS_P (distance)) | |
7333 | { | |
7334 | xoperands[1] = gen_label_rtx (); | |
7335 | output_asm_insn ("ldo %0-%1(%2),%2", xoperands); | |
ecc418c4 JDA |
7336 | targetm.asm_out.internal_label (asm_out_file, "L", |
7337 | CODE_LABEL_NUMBER (xoperands[1])); | |
611ad29e JDA |
7338 | } |
7339 | else | |
7340 | /* ??? This branch may not reach its target. */ | |
7341 | output_asm_insn ("nop\n\tb,n %0", xoperands); | |
6a73009d | 7342 | } |
a02aa5b0 JDA |
7343 | else |
7344 | /* ??? This branch may not reach its target. */ | |
7345 | output_asm_insn ("nop\n\tb,n %0", xoperands); | |
6a73009d JL |
7346 | |
7347 | /* Delete the jump. */ | |
a38e7aa5 | 7348 | SET_INSN_DELETED (NEXT_INSN (insn)); |
a02aa5b0 | 7349 | |
6a73009d JL |
7350 | return ""; |
7351 | } | |
7352 | ||
611ad29e JDA |
7353 | /* Return the attribute length of the call instruction INSN. The SIBCALL |
7354 | flag indicates whether INSN is a regular call or a sibling call. The | |
32562302 JDA |
7355 | length returned must be longer than the code actually generated by |
7356 | output_call. Since branch shortening is done before delay branch | |
7357 | sequencing, there is no way to determine whether or not the delay | |
7358 | slot will be filled during branch shortening. Even when the delay | |
7359 | slot is filled, we may have to add a nop if the delay slot contains | |
7360 | a branch that can't reach its target. Thus, we always have to include | |
7361 | the delay slot in the length estimate. This used to be done in | |
7362 | pa_adjust_insn_length but we do it here now as some sequences always | |
7363 | fill the delay slot and we can save four bytes in the estimate for | |
7364 | these sequences. */ | |
a02aa5b0 JDA |
7365 | |
7366 | int | |
b7849684 | 7367 | attr_length_call (rtx insn, int sibcall) |
a02aa5b0 | 7368 | { |
32562302 | 7369 | int local_call; |
e40375e0 | 7370 | rtx call, call_dest; |
32562302 JDA |
7371 | tree call_decl; |
7372 | int length = 0; | |
7373 | rtx pat = PATTERN (insn); | |
611ad29e | 7374 | unsigned long distance = -1; |
a02aa5b0 | 7375 | |
e40375e0 JDA |
7376 | gcc_assert (GET_CODE (insn) == CALL_INSN); |
7377 | ||
611ad29e JDA |
7378 | if (INSN_ADDRESSES_SET_P ()) |
7379 | { | |
32562302 JDA |
7380 | unsigned long total; |
7381 | ||
7382 | total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes; | |
5fad1c24 JDA |
7383 | distance = (total + insn_current_reference_address (insn)); |
7384 | if (distance < total) | |
611ad29e JDA |
7385 | distance = -1; |
7386 | } | |
a02aa5b0 | 7387 | |
e40375e0 | 7388 | gcc_assert (GET_CODE (pat) == PARALLEL); |
a02aa5b0 | 7389 | |
e40375e0 JDA |
7390 | /* Get the call rtx. */ |
7391 | call = XVECEXP (pat, 0, 0); | |
7392 | if (GET_CODE (call) == SET) | |
7393 | call = SET_SRC (call); | |
7394 | ||
7395 | gcc_assert (GET_CODE (call) == CALL); | |
7396 | ||
7397 | /* Determine if this is a local call. */ | |
7398 | call_dest = XEXP (XEXP (call, 0), 0); | |
32562302 | 7399 | call_decl = SYMBOL_REF_DECL (call_dest); |
ecc418c4 | 7400 | local_call = call_decl && targetm.binds_local_p (call_decl); |
a02aa5b0 | 7401 | |
32562302 JDA |
7402 | /* pc-relative branch. */ |
7403 | if (!TARGET_LONG_CALLS | |
7404 | && ((TARGET_PA_20 && !sibcall && distance < 7600000) | |
7405 | || distance < 240000)) | |
7406 | length += 8; | |
a02aa5b0 | 7407 | |
32562302 JDA |
7408 | /* 64-bit plabel sequence. */ |
7409 | else if (TARGET_64BIT && !local_call) | |
7410 | length += sibcall ? 28 : 24; | |
a02aa5b0 | 7411 | |
32562302 JDA |
7412 | /* non-pic long absolute branch sequence. */ |
7413 | else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic) | |
7414 | length += 12; | |
a02aa5b0 | 7415 | |
32562302 | 7416 | /* long pc-relative branch sequence. */ |
9dbd54be | 7417 | else if (TARGET_LONG_PIC_SDIFF_CALL |
751d9855 JDA |
7418 | || (TARGET_GAS && !TARGET_SOM |
7419 | && (TARGET_LONG_PIC_PCREL_CALL || local_call))) | |
32562302 JDA |
7420 | { |
7421 | length += 20; | |
a02aa5b0 | 7422 | |
9dbd54be | 7423 | if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && flag_pic) |
32562302 JDA |
7424 | length += 8; |
7425 | } | |
62910663 | 7426 | |
32562302 JDA |
7427 | /* 32-bit plabel sequence. */ |
7428 | else | |
7429 | { | |
7430 | length += 32; | |
a02aa5b0 | 7431 | |
32562302 JDA |
7432 | if (TARGET_SOM) |
7433 | length += length_fp_args (insn); | |
7434 | ||
7435 | if (flag_pic) | |
7436 | length += 4; | |
90330d31 | 7437 | |
32562302 JDA |
7438 | if (!TARGET_PA_20) |
7439 | { | |
a02aa5b0 JDA |
7440 | if (!sibcall) |
7441 | length += 8; | |
7442 | ||
9dbd54be | 7443 | if (!TARGET_NO_SPACE_REGS && flag_pic) |
32562302 | 7444 | length += 8; |
a02aa5b0 JDA |
7445 | } |
7446 | } | |
32562302 JDA |
7447 | |
7448 | return length; | |
a02aa5b0 JDA |
7449 | } |
7450 | ||
7451 | /* INSN is a function call. It may have an unconditional jump | |
6a73009d JL |
7452 | in its delay slot. |
7453 | ||
7454 | CALL_DEST is the routine we are calling. */ | |
7455 | ||
519104fe | 7456 | const char * |
b7849684 | 7457 | output_call (rtx insn, rtx call_dest, int sibcall) |
6a73009d | 7458 | { |
a02aa5b0 JDA |
7459 | int delay_insn_deleted = 0; |
7460 | int delay_slot_filled = 0; | |
3d9268b6 | 7461 | int seq_length = dbr_sequence_length (); |
5fad1c24 | 7462 | tree call_decl = SYMBOL_REF_DECL (call_dest); |
ecc418c4 | 7463 | int local_call = call_decl && targetm.binds_local_p (call_decl); |
a02aa5b0 JDA |
7464 | rtx xoperands[2]; |
7465 | ||
7466 | xoperands[0] = call_dest; | |
6a73009d | 7467 | |
a02aa5b0 | 7468 | /* Handle the common case where we're sure that the branch will reach |
5fad1c24 JDA |
7469 | the beginning of the "$CODE$" subspace. This is the beginning of |
7470 | the current function if we are in a named section. */ | |
611ad29e | 7471 | if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8) |
2c4ff308 | 7472 | { |
520babc7 | 7473 | xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2); |
a02aa5b0 | 7474 | output_asm_insn ("{bl|b,l} %0,%1", xoperands); |
279c9bde | 7475 | } |
a02aa5b0 | 7476 | else |
279c9bde | 7477 | { |
5fad1c24 | 7478 | if (TARGET_64BIT && !local_call) |
f726ea7d | 7479 | { |
a02aa5b0 JDA |
7480 | /* ??? As far as I can tell, the HP linker doesn't support the |
7481 | long pc-relative sequence described in the 64-bit runtime | |
7482 | architecture. So, we use a slightly longer indirect call. */ | |
7aaf280e | 7483 | xoperands[0] = get_deferred_plabel (call_dest); |
a02aa5b0 JDA |
7484 | xoperands[1] = gen_label_rtx (); |
7485 | ||
7486 | /* If this isn't a sibcall, we put the load of %r27 into the | |
7487 | delay slot. We can't do this in a sibcall as we don't | |
7488 | have a second call-clobbered scratch register available. */ | |
7489 | if (seq_length != 0 | |
7490 | && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN | |
7491 | && !sibcall) | |
7492 | { | |
7493 | final_scan_insn (NEXT_INSN (insn), asm_out_file, | |
c9d691e9 | 7494 | optimize, 0, NULL); |
a02aa5b0 JDA |
7495 | |
7496 | /* Now delete the delay insn. */ | |
a38e7aa5 | 7497 | SET_INSN_DELETED (NEXT_INSN (insn)); |
a02aa5b0 JDA |
7498 | delay_insn_deleted = 1; |
7499 | } | |
279c9bde | 7500 | |
a02aa5b0 JDA |
7501 | output_asm_insn ("addil LT'%0,%%r27", xoperands); |
7502 | output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands); | |
7503 | output_asm_insn ("ldd 0(%%r1),%%r1", xoperands); | |
279c9bde | 7504 | |
a02aa5b0 | 7505 | if (sibcall) |
279c9bde | 7506 | { |
a02aa5b0 JDA |
7507 | output_asm_insn ("ldd 24(%%r1),%%r27", xoperands); |
7508 | output_asm_insn ("ldd 16(%%r1),%%r1", xoperands); | |
7509 | output_asm_insn ("bve (%%r1)", xoperands); | |
7510 | } | |
7511 | else | |
7512 | { | |
7513 | output_asm_insn ("ldd 16(%%r1),%%r2", xoperands); | |
7514 | output_asm_insn ("bve,l (%%r2),%%r2", xoperands); | |
7515 | output_asm_insn ("ldd 24(%%r1),%%r27", xoperands); | |
7516 | delay_slot_filled = 1; | |
279c9bde JL |
7517 | } |
7518 | } | |
a02aa5b0 | 7519 | else |
93ae92c1 | 7520 | { |
a02aa5b0 JDA |
7521 | int indirect_call = 0; |
7522 | ||
7523 | /* Emit a long call. There are several different sequences | |
7524 | of increasing length and complexity. In most cases, | |
7525 | they don't allow an instruction in the delay slot. */ | |
5fad1c24 | 7526 | if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic) |
9dbd54be | 7527 | && !TARGET_LONG_PIC_SDIFF_CALL |
751d9855 JDA |
7528 | && !(TARGET_GAS && !TARGET_SOM |
7529 | && (TARGET_LONG_PIC_PCREL_CALL || local_call)) | |
5fad1c24 | 7530 | && !TARGET_64BIT) |
a02aa5b0 JDA |
7531 | indirect_call = 1; |
7532 | ||
7533 | if (seq_length != 0 | |
7534 | && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN | |
7535 | && !sibcall | |
44b86471 JDA |
7536 | && (!TARGET_PA_20 |
7537 | || indirect_call | |
7538 | || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic))) | |
359255a9 | 7539 | { |
a02aa5b0 JDA |
7540 | /* A non-jump insn in the delay slot. By definition we can |
7541 | emit this insn before the call (and in fact before argument | |
7542 | relocating. */ | |
c9d691e9 | 7543 | final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0, |
5cfc5f84 | 7544 | NULL); |
a02aa5b0 JDA |
7545 | |
7546 | /* Now delete the delay insn. */ | |
a38e7aa5 | 7547 | SET_INSN_DELETED (NEXT_INSN (insn)); |
a02aa5b0 | 7548 | delay_insn_deleted = 1; |
359255a9 | 7549 | } |
93ae92c1 | 7550 | |
5fad1c24 | 7551 | if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic) |
359255a9 | 7552 | { |
a02aa5b0 JDA |
7553 | /* This is the best sequence for making long calls in |
7554 | non-pic code. Unfortunately, GNU ld doesn't provide | |
7555 | the stub needed for external calls, and GAS's support | |
5fad1c24 JDA |
7556 | for this with the SOM linker is buggy. It is safe |
7557 | to use this for local calls. */ | |
a02aa5b0 JDA |
7558 | output_asm_insn ("ldil L'%0,%%r1", xoperands); |
7559 | if (sibcall) | |
7560 | output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands); | |
7561 | else | |
7562 | { | |
7563 | if (TARGET_PA_20) | |
7564 | output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", | |
7565 | xoperands); | |
7566 | else | |
7567 | output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands); | |
6a73009d | 7568 | |
a02aa5b0 JDA |
7569 | output_asm_insn ("copy %%r31,%%r2", xoperands); |
7570 | delay_slot_filled = 1; | |
7571 | } | |
7572 | } | |
7573 | else | |
7574 | { | |
9dbd54be | 7575 | if (TARGET_LONG_PIC_SDIFF_CALL) |
3d9268b6 | 7576 | { |
a02aa5b0 | 7577 | /* The HP assembler and linker can handle relocations |
9dbd54be JDA |
7578 | for the difference of two symbols. The HP assembler |
7579 | recognizes the sequence as a pc-relative call and | |
7580 | the linker provides stubs when needed. */ | |
a02aa5b0 JDA |
7581 | xoperands[1] = gen_label_rtx (); |
7582 | output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands); | |
7583 | output_asm_insn ("addil L'%0-%l1,%%r1", xoperands); | |
ecc418c4 | 7584 | targetm.asm_out.internal_label (asm_out_file, "L", |
3d9268b6 | 7585 | CODE_LABEL_NUMBER (xoperands[1])); |
a02aa5b0 JDA |
7586 | output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands); |
7587 | } | |
751d9855 JDA |
7588 | else if (TARGET_GAS && !TARGET_SOM |
7589 | && (TARGET_LONG_PIC_PCREL_CALL || local_call)) | |
3d9268b6 | 7590 | { |
a02aa5b0 JDA |
7591 | /* GAS currently can't generate the relocations that |
7592 | are needed for the SOM linker under HP-UX using this | |
7593 | sequence. The GNU linker doesn't generate the stubs | |
7594 | that are needed for external calls on TARGET_ELF32 | |
7595 | with this sequence. For now, we have to use a | |
7596 | longer plabel sequence when using GAS. */ | |
7597 | output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands); | |
7598 | output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", | |
3d9268b6 | 7599 | xoperands); |
a02aa5b0 | 7600 | output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", |
3d9268b6 JDA |
7601 | xoperands); |
7602 | } | |
520babc7 JL |
7603 | else |
7604 | { | |
a02aa5b0 JDA |
7605 | /* Emit a long plabel-based call sequence. This is |
7606 | essentially an inline implementation of $$dyncall. | |
7607 | We don't actually try to call $$dyncall as this is | |
7608 | as difficult as calling the function itself. */ | |
7aaf280e | 7609 | xoperands[0] = get_deferred_plabel (call_dest); |
a02aa5b0 JDA |
7610 | xoperands[1] = gen_label_rtx (); |
7611 | ||
7612 | /* Since the call is indirect, FP arguments in registers | |
7613 | need to be copied to the general registers. Then, the | |
7614 | argument relocation stub will copy them back. */ | |
7615 | if (TARGET_SOM) | |
7616 | copy_fp_args (insn); | |
7617 | ||
7618 | if (flag_pic) | |
7619 | { | |
7620 | output_asm_insn ("addil LT'%0,%%r19", xoperands); | |
7621 | output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands); | |
7622 | output_asm_insn ("ldw 0(%%r1),%%r1", xoperands); | |
7623 | } | |
7624 | else | |
7625 | { | |
7626 | output_asm_insn ("addil LR'%0-$global$,%%r27", | |
7627 | xoperands); | |
7628 | output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1", | |
7629 | xoperands); | |
7630 | } | |
279c9bde | 7631 | |
a02aa5b0 JDA |
7632 | output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands); |
7633 | output_asm_insn ("depi 0,31,2,%%r1", xoperands); | |
7634 | output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands); | |
7635 | output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands); | |
6a73009d | 7636 | |
a02aa5b0 JDA |
7637 | if (!sibcall && !TARGET_PA_20) |
7638 | { | |
7639 | output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands); | |
90330d31 JDA |
7640 | if (TARGET_NO_SPACE_REGS) |
7641 | output_asm_insn ("addi 8,%%r2,%%r2", xoperands); | |
7642 | else | |
7643 | output_asm_insn ("addi 16,%%r2,%%r2", xoperands); | |
a02aa5b0 JDA |
7644 | } |
7645 | } | |
6a73009d | 7646 | |
a02aa5b0 | 7647 | if (TARGET_PA_20) |
520babc7 | 7648 | { |
a02aa5b0 JDA |
7649 | if (sibcall) |
7650 | output_asm_insn ("bve (%%r1)", xoperands); | |
7651 | else | |
7652 | { | |
7653 | if (indirect_call) | |
7654 | { | |
7655 | output_asm_insn ("bve,l (%%r1),%%r2", xoperands); | |
7656 | output_asm_insn ("stw %%r2,-24(%%sp)", xoperands); | |
7657 | delay_slot_filled = 1; | |
7658 | } | |
7659 | else | |
7660 | output_asm_insn ("bve,l (%%r1),%%r2", xoperands); | |
7661 | } | |
520babc7 JL |
7662 | } |
7663 | else | |
7664 | { | |
9dbd54be | 7665 | if (!TARGET_NO_SPACE_REGS && flag_pic) |
90330d31 JDA |
7666 | output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0", |
7667 | xoperands); | |
279c9bde | 7668 | |
a02aa5b0 | 7669 | if (sibcall) |
90330d31 | 7670 | { |
9dbd54be | 7671 | if (TARGET_NO_SPACE_REGS || !flag_pic) |
90330d31 JDA |
7672 | output_asm_insn ("be 0(%%sr4,%%r1)", xoperands); |
7673 | else | |
7674 | output_asm_insn ("be 0(%%sr0,%%r1)", xoperands); | |
7675 | } | |
a02aa5b0 JDA |
7676 | else |
7677 | { | |
9dbd54be | 7678 | if (TARGET_NO_SPACE_REGS || !flag_pic) |
90330d31 JDA |
7679 | output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands); |
7680 | else | |
7681 | output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands); | |
279c9bde | 7682 | |
a02aa5b0 JDA |
7683 | if (indirect_call) |
7684 | output_asm_insn ("stw %%r31,-24(%%sp)", xoperands); | |
7685 | else | |
7686 | output_asm_insn ("copy %%r31,%%r2", xoperands); | |
7687 | delay_slot_filled = 1; | |
7688 | } | |
7689 | } | |
7690 | } | |
279c9bde | 7691 | } |
2c4ff308 | 7692 | } |
23f6f34f | 7693 | |
62910663 | 7694 | if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted)) |
a02aa5b0 | 7695 | output_asm_insn ("nop", xoperands); |
2c4ff308 | 7696 | |
a02aa5b0 JDA |
7697 | /* We are done if there isn't a jump in the delay slot. */ |
7698 | if (seq_length == 0 | |
7699 | || delay_insn_deleted | |
7700 | || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN) | |
7701 | return ""; | |
2c4ff308 | 7702 | |
a02aa5b0 | 7703 | /* A sibcall should never have a branch in the delay slot. */ |
144d51f9 | 7704 | gcc_assert (!sibcall); |
2c4ff308 | 7705 | |
a02aa5b0 JDA |
7706 | /* This call has an unconditional jump in its delay slot. */ |
7707 | xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1); | |
2c4ff308 | 7708 | |
611ad29e | 7709 | if (!delay_slot_filled && INSN_ADDRESSES_SET_P ()) |
2c4ff308 | 7710 | { |
a02aa5b0 JDA |
7711 | /* See if the return address can be adjusted. Use the containing |
7712 | sequence insn's address. */ | |
7713 | rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0))); | |
7714 | int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn)))) | |
7715 | - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8); | |
7716 | ||
7717 | if (VAL_14_BITS_P (distance)) | |
7718 | { | |
7719 | xoperands[1] = gen_label_rtx (); | |
7720 | output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands); | |
ecc418c4 JDA |
7721 | targetm.asm_out.internal_label (asm_out_file, "L", |
7722 | CODE_LABEL_NUMBER (xoperands[1])); | |
a02aa5b0 JDA |
7723 | } |
7724 | else | |
a02aa5b0 | 7725 | output_asm_insn ("nop\n\tb,n %0", xoperands); |
2c4ff308 | 7726 | } |
a02aa5b0 | 7727 | else |
a02aa5b0 | 7728 | output_asm_insn ("b,n %0", xoperands); |
2c4ff308 JL |
7729 | |
7730 | /* Delete the jump. */ | |
a38e7aa5 | 7731 | SET_INSN_DELETED (NEXT_INSN (insn)); |
a02aa5b0 | 7732 | |
2c4ff308 JL |
7733 | return ""; |
7734 | } | |
7735 | ||
611ad29e JDA |
7736 | /* Return the attribute length of the indirect call instruction INSN. |
7737 | The length must match the code generated by output_indirect call. | |
7738 | The returned length includes the delay slot. Currently, the delay | |
7739 | slot of an indirect call sequence is not exposed and it is used by | |
7740 | the sequence itself. */ | |
7741 | ||
7742 | int | |
b7849684 | 7743 | attr_length_indirect_call (rtx insn) |
611ad29e JDA |
7744 | { |
7745 | unsigned long distance = -1; | |
62910663 | 7746 | unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes; |
611ad29e JDA |
7747 | |
7748 | if (INSN_ADDRESSES_SET_P ()) | |
7749 | { | |
5fad1c24 JDA |
7750 | distance = (total + insn_current_reference_address (insn)); |
7751 | if (distance < total) | |
611ad29e JDA |
7752 | distance = -1; |
7753 | } | |
7754 | ||
7755 | if (TARGET_64BIT) | |
7756 | return 12; | |
7757 | ||
7758 | if (TARGET_FAST_INDIRECT_CALLS | |
7759 | || (!TARGET_PORTABLE_RUNTIME | |
40fc2e0b JDA |
7760 | && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000) |
7761 | || distance < 240000))) | |
611ad29e JDA |
7762 | return 8; |
7763 | ||
7764 | if (flag_pic) | |
7765 | return 24; | |
7766 | ||
7767 | if (TARGET_PORTABLE_RUNTIME) | |
7768 | return 20; | |
7769 | ||
7770 | /* Out of reach, can use ble. */ | |
7771 | return 12; | |
7772 | } | |
7773 | ||
7774 | const char * | |
b7849684 | 7775 | output_indirect_call (rtx insn, rtx call_dest) |
611ad29e JDA |
7776 | { |
7777 | rtx xoperands[1]; | |
7778 | ||
7779 | if (TARGET_64BIT) | |
7780 | { | |
7781 | xoperands[0] = call_dest; | |
7782 | output_asm_insn ("ldd 16(%0),%%r2", xoperands); | |
7783 | output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands); | |
7784 | return ""; | |
7785 | } | |
7786 | ||
7787 | /* First the special case for kernels, level 0 systems, etc. */ | |
7788 | if (TARGET_FAST_INDIRECT_CALLS) | |
7789 | return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2"; | |
7790 | ||
7791 | /* Now the normal case -- we can reach $$dyncall directly or | |
7792 | we're sure that we can get there via a long-branch stub. | |
7793 | ||
7794 | No need to check target flags as the length uniquely identifies | |
7795 | the remaining cases. */ | |
7796 | if (attr_length_indirect_call (insn) == 8) | |
2c774817 | 7797 | { |
40fc2e0b JDA |
7798 | /* The HP linker sometimes substitutes a BLE for BL/B,L calls to |
7799 | $$dyncall. Since BLE uses %r31 as the link register, the 22-bit | |
7800 | variant of the B,L instruction can't be used on the SOM target. */ | |
7801 | if (TARGET_PA_20 && !TARGET_SOM) | |
2c774817 JDA |
7802 | return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31"; |
7803 | else | |
7804 | return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2"; | |
7805 | } | |
611ad29e JDA |
7806 | |
7807 | /* Long millicode call, but we are not generating PIC or portable runtime | |
7808 | code. */ | |
7809 | if (attr_length_indirect_call (insn) == 12) | |
7810 | return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2"; | |
7811 | ||
7812 | /* Long millicode call for portable runtime. */ | |
7813 | if (attr_length_indirect_call (insn) == 20) | |
7814 | return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop"; | |
7815 | ||
7816 | /* We need a long PIC call to $$dyncall. */ | |
7817 | xoperands[0] = NULL_RTX; | |
7818 | output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands); | |
7819 | if (TARGET_SOM || !TARGET_GAS) | |
7820 | { | |
7821 | xoperands[0] = gen_label_rtx (); | |
7822 | output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands); | |
ecc418c4 JDA |
7823 | targetm.asm_out.internal_label (asm_out_file, "L", |
7824 | CODE_LABEL_NUMBER (xoperands[0])); | |
611ad29e JDA |
7825 | output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands); |
7826 | } | |
7827 | else | |
7828 | { | |
7829 | output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands); | |
7830 | output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1", | |
7831 | xoperands); | |
7832 | } | |
7833 | output_asm_insn ("blr %%r0,%%r2", xoperands); | |
7834 | output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands); | |
7835 | return ""; | |
7836 | } | |
7837 | ||
7838 | /* Return the total length of the save and restore instructions needed for | |
7839 | the data linkage table pointer (i.e., the PIC register) across the call | |
7840 | instruction INSN. No-return calls do not require a save and restore. | |
7841 | In addition, we may be able to avoid the save and restore for calls | |
7842 | within the same translation unit. */ | |
7843 | ||
7844 | int | |
b7849684 | 7845 | attr_length_save_restore_dltp (rtx insn) |
611ad29e JDA |
7846 | { |
7847 | if (find_reg_note (insn, REG_NORETURN, NULL_RTX)) | |
7848 | return 0; | |
7849 | ||
7850 | return 8; | |
7851 | } | |
7852 | ||
d2a94ec0 | 7853 | /* In HPUX 8.0's shared library scheme, special relocations are needed |
23f6f34f | 7854 | for function labels if they might be passed to a function |
d2a94ec0 | 7855 | in a shared library (because shared libraries don't live in code |
520a57c8 | 7856 | space), and special magic is needed to construct their address. */ |
d2a94ec0 TM |
7857 | |
7858 | void | |
b7849684 | 7859 | hppa_encode_label (rtx sym) |
d2a94ec0 | 7860 | { |
519104fe | 7861 | const char *str = XSTR (sym, 0); |
10d17cb7 AM |
7862 | int len = strlen (str) + 1; |
7863 | char *newstr, *p; | |
d2a94ec0 | 7864 | |
5ead67f6 | 7865 | p = newstr = XALLOCAVEC (char, len + 1); |
10d17cb7 AM |
7866 | *p++ = '@'; |
7867 | strcpy (p, str); | |
67d6f2fc | 7868 | |
831c1763 | 7869 | XSTR (sym, 0) = ggc_alloc_string (newstr, len); |
d2a94ec0 | 7870 | } |
23f6f34f | 7871 | |
fb49053f | 7872 | static void |
b7849684 | 7873 | pa_encode_section_info (tree decl, rtx rtl, int first) |
fb49053f | 7874 | { |
9a60b229 JJ |
7875 | int old_referenced = 0; |
7876 | ||
7877 | if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF) | |
7878 | old_referenced | |
7879 | = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED; | |
7880 | ||
51076f96 RC |
7881 | default_encode_section_info (decl, rtl, first); |
7882 | ||
fb49053f RH |
7883 | if (first && TEXT_SPACE_P (decl)) |
7884 | { | |
fb49053f RH |
7885 | SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; |
7886 | if (TREE_CODE (decl) == FUNCTION_DECL) | |
c6a2438a | 7887 | hppa_encode_label (XEXP (rtl, 0)); |
fb49053f | 7888 | } |
9a60b229 JJ |
7889 | else if (old_referenced) |
7890 | SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced; | |
fb49053f RH |
7891 | } |
7892 | ||
772c5265 RH |
7893 | /* This is sort of inverse to pa_encode_section_info. */ |
7894 | ||
7895 | static const char * | |
b7849684 | 7896 | pa_strip_name_encoding (const char *str) |
772c5265 | 7897 | { |
7830ba7b JDA |
7898 | str += (*str == '@'); |
7899 | str += (*str == '*'); | |
7900 | return str; | |
772c5265 RH |
7901 | } |
7902 | ||
d2a94ec0 | 7903 | int |
b7849684 | 7904 | function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
d2a94ec0 | 7905 | { |
e5d4ff05 | 7906 | return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0)); |
d2a94ec0 | 7907 | } |
9bb77117 | 7908 | |
326bc2de JL |
7909 | /* Returns 1 if OP is a function label involved in a simple addition |
7910 | with a constant. Used to keep certain patterns from matching | |
7911 | during instruction combination. */ | |
7912 | int | |
b7849684 | 7913 | is_function_label_plus_const (rtx op) |
326bc2de JL |
7914 | { |
7915 | /* Strip off any CONST. */ | |
7916 | if (GET_CODE (op) == CONST) | |
7917 | op = XEXP (op, 0); | |
7918 | ||
7919 | return (GET_CODE (op) == PLUS | |
7920 | && function_label_operand (XEXP (op, 0), Pmode) | |
7921 | && GET_CODE (XEXP (op, 1)) == CONST_INT); | |
7922 | } | |
7923 | ||
54374491 JL |
7924 | /* Output assembly code for a thunk to FUNCTION. */ |
7925 | ||
c590b625 | 7926 | static void |
b7849684 JE |
7927 | pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta, |
7928 | HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED, | |
7929 | tree function) | |
54374491 | 7930 | { |
cdcb88d7 | 7931 | static unsigned int current_thunk_number; |
5fad1c24 | 7932 | int val_14 = VAL_14_BITS_P (delta); |
67b846fa | 7933 | unsigned int old_last_address = last_address, nbytes = 0; |
54374491 | 7934 | char label[16]; |
cdcb88d7 | 7935 | rtx xoperands[4]; |
5fad1c24 | 7936 | |
cdcb88d7 JDA |
7937 | xoperands[0] = XEXP (DECL_RTL (function), 0); |
7938 | xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0); | |
7939 | xoperands[2] = GEN_INT (delta); | |
5fad1c24 | 7940 | |
cdcb88d7 JDA |
7941 | ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0)); |
7942 | fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n"); | |
5fad1c24 JDA |
7943 | |
7944 | /* Output the thunk. We know that the function is in the same | |
7945 | translation unit (i.e., the same space) as the thunk, and that | |
7946 | thunks are output after their method. Thus, we don't need an | |
7947 | external branch to reach the function. With SOM and GAS, | |
7948 | functions and thunks are effectively in different sections. | |
7949 | Thus, we can always use a IA-relative branch and the linker | |
7950 | will add a long branch stub if necessary. | |
7951 | ||
7952 | However, we have to be careful when generating PIC code on the | |
7953 | SOM port to ensure that the sequence does not transfer to an | |
7954 | import stub for the target function as this could clobber the | |
7955 | return value saved at SP-24. This would also apply to the | |
7956 | 32-bit linux port if the multi-space model is implemented. */ | |
7957 | if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME | |
7958 | && !(flag_pic && TREE_PUBLIC (function)) | |
7959 | && (TARGET_GAS || last_address < 262132)) | |
7960 | || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME | |
7961 | && ((targetm.have_named_sections | |
7962 | && DECL_SECTION_NAME (thunk_fndecl) != NULL | |
7963 | /* The GNU 64-bit linker has rather poor stub management. | |
7964 | So, we use a long branch from thunks that aren't in | |
7965 | the same section as the target function. */ | |
7966 | && ((!TARGET_64BIT | |
7967 | && (DECL_SECTION_NAME (thunk_fndecl) | |
7968 | != DECL_SECTION_NAME (function))) | |
7969 | || ((DECL_SECTION_NAME (thunk_fndecl) | |
7970 | == DECL_SECTION_NAME (function)) | |
7971 | && last_address < 262132))) | |
2842bb86 JDA |
7972 | || (targetm.have_named_sections |
7973 | && DECL_SECTION_NAME (thunk_fndecl) == NULL | |
7974 | && DECL_SECTION_NAME (function) == NULL | |
7975 | && last_address < 262132) | |
5fad1c24 JDA |
7976 | || (!targetm.have_named_sections && last_address < 262132)))) |
7977 | { | |
cdcb88d7 JDA |
7978 | if (!val_14) |
7979 | output_asm_insn ("addil L'%2,%%r26", xoperands); | |
7980 | ||
7981 | output_asm_insn ("b %0", xoperands); | |
7982 | ||
5fad1c24 JDA |
7983 | if (val_14) |
7984 | { | |
cdcb88d7 | 7985 | output_asm_insn ("ldo %2(%%r26),%%r26", xoperands); |
5fad1c24 JDA |
7986 | nbytes += 8; |
7987 | } | |
7988 | else | |
7989 | { | |
cdcb88d7 | 7990 | output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands); |
5fad1c24 JDA |
7991 | nbytes += 12; |
7992 | } | |
7993 | } | |
7994 | else if (TARGET_64BIT) | |
7995 | { | |
7996 | /* We only have one call-clobbered scratch register, so we can't | |
7997 | make use of the delay slot if delta doesn't fit in 14 bits. */ | |
7998 | if (!val_14) | |
cdcb88d7 JDA |
7999 | { |
8000 | output_asm_insn ("addil L'%2,%%r26", xoperands); | |
8001 | output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands); | |
8002 | } | |
5fad1c24 | 8003 | |
cdcb88d7 | 8004 | output_asm_insn ("b,l .+8,%%r1", xoperands); |
5fad1c24 JDA |
8005 | |
8006 | if (TARGET_GAS) | |
8007 | { | |
cdcb88d7 JDA |
8008 | output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands); |
8009 | output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands); | |
5fad1c24 JDA |
8010 | } |
8011 | else | |
8012 | { | |
cdcb88d7 JDA |
8013 | xoperands[3] = GEN_INT (val_14 ? 8 : 16); |
8014 | output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands); | |
5fad1c24 JDA |
8015 | } |
8016 | ||
8017 | if (val_14) | |
8018 | { | |
cdcb88d7 JDA |
8019 | output_asm_insn ("bv %%r0(%%r1)", xoperands); |
8020 | output_asm_insn ("ldo %2(%%r26),%%r26", xoperands); | |
5fad1c24 JDA |
8021 | nbytes += 20; |
8022 | } | |
8023 | else | |
8024 | { | |
cdcb88d7 | 8025 | output_asm_insn ("bv,n %%r0(%%r1)", xoperands); |
5fad1c24 JDA |
8026 | nbytes += 24; |
8027 | } | |
8028 | } | |
8029 | else if (TARGET_PORTABLE_RUNTIME) | |
8030 | { | |
cdcb88d7 JDA |
8031 | output_asm_insn ("ldil L'%0,%%r1", xoperands); |
8032 | output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands); | |
8033 | ||
8034 | if (!val_14) | |
8035 | output_asm_insn ("addil L'%2,%%r26", xoperands); | |
8036 | ||
8037 | output_asm_insn ("bv %%r0(%%r22)", xoperands); | |
5fad1c24 JDA |
8038 | |
8039 | if (val_14) | |
8040 | { | |
cdcb88d7 | 8041 | output_asm_insn ("ldo %2(%%r26),%%r26", xoperands); |
5fad1c24 JDA |
8042 | nbytes += 16; |
8043 | } | |
8044 | else | |
8045 | { | |
cdcb88d7 | 8046 | output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands); |
5fad1c24 JDA |
8047 | nbytes += 20; |
8048 | } | |
8049 | } | |
8050 | else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function)) | |
8051 | { | |
8052 | /* The function is accessible from outside this module. The only | |
8053 | way to avoid an import stub between the thunk and function is to | |
8054 | call the function directly with an indirect sequence similar to | |
8055 | that used by $$dyncall. This is possible because $$dyncall acts | |
8056 | as the import stub in an indirect call. */ | |
5fad1c24 | 8057 | ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number); |
cdcb88d7 JDA |
8058 | xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label); |
8059 | output_asm_insn ("addil LT'%3,%%r19", xoperands); | |
8060 | output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands); | |
8061 | output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands); | |
8062 | output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands); | |
8063 | output_asm_insn ("depi 0,31,2,%%r22", xoperands); | |
8064 | output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands); | |
8065 | output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands); | |
8066 | ||
5fad1c24 JDA |
8067 | if (!val_14) |
8068 | { | |
cdcb88d7 | 8069 | output_asm_insn ("addil L'%2,%%r26", xoperands); |
5fad1c24 JDA |
8070 | nbytes += 4; |
8071 | } | |
cdcb88d7 | 8072 | |
5fad1c24 JDA |
8073 | if (TARGET_PA_20) |
8074 | { | |
cdcb88d7 JDA |
8075 | output_asm_insn ("bve (%%r22)", xoperands); |
8076 | nbytes += 36; | |
8077 | } | |
8078 | else if (TARGET_NO_SPACE_REGS) | |
8079 | { | |
8080 | output_asm_insn ("be 0(%%sr4,%%r22)", xoperands); | |
5fad1c24 JDA |
8081 | nbytes += 36; |
8082 | } | |
8083 | else | |
54374491 | 8084 | { |
cdcb88d7 JDA |
8085 | output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands); |
8086 | output_asm_insn ("mtsp %%r21,%%sr0", xoperands); | |
8087 | output_asm_insn ("be 0(%%sr0,%%r22)", xoperands); | |
8088 | nbytes += 44; | |
5fad1c24 JDA |
8089 | } |
8090 | ||
8091 | if (val_14) | |
cdcb88d7 | 8092 | output_asm_insn ("ldo %2(%%r26),%%r26", xoperands); |
5fad1c24 | 8093 | else |
cdcb88d7 | 8094 | output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands); |
5fad1c24 JDA |
8095 | } |
8096 | else if (flag_pic) | |
8097 | { | |
cdcb88d7 | 8098 | output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands); |
5fad1c24 JDA |
8099 | |
8100 | if (TARGET_SOM || !TARGET_GAS) | |
8101 | { | |
cdcb88d7 JDA |
8102 | output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands); |
8103 | output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands); | |
5fad1c24 JDA |
8104 | } |
8105 | else | |
8106 | { | |
cdcb88d7 JDA |
8107 | output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands); |
8108 | output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands); | |
5fad1c24 JDA |
8109 | } |
8110 | ||
cdcb88d7 JDA |
8111 | if (!val_14) |
8112 | output_asm_insn ("addil L'%2,%%r26", xoperands); | |
8113 | ||
8114 | output_asm_insn ("bv %%r0(%%r22)", xoperands); | |
8115 | ||
5fad1c24 JDA |
8116 | if (val_14) |
8117 | { | |
cdcb88d7 | 8118 | output_asm_insn ("ldo %2(%%r26),%%r26", xoperands); |
5fad1c24 | 8119 | nbytes += 20; |
54374491 JL |
8120 | } |
8121 | else | |
5fad1c24 | 8122 | { |
cdcb88d7 | 8123 | output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands); |
5fad1c24 JDA |
8124 | nbytes += 24; |
8125 | } | |
54374491 JL |
8126 | } |
8127 | else | |
8128 | { | |
5fad1c24 | 8129 | if (!val_14) |
cdcb88d7 | 8130 | output_asm_insn ("addil L'%2,%%r26", xoperands); |
5fad1c24 | 8131 | |
cdcb88d7 JDA |
8132 | output_asm_insn ("ldil L'%0,%%r22", xoperands); |
8133 | output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands); | |
5fad1c24 JDA |
8134 | |
8135 | if (val_14) | |
54374491 | 8136 | { |
cdcb88d7 | 8137 | output_asm_insn ("ldo %2(%%r26),%%r26", xoperands); |
5fad1c24 | 8138 | nbytes += 12; |
54374491 JL |
8139 | } |
8140 | else | |
5fad1c24 | 8141 | { |
cdcb88d7 | 8142 | output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands); |
5fad1c24 JDA |
8143 | nbytes += 16; |
8144 | } | |
54374491 | 8145 | } |
5fad1c24 | 8146 | |
54374491 | 8147 | fprintf (file, "\t.EXIT\n\t.PROCEND\n"); |
5fad1c24 | 8148 | |
1a83bfc3 JDA |
8149 | if (TARGET_SOM && TARGET_GAS) |
8150 | { | |
8151 | /* We done with this subspace except possibly for some additional | |
8152 | debug information. Forget that we are in this subspace to ensure | |
8153 | that the next function is output in its own subspace. */ | |
8154 | in_section = NULL; | |
8155 | cfun->machine->in_nsubspa = 2; | |
8156 | } | |
8157 | ||
5fad1c24 | 8158 | if (TARGET_SOM && flag_pic && TREE_PUBLIC (function)) |
54374491 | 8159 | { |
d6b5193b | 8160 | switch_to_section (data_section); |
cdcb88d7 | 8161 | output_asm_insn (".align 4", xoperands); |
5fad1c24 | 8162 | ASM_OUTPUT_LABEL (file, label); |
cdcb88d7 | 8163 | output_asm_insn (".word P'%0", xoperands); |
54374491 | 8164 | } |
5fad1c24 | 8165 | |
54374491 | 8166 | current_thunk_number++; |
5fad1c24 JDA |
8167 | nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1) |
8168 | & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)); | |
8169 | last_address += nbytes; | |
67b846fa JDA |
8170 | if (old_last_address > last_address) |
8171 | last_address = UINT_MAX; | |
5fad1c24 | 8172 | update_total_code_bytes (nbytes); |
54374491 JL |
8173 | } |
8174 | ||
4977bab6 ZW |
8175 | /* Only direct calls to static functions are allowed to be sibling (tail) |
8176 | call optimized. | |
8177 | ||
8178 | This restriction is necessary because some linker generated stubs will | |
8179 | store return pointers into rp' in some cases which might clobber a | |
8180 | live value already in rp'. | |
8181 | ||
8182 | In a sibcall the current function and the target function share stack | |
8183 | space. Thus if the path to the current function and the path to the | |
8184 | target function save a value in rp', they save the value into the | |
8185 | same stack slot, which has undesirable consequences. | |
8186 | ||
8187 | Because of the deferred binding nature of shared libraries any function | |
8188 | with external scope could be in a different load module and thus require | |
8189 | rp' to be saved when calling that function. So sibcall optimizations | |
8190 | can only be safe for static function. | |
8191 | ||
8192 | Note that GCC never needs return value relocations, so we don't have to | |
8193 | worry about static calls with return value relocations (which require | |
8194 | saving rp'). | |
8195 | ||
8196 | It is safe to perform a sibcall optimization when the target function | |
8197 | will never return. */ | |
8198 | static bool | |
b7849684 | 8199 | pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) |
4977bab6 | 8200 | { |
73096ba9 JDA |
8201 | if (TARGET_PORTABLE_RUNTIME) |
8202 | return false; | |
8203 | ||
11f43127 JDA |
8204 | /* Sibcalls are ok for TARGET_ELF32 as along as the linker is used in |
8205 | single subspace mode and the call is not indirect. As far as I know, | |
8206 | there is no operating system support for the multiple subspace mode. | |
8207 | It might be possible to support indirect calls if we didn't use | |
8208 | $$dyncall (see the indirect sequence generated in output_call). */ | |
8209 | if (TARGET_ELF32) | |
8210 | return (decl != NULL_TREE); | |
8211 | ||
8212 | /* Sibcalls are not ok because the arg pointer register is not a fixed | |
c1207243 | 8213 | register. This prevents the sibcall optimization from occurring. In |
11f43127 JDA |
8214 | addition, there are problems with stub placement using GNU ld. This |
8215 | is because a normal sibcall branch uses a 17-bit relocation while | |
8216 | a regular call branch uses a 22-bit relocation. As a result, more | |
8217 | care needs to be taken in the placement of long-branch stubs. */ | |
8218 | if (TARGET_64BIT) | |
8219 | return false; | |
8220 | ||
73096ba9 JDA |
8221 | /* Sibcalls are only ok within a translation unit. */ |
8222 | return (decl && !TREE_PUBLIC (decl)); | |
4977bab6 ZW |
8223 | } |
8224 | ||
8ddf681a R |
8225 | /* ??? Addition is not commutative on the PA due to the weird implicit |
8226 | space register selection rules for memory addresses. Therefore, we | |
8227 | don't consider a + b == b + a, as this might be inside a MEM. */ | |
8228 | static bool | |
3101faab | 8229 | pa_commutative_p (const_rtx x, int outer_code) |
8ddf681a R |
8230 | { |
8231 | return (COMMUTATIVE_P (x) | |
bd7d5043 JDA |
8232 | && (TARGET_NO_SPACE_REGS |
8233 | || (outer_code != UNKNOWN && outer_code != MEM) | |
8ddf681a R |
8234 | || GET_CODE (x) != PLUS)); |
8235 | } | |
8236 | ||
88e5c029 JL |
8237 | /* Returns 1 if the 6 operands specified in OPERANDS are suitable for |
8238 | use in fmpyadd instructions. */ | |
2fe24884 | 8239 | int |
b7849684 | 8240 | fmpyaddoperands (rtx *operands) |
2fe24884 | 8241 | { |
f133af4c | 8242 | enum machine_mode mode = GET_MODE (operands[0]); |
2fe24884 | 8243 | |
d85ab966 JL |
8244 | /* Must be a floating point mode. */ |
8245 | if (mode != SFmode && mode != DFmode) | |
8246 | return 0; | |
8247 | ||
2fe24884 | 8248 | /* All modes must be the same. */ |
f133af4c TG |
8249 | if (! (mode == GET_MODE (operands[1]) |
8250 | && mode == GET_MODE (operands[2]) | |
8251 | && mode == GET_MODE (operands[3]) | |
8252 | && mode == GET_MODE (operands[4]) | |
8253 | && mode == GET_MODE (operands[5]))) | |
2fe24884 JL |
8254 | return 0; |
8255 | ||
d85ab966 JL |
8256 | /* All operands must be registers. */ |
8257 | if (! (GET_CODE (operands[1]) == REG | |
8258 | && GET_CODE (operands[2]) == REG | |
8259 | && GET_CODE (operands[3]) == REG | |
8260 | && GET_CODE (operands[4]) == REG | |
8261 | && GET_CODE (operands[5]) == REG)) | |
2fe24884 JL |
8262 | return 0; |
8263 | ||
88e5c029 JL |
8264 | /* Only 2 real operands to the addition. One of the input operands must |
8265 | be the same as the output operand. */ | |
2fe24884 JL |
8266 | if (! rtx_equal_p (operands[3], operands[4]) |
8267 | && ! rtx_equal_p (operands[3], operands[5])) | |
8268 | return 0; | |
8269 | ||
1e5f1716 | 8270 | /* Inout operand of add cannot conflict with any operands from multiply. */ |
2fe24884 JL |
8271 | if (rtx_equal_p (operands[3], operands[0]) |
8272 | || rtx_equal_p (operands[3], operands[1]) | |
8273 | || rtx_equal_p (operands[3], operands[2])) | |
8274 | return 0; | |
8275 | ||
1e5f1716 | 8276 | /* multiply cannot feed into addition operands. */ |
2fe24884 JL |
8277 | if (rtx_equal_p (operands[4], operands[0]) |
8278 | || rtx_equal_p (operands[5], operands[0])) | |
8279 | return 0; | |
8280 | ||
d85ab966 JL |
8281 | /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */ |
8282 | if (mode == SFmode | |
88624c0e JL |
8283 | && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS |
8284 | || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS | |
8285 | || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS | |
8286 | || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS | |
8287 | || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS | |
8288 | || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS)) | |
d85ab966 JL |
8289 | return 0; |
8290 | ||
2fe24884 JL |
8291 | /* Passed. Operands are suitable for fmpyadd. */ |
8292 | return 1; | |
8293 | } | |
8294 | ||
35d434ed JDA |
8295 | #if !defined(USE_COLLECT2) |
8296 | static void | |
b7849684 | 8297 | pa_asm_out_constructor (rtx symbol, int priority) |
35d434ed JDA |
8298 | { |
8299 | if (!function_label_operand (symbol, VOIDmode)) | |
8300 | hppa_encode_label (symbol); | |
8301 | ||
8302 | #ifdef CTORS_SECTION_ASM_OP | |
8303 | default_ctor_section_asm_out_constructor (symbol, priority); | |
8304 | #else | |
8305 | # ifdef TARGET_ASM_NAMED_SECTION | |
8306 | default_named_section_asm_out_constructor (symbol, priority); | |
8307 | # else | |
8308 | default_stabs_asm_out_constructor (symbol, priority); | |
8309 | # endif | |
8310 | #endif | |
8311 | } | |
8312 | ||
8313 | static void | |
b7849684 | 8314 | pa_asm_out_destructor (rtx symbol, int priority) |
35d434ed JDA |
8315 | { |
8316 | if (!function_label_operand (symbol, VOIDmode)) | |
8317 | hppa_encode_label (symbol); | |
8318 | ||
8319 | #ifdef DTORS_SECTION_ASM_OP | |
8320 | default_dtor_section_asm_out_destructor (symbol, priority); | |
8321 | #else | |
8322 | # ifdef TARGET_ASM_NAMED_SECTION | |
8323 | default_named_section_asm_out_destructor (symbol, priority); | |
8324 | # else | |
8325 | default_stabs_asm_out_destructor (symbol, priority); | |
8326 | # endif | |
8327 | #endif | |
8328 | } | |
8329 | #endif | |
8330 | ||
d4482715 JDA |
8331 | /* This function places uninitialized global data in the bss section. |
8332 | The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this | |
8333 | function on the SOM port to prevent uninitialized global data from | |
8334 | being placed in the data section. */ | |
8335 | ||
8336 | void | |
8337 | pa_asm_output_aligned_bss (FILE *stream, | |
8338 | const char *name, | |
8339 | unsigned HOST_WIDE_INT size, | |
8340 | unsigned int align) | |
8341 | { | |
d6b5193b | 8342 | switch_to_section (bss_section); |
d4482715 JDA |
8343 | fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT); |
8344 | ||
8345 | #ifdef ASM_OUTPUT_TYPE_DIRECTIVE | |
8346 | ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object"); | |
8347 | #endif | |
8348 | ||
8349 | #ifdef ASM_OUTPUT_SIZE_DIRECTIVE | |
8350 | ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); | |
8351 | #endif | |
8352 | ||
8353 | fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT); | |
8354 | ASM_OUTPUT_LABEL (stream, name); | |
8355 | fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size); | |
8356 | } | |
8357 | ||
8358 | /* Both the HP and GNU assemblers under HP-UX provide a .comm directive | |
8359 | that doesn't allow the alignment of global common storage to be directly | |
8360 | specified. The SOM linker aligns common storage based on the rounded | |
8361 | value of the NUM_BYTES parameter in the .comm directive. It's not | |
8362 | possible to use the .align directive as it doesn't affect the alignment | |
8363 | of the label associated with a .comm directive. */ | |
8364 | ||
8365 | void | |
8366 | pa_asm_output_aligned_common (FILE *stream, | |
8367 | const char *name, | |
8368 | unsigned HOST_WIDE_INT size, | |
8369 | unsigned int align) | |
8370 | { | |
22f549fd JDA |
8371 | unsigned int max_common_align; |
8372 | ||
8373 | max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64); | |
8374 | if (align > max_common_align) | |
8375 | { | |
d4ee4d25 | 8376 | warning (0, "alignment (%u) for %s exceeds maximum alignment " |
22f549fd JDA |
8377 | "for global common data. Using %u", |
8378 | align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT); | |
8379 | align = max_common_align; | |
8380 | } | |
8381 | ||
d6b5193b | 8382 | switch_to_section (bss_section); |
d4482715 JDA |
8383 | |
8384 | assemble_name (stream, name); | |
8385 | fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n", | |
8386 | MAX (size, align / BITS_PER_UNIT)); | |
8387 | } | |
8388 | ||
8389 | /* We can't use .comm for local common storage as the SOM linker effectively | |
8390 | treats the symbol as universal and uses the same storage for local symbols | |
8391 | with the same name in different object files. The .block directive | |
8392 | reserves an uninitialized block of storage. However, it's not common | |
8393 | storage. Fortunately, GCC never requests common storage with the same | |
8394 | name in any given translation unit. */ | |
8395 | ||
8396 | void | |
8397 | pa_asm_output_aligned_local (FILE *stream, | |
8398 | const char *name, | |
8399 | unsigned HOST_WIDE_INT size, | |
8400 | unsigned int align) | |
8401 | { | |
d6b5193b | 8402 | switch_to_section (bss_section); |
d4482715 JDA |
8403 | fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT); |
8404 | ||
8405 | #ifdef LOCAL_ASM_OP | |
8406 | fprintf (stream, "%s", LOCAL_ASM_OP); | |
8407 | assemble_name (stream, name); | |
8408 | fprintf (stream, "\n"); | |
8409 | #endif | |
8410 | ||
8411 | ASM_OUTPUT_LABEL (stream, name); | |
8412 | fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size); | |
8413 | } | |
8414 | ||
88e5c029 JL |
8415 | /* Returns 1 if the 6 operands specified in OPERANDS are suitable for |
8416 | use in fmpysub instructions. */ | |
2fe24884 | 8417 | int |
b7849684 | 8418 | fmpysuboperands (rtx *operands) |
2fe24884 | 8419 | { |
f133af4c | 8420 | enum machine_mode mode = GET_MODE (operands[0]); |
2fe24884 | 8421 | |
d85ab966 JL |
8422 | /* Must be a floating point mode. */ |
8423 | if (mode != SFmode && mode != DFmode) | |
8424 | return 0; | |
8425 | ||
2fe24884 | 8426 | /* All modes must be the same. */ |
f133af4c TG |
8427 | if (! (mode == GET_MODE (operands[1]) |
8428 | && mode == GET_MODE (operands[2]) | |
8429 | && mode == GET_MODE (operands[3]) | |
8430 | && mode == GET_MODE (operands[4]) | |
8431 | && mode == GET_MODE (operands[5]))) | |
2fe24884 JL |
8432 | return 0; |
8433 | ||
d85ab966 JL |
8434 | /* All operands must be registers. */ |
8435 | if (! (GET_CODE (operands[1]) == REG | |
8436 | && GET_CODE (operands[2]) == REG | |
8437 | && GET_CODE (operands[3]) == REG | |
8438 | && GET_CODE (operands[4]) == REG | |
8439 | && GET_CODE (operands[5]) == REG)) | |
2fe24884 JL |
8440 | return 0; |
8441 | ||
88e5c029 JL |
8442 | /* Only 2 real operands to the subtraction. Subtraction is not a commutative |
8443 | operation, so operands[4] must be the same as operand[3]. */ | |
2fe24884 JL |
8444 | if (! rtx_equal_p (operands[3], operands[4])) |
8445 | return 0; | |
8446 | ||
1e5f1716 | 8447 | /* multiply cannot feed into subtraction. */ |
88e5c029 | 8448 | if (rtx_equal_p (operands[5], operands[0])) |
2fe24884 JL |
8449 | return 0; |
8450 | ||
1e5f1716 | 8451 | /* Inout operand of sub cannot conflict with any operands from multiply. */ |
2fe24884 JL |
8452 | if (rtx_equal_p (operands[3], operands[0]) |
8453 | || rtx_equal_p (operands[3], operands[1]) | |
8454 | || rtx_equal_p (operands[3], operands[2])) | |
8455 | return 0; | |
8456 | ||
d85ab966 JL |
8457 | /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */ |
8458 | if (mode == SFmode | |
88624c0e JL |
8459 | && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS |
8460 | || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS | |
8461 | || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS | |
8462 | || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS | |
8463 | || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS | |
8464 | || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS)) | |
d85ab966 JL |
8465 | return 0; |
8466 | ||
2fe24884 JL |
8467 | /* Passed. Operands are suitable for fmpysub. */ |
8468 | return 1; | |
8469 | } | |
8470 | ||
c2264220 JL |
8471 | /* Return 1 if the given constant is 2, 4, or 8. These are the valid |
8472 | constants for shadd instructions. */ | |
c9a88190 | 8473 | int |
b7849684 | 8474 | shadd_constant_p (int val) |
c2264220 JL |
8475 | { |
8476 | if (val == 2 || val == 4 || val == 8) | |
8477 | return 1; | |
8478 | else | |
8479 | return 0; | |
8480 | } | |
4802a0d6 | 8481 | |
d8f95bed JDA |
8482 | /* Return 1 if OP is valid as a base or index register in a |
8483 | REG+REG address. */ | |
68944452 JL |
8484 | |
8485 | int | |
d8f95bed | 8486 | borx_reg_operand (rtx op, enum machine_mode mode) |
68944452 | 8487 | { |
d8f95bed | 8488 | if (GET_CODE (op) != REG) |
31d4f31f JL |
8489 | return 0; |
8490 | ||
d8f95bed JDA |
8491 | /* We must reject virtual registers as the only expressions that |
8492 | can be instantiated are REG and REG+CONST. */ | |
8493 | if (op == virtual_incoming_args_rtx | |
8494 | || op == virtual_stack_vars_rtx | |
8495 | || op == virtual_stack_dynamic_rtx | |
8496 | || op == virtual_outgoing_args_rtx | |
8497 | || op == virtual_cfa_rtx) | |
8498 | return 0; | |
68944452 | 8499 | |
31d4f31f | 8500 | /* While it's always safe to index off the frame pointer, it's not |
d8f95bed JDA |
8501 | profitable to do so when the frame pointer is being eliminated. */ |
8502 | if (!reload_completed | |
8503 | && flag_omit_frame_pointer | |
e3b5732b | 8504 | && !cfun->calls_alloca |
d8f95bed JDA |
8505 | && op == frame_pointer_rtx) |
8506 | return 0; | |
68944452 | 8507 | |
d8f95bed | 8508 | return register_operand (op, mode); |
68944452 JL |
8509 | } |
8510 | ||
8a149902 RK |
8511 | /* Return 1 if this operand is anything other than a hard register. */ |
8512 | ||
8513 | int | |
b7849684 | 8514 | non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
8a149902 RK |
8515 | { |
8516 | return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER); | |
8517 | } | |
8518 | ||
b9821af8 | 8519 | /* Return 1 if INSN branches forward. Should be using insn_addresses |
fe19a83d | 8520 | to avoid walking through all the insns... */ |
51723711 | 8521 | static int |
b7849684 | 8522 | forward_branch_p (rtx insn) |
b9821af8 JL |
8523 | { |
8524 | rtx label = JUMP_LABEL (insn); | |
8525 | ||
8526 | while (insn) | |
8527 | { | |
8528 | if (insn == label) | |
8529 | break; | |
8530 | else | |
8531 | insn = NEXT_INSN (insn); | |
8532 | } | |
8533 | ||
8534 | return (insn == label); | |
8535 | } | |
8536 | ||
b1a275e1 JL |
8537 | /* Return 1 if OP is an equality comparison, else return 0. */ |
8538 | int | |
b7849684 | 8539 | eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) |
b1a275e1 JL |
8540 | { |
8541 | return (GET_CODE (op) == EQ || GET_CODE (op) == NE); | |
8542 | } | |
8543 | ||
2c4ff308 JL |
8544 | /* Return 1 if INSN is in the delay slot of a call instruction. */ |
8545 | int | |
b7849684 | 8546 | jump_in_call_delay (rtx insn) |
2c4ff308 JL |
8547 | { |
8548 | ||
8549 | if (GET_CODE (insn) != JUMP_INSN) | |
8550 | return 0; | |
8551 | ||
8552 | if (PREV_INSN (insn) | |
8553 | && PREV_INSN (PREV_INSN (insn)) | |
02a57c73 | 8554 | && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN) |
2c4ff308 | 8555 | { |
02a57c73 | 8556 | rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn))); |
2c4ff308 JL |
8557 | |
8558 | return (GET_CODE (PATTERN (test_insn)) == SEQUENCE | |
8559 | && XVECEXP (PATTERN (test_insn), 0, 1) == insn); | |
8560 | ||
8561 | } | |
8562 | else | |
8563 | return 0; | |
8564 | } | |
746a9efa | 8565 | |
b1092901 JL |
8566 | /* Output an unconditional move and branch insn. */ |
8567 | ||
519104fe | 8568 | const char * |
16d74a3c | 8569 | output_parallel_movb (rtx *operands, rtx insn) |
b1092901 | 8570 | { |
16d74a3c JDA |
8571 | int length = get_attr_length (insn); |
8572 | ||
b1092901 JL |
8573 | /* These are the cases in which we win. */ |
8574 | if (length == 4) | |
8575 | return "mov%I1b,tr %1,%0,%2"; | |
8576 | ||
16d74a3c JDA |
8577 | /* None of the following cases win, but they don't lose either. */ |
8578 | if (length == 8) | |
b1092901 | 8579 | { |
16d74a3c JDA |
8580 | if (dbr_sequence_length () == 0) |
8581 | { | |
8582 | /* Nothing in the delay slot, fake it by putting the combined | |
8583 | insn (the copy or add) in the delay slot of a bl. */ | |
8584 | if (GET_CODE (operands[1]) == CONST_INT) | |
8585 | return "b %2\n\tldi %1,%0"; | |
8586 | else | |
8587 | return "b %2\n\tcopy %1,%0"; | |
8588 | } | |
b1092901 | 8589 | else |
16d74a3c JDA |
8590 | { |
8591 | /* Something in the delay slot, but we've got a long branch. */ | |
8592 | if (GET_CODE (operands[1]) == CONST_INT) | |
8593 | return "ldi %1,%0\n\tb %2"; | |
8594 | else | |
8595 | return "copy %1,%0\n\tb %2"; | |
8596 | } | |
b1092901 | 8597 | } |
16d74a3c JDA |
8598 | |
8599 | if (GET_CODE (operands[1]) == CONST_INT) | |
8600 | output_asm_insn ("ldi %1,%0", operands); | |
b1092901 | 8601 | else |
16d74a3c JDA |
8602 | output_asm_insn ("copy %1,%0", operands); |
8603 | return output_lbranch (operands[2], insn, 1); | |
b1092901 JL |
8604 | } |
8605 | ||
8606 | /* Output an unconditional add and branch insn. */ | |
8607 | ||
519104fe | 8608 | const char * |
16d74a3c | 8609 | output_parallel_addb (rtx *operands, rtx insn) |
b1092901 | 8610 | { |
16d74a3c JDA |
8611 | int length = get_attr_length (insn); |
8612 | ||
b1092901 JL |
8613 | /* To make life easy we want operand0 to be the shared input/output |
8614 | operand and operand1 to be the readonly operand. */ | |
8615 | if (operands[0] == operands[1]) | |
8616 | operands[1] = operands[2]; | |
8617 | ||
8618 | /* These are the cases in which we win. */ | |
8619 | if (length == 4) | |
8620 | return "add%I1b,tr %1,%0,%3"; | |
8621 | ||
16d74a3c JDA |
8622 | /* None of the following cases win, but they don't lose either. */ |
8623 | if (length == 8) | |
b1092901 | 8624 | { |
16d74a3c JDA |
8625 | if (dbr_sequence_length () == 0) |
8626 | /* Nothing in the delay slot, fake it by putting the combined | |
8627 | insn (the copy or add) in the delay slot of a bl. */ | |
8628 | return "b %3\n\tadd%I1 %1,%0,%0"; | |
8629 | else | |
8630 | /* Something in the delay slot, but we've got a long branch. */ | |
8631 | return "add%I1 %1,%0,%0\n\tb %3"; | |
b1092901 | 8632 | } |
16d74a3c JDA |
8633 | |
8634 | output_asm_insn ("add%I1 %1,%0,%0", operands); | |
8635 | return output_lbranch (operands[3], insn, 1); | |
b1092901 JL |
8636 | } |
8637 | ||
1c31ecf6 JDA |
8638 | /* Return nonzero if INSN (a jump insn) immediately follows a call |
8639 | to a named function. This is used to avoid filling the delay slot | |
8640 | of the jump since it can usually be eliminated by modifying RP in | |
8641 | the delay slot of the call. */ | |
6619e96c | 8642 | |
51723711 | 8643 | int |
b7849684 | 8644 | following_call (rtx insn) |
b1092901 | 8645 | { |
6d8d2bbc | 8646 | if (! TARGET_JUMP_IN_DELAY) |
f9bd8d8e JL |
8647 | return 0; |
8648 | ||
b1092901 JL |
8649 | /* Find the previous real insn, skipping NOTEs. */ |
8650 | insn = PREV_INSN (insn); | |
8651 | while (insn && GET_CODE (insn) == NOTE) | |
8652 | insn = PREV_INSN (insn); | |
8653 | ||
8654 | /* Check for CALL_INSNs and millicode calls. */ | |
8655 | if (insn | |
cdc0de30 JL |
8656 | && ((GET_CODE (insn) == CALL_INSN |
8657 | && get_attr_type (insn) != TYPE_DYNCALL) | |
b1092901 JL |
8658 | || (GET_CODE (insn) == INSN |
8659 | && GET_CODE (PATTERN (insn)) != SEQUENCE | |
8660 | && GET_CODE (PATTERN (insn)) != USE | |
8661 | && GET_CODE (PATTERN (insn)) != CLOBBER | |
8662 | && get_attr_type (insn) == TYPE_MILLI))) | |
8663 | return 1; | |
8664 | ||
8665 | return 0; | |
8666 | } | |
8667 | ||
746a9efa JL |
8668 | /* We use this hook to perform a PA specific optimization which is difficult |
8669 | to do in earlier passes. | |
8670 | ||
8671 | We want the delay slots of branches within jump tables to be filled. | |
8672 | None of the compiler passes at the moment even has the notion that a | |
8673 | PA jump table doesn't contain addresses, but instead contains actual | |
8674 | instructions! | |
8675 | ||
8676 | Because we actually jump into the table, the addresses of each entry | |
ddd5a7c1 | 8677 | must stay constant in relation to the beginning of the table (which |
746a9efa JL |
8678 | itself must stay constant relative to the instruction to jump into |
8679 | it). I don't believe we can guarantee earlier passes of the compiler | |
8680 | will adhere to those rules. | |
8681 | ||
8682 | So, late in the compilation process we find all the jump tables, and | |
112cdef5 | 8683 | expand them into real code -- e.g. each entry in the jump table vector |
746a9efa JL |
8684 | will get an appropriate label followed by a jump to the final target. |
8685 | ||
8686 | Reorg and the final jump pass can then optimize these branches and | |
8687 | fill their delay slots. We end up with smaller, more efficient code. | |
8688 | ||
6619e96c | 8689 | The jump instructions within the table are special; we must be able |
746a9efa JL |
8690 | to identify them during assembly output (if the jumps don't get filled |
8691 | we need to emit a nop rather than nullifying the delay slot)). We | |
cb4d476c JDA |
8692 | identify jumps in switch tables by using insns with the attribute |
8693 | type TYPE_BTABLE_BRANCH. | |
251ffdee JL |
8694 | |
8695 | We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB | |
8696 | insns. This serves two purposes, first it prevents jump.c from | |
8697 | noticing that the last N entries in the table jump to the instruction | |
8698 | immediately after the table and deleting the jumps. Second, those | |
8699 | insns mark where we should emit .begin_brtab and .end_brtab directives | |
8700 | when using GAS (allows for better link time optimizations). */ | |
746a9efa | 8701 | |
18dbd950 | 8702 | static void |
b7849684 | 8703 | pa_reorg (void) |
746a9efa JL |
8704 | { |
8705 | rtx insn; | |
8706 | ||
18dbd950 | 8707 | remove_useless_addtr_insns (1); |
d8b79470 | 8708 | |
86001391 | 8709 | if (pa_cpu < PROCESSOR_8000) |
18dbd950 | 8710 | pa_combine_instructions (); |
86001391 | 8711 | |
c4bb6b38 | 8712 | |
d8b79470 | 8713 | /* This is fairly cheap, so always run it if optimizing. */ |
3e056efc | 8714 | if (optimize > 0 && !TARGET_BIG_SWITCH) |
746a9efa | 8715 | { |
29763968 | 8716 | /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */ |
18dbd950 | 8717 | for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
746a9efa | 8718 | { |
cb4d476c | 8719 | rtx pattern, tmp, location, label; |
746a9efa JL |
8720 | unsigned int length, i; |
8721 | ||
29763968 | 8722 | /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */ |
746a9efa | 8723 | if (GET_CODE (insn) != JUMP_INSN |
29763968 JL |
8724 | || (GET_CODE (PATTERN (insn)) != ADDR_VEC |
8725 | && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)) | |
746a9efa JL |
8726 | continue; |
8727 | ||
251ffdee JL |
8728 | /* Emit marker for the beginning of the branch table. */ |
8729 | emit_insn_before (gen_begin_brtab (), insn); | |
ad238e4b | 8730 | |
746a9efa JL |
8731 | pattern = PATTERN (insn); |
8732 | location = PREV_INSN (insn); | |
29763968 | 8733 | length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC); |
ad238e4b | 8734 | |
746a9efa JL |
8735 | for (i = 0; i < length; i++) |
8736 | { | |
3e056efc JL |
8737 | /* Emit a label before each jump to keep jump.c from |
8738 | removing this code. */ | |
8739 | tmp = gen_label_rtx (); | |
8740 | LABEL_NUSES (tmp) = 1; | |
8741 | emit_label_after (tmp, location); | |
8742 | location = NEXT_INSN (location); | |
8743 | ||
29763968 | 8744 | if (GET_CODE (pattern) == ADDR_VEC) |
cb4d476c | 8745 | label = XEXP (XVECEXP (pattern, 0, i), 0); |
29763968 | 8746 | else |
cb4d476c JDA |
8747 | label = XEXP (XVECEXP (pattern, 1, i), 0); |
8748 | ||
8749 | tmp = gen_short_jump (label); | |
8750 | ||
8751 | /* Emit the jump itself. */ | |
8752 | tmp = emit_jump_insn_after (tmp, location); | |
8753 | JUMP_LABEL (tmp) = label; | |
8754 | LABEL_NUSES (label)++; | |
8755 | location = NEXT_INSN (location); | |
746a9efa JL |
8756 | |
8757 | /* Emit a BARRIER after the jump. */ | |
746a9efa | 8758 | emit_barrier_after (location); |
746a9efa JL |
8759 | location = NEXT_INSN (location); |
8760 | } | |
ad238e4b | 8761 | |
251ffdee JL |
8762 | /* Emit marker for the end of the branch table. */ |
8763 | emit_insn_before (gen_end_brtab (), location); | |
8764 | location = NEXT_INSN (location); | |
8765 | emit_barrier_after (location); | |
3e056efc | 8766 | |
29763968 | 8767 | /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */ |
746a9efa JL |
8768 | delete_insn (insn); |
8769 | } | |
8770 | } | |
251ffdee | 8771 | else |
ad238e4b | 8772 | { |
cb4d476c JDA |
8773 | /* Still need brtab marker insns. FIXME: the presence of these |
8774 | markers disables output of the branch table to readonly memory, | |
8775 | and any alignment directives that might be needed. Possibly, | |
8776 | the begin_brtab insn should be output before the label for the | |
1ae58c30 | 8777 | table. This doesn't matter at the moment since the tables are |
cb4d476c | 8778 | always output in the text section. */ |
18dbd950 | 8779 | for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
ad238e4b JL |
8780 | { |
8781 | /* Find an ADDR_VEC insn. */ | |
8782 | if (GET_CODE (insn) != JUMP_INSN | |
29763968 JL |
8783 | || (GET_CODE (PATTERN (insn)) != ADDR_VEC |
8784 | && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)) | |
ad238e4b JL |
8785 | continue; |
8786 | ||
8787 | /* Now generate markers for the beginning and end of the | |
956d6950 | 8788 | branch table. */ |
ad238e4b JL |
8789 | emit_insn_before (gen_begin_brtab (), insn); |
8790 | emit_insn_after (gen_end_brtab (), insn); | |
8791 | } | |
8792 | } | |
aba892c4 | 8793 | } |
c4bb6b38 JL |
8794 | |
8795 | /* The PA has a number of odd instructions which can perform multiple | |
8796 | tasks at once. On first generation PA machines (PA1.0 and PA1.1) | |
8797 | it may be profitable to combine two instructions into one instruction | |
8798 | with two outputs. It's not profitable PA2.0 machines because the | |
8799 | two outputs would take two slots in the reorder buffers. | |
8800 | ||
8801 | This routine finds instructions which can be combined and combines | |
8802 | them. We only support some of the potential combinations, and we | |
8803 | only try common ways to find suitable instructions. | |
8804 | ||
8805 | * addb can add two registers or a register and a small integer | |
8806 | and jump to a nearby (+-8k) location. Normally the jump to the | |
8807 | nearby location is conditional on the result of the add, but by | |
8808 | using the "true" condition we can make the jump unconditional. | |
8809 | Thus addb can perform two independent operations in one insn. | |
8810 | ||
8811 | * movb is similar to addb in that it can perform a reg->reg | |
8812 | or small immediate->reg copy and jump to a nearby (+-8k location). | |
8813 | ||
8814 | * fmpyadd and fmpysub can perform a FP multiply and either an | |
8815 | FP add or FP sub if the operands of the multiply and add/sub are | |
8816 | independent (there are other minor restrictions). Note both | |
8817 | the fmpy and fadd/fsub can in theory move to better spots according | |
8818 | to data dependencies, but for now we require the fmpy stay at a | |
8819 | fixed location. | |
8820 | ||
8821 | * Many of the memory operations can perform pre & post updates | |
8822 | of index registers. GCC's pre/post increment/decrement addressing | |
8823 | is far too simple to take advantage of all the possibilities. This | |
8824 | pass may not be suitable since those insns may not be independent. | |
8825 | ||
8826 | * comclr can compare two ints or an int and a register, nullify | |
8827 | the following instruction and zero some other register. This | |
8828 | is more difficult to use as it's harder to find an insn which | |
8829 | will generate a comclr than finding something like an unconditional | |
8830 | branch. (conditional moves & long branches create comclr insns). | |
8831 | ||
8832 | * Most arithmetic operations can conditionally skip the next | |
8833 | instruction. They can be viewed as "perform this operation | |
8834 | and conditionally jump to this nearby location" (where nearby | |
8835 | is an insns away). These are difficult to use due to the | |
8836 | branch length restrictions. */ | |
8837 | ||
51723711 | 8838 | static void |
b7849684 | 8839 | pa_combine_instructions (void) |
c4bb6b38 | 8840 | { |
0a2aaacc | 8841 | rtx anchor, new_rtx; |
c4bb6b38 JL |
8842 | |
8843 | /* This can get expensive since the basic algorithm is on the | |
8844 | order of O(n^2) (or worse). Only do it for -O2 or higher | |
956d6950 | 8845 | levels of optimization. */ |
c4bb6b38 JL |
8846 | if (optimize < 2) |
8847 | return; | |
8848 | ||
8849 | /* Walk down the list of insns looking for "anchor" insns which | |
8850 | may be combined with "floating" insns. As the name implies, | |
8851 | "anchor" instructions don't move, while "floating" insns may | |
8852 | move around. */ | |
0a2aaacc KG |
8853 | new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX)); |
8854 | new_rtx = make_insn_raw (new_rtx); | |
c4bb6b38 JL |
8855 | |
8856 | for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor)) | |
8857 | { | |
8858 | enum attr_pa_combine_type anchor_attr; | |
8859 | enum attr_pa_combine_type floater_attr; | |
8860 | ||
8861 | /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs. | |
8862 | Also ignore any special USE insns. */ | |
51723711 | 8863 | if ((GET_CODE (anchor) != INSN |
c4bb6b38 | 8864 | && GET_CODE (anchor) != JUMP_INSN |
51723711 | 8865 | && GET_CODE (anchor) != CALL_INSN) |
c4bb6b38 JL |
8866 | || GET_CODE (PATTERN (anchor)) == USE |
8867 | || GET_CODE (PATTERN (anchor)) == CLOBBER | |
8868 | || GET_CODE (PATTERN (anchor)) == ADDR_VEC | |
8869 | || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC) | |
8870 | continue; | |
8871 | ||
8872 | anchor_attr = get_attr_pa_combine_type (anchor); | |
8873 | /* See if anchor is an insn suitable for combination. */ | |
8874 | if (anchor_attr == PA_COMBINE_TYPE_FMPY | |
8875 | || anchor_attr == PA_COMBINE_TYPE_FADDSUB | |
8876 | || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH | |
8877 | && ! forward_branch_p (anchor))) | |
8878 | { | |
8879 | rtx floater; | |
8880 | ||
8881 | for (floater = PREV_INSN (anchor); | |
8882 | floater; | |
8883 | floater = PREV_INSN (floater)) | |
8884 | { | |
8885 | if (GET_CODE (floater) == NOTE | |
8886 | || (GET_CODE (floater) == INSN | |
8887 | && (GET_CODE (PATTERN (floater)) == USE | |
8888 | || GET_CODE (PATTERN (floater)) == CLOBBER))) | |
8889 | continue; | |
8890 | ||
8891 | /* Anything except a regular INSN will stop our search. */ | |
8892 | if (GET_CODE (floater) != INSN | |
8893 | || GET_CODE (PATTERN (floater)) == ADDR_VEC | |
8894 | || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC) | |
8895 | { | |
8896 | floater = NULL_RTX; | |
8897 | break; | |
8898 | } | |
8899 | ||
8900 | /* See if FLOATER is suitable for combination with the | |
8901 | anchor. */ | |
8902 | floater_attr = get_attr_pa_combine_type (floater); | |
8903 | if ((anchor_attr == PA_COMBINE_TYPE_FMPY | |
8904 | && floater_attr == PA_COMBINE_TYPE_FADDSUB) | |
8905 | || (anchor_attr == PA_COMBINE_TYPE_FADDSUB | |
8906 | && floater_attr == PA_COMBINE_TYPE_FMPY)) | |
8907 | { | |
8908 | /* If ANCHOR and FLOATER can be combined, then we're | |
8909 | done with this pass. */ | |
0a2aaacc | 8910 | if (pa_can_combine_p (new_rtx, anchor, floater, 0, |
c4bb6b38 JL |
8911 | SET_DEST (PATTERN (floater)), |
8912 | XEXP (SET_SRC (PATTERN (floater)), 0), | |
8913 | XEXP (SET_SRC (PATTERN (floater)), 1))) | |
8914 | break; | |
8915 | } | |
8916 | ||
8917 | else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH | |
8918 | && floater_attr == PA_COMBINE_TYPE_ADDMOVE) | |
8919 | { | |
8920 | if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS) | |
8921 | { | |
0a2aaacc | 8922 | if (pa_can_combine_p (new_rtx, anchor, floater, 0, |
c4bb6b38 JL |
8923 | SET_DEST (PATTERN (floater)), |
8924 | XEXP (SET_SRC (PATTERN (floater)), 0), | |
8925 | XEXP (SET_SRC (PATTERN (floater)), 1))) | |
8926 | break; | |
8927 | } | |
8928 | else | |
8929 | { | |
0a2aaacc | 8930 | if (pa_can_combine_p (new_rtx, anchor, floater, 0, |
c4bb6b38 JL |
8931 | SET_DEST (PATTERN (floater)), |
8932 | SET_SRC (PATTERN (floater)), | |
8933 | SET_SRC (PATTERN (floater)))) | |
8934 | break; | |
8935 | } | |
8936 | } | |
8937 | } | |
8938 | ||
8939 | /* If we didn't find anything on the backwards scan try forwards. */ | |
8940 | if (!floater | |
8941 | && (anchor_attr == PA_COMBINE_TYPE_FMPY | |
8942 | || anchor_attr == PA_COMBINE_TYPE_FADDSUB)) | |
8943 | { | |
8944 | for (floater = anchor; floater; floater = NEXT_INSN (floater)) | |
8945 | { | |
8946 | if (GET_CODE (floater) == NOTE | |
8947 | || (GET_CODE (floater) == INSN | |
8948 | && (GET_CODE (PATTERN (floater)) == USE | |
8949 | || GET_CODE (PATTERN (floater)) == CLOBBER))) | |
6619e96c | 8950 | |
c4bb6b38 JL |
8951 | continue; |
8952 | ||
8953 | /* Anything except a regular INSN will stop our search. */ | |
8954 | if (GET_CODE (floater) != INSN | |
8955 | || GET_CODE (PATTERN (floater)) == ADDR_VEC | |
8956 | || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC) | |
8957 | { | |
8958 | floater = NULL_RTX; | |
8959 | break; | |
8960 | } | |
8961 | ||
8962 | /* See if FLOATER is suitable for combination with the | |
8963 | anchor. */ | |
8964 | floater_attr = get_attr_pa_combine_type (floater); | |
8965 | if ((anchor_attr == PA_COMBINE_TYPE_FMPY | |
8966 | && floater_attr == PA_COMBINE_TYPE_FADDSUB) | |
8967 | || (anchor_attr == PA_COMBINE_TYPE_FADDSUB | |
8968 | && floater_attr == PA_COMBINE_TYPE_FMPY)) | |
8969 | { | |
8970 | /* If ANCHOR and FLOATER can be combined, then we're | |
8971 | done with this pass. */ | |
0a2aaacc | 8972 | if (pa_can_combine_p (new_rtx, anchor, floater, 1, |
c4bb6b38 | 8973 | SET_DEST (PATTERN (floater)), |
831c1763 AM |
8974 | XEXP (SET_SRC (PATTERN (floater)), |
8975 | 0), | |
8976 | XEXP (SET_SRC (PATTERN (floater)), | |
8977 | 1))) | |
c4bb6b38 JL |
8978 | break; |
8979 | } | |
8980 | } | |
8981 | } | |
8982 | ||
8983 | /* FLOATER will be nonzero if we found a suitable floating | |
8984 | insn for combination with ANCHOR. */ | |
8985 | if (floater | |
8986 | && (anchor_attr == PA_COMBINE_TYPE_FADDSUB | |
8987 | || anchor_attr == PA_COMBINE_TYPE_FMPY)) | |
8988 | { | |
8989 | /* Emit the new instruction and delete the old anchor. */ | |
c5c76735 JL |
8990 | emit_insn_before (gen_rtx_PARALLEL |
8991 | (VOIDmode, | |
8992 | gen_rtvec (2, PATTERN (anchor), | |
8993 | PATTERN (floater))), | |
8994 | anchor); | |
8995 | ||
a38e7aa5 | 8996 | SET_INSN_DELETED (anchor); |
c4bb6b38 JL |
8997 | |
8998 | /* Emit a special USE insn for FLOATER, then delete | |
8999 | the floating insn. */ | |
ad2c71b7 | 9000 | emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater); |
c4bb6b38 JL |
9001 | delete_insn (floater); |
9002 | ||
9003 | continue; | |
9004 | } | |
9005 | else if (floater | |
9006 | && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH) | |
9007 | { | |
9008 | rtx temp; | |
9009 | /* Emit the new_jump instruction and delete the old anchor. */ | |
c5c76735 JL |
9010 | temp |
9011 | = emit_jump_insn_before (gen_rtx_PARALLEL | |
9012 | (VOIDmode, | |
9013 | gen_rtvec (2, PATTERN (anchor), | |
9014 | PATTERN (floater))), | |
9015 | anchor); | |
9016 | ||
c4bb6b38 | 9017 | JUMP_LABEL (temp) = JUMP_LABEL (anchor); |
a38e7aa5 | 9018 | SET_INSN_DELETED (anchor); |
c4bb6b38 JL |
9019 | |
9020 | /* Emit a special USE insn for FLOATER, then delete | |
9021 | the floating insn. */ | |
ad2c71b7 | 9022 | emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater); |
c4bb6b38 JL |
9023 | delete_insn (floater); |
9024 | continue; | |
9025 | } | |
9026 | } | |
9027 | } | |
9028 | } | |
9029 | ||
0952f89b | 9030 | static int |
0a2aaacc | 9031 | pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest, |
b7849684 | 9032 | rtx src1, rtx src2) |
c4bb6b38 JL |
9033 | { |
9034 | int insn_code_number; | |
9035 | rtx start, end; | |
9036 | ||
9037 | /* Create a PARALLEL with the patterns of ANCHOR and | |
9038 | FLOATER, try to recognize it, then test constraints | |
9039 | for the resulting pattern. | |
9040 | ||
9041 | If the pattern doesn't match or the constraints | |
9042 | aren't met keep searching for a suitable floater | |
9043 | insn. */ | |
0a2aaacc KG |
9044 | XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor); |
9045 | XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater); | |
9046 | INSN_CODE (new_rtx) = -1; | |
9047 | insn_code_number = recog_memoized (new_rtx); | |
c4bb6b38 | 9048 | if (insn_code_number < 0 |
0a2aaacc | 9049 | || (extract_insn (new_rtx), ! constrain_operands (1))) |
c4bb6b38 JL |
9050 | return 0; |
9051 | ||
9052 | if (reversed) | |
9053 | { | |
9054 | start = anchor; | |
9055 | end = floater; | |
9056 | } | |
9057 | else | |
9058 | { | |
9059 | start = floater; | |
9060 | end = anchor; | |
9061 | } | |
9062 | ||
9063 | /* There's up to three operands to consider. One | |
9064 | output and two inputs. | |
9065 | ||
9066 | The output must not be used between FLOATER & ANCHOR | |
9067 | exclusive. The inputs must not be set between | |
9068 | FLOATER and ANCHOR exclusive. */ | |
9069 | ||
9070 | if (reg_used_between_p (dest, start, end)) | |
9071 | return 0; | |
9072 | ||
9073 | if (reg_set_between_p (src1, start, end)) | |
9074 | return 0; | |
9075 | ||
9076 | if (reg_set_between_p (src2, start, end)) | |
9077 | return 0; | |
9078 | ||
9079 | /* If we get here, then everything is good. */ | |
9080 | return 1; | |
9081 | } | |
b9cd54d2 | 9082 | |
2561a923 | 9083 | /* Return nonzero if references for INSN are delayed. |
b9cd54d2 JL |
9084 | |
9085 | Millicode insns are actually function calls with some special | |
9086 | constraints on arguments and register usage. | |
9087 | ||
9088 | Millicode calls always expect their arguments in the integer argument | |
9089 | registers, and always return their result in %r29 (ret1). They | |
7d8b1412 AM |
9090 | are expected to clobber their arguments, %r1, %r29, and the return |
9091 | pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else. | |
9092 | ||
9093 | This function tells reorg that the references to arguments and | |
9094 | millicode calls do not appear to happen until after the millicode call. | |
9095 | This allows reorg to put insns which set the argument registers into the | |
9096 | delay slot of the millicode call -- thus they act more like traditional | |
9097 | CALL_INSNs. | |
9098 | ||
1e5f1716 | 9099 | Note we cannot consider side effects of the insn to be delayed because |
7d8b1412 AM |
9100 | the branch and link insn will clobber the return pointer. If we happened |
9101 | to use the return pointer in the delay slot of the call, then we lose. | |
b9cd54d2 JL |
9102 | |
9103 | get_attr_type will try to recognize the given insn, so make sure to | |
9104 | filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns | |
9105 | in particular. */ | |
9106 | int | |
b7849684 | 9107 | insn_refs_are_delayed (rtx insn) |
b9cd54d2 | 9108 | { |
6619e96c | 9109 | return ((GET_CODE (insn) == INSN |
b9cd54d2 JL |
9110 | && GET_CODE (PATTERN (insn)) != SEQUENCE |
9111 | && GET_CODE (PATTERN (insn)) != USE | |
9112 | && GET_CODE (PATTERN (insn)) != CLOBBER | |
9113 | && get_attr_type (insn) == TYPE_MILLI)); | |
9114 | } | |
d07d525a | 9115 | |
44571d6e JDA |
9116 | /* On the HP-PA the value is found in register(s) 28(-29), unless |
9117 | the mode is SF or DF. Then the value is returned in fr4 (32). | |
9118 | ||
9119 | This must perform the same promotions as PROMOTE_MODE, else | |
3f12cd9b | 9120 | TARGET_PROMOTE_FUNCTION_RETURN will not work correctly. |
44571d6e JDA |
9121 | |
9122 | Small structures must be returned in a PARALLEL on PA64 in order | |
9123 | to match the HP Compiler ABI. */ | |
9124 | ||
9125 | rtx | |
586de218 | 9126 | function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED) |
44571d6e JDA |
9127 | { |
9128 | enum machine_mode valmode; | |
9129 | ||
4720d5ca JDA |
9130 | if (AGGREGATE_TYPE_P (valtype) |
9131 | || TREE_CODE (valtype) == COMPLEX_TYPE | |
9132 | || TREE_CODE (valtype) == VECTOR_TYPE) | |
44571d6e | 9133 | { |
2a04824b JDA |
9134 | if (TARGET_64BIT) |
9135 | { | |
9136 | /* Aggregates with a size less than or equal to 128 bits are | |
9137 | returned in GR 28(-29). They are left justified. The pad | |
9138 | bits are undefined. Larger aggregates are returned in | |
9139 | memory. */ | |
9140 | rtx loc[2]; | |
9141 | int i, offset = 0; | |
9142 | int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2; | |
9143 | ||
9144 | for (i = 0; i < ub; i++) | |
9145 | { | |
9146 | loc[i] = gen_rtx_EXPR_LIST (VOIDmode, | |
9147 | gen_rtx_REG (DImode, 28 + i), | |
9148 | GEN_INT (offset)); | |
9149 | offset += 8; | |
9150 | } | |
44571d6e | 9151 | |
2a04824b JDA |
9152 | return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc)); |
9153 | } | |
9154 | else if (int_size_in_bytes (valtype) > UNITS_PER_WORD) | |
44571d6e | 9155 | { |
2a04824b JDA |
9156 | /* Aggregates 5 to 8 bytes in size are returned in general |
9157 | registers r28-r29 in the same manner as other non | |
9158 | floating-point objects. The data is right-justified and | |
9159 | zero-extended to 64 bits. This is opposite to the normal | |
9160 | justification used on big endian targets and requires | |
9161 | special treatment. */ | |
9162 | rtx loc = gen_rtx_EXPR_LIST (VOIDmode, | |
9163 | gen_rtx_REG (DImode, 28), const0_rtx); | |
9164 | return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc)); | |
44571d6e | 9165 | } |
44571d6e JDA |
9166 | } |
9167 | ||
9168 | if ((INTEGRAL_TYPE_P (valtype) | |
2ae88ecd | 9169 | && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD) |
44571d6e JDA |
9170 | || POINTER_TYPE_P (valtype)) |
9171 | valmode = word_mode; | |
9172 | else | |
9173 | valmode = TYPE_MODE (valtype); | |
9174 | ||
9175 | if (TREE_CODE (valtype) == REAL_TYPE | |
2a04824b | 9176 | && !AGGREGATE_TYPE_P (valtype) |
44571d6e JDA |
9177 | && TYPE_MODE (valtype) != TFmode |
9178 | && !TARGET_SOFT_FLOAT) | |
9179 | return gen_rtx_REG (valmode, 32); | |
9180 | ||
9181 | return gen_rtx_REG (valmode, 28); | |
9182 | } | |
9183 | ||
520babc7 JL |
9184 | /* Return the location of a parameter that is passed in a register or NULL |
9185 | if the parameter has any component that is passed in memory. | |
9186 | ||
9187 | This is new code and will be pushed to into the net sources after | |
6619e96c | 9188 | further testing. |
520babc7 JL |
9189 | |
9190 | ??? We might want to restructure this so that it looks more like other | |
9191 | ports. */ | |
9192 | rtx | |
b7849684 JE |
9193 | function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type, |
9194 | int named ATTRIBUTE_UNUSED) | |
520babc7 JL |
9195 | { |
9196 | int max_arg_words = (TARGET_64BIT ? 8 : 4); | |
015b1ad1 | 9197 | int alignment = 0; |
9dff28ab | 9198 | int arg_size; |
520babc7 JL |
9199 | int fpr_reg_base; |
9200 | int gpr_reg_base; | |
9201 | rtx retval; | |
9202 | ||
9dff28ab JDA |
9203 | if (mode == VOIDmode) |
9204 | return NULL_RTX; | |
9205 | ||
9206 | arg_size = FUNCTION_ARG_SIZE (mode, type); | |
9207 | ||
9208 | /* If this arg would be passed partially or totally on the stack, then | |
78a52f11 | 9209 | this routine should return zero. pa_arg_partial_bytes will |
9dff28ab JDA |
9210 | handle arguments which are split between regs and stack slots if |
9211 | the ABI mandates split arguments. */ | |
4720d5ca | 9212 | if (!TARGET_64BIT) |
520babc7 | 9213 | { |
9dff28ab JDA |
9214 | /* The 32-bit ABI does not split arguments. */ |
9215 | if (cum->words + arg_size > max_arg_words) | |
520babc7 JL |
9216 | return NULL_RTX; |
9217 | } | |
9218 | else | |
9219 | { | |
015b1ad1 JDA |
9220 | if (arg_size > 1) |
9221 | alignment = cum->words & 1; | |
9dff28ab | 9222 | if (cum->words + alignment >= max_arg_words) |
520babc7 JL |
9223 | return NULL_RTX; |
9224 | } | |
9225 | ||
9226 | /* The 32bit ABIs and the 64bit ABIs are rather different, | |
9227 | particularly in their handling of FP registers. We might | |
9228 | be able to cleverly share code between them, but I'm not | |
0952f89b | 9229 | going to bother in the hope that splitting them up results |
015b1ad1 | 9230 | in code that is more easily understood. */ |
520babc7 | 9231 | |
520babc7 JL |
9232 | if (TARGET_64BIT) |
9233 | { | |
9234 | /* Advance the base registers to their current locations. | |
9235 | ||
9236 | Remember, gprs grow towards smaller register numbers while | |
015b1ad1 JDA |
9237 | fprs grow to higher register numbers. Also remember that |
9238 | although FP regs are 32-bit addressable, we pretend that | |
9239 | the registers are 64-bits wide. */ | |
520babc7 JL |
9240 | gpr_reg_base = 26 - cum->words; |
9241 | fpr_reg_base = 32 + cum->words; | |
6619e96c | 9242 | |
9dff28ab JDA |
9243 | /* Arguments wider than one word and small aggregates need special |
9244 | treatment. */ | |
9245 | if (arg_size > 1 | |
9246 | || mode == BLKmode | |
4720d5ca JDA |
9247 | || (type && (AGGREGATE_TYPE_P (type) |
9248 | || TREE_CODE (type) == COMPLEX_TYPE | |
9249 | || TREE_CODE (type) == VECTOR_TYPE))) | |
520babc7 | 9250 | { |
015b1ad1 JDA |
9251 | /* Double-extended precision (80-bit), quad-precision (128-bit) |
9252 | and aggregates including complex numbers are aligned on | |
9253 | 128-bit boundaries. The first eight 64-bit argument slots | |
9254 | are associated one-to-one, with general registers r26 | |
9255 | through r19, and also with floating-point registers fr4 | |
9256 | through fr11. Arguments larger than one word are always | |
9dff28ab JDA |
9257 | passed in general registers. |
9258 | ||
9259 | Using a PARALLEL with a word mode register results in left | |
9260 | justified data on a big-endian target. */ | |
015b1ad1 JDA |
9261 | |
9262 | rtx loc[8]; | |
9263 | int i, offset = 0, ub = arg_size; | |
9264 | ||
9265 | /* Align the base register. */ | |
9266 | gpr_reg_base -= alignment; | |
9267 | ||
9268 | ub = MIN (ub, max_arg_words - cum->words - alignment); | |
9269 | for (i = 0; i < ub; i++) | |
520babc7 | 9270 | { |
015b1ad1 JDA |
9271 | loc[i] = gen_rtx_EXPR_LIST (VOIDmode, |
9272 | gen_rtx_REG (DImode, gpr_reg_base), | |
9273 | GEN_INT (offset)); | |
9274 | gpr_reg_base -= 1; | |
9275 | offset += 8; | |
520babc7 | 9276 | } |
015b1ad1 | 9277 | |
e4b95210 | 9278 | return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc)); |
520babc7 | 9279 | } |
9dff28ab | 9280 | } |
520babc7 JL |
9281 | else |
9282 | { | |
9283 | /* If the argument is larger than a word, then we know precisely | |
9284 | which registers we must use. */ | |
015b1ad1 | 9285 | if (arg_size > 1) |
520babc7 JL |
9286 | { |
9287 | if (cum->words) | |
9288 | { | |
9289 | gpr_reg_base = 23; | |
9290 | fpr_reg_base = 38; | |
9291 | } | |
9292 | else | |
9293 | { | |
9294 | gpr_reg_base = 25; | |
9295 | fpr_reg_base = 34; | |
9296 | } | |
9dff28ab JDA |
9297 | |
9298 | /* Structures 5 to 8 bytes in size are passed in the general | |
9299 | registers in the same manner as other non floating-point | |
9300 | objects. The data is right-justified and zero-extended | |
7ea18c08 JDA |
9301 | to 64 bits. This is opposite to the normal justification |
9302 | used on big endian targets and requires special treatment. | |
4720d5ca JDA |
9303 | We now define BLOCK_REG_PADDING to pad these objects. |
9304 | Aggregates, complex and vector types are passed in the same | |
9305 | manner as structures. */ | |
9306 | if (mode == BLKmode | |
9307 | || (type && (AGGREGATE_TYPE_P (type) | |
9308 | || TREE_CODE (type) == COMPLEX_TYPE | |
9309 | || TREE_CODE (type) == VECTOR_TYPE))) | |
9dff28ab | 9310 | { |
44571d6e JDA |
9311 | rtx loc = gen_rtx_EXPR_LIST (VOIDmode, |
9312 | gen_rtx_REG (DImode, gpr_reg_base), | |
9313 | const0_rtx); | |
2a04824b | 9314 | return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc)); |
9dff28ab | 9315 | } |
520babc7 JL |
9316 | } |
9317 | else | |
9318 | { | |
9319 | /* We have a single word (32 bits). A simple computation | |
9320 | will get us the register #s we need. */ | |
9321 | gpr_reg_base = 26 - cum->words; | |
9322 | fpr_reg_base = 32 + 2 * cum->words; | |
9323 | } | |
9324 | } | |
9325 | ||
b848dc65 | 9326 | /* Determine if the argument needs to be passed in both general and |
520babc7 | 9327 | floating point registers. */ |
b848dc65 JDA |
9328 | if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32) |
9329 | /* If we are doing soft-float with portable runtime, then there | |
9330 | is no need to worry about FP regs. */ | |
c328adfa | 9331 | && !TARGET_SOFT_FLOAT |
4720d5ca | 9332 | /* The parameter must be some kind of scalar float, else we just |
b848dc65 | 9333 | pass it in integer registers. */ |
4720d5ca | 9334 | && GET_MODE_CLASS (mode) == MODE_FLOAT |
b848dc65 JDA |
9335 | /* The target function must not have a prototype. */ |
9336 | && cum->nargs_prototype <= 0 | |
9337 | /* libcalls do not need to pass items in both FP and general | |
9338 | registers. */ | |
9339 | && type != NULL_TREE | |
c328adfa JDA |
9340 | /* All this hair applies to "outgoing" args only. This includes |
9341 | sibcall arguments setup with FUNCTION_INCOMING_ARG. */ | |
9342 | && !cum->incoming) | |
b848dc65 JDA |
9343 | /* Also pass outgoing floating arguments in both registers in indirect |
9344 | calls with the 32 bit ABI and the HP assembler since there is no | |
9345 | way to the specify argument locations in static functions. */ | |
c328adfa JDA |
9346 | || (!TARGET_64BIT |
9347 | && !TARGET_GAS | |
9348 | && !cum->incoming | |
b848dc65 | 9349 | && cum->indirect |
4720d5ca | 9350 | && GET_MODE_CLASS (mode) == MODE_FLOAT)) |
520babc7 JL |
9351 | { |
9352 | retval | |
9353 | = gen_rtx_PARALLEL | |
9354 | (mode, | |
9355 | gen_rtvec (2, | |
9356 | gen_rtx_EXPR_LIST (VOIDmode, | |
9357 | gen_rtx_REG (mode, fpr_reg_base), | |
9358 | const0_rtx), | |
9359 | gen_rtx_EXPR_LIST (VOIDmode, | |
9360 | gen_rtx_REG (mode, gpr_reg_base), | |
9361 | const0_rtx))); | |
9362 | } | |
9363 | else | |
9364 | { | |
9365 | /* See if we should pass this parameter in a general register. */ | |
9366 | if (TARGET_SOFT_FLOAT | |
9367 | /* Indirect calls in the normal 32bit ABI require all arguments | |
9368 | to be passed in general registers. */ | |
9369 | || (!TARGET_PORTABLE_RUNTIME | |
9370 | && !TARGET_64BIT | |
50b424a9 | 9371 | && !TARGET_ELF32 |
520babc7 | 9372 | && cum->indirect) |
4720d5ca JDA |
9373 | /* If the parameter is not a scalar floating-point parameter, |
9374 | then it belongs in GPRs. */ | |
9375 | || GET_MODE_CLASS (mode) != MODE_FLOAT | |
2a04824b JDA |
9376 | /* Structure with single SFmode field belongs in GPR. */ |
9377 | || (type && AGGREGATE_TYPE_P (type))) | |
520babc7 JL |
9378 | retval = gen_rtx_REG (mode, gpr_reg_base); |
9379 | else | |
9380 | retval = gen_rtx_REG (mode, fpr_reg_base); | |
9381 | } | |
9382 | return retval; | |
9383 | } | |
9384 | ||
9385 | ||
9386 | /* If this arg would be passed totally in registers or totally on the stack, | |
78a52f11 RH |
9387 | then this routine should return zero. */ |
9388 | ||
9389 | static int | |
9390 | pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode, | |
9391 | tree type, bool named ATTRIBUTE_UNUSED) | |
520babc7 | 9392 | { |
e0c556d3 AM |
9393 | unsigned int max_arg_words = 8; |
9394 | unsigned int offset = 0; | |
520babc7 | 9395 | |
78a52f11 RH |
9396 | if (!TARGET_64BIT) |
9397 | return 0; | |
9398 | ||
e0c556d3 | 9399 | if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1)) |
520babc7 JL |
9400 | offset = 1; |
9401 | ||
e0c556d3 | 9402 | if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words) |
fe19a83d | 9403 | /* Arg fits fully into registers. */ |
520babc7 | 9404 | return 0; |
6619e96c | 9405 | else if (cum->words + offset >= max_arg_words) |
fe19a83d | 9406 | /* Arg fully on the stack. */ |
520babc7 JL |
9407 | return 0; |
9408 | else | |
fe19a83d | 9409 | /* Arg is split. */ |
78a52f11 | 9410 | return (max_arg_words - cum->words - offset) * UNITS_PER_WORD; |
520babc7 JL |
9411 | } |
9412 | ||
9413 | ||
d6b5193b | 9414 | /* A get_unnamed_section callback for switching to the text section. |
9a55eab3 JDA |
9415 | |
9416 | This function is only used with SOM. Because we don't support | |
9417 | named subspaces, we can only create a new subspace or switch back | |
774acadf | 9418 | to the default text subspace. */ |
774acadf | 9419 | |
d6b5193b RS |
9420 | static void |
9421 | som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED) | |
9422 | { | |
9423 | gcc_assert (TARGET_SOM); | |
774acadf | 9424 | if (TARGET_GAS) |
9a55eab3 | 9425 | { |
8c5e065b | 9426 | if (cfun && cfun->machine && !cfun->machine->in_nsubspa) |
9a55eab3 JDA |
9427 | { |
9428 | /* We only want to emit a .nsubspa directive once at the | |
9429 | start of the function. */ | |
9430 | cfun->machine->in_nsubspa = 1; | |
9431 | ||
9432 | /* Create a new subspace for the text. This provides | |
9433 | better stub placement and one-only functions. */ | |
9434 | if (cfun->decl | |
9435 | && DECL_ONE_ONLY (cfun->decl) | |
9436 | && !DECL_WEAK (cfun->decl)) | |
1a83bfc3 JDA |
9437 | { |
9438 | output_section_asm_op ("\t.SPACE $TEXT$\n" | |
9439 | "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8," | |
9440 | "ACCESS=44,SORT=24,COMDAT"); | |
9441 | return; | |
9442 | } | |
9a55eab3 JDA |
9443 | } |
9444 | else | |
9445 | { | |
9446 | /* There isn't a current function or the body of the current | |
9447 | function has been completed. So, we are changing to the | |
1a83bfc3 JDA |
9448 | text section to output debugging information. Thus, we |
9449 | need to forget that we are in the text section so that | |
9450 | varasm.c will call us when text_section is selected again. */ | |
8c5e065b JDA |
9451 | gcc_assert (!cfun || !cfun->machine |
9452 | || cfun->machine->in_nsubspa == 2); | |
d6b5193b | 9453 | in_section = NULL; |
9a55eab3 | 9454 | } |
1a83bfc3 JDA |
9455 | output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$"); |
9456 | return; | |
9a55eab3 | 9457 | } |
d6b5193b RS |
9458 | output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$"); |
9459 | } | |
9460 | ||
1a83bfc3 JDA |
9461 | /* A get_unnamed_section callback for switching to comdat data |
9462 | sections. This function is only used with SOM. */ | |
9463 | ||
9464 | static void | |
9465 | som_output_comdat_data_section_asm_op (const void *data) | |
9466 | { | |
9467 | in_section = NULL; | |
9468 | output_section_asm_op (data); | |
9469 | } | |
9470 | ||
d6b5193b | 9471 | /* Implement TARGET_ASM_INITIALIZE_SECTIONS */ |
9a55eab3 | 9472 | |
d6b5193b RS |
9473 | static void |
9474 | pa_som_asm_init_sections (void) | |
9475 | { | |
9476 | text_section | |
9477 | = get_unnamed_section (0, som_output_text_section_asm_op, NULL); | |
9478 | ||
9479 | /* SOM puts readonly data in the default $LIT$ subspace when PIC code | |
9480 | is not being generated. */ | |
9481 | som_readonly_data_section | |
9482 | = get_unnamed_section (0, output_section_asm_op, | |
9483 | "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$"); | |
9484 | ||
9485 | /* When secondary definitions are not supported, SOM makes readonly | |
9486 | data one-only by creating a new $LIT$ subspace in $TEXT$ with | |
9487 | the comdat flag. */ | |
9488 | som_one_only_readonly_data_section | |
1a83bfc3 | 9489 | = get_unnamed_section (0, som_output_comdat_data_section_asm_op, |
d6b5193b RS |
9490 | "\t.SPACE $TEXT$\n" |
9491 | "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8," | |
9492 | "ACCESS=0x2c,SORT=16,COMDAT"); | |
9493 | ||
9494 | ||
9495 | /* When secondary definitions are not supported, SOM makes data one-only | |
9496 | by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */ | |
9497 | som_one_only_data_section | |
1a83bfc3 JDA |
9498 | = get_unnamed_section (SECTION_WRITE, |
9499 | som_output_comdat_data_section_asm_op, | |
d6b5193b RS |
9500 | "\t.SPACE $PRIVATE$\n" |
9501 | "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8," | |
9502 | "ACCESS=31,SORT=24,COMDAT"); | |
9503 | ||
9504 | /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups | |
9505 | which reference data within the $TEXT$ space (for example constant | |
9506 | strings in the $LIT$ subspace). | |
9507 | ||
9508 | The assemblers (GAS and HP as) both have problems with handling | |
9509 | the difference of two symbols which is the other correct way to | |
9510 | reference constant data during PIC code generation. | |
9511 | ||
9512 | So, there's no way to reference constant data which is in the | |
9513 | $TEXT$ space during PIC generation. Instead place all constant | |
9514 | data into the $PRIVATE$ subspace (this reduces sharing, but it | |
9515 | works correctly). */ | |
9516 | readonly_data_section = flag_pic ? data_section : som_readonly_data_section; | |
9517 | ||
9518 | /* We must not have a reference to an external symbol defined in a | |
9519 | shared library in a readonly section, else the SOM linker will | |
9520 | complain. | |
9521 | ||
9522 | So, we force exception information into the data section. */ | |
9523 | exception_section = data_section; | |
9a55eab3 JDA |
9524 | } |
9525 | ||
ae46c4e0 RH |
9526 | /* On hpux10, the linker will give an error if we have a reference |
9527 | in the read-only data section to a symbol defined in a shared | |
9528 | library. Therefore, expressions that might require a reloc can | |
9529 | not be placed in the read-only data section. */ | |
9530 | ||
d6b5193b | 9531 | static section * |
24a52160 JDA |
9532 | pa_select_section (tree exp, int reloc, |
9533 | unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED) | |
ae46c4e0 RH |
9534 | { |
9535 | if (TREE_CODE (exp) == VAR_DECL | |
9536 | && TREE_READONLY (exp) | |
9537 | && !TREE_THIS_VOLATILE (exp) | |
9538 | && DECL_INITIAL (exp) | |
9539 | && (DECL_INITIAL (exp) == error_mark_node | |
9540 | || TREE_CONSTANT (DECL_INITIAL (exp))) | |
9541 | && !reloc) | |
9a55eab3 JDA |
9542 | { |
9543 | if (TARGET_SOM | |
9544 | && DECL_ONE_ONLY (exp) | |
9545 | && !DECL_WEAK (exp)) | |
d6b5193b | 9546 | return som_one_only_readonly_data_section; |
9a55eab3 | 9547 | else |
d6b5193b | 9548 | return readonly_data_section; |
9a55eab3 | 9549 | } |
6615c446 | 9550 | else if (CONSTANT_CLASS_P (exp) && !reloc) |
d6b5193b | 9551 | return readonly_data_section; |
9a55eab3 JDA |
9552 | else if (TARGET_SOM |
9553 | && TREE_CODE (exp) == VAR_DECL | |
9554 | && DECL_ONE_ONLY (exp) | |
e41f3691 | 9555 | && !DECL_WEAK (exp)) |
d6b5193b | 9556 | return som_one_only_data_section; |
ae46c4e0 | 9557 | else |
d6b5193b | 9558 | return data_section; |
ae46c4e0 | 9559 | } |
e2500fed | 9560 | |
5eb99654 | 9561 | static void |
b7849684 | 9562 | pa_globalize_label (FILE *stream, const char *name) |
5eb99654 KG |
9563 | { |
9564 | /* We only handle DATA objects here, functions are globalized in | |
9565 | ASM_DECLARE_FUNCTION_NAME. */ | |
9566 | if (! FUNCTION_NAME_P (name)) | |
9567 | { | |
9568 | fputs ("\t.EXPORT ", stream); | |
9569 | assemble_name (stream, name); | |
9570 | fputs (",DATA\n", stream); | |
9571 | } | |
9572 | } | |
3f12cd9b | 9573 | |
bd5bd7ac KH |
9574 | /* Worker function for TARGET_STRUCT_VALUE_RTX. */ |
9575 | ||
3f12cd9b KH |
9576 | static rtx |
9577 | pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED, | |
9578 | int incoming ATTRIBUTE_UNUSED) | |
9579 | { | |
9580 | return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM); | |
9581 | } | |
9582 | ||
bd5bd7ac KH |
9583 | /* Worker function for TARGET_RETURN_IN_MEMORY. */ |
9584 | ||
3f12cd9b | 9585 | bool |
586de218 | 9586 | pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED) |
3f12cd9b KH |
9587 | { |
9588 | /* SOM ABI says that objects larger than 64 bits are returned in memory. | |
9589 | PA64 ABI says that objects larger than 128 bits are returned in memory. | |
9590 | Note, int_size_in_bytes can return -1 if the size of the object is | |
9591 | variable or larger than the maximum value that can be expressed as | |
9592 | a HOST_WIDE_INT. It can also return zero for an empty type. The | |
9593 | simplest way to handle variable and empty types is to pass them in | |
9594 | memory. This avoids problems in defining the boundaries of argument | |
9595 | slots, allocating registers, etc. */ | |
9596 | return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8) | |
9597 | || int_size_in_bytes (type) <= 0); | |
9598 | } | |
9599 | ||
744b2d61 JDA |
9600 | /* Structure to hold declaration and name of external symbols that are |
9601 | emitted by GCC. We generate a vector of these symbols and output them | |
9602 | at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true. | |
9603 | This avoids putting out names that are never really used. */ | |
9604 | ||
d1b38208 | 9605 | typedef struct GTY(()) extern_symbol |
744b2d61 JDA |
9606 | { |
9607 | tree decl; | |
9608 | const char *name; | |
d4e6fecb | 9609 | } extern_symbol; |
744b2d61 JDA |
9610 | |
9611 | /* Define gc'd vector type for extern_symbol. */ | |
d4e6fecb NS |
9612 | DEF_VEC_O(extern_symbol); |
9613 | DEF_VEC_ALLOC_O(extern_symbol,gc); | |
744b2d61 JDA |
9614 | |
9615 | /* Vector of extern_symbol pointers. */ | |
d4e6fecb | 9616 | static GTY(()) VEC(extern_symbol,gc) *extern_symbols; |
744b2d61 JDA |
9617 | |
9618 | #ifdef ASM_OUTPUT_EXTERNAL_REAL | |
9619 | /* Mark DECL (name NAME) as an external reference (assembler output | |
9620 | file FILE). This saves the names to output at the end of the file | |
9621 | if actually referenced. */ | |
9622 | ||
9623 | void | |
9624 | pa_hpux_asm_output_external (FILE *file, tree decl, const char *name) | |
9625 | { | |
d4e6fecb | 9626 | extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL); |
744b2d61 JDA |
9627 | |
9628 | gcc_assert (file == asm_out_file); | |
9629 | p->decl = decl; | |
9630 | p->name = name; | |
744b2d61 JDA |
9631 | } |
9632 | ||
9633 | /* Output text required at the end of an assembler file. | |
9634 | This includes deferred plabels and .import directives for | |
9635 | all external symbols that were actually referenced. */ | |
9636 | ||
9637 | static void | |
9638 | pa_hpux_file_end (void) | |
9639 | { | |
9640 | unsigned int i; | |
d4e6fecb | 9641 | extern_symbol *p; |
744b2d61 | 9642 | |
3674b34d JDA |
9643 | if (!NO_DEFERRED_PROFILE_COUNTERS) |
9644 | output_deferred_profile_counters (); | |
9645 | ||
744b2d61 JDA |
9646 | output_deferred_plabels (); |
9647 | ||
9648 | for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++) | |
9649 | { | |
9650 | tree decl = p->decl; | |
9651 | ||
9652 | if (!TREE_ASM_WRITTEN (decl) | |
9653 | && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0))) | |
9654 | ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name); | |
9655 | } | |
9656 | ||
d4e6fecb | 9657 | VEC_free (extern_symbol, gc, extern_symbols); |
744b2d61 JDA |
9658 | } |
9659 | #endif | |
9660 | ||
6982c5d4 | 9661 | /* Return true if a change from mode FROM to mode TO for a register |
0a2aaacc | 9662 | in register class RCLASS is invalid. */ |
6982c5d4 JDA |
9663 | |
9664 | bool | |
9665 | pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to, | |
0a2aaacc | 9666 | enum reg_class rclass) |
6982c5d4 JDA |
9667 | { |
9668 | if (from == to) | |
9669 | return false; | |
9670 | ||
9671 | /* Reject changes to/from complex and vector modes. */ | |
9672 | if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from) | |
9673 | || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to)) | |
9674 | return true; | |
9675 | ||
9676 | if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)) | |
9677 | return false; | |
9678 | ||
9679 | /* There is no way to load QImode or HImode values directly from | |
9680 | memory. SImode loads to the FP registers are not zero extended. | |
9681 | On the 64-bit target, this conflicts with the definition of | |
9682 | LOAD_EXTEND_OP. Thus, we can't allow changing between modes | |
9683 | with different sizes in the floating-point registers. */ | |
0a2aaacc | 9684 | if (MAYBE_FP_REG_CLASS_P (rclass)) |
6982c5d4 JDA |
9685 | return true; |
9686 | ||
9687 | /* HARD_REGNO_MODE_OK places modes with sizes larger than a word | |
9688 | in specific sets of registers. Thus, we cannot allow changing | |
9689 | to a larger mode when it's larger than a word. */ | |
9690 | if (GET_MODE_SIZE (to) > UNITS_PER_WORD | |
9691 | && GET_MODE_SIZE (to) > GET_MODE_SIZE (from)) | |
9692 | return true; | |
9693 | ||
9694 | return false; | |
9695 | } | |
9696 | ||
9697 | /* Returns TRUE if it is a good idea to tie two pseudo registers | |
9698 | when one has mode MODE1 and one has mode MODE2. | |
9699 | If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2, | |
9700 | for any hard reg, then this must be FALSE for correct output. | |
9701 | ||
9702 | We should return FALSE for QImode and HImode because these modes | |
9703 | are not ok in the floating-point registers. However, this prevents | |
9704 | tieing these modes to SImode and DImode in the general registers. | |
9705 | So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and | |
9706 | CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used | |
9707 | in the floating-point registers. */ | |
9708 | ||
9709 | bool | |
9710 | pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2) | |
9711 | { | |
9712 | /* Don't tie modes in different classes. */ | |
9713 | if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2)) | |
9714 | return false; | |
9715 | ||
9716 | return true; | |
9717 | } | |
9718 | ||
e2500fed | 9719 | #include "gt-pa.h" |