1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
55
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
59
60 const struct attribute_spec arm_attribute_table[];
61
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static unsigned long thumb_compute_save_reg_mask (void);
75 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
76 static rtx emit_multi_reg_push (int);
77 static rtx emit_sfm (int, int);
78 #ifndef AOF_ASSEMBLER
79 static bool arm_assemble_integer (rtx, unsigned int, int);
80 #endif
81 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
82 static arm_cc get_arm_condition_code (rtx);
83 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
84 static rtx is_jump_table (rtx);
85 static const char *output_multi_immediate (rtx *, const char *, const char *,
86 int, HOST_WIDE_INT);
87 static void print_multi_reg (FILE *, const char *, int, int);
88 static const char *shift_op (rtx, HOST_WIDE_INT *);
89 static struct machine_function *arm_init_machine_status (void);
90 static int number_of_first_bit_set (int);
91 static void replace_symbols_in_block (tree, rtx, rtx);
92 static void thumb_exit (FILE *, int);
93 static void thumb_pushpop (FILE *, int, int, int *, int);
94 static rtx is_jump_table (rtx);
95 static HOST_WIDE_INT get_jump_table_size (rtx);
96 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
97 static Mnode *add_minipool_forward_ref (Mfix *);
98 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
99 static Mnode *add_minipool_backward_ref (Mfix *);
100 static void assign_minipool_offsets (Mfix *);
101 static void arm_print_value (FILE *, rtx);
102 static void dump_minipool (rtx);
103 static int arm_barrier_cost (rtx);
104 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
105 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
106 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
107 rtx);
108 static void arm_reorg (void);
109 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
110 static int current_file_function_operand (rtx);
111 static unsigned long arm_compute_save_reg0_reg12_mask (void);
112 static unsigned long arm_compute_save_reg_mask (void);
113 static unsigned long arm_isr_value (tree);
114 static unsigned long arm_compute_func_type (void);
115 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
116 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
117 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
118 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
119 #endif
120 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
121 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
122 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
123 static int arm_comp_type_attributes (tree, tree);
124 static void arm_set_default_type_attributes (tree);
125 static int arm_adjust_cost (rtx, rtx, rtx, int);
126 static int count_insns_for_constant (HOST_WIDE_INT, int);
127 static int arm_get_strip_length (int);
128 static bool arm_function_ok_for_sibcall (tree, tree);
129 static void arm_internal_label (FILE *, const char *, unsigned long);
130 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
131 tree);
132 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
133 static bool arm_size_rtx_costs (rtx, int, int, int *);
134 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
135 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
136 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
137 static bool arm_9e_rtx_costs (rtx, int, int, int *);
138 static int arm_address_cost (rtx);
139 static bool arm_memory_load_p (rtx);
140 static bool arm_cirrus_insn_p (rtx);
141 static void cirrus_reorg (rtx);
142 static void arm_init_builtins (void);
143 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
144 static void arm_init_iwmmxt_builtins (void);
145 static rtx safe_vector_operand (rtx, enum machine_mode);
146 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
147 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
148 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
149 static void emit_constant_insn (rtx cond, rtx pattern);
150 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
151 tree, bool);
152
153 #ifndef ARM_PE
154 static void arm_encode_section_info (tree, rtx, int);
155 #endif
156
157 static void arm_file_end (void);
158
159 #ifdef AOF_ASSEMBLER
160 static void aof_globalize_label (FILE *, const char *);
161 static void aof_dump_imports (FILE *);
162 static void aof_dump_pic_table (FILE *);
163 static void aof_file_start (void);
164 static void aof_file_end (void);
165 #endif
166 static rtx arm_struct_value_rtx (tree, int);
167 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
168 tree, int *, int);
169 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
170 enum machine_mode, tree, bool);
171 static bool arm_promote_prototypes (tree);
172 static bool arm_default_short_enums (void);
173 static bool arm_align_anon_bitfield (void);
174
175 static tree arm_cxx_guard_type (void);
176 static bool arm_cxx_guard_mask_bit (void);
177 static tree arm_get_cookie_size (tree);
178 static bool arm_cookie_has_size (void);
179 static bool arm_cxx_cdtor_returns_this (void);
180 static bool arm_cxx_key_method_may_be_inline (void);
181 static bool arm_cxx_export_class_data (void);
182 static void arm_init_libfuncs (void);
183 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
184 \f
185 /* Initialize the GCC target structure. */
186 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
187 #undef TARGET_MERGE_DECL_ATTRIBUTES
188 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
189 #endif
190
191 #undef TARGET_ATTRIBUTE_TABLE
192 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
193
194 #undef TARGET_ASM_FILE_END
195 #define TARGET_ASM_FILE_END arm_file_end
196
197 #ifdef AOF_ASSEMBLER
198 #undef TARGET_ASM_BYTE_OP
199 #define TARGET_ASM_BYTE_OP "\tDCB\t"
200 #undef TARGET_ASM_ALIGNED_HI_OP
201 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
202 #undef TARGET_ASM_ALIGNED_SI_OP
203 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
204 #undef TARGET_ASM_GLOBALIZE_LABEL
205 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
206 #undef TARGET_ASM_FILE_START
207 #define TARGET_ASM_FILE_START aof_file_start
208 #undef TARGET_ASM_FILE_END
209 #define TARGET_ASM_FILE_END aof_file_end
210 #else
211 #undef TARGET_ASM_ALIGNED_SI_OP
212 #define TARGET_ASM_ALIGNED_SI_OP NULL
213 #undef TARGET_ASM_INTEGER
214 #define TARGET_ASM_INTEGER arm_assemble_integer
215 #endif
216
217 #undef TARGET_ASM_FUNCTION_PROLOGUE
218 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
219
220 #undef TARGET_ASM_FUNCTION_EPILOGUE
221 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
222
223 #undef TARGET_COMP_TYPE_ATTRIBUTES
224 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
225
226 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
227 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
228
229 #undef TARGET_SCHED_ADJUST_COST
230 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
231
232 #undef TARGET_ENCODE_SECTION_INFO
233 #ifdef ARM_PE
234 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
235 #else
236 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
237 #endif
238
239 #undef TARGET_STRIP_NAME_ENCODING
240 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
241
242 #undef TARGET_ASM_INTERNAL_LABEL
243 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
244
245 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
246 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
247
248 #undef TARGET_ASM_OUTPUT_MI_THUNK
249 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
250 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
251 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
252
253 /* This will be overridden in arm_override_options. */
254 #undef TARGET_RTX_COSTS
255 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
256 #undef TARGET_ADDRESS_COST
257 #define TARGET_ADDRESS_COST arm_address_cost
258
259 #undef TARGET_SHIFT_TRUNCATION_MASK
260 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
261 #undef TARGET_VECTOR_MODE_SUPPORTED_P
262 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
263
264 #undef TARGET_MACHINE_DEPENDENT_REORG
265 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
266
267 #undef TARGET_INIT_BUILTINS
268 #define TARGET_INIT_BUILTINS arm_init_builtins
269 #undef TARGET_EXPAND_BUILTIN
270 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
271
272 #undef TARGET_INIT_LIBFUNCS
273 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
274
275 #undef TARGET_PROMOTE_FUNCTION_ARGS
276 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
277 #undef TARGET_PROMOTE_FUNCTION_RETURN
278 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
279 #undef TARGET_PROMOTE_PROTOTYPES
280 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
281 #undef TARGET_PASS_BY_REFERENCE
282 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
283 #undef TARGET_ARG_PARTIAL_BYTES
284 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
285
286 #undef TARGET_STRUCT_VALUE_RTX
287 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
288
289 #undef TARGET_SETUP_INCOMING_VARARGS
290 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
291
292 #undef TARGET_DEFAULT_SHORT_ENUMS
293 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
294
295 #undef TARGET_ALIGN_ANON_BITFIELD
296 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
297
298 #undef TARGET_CXX_GUARD_TYPE
299 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
300
301 #undef TARGET_CXX_GUARD_MASK_BIT
302 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
303
304 #undef TARGET_CXX_GET_COOKIE_SIZE
305 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
306
307 #undef TARGET_CXX_COOKIE_HAS_SIZE
308 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
309
310 #undef TARGET_CXX_CDTOR_RETURNS_THIS
311 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
312
313 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
314 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
315
316 #undef TARGET_CXX_EXPORT_CLASS_DATA
317 #define TARGET_CXX_EXPORT_CLASS_DATA arm_cxx_export_class_data
318
319 struct gcc_target targetm = TARGET_INITIALIZER;
320 \f
321 /* Obstack for minipool constant handling. */
322 static struct obstack minipool_obstack;
323 static char * minipool_startobj;
324
325 /* The maximum number of insns skipped which
326 will be conditionalised if possible. */
327 static int max_insns_skipped = 5;
328
329 extern FILE * asm_out_file;
330
331 /* True if we are currently building a constant table. */
332 int making_const_table;
333
334 /* Define the information needed to generate branch insns. This is
335 stored from the compare operation. */
336 rtx arm_compare_op0, arm_compare_op1;
337
338 /* The processor for which instructions should be scheduled. */
339 enum processor_type arm_tune = arm_none;
340
341 /* Which floating point model to use. */
342 enum arm_fp_model arm_fp_model;
343
344 /* Which floating point hardware is available. */
345 enum fputype arm_fpu_arch;
346
347 /* Which floating point hardware to schedule for. */
348 enum fputype arm_fpu_tune;
349
350 /* Whether to use floating point hardware. */
351 enum float_abi_type arm_float_abi;
352
353 /* Which ABI to use. */
354 enum arm_abi_type arm_abi;
355
356 /* Set by the -mfpu=... option. */
357 const char * target_fpu_name = NULL;
358
359 /* Set by the -mfpe=... option. */
360 const char * target_fpe_name = NULL;
361
362 /* Set by the -mfloat-abi=... option. */
363 const char * target_float_abi_name = NULL;
364
365 /* Set by the legacy -mhard-float and -msoft-float options. */
366 const char * target_float_switch = NULL;
367
368 /* Set by the -mabi=... option. */
369 const char * target_abi_name = NULL;
370
371 /* Used to parse -mstructure_size_boundary command line option. */
372 const char * structure_size_string = NULL;
373 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
374
375 /* Used for Thumb call_via trampolines. */
376 rtx thumb_call_via_label[13];
377 static int thumb_call_reg_needed;
378
379 /* Bit values used to identify processor capabilities. */
380 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
381 #define FL_ARCH3M (1 << 1) /* Extended multiply */
382 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
383 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
384 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
385 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
386 #define FL_THUMB (1 << 6) /* Thumb aware */
387 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
388 #define FL_STRONG (1 << 8) /* StrongARM */
389 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
390 #define FL_XSCALE (1 << 10) /* XScale */
391 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
392 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
393 media instructions. */
394 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
395
396 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
397
398 #define FL_FOR_ARCH2 0
399 #define FL_FOR_ARCH3 FL_MODE32
400 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
401 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
402 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
403 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
404 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
405 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
406 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
407 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
408 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
409 #define FL_FOR_ARCH6J FL_FOR_ARCH6
410 #define FL_FOR_ARCH6K FL_FOR_ARCH6
411 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
412 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6
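/* Illustrative note: each FL_FOR_ARCHx value accumulates the flags of the
   architecture it extends, so, for example, FL_FOR_ARCH5TE expands to
   FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB.  */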
413
414 /* The bits in this mask specify which
415 instructions we are allowed to generate. */
416 static unsigned long insn_flags = 0;
417
418 /* The bits in this mask specify which instruction scheduling options should
419 be used. */
420 static unsigned long tune_flags = 0;
421
422 /* The following are used in the arm.md file as equivalents to bits
423 in the above two flag variables. */
424
425 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
426 int arm_arch3m = 0;
427
428 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
429 int arm_arch4 = 0;
430
431 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
432 int arm_arch4t = 0;
433
434 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
435 int arm_arch5 = 0;
436
437 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
438 int arm_arch5e = 0;
439
440 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
441 int arm_arch6 = 0;
442
443 /* Nonzero if this chip can benefit from load scheduling. */
444 int arm_ld_sched = 0;
445
446 /* Nonzero if this chip is a StrongARM. */
447 int arm_is_strong = 0;
448
449 /* Nonzero if this chip is a Cirrus variant. */
450 int arm_arch_cirrus = 0;
451
452 /* Nonzero if this chip supports Intel Wireless MMX technology. */
453 int arm_arch_iwmmxt = 0;
454
455 /* Nonzero if this chip is an XScale. */
456 int arm_arch_xscale = 0;
457
458 /* Nonzero if tuning for XScale */
459 int arm_tune_xscale = 0;
460
461 /* Nonzero if this chip is an ARM6 or an ARM7. */
462 int arm_is_6_or_7 = 0;
463
464 /* Nonzero if generating Thumb instructions. */
465 int thumb_code = 0;
466
467 /* Nonzero if we should define __THUMB_INTERWORK__ in the
468 preprocessor.
469 XXX This is a bit of a hack; it's intended to help work around
470 problems in GLD, which doesn't understand that armv5t code is
471 interworking clean. */
472 int arm_cpp_interwork = 0;
473
474 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
475 must report the mode of the memory reference from PRINT_OPERAND to
476 PRINT_OPERAND_ADDRESS. */
477 enum machine_mode output_memory_reference_mode;
478
479 /* The register number to be used for the PIC offset register. */
480 const char * arm_pic_register_string = NULL;
481 int arm_pic_register = INVALID_REGNUM;
482
483 /* Set to 1 when a return insn is output; this means that the epilogue
484 is not needed. */
485 int return_used_this_function;
486
487 /* Set to 1 after arm_reorg has started. Reset at the start of
488 the next function. */
489 static int after_arm_reorg = 0;
490
491 /* The maximum number of insns to be used when loading a constant. */
492 static int arm_constant_limit = 3;
493
494 /* For an explanation of these variables, see final_prescan_insn below. */
495 int arm_ccfsm_state;
496 enum arm_cond_code arm_current_cc;
497 rtx arm_target_insn;
498 int arm_target_label;
499
500 /* The condition codes of the ARM, and the inverse function. */
501 static const char * const arm_condition_codes[] =
502 {
503 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
504 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
505 };
506
507 #define streq(string1, string2) (strcmp (string1, string2) == 0)
508 \f
509 /* Initialization code. */
510
511 struct processors
512 {
513 const char *const name;
514 enum processor_type core;
515 const char *arch;
516 const unsigned long flags;
517 bool (* rtx_costs) (rtx, int, int, int *);
518 };
519
520 /* Not all of these give usefully different compilation alternatives,
521 but there is no simple way of generalizing them. */
522 static const struct processors all_cores[] =
523 {
524 /* ARM Cores */
525 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
526 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
527 #include "arm-cores.def"
528 #undef ARM_CORE
529 {NULL, arm_none, NULL, 0, NULL}
530 };
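/* Illustrative note (the entry below is hypothetical, not taken from
   arm-cores.def): given the ARM_CORE macro above, an entry such as
       ARM_CORE ("foo", foo, 4T, FL_LDSCHED, fastmul)
   would expand to
       {"foo", arm_none, "4T", FL_LDSCHED | FL_FOR_ARCH4T, arm_fastmul_rtx_costs},
   i.e. the architecture name is stringized and its FL_FOR_ARCHx flags are
   OR'ed in automatically; the IDENT argument is unused in this expansion.  */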
531
532 static const struct processors all_architectures[] =
533 {
534 /* ARM Architectures */
535 /* We don't specify rtx_costs here as it will be figured out
536 from the core. */
537
538 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
539 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
540 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
541 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
542 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
543 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
544 implementations that support it, so we will leave it out for now. */
545 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
546 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
547 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
548 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
549 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
550 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
551 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
552 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
553 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
554 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
555 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
556 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
557 {NULL, arm_none, NULL, 0 , NULL}
558 };
559
560 /* This is a magic structure. The 'string' field is magically filled in
561 with a pointer to the value specified by the user on the command line,
562 assuming that the user has specified such a value. */
563
564 struct arm_cpu_select arm_select[] =
565 {
566 /* string name processors */
567 { NULL, "-mcpu=", all_cores },
568 { NULL, "-march=", all_architectures },
569 { NULL, "-mtune=", all_cores }
570 };
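/* Illustrative note: compiling with, say, -mcpu=xscale leaves
   arm_select[0].string pointing at "xscale"; arm_override_options below
   then looks that name up in the corresponding processors table
   (all_cores in this case).  */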
571
572
573 /* The name of the preprocessor macro to define for this architecture. */
574
575 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
576
577 struct fpu_desc
578 {
579 const char * name;
580 enum fputype fpu;
581 };
582
583
584 /* Available values for -mfpu=. */
585
586 static const struct fpu_desc all_fpus[] =
587 {
588 {"fpa", FPUTYPE_FPA},
589 {"fpe2", FPUTYPE_FPA_EMU2},
590 {"fpe3", FPUTYPE_FPA_EMU2},
591 {"maverick", FPUTYPE_MAVERICK},
592 {"vfp", FPUTYPE_VFP}
593 };
594
595
596 /* Floating point models used by the different hardware.
597 See fputype in arm.h. */
598
599 static const enum arm_fp_model fp_model_for_fpu[] =
600 {
601 /* No FP hardware. */
602 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
603 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
604 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
605 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
606 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
607 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
608 };
609
610
611 struct float_abi
612 {
613 const char * name;
614 enum float_abi_type abi_type;
615 };
616
617
618 /* Available values for -mfloat-abi=. */
619
620 static const struct float_abi all_float_abis[] =
621 {
622 {"soft", ARM_FLOAT_ABI_SOFT},
623 {"softfp", ARM_FLOAT_ABI_SOFTFP},
624 {"hard", ARM_FLOAT_ABI_HARD}
625 };
626
627
628 struct abi_name
629 {
630 const char *name;
631 enum arm_abi_type abi_type;
632 };
633
634
635 /* Available values for -mabi=. */
636
637 static const struct abi_name arm_all_abis[] =
638 {
639 {"apcs-gnu", ARM_ABI_APCS},
640 {"atpcs", ARM_ABI_ATPCS},
641 {"aapcs", ARM_ABI_AAPCS},
642 {"iwmmxt", ARM_ABI_IWMMXT}
643 };
644
645 /* Return the number of bits set in VALUE. */
646 static unsigned
647 bit_count (unsigned long value)
648 {
649 unsigned long count = 0;
650
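  /* Illustrative note: value &= value - 1 clears the lowest set bit, so the
     loop below runs once per set bit; e.g. 0x29 -> 0x28 -> 0x20 -> 0 gives 3.  */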
651 while (value)
652 {
653 count++;
654 value &= value - 1; /* Clear the least-significant set bit. */
655 }
656
657 return count;
658 }
659
660 /* Set up library functions unique to ARM. */
661
662 static void
663 arm_init_libfuncs (void)
664 {
665 /* There are no special library functions unless we are using the
666 ARM BPABI. */
667 if (!TARGET_BPABI)
668 return;
669
670 /* The functions below are described in Section 4 of the "Run-Time
671 ABI for the ARM architecture", Version 1.0. */
672
673 /* Double-precision floating-point arithmetic. Table 2. */
674 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
675 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
676 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
677 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
678 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
679
680 /* Double-precision comparisons. Table 3. */
681 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
682 set_optab_libfunc (ne_optab, DFmode, NULL);
683 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
684 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
685 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
686 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
687 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
688
689 /* Single-precision floating-point arithmetic. Table 4. */
690 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
691 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
692 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
693 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
694 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
695
696 /* Single-precision comparisons. Table 5. */
697 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
698 set_optab_libfunc (ne_optab, SFmode, NULL);
699 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
700 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
701 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
702 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
703 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
704
705 /* Floating-point to integer conversions. Table 6. */
706 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
707 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
708 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
709 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
710 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
711 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
712 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
713 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
714
715 /* Conversions between floating types. Table 7. */
716 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
717 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
718
719 /* Integer to floating-point conversions. Table 8. */
720 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
721 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
722 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
723 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
724 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
725 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
726 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
727 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
728
729 /* Long long. Table 9. */
730 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
731 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
732 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
733 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
734 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
735 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
736 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
737 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
738
739 /* Integer (32/32->32) division. \S 4.3.1. */
740 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
741 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
742
743 /* The divmod functions are designed so that they can be used for
744 plain division, even though they return both the quotient and the
745 remainder. The quotient is returned in the usual location (i.e.,
746 r0 for SImode, {r0, r1} for DImode), just as would be expected
747 for an ordinary division routine. Because the AAPCS calling
748 conventions specify that all of { r0, r1, r2, r3 } are
749 call-clobbered registers, there is no need to tell the compiler
750 explicitly that those registers are clobbered by these
751 routines. */
752 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
753 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
754 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
755 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
756 }
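/* Illustrative note: with the registrations above, a 64-bit multiply that
   cannot be expanded inline is emitted as a call to __aeabi_lmul rather than
   the default libgcc routine (__muldi3), as the BPABI run-time ABI requires.  */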
757
758 /* Fix up any incompatible options that the user has specified.
759 This has now turned into a maze. */
760 void
761 arm_override_options (void)
762 {
763 unsigned i;
764
765 /* Set up the flags based on the cpu/architecture selected by the user. */
766 for (i = ARRAY_SIZE (arm_select); i--;)
767 {
768 struct arm_cpu_select * ptr = arm_select + i;
769
770 if (ptr->string != NULL && ptr->string[0] != '\0')
771 {
772 const struct processors * sel;
773
774 for (sel = ptr->processors; sel->name != NULL; sel++)
775 if (streq (ptr->string, sel->name))
776 {
777 /* Set the architecture define. */
778 if (i != 2)
779 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
780
781 /* Determine the processor core for which we should
782 tune code-generation. */
783 if (/* -mcpu= is a sensible default. */
784 i == 0
785 /* If -march= is used, and -mcpu= has not been used,
786 assume that we should tune for a representative
787 CPU from that architecture. */
788 || i == 1
789 /* -mtune= overrides -mcpu= and -march=. */
790 || i == 2)
791 arm_tune = (enum processor_type) (sel - ptr->processors);
792
793 if (i != 2)
794 {
795 /* If we have been given an architecture and a processor
796 make sure that they are compatible. We only generate
797 a warning though, and we prefer the CPU over the
798 architecture. */
799 if (insn_flags != 0 && (insn_flags ^ sel->flags))
800 warning ("switch -mcpu=%s conflicts with -march= switch",
801 ptr->string);
802
803 insn_flags = sel->flags;
804 }
805
806 break;
807 }
808
809 if (sel->name == NULL)
810 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
811 }
812 }
813
814 /* If the user did not specify a processor, choose one for them. */
815 if (insn_flags == 0)
816 {
817 const struct processors * sel;
818 unsigned int sought;
819 enum processor_type cpu;
820
821 cpu = TARGET_CPU_DEFAULT;
822 if (cpu == arm_none)
823 {
824 #ifdef SUBTARGET_CPU_DEFAULT
825 /* Use the subtarget default CPU if none was specified by
826 configure. */
827 cpu = SUBTARGET_CPU_DEFAULT;
828 #endif
829 /* Default to ARM6. */
830 if (cpu == arm_none)
831 cpu = arm6;
832 }
833 sel = &all_cores[cpu];
834
835 insn_flags = sel->flags;
836
837 /* Now check to see if the user has specified some command line
838 switch that requires certain abilities from the cpu. */
839 sought = 0;
840
841 if (TARGET_INTERWORK || TARGET_THUMB)
842 {
843 sought |= (FL_THUMB | FL_MODE32);
844
845 /* There are no ARM processors that support both APCS-26 and
846 interworking. Therefore we force FL_MODE26 to be removed
847 from insn_flags here (if it was set), so that the search
848 below will always be able to find a compatible processor. */
849 insn_flags &= ~FL_MODE26;
850 }
851
852 if (sought != 0 && ((sought & insn_flags) != sought))
853 {
854 /* Try to locate a CPU type that supports all of the abilities
855 of the default CPU, plus the extra abilities requested by
856 the user. */
857 for (sel = all_cores; sel->name != NULL; sel++)
858 if ((sel->flags & sought) == (sought | insn_flags))
859 break;
860
861 if (sel->name == NULL)
862 {
863 unsigned current_bit_count = 0;
864 const struct processors * best_fit = NULL;
865
866 /* Ideally we would like to issue an error message here
867 saying that it was not possible to find a CPU compatible
868 with the default CPU, but which also supports the command
869 line options specified by the programmer, and so they
870 ought to use the -mcpu=<name> command line option to
871 override the default CPU type.
872
873 If we cannot find a cpu that has both the
874 characteristics of the default cpu and the given
875 command line options we scan the array again looking
876 for a best match. */
877 for (sel = all_cores; sel->name != NULL; sel++)
878 if ((sel->flags & sought) == sought)
879 {
880 unsigned count;
881
882 count = bit_count (sel->flags & insn_flags);
883
884 if (count >= current_bit_count)
885 {
886 best_fit = sel;
887 current_bit_count = count;
888 }
889 }
890
891 if (best_fit == NULL)
892 abort ();
893 else
894 sel = best_fit;
895 }
896
897 insn_flags = sel->flags;
898 }
899 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
900 if (arm_tune == arm_none)
901 arm_tune = (enum processor_type) (sel - all_cores);
902 }
903
904 /* The processor for which we should tune should now have been
905 chosen. */
906 if (arm_tune == arm_none)
907 abort ();
908
909 tune_flags = all_cores[(int)arm_tune].flags;
910 if (optimize_size)
911 targetm.rtx_costs = arm_size_rtx_costs;
912 else
913 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
914
915 /* Make sure that the processor choice does not conflict with any of the
916 other command line choices. */
917 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
918 {
919 warning ("target CPU does not support interworking" );
920 target_flags &= ~ARM_FLAG_INTERWORK;
921 }
922
923 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
924 {
925 warning ("target CPU does not support THUMB instructions");
926 target_flags &= ~ARM_FLAG_THUMB;
927 }
928
929 if (TARGET_APCS_FRAME && TARGET_THUMB)
930 {
931 /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
932 target_flags &= ~ARM_FLAG_APCS_FRAME;
933 }
934
935 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
936 from here where no function is being compiled currently. */
937 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
938 && TARGET_ARM)
939 warning ("enabling backtrace support is only meaningful when compiling for the Thumb");
940
941 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
942 warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");
943
944 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
945 warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");
946
947 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
948 {
949 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
950 target_flags |= ARM_FLAG_APCS_FRAME;
951 }
952
953 if (TARGET_POKE_FUNCTION_NAME)
954 target_flags |= ARM_FLAG_APCS_FRAME;
955
956 if (TARGET_APCS_REENT && flag_pic)
957 error ("-fpic and -mapcs-reent are incompatible");
958
959 if (TARGET_APCS_REENT)
960 warning ("APCS reentrant code not supported. Ignored");
961
962 /* If this target is normally configured to use APCS frames, warn if they
963 are turned off and debugging is turned on. */
964 if (TARGET_ARM
965 && write_symbols != NO_DEBUG
966 && !TARGET_APCS_FRAME
967 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
968 warning ("-g with -mno-apcs-frame may not give sensible debugging");
969
970 /* If stack checking is disabled, we can use r10 as the PIC register,
971 which keeps r9 available. */
972 if (flag_pic)
973 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
974
975 if (TARGET_APCS_FLOAT)
976 warning ("passing floating point arguments in fp regs not yet supported");
977
978 /* Initialize boolean versions of the flags, for use in the arm.md file. */
979 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
980 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
981 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
982 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
983 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
984 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
985 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
986 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
987
988 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
989 arm_is_strong = (tune_flags & FL_STRONG) != 0;
990 thumb_code = (TARGET_ARM == 0);
991 arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
992 && !(tune_flags & FL_ARCH4))) != 0;
993 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
994 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
995
996 /* V5 code we generate is completely interworking capable, so we turn off
997 TARGET_INTERWORK here to avoid many tests later on. */
998
999 /* XXX However, we must pass the right pre-processor defines to CPP
1000 or GLD can get confused. This is a hack. */
1001 if (TARGET_INTERWORK)
1002 arm_cpp_interwork = 1;
1003
1004 if (arm_arch5)
1005 target_flags &= ~ARM_FLAG_INTERWORK;
1006
1007 if (target_abi_name)
1008 {
1009 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1010 {
1011 if (streq (arm_all_abis[i].name, target_abi_name))
1012 {
1013 arm_abi = arm_all_abis[i].abi_type;
1014 break;
1015 }
1016 }
1017 if (i == ARRAY_SIZE (arm_all_abis))
1018 error ("invalid ABI option: -mabi=%s", target_abi_name);
1019 }
1020 else
1021 arm_abi = ARM_DEFAULT_ABI;
1022
1023 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1024 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1025
1026 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1027 error ("iwmmxt abi requires an iwmmxt capable cpu");
1028
1029 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1030 if (target_fpu_name == NULL && target_fpe_name != NULL)
1031 {
1032 if (streq (target_fpe_name, "2"))
1033 target_fpu_name = "fpe2";
1034 else if (streq (target_fpe_name, "3"))
1035 target_fpu_name = "fpe3";
1036 else
1037 error ("invalid floating point emulation option: -mfpe=%s",
1038 target_fpe_name);
1039 }
1040 if (target_fpu_name != NULL)
1041 {
1042 /* The user specified a FPU. */
1043 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1044 {
1045 if (streq (all_fpus[i].name, target_fpu_name))
1046 {
1047 arm_fpu_arch = all_fpus[i].fpu;
1048 arm_fpu_tune = arm_fpu_arch;
1049 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1050 break;
1051 }
1052 }
1053 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1054 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1055 }
1056 else
1057 {
1058 #ifdef FPUTYPE_DEFAULT
1059 /* Use the default if it is specified for this platform. */
1060 arm_fpu_arch = FPUTYPE_DEFAULT;
1061 arm_fpu_tune = FPUTYPE_DEFAULT;
1062 #else
1063 /* Pick one based on CPU type. */
1064 /* ??? Some targets assume FPA is the default.
1065 if ((insn_flags & FL_VFP) != 0)
1066 arm_fpu_arch = FPUTYPE_VFP;
1067 else
1068 */
1069 if (arm_arch_cirrus)
1070 arm_fpu_arch = FPUTYPE_MAVERICK;
1071 else
1072 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1073 #endif
1074 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1075 arm_fpu_tune = FPUTYPE_FPA;
1076 else
1077 arm_fpu_tune = arm_fpu_arch;
1078 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1079 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1080 abort ();
1081 }
1082
1083 if (target_float_abi_name != NULL)
1084 {
1085 /* The user specified a FP ABI. */
1086 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1087 {
1088 if (streq (all_float_abis[i].name, target_float_abi_name))
1089 {
1090 arm_float_abi = all_float_abis[i].abi_type;
1091 break;
1092 }
1093 }
1094 if (i == ARRAY_SIZE (all_float_abis))
1095 error ("invalid floating point abi: -mfloat-abi=%s",
1096 target_float_abi_name);
1097 }
1098 else if (target_float_switch)
1099 {
1100 /* This is a bit of a hack to avoid needing target flags for these. */
1101 if (target_float_switch[0] == 'h')
1102 arm_float_abi = ARM_FLOAT_ABI_HARD;
1103 else
1104 arm_float_abi = ARM_FLOAT_ABI_SOFT;
1105 }
1106 else
1107 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1108
1109 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1110 sorry ("-mfloat-abi=hard and VFP");
1111
1112 /* If soft-float is specified then don't use FPU. */
1113 if (TARGET_SOFT_FLOAT)
1114 arm_fpu_arch = FPUTYPE_NONE;
1115
1116 /* For arm2/3 there is no need to do any scheduling if there is only
1117 a floating point emulator, or we are doing software floating-point. */
1118 if ((TARGET_SOFT_FLOAT
1119 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1120 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1121 && (tune_flags & FL_MODE32) == 0)
1122 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1123
1124 /* Override the default structure alignment for AAPCS ABI. */
1125 if (arm_abi == ARM_ABI_AAPCS)
1126 arm_structure_size_boundary = 8;
1127
1128 if (structure_size_string != NULL)
1129 {
1130 int size = strtol (structure_size_string, NULL, 0);
1131
1132 if (size == 8 || size == 32
1133 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1134 arm_structure_size_boundary = size;
1135 else
1136 warning ("structure size boundary can only be set to %s",
1137 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1138 }
1139
1140 if (arm_pic_register_string != NULL)
1141 {
1142 int pic_register = decode_reg_name (arm_pic_register_string);
1143
1144 if (!flag_pic)
1145 warning ("-mpic-register= is useless without -fpic");
1146
1147 /* Prevent the user from choosing an obviously stupid PIC register. */
1148 else if (pic_register < 0 || call_used_regs[pic_register]
1149 || pic_register == HARD_FRAME_POINTER_REGNUM
1150 || pic_register == STACK_POINTER_REGNUM
1151 || pic_register >= PC_REGNUM)
1152 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1153 else
1154 arm_pic_register = pic_register;
1155 }
1156
1157 if (TARGET_THUMB && flag_schedule_insns)
1158 {
1159 /* Don't warn since it's on by default in -O2. */
1160 flag_schedule_insns = 0;
1161 }
1162
1163 if (optimize_size)
1164 {
1165 /* There's some dispute as to whether this should be 1 or 2. However,
1166 experiments seem to show that in pathological cases a setting of
1167 1 degrades less severely than a setting of 2. This could change if
1168 other parts of the compiler change their behavior. */
1169 arm_constant_limit = 1;
1170
1171 /* If optimizing for size, bump the number of instructions that we
1172 are prepared to conditionally execute (even on a StrongARM). */
1173 max_insns_skipped = 6;
1174 }
1175 else
1176 {
1177 /* For processors with load scheduling, it never costs more than
1178 2 cycles to load a constant, and the load scheduler may well
1179 reduce that to 1. */
1180 if (arm_ld_sched)
1181 arm_constant_limit = 1;
1182
1183 /* On XScale the longer latency of a load makes it more difficult
1184 to achieve a good schedule, so it's faster to synthesize
1185 constants that can be done in two insns. */
1186 if (arm_tune_xscale)
1187 arm_constant_limit = 2;
1188
1189 /* StrongARM has early execution of branches, so a sequence
1190 that is worth skipping is shorter. */
1191 if (arm_is_strong)
1192 max_insns_skipped = 3;
1193 }
1194
1195 /* Register global variables with the garbage collector. */
1196 arm_add_gc_roots ();
1197 }
1198
1199 static void
1200 arm_add_gc_roots (void)
1201 {
1202 gcc_obstack_init(&minipool_obstack);
1203 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1204 }
1205 \f
1206 /* A table of known ARM exception types.
1207 For use with the interrupt function attribute. */
1208
1209 typedef struct
1210 {
1211 const char *const arg;
1212 const unsigned long return_value;
1213 }
1214 isr_attribute_arg;
1215
1216 static const isr_attribute_arg isr_attribute_args [] =
1217 {
1218 { "IRQ", ARM_FT_ISR },
1219 { "irq", ARM_FT_ISR },
1220 { "FIQ", ARM_FT_FIQ },
1221 { "fiq", ARM_FT_FIQ },
1222 { "ABORT", ARM_FT_ISR },
1223 { "abort", ARM_FT_ISR },
1224 { "ABORT", ARM_FT_ISR },
1225 { "abort", ARM_FT_ISR },
1226 { "UNDEF", ARM_FT_EXCEPTION },
1227 { "undef", ARM_FT_EXCEPTION },
1228 { "SWI", ARM_FT_EXCEPTION },
1229 { "swi", ARM_FT_EXCEPTION },
1230 { NULL, ARM_FT_NORMAL }
1231 };
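/* Illustrative example of how these strings reach arm_isr_value below: a
   handler declared as
       void handler (void) __attribute__ ((interrupt ("IRQ")));
   passes "IRQ" as the attribute argument, which this table maps to
   ARM_FT_ISR.  */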
1232
1233 /* Returns the (interrupt) function type of the current
1234 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1235
1236 static unsigned long
1237 arm_isr_value (tree argument)
1238 {
1239 const isr_attribute_arg * ptr;
1240 const char * arg;
1241
1242 /* No argument - default to IRQ. */
1243 if (argument == NULL_TREE)
1244 return ARM_FT_ISR;
1245
1246 /* Get the value of the argument. */
1247 if (TREE_VALUE (argument) == NULL_TREE
1248 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1249 return ARM_FT_UNKNOWN;
1250
1251 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1252
1253 /* Check it against the list of known arguments. */
1254 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1255 if (streq (arg, ptr->arg))
1256 return ptr->return_value;
1257
1258 /* An unrecognized interrupt type. */
1259 return ARM_FT_UNKNOWN;
1260 }
1261
1262 /* Computes the type of the current function. */
1263
1264 static unsigned long
1265 arm_compute_func_type (void)
1266 {
1267 unsigned long type = ARM_FT_UNKNOWN;
1268 tree a;
1269 tree attr;
1270
1271 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1272 abort ();
1273
1274 /* Decide if the current function is volatile. Such functions
1275 never return, and many memory cycles can be saved by not storing
1276 register values that will never be needed again. This optimization
1277 was added to speed up context switching in a kernel application. */
1278 if (optimize > 0
1279 && TREE_NOTHROW (current_function_decl)
1280 && TREE_THIS_VOLATILE (current_function_decl))
1281 type |= ARM_FT_VOLATILE;
1282
1283 if (cfun->static_chain_decl != NULL)
1284 type |= ARM_FT_NESTED;
1285
1286 attr = DECL_ATTRIBUTES (current_function_decl);
1287
1288 a = lookup_attribute ("naked", attr);
1289 if (a != NULL_TREE)
1290 type |= ARM_FT_NAKED;
1291
1292 a = lookup_attribute ("isr", attr);
1293 if (a == NULL_TREE)
1294 a = lookup_attribute ("interrupt", attr);
1295
1296 if (a == NULL_TREE)
1297 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1298 else
1299 type |= arm_isr_value (TREE_VALUE (a));
1300
1301 return type;
1302 }
1303
1304 /* Returns the type of the current function. */
1305
1306 unsigned long
1307 arm_current_func_type (void)
1308 {
1309 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1310 cfun->machine->func_type = arm_compute_func_type ();
1311
1312 return cfun->machine->func_type;
1313 }
1314 \f
1315 /* Return 1 if it is possible to return using a single instruction.
1316 If SIBLING is non-null, this is a test for a return before a sibling
1317 call. SIBLING is the call insn, so we can examine its register usage. */
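/* Illustrative note: a "single instruction" return is either a plain
   MOV PC, LR (or BX LR) when no registers were pushed, or a single LDM that
   pops the saved registers straight into PC, e.g. loading {r4, r5, pc}
   instead of {r4, r5, lr}.  */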
1318
1319 int
1320 use_return_insn (int iscond, rtx sibling)
1321 {
1322 int regno;
1323 unsigned int func_type;
1324 unsigned long saved_int_regs;
1325 unsigned HOST_WIDE_INT stack_adjust;
1326 arm_stack_offsets *offsets;
1327
1328 /* Never use a return instruction before reload has run. */
1329 if (!reload_completed)
1330 return 0;
1331
1332 func_type = arm_current_func_type ();
1333
1334 /* Naked functions and volatile functions need special
1335 consideration. */
1336 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1337 return 0;
1338
1339 /* So do interrupt functions that use the frame pointer. */
1340 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1341 return 0;
1342
1343 offsets = arm_get_frame_offsets ();
1344 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1345
1346 /* As do variadic functions. */
1347 if (current_function_pretend_args_size
1348 || cfun->machine->uses_anonymous_args
1349 /* Or if the function calls __builtin_eh_return () */
1350 || current_function_calls_eh_return
1351 /* Or if the function calls alloca */
1352 || current_function_calls_alloca
1353 /* Or if there is a stack adjustment. However, if the stack pointer
1354 is saved on the stack, we can use a pre-incrementing stack load. */
1355 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1356 return 0;
1357
1358 saved_int_regs = arm_compute_save_reg_mask ();
1359
1360 /* Unfortunately, the insn
1361
1362 ldmib sp, {..., sp, ...}
1363
1364 triggers a bug on most SA-110 based devices, such that the stack
1365 pointer won't be correctly restored if the instruction takes a
1366 page fault. We work around this problem by popping r3 along with
1367 the other registers, since that is never slower than executing
1368 another instruction.
1369
1370 We test for !arm_arch5 here, because code for any architecture
1371 less than this could potentially be run on one of the buggy
1372 chips. */
1373 if (stack_adjust == 4 && !arm_arch5)
1374 {
1375 /* Validate that r3 is a call-clobbered register (always true in
1376 the default abi) ... */
1377 if (!call_used_regs[3])
1378 return 0;
1379
1380 /* ... that it isn't being used for a return value (always true
1381 until we implement return-in-regs), or for a tail-call
1382 argument ... */
1383 if (sibling)
1384 {
1385 if (GET_CODE (sibling) != CALL_INSN)
1386 abort ();
1387
1388 if (find_regno_fusage (sibling, USE, 3))
1389 return 0;
1390 }
1391
1392 /* ... and that there are no call-saved registers in r0-r2
1393 (always true in the default ABI). */
1394 if (saved_int_regs & 0x7)
1395 return 0;
1396 }
1397
1398 /* Can't be done if interworking with Thumb, and any registers have been
1399 stacked. */
1400 if (TARGET_INTERWORK && saved_int_regs != 0)
1401 return 0;
1402
1403 /* On StrongARM, conditional returns are expensive if they aren't
1404 taken and multiple registers have been stacked. */
1405 if (iscond && arm_is_strong)
1406 {
1407 /* Conditional return when just the LR is stored is a simple
1408 conditional-load instruction, so that's not expensive. */
1409 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1410 return 0;
1411
1412 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1413 return 0;
1414 }
1415
1416 /* If there are saved registers but the LR isn't saved, then we need
1417 two instructions for the return. */
1418 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1419 return 0;
1420
1421 /* Can't be done if any of the FPA regs are pushed,
1422 since this also requires an insn. */
1423 if (TARGET_HARD_FLOAT && TARGET_FPA)
1424 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1425 if (regs_ever_live[regno] && !call_used_regs[regno])
1426 return 0;
1427
1428 /* Likewise VFP regs. */
1429 if (TARGET_HARD_FLOAT && TARGET_VFP)
1430 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1431 if (regs_ever_live[regno] && !call_used_regs[regno])
1432 return 0;
1433
1434 if (TARGET_REALLY_IWMMXT)
1435 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1436 if (regs_ever_live[regno] && ! call_used_regs [regno])
1437 return 0;
1438
1439 return 1;
1440 }
1441
1442 /* Return TRUE if int I is a valid immediate ARM constant. */
1443
1444 int
1445 const_ok_for_arm (HOST_WIDE_INT i)
1446 {
1447 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1448
1449 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1450 be all zero, or all one. */
1451 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1452 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1453 != ((~(unsigned HOST_WIDE_INT) 0)
1454 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1455 return FALSE;
1456
1457 /* Fast return for 0 and powers of 2 */
1458 if ((i & (i - 1)) == 0)
1459 return TRUE;
1460
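  /* An ARM data-processing immediate is an 8-bit value rotated right by an
     even amount, so the loop below slides an 8-bit window through all sixteen
     even rotations.  Illustrative examples: 0x000003FC (0xFF << 2) and
     0xFF000000 are representable; 0x00000101 is not, since its set bits span
     nine bit positions.  */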
1461 do
1462 {
1463 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1464 return TRUE;
1465 mask =
1466 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1467 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1468 }
1469 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1470
1471 return FALSE;
1472 }
1473
1474 /* Return true if I is a valid constant for the operation CODE. */
1475 static int
1476 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1477 {
1478 if (const_ok_for_arm (i))
1479 return 1;
1480
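  /* Otherwise see whether an equivalent instruction can take a transformed
     constant instead: a PLUS of I can be done as a SUB of -I, and an AND of I
     as a BIC of ~I.  Illustrative example: an ADD of -1 is not encodable, but
     const_ok_for_op (-1, PLUS) is true because a SUB of 1 is.  */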
1481 switch (code)
1482 {
1483 case PLUS:
1484 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1485
1486 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1487 case XOR:
1488 case IOR:
1489 return 0;
1490
1491 case AND:
1492 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1493
1494 default:
1495 abort ();
1496 }
1497 }
1498
1499 /* Emit a sequence of insns to handle a large constant.
1500 CODE is the code of the operation required, it can be any of SET, PLUS,
1501 IOR, AND, XOR, MINUS;
1502 MODE is the mode in which the operation is being performed;
1503 VAL is the integer to operate on;
1504 SOURCE is the other operand (a register, or a null-pointer for SET);
1505 SUBTARGETS means it is safe to create scratch registers if that will
1506 either produce a simpler sequence, or we will want to cse the values.
1507 Return value is the number of insns emitted. */
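/* Illustrative example (hypothetical, not from a specific test case): the
   constant 0xFFF is not a valid immediate, so (SET reg 0xFFF) needs two
   instructions, e.g. a MOV of 0xF00 followed by an ORR of 0xFF;
   arm_gen_constant below searches for splits of this kind.  */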
1508
1509 int
1510 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1511 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1512 {
1513 rtx cond;
1514
1515 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1516 cond = COND_EXEC_TEST (PATTERN (insn));
1517 else
1518 cond = NULL_RTX;
1519
1520 if (subtargets || code == SET
1521 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1522 && REGNO (target) != REGNO (source)))
1523 {
1524 /* After arm_reorg has been called, we can't fix up expensive
1525 constants by pushing them into memory so we must synthesize
1526 them in-line, regardless of the cost. This is only likely to
1527 be more costly on chips that have load delay slots and we are
1528 compiling without running the scheduler (so no splitting
1529 occurred before the final instruction emission).
1530
1531 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1532 */
1533 if (!after_arm_reorg
1534 && !cond
1535 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1536 1, 0)
1537 > arm_constant_limit + (code != SET)))
1538 {
1539 if (code == SET)
1540 {
1541 /* Currently SET is the only monadic value for CODE, all
1542 the rest are dyadic. */
1543 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1544 return 1;
1545 }
1546 else
1547 {
1548 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1549
1550 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1551 /* For MINUS, the value is subtracted from, since we never
1552 have subtraction of a constant. */
1553 if (code == MINUS)
1554 emit_insn (gen_rtx_SET (VOIDmode, target,
1555 gen_rtx_MINUS (mode, temp, source)));
1556 else
1557 emit_insn (gen_rtx_SET (VOIDmode, target,
1558 gen_rtx_fmt_ee (code, mode, source, temp)));
1559 return 2;
1560 }
1561 }
1562 }
1563
1564 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1565 1);
1566 }
1567
1568 static int
1569 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1570 {
1571 HOST_WIDE_INT temp1;
1572 int num_insns = 0;
1573 do
1574 {
1575 int end;
1576
1577 if (i <= 0)
1578 i += 32;
1579 if (remainder & (3 << (i - 2)))
1580 {
1581 end = i - 8;
1582 if (end < 0)
1583 end += 32;
1584 temp1 = remainder & ((0x0ff << end)
1585 | ((i < end) ? (0xff >> (32 - end)) : 0));
1586 remainder &= ~temp1;
1587 num_insns++;
1588 i -= 6;
1589 }
1590 i -= 2;
1591 } while (remainder);
1592 return num_insns;
1593 }
1594
1595 /* Emit an instruction with the indicated PATTERN. If COND is
1596 non-NULL, conditionalize the execution of the instruction on COND
1597 being true. */
1598
1599 static void
1600 emit_constant_insn (rtx cond, rtx pattern)
1601 {
1602 if (cond)
1603 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1604 emit_insn (pattern);
1605 }
1606
1607 /* As above, but extra parameter GENERATE which, if clear, suppresses
1608 RTL generation. */
1609
1610 static int
1611 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1612 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1613 int generate)
1614 {
1615 int can_invert = 0;
1616 int can_negate = 0;
1617 int can_negate_initial = 0;
1618 int can_shift = 0;
1619 int i;
1620 int num_bits_set = 0;
1621 int set_sign_bit_copies = 0;
1622 int clear_sign_bit_copies = 0;
1623 int clear_zero_bit_copies = 0;
1624 int set_zero_bit_copies = 0;
1625 int insns = 0;
1626 unsigned HOST_WIDE_INT temp1, temp2;
1627 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1628
1629 /* Find out which operations are safe for a given CODE. Also do a quick
1630 check for degenerate cases; these can occur when DImode operations
1631 are split. */
1632 switch (code)
1633 {
1634 case SET:
1635 can_invert = 1;
1636 can_shift = 1;
1637 can_negate = 1;
1638 break;
1639
1640 case PLUS:
1641 can_negate = 1;
1642 can_negate_initial = 1;
1643 break;
1644
1645 case IOR:
1646 if (remainder == 0xffffffff)
1647 {
1648 if (generate)
1649 emit_constant_insn (cond,
1650 gen_rtx_SET (VOIDmode, target,
1651 GEN_INT (ARM_SIGN_EXTEND (val))));
1652 return 1;
1653 }
1654 if (remainder == 0)
1655 {
1656 if (reload_completed && rtx_equal_p (target, source))
1657 return 0;
1658 if (generate)
1659 emit_constant_insn (cond,
1660 gen_rtx_SET (VOIDmode, target, source));
1661 return 1;
1662 }
1663 break;
1664
1665 case AND:
1666 if (remainder == 0)
1667 {
1668 if (generate)
1669 emit_constant_insn (cond,
1670 gen_rtx_SET (VOIDmode, target, const0_rtx));
1671 return 1;
1672 }
1673 if (remainder == 0xffffffff)
1674 {
1675 if (reload_completed && rtx_equal_p (target, source))
1676 return 0;
1677 if (generate)
1678 emit_constant_insn (cond,
1679 gen_rtx_SET (VOIDmode, target, source));
1680 return 1;
1681 }
1682 can_invert = 1;
1683 break;
1684
1685 case XOR:
1686 if (remainder == 0)
1687 {
1688 if (reload_completed && rtx_equal_p (target, source))
1689 return 0;
1690 if (generate)
1691 emit_constant_insn (cond,
1692 gen_rtx_SET (VOIDmode, target, source));
1693 return 1;
1694 }
1695 if (remainder == 0xffffffff)
1696 {
1697 if (generate)
1698 emit_constant_insn (cond,
1699 gen_rtx_SET (VOIDmode, target,
1700 gen_rtx_NOT (mode, source)));
1701 return 1;
1702 }
1703
1704 /* The code below does not know how to handle this case yet. */
1705 abort ();
1706
1707 case MINUS:
1708 /* We treat MINUS as (val - source), since (source - val) is always
1709 passed as (source + (-val)). */
1710 if (remainder == 0)
1711 {
1712 if (generate)
1713 emit_constant_insn (cond,
1714 gen_rtx_SET (VOIDmode, target,
1715 gen_rtx_NEG (mode, source)));
1716 return 1;
1717 }
1718 if (const_ok_for_arm (val))
1719 {
1720 if (generate)
1721 emit_constant_insn (cond,
1722 gen_rtx_SET (VOIDmode, target,
1723 gen_rtx_MINUS (mode, GEN_INT (val),
1724 source)));
1725 return 1;
1726 }
1727 can_negate = 1;
1728
1729 break;
1730
1731 default:
1732 abort ();
1733 }
1734
1735 /* If we can do it in one insn get out quickly. */
1736 if (const_ok_for_arm (val)
1737 || (can_negate_initial && const_ok_for_arm (-val))
1738 || (can_invert && const_ok_for_arm (~val)))
1739 {
1740 if (generate)
1741 emit_constant_insn (cond,
1742 gen_rtx_SET (VOIDmode, target,
1743 (source
1744 ? gen_rtx_fmt_ee (code, mode, source,
1745 GEN_INT (val))
1746 : GEN_INT (val))));
1747 return 1;
1748 }
1749
1750 /* Calculate a few attributes that may be useful for specific
1751 optimizations. */
1752 for (i = 31; i >= 0; i--)
1753 {
1754 if ((remainder & (1 << i)) == 0)
1755 clear_sign_bit_copies++;
1756 else
1757 break;
1758 }
1759
1760 for (i = 31; i >= 0; i--)
1761 {
1762 if ((remainder & (1 << i)) != 0)
1763 set_sign_bit_copies++;
1764 else
1765 break;
1766 }
1767
1768 for (i = 0; i <= 31; i++)
1769 {
1770 if ((remainder & (1 << i)) == 0)
1771 clear_zero_bit_copies++;
1772 else
1773 break;
1774 }
1775
1776 for (i = 0; i <= 31; i++)
1777 {
1778 if ((remainder & (1 << i)) != 0)
1779 set_zero_bit_copies++;
1780 else
1781 break;
1782 }
1783
1784 switch (code)
1785 {
1786 case SET:
1787 /* See if we can do this by sign_extending a constant that is known
1788 to be negative. This is a good way of doing it, since the shift
1789 may well merge into a subsequent insn. */
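/* For example, 0xfffffe00 (23 copies of the sign bit) can be loaded
   as MOV #0x80000000 followed by an arithmetic shift right by 22.  */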
1790 if (set_sign_bit_copies > 1)
1791 {
1792 if (const_ok_for_arm
1793 (temp1 = ARM_SIGN_EXTEND (remainder
1794 << (set_sign_bit_copies - 1))))
1795 {
1796 if (generate)
1797 {
1798 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1799 emit_constant_insn (cond,
1800 gen_rtx_SET (VOIDmode, new_src,
1801 GEN_INT (temp1)));
1802 emit_constant_insn (cond,
1803 gen_ashrsi3 (target, new_src,
1804 GEN_INT (set_sign_bit_copies - 1)));
1805 }
1806 return 2;
1807 }
1808 /* For an inverted constant, we will need to set the low bits;
1809 these will be shifted out of harm's way. */
1810 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1811 if (const_ok_for_arm (~temp1))
1812 {
1813 if (generate)
1814 {
1815 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1816 emit_constant_insn (cond,
1817 gen_rtx_SET (VOIDmode, new_src,
1818 GEN_INT (temp1)));
1819 emit_constant_insn (cond,
1820 gen_ashrsi3 (target, new_src,
1821 GEN_INT (set_sign_bit_copies - 1)));
1822 }
1823 return 2;
1824 }
1825 }
1826
1827 /* See if we can generate this by setting the bottom (or the top)
1828 16 bits, and then shifting these into the other half of the
1829 word. We only look for the simplest cases; to do more would cost
1830 too much. Be careful, however, not to generate this when the
1831 alternative would take fewer insns. */
1832 if (val & 0xffff0000)
1833 {
1834 temp1 = remainder & 0xffff0000;
1835 temp2 = remainder & 0x0000ffff;
1836
1837 /* Overlaps outside this range are best done using other methods. */
1838 for (i = 9; i < 24; i++)
1839 {
1840 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1841 && !const_ok_for_arm (temp2))
1842 {
1843 rtx new_src = (subtargets
1844 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1845 : target);
1846 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1847 source, subtargets, generate);
1848 source = new_src;
1849 if (generate)
1850 emit_constant_insn
1851 (cond,
1852 gen_rtx_SET
1853 (VOIDmode, target,
1854 gen_rtx_IOR (mode,
1855 gen_rtx_ASHIFT (mode, source,
1856 GEN_INT (i)),
1857 source)));
1858 return insns + 1;
1859 }
1860 }
1861
1862 /* Don't duplicate cases already considered. */
1863 for (i = 17; i < 24; i++)
1864 {
1865 if (((temp1 | (temp1 >> i)) == remainder)
1866 && !const_ok_for_arm (temp1))
1867 {
1868 rtx new_src = (subtargets
1869 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1870 : target);
1871 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1872 source, subtargets, generate);
1873 source = new_src;
1874 if (generate)
1875 emit_constant_insn
1876 (cond,
1877 gen_rtx_SET (VOIDmode, target,
1878 gen_rtx_IOR
1879 (mode,
1880 gen_rtx_LSHIFTRT (mode, source,
1881 GEN_INT (i)),
1882 source)));
1883 return insns + 1;
1884 }
1885 }
1886 }
1887 break;
1888
1889 case IOR:
1890 case XOR:
1891 /* If we have IOR or XOR, and the constant can be loaded in a
1892 single instruction, and we can find a temporary to put it in,
1893 then this can be done in two instructions instead of 3-4. */
1894 if (subtargets
1895 /* TARGET can't be NULL if SUBTARGETS is 0. */
1896 || (reload_completed && !reg_mentioned_p (target, source)))
1897 {
1898 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1899 {
1900 if (generate)
1901 {
1902 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1903
1904 emit_constant_insn (cond,
1905 gen_rtx_SET (VOIDmode, sub,
1906 GEN_INT (val)));
1907 emit_constant_insn (cond,
1908 gen_rtx_SET (VOIDmode, target,
1909 gen_rtx_fmt_ee (code, mode,
1910 source, sub)));
1911 }
1912 return 2;
1913 }
1914 }
1915
1916 if (code == XOR)
1917 break;
1918
1919 if (set_sign_bit_copies > 8
1920 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1921 {
1922 if (generate)
1923 {
1924 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1925 rtx shift = GEN_INT (set_sign_bit_copies);
1926
1927 emit_constant_insn
1928 (cond,
1929 gen_rtx_SET (VOIDmode, sub,
1930 gen_rtx_NOT (mode,
1931 gen_rtx_ASHIFT (mode,
1932 source,
1933 shift))));
1934 emit_constant_insn
1935 (cond,
1936 gen_rtx_SET (VOIDmode, target,
1937 gen_rtx_NOT (mode,
1938 gen_rtx_LSHIFTRT (mode, sub,
1939 shift))));
1940 }
1941 return 2;
1942 }
1943
1944 if (set_zero_bit_copies > 8
1945 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1946 {
1947 if (generate)
1948 {
1949 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1950 rtx shift = GEN_INT (set_zero_bit_copies);
1951
1952 emit_constant_insn
1953 (cond,
1954 gen_rtx_SET (VOIDmode, sub,
1955 gen_rtx_NOT (mode,
1956 gen_rtx_LSHIFTRT (mode,
1957 source,
1958 shift))));
1959 emit_constant_insn
1960 (cond,
1961 gen_rtx_SET (VOIDmode, target,
1962 gen_rtx_NOT (mode,
1963 gen_rtx_ASHIFT (mode, sub,
1964 shift))));
1965 }
1966 return 2;
1967 }
1968
1969 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1970 {
1971 if (generate)
1972 {
1973 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1974 emit_constant_insn (cond,
1975 gen_rtx_SET (VOIDmode, sub,
1976 gen_rtx_NOT (mode, source)));
1977 source = sub;
1978 if (subtargets)
1979 sub = gen_reg_rtx (mode);
1980 emit_constant_insn (cond,
1981 gen_rtx_SET (VOIDmode, sub,
1982 gen_rtx_AND (mode, source,
1983 GEN_INT (temp1))));
1984 emit_constant_insn (cond,
1985 gen_rtx_SET (VOIDmode, target,
1986 gen_rtx_NOT (mode, sub)));
1987 }
1988 return 3;
1989 }
1990 break;
1991
1992 case AND:
1993 /* See if two shifts will do 2 or more insns' worth of work. */
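/* For example, (x & 0x0000ffff) can be done as a shift left by 16
   followed by a logical shift right by 16.  */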
1994 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1995 {
1996 HOST_WIDE_INT shift_mask = ((0xffffffff
1997 << (32 - clear_sign_bit_copies))
1998 & 0xffffffff);
1999
2000 if ((remainder | shift_mask) != 0xffffffff)
2001 {
2002 if (generate)
2003 {
2004 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2005 insns = arm_gen_constant (AND, mode, cond,
2006 remainder | shift_mask,
2007 new_src, source, subtargets, 1);
2008 source = new_src;
2009 }
2010 else
2011 {
2012 rtx targ = subtargets ? NULL_RTX : target;
2013 insns = arm_gen_constant (AND, mode, cond,
2014 remainder | shift_mask,
2015 targ, source, subtargets, 0);
2016 }
2017 }
2018
2019 if (generate)
2020 {
2021 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2022 rtx shift = GEN_INT (clear_sign_bit_copies);
2023
2024 emit_insn (gen_ashlsi3 (new_src, source, shift));
2025 emit_insn (gen_lshrsi3 (target, new_src, shift));
2026 }
2027
2028 return insns + 2;
2029 }
2030
2031 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2032 {
2033 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2034
2035 if ((remainder | shift_mask) != 0xffffffff)
2036 {
2037 if (generate)
2038 {
2039 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2040
2041 insns = arm_gen_constant (AND, mode, cond,
2042 remainder | shift_mask,
2043 new_src, source, subtargets, 1);
2044 source = new_src;
2045 }
2046 else
2047 {
2048 rtx targ = subtargets ? NULL_RTX : target;
2049
2050 insns = arm_gen_constant (AND, mode, cond,
2051 remainder | shift_mask,
2052 targ, source, subtargets, 0);
2053 }
2054 }
2055
2056 if (generate)
2057 {
2058 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2059 rtx shift = GEN_INT (clear_zero_bit_copies);
2060
2061 emit_insn (gen_lshrsi3 (new_src, source, shift));
2062 emit_insn (gen_ashlsi3 (target, new_src, shift));
2063 }
2064
2065 return insns + 2;
2066 }
2067
2068 break;
2069
2070 default:
2071 break;
2072 }
2073
2074 for (i = 0; i < 32; i++)
2075 if (remainder & (1 << i))
2076 num_bits_set++;
2077
2078 if (code == AND || (can_invert && num_bits_set > 16))
2079 remainder = (~remainder) & 0xffffffff;
2080 else if (code == PLUS && num_bits_set > 16)
2081 remainder = (-remainder) & 0xffffffff;
2082 else
2083 {
2084 can_invert = 0;
2085 can_negate = 0;
2086 }
2087
2088 /* Now try and find a way of doing the job in either two or three
2089 instructions.
2090 We start by looking for the largest block of zeros that are aligned on
2091 a 2-bit boundary; we then fill up the temps, wrapping around to the
2092 top of the word when we drop off the bottom.
2093 In the worst case this code should produce no more than four insns. */
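/* For example, 0xff0000ff is emitted as MOV #0xff000000 followed by
   ADD #0x000000ff (two insns), whereas 0x12345678 splits into four
   separate 8-bit chunks and so needs four insns.  */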
2094 {
2095 int best_start = 0;
2096 int best_consecutive_zeros = 0;
2097
2098 for (i = 0; i < 32; i += 2)
2099 {
2100 int consecutive_zeros = 0;
2101
2102 if (!(remainder & (3 << i)))
2103 {
2104 while ((i < 32) && !(remainder & (3 << i)))
2105 {
2106 consecutive_zeros += 2;
2107 i += 2;
2108 }
2109 if (consecutive_zeros > best_consecutive_zeros)
2110 {
2111 best_consecutive_zeros = consecutive_zeros;
2112 best_start = i - consecutive_zeros;
2113 }
2114 i -= 2;
2115 }
2116 }
2117
2118 /* So long as it won't require any more insns to do so, it's
2119 desirable to emit a small constant (in bits 0...9) in the last
2120 insn. This way there is more chance that it can be combined with
2121 a later addressing insn to form a pre-indexed load or store
2122 operation. Consider:
2123
2124 *((volatile int *)0xe0000100) = 1;
2125 *((volatile int *)0xe0000110) = 2;
2126
2127 We want this to wind up as:
2128
2129 mov rA, #0xe0000000
2130 mov rB, #1
2131 str rB, [rA, #0x100]
2132 mov rB, #2
2133 str rB, [rA, #0x110]
2134
2135 rather than having to synthesize both large constants from scratch.
2136
2137 Therefore, we calculate how many insns would be required to emit
2138 the constant starting from `best_start', and also starting from
2139 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2140 yield a shorter sequence, we may as well use zero. */
2141 if (best_start != 0
2142 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2143 && (count_insns_for_constant (remainder, 0) <=
2144 count_insns_for_constant (remainder, best_start)))
2145 best_start = 0;
2146
2147 /* Now start emitting the insns. */
2148 i = best_start;
2149 do
2150 {
2151 int end;
2152
2153 if (i <= 0)
2154 i += 32;
2155 if (remainder & (3 << (i - 2)))
2156 {
2157 end = i - 8;
2158 if (end < 0)
2159 end += 32;
2160 temp1 = remainder & ((0x0ff << end)
2161 | ((i < end) ? (0xff >> (32 - end)) : 0));
2162 remainder &= ~temp1;
2163
2164 if (generate)
2165 {
2166 rtx new_src, temp1_rtx;
2167
2168 if (code == SET || code == MINUS)
2169 {
2170 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2171 if (can_invert && code != MINUS)
2172 temp1 = ~temp1;
2173 }
2174 else
2175 {
2176 if (remainder && subtargets)
2177 new_src = gen_reg_rtx (mode);
2178 else
2179 new_src = target;
2180 if (can_invert)
2181 temp1 = ~temp1;
2182 else if (can_negate)
2183 temp1 = -temp1;
2184 }
2185
2186 temp1 = trunc_int_for_mode (temp1, mode);
2187 temp1_rtx = GEN_INT (temp1);
2188
2189 if (code == SET)
2190 ;
2191 else if (code == MINUS)
2192 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2193 else
2194 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2195
2196 emit_constant_insn (cond,
2197 gen_rtx_SET (VOIDmode, new_src,
2198 temp1_rtx));
2199 source = new_src;
2200 }
2201
2202 if (code == SET)
2203 {
2204 can_invert = 0;
2205 code = PLUS;
2206 }
2207 else if (code == MINUS)
2208 code = PLUS;
2209
2210 insns++;
2211 i -= 6;
2212 }
2213 i -= 2;
2214 }
2215 while (remainder);
2216 }
2217
2218 return insns;
2219 }
2220
2221 /* Canonicalize a comparison so that we are more likely to recognize it.
2222 This can be done for a few constant compares, where we can make the
2223 immediate value easier to load. */
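/* For example, (GT x 1023) is rewritten as (GE x 1024): 1023 needs ten
   contiguous bits and is not a valid ARM immediate, whereas 1024 is a
   single rotated 8-bit value.  */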
2224
2225 enum rtx_code
2226 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2227 {
2228 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2229
2230 switch (code)
2231 {
2232 case EQ:
2233 case NE:
2234 return code;
2235
2236 case GT:
2237 case LE:
2238 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2239 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2240 {
2241 *op1 = GEN_INT (i + 1);
2242 return code == GT ? GE : LT;
2243 }
2244 break;
2245
2246 case GE:
2247 case LT:
2248 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2249 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2250 {
2251 *op1 = GEN_INT (i - 1);
2252 return code == GE ? GT : LE;
2253 }
2254 break;
2255
2256 case GTU:
2257 case LEU:
2258 if (i != ~((unsigned HOST_WIDE_INT) 0)
2259 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2260 {
2261 *op1 = GEN_INT (i + 1);
2262 return code == GTU ? GEU : LTU;
2263 }
2264 break;
2265
2266 case GEU:
2267 case LTU:
2268 if (i != 0
2269 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2270 {
2271 *op1 = GEN_INT (i - 1);
2272 return code == GEU ? GTU : LEU;
2273 }
2274 break;
2275
2276 default:
2277 abort ();
2278 }
2279
2280 return code;
2281 }
2282
2283
2284 /* Define how to find the value returned by a function. */
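/* Integer return values narrower than a word (char, short) are promoted
   to SImode first, so they always come back occupying the whole of r0.  */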
2285
2286 rtx
2287 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2288 {
2289 enum machine_mode mode;
2290 int unsignedp ATTRIBUTE_UNUSED;
2291 rtx r ATTRIBUTE_UNUSED;
2292
2293
2294 mode = TYPE_MODE (type);
2295 /* Promote integer types. */
2296 if (INTEGRAL_TYPE_P (type))
2297 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2298 return LIBCALL_VALUE(mode);
2299 }
2300
2301 /* Determine the amount of memory needed to store the possible return
2302 registers of an untyped call. */
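/* The base figure of 16 bytes covers the four core registers r0-r3; the
   additions below account for co-processor return registers when the
   corresponding ABIs are in use.  */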
2303 int
2304 arm_apply_result_size (void)
2305 {
2306 int size = 16;
2307
2308 if (TARGET_ARM)
2309 {
2310 if (TARGET_HARD_FLOAT_ABI)
2311 {
2312 if (TARGET_FPA)
2313 size += 12;
2314 if (TARGET_MAVERICK)
2315 size += 8;
2316 }
2317 if (TARGET_IWMMXT_ABI)
2318 size += 8;
2319 }
2320
2321 return size;
2322 }
2323
2324 /* Decide whether a type should be returned in memory (true)
2325 or in a register (false). This is called by the macro
2326 RETURN_IN_MEMORY. */
2327 int
2328 arm_return_in_memory (tree type)
2329 {
2330 HOST_WIDE_INT size;
2331
2332 if (!AGGREGATE_TYPE_P (type) &&
2333 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2334 /* All simple types are returned in registers.
2335 For AAPCS, complex types are treated the same as aggregates. */
2336 return 0;
2337
2338 size = int_size_in_bytes (type);
2339
2340 if (arm_abi != ARM_ABI_APCS)
2341 {
2342 /* ATPCS and later return aggregate types in memory only if they are
2343 larger than a word (or are variable size). */
2344 return (size < 0 || size > UNITS_PER_WORD);
2345 }
2346
2347 /* For the arm-wince targets we choose to be compatible with Microsoft's
2348 ARM and Thumb compilers, which always return aggregates in memory. */
2349 #ifndef ARM_WINCE
2350 /* All structures/unions bigger than one word are returned in memory.
2351 Also catch the case where int_size_in_bytes returns -1. In this case
2352 the aggregate is either huge or of variable size, and in either case
2353 we will want to return it via memory and not in a register. */
2354 if (size < 0 || size > UNITS_PER_WORD)
2355 return 1;
2356
2357 if (TREE_CODE (type) == RECORD_TYPE)
2358 {
2359 tree field;
2360
2361 /* For a struct the APCS says that we only return in a register
2362 if the type is 'integer like' and every addressable element
2363 has an offset of zero. For practical purposes this means
2364 that the structure can have at most one non bit-field element
2365 and that this element must be the first one in the structure. */
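/* For example, struct { short a; } can be returned in r0, but
   struct { short a; short b; } has a second addressable field and so
   is returned in memory, even though it fits in a word.  */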
2366
2367 /* Find the first field, ignoring non FIELD_DECL things which will
2368 have been created by C++. */
2369 for (field = TYPE_FIELDS (type);
2370 field && TREE_CODE (field) != FIELD_DECL;
2371 field = TREE_CHAIN (field))
2372 continue;
2373
2374 if (field == NULL)
2375 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2376
2377 /* Check that the first field is valid for returning in a register. */
2378
2379 /* ... Floats are not allowed */
2380 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2381 return 1;
2382
2383 /* ... Aggregates that are not themselves valid for returning in
2384 a register are not allowed. */
2385 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2386 return 1;
2387
2388 /* Now check the remaining fields, if any. Only bitfields are allowed,
2389 since they are not addressable. */
2390 for (field = TREE_CHAIN (field);
2391 field;
2392 field = TREE_CHAIN (field))
2393 {
2394 if (TREE_CODE (field) != FIELD_DECL)
2395 continue;
2396
2397 if (!DECL_BIT_FIELD_TYPE (field))
2398 return 1;
2399 }
2400
2401 return 0;
2402 }
2403
2404 if (TREE_CODE (type) == UNION_TYPE)
2405 {
2406 tree field;
2407
2408 /* Unions can be returned in registers if every element is
2409 integral, or can be returned in an integer register. */
2410 for (field = TYPE_FIELDS (type);
2411 field;
2412 field = TREE_CHAIN (field))
2413 {
2414 if (TREE_CODE (field) != FIELD_DECL)
2415 continue;
2416
2417 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2418 return 1;
2419
2420 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2421 return 1;
2422 }
2423
2424 return 0;
2425 }
2426 #endif /* not ARM_WINCE */
2427
2428 /* Return all other types in memory. */
2429 return 1;
2430 }
2431
2432 /* Indicate whether or not words of a double are in big-endian order. */
2433
2434 int
2435 arm_float_words_big_endian (void)
2436 {
2437 if (TARGET_MAVERICK)
2438 return 0;
2439
2440 /* For FPA, float words are always big-endian. For VFP, float words
2441 follow the memory system mode. */
2442
2443 if (TARGET_FPA)
2444 {
2445 return 1;
2446 }
2447
2448 if (TARGET_VFP)
2449 return (TARGET_BIG_END ? 1 : 0);
2450
2451 return 1;
2452 }
2453
2454 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2455 for a call to a function whose data type is FNTYPE.
2456 For a library call, FNTYPE is NULL. */
2457 void
2458 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2459 rtx libname ATTRIBUTE_UNUSED,
2460 tree fndecl ATTRIBUTE_UNUSED)
2461 {
2462 /* On the ARM, the offset starts at 0. */
2463 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2464 pcum->iwmmxt_nregs = 0;
2465 pcum->can_split = true;
2466
2467 pcum->call_cookie = CALL_NORMAL;
2468
2469 if (TARGET_LONG_CALLS)
2470 pcum->call_cookie = CALL_LONG;
2471
2472 /* Check for long call/short call attributes. The attributes
2473 override any command line option. */
2474 if (fntype)
2475 {
2476 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2477 pcum->call_cookie = CALL_SHORT;
2478 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2479 pcum->call_cookie = CALL_LONG;
2480 }
2481
2482 /* Varargs vectors are treated the same as long long.
2483 named_count avoids having to change the way arm handles 'named'. */
2484 pcum->named_count = 0;
2485 pcum->nargs = 0;
2486
2487 if (TARGET_REALLY_IWMMXT && fntype)
2488 {
2489 tree fn_arg;
2490
2491 for (fn_arg = TYPE_ARG_TYPES (fntype);
2492 fn_arg;
2493 fn_arg = TREE_CHAIN (fn_arg))
2494 pcum->named_count += 1;
2495
2496 if (! pcum->named_count)
2497 pcum->named_count = INT_MAX;
2498 }
2499 }
2500
2501
2502 /* Return true if mode/type need doubleword alignment. */
2503 bool
2504 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2505 {
2506 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2507 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2508 }
2509
2510
2511 /* Determine where to put an argument to a function.
2512 Value is zero to push the argument on the stack,
2513 or a hard register in which to store the argument.
2514
2515 MODE is the argument's machine mode.
2516 TYPE is the data type of the argument (as a tree).
2517 This is null for libcalls where that information may
2518 not be available.
2519 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2520 the preceding args and about the function being called.
2521 NAMED is nonzero if this argument is a named parameter
2522 (otherwise it is an extra parameter matching an ellipsis). */
2523
2524 rtx
2525 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2526 tree type, int named)
2527 {
2528 int nregs;
2529
2530 /* Varargs vectors are treated the same as long long.
2531 named_count avoids having to change the way arm handles 'named'. */
2532 if (TARGET_IWMMXT_ABI
2533 && arm_vector_mode_supported_p (mode)
2534 && pcum->named_count > pcum->nargs + 1)
2535 {
2536 if (pcum->iwmmxt_nregs <= 9)
2537 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2538 else
2539 {
2540 pcum->can_split = false;
2541 return NULL_RTX;
2542 }
2543 }
2544
2545 /* Put doubleword aligned quantities in even register pairs. */
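/* For example, under the AAPCS a DImode argument that follows a single
   SImode argument skips r1 and is passed in r2/r3.  */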
2546 if (pcum->nregs & 1
2547 && ARM_DOUBLEWORD_ALIGN
2548 && arm_needs_doubleword_align (mode, type))
2549 pcum->nregs++;
2550
2551 if (mode == VOIDmode)
2552 /* Compute operand 2 of the call insn. */
2553 return GEN_INT (pcum->call_cookie);
2554
2555 /* Only allow splitting an arg between regs and memory if all preceding
2556 args were allocated to regs. For args passed by reference we only count
2557 the reference pointer. */
2558 if (pcum->can_split)
2559 nregs = 1;
2560 else
2561 nregs = ARM_NUM_REGS2 (mode, type);
2562
2563 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2564 return NULL_RTX;
2565
2566 return gen_rtx_REG (mode, pcum->nregs);
2567 }
2568
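/* Return the number of bytes of an argument that are passed in core
   registers when the argument must be split between registers and the
   stack; return 0 if it fits entirely in one or the other.  For example,
   a DImode argument that arrives when only r3 is free has its low word
   passed in r3 and the rest on the stack, so the result is 4.  */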
2569 static int
2570 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2571 tree type, bool named ATTRIBUTE_UNUSED)
2572 {
2573 int nregs = pcum->nregs;
2574
2575 if (arm_vector_mode_supported_p (mode))
2576 return 0;
2577
2578 if (NUM_ARG_REGS > nregs
2579 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2580 && pcum->can_split)
2581 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2582
2583 return 0;
2584 }
2585
2586 /* Variable sized types are passed by reference. This is a GCC
2587 extension to the ARM ABI. */
2588
2589 static bool
2590 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2591 enum machine_mode mode ATTRIBUTE_UNUSED,
2592 tree type, bool named ATTRIBUTE_UNUSED)
2593 {
2594 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2595 }
2596 \f
2597 /* Encode the current state of the #pragma [no_]long_calls. */
2598 typedef enum
2599 {
2600 OFF, /* No #pragma [no_]long_calls is in effect. */
2601 LONG, /* #pragma long_calls is in effect. */
2602 SHORT /* #pragma no_long_calls is in effect. */
2603 } arm_pragma_enum;
2604
2605 static arm_pragma_enum arm_pragma_long_calls = OFF;
2606
2607 void
2608 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2609 {
2610 arm_pragma_long_calls = LONG;
2611 }
2612
2613 void
2614 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2615 {
2616 arm_pragma_long_calls = SHORT;
2617 }
2618
2619 void
2620 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2621 {
2622 arm_pragma_long_calls = OFF;
2623 }
2624 \f
2625 /* Table of machine attributes. */
2626 const struct attribute_spec arm_attribute_table[] =
2627 {
2628 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2629 /* Function calls made to this symbol must be done indirectly, because
2630 it may lie outside of the 26 bit addressing range of a normal function
2631 call. */
2632 { "long_call", 0, 0, false, true, true, NULL },
2633 /* Whereas these functions are always known to reside within the 26 bit
2634 addressing range. */
2635 { "short_call", 0, 0, false, true, true, NULL },
2636 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2637 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2638 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2639 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2640 #ifdef ARM_PE
2641 /* ARM/PE has three new attributes:
2642 interfacearm - ?
2643 dllexport - for exporting a function/variable that will live in a dll
2644 dllimport - for importing a function/variable from a dll
2645
2646 Microsoft allows multiple declspecs in one __declspec, separating
2647 them with spaces. We do NOT support this. Instead, use __declspec
2648 multiple times.
2649 */
2650 { "dllimport", 0, 0, true, false, false, NULL },
2651 { "dllexport", 0, 0, true, false, false, NULL },
2652 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2653 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2654 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2655 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2656 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2657 #endif
2658 { NULL, 0, 0, false, false, false, NULL }
2659 };
2660
2661 /* Handle an attribute requiring a FUNCTION_DECL;
2662 arguments as in struct attribute_spec.handler. */
2663 static tree
2664 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2665 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2666 {
2667 if (TREE_CODE (*node) != FUNCTION_DECL)
2668 {
2669 warning ("%qs attribute only applies to functions",
2670 IDENTIFIER_POINTER (name));
2671 *no_add_attrs = true;
2672 }
2673
2674 return NULL_TREE;
2675 }
2676
2677 /* Handle an "interrupt" or "isr" attribute;
2678 arguments as in struct attribute_spec.handler. */
2679 static tree
2680 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2681 bool *no_add_attrs)
2682 {
2683 if (DECL_P (*node))
2684 {
2685 if (TREE_CODE (*node) != FUNCTION_DECL)
2686 {
2687 warning ("%qs attribute only applies to functions",
2688 IDENTIFIER_POINTER (name));
2689 *no_add_attrs = true;
2690 }
2691 /* FIXME: the argument, if any, is checked for type attributes;
2692 should it be checked for decl ones? */
2693 }
2694 else
2695 {
2696 if (TREE_CODE (*node) == FUNCTION_TYPE
2697 || TREE_CODE (*node) == METHOD_TYPE)
2698 {
2699 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2700 {
2701 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2702 *no_add_attrs = true;
2703 }
2704 }
2705 else if (TREE_CODE (*node) == POINTER_TYPE
2706 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2707 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2708 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2709 {
2710 *node = build_variant_type_copy (*node);
2711 TREE_TYPE (*node) = build_type_attribute_variant
2712 (TREE_TYPE (*node),
2713 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2714 *no_add_attrs = true;
2715 }
2716 else
2717 {
2718 /* Possibly pass this attribute on from the type to a decl. */
2719 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2720 | (int) ATTR_FLAG_FUNCTION_NEXT
2721 | (int) ATTR_FLAG_ARRAY_NEXT))
2722 {
2723 *no_add_attrs = true;
2724 return tree_cons (name, args, NULL_TREE);
2725 }
2726 else
2727 {
2728 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2729 }
2730 }
2731 }
2732
2733 return NULL_TREE;
2734 }
2735
2736 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2737 /* Handle the "notshared" attribute. This attribute is another way of
2738 requesting hidden visibility. ARM's compiler supports
2739 "__declspec(notshared)"; we support the same thing via an
2740 attribute. */
2741
2742 static tree
2743 arm_handle_notshared_attribute (tree *node,
2744 tree name ATTRIBUTE_UNUSED,
2745 tree args ATTRIBUTE_UNUSED,
2746 int flags ATTRIBUTE_UNUSED,
2747 bool *no_add_attrs)
2748 {
2749 tree decl = TYPE_NAME (*node);
2750
2751 if (decl)
2752 {
2753 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2754 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2755 *no_add_attrs = false;
2756 }
2757 return NULL_TREE;
2758 }
2759 #endif
2760
2761 /* Return 0 if the attributes for two types are incompatible, 1 if they
2762 are compatible, and 2 if they are nearly compatible (which causes a
2763 warning to be generated). */
2764 static int
2765 arm_comp_type_attributes (tree type1, tree type2)
2766 {
2767 int l1, l2, s1, s2;
2768
2769 /* Check for mismatch of non-default calling convention. */
2770 if (TREE_CODE (type1) != FUNCTION_TYPE)
2771 return 1;
2772
2773 /* Check for mismatched call attributes. */
2774 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2775 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2776 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2777 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2778
2779 /* Only bother to check if an attribute is defined. */
2780 if (l1 | l2 | s1 | s2)
2781 {
2782 /* If one type has an attribute, the other must have the same attribute. */
2783 if ((l1 != l2) || (s1 != s2))
2784 return 0;
2785
2786 /* Disallow mixed attributes. */
2787 if ((l1 & s2) || (l2 & s1))
2788 return 0;
2789 }
2790
2791 /* Check for mismatched ISR attribute. */
2792 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2793 if (! l1)
2794 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2795 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2796 if (! l2)
2797 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2798 if (l1 != l2)
2799 return 0;
2800
2801 return 1;
2802 }
2803
2804 /* Encode long_call or short_call attribute by prefixing
2805 symbol name in DECL with a special character FLAG. */
2806 void
2807 arm_encode_call_attribute (tree decl, int flag)
2808 {
2809 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2810 int len = strlen (str);
2811 char * newstr;
2812
2813 /* Do not allow weak functions to be treated as short call. */
2814 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2815 return;
2816
2817 newstr = alloca (len + 2);
2818 newstr[0] = flag;
2819 strcpy (newstr + 1, str);
2820
2821 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2822 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2823 }
2824
2825 /* Assigns default attributes to a newly defined type. This is used to
2826 set short_call/long_call attributes for function types of
2827 functions defined inside corresponding #pragma scopes. */
2828 static void
2829 arm_set_default_type_attributes (tree type)
2830 {
2831 /* Add __attribute__ ((long_call)) to all functions when inside
2832 #pragma long_calls, or __attribute__ ((short_call)) when inside
2833 #pragma no_long_calls. */
2834 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2835 {
2836 tree type_attr_list, attr_name;
2837 type_attr_list = TYPE_ATTRIBUTES (type);
2838
2839 if (arm_pragma_long_calls == LONG)
2840 attr_name = get_identifier ("long_call");
2841 else if (arm_pragma_long_calls == SHORT)
2842 attr_name = get_identifier ("short_call");
2843 else
2844 return;
2845
2846 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2847 TYPE_ATTRIBUTES (type) = type_attr_list;
2848 }
2849 }
2850 \f
2851 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2852 defined within the current compilation unit. If this cannot be
2853 determined, then 0 is returned. */
2854 static int
2855 current_file_function_operand (rtx sym_ref)
2856 {
2857 /* This is a bit of a fib. A function will have a short call flag
2858 applied to its name if it has the short call attribute, or it has
2859 already been defined within the current compilation unit. */
2860 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2861 return 1;
2862
2863 /* The current function is always defined within the current compilation
2864 unit. If it is a weak definition, however, then this may not be the real
2865 definition of the function, and so we have to say no. */
2866 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2867 && !DECL_WEAK (current_function_decl))
2868 return 1;
2869
2870 /* We cannot make the determination - default to returning 0. */
2871 return 0;
2872 }
2873
2874 /* Return nonzero if a 32 bit "long_call" should be generated for
2875 this call. We generate a long_call if the function:
2876
2877 a. has an __attribute__ ((long_call))
2878 or b. is within the scope of a #pragma long_calls
2879 or c. the -mlong-calls command line switch has been specified
2880 and either:
2881 1. -ffunction-sections is in effect
2882 or 2. the current function has __attribute__ ((section))
2883 or 3. the target function has __attribute__ ((section))
2884
2885 However we do not generate a long call if the function:
2886
2887 d. has an __attribute__ ((short_call))
2888 or e. is inside the scope of a #pragma no_long_calls
2889 or f. is defined within the current compilation unit.
2890
2891 This function will be called by C fragments contained in the machine
2892 description file. SYM_REF and CALL_COOKIE correspond to the matched
2893 rtl operands. CALL_SYMBOL is used to distinguish between
2894 two different callers of the function. It is set to 1 in the
2895 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2896 and "call_value" patterns. This is because of the difference in the
2897 SYM_REFs passed by these patterns. */
2898 int
2899 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2900 {
2901 if (!call_symbol)
2902 {
2903 if (GET_CODE (sym_ref) != MEM)
2904 return 0;
2905
2906 sym_ref = XEXP (sym_ref, 0);
2907 }
2908
2909 if (GET_CODE (sym_ref) != SYMBOL_REF)
2910 return 0;
2911
2912 if (call_cookie & CALL_SHORT)
2913 return 0;
2914
2915 if (TARGET_LONG_CALLS)
2916 {
2917 if (flag_function_sections
2918 || DECL_SECTION_NAME (current_function_decl))
2919 /* c.3 is handled by the definition of the
2920 ARM_DECLARE_FUNCTION_SIZE macro. */
2921 return 1;
2922 }
2923
2924 if (current_file_function_operand (sym_ref))
2925 return 0;
2926
2927 return (call_cookie & CALL_LONG)
2928 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2929 || TARGET_LONG_CALLS;
2930 }
2931
2932 /* Return nonzero if it is ok to make a tail-call to DECL. */
2933 static bool
2934 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2935 {
2936 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2937
2938 if (cfun->machine->sibcall_blocked)
2939 return false;
2940
2941 /* Never tailcall something for which we have no decl, or if we
2942 are in Thumb mode. */
2943 if (decl == NULL || TARGET_THUMB)
2944 return false;
2945
2946 /* Get the calling method. */
2947 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2948 call_type = CALL_SHORT;
2949 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2950 call_type = CALL_LONG;
2951
2952 /* Cannot tail-call to long calls, since these are out of range of
2953 a branch instruction. However, if not compiling PIC, we know
2954 we can reach the symbol if it is in this compilation unit. */
2955 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2956 return false;
2957
2958 /* If we are interworking and the function is not declared static
2959 then we can't tail-call it unless we know that it exists in this
2960 compilation unit (since it might be a Thumb routine). */
2961 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2962 return false;
2963
2964 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2965 if (IS_INTERRUPT (arm_current_func_type ()))
2966 return false;
2967
2968 /* Everything else is ok. */
2969 return true;
2970 }
2971
2972 \f
2973 /* Addressing mode support functions. */
2974
2975 /* Return nonzero if X is a legitimate immediate operand when compiling
2976 for PIC. */
2977 int
2978 legitimate_pic_operand_p (rtx x)
2979 {
2980 if (CONSTANT_P (x)
2981 && flag_pic
2982 && (GET_CODE (x) == SYMBOL_REF
2983 || (GET_CODE (x) == CONST
2984 && GET_CODE (XEXP (x, 0)) == PLUS
2985 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2986 return 0;
2987
2988 return 1;
2989 }
2990
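/* Legitimize ORIG, a SYMBOL_REF, LABEL_REF or CONST address, for PIC code
   generation: symbolic addresses are loaded via the GOT (or added to the
   PIC base register for local symbols), and constant offsets are handled
   recursively.  REG, if non-null, is a register that may be used to hold
   the result; MODE is the mode of the memory reference.  */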
2991 rtx
2992 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2993 {
2994 if (GET_CODE (orig) == SYMBOL_REF
2995 || GET_CODE (orig) == LABEL_REF)
2996 {
2997 #ifndef AOF_ASSEMBLER
2998 rtx pic_ref, address;
2999 #endif
3000 rtx insn;
3001 int subregs = 0;
3002
3003 if (reg == 0)
3004 {
3005 if (no_new_pseudos)
3006 abort ();
3007 else
3008 reg = gen_reg_rtx (Pmode);
3009
3010 subregs = 1;
3011 }
3012
3013 #ifdef AOF_ASSEMBLER
3014 /* The AOF assembler can generate relocations for these directly, and
3015 understands that the PIC register has to be added into the offset. */
3016 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3017 #else
3018 if (subregs)
3019 address = gen_reg_rtx (Pmode);
3020 else
3021 address = reg;
3022
3023 if (TARGET_ARM)
3024 emit_insn (gen_pic_load_addr_arm (address, orig));
3025 else
3026 emit_insn (gen_pic_load_addr_thumb (address, orig));
3027
3028 if ((GET_CODE (orig) == LABEL_REF
3029 || (GET_CODE (orig) == SYMBOL_REF &&
3030 SYMBOL_REF_LOCAL_P (orig)))
3031 && NEED_GOT_RELOC)
3032 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3033 else
3034 {
3035 pic_ref = gen_const_mem (Pmode,
3036 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3037 address));
3038 }
3039
3040 insn = emit_move_insn (reg, pic_ref);
3041 #endif
3042 current_function_uses_pic_offset_table = 1;
3043 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3044 by loop. */
3045 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3046 REG_NOTES (insn));
3047 return reg;
3048 }
3049 else if (GET_CODE (orig) == CONST)
3050 {
3051 rtx base, offset;
3052
3053 if (GET_CODE (XEXP (orig, 0)) == PLUS
3054 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3055 return orig;
3056
3057 if (reg == 0)
3058 {
3059 if (no_new_pseudos)
3060 abort ();
3061 else
3062 reg = gen_reg_rtx (Pmode);
3063 }
3064
3065 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3066 {
3067 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3068 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3069 base == reg ? 0 : reg);
3070 }
3071 else
3072 abort ();
3073
3074 if (GET_CODE (offset) == CONST_INT)
3075 {
3076 /* The base register doesn't really matter, we only want to
3077 test the index for the appropriate mode. */
3078 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3079 {
3080 if (!no_new_pseudos)
3081 offset = force_reg (Pmode, offset);
3082 else
3083 abort ();
3084 }
3085
3086 if (GET_CODE (offset) == CONST_INT)
3087 return plus_constant (base, INTVAL (offset));
3088 }
3089
3090 if (GET_MODE_SIZE (mode) > 4
3091 && (GET_MODE_CLASS (mode) == MODE_INT
3092 || TARGET_SOFT_FLOAT))
3093 {
3094 emit_insn (gen_addsi3 (reg, base, offset));
3095 return reg;
3096 }
3097
3098 return gen_rtx_PLUS (Pmode, base, offset);
3099 }
3100
3101 return orig;
3102 }
3103
3104
3105 /* Find a spare low register. */
3106
3107 static int
3108 thumb_find_work_register (int live_regs_mask)
3109 {
3110 int reg;
3111
3112 /* Use a spare arg register. */
3113 if (!regs_ever_live[LAST_ARG_REGNUM])
3114 return LAST_ARG_REGNUM;
3115
3116 /* Look for a pushed register. This is used before the frame pointer is
3117 set up, so r7 is a candidate. */
3118 for (reg = LAST_LO_REGNUM; reg >=0; reg--)
3119 if (live_regs_mask & (1 << reg))
3120 return reg;
3121
3122 /* Something went wrong. */
3123 abort ();
3124 }
3125
3126
3127 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3128 low register. */
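/* The sequence emitted for ARM state is roughly:

       ldr     rPIC, .LCn       @ .LCn: _GLOBAL_OFFSET_TABLE_ - (.LPICm + 8)
   .LPICm:
       add     rPIC, pc, rPIC

   which leaves the address of the GOT in the PIC register.  */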
3129
3130 void
3131 arm_load_pic_register (unsigned int scratch)
3132 {
3133 #ifndef AOF_ASSEMBLER
3134 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3135 rtx global_offset_table;
3136
3137 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3138 return;
3139
3140 if (!flag_pic)
3141 abort ();
3142
3143 l1 = gen_label_rtx ();
3144
3145 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3146 /* On the ARM the PC register contains 'dot + 8' at the time of the
3147 addition, on the Thumb it is 'dot + 4'. */
3148 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3149 if (GOT_PCREL)
3150 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3151 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3152 else
3153 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3154
3155 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3156
3157 if (TARGET_ARM)
3158 {
3159 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3160 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3161 }
3162 else
3163 {
3164 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3165 {
3166 /* We will have pushed the pic register, so we should always be
3167 able to find a work register. */
3168 pic_tmp = gen_rtx_REG (SImode, scratch);
3169 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3170 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3171 }
3172 else
3173 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3174 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3175 }
3176
3177 /* Need to emit this whether or not we obey regdecls,
3178 since setjmp/longjmp can cause life info to screw up. */
3179 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3180 #endif /* AOF_ASSEMBLER */
3181 }
3182
3183
3184 /* Return nonzero if X is valid as an ARM state addressing register. */
3185 static int
3186 arm_address_register_rtx_p (rtx x, int strict_p)
3187 {
3188 int regno;
3189
3190 if (GET_CODE (x) != REG)
3191 return 0;
3192
3193 regno = REGNO (x);
3194
3195 if (strict_p)
3196 return ARM_REGNO_OK_FOR_BASE_P (regno);
3197
3198 return (regno <= LAST_ARM_REGNUM
3199 || regno >= FIRST_PSEUDO_REGISTER
3200 || regno == FRAME_POINTER_REGNUM
3201 || regno == ARG_POINTER_REGNUM);
3202 }
3203
3204 /* Return nonzero if X is a valid ARM state address operand. */
3205 int
3206 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3207 int strict_p)
3208 {
3209 bool use_ldrd;
3210 enum rtx_code code = GET_CODE (x);
3211
3212 if (arm_address_register_rtx_p (x, strict_p))
3213 return 1;
3214
3215 use_ldrd = (TARGET_LDRD
3216 && (mode == DImode
3217 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3218
3219 if (code == POST_INC || code == PRE_DEC
3220 || ((code == PRE_INC || code == POST_DEC)
3221 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3222 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3223
3224 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3225 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3226 && GET_CODE (XEXP (x, 1)) == PLUS
3227 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3228 {
3229 rtx addend = XEXP (XEXP (x, 1), 1);
3230
3231 /* Don't allow ldrd post-increment by register because it's hard
3232 to fix up invalid register choices. */
3233 if (use_ldrd
3234 && GET_CODE (x) == POST_MODIFY
3235 && GET_CODE (addend) == REG)
3236 return 0;
3237
3238 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3239 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3240 }
3241
3242 /* After reload constants split into minipools will have addresses
3243 from a LABEL_REF. */
3244 else if (reload_completed
3245 && (code == LABEL_REF
3246 || (code == CONST
3247 && GET_CODE (XEXP (x, 0)) == PLUS
3248 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3249 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3250 return 1;
3251
3252 else if (mode == TImode)
3253 return 0;
3254
3255 else if (code == PLUS)
3256 {
3257 rtx xop0 = XEXP (x, 0);
3258 rtx xop1 = XEXP (x, 1);
3259
3260 return ((arm_address_register_rtx_p (xop0, strict_p)
3261 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3262 || (arm_address_register_rtx_p (xop1, strict_p)
3263 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3264 }
3265
3266 #if 0
3267 /* Reload currently can't handle MINUS, so disable this for now */
3268 else if (GET_CODE (x) == MINUS)
3269 {
3270 rtx xop0 = XEXP (x, 0);
3271 rtx xop1 = XEXP (x, 1);
3272
3273 return (arm_address_register_rtx_p (xop0, strict_p)
3274 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3275 }
3276 #endif
3277
3278 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3279 && code == SYMBOL_REF
3280 && CONSTANT_POOL_ADDRESS_P (x)
3281 && ! (flag_pic
3282 && symbol_mentioned_p (get_pool_constant (x))))
3283 return 1;
3284
3285 return 0;
3286 }
3287
3288 /* Return nonzero if INDEX is valid for an address index operand in
3289 ARM state. */
3290 static int
3291 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3292 int strict_p)
3293 {
3294 HOST_WIDE_INT range;
3295 enum rtx_code code = GET_CODE (index);
3296
3297 /* Standard coprocessor addressing modes. */
3298 if (TARGET_HARD_FLOAT
3299 && (TARGET_FPA || TARGET_MAVERICK)
3300 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3301 || (TARGET_MAVERICK && mode == DImode)))
3302 return (code == CONST_INT && INTVAL (index) < 1024
3303 && INTVAL (index) > -1024
3304 && (INTVAL (index) & 3) == 0);
3305
3306 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3307 return (code == CONST_INT
3308 && INTVAL (index) < 1024
3309 && INTVAL (index) > -1024
3310 && (INTVAL (index) & 3) == 0);
3311
3312 if (arm_address_register_rtx_p (index, strict_p)
3313 && (GET_MODE_SIZE (mode) <= 4))
3314 return 1;
3315
3316 if (mode == DImode || mode == DFmode)
3317 {
3318 if (code == CONST_INT)
3319 {
3320 HOST_WIDE_INT val = INTVAL (index);
3321
3322 if (TARGET_LDRD)
3323 return val > -256 && val < 256;
3324 else
3325 return val > -4096 && val < 4092;
3326 }
3327
3328 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3329 }
3330
3331 if (GET_MODE_SIZE (mode) <= 4
3332 && ! (arm_arch4
3333 && (mode == HImode
3334 || (mode == QImode && outer == SIGN_EXTEND))))
3335 {
3336 if (code == MULT)
3337 {
3338 rtx xiop0 = XEXP (index, 0);
3339 rtx xiop1 = XEXP (index, 1);
3340
3341 return ((arm_address_register_rtx_p (xiop0, strict_p)
3342 && power_of_two_operand (xiop1, SImode))
3343 || (arm_address_register_rtx_p (xiop1, strict_p)
3344 && power_of_two_operand (xiop0, SImode)));
3345 }
3346 else if (code == LSHIFTRT || code == ASHIFTRT
3347 || code == ASHIFT || code == ROTATERT)
3348 {
3349 rtx op = XEXP (index, 1);
3350
3351 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3352 && GET_CODE (op) == CONST_INT
3353 && INTVAL (op) > 0
3354 && INTVAL (op) <= 31);
3355 }
3356 }
3357
3358 /* For ARM v4 we may be doing a sign-extend operation during the
3359 load. */
3360 if (arm_arch4)
3361 {
3362 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3363 range = 256;
3364 else
3365 range = 4096;
3366 }
3367 else
3368 range = (mode == HImode) ? 4095 : 4096;
3369
3370 return (code == CONST_INT
3371 && INTVAL (index) < range
3372 && INTVAL (index) > -range);
3373 }
3374
3375 /* Return nonzero if X is valid as a Thumb state base register. */
3376 static int
3377 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3378 {
3379 int regno;
3380
3381 if (GET_CODE (x) != REG)
3382 return 0;
3383
3384 regno = REGNO (x);
3385
3386 if (strict_p)
3387 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3388
3389 return (regno <= LAST_LO_REGNUM
3390 || regno > LAST_VIRTUAL_REGISTER
3391 || regno == FRAME_POINTER_REGNUM
3392 || (GET_MODE_SIZE (mode) >= 4
3393 && (regno == STACK_POINTER_REGNUM
3394 || regno >= FIRST_PSEUDO_REGISTER
3395 || x == hard_frame_pointer_rtx
3396 || x == arg_pointer_rtx)));
3397 }
3398
3399 /* Return nonzero if x is a legitimate index register. This is the case
3400 for any base register that can access a QImode object. */
3401 inline static int
3402 thumb_index_register_rtx_p (rtx x, int strict_p)
3403 {
3404 return thumb_base_register_rtx_p (x, QImode, strict_p);
3405 }
3406
3407 /* Return nonzero if x is a legitimate Thumb-state address.
3408
3409 The AP may be eliminated to either the SP or the FP, so we use the
3410 least common denominator, e.g. SImode, and offsets from 0 to 64.
3411
3412 ??? Verify whether the above is the right approach.
3413
3414 ??? Also, the FP may be eliminated to the SP, so perhaps that
3415 needs special handling also.
3416
3417 ??? Look at how the mips16 port solves this problem. It probably uses
3418 better ways to solve some of these problems.
3419
3420 Although it is not incorrect, we don't accept QImode and HImode
3421 addresses based on the frame pointer or arg pointer until the
3422 reload pass starts. This is so that eliminating such addresses
3423 into stack based ones won't produce impossible code. */
3424 int
3425 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3426 {
3427 /* ??? Not clear if this is right. Experiment. */
3428 if (GET_MODE_SIZE (mode) < 4
3429 && !(reload_in_progress || reload_completed)
3430 && (reg_mentioned_p (frame_pointer_rtx, x)
3431 || reg_mentioned_p (arg_pointer_rtx, x)
3432 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3433 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3434 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3435 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3436 return 0;
3437
3438 /* Accept any base register. SP only in SImode or larger. */
3439 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3440 return 1;
3441
3442 /* This is PC relative data before arm_reorg runs. */
3443 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3444 && GET_CODE (x) == SYMBOL_REF
3445 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3446 return 1;
3447
3448 /* This is PC relative data after arm_reorg runs. */
3449 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3450 && (GET_CODE (x) == LABEL_REF
3451 || (GET_CODE (x) == CONST
3452 && GET_CODE (XEXP (x, 0)) == PLUS
3453 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3454 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3455 return 1;
3456
3457 /* Post-inc indexing only supported for SImode and larger. */
3458 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3459 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3460 return 1;
3461
3462 else if (GET_CODE (x) == PLUS)
3463 {
3464 /* REG+REG address can be any two index registers. */
3465 /* We disallow FRAME+REG addressing since we know that FRAME
3466 will be replaced with STACK, and SP relative addressing only
3467 permits SP+OFFSET. */
3468 if (GET_MODE_SIZE (mode) <= 4
3469 && XEXP (x, 0) != frame_pointer_rtx
3470 && XEXP (x, 1) != frame_pointer_rtx
3471 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3472 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3473 return 1;
3474
3475 /* REG+const has 5-7 bit offset for non-SP registers. */
3476 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3477 || XEXP (x, 0) == arg_pointer_rtx)
3478 && GET_CODE (XEXP (x, 1)) == CONST_INT
3479 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3480 return 1;
3481
3482 /* REG+const has 10 bit offset for SP, but only SImode and
3483 larger is supported. */
3484 /* ??? Should probably check for DI/DFmode overflow here
3485 just like GO_IF_LEGITIMATE_OFFSET does. */
3486 else if (GET_CODE (XEXP (x, 0)) == REG
3487 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3488 && GET_MODE_SIZE (mode) >= 4
3489 && GET_CODE (XEXP (x, 1)) == CONST_INT
3490 && INTVAL (XEXP (x, 1)) >= 0
3491 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3492 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3493 return 1;
3494
3495 else if (GET_CODE (XEXP (x, 0)) == REG
3496 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3497 && GET_MODE_SIZE (mode) >= 4
3498 && GET_CODE (XEXP (x, 1)) == CONST_INT
3499 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3500 return 1;
3501 }
3502
3503 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3504 && GET_MODE_SIZE (mode) == 4
3505 && GET_CODE (x) == SYMBOL_REF
3506 && CONSTANT_POOL_ADDRESS_P (x)
3507 && !(flag_pic
3508 && symbol_mentioned_p (get_pool_constant (x))))
3509 return 1;
3510
3511 return 0;
3512 }
3513
3514 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3515 instruction of mode MODE. */
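/* These ranges match the Thumb load/store offset fields: ldrb/strb allow
   byte offsets 0-31, ldrh/strh even offsets up to 62, and ldr/str
   word-aligned offsets up to 124.  */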
3516 int
3517 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3518 {
3519 switch (GET_MODE_SIZE (mode))
3520 {
3521 case 1:
3522 return val >= 0 && val < 32;
3523
3524 case 2:
3525 return val >= 0 && val < 64 && (val & 1) == 0;
3526
3527 default:
3528 return (val >= 0
3529 && (val + GET_MODE_SIZE (mode)) <= 128
3530 && (val & 3) == 0);
3531 }
3532 }
3533
3534 /* Try machine-dependent ways of modifying an illegitimate address
3535 to be legitimate. If we find one, return the new, valid address. */
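/* For example, in SImode an out-of-range (reg + 0x1234) is rewritten as
   ((reg + 0x1000) + 0x234) so that the residual offset fits the 12-bit
   immediate field of a load or store.  */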
3536 rtx
3537 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3538 {
3539 if (GET_CODE (x) == PLUS)
3540 {
3541 rtx xop0 = XEXP (x, 0);
3542 rtx xop1 = XEXP (x, 1);
3543
3544 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3545 xop0 = force_reg (SImode, xop0);
3546
3547 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3548 xop1 = force_reg (SImode, xop1);
3549
3550 if (ARM_BASE_REGISTER_RTX_P (xop0)
3551 && GET_CODE (xop1) == CONST_INT)
3552 {
3553 HOST_WIDE_INT n, low_n;
3554 rtx base_reg, val;
3555 n = INTVAL (xop1);
3556
3557 /* VFP addressing modes actually allow greater offsets, but for
3558 now we just stick with the lowest common denominator. */
3559 if (mode == DImode
3560 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3561 {
3562 low_n = n & 0x0f;
3563 n &= ~0x0f;
3564 if (low_n > 4)
3565 {
3566 n += 16;
3567 low_n -= 16;
3568 }
3569 }
3570 else
3571 {
3572 low_n = ((mode) == TImode ? 0
3573 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3574 n -= low_n;
3575 }
3576
3577 base_reg = gen_reg_rtx (SImode);
3578 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3579 GEN_INT (n)), NULL_RTX);
3580 emit_move_insn (base_reg, val);
3581 x = (low_n == 0 ? base_reg
3582 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3583 }
3584 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3585 x = gen_rtx_PLUS (SImode, xop0, xop1);
3586 }
3587
3588 /* XXX We don't allow MINUS any more -- see comment in
3589 arm_legitimate_address_p (). */
3590 else if (GET_CODE (x) == MINUS)
3591 {
3592 rtx xop0 = XEXP (x, 0);
3593 rtx xop1 = XEXP (x, 1);
3594
3595 if (CONSTANT_P (xop0))
3596 xop0 = force_reg (SImode, xop0);
3597
3598 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3599 xop1 = force_reg (SImode, xop1);
3600
3601 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3602 x = gen_rtx_MINUS (SImode, xop0, xop1);
3603 }
3604
3605 if (flag_pic)
3606 {
3607 /* We need to find and carefully transform any SYMBOL and LABEL
3608 references; so go back to the original address expression. */
3609 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3610
3611 if (new_x != orig_x)
3612 x = new_x;
3613 }
3614
3615 return x;
3616 }
3617
3618
3619 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3620 to be legitimate. If we find one, return the new, valid address. */
3621 rtx
3622 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3623 {
3624 if (GET_CODE (x) == PLUS
3625 && GET_CODE (XEXP (x, 1)) == CONST_INT
3626 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3627 || INTVAL (XEXP (x, 1)) < 0))
3628 {
3629 rtx xop0 = XEXP (x, 0);
3630 rtx xop1 = XEXP (x, 1);
3631 HOST_WIDE_INT offset = INTVAL (xop1);
3632
3633 /* Try and fold the offset into a biasing of the base register and
3634 then offsetting that. Don't do this when optimizing for space
3635 since it can cause too many CSEs. */
3636 if (optimize_size && offset >= 0
3637 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3638 {
3639 HOST_WIDE_INT delta;
3640
3641 if (offset >= 256)
3642 delta = offset - (256 - GET_MODE_SIZE (mode));
3643 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3644 delta = 31 * GET_MODE_SIZE (mode);
3645 else
3646 delta = offset & (~31 * GET_MODE_SIZE (mode));
3647
3648 xop0 = force_operand (plus_constant (xop0, offset - delta),
3649 NULL_RTX);
3650 x = plus_constant (xop0, delta);
3651 }
3652 else if (offset < 0 && offset > -256)
3653 /* Small negative offsets are best done with a subtract before the
3654 dereference, forcing these into a register normally takes two
3655 instructions. */
3656 x = force_operand (x, NULL_RTX);
3657 else
3658 {
3659 /* For the remaining cases, force the constant into a register. */
3660 xop1 = force_reg (SImode, xop1);
3661 x = gen_rtx_PLUS (SImode, xop0, xop1);
3662 }
3663 }
3664 else if (GET_CODE (x) == PLUS
3665 && s_register_operand (XEXP (x, 1), SImode)
3666 && !s_register_operand (XEXP (x, 0), SImode))
3667 {
3668 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3669
3670 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3671 }
3672
3673 if (flag_pic)
3674 {
3675 /* We need to find and carefully transform any SYMBOL and LABEL
3676 references; so go back to the original address expression. */
3677 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3678
3679 if (new_x != orig_x)
3680 x = new_x;
3681 }
3682
3683 return x;
3684 }
3685
3686 \f
3687
3688 #define REG_OR_SUBREG_REG(X) \
3689 (GET_CODE (X) == REG \
3690 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3691
3692 #define REG_OR_SUBREG_RTX(X) \
3693 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3694
3695 #ifndef COSTS_N_INSNS
3696 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3697 #endif
3698 static inline int
3699 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3700 {
3701 enum machine_mode mode = GET_MODE (x);
3702
3703 switch (code)
3704 {
3705 case ASHIFT:
3706 case ASHIFTRT:
3707 case LSHIFTRT:
3708 case ROTATERT:
3709 case PLUS:
3710 case MINUS:
3711 case COMPARE:
3712 case NEG:
3713 case NOT:
3714 return COSTS_N_INSNS (1);
3715
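 /* A multiply by a constant is costed at COSTS_N_INSNS (2) plus one
 cycle for every two bits of the constant; e.g. a multiply by 100
 (seven significant bits) costs COSTS_N_INSNS (2) + 4.  */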
3716 case MULT:
3717 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3718 {
3719 int cycles = 0;
3720 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3721
3722 while (i)
3723 {
3724 i >>= 2;
3725 cycles++;
3726 }
3727 return COSTS_N_INSNS (2) + cycles;
3728 }
3729 return COSTS_N_INSNS (1) + 16;
3730
3731 case SET:
3732 return (COSTS_N_INSNS (1)
3733 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3734 + (GET_CODE (SET_DEST (x)) == MEM)));
3735
3736 case CONST_INT:
3737 if (outer == SET)
3738 {
3739 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3740 return 0;
3741 if (thumb_shiftable_const (INTVAL (x)))
3742 return COSTS_N_INSNS (2);
3743 return COSTS_N_INSNS (3);
3744 }
3745 else if ((outer == PLUS || outer == COMPARE)
3746 && INTVAL (x) < 256 && INTVAL (x) > -256)
3747 return 0;
3748 else if (outer == AND
3749 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3750 return COSTS_N_INSNS (1);
3751 else if (outer == ASHIFT || outer == ASHIFTRT
3752 || outer == LSHIFTRT)
3753 return 0;
3754 return COSTS_N_INSNS (2);
3755
3756 case CONST:
3757 case CONST_DOUBLE:
3758 case LABEL_REF:
3759 case SYMBOL_REF:
3760 return COSTS_N_INSNS (3);
3761
3762 case UDIV:
3763 case UMOD:
3764 case DIV:
3765 case MOD:
3766 return 100;
3767
3768 case TRUNCATE:
3769 return 99;
3770
3771 case AND:
3772 case XOR:
3773 case IOR:
3774 /* XXX guess. */
3775 return 8;
3776
3777 case MEM:
3778 /* XXX another guess. */
3779 /* Memory costs quite a lot for the first word, but subsequent words
3780 load at the equivalent of a single insn each. */
3781 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3782 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3783 ? 4 : 0));
3784
3785 case IF_THEN_ELSE:
3786 /* XXX a guess. */
3787 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3788 return 14;
3789 return 2;
3790
3791 case ZERO_EXTEND:
3792 /* XXX still guessing. */
3793 switch (GET_MODE (XEXP (x, 0)))
3794 {
3795 case QImode:
3796 return (1 + (mode == DImode ? 4 : 0)
3797 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3798
3799 case HImode:
3800 return (4 + (mode == DImode ? 4 : 0)
3801 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3802
3803 case SImode:
3804 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3805
3806 default:
3807 return 99;
3808 }
3809
3810 default:
3811 return 99;
3812 }
3813 }
3814
3815
3816 /* Worker routine for arm_rtx_costs. */
3817 static inline int
3818 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3819 {
3820 enum machine_mode mode = GET_MODE (x);
3821 enum rtx_code subcode;
3822 int extra_cost;
3823
3824 switch (code)
3825 {
3826 case MEM:
3827 /* Memory costs quite a lot for the first word, but subsequent words
3828 load at the equivalent of a single insn each. */
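 /* Under this model an SImode load costs 10 and a DImode load 14,
 plus a further 4 when the address is a constant-pool reference.  */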
3829 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3830 + (GET_CODE (x) == SYMBOL_REF
3831 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3832
3833 case DIV:
3834 case MOD:
3835 case UDIV:
3836 case UMOD:
3837 return optimize_size ? COSTS_N_INSNS (2) : 100;
3838
3839 case ROTATE:
3840 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3841 return 4;
3842 /* Fall through */
3843 case ROTATERT:
3844 if (mode != SImode)
3845 return 8;
3846 /* Fall through */
3847 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3848 if (mode == DImode)
3849 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3850 + ((GET_CODE (XEXP (x, 0)) == REG
3851 || (GET_CODE (XEXP (x, 0)) == SUBREG
3852 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3853 ? 0 : 8));
3854 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3855 || (GET_CODE (XEXP (x, 0)) == SUBREG
3856 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3857 ? 0 : 4)
3858 + ((GET_CODE (XEXP (x, 1)) == REG
3859 || (GET_CODE (XEXP (x, 1)) == SUBREG
3860 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3861 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3862 ? 0 : 4));
3863
3864 case MINUS:
3865 if (mode == DImode)
3866 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3867 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3868 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3869 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3870 ? 0 : 8));
3871
3872 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3873 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3874 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3875 && arm_const_double_rtx (XEXP (x, 1))))
3876 ? 0 : 8)
3877 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3878 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3879 && arm_const_double_rtx (XEXP (x, 0))))
3880 ? 0 : 8));
3881
3882 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3883 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3884 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3885 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3886 || subcode == ASHIFTRT || subcode == LSHIFTRT
3887 || subcode == ROTATE || subcode == ROTATERT
3888 || (subcode == MULT
3889 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3890 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3891 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3892 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3893 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3894 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3895 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3896 return 1;
3897 /* Fall through */
3898
3899 case PLUS:
3900 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3901 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3902 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3903 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3904 && arm_const_double_rtx (XEXP (x, 1))))
3905 ? 0 : 8));
3906
3907 /* Fall through */
3908 case AND: case XOR: case IOR:
3909 extra_cost = 0;
3910
3911 /* Normally the frame registers will be split into reg+const during
3912 reload, so it is a bad idea to combine them with other instructions,
3913 since then they might not be moved outside of loops. As a compromise
3914 we allow integration with ops that have a constant as their second
3915 operand. */
3916 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3917 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3918 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3919 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3920 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3921 extra_cost = 4;
3922
3923 if (mode == DImode)
3924 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3925 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3926 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3927 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3928 ? 0 : 8));
3929
3930 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3931 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3932 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3933 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3934 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3935 ? 0 : 4));
3936
3937 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3938 return (1 + extra_cost
3939 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3940 || subcode == LSHIFTRT || subcode == ASHIFTRT
3941 || subcode == ROTATE || subcode == ROTATERT
3942 || (subcode == MULT
3943 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3944 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3945 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3946 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3947 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3948 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3949 ? 0 : 4));
3950
3951 return 8;
3952
3953 case MULT:
3954 /* This should have been handled by the CPU specific routines. */
3955 abort ();
3956
3957 case TRUNCATE:
3958 if (arm_arch3m && mode == SImode
3959 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3960 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3961 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3962 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3963 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3964 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3965 return 8;
3966 return 99;
3967
3968 case NEG:
3969 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3970 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3971 /* Fall through */
3972 case NOT:
3973 if (mode == DImode)
3974 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3975
3976 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3977
3978 case IF_THEN_ELSE:
3979 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3980 return 14;
3981 return 2;
3982
3983 case COMPARE:
3984 return 1;
3985
3986 case ABS:
3987 return 4 + (mode == DImode ? 4 : 0);
3988
3989 case SIGN_EXTEND:
3990 if (GET_MODE (XEXP (x, 0)) == QImode)
3991 return (4 + (mode == DImode ? 4 : 0)
3992 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3993 /* Fall through */
3994 case ZERO_EXTEND:
3995 switch (GET_MODE (XEXP (x, 0)))
3996 {
3997 case QImode:
3998 return (1 + (mode == DImode ? 4 : 0)
3999 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4000
4001 case HImode:
4002 return (4 + (mode == DImode ? 4 : 0)
4003 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4004
4005 case SImode:
4006 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4007
4008 case V8QImode:
4009 case V4HImode:
4010 case V2SImode:
4011 case V4QImode:
4012 case V2HImode:
4013 return 1;
4014
4015 default:
4016 break;
4017 }
4018 abort ();
4019
4020 case CONST_INT:
4021 if (const_ok_for_arm (INTVAL (x)))
4022 return outer == SET ? 2 : -1;
4023 else if (outer == AND
4024 && const_ok_for_arm (~INTVAL (x)))
4025 return -1;
4026 else if ((outer == COMPARE
4027 || outer == PLUS || outer == MINUS)
4028 && const_ok_for_arm (-INTVAL (x)))
4029 return -1;
4030 else
4031 return 5;
4032
4033 case CONST:
4034 case LABEL_REF:
4035 case SYMBOL_REF:
4036 return 6;
4037
4038 case CONST_DOUBLE:
4039 if (arm_const_double_rtx (x))
4040 return outer == SET ? 2 : -1;
4041 else if ((outer == COMPARE || outer == PLUS)
4042 && neg_const_double_rtx_ok_for_fpa (x))
4043 return -1;
4044 return 7;
4045
4046 default:
4047 return 99;
4048 }
4049 }
4050
4051 /* RTX costs when optimizing for size. */
4052 static bool
4053 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4054 {
4055 enum machine_mode mode = GET_MODE (x);
4056
4057 if (TARGET_THUMB)
4058 {
4059 /* XXX TBD. For now, use the standard costs. */
4060 *total = thumb_rtx_costs (x, code, outer_code);
4061 return true;
4062 }
4063
4064 switch (code)
4065 {
4066 case MEM:
4067 /* A memory access costs 1 insn if the mode is small or the address is
4068 a single register; otherwise it costs one insn per word. */
4069 if (REG_P (XEXP (x, 0)))
4070 *total = COSTS_N_INSNS (1);
4071 else
4072 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4073 return true;
4074
4075 case DIV:
4076 case MOD:
4077 case UDIV:
4078 case UMOD:
4079 /* Needs a libcall, so it costs about this. */
4080 *total = COSTS_N_INSNS (2);
4081 return false;
4082
4083 case ROTATE:
4084 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4085 {
4086 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4087 return true;
4088 }
4089 /* Fall through */
4090 case ROTATERT:
4091 case ASHIFT:
4092 case LSHIFTRT:
4093 case ASHIFTRT:
4094 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4095 {
4096 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4097 return true;
4098 }
4099 else if (mode == SImode)
4100 {
4101 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4102 /* Slightly disparage register shifts, but not by much. */
4103 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4104 *total += 1 + rtx_cost (XEXP (x, 1), code);
4105 return true;
4106 }
4107
4108 /* Needs a libcall. */
4109 *total = COSTS_N_INSNS (2);
4110 return false;
4111
4112 case MINUS:
4113 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4114 {
4115 *total = COSTS_N_INSNS (1);
4116 return false;
4117 }
4118
4119 if (mode == SImode)
4120 {
4121 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4122 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4123
4124 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4125 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4126 || subcode1 == ROTATE || subcode1 == ROTATERT
4127 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4128 || subcode1 == ASHIFTRT)
4129 {
4130 /* It's just the cost of the two operands. */
4131 *total = 0;
4132 return false;
4133 }
4134
4135 *total = COSTS_N_INSNS (1);
4136 return false;
4137 }
4138
4139 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4140 return false;
4141
4142 case PLUS:
4143 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4144 {
4145 *total = COSTS_N_INSNS (1);
4146 return false;
4147 }
4148
4149 /* Fall through */
4150 case AND: case XOR: case IOR:
4151 if (mode == SImode)
4152 {
4153 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4154
4155 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4156 || subcode == LSHIFTRT || subcode == ASHIFTRT
4157 || (code == AND && subcode == NOT))
4158 {
4159 /* It's just the cost of the two operands. */
4160 *total = 0;
4161 return false;
4162 }
4163 }
4164
4165 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4166 return false;
4167
4168 case MULT:
4169 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4170 return false;
4171
4172 case NEG:
4173 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4174 *total = COSTS_N_INSNS (1);
4175 /* Fall through */
4176 case NOT:
4177 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4178
4179 return false;
4180
4181 case IF_THEN_ELSE:
4182 *total = 0;
4183 return false;
4184
4185 case COMPARE:
4186 if (cc_register (XEXP (x, 0), VOIDmode))
4187 *total = 0;
4188 else
4189 *total = COSTS_N_INSNS (1);
4190 return false;
4191
4192 case ABS:
4193 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4194 *total = COSTS_N_INSNS (1);
4195 else
4196 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4197 return false;
4198
4199 case SIGN_EXTEND:
4200 *total = 0;
4201 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4202 {
4203 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4204 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4205 }
4206 if (mode == DImode)
4207 *total += COSTS_N_INSNS (1);
4208 return false;
4209
4210 case ZERO_EXTEND:
4211 *total = 0;
4212 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4213 {
4214 switch (GET_MODE (XEXP (x, 0)))
4215 {
4216 case QImode:
4217 *total += COSTS_N_INSNS (1);
4218 break;
4219
4220 case HImode:
4221 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4222
4223 case SImode:
4224 break;
4225
4226 default:
4227 *total += COSTS_N_INSNS (2);
4228 }
4229 }
4230
4231 if (mode == DImode)
4232 *total += COSTS_N_INSNS (1);
4233
4234 return false;
4235
4236 case CONST_INT:
4237 if (const_ok_for_arm (INTVAL (x)))
4238 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4239 else if (const_ok_for_arm (~INTVAL (x)))
4240 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4241 else if (const_ok_for_arm (-INTVAL (x)))
4242 {
4243 if (outer_code == COMPARE || outer_code == PLUS
4244 || outer_code == MINUS)
4245 *total = 0;
4246 else
4247 *total = COSTS_N_INSNS (1);
4248 }
4249 else
4250 *total = COSTS_N_INSNS (2);
4251 return true;
4252
4253 case CONST:
4254 case LABEL_REF:
4255 case SYMBOL_REF:
4256 *total = COSTS_N_INSNS (2);
4257 return true;
4258
4259 case CONST_DOUBLE:
4260 *total = COSTS_N_INSNS (4);
4261 return true;
4262
4263 default:
4264 if (mode != VOIDmode)
4265 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4266 else
4267 *total = COSTS_N_INSNS (4); /* Who knows? */
4268 return false;
4269 }
4270 }
4271
4272 /* RTX costs for cores with a slow MUL implementation. */
4273
4274 static bool
4275 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4276 {
4277 enum machine_mode mode = GET_MODE (x);
4278
4279 if (TARGET_THUMB)
4280 {
4281 *total = thumb_rtx_costs (x, code, outer_code);
4282 return true;
4283 }
4284
4285 switch (code)
4286 {
4287 case MULT:
4288 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4289 || mode == DImode)
4290 {
4291 *total = 30;
4292 return true;
4293 }
4294
4295 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4296 {
4297 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4298 & (unsigned HOST_WIDE_INT) 0xffffffff);
4299 int cost, const_ok = const_ok_for_arm (i);
4300 int j, booth_unit_size;
4301
4302 /* Tune as appropriate. */
4303 cost = const_ok ? 4 : 8;
4304 booth_unit_size = 2;
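 /* The slow multiplier retires two bits of the constant per cycle;
 e.g. a multiply by 0xff adds four cycles here, for a total cost
 of 4 + 8 = 12.  */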
4305 for (j = 0; i && j < 32; j += booth_unit_size)
4306 {
4307 i >>= booth_unit_size;
4308 cost += 2;
4309 }
4310
4311 *total = cost;
4312 return true;
4313 }
4314
4315 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4316 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4317 return true;
4318
4319 default:
4320 *total = arm_rtx_costs_1 (x, code, outer_code);
4321 return true;
4322 }
4323 }
4324
4325
4326 /* RTX cost for cores with a fast multiply unit (M variants). */
4327
4328 static bool
4329 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4330 {
4331 enum machine_mode mode = GET_MODE (x);
4332
4333 if (TARGET_THUMB)
4334 {
4335 *total = thumb_rtx_costs (x, code, outer_code);
4336 return true;
4337 }
4338
4339 switch (code)
4340 {
4341 case MULT:
4342 /* There is no point basing this on the tuning, since it is always the
4343 fast variant if it exists at all. */
4344 if (mode == DImode
4345 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4346 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4347 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4348 {
4349 *total = 8;
4350 return true;
4351 }
4352
4353
4354 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4355 || mode == DImode)
4356 {
4357 *total = 30;
4358 return true;
4359 }
4360
4361 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4362 {
4363 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4364 & (unsigned HOST_WIDE_INT) 0xffffffff);
4365 int cost, const_ok = const_ok_for_arm (i);
4366 int j, booth_unit_size;
4367
4368 /* Tune as appropriate. */
4369 cost = const_ok ? 4 : 8;
4370 booth_unit_size = 8;
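 /* With eight bits retired per cycle even a full 32-bit constant
 adds at most four cycles, so the worst case here is 8 + 8 = 16.  */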
4371 for (j = 0; i && j < 32; j += booth_unit_size)
4372 {
4373 i >>= booth_unit_size;
4374 cost += 2;
4375 }
4376
4377 *total = cost;
4378 return true;
4379 }
4380
4381 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4382 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4383 return true;
4384
4385 default:
4386 *total = arm_rtx_costs_1 (x, code, outer_code);
4387 return true;
4388 }
4389 }
4390
4391
4392 /* RTX cost for XScale CPUs. */
4393
4394 static bool
4395 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4396 {
4397 enum machine_mode mode = GET_MODE (x);
4398
4399 if (TARGET_THUMB)
4400 {
4401 *total = thumb_rtx_costs (x, code, outer_code);
4402 return true;
4403 }
4404
4405 switch (code)
4406 {
4407 case MULT:
4408 /* There is no point basing this on the tuning, since it is always the
4409 fast variant if it exists at all. */
4410 if (mode == DImode
4411 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4412 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4413 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4414 {
4415 *total = 8;
4416 return true;
4417 }
4418
4419
4420 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4421 || mode == DImode)
4422 {
4423 *total = 30;
4424 return true;
4425 }
4426
4427 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4428 {
4429 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4430 & (unsigned HOST_WIDE_INT) 0xffffffff);
4431 int cost, const_ok = const_ok_for_arm (i);
4432 unsigned HOST_WIDE_INT masked_const;
4433
4434 /* The cost will be related to two insns.
4435 First a load of the constant (MOV or LDR), then a multiply. */
4436 cost = 2;
4437 if (! const_ok)
4438 cost += 1; /* LDR is probably more expensive because
4439 of longer result latency. */
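 /* The masking below presumably models the multiplier's early
 termination: constants fitting in a signed 16-bit value add
 nothing, those fitting in a signed 28-bit value add one cycle,
 and anything wider adds two.  */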
4440 masked_const = i & 0xffff8000;
4441 if (masked_const != 0 && masked_const != 0xffff8000)
4442 {
4443 masked_const = i & 0xf8000000;
4444 if (masked_const == 0 || masked_const == 0xf8000000)
4445 cost += 1;
4446 else
4447 cost += 2;
4448 }
4449 *total = cost;
4450 return true;
4451 }
4452
4453 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4454 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4455 return true;
4456
4457 case COMPARE:
4458 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4459 will stall until the multiplication is complete. */
4460 if (GET_CODE (XEXP (x, 0)) == MULT)
4461 *total = 4 + rtx_cost (XEXP (x, 0), code);
4462 else
4463 *total = arm_rtx_costs_1 (x, code, outer_code);
4464 return true;
4465
4466 default:
4467 *total = arm_rtx_costs_1 (x, code, outer_code);
4468 return true;
4469 }
4470 }
4471
4472
4473 /* RTX costs for 9e (and later) cores. */
4474
4475 static bool
4476 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4477 {
4478 enum machine_mode mode = GET_MODE (x);
4479 int nonreg_cost;
4480 int cost;
4481
4482 if (TARGET_THUMB)
4483 {
4484 switch (code)
4485 {
4486 case MULT:
4487 *total = COSTS_N_INSNS (3);
4488 return true;
4489
4490 default:
4491 *total = thumb_rtx_costs (x, code, outer_code);
4492 return true;
4493 }
4494 }
4495
4496 switch (code)
4497 {
4498 case MULT:
4499 /* There is no point basing this on the tuning, since it is always the
4500 fast variant if it exists at all. */
4501 if (mode == DImode
4502 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4503 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4504 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4505 {
4506 *total = 3;
4507 return true;
4508 }
4509
4510
4511 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4512 {
4513 *total = 30;
4514 return true;
4515 }
4516 if (mode == DImode)
4517 {
4518 cost = 7;
4519 nonreg_cost = 8;
4520 }
4521 else
4522 {
4523 cost = 2;
4524 nonreg_cost = 4;
4525 }
4526
4527
4528 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4529 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4530 return true;
4531
4532 default:
4533 *total = arm_rtx_costs_1 (x, code, outer_code);
4534 return true;
4535 }
4536 }
4537 /* All address computations that can be done are free, but rtx cost returns
4538 the same for practically all of them. So we weight the different types
4539 of address here in the order (most pref first):
4540 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4541 static inline int
4542 arm_arm_address_cost (rtx x)
4543 {
4544 enum rtx_code c = GET_CODE (x);
4545
4546 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4547 return 0;
4548 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4549 return 10;
4550
4551 if (c == PLUS || c == MINUS)
4552 {
4553 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4554 return 2;
4555
4556 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4557 return 3;
4558
4559 return 4;
4560 }
4561
4562 return 6;
4563 }
4564
4565 static inline int
4566 arm_thumb_address_cost (rtx x)
4567 {
4568 enum rtx_code c = GET_CODE (x);
4569
4570 if (c == REG)
4571 return 1;
4572 if (c == PLUS
4573 && GET_CODE (XEXP (x, 0)) == REG
4574 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4575 return 1;
4576
4577 return 2;
4578 }
4579
4580 static int
4581 arm_address_cost (rtx x)
4582 {
4583 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4584 }
4585
4586 static int
4587 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4588 {
4589 rtx i_pat, d_pat;
4590
4591 /* Some true dependencies can have a higher cost depending
4592 on precisely how certain input operands are used. */
4593 if (arm_tune_xscale
4594 && REG_NOTE_KIND (link) == 0
4595 && recog_memoized (insn) >= 0
4596 && recog_memoized (dep) >= 0)
4597 {
4598 int shift_opnum = get_attr_shift (insn);
4599 enum attr_type attr_type = get_attr_type (dep);
4600
4601 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4602 operand for INSN. If we have a shifted input operand and the
4603 instruction we depend on is another ALU instruction, then we may
4604 have to account for an additional stall. */
4605 if (shift_opnum != 0
4606 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4607 {
4608 rtx shifted_operand;
4609 int opno;
4610
4611 /* Get the shifted operand. */
4612 extract_insn (insn);
4613 shifted_operand = recog_data.operand[shift_opnum];
4614
4615 /* Iterate over all the operands in DEP. If we write an operand
4616 that overlaps with SHIFTED_OPERAND, then we have to increase the
4617 cost of this dependency. */
4618 extract_insn (dep);
4619 preprocess_constraints ();
4620 for (opno = 0; opno < recog_data.n_operands; opno++)
4621 {
4622 /* We can ignore strict inputs. */
4623 if (recog_data.operand_type[opno] == OP_IN)
4624 continue;
4625
4626 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4627 shifted_operand))
4628 return 2;
4629 }
4630 }
4631 }
4632
4633 /* XXX This is not strictly true for the FPA. */
4634 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4635 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4636 return 0;
4637
4638 /* Call insns don't incur a stall, even if they follow a load. */
4639 if (REG_NOTE_KIND (link) == 0
4640 && GET_CODE (insn) == CALL_INSN)
4641 return 1;
4642
4643 if ((i_pat = single_set (insn)) != NULL
4644 && GET_CODE (SET_SRC (i_pat)) == MEM
4645 && (d_pat = single_set (dep)) != NULL
4646 && GET_CODE (SET_DEST (d_pat)) == MEM)
4647 {
4648 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4649 /* This is a load after a store, there is no conflict if the load reads
4650 from a cached area. Assume that loads from the stack, and from the
4651 constant pool are cached, and that others will miss. This is a
4652 hack. */
4653
4654 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4655 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4656 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4657 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4658 return 1;
4659 }
4660
4661 return cost;
4662 }
4663
4664 static int fp_consts_inited = 0;
4665
4666 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4667 static const char * const strings_fp[8] =
4668 {
4669 "0", "1", "2", "3",
4670 "4", "5", "0.5", "10"
4671 };
4672
4673 static REAL_VALUE_TYPE values_fp[8];
4674
4675 static void
4676 init_fp_table (void)
4677 {
4678 int i;
4679 REAL_VALUE_TYPE r;
4680
4681 if (TARGET_VFP)
4682 fp_consts_inited = 1;
4683 else
4684 fp_consts_inited = 8;
4685
4686 for (i = 0; i < fp_consts_inited; i++)
4687 {
4688 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4689 values_fp[i] = r;
4690 }
4691 }
4692
4693 /* Return TRUE if rtx X is a valid immediate FP constant. */
4694 int
4695 arm_const_double_rtx (rtx x)
4696 {
4697 REAL_VALUE_TYPE r;
4698 int i;
4699
4700 if (!fp_consts_inited)
4701 init_fp_table ();
4702
4703 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4704 if (REAL_VALUE_MINUS_ZERO (r))
4705 return 0;
4706
4707 for (i = 0; i < fp_consts_inited; i++)
4708 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4709 return 1;
4710
4711 return 0;
4712 }
4713
4714 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4715 int
4716 neg_const_double_rtx_ok_for_fpa (rtx x)
4717 {
4718 REAL_VALUE_TYPE r;
4719 int i;
4720
4721 if (!fp_consts_inited)
4722 init_fp_table ();
4723
4724 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4725 r = REAL_VALUE_NEGATE (r);
4726 if (REAL_VALUE_MINUS_ZERO (r))
4727 return 0;
4728
4729 for (i = 0; i < 8; i++)
4730 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4731 return 1;
4732
4733 return 0;
4734 }
4735 \f
4736 /* Predicates for `match_operand' and `match_operator'. */
4737
4738 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4739 int
4740 cirrus_memory_offset (rtx op)
4741 {
4742 /* Reject eliminable registers. */
4743 if (! (reload_in_progress || reload_completed)
4744 && ( reg_mentioned_p (frame_pointer_rtx, op)
4745 || reg_mentioned_p (arg_pointer_rtx, op)
4746 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4747 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4748 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4749 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4750 return 0;
4751
4752 if (GET_CODE (op) == MEM)
4753 {
4754 rtx ind;
4755
4756 ind = XEXP (op, 0);
4757
4758 /* Match: (mem (reg)). */
4759 if (GET_CODE (ind) == REG)
4760 return 1;
4761
4762 /* Match:
4763 (mem (plus (reg)
4764 (const))). */
4765 if (GET_CODE (ind) == PLUS
4766 && GET_CODE (XEXP (ind, 0)) == REG
4767 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4768 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4769 return 1;
4770 }
4771
4772 return 0;
4773 }
4774
4775 /* Return TRUE if OP is a valid VFP memory address pattern.
4776 WB is true if writeback address modes are allowed. */
4777
4778 int
4779 arm_coproc_mem_operand (rtx op, bool wb)
4780 {
4781 rtx ind;
4782
4783 /* Reject eliminable registers. */
4784 if (! (reload_in_progress || reload_completed)
4785 && ( reg_mentioned_p (frame_pointer_rtx, op)
4786 || reg_mentioned_p (arg_pointer_rtx, op)
4787 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4788 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4789 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4790 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4791 return FALSE;
4792
4793 /* Constants are converted into offsets from labels. */
4794 if (GET_CODE (op) != MEM)
4795 return FALSE;
4796
4797 ind = XEXP (op, 0);
4798
4799 if (reload_completed
4800 && (GET_CODE (ind) == LABEL_REF
4801 || (GET_CODE (ind) == CONST
4802 && GET_CODE (XEXP (ind, 0)) == PLUS
4803 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4804 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4805 return TRUE;
4806
4807 /* Match: (mem (reg)). */
4808 if (GET_CODE (ind) == REG)
4809 return arm_address_register_rtx_p (ind, 0);
4810
4811 /* Autoincrement addressing modes. */
4812 if (wb
4813 && (GET_CODE (ind) == PRE_INC
4814 || GET_CODE (ind) == POST_INC
4815 || GET_CODE (ind) == PRE_DEC
4816 || GET_CODE (ind) == POST_DEC))
4817 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4818
4819 if (wb
4820 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4821 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4822 && GET_CODE (XEXP (ind, 1)) == PLUS
4823 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4824 ind = XEXP (ind, 1);
4825
4826 /* Match:
4827 (plus (reg)
4828 (const)). */
4829 if (GET_CODE (ind) == PLUS
4830 && GET_CODE (XEXP (ind, 0)) == REG
4831 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4832 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4833 && INTVAL (XEXP (ind, 1)) > -1024
4834 && INTVAL (XEXP (ind, 1)) < 1024
4835 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4836 return TRUE;
4837
4838 return FALSE;
4839 }
4840
4841 /* Return true if X is a register that will be eliminated later on. */
4842 int
4843 arm_eliminable_register (rtx x)
4844 {
4845 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4846 || REGNO (x) == ARG_POINTER_REGNUM
4847 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4848 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4849 }
4850
4851 /* Return GENERAL_REGS if a scratch register required to reload x to/from
4852 VFP registers. Otherwise return NO_REGS. */
4853
4854 enum reg_class
4855 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4856 {
4857 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4858 return NO_REGS;
4859
4860 return GENERAL_REGS;
4861 }
4862
4863
4864 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4865 Used by the Cirrus Maverick code, which has to work around
4866 a hardware bug triggered by such instructions. */
4867 static bool
4868 arm_memory_load_p (rtx insn)
4869 {
4870 rtx body, lhs, rhs;
4871
4872 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4873 return false;
4874
4875 body = PATTERN (insn);
4876
4877 if (GET_CODE (body) != SET)
4878 return false;
4879
4880 lhs = XEXP (body, 0);
4881 rhs = XEXP (body, 1);
4882
4883 lhs = REG_OR_SUBREG_RTX (lhs);
4884
4885 /* If the destination is not a general purpose
4886 register we do not have to worry. */
4887 if (GET_CODE (lhs) != REG
4888 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4889 return false;
4890
4891 /* As well as loads from memory we also have to react
4892 to loads of invalid constants which will be turned
4893 into loads from the minipool. */
4894 return (GET_CODE (rhs) == MEM
4895 || GET_CODE (rhs) == SYMBOL_REF
4896 || note_invalid_constants (insn, -1, false));
4897 }
4898
4899 /* Return TRUE if INSN is a Cirrus instruction. */
4900 static bool
4901 arm_cirrus_insn_p (rtx insn)
4902 {
4903 enum attr_cirrus attr;
4904
4905 /* get_attr aborts on USE and CLOBBER. */
4906 if (!insn
4907 || GET_CODE (insn) != INSN
4908 || GET_CODE (PATTERN (insn)) == USE
4909 || GET_CODE (PATTERN (insn)) == CLOBBER)
4910 return 0;
4911
4912 attr = get_attr_cirrus (insn);
4913
4914 return attr != CIRRUS_NOT;
4915 }
4916
4917 /* Cirrus reorg for invalid instruction combinations. */
4918 static void
4919 cirrus_reorg (rtx first)
4920 {
4921 enum attr_cirrus attr;
4922 rtx body = PATTERN (first);
4923 rtx t;
4924 int nops;
4925
4926 /* Any branch must be followed by 2 non Cirrus instructions. */
4927 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4928 {
4929 nops = 0;
4930 t = next_nonnote_insn (first);
4931
4932 if (arm_cirrus_insn_p (t))
4933 ++ nops;
4934
4935 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4936 ++ nops;
4937
4938 while (nops --)
4939 emit_insn_after (gen_nop (), first);
4940
4941 return;
4942 }
4943
4944 /* (float (blah)) is in parallel with a clobber. */
4945 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4946 body = XVECEXP (body, 0, 0);
4947
4948 if (GET_CODE (body) == SET)
4949 {
4950 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4951
4952 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4953 be followed by a non Cirrus insn. */
4954 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4955 {
4956 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4957 emit_insn_after (gen_nop (), first);
4958
4959 return;
4960 }
4961 else if (arm_memory_load_p (first))
4962 {
4963 unsigned int arm_regno;
4964
4965 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4966 ldr/cfmv64hr combination where the Rd field is the same
4967 in both instructions must be split with a non Cirrus
4968 insn. Example:
4969
4970 ldr r0, blah
4971 nop
4972 cfmvsr mvf0, r0. */
4973
4974 /* Get Arm register number for ldr insn. */
4975 if (GET_CODE (lhs) == REG)
4976 arm_regno = REGNO (lhs);
4977 else if (GET_CODE (rhs) == REG)
4978 arm_regno = REGNO (rhs);
4979 else
4980 abort ();
4981
4982 /* Next insn. */
4983 first = next_nonnote_insn (first);
4984
4985 if (! arm_cirrus_insn_p (first))
4986 return;
4987
4988 body = PATTERN (first);
4989
4990 /* (float (blah)) is in parallel with a clobber. */
4991 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4992 body = XVECEXP (body, 0, 0);
4993
4994 if (GET_CODE (body) == FLOAT)
4995 body = XEXP (body, 0);
4996
4997 if (get_attr_cirrus (first) == CIRRUS_MOVE
4998 && GET_CODE (XEXP (body, 1)) == REG
4999 && arm_regno == REGNO (XEXP (body, 1)))
5000 emit_insn_after (gen_nop (), first);
5001
5002 return;
5003 }
5004 }
5005
5006 /* get_attr aborts on USE and CLOBBER. */
5007 if (!first
5008 || GET_CODE (first) != INSN
5009 || GET_CODE (PATTERN (first)) == USE
5010 || GET_CODE (PATTERN (first)) == CLOBBER)
5011 return;
5012
5013 attr = get_attr_cirrus (first);
5014
5015 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5016 must be followed by a non-coprocessor instruction. */
5017 if (attr == CIRRUS_COMPARE)
5018 {
5019 nops = 0;
5020
5021 t = next_nonnote_insn (first);
5022
5023 if (arm_cirrus_insn_p (t))
5024 ++ nops;
5025
5026 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5027 ++ nops;
5028
5029 while (nops --)
5030 emit_insn_after (gen_nop (), first);
5031
5032 return;
5033 }
5034 }
5035
5036 /* Return TRUE if X references a SYMBOL_REF. */
5037 int
5038 symbol_mentioned_p (rtx x)
5039 {
5040 const char * fmt;
5041 int i;
5042
5043 if (GET_CODE (x) == SYMBOL_REF)
5044 return 1;
5045
5046 fmt = GET_RTX_FORMAT (GET_CODE (x));
5047
5048 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5049 {
5050 if (fmt[i] == 'E')
5051 {
5052 int j;
5053
5054 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5055 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5056 return 1;
5057 }
5058 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5059 return 1;
5060 }
5061
5062 return 0;
5063 }
5064
5065 /* Return TRUE if X references a LABEL_REF. */
5066 int
5067 label_mentioned_p (rtx x)
5068 {
5069 const char * fmt;
5070 int i;
5071
5072 if (GET_CODE (x) == LABEL_REF)
5073 return 1;
5074
5075 fmt = GET_RTX_FORMAT (GET_CODE (x));
5076 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5077 {
5078 if (fmt[i] == 'E')
5079 {
5080 int j;
5081
5082 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5083 if (label_mentioned_p (XVECEXP (x, i, j)))
5084 return 1;
5085 }
5086 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5087 return 1;
5088 }
5089
5090 return 0;
5091 }
5092
5093 enum rtx_code
5094 minmax_code (rtx x)
5095 {
5096 enum rtx_code code = GET_CODE (x);
5097
5098 if (code == SMAX)
5099 return GE;
5100 else if (code == SMIN)
5101 return LE;
5102 else if (code == UMIN)
5103 return LEU;
5104 else if (code == UMAX)
5105 return GEU;
5106
5107 abort ();
5108 }
5109
5110 /* Return 1 if memory locations are adjacent. */
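 /* That is, both addresses use the same base register with immediate
 offsets exactly one word apart, e.g. [r4, #4] and [r4, #8].  */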
5111 int
5112 adjacent_mem_locations (rtx a, rtx b)
5113 {
5114 if ((GET_CODE (XEXP (a, 0)) == REG
5115 || (GET_CODE (XEXP (a, 0)) == PLUS
5116 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5117 && (GET_CODE (XEXP (b, 0)) == REG
5118 || (GET_CODE (XEXP (b, 0)) == PLUS
5119 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5120 {
5121 HOST_WIDE_INT val0 = 0, val1 = 0;
5122 rtx reg0, reg1;
5123 int val_diff;
5124
5125 if (GET_CODE (XEXP (a, 0)) == PLUS)
5126 {
5127 reg0 = XEXP (XEXP (a, 0), 0);
5128 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5129 }
5130 else
5131 reg0 = XEXP (a, 0);
5132
5133 if (GET_CODE (XEXP (b, 0)) == PLUS)
5134 {
5135 reg1 = XEXP (XEXP (b, 0), 0);
5136 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5137 }
5138 else
5139 reg1 = XEXP (b, 0);
5140
5141 /* Don't accept any offset that will require multiple
5142 instructions to handle, since this would cause the
5143 arith_adjacentmem pattern to output an overlong sequence. */
5144 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5145 return 0;
5146
5147 /* Don't allow an eliminable register: register elimination can make
5148 the offset too large. */
5149 if (arm_eliminable_register (reg0))
5150 return 0;
5151
5152 val_diff = val1 - val0;
5153 return ((REGNO (reg0) == REGNO (reg1))
5154 && (val_diff == 4 || val_diff == -4));
5155 }
5156
5157 return 0;
5158 }
5159
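 /* Examine the NOPS register/memory operand pairs in OPERANDS and decide
 whether they can be combined into a single load-multiple.  Returns 0 if
 they cannot, 1..4 for an ldmia/ldmib/ldmda/ldmdb ordering, or 5 if the
 base address must first be set up with an add or sub (see emit_ldm_seq).
 When BASE is non-null the sorted register numbers are stored in REGS,
 the base register number in *BASE and the lowest offset in *LOAD_OFFSET.  */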
5160 int
5161 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5162 HOST_WIDE_INT *load_offset)
5163 {
5164 int unsorted_regs[4];
5165 HOST_WIDE_INT unsorted_offsets[4];
5166 int order[4];
5167 int base_reg = -1;
5168 int i;
5169
5170 /* Can only handle 2, 3, or 4 insns at present,
5171 though could be easily extended if required. */
5172 if (nops < 2 || nops > 4)
5173 abort ();
5174
5175 /* Loop over the operands and check that the memory references are
5176 suitable (i.e. immediate offsets from the same base register). At
5177 the same time, extract the target register, and the memory
5178 offsets. */
5179 for (i = 0; i < nops; i++)
5180 {
5181 rtx reg;
5182 rtx offset;
5183
5184 /* Convert a subreg of a mem into the mem itself. */
5185 if (GET_CODE (operands[nops + i]) == SUBREG)
5186 operands[nops + i] = alter_subreg (operands + (nops + i));
5187
5188 if (GET_CODE (operands[nops + i]) != MEM)
5189 abort ();
5190
5191 /* Don't reorder volatile memory references; it doesn't seem worth
5192 looking for the case where the order is ok anyway. */
5193 if (MEM_VOLATILE_P (operands[nops + i]))
5194 return 0;
5195
5196 offset = const0_rtx;
5197
5198 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5199 || (GET_CODE (reg) == SUBREG
5200 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5201 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5202 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5203 == REG)
5204 || (GET_CODE (reg) == SUBREG
5205 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5206 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5207 == CONST_INT)))
5208 {
5209 if (i == 0)
5210 {
5211 base_reg = REGNO (reg);
5212 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5213 ? REGNO (operands[i])
5214 : REGNO (SUBREG_REG (operands[i])));
5215 order[0] = 0;
5216 }
5217 else
5218 {
5219 if (base_reg != (int) REGNO (reg))
5220 /* Not addressed from the same base register. */
5221 return 0;
5222
5223 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5224 ? REGNO (operands[i])
5225 : REGNO (SUBREG_REG (operands[i])));
5226 if (unsorted_regs[i] < unsorted_regs[order[0]])
5227 order[0] = i;
5228 }
5229
5230 /* If it isn't an integer register, or if it overwrites the
5231 base register but isn't the last insn in the list, then
5232 we can't do this. */
5233 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5234 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5235 return 0;
5236
5237 unsorted_offsets[i] = INTVAL (offset);
5238 }
5239 else
5240 /* Not a suitable memory address. */
5241 return 0;
5242 }
5243
5244 /* All the useful information has now been extracted from the
5245 operands into unsorted_regs and unsorted_offsets; additionally,
5246 order[0] has been set to the lowest numbered register in the
5247 list. Sort the registers into order, and check that the memory
5248 offsets are ascending and adjacent. */
5249
5250 for (i = 1; i < nops; i++)
5251 {
5252 int j;
5253
5254 order[i] = order[i - 1];
5255 for (j = 0; j < nops; j++)
5256 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5257 && (order[i] == order[i - 1]
5258 || unsorted_regs[j] < unsorted_regs[order[i]]))
5259 order[i] = j;
5260
5261 /* Have we found a suitable register? If not, one must be used more
5262 than once. */
5263 if (order[i] == order[i - 1])
5264 return 0;
5265
5266 /* Is the memory address adjacent and ascending? */
5267 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5268 return 0;
5269 }
5270
5271 if (base)
5272 {
5273 *base = base_reg;
5274
5275 for (i = 0; i < nops; i++)
5276 regs[i] = unsorted_regs[order[i]];
5277
5278 *load_offset = unsorted_offsets[order[0]];
5279 }
5280
5281 if (unsorted_offsets[order[0]] == 0)
5282 return 1; /* ldmia */
5283
5284 if (unsorted_offsets[order[0]] == 4)
5285 return 2; /* ldmib */
5286
5287 if (unsorted_offsets[order[nops - 1]] == 0)
5288 return 3; /* ldmda */
5289
5290 if (unsorted_offsets[order[nops - 1]] == -4)
5291 return 4; /* ldmdb */
5292
5293 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5294 if the offset isn't small enough. The reason 2 ldrs are faster
5295 is because these ARMs are able to do more than one cache access
5296 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5297 whilst the ARM8 has a double bandwidth cache. This means that
5298 these cores can do both an instruction fetch and a data fetch in
5299 a single cycle, so the trick of calculating the address into a
5300 scratch register (one of the result regs) and then doing a load
5301 multiple actually becomes slower (and no smaller in code size).
5302 That is the transformation
5303
5304 ldr rd1, [rbase + offset]
5305 ldr rd2, [rbase + offset + 4]
5306
5307 to
5308
5309 add rd1, rbase, offset
5310 ldmia rd1, {rd1, rd2}
5311
5312 produces worse code -- '3 cycles + any stalls on rd2' instead of
5313 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5314 access per cycle, the first sequence could never complete in less
5315 than 6 cycles, whereas the ldm sequence would only take 5 and
5316 would make better use of sequential accesses if not hitting the
5317 cache.
5318
5319 We cheat here and test 'arm_ld_sched' which we currently know to
5320 only be true for the ARM8, ARM9 and StrongARM. If this ever
5321 changes, then the test below needs to be reworked. */
5322 if (nops == 2 && arm_ld_sched)
5323 return 0;
5324
5325 /* Can't do it without setting up the offset, only do this if it takes
5326 no more than one insn. */
5327 return (const_ok_for_arm (unsorted_offsets[order[0]])
5328 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5329 }
5330
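 /* Output the load-multiple (and any preceding add or sub of the base)
 selected by load_multiple_sequence for OPERANDS; e.g. the pair
 ldr r0, [r4] / ldr r1, [r4, #4] is emitted as ldmia r4, {r0, r1}.  */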
5331 const char *
5332 emit_ldm_seq (rtx *operands, int nops)
5333 {
5334 int regs[4];
5335 int base_reg;
5336 HOST_WIDE_INT offset;
5337 char buf[100];
5338 int i;
5339
5340 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5341 {
5342 case 1:
5343 strcpy (buf, "ldm%?ia\t");
5344 break;
5345
5346 case 2:
5347 strcpy (buf, "ldm%?ib\t");
5348 break;
5349
5350 case 3:
5351 strcpy (buf, "ldm%?da\t");
5352 break;
5353
5354 case 4:
5355 strcpy (buf, "ldm%?db\t");
5356 break;
5357
5358 case 5:
5359 if (offset >= 0)
5360 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5361 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5362 (long) offset);
5363 else
5364 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5365 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5366 (long) -offset);
5367 output_asm_insn (buf, operands);
5368 base_reg = regs[0];
5369 strcpy (buf, "ldm%?ia\t");
5370 break;
5371
5372 default:
5373 abort ();
5374 }
5375
5376 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5377 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5378
5379 for (i = 1; i < nops; i++)
5380 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5381 reg_names[regs[i]]);
5382
5383 strcat (buf, "}\t%@ phole ldm");
5384
5385 output_asm_insn (buf, operands);
5386 return "";
5387 }
5388
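 /* The store-multiple analogue of load_multiple_sequence: returns 0 if
 the stores cannot be combined, or 1..4 for an stmia/stmib/stmda/stmdb
 ordering.  Unlike the load case there is no add/sub fallback.  */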
5389 int
5390 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5391 HOST_WIDE_INT * load_offset)
5392 {
5393 int unsorted_regs[4];
5394 HOST_WIDE_INT unsorted_offsets[4];
5395 int order[4];
5396 int base_reg = -1;
5397 int i;
5398
5399 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5400 extended if required. */
5401 if (nops < 2 || nops > 4)
5402 abort ();
5403
5404 /* Loop over the operands and check that the memory references are
5405 suitable (i.e. immediate offsets from the same base register). At
5406 the same time, extract the target register, and the memory
5407 offsets. */
5408 for (i = 0; i < nops; i++)
5409 {
5410 rtx reg;
5411 rtx offset;
5412
5413 /* Convert a subreg of a mem into the mem itself. */
5414 if (GET_CODE (operands[nops + i]) == SUBREG)
5415 operands[nops + i] = alter_subreg (operands + (nops + i));
5416
5417 if (GET_CODE (operands[nops + i]) != MEM)
5418 abort ();
5419
5420 /* Don't reorder volatile memory references; it doesn't seem worth
5421 looking for the case where the order is ok anyway. */
5422 if (MEM_VOLATILE_P (operands[nops + i]))
5423 return 0;
5424
5425 offset = const0_rtx;
5426
5427 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5428 || (GET_CODE (reg) == SUBREG
5429 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5430 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5431 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5432 == REG)
5433 || (GET_CODE (reg) == SUBREG
5434 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5435 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5436 == CONST_INT)))
5437 {
5438 if (i == 0)
5439 {
5440 base_reg = REGNO (reg);
5441 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5442 ? REGNO (operands[i])
5443 : REGNO (SUBREG_REG (operands[i])));
5444 order[0] = 0;
5445 }
5446 else
5447 {
5448 if (base_reg != (int) REGNO (reg))
5449 /* Not addressed from the same base register. */
5450 return 0;
5451
5452 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5453 ? REGNO (operands[i])
5454 : REGNO (SUBREG_REG (operands[i])));
5455 if (unsorted_regs[i] < unsorted_regs[order[0]])
5456 order[0] = i;
5457 }
5458
5459 /* If it isn't an integer register, then we can't do this. */
5460 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5461 return 0;
5462
5463 unsorted_offsets[i] = INTVAL (offset);
5464 }
5465 else
5466 /* Not a suitable memory address. */
5467 return 0;
5468 }
5469
5470 /* All the useful information has now been extracted from the
5471 operands into unsorted_regs and unsorted_offsets; additionally,
5472 order[0] has been set to the lowest numbered register in the
5473 list. Sort the registers into order, and check that the memory
5474 offsets are ascending and adjacent. */
5475
5476 for (i = 1; i < nops; i++)
5477 {
5478 int j;
5479
5480 order[i] = order[i - 1];
5481 for (j = 0; j < nops; j++)
5482 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5483 && (order[i] == order[i - 1]
5484 || unsorted_regs[j] < unsorted_regs[order[i]]))
5485 order[i] = j;
5486
5487 /* Have we found a suitable register? If not, one must be used more
5488 than once. */
5489 if (order[i] == order[i - 1])
5490 return 0;
5491
5492 /* Is the memory address adjacent and ascending? */
5493 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5494 return 0;
5495 }
5496
5497 if (base)
5498 {
5499 *base = base_reg;
5500
5501 for (i = 0; i < nops; i++)
5502 regs[i] = unsorted_regs[order[i]];
5503
5504 *load_offset = unsorted_offsets[order[0]];
5505 }
5506
5507 if (unsorted_offsets[order[0]] == 0)
5508 return 1; /* stmia */
5509
5510 if (unsorted_offsets[order[0]] == 4)
5511 return 2; /* stmib */
5512
5513 if (unsorted_offsets[order[nops - 1]] == 0)
5514 return 3; /* stmda */
5515
5516 if (unsorted_offsets[order[nops - 1]] == -4)
5517 return 4; /* stmdb */
5518
5519 return 0;
5520 }
5521
5522 const char *
5523 emit_stm_seq (rtx *operands, int nops)
5524 {
5525 int regs[4];
5526 int base_reg;
5527 HOST_WIDE_INT offset;
5528 char buf[100];
5529 int i;
5530
5531 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5532 {
5533 case 1:
5534 strcpy (buf, "stm%?ia\t");
5535 break;
5536
5537 case 2:
5538 strcpy (buf, "stm%?ib\t");
5539 break;
5540
5541 case 3:
5542 strcpy (buf, "stm%?da\t");
5543 break;
5544
5545 case 4:
5546 strcpy (buf, "stm%?db\t");
5547 break;
5548
5549 default:
5550 abort ();
5551 }
5552
5553 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5554 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5555
5556 for (i = 1; i < nops; i++)
5557 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5558 reg_names[regs[i]]);
5559
5560 strcat (buf, "}\t%@ phole stm");
5561
5562 output_asm_insn (buf, operands);
5563 return "";
5564 }
5565
5566 \f
5567 /* Routines for use in generating RTL. */
5568
5569 rtx
5570 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5571 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5572 {
5573 HOST_WIDE_INT offset = *offsetp;
5574 int i = 0, j;
5575 rtx result;
5576 int sign = up ? 1 : -1;
5577 rtx mem, addr;
5578
5579 /* XScale has load-store double instructions, but they have stricter
5580 alignment requirements than load-store multiple, so we cannot
5581 use them.
5582
5583 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5584 the pipeline until completion.
5585
5586 NREGS CYCLES
5587 1 3
5588 2 4
5589 3 5
5590 4 6
5591
5592 An ldr instruction takes 1-3 cycles, but does not block the
5593 pipeline.
5594
5595 NREGS CYCLES
5596 1 1-3
5597 2 2-6
5598 3 3-9
5599 4 4-12
5600
5601 Best case ldr will always win. However, the more ldr instructions
5602 we issue, the less likely we are to be able to schedule them well.
5603 Using ldr instructions also increases code size.
5604
5605 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5606 for counts of 3 or 4 regs. */
5607 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5608 {
5609 rtx seq;
5610
5611 start_sequence ();
5612
5613 for (i = 0; i < count; i++)
5614 {
5615 addr = plus_constant (from, i * 4 * sign);
5616 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5617 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5618 offset += 4 * sign;
5619 }
5620
5621 if (write_back)
5622 {
5623 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5624 *offsetp = offset;
5625 }
5626
5627 seq = get_insns ();
5628 end_sequence ();
5629
5630 return seq;
5631 }
5632
5633 result = gen_rtx_PARALLEL (VOIDmode,
5634 rtvec_alloc (count + (write_back ? 1 : 0)));
5635 if (write_back)
5636 {
5637 XVECEXP (result, 0, 0)
5638 = gen_rtx_SET (GET_MODE (from), from,
5639 plus_constant (from, count * 4 * sign));
5640 i = 1;
5641 count++;
5642 }
5643
5644 for (j = 0; i < count; i++, j++)
5645 {
5646 addr = plus_constant (from, j * 4 * sign);
5647 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5648 XVECEXP (result, 0, i)
5649 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5650 offset += 4 * sign;
5651 }
5652
5653 if (write_back)
5654 *offsetp = offset;
5655
5656 return result;
5657 }
5658
5659 rtx
5660 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5661 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5662 {
5663 HOST_WIDE_INT offset = *offsetp;
5664 int i = 0, j;
5665 rtx result;
5666 int sign = up ? 1 : -1;
5667 rtx mem, addr;
5668
5669 /* See arm_gen_load_multiple for discussion of
5670 the pros/cons of ldm/stm usage for XScale. */
5671 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5672 {
5673 rtx seq;
5674
5675 start_sequence ();
5676
5677 for (i = 0; i < count; i++)
5678 {
5679 addr = plus_constant (to, i * 4 * sign);
5680 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5681 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5682 offset += 4 * sign;
5683 }
5684
5685 if (write_back)
5686 {
5687 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5688 *offsetp = offset;
5689 }
5690
5691 seq = get_insns ();
5692 end_sequence ();
5693
5694 return seq;
5695 }
5696
5697 result = gen_rtx_PARALLEL (VOIDmode,
5698 rtvec_alloc (count + (write_back ? 1 : 0)));
5699 if (write_back)
5700 {
5701 XVECEXP (result, 0, 0)
5702 = gen_rtx_SET (GET_MODE (to), to,
5703 plus_constant (to, count * 4 * sign));
5704 i = 1;
5705 count++;
5706 }
5707
5708 for (j = 0; i < count; i++, j++)
5709 {
5710 addr = plus_constant (to, j * 4 * sign);
5711 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5712 XVECEXP (result, 0, i)
5713 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5714 offset += 4 * sign;
5715 }
5716
5717 if (write_back)
5718 *offsetp = offset;
5719
5720 return result;
5721 }
5722
5723 int
5724 arm_gen_movmemqi (rtx *operands)
5725 {
5726 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5727 HOST_WIDE_INT srcoffset, dstoffset;
5728 int i;
5729 rtx src, dst, srcbase, dstbase;
5730 rtx part_bytes_reg = NULL;
5731 rtx mem;
5732
5733 if (GET_CODE (operands[2]) != CONST_INT
5734 || GET_CODE (operands[3]) != CONST_INT
5735 || INTVAL (operands[2]) > 64
5736 || INTVAL (operands[3]) & 3)
5737 return 0;
5738
5739 dstbase = operands[0];
5740 srcbase = operands[1];
5741
5742 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5743 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5744
5745 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5746 out_words_to_go = INTVAL (operands[2]) / 4;
5747 last_bytes = INTVAL (operands[2]) & 3;
5748 dstoffset = srcoffset = 0;
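  /* A worked example with hypothetical operand values: copying 14 bytes
     with word-aligned operands gives INTVAL (operands[2]) == 14 and
     INTVAL (operands[3]) == 4, so in_words_to_go == 4 (14 rounded up to
     whole words), out_words_to_go == 3 (complete words stored as words)
     and last_bytes == 2 (trailing bytes handled separately below).  */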
5749
5750 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5751 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5752
5753 for (i = 0; in_words_to_go >= 2; i+=4)
5754 {
5755 if (in_words_to_go > 4)
5756 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5757 srcbase, &srcoffset));
5758 else
5759 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5760 FALSE, srcbase, &srcoffset));
5761
5762 if (out_words_to_go)
5763 {
5764 if (out_words_to_go > 4)
5765 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5766 dstbase, &dstoffset));
5767 else if (out_words_to_go != 1)
5768 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5769 dst, TRUE,
5770 (last_bytes == 0
5771 ? FALSE : TRUE),
5772 dstbase, &dstoffset));
5773 else
5774 {
5775 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5776 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5777 if (last_bytes != 0)
5778 {
5779 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5780 dstoffset += 4;
5781 }
5782 }
5783 }
5784
5785 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5786 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5787 }
5788
5789 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5790 if (out_words_to_go)
5791 {
5792 rtx sreg;
5793
5794 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5795 sreg = copy_to_reg (mem);
5796
5797 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5798 emit_move_insn (mem, sreg);
5799 in_words_to_go--;
5800
5801 if (in_words_to_go) /* Sanity check */
5802 abort ();
5803 }
5804
5805 if (in_words_to_go)
5806 {
5807 if (in_words_to_go < 0)
5808 abort ();
5809
5810 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5811 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5812 }
5813
5814 if (last_bytes && part_bytes_reg == NULL)
5815 abort ();
5816
5817 if (BYTES_BIG_ENDIAN && last_bytes)
5818 {
5819 rtx tmp = gen_reg_rtx (SImode);
5820
5821 /* The bytes we want are in the top end of the word. */
5822 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5823 GEN_INT (8 * (4 - last_bytes))));
5824 part_bytes_reg = tmp;
5825
5826 while (last_bytes)
5827 {
5828 mem = adjust_automodify_address (dstbase, QImode,
5829 plus_constant (dst, last_bytes - 1),
5830 dstoffset + last_bytes - 1);
5831 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5832
5833 if (--last_bytes)
5834 {
5835 tmp = gen_reg_rtx (SImode);
5836 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5837 part_bytes_reg = tmp;
5838 }
5839 }
5840
5841 }
5842 else
5843 {
5844 if (last_bytes > 1)
5845 {
5846 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5847 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5848 last_bytes -= 2;
5849 if (last_bytes)
5850 {
5851 rtx tmp = gen_reg_rtx (SImode);
5852 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5853 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5854 part_bytes_reg = tmp;
5855 dstoffset += 2;
5856 }
5857 }
5858
5859 if (last_bytes)
5860 {
5861 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5862 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5863 }
5864 }
5865
5866 return 1;
5867 }
5868
5869 /* Generate a memory reference for a half word, such that it will be loaded
5870 into the top 16 bits of the word. We can assume that the address is
5871 known to be alignable and of the form reg, or plus (reg, const). */
5872
5873 rtx
5874 arm_gen_rotated_half_load (rtx memref)
5875 {
5876 HOST_WIDE_INT offset = 0;
5877 rtx base = XEXP (memref, 0);
5878
5879 if (GET_CODE (base) == PLUS)
5880 {
5881 offset = INTVAL (XEXP (base, 1));
5882 base = XEXP (base, 0);
5883 }
5884
5885 /* If we aren't allowed to generate unaligned addresses, then fail. */
5886 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5887 return NULL;
5888
5889 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5890
5891 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5892 return base;
5893
5894 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5895 }
5896
5897 /* Select a dominance comparison mode if possible for a test of the general
5898 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5899 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5900 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5901 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5902 In all cases OP will be either EQ or NE, but we don't need to know which
5903 here. If we are unable to support a dominance comparison we return
5904 CC mode. This will then fail to match for the RTL expressions that
5905 generate this call. */
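/* As an illustration (register numbers hypothetical): a test such as
   "a == 0 || b == 0" can reach us as

     (ne (ior (eq (reg:SI 0) (const_int 0))
	      (eq (reg:SI 1) (const_int 0)))
	 (const_int 0))

   Here X and Y are the two EQ comparisons and COND_OR is DOM_CC_X_OR_Y;
   since the comparisons are identical, CC_DEQmode is returned.  */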
5906 enum machine_mode
5907 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5908 {
5909 enum rtx_code cond1, cond2;
5910 int swapped = 0;
5911
5912 /* Currently we will probably get the wrong result if the individual
5913 comparisons are not simple. This also ensures that it is safe to
5914 reverse a comparison if necessary. */
5915 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5916 != CCmode)
5917 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5918 != CCmode))
5919 return CCmode;
5920
5921 /* The if_then_else variant of this tests the second condition if the
5922 first passes, but is true if the first fails. Reverse the first
5923 condition to get a true "inclusive-or" expression. */
5924 if (cond_or == DOM_CC_NX_OR_Y)
5925 cond1 = reverse_condition (cond1);
5926
5927 /* If the comparisons are not equal, and one doesn't dominate the other,
5928 then we can't do this. */
5929 if (cond1 != cond2
5930 && !comparison_dominates_p (cond1, cond2)
5931 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5932 return CCmode;
5933
5934 if (swapped)
5935 {
5936 enum rtx_code temp = cond1;
5937 cond1 = cond2;
5938 cond2 = temp;
5939 }
5940
5941 switch (cond1)
5942 {
5943 case EQ:
5944 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5945 return CC_DEQmode;
5946
5947 switch (cond2)
5948 {
5949 case LE: return CC_DLEmode;
5950 case LEU: return CC_DLEUmode;
5951 case GE: return CC_DGEmode;
5952 case GEU: return CC_DGEUmode;
5953 default: break;
5954 }
5955
5956 break;
5957
5958 case LT:
5959 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
5960 return CC_DLTmode;
5961 if (cond2 == LE)
5962 return CC_DLEmode;
5963 if (cond2 == NE)
5964 return CC_DNEmode;
5965 break;
5966
5967 case GT:
5968 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
5969 return CC_DGTmode;
5970 if (cond2 == GE)
5971 return CC_DGEmode;
5972 if (cond2 == NE)
5973 return CC_DNEmode;
5974 break;
5975
5976 case LTU:
5977 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
5978 return CC_DLTUmode;
5979 if (cond2 == LEU)
5980 return CC_DLEUmode;
5981 if (cond2 == NE)
5982 return CC_DNEmode;
5983 break;
5984
5985 case GTU:
5986 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
5987 return CC_DGTUmode;
5988 if (cond2 == GEU)
5989 return CC_DGEUmode;
5990 if (cond2 == NE)
5991 return CC_DNEmode;
5992 break;
5993
5994 /* The remaining cases only occur when both comparisons are the
5995 same. */
5996 case NE:
5997 return CC_DNEmode;
5998
5999 case LE:
6000 return CC_DLEmode;
6001
6002 case GE:
6003 return CC_DGEmode;
6004
6005 case LEU:
6006 return CC_DLEUmode;
6007
6008 case GEU:
6009 return CC_DGEUmode;
6010
6011 default:
6012 break;
6013 }
6014
6015 abort ();
6016 }
6017
6018 enum machine_mode
6019 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6020 {
6021 /* All floating point compares return CCFP if it is an equality
6022 comparison, and CCFPE otherwise. */
6023 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6024 {
6025 switch (op)
6026 {
6027 case EQ:
6028 case NE:
6029 case UNORDERED:
6030 case ORDERED:
6031 case UNLT:
6032 case UNLE:
6033 case UNGT:
6034 case UNGE:
6035 case UNEQ:
6036 case LTGT:
6037 return CCFPmode;
6038
6039 case LT:
6040 case LE:
6041 case GT:
6042 case GE:
6043 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6044 return CCFPmode;
6045 return CCFPEmode;
6046
6047 default:
6048 abort ();
6049 }
6050 }
6051
6052 /* A compare with a shifted operand. Because of canonicalization, the
6053 comparison will have to be swapped when we emit the assembler. */
6054 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6055 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6056 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6057 || GET_CODE (x) == ROTATERT))
6058 return CC_SWPmode;
6059
6060 /* This is a special case that is used by combine to allow a
6061 comparison of a shifted byte load to be split into a zero-extend
6062 followed by a comparison of the shifted integer (only valid for
6063 equalities and unsigned inequalities).  */
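/* An illustrative X for that case (register number hypothetical):

     (ashift:SI (subreg:SI (mem:QI (reg:SI 4)) 0)
		(const_int 24))

   i.e. a QImode load widened by a paradoxical SUBREG and shifted to the
   top of the word, compared (EQ/NE or unsigned) against a CONST_INT Y.  */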
6064 if (GET_MODE (x) == SImode
6065 && GET_CODE (x) == ASHIFT
6066 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6067 && GET_CODE (XEXP (x, 0)) == SUBREG
6068 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6069 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6070 && (op == EQ || op == NE
6071 || op == GEU || op == GTU || op == LTU || op == LEU)
6072 && GET_CODE (y) == CONST_INT)
6073 return CC_Zmode;
6074
6075 /* A construct for a conditional compare: if the false arm contains
6076 0, then both conditions must be true; otherwise either condition
6077 must be true. Not all conditions are possible, so CCmode is
6078 returned if it can't be done. */
6079 if (GET_CODE (x) == IF_THEN_ELSE
6080 && (XEXP (x, 2) == const0_rtx
6081 || XEXP (x, 2) == const1_rtx)
6082 && COMPARISON_P (XEXP (x, 0))
6083 && COMPARISON_P (XEXP (x, 1)))
6084 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6085 INTVAL (XEXP (x, 2)));
6086
6087 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6088 if (GET_CODE (x) == AND
6089 && COMPARISON_P (XEXP (x, 0))
6090 && COMPARISON_P (XEXP (x, 1)))
6091 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6092 DOM_CC_X_AND_Y);
6093
6094 if (GET_CODE (x) == IOR
6095 && COMPARISON_P (XEXP (x, 0))
6096 && COMPARISON_P (XEXP (x, 1)))
6097 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6098 DOM_CC_X_OR_Y);
6099
6100 /* An operation (on Thumb) where we want to test for a single bit.
6101 This is done by shifting that bit up into the top bit of a
6102 scratch register; we can then branch on the sign bit. */
6103 if (TARGET_THUMB
6104 && GET_MODE (x) == SImode
6105 && (op == EQ || op == NE)
6106 && (GET_CODE (x) == ZERO_EXTRACT))
6107 return CC_Nmode;
6108
6109 /* For an operation that sets the condition codes as a side-effect, the
6110 V flag is not set correctly, so we can only use comparisons where
6111 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6112 instead.) */
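/* For example (illustrative only): comparing

     (plus:SI (reg:SI 0) (reg:SI 1))

   against (const_int 0) with OP == EQ lets the addition set the flags
   itself ("adds").  V is irrelevant for EQ/NE, and LT/GE against zero
   map onto the N flag ("mi"/"pl"), so CC_NOOVmode is chosen.  */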
6113 if (GET_MODE (x) == SImode
6114 && y == const0_rtx
6115 && (op == EQ || op == NE || op == LT || op == GE)
6116 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6117 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6118 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6119 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6120 || GET_CODE (x) == LSHIFTRT
6121 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6122 || GET_CODE (x) == ROTATERT
6123 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6124 return CC_NOOVmode;
6125
6126 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6127 return CC_Zmode;
6128
6129 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6130 && GET_CODE (x) == PLUS
6131 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6132 return CC_Cmode;
6133
6134 return CCmode;
6135 }
6136
6137 /* X and Y are two things to compare using CODE. Emit the compare insn
6138 and return the rtx for the condition code register in the proper
6139 mode. */
6140 rtx
6141 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6142 {
6143 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6144 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6145
6146 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6147 gen_rtx_COMPARE (mode, x, y)));
6148
6149 return cc_reg;
6150 }
6151
6152 /* Generate a sequence of insns that will generate the correct return
6153 address mask depending on the physical architecture that the program
6154 is running on. */
6155 rtx
6156 arm_gen_return_addr_mask (void)
6157 {
6158 rtx reg = gen_reg_rtx (Pmode);
6159
6160 emit_insn (gen_return_addr_mask (reg));
6161 return reg;
6162 }
6163
6164 void
6165 arm_reload_in_hi (rtx *operands)
6166 {
6167 rtx ref = operands[1];
6168 rtx base, scratch;
6169 HOST_WIDE_INT offset = 0;
6170
6171 if (GET_CODE (ref) == SUBREG)
6172 {
6173 offset = SUBREG_BYTE (ref);
6174 ref = SUBREG_REG (ref);
6175 }
6176
6177 if (GET_CODE (ref) == REG)
6178 {
6179 /* We have a pseudo which has been spilt onto the stack; there
6180 are two cases here: the first where there is a simple
6181 stack-slot replacement and a second where the stack-slot is
6182 out of range, or is used as a subreg. */
6183 if (reg_equiv_mem[REGNO (ref)])
6184 {
6185 ref = reg_equiv_mem[REGNO (ref)];
6186 base = find_replacement (&XEXP (ref, 0));
6187 }
6188 else
6189 /* The slot is out of range, or was dressed up in a SUBREG. */
6190 base = reg_equiv_address[REGNO (ref)];
6191 }
6192 else
6193 base = find_replacement (&XEXP (ref, 0));
6194
6195 /* Handle the case where the address is too complex to be offset by 1. */
6196 if (GET_CODE (base) == MINUS
6197 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6198 {
6199 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6200
6201 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6202 base = base_plus;
6203 }
6204 else if (GET_CODE (base) == PLUS)
6205 {
6206 /* The addend must be CONST_INT, or we would have dealt with it above. */
6207 HOST_WIDE_INT hi, lo;
6208
6209 offset += INTVAL (XEXP (base, 1));
6210 base = XEXP (base, 0);
6211
6212 /* Rework the address into a legal sequence of insns. */
6213 /* Valid range for lo is -4095 -> 4095 */
6214 lo = (offset >= 0
6215 ? (offset & 0xfff)
6216 : -((-offset) & 0xfff));
6217
6218 /* Corner case, if lo is the max offset then we would be out of range
6219 once we have added the additional 1 below, so bump the msb into the
6220 pre-loading insn(s). */
6221 if (lo == 4095)
6222 lo &= 0x7ff;
6223
6224 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6225 ^ (HOST_WIDE_INT) 0x80000000)
6226 - (HOST_WIDE_INT) 0x80000000);
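	  /* Worked example (hypothetical offsets): for offset == 0x1005,
	     lo == 0x005 and hi == 0x1000, so BASE is advanced by 0x1000
	     via addsi3 and the remaining 0x005 is used by the byte loads
	     below.  For offset == 0x1fff the corner case applies: lo is
	     reduced to 0x7ff, giving hi == 0x1800.  */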
6227
6228 if (hi + lo != offset)
6229 abort ();
6230
6231 if (hi != 0)
6232 {
6233 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6234
6235 /* Get the base address; addsi3 knows how to handle constants
6236 that require more than one insn. */
6237 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6238 base = base_plus;
6239 offset = lo;
6240 }
6241 }
6242
6243 /* Operands[2] may overlap operands[0] (though it won't overlap
6244 operands[1]), that's why we asked for a DImode reg -- so we can
6245 use the half that does not overlap. */
6246 if (REGNO (operands[2]) == REGNO (operands[0]))
6247 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6248 else
6249 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6250
6251 emit_insn (gen_zero_extendqisi2 (scratch,
6252 gen_rtx_MEM (QImode,
6253 plus_constant (base,
6254 offset))));
6255 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6256 gen_rtx_MEM (QImode,
6257 plus_constant (base,
6258 offset + 1))));
6259 if (!BYTES_BIG_ENDIAN)
6260 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6261 gen_rtx_IOR (SImode,
6262 gen_rtx_ASHIFT
6263 (SImode,
6264 gen_rtx_SUBREG (SImode, operands[0], 0),
6265 GEN_INT (8)),
6266 scratch)));
6267 else
6268 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6269 gen_rtx_IOR (SImode,
6270 gen_rtx_ASHIFT (SImode, scratch,
6271 GEN_INT (8)),
6272 gen_rtx_SUBREG (SImode, operands[0],
6273 0))));
6274 }
6275
6276 /* Handle storing a half-word to memory during reload by synthesizing as two
6277 byte stores. Take care not to clobber the input values until after we
6278 have moved them somewhere safe. This code assumes that if the DImode
6279 scratch in operands[2] overlaps either the input value or output address
6280 in some way, then that value must die in this insn (we absolutely need
6281 two scratch registers for some corner cases). */
6282 void
6283 arm_reload_out_hi (rtx *operands)
6284 {
6285 rtx ref = operands[0];
6286 rtx outval = operands[1];
6287 rtx base, scratch;
6288 HOST_WIDE_INT offset = 0;
6289
6290 if (GET_CODE (ref) == SUBREG)
6291 {
6292 offset = SUBREG_BYTE (ref);
6293 ref = SUBREG_REG (ref);
6294 }
6295
6296 if (GET_CODE (ref) == REG)
6297 {
6298 /* We have a pseudo which has been spilt onto the stack; there
6299 are two cases here: the first where there is a simple
6300 stack-slot replacement and a second where the stack-slot is
6301 out of range, or is used as a subreg. */
6302 if (reg_equiv_mem[REGNO (ref)])
6303 {
6304 ref = reg_equiv_mem[REGNO (ref)];
6305 base = find_replacement (&XEXP (ref, 0));
6306 }
6307 else
6308 /* The slot is out of range, or was dressed up in a SUBREG. */
6309 base = reg_equiv_address[REGNO (ref)];
6310 }
6311 else
6312 base = find_replacement (&XEXP (ref, 0));
6313
6314 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6315
6316 /* Handle the case where the address is too complex to be offset by 1. */
6317 if (GET_CODE (base) == MINUS
6318 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6319 {
6320 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6321
6322 /* Be careful not to destroy OUTVAL. */
6323 if (reg_overlap_mentioned_p (base_plus, outval))
6324 {
6325 /* Updating base_plus might destroy outval, see if we can
6326 swap the scratch and base_plus. */
6327 if (!reg_overlap_mentioned_p (scratch, outval))
6328 {
6329 rtx tmp = scratch;
6330 scratch = base_plus;
6331 base_plus = tmp;
6332 }
6333 else
6334 {
6335 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6336
6337 /* Be conservative and copy OUTVAL into the scratch now,
6338 this should only be necessary if outval is a subreg
6339 of something larger than a word. */
6340 /* XXX Might this clobber base? I can't see how it can,
6341 since scratch is known to overlap with OUTVAL, and
6342 must be wider than a word. */
6343 emit_insn (gen_movhi (scratch_hi, outval));
6344 outval = scratch_hi;
6345 }
6346 }
6347
6348 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6349 base = base_plus;
6350 }
6351 else if (GET_CODE (base) == PLUS)
6352 {
6353 /* The addend must be CONST_INT, or we would have dealt with it above. */
6354 HOST_WIDE_INT hi, lo;
6355
6356 offset += INTVAL (XEXP (base, 1));
6357 base = XEXP (base, 0);
6358
6359 /* Rework the address into a legal sequence of insns. */
6360 /* Valid range for lo is -4095 -> 4095 */
6361 lo = (offset >= 0
6362 ? (offset & 0xfff)
6363 : -((-offset) & 0xfff));
6364
6365 /* Corner case, if lo is the max offset then we would be out of range
6366 once we have added the additional 1 below, so bump the msb into the
6367 pre-loading insn(s). */
6368 if (lo == 4095)
6369 lo &= 0x7ff;
6370
6371 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6372 ^ (HOST_WIDE_INT) 0x80000000)
6373 - (HOST_WIDE_INT) 0x80000000);
6374
6375 if (hi + lo != offset)
6376 abort ();
6377
6378 if (hi != 0)
6379 {
6380 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6381
6382 /* Be careful not to destroy OUTVAL. */
6383 if (reg_overlap_mentioned_p (base_plus, outval))
6384 {
6385 /* Updating base_plus might destroy outval, see if we
6386 can swap the scratch and base_plus. */
6387 if (!reg_overlap_mentioned_p (scratch, outval))
6388 {
6389 rtx tmp = scratch;
6390 scratch = base_plus;
6391 base_plus = tmp;
6392 }
6393 else
6394 {
6395 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6396
6397 /* Be conservative and copy outval into scratch now,
6398 this should only be necessary if outval is a
6399 subreg of something larger than a word. */
6400 /* XXX Might this clobber base? I can't see how it
6401 can, since scratch is known to overlap with
6402 outval. */
6403 emit_insn (gen_movhi (scratch_hi, outval));
6404 outval = scratch_hi;
6405 }
6406 }
6407
6408 /* Get the base address; addsi3 knows how to handle constants
6409 that require more than one insn. */
6410 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6411 base = base_plus;
6412 offset = lo;
6413 }
6414 }
6415
6416 if (BYTES_BIG_ENDIAN)
6417 {
6418 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6419 plus_constant (base, offset + 1)),
6420 gen_lowpart (QImode, outval)));
6421 emit_insn (gen_lshrsi3 (scratch,
6422 gen_rtx_SUBREG (SImode, outval, 0),
6423 GEN_INT (8)));
6424 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6425 gen_lowpart (QImode, scratch)));
6426 }
6427 else
6428 {
6429 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6430 gen_lowpart (QImode, outval)));
6431 emit_insn (gen_lshrsi3 (scratch,
6432 gen_rtx_SUBREG (SImode, outval, 0),
6433 GEN_INT (8)));
6434 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6435 plus_constant (base, offset + 1)),
6436 gen_lowpart (QImode, scratch)));
6437 }
6438 }
6439 \f
6440 /* Print a symbolic form of X to the debug file, F. */
6441 static void
6442 arm_print_value (FILE *f, rtx x)
6443 {
6444 switch (GET_CODE (x))
6445 {
6446 case CONST_INT:
6447 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6448 return;
6449
6450 case CONST_DOUBLE:
6451 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6452 return;
6453
6454 case CONST_VECTOR:
6455 {
6456 int i;
6457
6458 fprintf (f, "<");
6459 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6460 {
6461 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6462 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6463 fputc (',', f);
6464 }
6465 fprintf (f, ">");
6466 }
6467 return;
6468
6469 case CONST_STRING:
6470 fprintf (f, "\"%s\"", XSTR (x, 0));
6471 return;
6472
6473 case SYMBOL_REF:
6474 fprintf (f, "`%s'", XSTR (x, 0));
6475 return;
6476
6477 case LABEL_REF:
6478 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6479 return;
6480
6481 case CONST:
6482 arm_print_value (f, XEXP (x, 0));
6483 return;
6484
6485 case PLUS:
6486 arm_print_value (f, XEXP (x, 0));
6487 fprintf (f, "+");
6488 arm_print_value (f, XEXP (x, 1));
6489 return;
6490
6491 case PC:
6492 fprintf (f, "pc");
6493 return;
6494
6495 default:
6496 fprintf (f, "????");
6497 return;
6498 }
6499 }
6500 \f
6501 /* Routines for manipulation of the constant pool. */
6502
6503 /* Arm instructions cannot load a large constant directly into a
6504 register; they have to come from a pc relative load. The constant
6505 must therefore be placed in the addressable range of the pc
6506 relative load. Depending on the precise pc relative load
6507 instruction the range is somewhere between 256 bytes and 4k. This
6508 means that we often have to dump a constant inside a function, and
6509 generate code to branch around it.
6510
6511 It is important to minimize this, since the branches will slow
6512 things down and make the code larger.
6513
6514 Normally we can hide the table after an existing unconditional
6515 branch so that there is no interruption of the flow, but in the
6516 worst case the code looks like this:
6517
6518 ldr rn, L1
6519 ...
6520 b L2
6521 align
6522 L1: .long value
6523 L2:
6524 ...
6525
6526 ldr rn, L3
6527 ...
6528 b L4
6529 align
6530 L3: .long value
6531 L4:
6532 ...
6533
6534 We fix this by performing a scan after scheduling, which notices
6535 which instructions need to have their operands fetched from the
6536 constant table and builds the table.
6537
6538 The algorithm starts by building a table of all the constants that
6539 need fixing up and all the natural barriers in the function (places
6540 where a constant table can be dropped without breaking the flow).
6541 For each fixup we note how far the pc-relative replacement will be
6542 able to reach and the offset of the instruction into the function.
6543
6544 Having built the table we then group the fixes together to form
6545 tables that are as large as possible (subject to addressing
6546 constraints) and emit each table of constants after the last
6547 barrier that is within range of all the instructions in the group.
6548 If a group does not contain a barrier, then we forcibly create one
6549 by inserting a jump instruction into the flow. Once the table has
6550 been inserted, the insns are then modified to reference the
6551 relevant entry in the pool.
6552
6553 Possible enhancements to the algorithm (not implemented) are:
6554
6555 1) For some processors and object formats, there may be benefit in
6556 aligning the pools to the start of cache lines; this alignment
6557 would need to be taken into account when calculating addressability
6558 of a pool. */
6559
6560 /* These typedefs are located at the start of this file, so that
6561 they can be used in the prototypes there. This comment is to
6562 remind readers of that fact so that the following structures
6563 can be understood more easily.
6564
6565 typedef struct minipool_node Mnode;
6566 typedef struct minipool_fixup Mfix; */
6567
6568 struct minipool_node
6569 {
6570 /* Doubly linked chain of entries. */
6571 Mnode * next;
6572 Mnode * prev;
6573 /* The maximum offset into the code at which this entry can be placed. While
6574 pushing fixes for forward references, all entries are sorted in order
6575 of increasing max_address. */
6576 HOST_WIDE_INT max_address;
6577 /* Similarly for an entry inserted for a backwards ref. */
6578 HOST_WIDE_INT min_address;
6579 /* The number of fixes referencing this entry. This can become zero
6580 if we "unpush" an entry. In this case we ignore the entry when we
6581 come to emit the code. */
6582 int refcount;
6583 /* The offset from the start of the minipool. */
6584 HOST_WIDE_INT offset;
6585 /* The value in table. */
6586 rtx value;
6587 /* The mode of value. */
6588 enum machine_mode mode;
6589 /* The size of the value. With iWMMXt enabled
6590 sizes > 4 also imply an alignment of 8 bytes. */
6591 int fix_size;
6592 };
6593
6594 struct minipool_fixup
6595 {
6596 Mfix * next;
6597 rtx insn;
6598 HOST_WIDE_INT address;
6599 rtx * loc;
6600 enum machine_mode mode;
6601 int fix_size;
6602 rtx value;
6603 Mnode * minipool;
6604 HOST_WIDE_INT forwards;
6605 HOST_WIDE_INT backwards;
6606 };
6607
6608 /* Fixes less than a word need padding out to a word boundary. */
6609 #define MINIPOOL_FIX_SIZE(mode) \
6610 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
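/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode)
   both evaluate to 4 (sub-word entries are padded to a full word), while
   MINIPOOL_FIX_SIZE (SImode) is 4 and MINIPOOL_FIX_SIZE (DImode) or
   MINIPOOL_FIX_SIZE (DFmode) is 8.  */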
6611
6612 static Mnode * minipool_vector_head;
6613 static Mnode * minipool_vector_tail;
6614 static rtx minipool_vector_label;
6615
6616 /* The linked list of all minipool fixes required for this function. */
6617 Mfix * minipool_fix_head;
6618 Mfix * minipool_fix_tail;
6619 /* The fix entry for the current minipool, once it has been placed. */
6620 Mfix * minipool_barrier;
6621
6622 /* Determines if INSN is the start of a jump table. Returns the end
6623 of the TABLE or NULL_RTX. */
6624 static rtx
6625 is_jump_table (rtx insn)
6626 {
6627 rtx table;
6628
6629 if (GET_CODE (insn) == JUMP_INSN
6630 && JUMP_LABEL (insn) != NULL
6631 && ((table = next_real_insn (JUMP_LABEL (insn)))
6632 == next_real_insn (insn))
6633 && table != NULL
6634 && GET_CODE (table) == JUMP_INSN
6635 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6636 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6637 return table;
6638
6639 return NULL_RTX;
6640 }
6641
6642 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6643 #define JUMP_TABLES_IN_TEXT_SECTION 0
6644 #endif
6645
6646 static HOST_WIDE_INT
6647 get_jump_table_size (rtx insn)
6648 {
6649 /* ADDR_VECs only take room if read-only data goes into the text
6650 section. */
6651 if (JUMP_TABLES_IN_TEXT_SECTION
6652 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6653 || 1
6654 #endif
6655 )
6656 {
6657 rtx body = PATTERN (insn);
6658 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6659
6660 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6661 }
6662
6663 return 0;
6664 }
6665
6666 /* Move a minipool fix MP from its current location to before MAX_MP.
6667 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6668 constraints may need updating. */
6669 static Mnode *
6670 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6671 HOST_WIDE_INT max_address)
6672 {
6673 /* This should never be true and the code below assumes these are
6674 different. */
6675 if (mp == max_mp)
6676 abort ();
6677
6678 if (max_mp == NULL)
6679 {
6680 if (max_address < mp->max_address)
6681 mp->max_address = max_address;
6682 }
6683 else
6684 {
6685 if (max_address > max_mp->max_address - mp->fix_size)
6686 mp->max_address = max_mp->max_address - mp->fix_size;
6687 else
6688 mp->max_address = max_address;
6689
6690 /* Unlink MP from its current position. Since max_mp is non-null,
6691 mp->prev must be non-null. */
6692 mp->prev->next = mp->next;
6693 if (mp->next != NULL)
6694 mp->next->prev = mp->prev;
6695 else
6696 minipool_vector_tail = mp->prev;
6697
6698 /* Re-insert it before MAX_MP. */
6699 mp->next = max_mp;
6700 mp->prev = max_mp->prev;
6701 max_mp->prev = mp;
6702
6703 if (mp->prev != NULL)
6704 mp->prev->next = mp;
6705 else
6706 minipool_vector_head = mp;
6707 }
6708
6709 /* Save the new entry. */
6710 max_mp = mp;
6711
6712 /* Scan over the preceding entries and adjust their addresses as
6713 required. */
6714 while (mp->prev != NULL
6715 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6716 {
6717 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6718 mp = mp->prev;
6719 }
6720
6721 return max_mp;
6722 }
6723
6724 /* Add a constant to the minipool for a forward reference. Returns the
6725 node added or NULL if the constant will not fit in this pool. */
6726 static Mnode *
6727 add_minipool_forward_ref (Mfix *fix)
6728 {
6729 /* If set, max_mp is the first pool_entry that has a lower
6730 constraint than the one we are trying to add. */
6731 Mnode * max_mp = NULL;
6732 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6733 Mnode * mp;
6734
6735 /* If this fix's address is greater than the address of the first
6736 entry, then we can't put the fix in this pool. We subtract the
6737 size of the current fix to ensure that if the table is fully
6738 packed we still have enough room to insert this value by suffling
6739 the other fixes forwards. */
6740 if (minipool_vector_head &&
6741 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6742 return NULL;
6743
6744 /* Scan the pool to see if a constant with the same value has
6745 already been added. While we are doing this, also note the
6746 location where we must insert the constant if it doesn't already
6747 exist. */
6748 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6749 {
6750 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6751 && fix->mode == mp->mode
6752 && (GET_CODE (fix->value) != CODE_LABEL
6753 || (CODE_LABEL_NUMBER (fix->value)
6754 == CODE_LABEL_NUMBER (mp->value)))
6755 && rtx_equal_p (fix->value, mp->value))
6756 {
6757 /* More than one fix references this entry. */
6758 mp->refcount++;
6759 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6760 }
6761
6762 /* Note the insertion point if necessary. */
6763 if (max_mp == NULL
6764 && mp->max_address > max_address)
6765 max_mp = mp;
6766
6767 /* If we are inserting an 8-byte aligned quantity and
6768 we have not already found an insertion point, then
6769 make sure that all such 8-byte aligned quantities are
6770 placed at the start of the pool. */
6771 if (ARM_DOUBLEWORD_ALIGN
6772 && max_mp == NULL
6773 && fix->fix_size == 8
6774 && mp->fix_size != 8)
6775 {
6776 max_mp = mp;
6777 max_address = mp->max_address;
6778 }
6779 }
6780
6781 /* The value is not currently in the minipool, so we need to create
6782 a new entry for it. If MAX_MP is NULL, the entry will be put on
6783 the end of the list since the placement is less constrained than
6784 any existing entry. Otherwise, we insert the new fix before
6785 MAX_MP and, if necessary, adjust the constraints on the other
6786 entries. */
6787 mp = xmalloc (sizeof (* mp));
6788 mp->fix_size = fix->fix_size;
6789 mp->mode = fix->mode;
6790 mp->value = fix->value;
6791 mp->refcount = 1;
6792 /* Not yet required for a backwards ref. */
6793 mp->min_address = -65536;
6794
6795 if (max_mp == NULL)
6796 {
6797 mp->max_address = max_address;
6798 mp->next = NULL;
6799 mp->prev = minipool_vector_tail;
6800
6801 if (mp->prev == NULL)
6802 {
6803 minipool_vector_head = mp;
6804 minipool_vector_label = gen_label_rtx ();
6805 }
6806 else
6807 mp->prev->next = mp;
6808
6809 minipool_vector_tail = mp;
6810 }
6811 else
6812 {
6813 if (max_address > max_mp->max_address - mp->fix_size)
6814 mp->max_address = max_mp->max_address - mp->fix_size;
6815 else
6816 mp->max_address = max_address;
6817
6818 mp->next = max_mp;
6819 mp->prev = max_mp->prev;
6820 max_mp->prev = mp;
6821 if (mp->prev != NULL)
6822 mp->prev->next = mp;
6823 else
6824 minipool_vector_head = mp;
6825 }
6826
6827 /* Save the new entry. */
6828 max_mp = mp;
6829
6830 /* Scan over the preceding entries and adjust their addresses as
6831 required. */
6832 while (mp->prev != NULL
6833 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6834 {
6835 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6836 mp = mp->prev;
6837 }
6838
6839 return max_mp;
6840 }
6841
6842 static Mnode *
6843 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6844 HOST_WIDE_INT min_address)
6845 {
6846 HOST_WIDE_INT offset;
6847
6848 /* This should never be true, and the code below assumes these are
6849 different. */
6850 if (mp == min_mp)
6851 abort ();
6852
6853 if (min_mp == NULL)
6854 {
6855 if (min_address > mp->min_address)
6856 mp->min_address = min_address;
6857 }
6858 else
6859 {
6860 /* We will adjust this below if it is too loose. */
6861 mp->min_address = min_address;
6862
6863 /* Unlink MP from its current position. Since min_mp is non-null,
6864 mp->next must be non-null. */
6865 mp->next->prev = mp->prev;
6866 if (mp->prev != NULL)
6867 mp->prev->next = mp->next;
6868 else
6869 minipool_vector_head = mp->next;
6870
6871 /* Reinsert it after MIN_MP. */
6872 mp->prev = min_mp;
6873 mp->next = min_mp->next;
6874 min_mp->next = mp;
6875 if (mp->next != NULL)
6876 mp->next->prev = mp;
6877 else
6878 minipool_vector_tail = mp;
6879 }
6880
6881 min_mp = mp;
6882
6883 offset = 0;
6884 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6885 {
6886 mp->offset = offset;
6887 if (mp->refcount > 0)
6888 offset += mp->fix_size;
6889
6890 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6891 mp->next->min_address = mp->min_address + mp->fix_size;
6892 }
6893
6894 return min_mp;
6895 }
6896
6897 /* Add a constant to the minipool for a backward reference. Returns the
6898 node added or NULL if the constant will not fit in this pool.
6899
6900 Note that the code for insertion for a backwards reference can be
6901 somewhat confusing because the calculated offsets for each fix do
6902 not take into account the size of the pool (which is still under
6903 construction). */
6904 static Mnode *
6905 add_minipool_backward_ref (Mfix *fix)
6906 {
6907 /* If set, min_mp is the last pool_entry that has a lower constraint
6908 than the one we are trying to add. */
6909 Mnode *min_mp = NULL;
6910 /* This can be negative, since it is only a constraint. */
6911 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6912 Mnode *mp;
6913
6914 /* If we can't reach the current pool from this insn, or if we can't
6915 insert this entry at the end of the pool without pushing other
6916 fixes out of range, then we don't try. This ensures that we
6917 can't fail later on. */
6918 if (min_address >= minipool_barrier->address
6919 || (minipool_vector_tail->min_address + fix->fix_size
6920 >= minipool_barrier->address))
6921 return NULL;
6922
6923 /* Scan the pool to see if a constant with the same value has
6924 already been added. While we are doing this, also note the
6925 location where we must insert the constant if it doesn't already
6926 exist. */
6927 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6928 {
6929 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6930 && fix->mode == mp->mode
6931 && (GET_CODE (fix->value) != CODE_LABEL
6932 || (CODE_LABEL_NUMBER (fix->value)
6933 == CODE_LABEL_NUMBER (mp->value)))
6934 && rtx_equal_p (fix->value, mp->value)
6935 /* Check that there is enough slack to move this entry to the
6936 end of the table (this is conservative). */
6937 && (mp->max_address
6938 > (minipool_barrier->address
6939 + minipool_vector_tail->offset
6940 + minipool_vector_tail->fix_size)))
6941 {
6942 mp->refcount++;
6943 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6944 }
6945
6946 if (min_mp != NULL)
6947 mp->min_address += fix->fix_size;
6948 else
6949 {
6950 /* Note the insertion point if necessary. */
6951 if (mp->min_address < min_address)
6952 {
6953 /* For now, we do not allow the insertion of 8-byte alignment
6954 requiring nodes anywhere but at the start of the pool. */
6955 if (ARM_DOUBLEWORD_ALIGN
6956 && fix->fix_size == 8 && mp->fix_size != 8)
6957 return NULL;
6958 else
6959 min_mp = mp;
6960 }
6961 else if (mp->max_address
6962 < minipool_barrier->address + mp->offset + fix->fix_size)
6963 {
6964 /* Inserting before this entry would push the fix beyond
6965 its maximum address (which can happen if we have
6966 re-located a forwards fix); force the new fix to come
6967 after it. */
6968 min_mp = mp;
6969 min_address = mp->min_address + fix->fix_size;
6970 }
6971 /* If we are inserting an 8-byte aligned quantity and
6972 we have not already found an insertion point, then
6973 make sure that all such 8-byte aligned quantities are
6974 placed at the start of the pool. */
6975 else if (ARM_DOUBLEWORD_ALIGN
6976 && min_mp == NULL
6977 && fix->fix_size == 8
6978 && mp->fix_size < 8)
6979 {
6980 min_mp = mp;
6981 min_address = mp->min_address + fix->fix_size;
6982 }
6983 }
6984 }
6985
6986 /* We need to create a new entry. */
6987 mp = xmalloc (sizeof (* mp));
6988 mp->fix_size = fix->fix_size;
6989 mp->mode = fix->mode;
6990 mp->value = fix->value;
6991 mp->refcount = 1;
6992 mp->max_address = minipool_barrier->address + 65536;
6993
6994 mp->min_address = min_address;
6995
6996 if (min_mp == NULL)
6997 {
6998 mp->prev = NULL;
6999 mp->next = minipool_vector_head;
7000
7001 if (mp->next == NULL)
7002 {
7003 minipool_vector_tail = mp;
7004 minipool_vector_label = gen_label_rtx ();
7005 }
7006 else
7007 mp->next->prev = mp;
7008
7009 minipool_vector_head = mp;
7010 }
7011 else
7012 {
7013 mp->next = min_mp->next;
7014 mp->prev = min_mp;
7015 min_mp->next = mp;
7016
7017 if (mp->next != NULL)
7018 mp->next->prev = mp;
7019 else
7020 minipool_vector_tail = mp;
7021 }
7022
7023 /* Save the new entry. */
7024 min_mp = mp;
7025
7026 if (mp->prev)
7027 mp = mp->prev;
7028 else
7029 mp->offset = 0;
7030
7031 /* Scan over the following entries and adjust their offsets. */
7032 while (mp->next != NULL)
7033 {
7034 if (mp->next->min_address < mp->min_address + mp->fix_size)
7035 mp->next->min_address = mp->min_address + mp->fix_size;
7036
7037 if (mp->refcount)
7038 mp->next->offset = mp->offset + mp->fix_size;
7039 else
7040 mp->next->offset = mp->offset;
7041
7042 mp = mp->next;
7043 }
7044
7045 return min_mp;
7046 }
7047
7048 static void
7049 assign_minipool_offsets (Mfix *barrier)
7050 {
7051 HOST_WIDE_INT offset = 0;
7052 Mnode *mp;
7053
7054 minipool_barrier = barrier;
7055
7056 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7057 {
7058 mp->offset = offset;
7059
7060 if (mp->refcount > 0)
7061 offset += mp->fix_size;
7062 }
7063 }
7064
7065 /* Output the literal table */
7066 static void
7067 dump_minipool (rtx scan)
7068 {
7069 Mnode * mp;
7070 Mnode * nmp;
7071 int align64 = 0;
7072
7073 if (ARM_DOUBLEWORD_ALIGN)
7074 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7075 if (mp->refcount > 0 && mp->fix_size == 8)
7076 {
7077 align64 = 1;
7078 break;
7079 }
7080
7081 if (dump_file)
7082 fprintf (dump_file,
7083 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7084 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7085
7086 scan = emit_label_after (gen_label_rtx (), scan);
7087 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7088 scan = emit_label_after (minipool_vector_label, scan);
7089
7090 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7091 {
7092 if (mp->refcount > 0)
7093 {
7094 if (dump_file)
7095 {
7096 fprintf (dump_file,
7097 ";; Offset %u, min %ld, max %ld ",
7098 (unsigned) mp->offset, (unsigned long) mp->min_address,
7099 (unsigned long) mp->max_address);
7100 arm_print_value (dump_file, mp->value);
7101 fputc ('\n', dump_file);
7102 }
7103
7104 switch (mp->fix_size)
7105 {
7106 #ifdef HAVE_consttable_1
7107 case 1:
7108 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7109 break;
7110
7111 #endif
7112 #ifdef HAVE_consttable_2
7113 case 2:
7114 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7115 break;
7116
7117 #endif
7118 #ifdef HAVE_consttable_4
7119 case 4:
7120 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7121 break;
7122
7123 #endif
7124 #ifdef HAVE_consttable_8
7125 case 8:
7126 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7127 break;
7128
7129 #endif
7130 default:
7131 abort ();
7132 break;
7133 }
7134 }
7135
7136 nmp = mp->next;
7137 free (mp);
7138 }
7139
7140 minipool_vector_head = minipool_vector_tail = NULL;
7141 scan = emit_insn_after (gen_consttable_end (), scan);
7142 scan = emit_barrier_after (scan);
7143 }
7144
7145 /* Return the cost of forcibly inserting a barrier after INSN. */
7146 static int
7147 arm_barrier_cost (rtx insn)
7148 {
7149 /* Basing the location of the pool on the loop depth is preferable,
7150 but at the moment, the basic block information seems to be
7151 corrupt by this stage of the compilation. */
7152 int base_cost = 50;
7153 rtx next = next_nonnote_insn (insn);
7154
7155 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7156 base_cost -= 20;
7157
7158 switch (GET_CODE (insn))
7159 {
7160 case CODE_LABEL:
7161 /* It will always be better to place the table before the label, rather
7162 than after it. */
7163 return 50;
7164
7165 case INSN:
7166 case CALL_INSN:
7167 return base_cost;
7168
7169 case JUMP_INSN:
7170 return base_cost - 10;
7171
7172 default:
7173 return base_cost + 10;
7174 }
7175 }
7176
7177 /* Find the best place in the insn stream in the range
7178 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7179 Create the barrier by inserting a jump and add a new fix entry for
7180 it. */
7181 static Mfix *
7182 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7183 {
7184 HOST_WIDE_INT count = 0;
7185 rtx barrier;
7186 rtx from = fix->insn;
7187 rtx selected = from;
7188 int selected_cost;
7189 HOST_WIDE_INT selected_address;
7190 Mfix * new_fix;
7191 HOST_WIDE_INT max_count = max_address - fix->address;
7192 rtx label = gen_label_rtx ();
7193
7194 selected_cost = arm_barrier_cost (from);
7195 selected_address = fix->address;
7196
7197 while (from && count < max_count)
7198 {
7199 rtx tmp;
7200 int new_cost;
7201
7202 /* This code shouldn't have been called if there was a natural barrier
7203 within range. */
7204 if (GET_CODE (from) == BARRIER)
7205 abort ();
7206
7207 /* Count the length of this insn. */
7208 count += get_attr_length (from);
7209
7210 /* If there is a jump table, add its length. */
7211 tmp = is_jump_table (from);
7212 if (tmp != NULL)
7213 {
7214 count += get_jump_table_size (tmp);
7215
7216 /* Jump tables aren't in a basic block, so base the cost on
7217 the dispatch insn. If we select this location, we will
7218 still put the pool after the table. */
7219 new_cost = arm_barrier_cost (from);
7220
7221 if (count < max_count && new_cost <= selected_cost)
7222 {
7223 selected = tmp;
7224 selected_cost = new_cost;
7225 selected_address = fix->address + count;
7226 }
7227
7228 /* Continue after the dispatch table. */
7229 from = NEXT_INSN (tmp);
7230 continue;
7231 }
7232
7233 new_cost = arm_barrier_cost (from);
7234
7235 if (count < max_count && new_cost <= selected_cost)
7236 {
7237 selected = from;
7238 selected_cost = new_cost;
7239 selected_address = fix->address + count;
7240 }
7241
7242 from = NEXT_INSN (from);
7243 }
7244
7245 /* Create a new JUMP_INSN that branches around a barrier. */
7246 from = emit_jump_insn_after (gen_jump (label), selected);
7247 JUMP_LABEL (from) = label;
7248 barrier = emit_barrier_after (from);
7249 emit_label_after (label, barrier);
7250
7251 /* Create a minipool barrier entry for the new barrier. */
7252 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7253 new_fix->insn = barrier;
7254 new_fix->address = selected_address;
7255 new_fix->next = fix->next;
7256 fix->next = new_fix;
7257
7258 return new_fix;
7259 }
7260
7261 /* Record that there is a natural barrier in the insn stream at
7262 ADDRESS. */
7263 static void
7264 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7265 {
7266 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7267
7268 fix->insn = insn;
7269 fix->address = address;
7270
7271 fix->next = NULL;
7272 if (minipool_fix_head != NULL)
7273 minipool_fix_tail->next = fix;
7274 else
7275 minipool_fix_head = fix;
7276
7277 minipool_fix_tail = fix;
7278 }
7279
7280 /* Record INSN, which will need fixing up to load a value from the
7281 minipool. ADDRESS is the offset of the insn since the start of the
7282 function; LOC is a pointer to the part of the insn which requires
7283 fixing; VALUE is the constant that must be loaded, which is of type
7284 MODE. */
7285 static void
7286 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7287 enum machine_mode mode, rtx value)
7288 {
7289 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7290
7291 #ifdef AOF_ASSEMBLER
7292 /* PIC symbol references need to be converted into offsets into the
7293 based area. */
7294 /* XXX This shouldn't be done here. */
7295 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7296 value = aof_pic_entry (value);
7297 #endif /* AOF_ASSEMBLER */
7298
7299 fix->insn = insn;
7300 fix->address = address;
7301 fix->loc = loc;
7302 fix->mode = mode;
7303 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7304 fix->value = value;
7305 fix->forwards = get_attr_pool_range (insn);
7306 fix->backwards = get_attr_neg_pool_range (insn);
7307 fix->minipool = NULL;
7308
7309 /* If an insn doesn't have a range defined for it, then it isn't
7310 expecting to be reworked by this code. Better to abort now than
7311 to generate duff assembly code. */
7312 if (fix->forwards == 0 && fix->backwards == 0)
7313 abort ();
7314
7315 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7316 So there might be an empty word before the start of the pool.
7317 Hence we reduce the forward range by 4 to allow for this
7318 possibility. */
7319 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7320 fix->forwards -= 4;
7321
7322 if (dump_file)
7323 {
7324 fprintf (dump_file,
7325 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7326 GET_MODE_NAME (mode),
7327 INSN_UID (insn), (unsigned long) address,
7328 -1 * (long)fix->backwards, (long)fix->forwards);
7329 arm_print_value (dump_file, fix->value);
7330 fprintf (dump_file, "\n");
7331 }
7332
7333 /* Add it to the chain of fixes. */
7334 fix->next = NULL;
7335
7336 if (minipool_fix_head != NULL)
7337 minipool_fix_tail->next = fix;
7338 else
7339 minipool_fix_head = fix;
7340
7341 minipool_fix_tail = fix;
7342 }
7343
7344 /* Return the cost of synthesizing the const_double VAL inline.
7345 Returns the number of insns needed, or 99 if we don't know how to
7346 do it. */
7347 int
7348 arm_const_double_inline_cost (rtx val)
7349 {
7350 long parts[2];
7351
7352 if (GET_MODE (val) == DFmode)
7353 {
7354 REAL_VALUE_TYPE r;
7355 if (!TARGET_SOFT_FLOAT)
7356 return 99;
7357 REAL_VALUE_FROM_CONST_DOUBLE (r, val);
7358 REAL_VALUE_TO_TARGET_DOUBLE (r, parts);
7359 }
7360 else if (GET_MODE (val) != VOIDmode)
7361 return 99;
7362 else
7363 {
7364 parts[0] = CONST_DOUBLE_LOW (val);
7365 parts[1] = CONST_DOUBLE_HIGH (val);
7366 }
7367
7368 return (arm_gen_constant (SET, SImode, NULL_RTX, parts[0],
7369 NULL_RTX, NULL_RTX, 0, 0)
7370 + arm_gen_constant (SET, SImode, NULL_RTX, parts[1],
7371 NULL_RTX, NULL_RTX, 0, 0));
7372 }
7373
7374 /* Determine if a CONST_DOUBLE should be pushed to the minipool */
7375 static bool
7376 const_double_needs_minipool (rtx val)
7377 {
7378 /* Thumb only knows how to load a CONST_DOUBLE from memory at the moment. */
7379 if (TARGET_THUMB)
7380 return true;
7381
7382 /* Don't push anything to the minipool if a CONST_DOUBLE can be built with
7383 a few ALU insns directly. On balance, the optimum is likely to be around
7384 3 insns, except when there are no load delay slots where it should be 4.
7385 When optimizing for size, a limit of 3 allows saving at least one word
7386 except for cases where a single minipool entry could be shared more than
7387 2 times, which is rather unlikely to outweigh the overall savings. */
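  /* For instance (hypothetical values): a CONST_DOUBLE whose two word
     halves are 0 and 0xff000000 costs one insn per half (total 2), so it
     is built inline; a value such as 0x12345678 in either half needs four
     insns for that half alone, pushing the total past the limit, and the
     constant goes to the minipool instead.  */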
7388 return (arm_const_double_inline_cost (val)
7389 > ((optimize_size || arm_ld_sched) ? 3 : 4));
7390 }
7391
7392 /* Scan INSN and note any of its operands that need fixing.
7393 If DO_PUSHES is false we do not actually push any of the fixups
7394 needed. The function returns TRUE if any fixups were needed/pushed.
7395 This is used by arm_memory_load_p() which needs to know about loads
7396 of constants that will be converted into minipool loads. */
7397 static bool
7398 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7399 {
7400 bool result = false;
7401 int opno;
7402
7403 extract_insn (insn);
7404
7405 if (!constrain_operands (1))
7406 fatal_insn_not_found (insn);
7407
7408 if (recog_data.n_alternatives == 0)
7409 return false;
7410
7411 /* Fill in recog_op_alt with information about the constraints of this insn. */
7412 preprocess_constraints ();
7413
7414 for (opno = 0; opno < recog_data.n_operands; opno++)
7415 {
7416 /* Things we need to fix can only occur in inputs. */
7417 if (recog_data.operand_type[opno] != OP_IN)
7418 continue;
7419
7420 /* If this alternative is a memory reference, then any mention
7421 of constants in this alternative is really to fool reload
7422 into allowing us to accept one there. We need to fix them up
7423 now so that we output the right code. */
7424 if (recog_op_alt[opno][which_alternative].memory_ok)
7425 {
7426 rtx op = recog_data.operand[opno];
7427
7428 if (CONSTANT_P (op)
7429 && (GET_CODE (op) != CONST_DOUBLE
7430 || const_double_needs_minipool (op)))
7431 {
7432 if (do_pushes)
7433 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7434 recog_data.operand_mode[opno], op);
7435 result = true;
7436 }
7437 else if (GET_CODE (op) == MEM
7438 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7439 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7440 {
7441 if (do_pushes)
7442 {
7443 rtx cop = avoid_constant_pool_reference (op);
7444
7445 /* Casting the address of something to a mode narrower
7446 than a word can cause avoid_constant_pool_reference()
7447 to return the pool reference itself. That's no good to
7448 us here. Let's just hope that we can use the
7449 constant pool value directly. */
7450 if (op == cop)
7451 cop = get_pool_constant (XEXP (op, 0));
7452
7453 push_minipool_fix (insn, address,
7454 recog_data.operand_loc[opno],
7455 recog_data.operand_mode[opno], cop);
7456 }
7457
7458 result = true;
7459 }
7460 }
7461 }
7462
7463 return result;
7464 }
7465
7466 /* Gcc puts the pool in the wrong place for ARM, since we can only
7467 load addresses a limited distance around the pc. We do some
7468 special munging to move the constant pool values to the correct
7469 point in the code. */
7470 static void
7471 arm_reorg (void)
7472 {
7473 rtx insn;
7474 HOST_WIDE_INT address = 0;
7475 Mfix * fix;
7476
7477 minipool_fix_head = minipool_fix_tail = NULL;
7478
7479 /* The first insn must always be a note, or the code below won't
7480 scan it properly. */
7481 insn = get_insns ();
7482 if (GET_CODE (insn) != NOTE)
7483 abort ();
7484
7485 /* Scan all the insns and record the operands that will need fixing. */
7486 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7487 {
7488 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7489 && (arm_cirrus_insn_p (insn)
7490 || GET_CODE (insn) == JUMP_INSN
7491 || arm_memory_load_p (insn)))
7492 cirrus_reorg (insn);
7493
7494 if (GET_CODE (insn) == BARRIER)
7495 push_minipool_barrier (insn, address);
7496 else if (INSN_P (insn))
7497 {
7498 rtx table;
7499
7500 note_invalid_constants (insn, address, true);
7501 address += get_attr_length (insn);
7502
7503 /* If the insn is a vector jump, add the size of the table
7504 and skip the table. */
7505 if ((table = is_jump_table (insn)) != NULL)
7506 {
7507 address += get_jump_table_size (table);
7508 insn = table;
7509 }
7510 }
7511 }
7512
7513 fix = minipool_fix_head;
7514
7515 /* Now scan the fixups and perform the required changes. */
7516 while (fix)
7517 {
7518 Mfix * ftmp;
7519 Mfix * fdel;
7520 Mfix * last_added_fix;
7521 Mfix * last_barrier = NULL;
7522 Mfix * this_fix;
7523
7524 /* Skip any further barriers before the next fix. */
7525 while (fix && GET_CODE (fix->insn) == BARRIER)
7526 fix = fix->next;
7527
7528 /* No more fixes. */
7529 if (fix == NULL)
7530 break;
7531
7532 last_added_fix = NULL;
7533
7534 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7535 {
7536 if (GET_CODE (ftmp->insn) == BARRIER)
7537 {
7538 if (ftmp->address >= minipool_vector_head->max_address)
7539 break;
7540
7541 last_barrier = ftmp;
7542 }
7543 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7544 break;
7545
7546 last_added_fix = ftmp; /* Keep track of the last fix added. */
7547 }
7548
7549 /* If we found a barrier, drop back to that; any fixes that we
7550 could have reached but come after the barrier will now go in
7551 the next mini-pool. */
7552 if (last_barrier != NULL)
7553 {
7554 /* Reduce the refcount for those fixes that won't go into this
7555 pool after all. */
7556 for (fdel = last_barrier->next;
7557 fdel && fdel != ftmp;
7558 fdel = fdel->next)
7559 {
7560 fdel->minipool->refcount--;
7561 fdel->minipool = NULL;
7562 }
7563
7564 ftmp = last_barrier;
7565 }
7566 else
7567 {
7568 /* ftmp is the first fix that we can't fit into this pool and
7569 there are no natural barriers that we could use. Insert a
7570 new barrier in the code somewhere between the previous
7571 fix and this one, and arrange to jump around it. */
7572 HOST_WIDE_INT max_address;
7573
7574 /* The last item on the list of fixes must be a barrier, so
7575 we can never run off the end of the list of fixes without
7576 last_barrier being set. */
7577 if (ftmp == NULL)
7578 abort ();
7579
7580 max_address = minipool_vector_head->max_address;
7581 /* Check that there isn't another fix that is in range that
7582 we couldn't fit into this pool because the pool was
7583 already too large: we need to put the pool before such an
7584 instruction. */
7585 if (ftmp->address < max_address)
7586 max_address = ftmp->address;
7587
7588 last_barrier = create_fix_barrier (last_added_fix, max_address);
7589 }
7590
7591 assign_minipool_offsets (last_barrier);
7592
7593 while (ftmp)
7594 {
7595 if (GET_CODE (ftmp->insn) != BARRIER
7596 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7597 == NULL))
7598 break;
7599
7600 ftmp = ftmp->next;
7601 }
7602
7603 /* Scan over the fixes we have identified for this pool, fixing them
7604 up and adding the constants to the pool itself. */
7605 for (this_fix = fix; this_fix && ftmp != this_fix;
7606 this_fix = this_fix->next)
7607 if (GET_CODE (this_fix->insn) != BARRIER)
7608 {
7609 rtx addr
7610 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7611 minipool_vector_label),
7612 this_fix->minipool->offset);
7613 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7614 }
7615
7616 dump_minipool (last_barrier->insn);
7617 fix = ftmp;
7618 }
7619
7620 /* From now on we must synthesize any constants that we can't handle
7621 directly. This can happen if the RTL gets split during final
7622 instruction generation. */
7623 after_arm_reorg = 1;
7624
7625 /* Free the minipool memory. */
7626 obstack_free (&minipool_obstack, minipool_startobj);
7627 }
7628 \f
7629 /* Routines to output assembly language. */
7630
7631 /* If the rtx is the correct value then return the string of the number.
7632 In this way we can ensure that valid double constants are generated even
7633 when cross compiling. */
7634 const char *
7635 fp_immediate_constant (rtx x)
7636 {
7637 REAL_VALUE_TYPE r;
7638 int i;
7639
7640 if (!fp_consts_inited)
7641 init_fp_table ();
7642
7643 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7644 for (i = 0; i < 8; i++)
7645 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7646 return strings_fp[i];
7647
7648 abort ();
7649 }
7650
7651 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7652 static const char *
7653 fp_const_from_val (REAL_VALUE_TYPE *r)
7654 {
7655 int i;
7656
7657 if (!fp_consts_inited)
7658 init_fp_table ();
7659
7660 for (i = 0; i < 8; i++)
7661 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7662 return strings_fp[i];
7663
7664 abort ();
7665 }
7666
7667 /* Output the operands of an LDM/STM instruction to STREAM.
7668    MASK is the ARM register set mask of which only bits 0-15 are important.
7669    REG is the base register, either the frame pointer or the stack pointer.
7670    INSTR is the possibly suffixed load or store instruction.  */
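/* For example (illustrative, with hypothetical arguments): INSTR
   "ldmfd\t%r!", REG = SP_REGNUM and a MASK covering r4, r5 and lr
   would print

	ldmfd	sp!, {r4, r5, lr}

   The exact register list depends entirely on MASK.  */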
7671 static void
7672 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7673 {
7674 int i;
7675 int not_first = FALSE;
7676
7677 fputc ('\t', stream);
7678 asm_fprintf (stream, instr, reg);
7679 fputs (", {", stream);
7680
7681 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7682 if (mask & (1 << i))
7683 {
7684 if (not_first)
7685 fprintf (stream, ", ");
7686
7687 asm_fprintf (stream, "%r", i);
7688 not_first = TRUE;
7689 }
7690
7691 fprintf (stream, "}\n");
7692 }
7693
7694
7695 /* Output a FLDMX instruction to STREAM.
7696    BASE is the register containing the address.
7697 REG and COUNT specify the register range.
7698 Extra registers may be added to avoid hardware bugs. */
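/* For example (illustrative, hypothetical arguments): BASE = SP_REGNUM,
   REG = 8 and COUNT = 3 emit

	fldmfdx	sp!, {d8, d9, d10}

   On pre-ARMv6 cores a COUNT of 2 is widened to 3 by the errata
   workaround below.  */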
7699
7700 static void
7701 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7702 {
7703 int i;
7704
7705 /* Workaround ARM10 VFPr1 bug. */
7706 if (count == 2 && !arm_arch6)
7707 {
7708 if (reg == 15)
7709 reg--;
7710 count++;
7711 }
7712
7713 fputc ('\t', stream);
7714 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7715
7716 for (i = reg; i < reg + count; i++)
7717 {
7718 if (i > reg)
7719 fputs (", ", stream);
7720 asm_fprintf (stream, "d%d", i);
7721 }
7722 fputs ("}\n", stream);
7723
7724 }
7725
7726
7727 /* Output the assembly for a store multiple. */
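/* A hypothetical example: for an operand vector describing d8-d10 with the
   stack pointer as the base register, the instruction written out would be

	fstmfdx	sp!, {d8, d9, d10}

   (illustrative only; the actual registers come from OPERANDS).  */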
7728
7729 const char *
7730 vfp_output_fstmx (rtx * operands)
7731 {
7732 char pattern[100];
7733 int p;
7734 int base;
7735 int i;
7736
7737 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7738 p = strlen (pattern);
7739
7740 if (GET_CODE (operands[1]) != REG)
7741 abort ();
7742
7743 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7744 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7745 {
7746 p += sprintf (&pattern[p], ", d%d", base + i);
7747 }
7748 strcpy (&pattern[p], "}");
7749
7750 output_asm_insn (pattern, operands);
7751 return "";
7752 }
7753
7754
7755 /* Emit RTL to save a block of VFP register pairs to the stack.  Returns the
7756 number of bytes pushed. */
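/* A minimal usage sketch (hypothetical values): a request to push COUNT = 2
   register pairs on a pre-ARMv6 core is widened to COUNT = 3 by the errata
   workaround below, so the function would return 3 * 8 + 4 = 28 bytes of
   stack adjustment.  */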
7757
7758 static int
7759 vfp_emit_fstmx (int base_reg, int count)
7760 {
7761 rtx par;
7762 rtx dwarf;
7763 rtx tmp, reg;
7764 int i;
7765
7766 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
7767 register pairs are stored by a store multiple insn. We avoid this
7768 by pushing an extra pair. */
7769 if (count == 2 && !arm_arch6)
7770 {
7771 if (base_reg == LAST_VFP_REGNUM - 3)
7772 base_reg -= 2;
7773 count++;
7774 }
7775
7776 /* ??? The frame layout is implementation defined. We describe
7777 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7778 We really need some way of representing the whole block so that the
7779 unwinder can figure it out at runtime. */
7780 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7781 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7782
7783 reg = gen_rtx_REG (DFmode, base_reg);
7784 base_reg += 2;
7785
7786 XVECEXP (par, 0, 0)
7787 = gen_rtx_SET (VOIDmode,
7788 gen_rtx_MEM (BLKmode,
7789 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7790 gen_rtx_UNSPEC (BLKmode,
7791 gen_rtvec (1, reg),
7792 UNSPEC_PUSH_MULT));
7793
7794 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7795 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7796 GEN_INT (-(count * 8 + 4))));
7797 RTX_FRAME_RELATED_P (tmp) = 1;
7798 XVECEXP (dwarf, 0, 0) = tmp;
7799
7800 tmp = gen_rtx_SET (VOIDmode,
7801 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7802 reg);
7803 RTX_FRAME_RELATED_P (tmp) = 1;
7804 XVECEXP (dwarf, 0, 1) = tmp;
7805
7806 for (i = 1; i < count; i++)
7807 {
7808 reg = gen_rtx_REG (DFmode, base_reg);
7809 base_reg += 2;
7810 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7811
7812 tmp = gen_rtx_SET (VOIDmode,
7813 gen_rtx_MEM (DFmode,
7814 gen_rtx_PLUS (SImode,
7815 stack_pointer_rtx,
7816 GEN_INT (i * 8))),
7817 reg);
7818 RTX_FRAME_RELATED_P (tmp) = 1;
7819 XVECEXP (dwarf, 0, i + 1) = tmp;
7820 }
7821
7822 par = emit_insn (par);
7823 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7824 REG_NOTES (par));
7825 RTX_FRAME_RELATED_P (par) = 1;
7826
7827 return count * 8 + 4;
7828 }
7829
7830
7831 /* Output a 'call' insn. */
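/* For a hypothetical operand r3 this emits, on a pre-ARMv5 core, either

	mov	lr, pc
	bx	r3		@ if interworking or ARMv4T

   or

	mov	lr, pc
	mov	pc, r3		@ otherwise

   (illustrative; ARMv5 targets use blx directly from the patterns).  */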
7832 const char *
7833 output_call (rtx *operands)
7834 {
7835 if (arm_arch5)
7836 abort (); /* Patterns should call blx <reg> directly. */
7837
7838 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7839 if (REGNO (operands[0]) == LR_REGNUM)
7840 {
7841 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7842 output_asm_insn ("mov%?\t%0, %|lr", operands);
7843 }
7844
7845 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7846
7847 if (TARGET_INTERWORK || arm_arch4t)
7848 output_asm_insn ("bx%?\t%0", operands);
7849 else
7850 output_asm_insn ("mov%?\t%|pc, %0", operands);
7851
7852 return "";
7853 }
7854
7855 /* Output a 'call' insn that is a reference in memory. */
7856 const char *
7857 output_call_mem (rtx *operands)
7858 {
7859 if (TARGET_INTERWORK && !arm_arch5)
7860 {
7861 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7862 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7863 output_asm_insn ("bx%?\t%|ip", operands);
7864 }
7865 else if (regno_use_in (LR_REGNUM, operands[0]))
7866 {
7867 /* LR is used in the memory address. We load the address in the
7868 first instruction. It's safe to use IP as the target of the
7869 load since the call will kill it anyway. */
7870 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7871 if (arm_arch5)
7872 output_asm_insn ("blx%?\t%|ip", operands);
7873 else
7874 {
7875 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7876 if (arm_arch4t)
7877 output_asm_insn ("bx%?\t%|ip", operands);
7878 else
7879 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7880 }
7881 }
7882 else
7883 {
7884 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7885 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7886 }
7887
7888 return "";
7889 }
7890
7891
7892 /* Output a move from ARM registers to an FPA register.
7893    OPERANDS[0] is an FPA register.
7894    OPERANDS[1] is the first of the three ARM registers holding the value.  */
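/* For hypothetical operands f0 and r0 this emits:

	stmfd	sp!, {r0, r1, r2}
	ldfe	f0, [sp], #12

   (illustrative only).  */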
7895 const char *
7896 output_mov_long_double_fpa_from_arm (rtx *operands)
7897 {
7898 int arm_reg0 = REGNO (operands[1]);
7899 rtx ops[3];
7900
7901 if (arm_reg0 == IP_REGNUM)
7902 abort ();
7903
7904 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7905 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7906 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7907
7908 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7909 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7910
7911 return "";
7912 }
7913
7914 /* Output a move from an FPA register to ARM registers.
7915    OPERANDS[0] is the first of the three ARM registers holding the value.
7916    OPERANDS[1] is an FPA register.  */
7917 const char *
7918 output_mov_long_double_arm_from_fpa (rtx *operands)
7919 {
7920 int arm_reg0 = REGNO (operands[0]);
7921 rtx ops[3];
7922
7923 if (arm_reg0 == IP_REGNUM)
7924 abort ();
7925
7926 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7927 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7928 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7929
7930 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7931 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7932 return "";
7933 }
7934
7935 /* Output a long double move between two sets of ARM registers.
7936 OPERANDS[0] is the destination.
7937 OPERANDS[1] is the source. */
7938 const char *
7939 output_mov_long_double_arm_from_arm (rtx *operands)
7940 {
7941 /* We have to be careful here because the two might overlap. */
7942 int dest_start = REGNO (operands[0]);
7943 int src_start = REGNO (operands[1]);
7944 rtx ops[2];
7945 int i;
7946
7947 if (dest_start < src_start)
7948 {
7949 for (i = 0; i < 3; i++)
7950 {
7951 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7952 ops[1] = gen_rtx_REG (SImode, src_start + i);
7953 output_asm_insn ("mov%?\t%0, %1", ops);
7954 }
7955 }
7956 else
7957 {
7958 for (i = 2; i >= 0; i--)
7959 {
7960 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7961 ops[1] = gen_rtx_REG (SImode, src_start + i);
7962 output_asm_insn ("mov%?\t%0, %1", ops);
7963 }
7964 }
7965
7966 return "";
7967 }
7968
7969
7970 /* Output a move from ARM registers to an FPA register.
7971    OPERANDS[0] is an FPA register.
7972    OPERANDS[1] is the first register of an ARM register pair.  */
7973 const char *
7974 output_mov_double_fpa_from_arm (rtx *operands)
7975 {
7976 int arm_reg0 = REGNO (operands[1]);
7977 rtx ops[2];
7978
7979 if (arm_reg0 == IP_REGNUM)
7980 abort ();
7981
7982 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7983 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7984 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
7985 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
7986 return "";
7987 }
7988
7989 /* Output a move from an FPA register to ARM registers.
7990    OPERANDS[0] is the first register of an ARM register pair.
7991    OPERANDS[1] is an FPA register.  */
7992 const char *
7993 output_mov_double_arm_from_fpa (rtx *operands)
7994 {
7995 int arm_reg0 = REGNO (operands[0]);
7996 rtx ops[2];
7997
7998 if (arm_reg0 == IP_REGNUM)
7999 abort ();
8000
8001 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8002 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8003 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8004 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8005 return "";
8006 }
8007
8008 /* Output a move between double words.
8009 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8010 or MEM<-REG and all MEMs must be offsettable addresses. */
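/* In the simplest case, a hypothetical DImode load of r0/r1 from a MEM
   whose address is register r2 emits

	ldmia	r2, {r0, r1}

   and the corresponding store emits "stmia r2, {r0, r1}"; the cases below
   handle constants and the auto-modify addressing forms as well.  */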
8011 const char *
8012 output_move_double (rtx *operands)
8013 {
8014 enum rtx_code code0 = GET_CODE (operands[0]);
8015 enum rtx_code code1 = GET_CODE (operands[1]);
8016 rtx otherops[3];
8017
8018 if (code0 == REG)
8019 {
8020 int reg0 = REGNO (operands[0]);
8021
8022 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8023
8024 if (code1 == REG)
8025 {
8026 int reg1 = REGNO (operands[1]);
8027 if (reg1 == IP_REGNUM)
8028 abort ();
8029
8030 /* Ensure the second source is not overwritten. */
8031 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8032 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8033 else
8034 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8035 }
8036 else if (code1 == CONST_VECTOR)
8037 {
8038 HOST_WIDE_INT hint = 0;
8039
8040 switch (GET_MODE (operands[1]))
8041 {
8042 case V2SImode:
8043 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8044 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8045 break;
8046
8047 case V4HImode:
8048 if (BYTES_BIG_ENDIAN)
8049 {
8050 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8051 hint <<= 16;
8052 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8053 }
8054 else
8055 {
8056 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8057 hint <<= 16;
8058 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8059 }
8060
8061 otherops[1] = GEN_INT (hint);
8062 hint = 0;
8063
8064 if (BYTES_BIG_ENDIAN)
8065 {
8066 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8067 hint <<= 16;
8068 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8069 }
8070 else
8071 {
8072 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8073 hint <<= 16;
8074 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8075 }
8076
8077 operands[1] = GEN_INT (hint);
8078 break;
8079
8080 case V8QImode:
8081 if (BYTES_BIG_ENDIAN)
8082 {
8083 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8084 hint <<= 8;
8085 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8086 hint <<= 8;
8087 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8088 hint <<= 8;
8089 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8090 }
8091 else
8092 {
8093 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8094 hint <<= 8;
8095 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8096 hint <<= 8;
8097 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8098 hint <<= 8;
8099 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8100 }
8101
8102 otherops[1] = GEN_INT (hint);
8103 hint = 0;
8104
8105 if (BYTES_BIG_ENDIAN)
8106 {
8107 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8108 hint <<= 8;
8109 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8110 hint <<= 8;
8111 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8112 hint <<= 8;
8113 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8114 }
8115 else
8116 {
8117 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8118 hint <<= 8;
8119 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8120 hint <<= 8;
8121 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8122 hint <<= 8;
8123 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8124 }
8125
8126 operands[1] = GEN_INT (hint);
8127 break;
8128
8129 default:
8130 abort ();
8131 }
8132 output_mov_immediate (operands);
8133 output_mov_immediate (otherops);
8134 }
8135 else if (code1 == CONST_DOUBLE)
8136 {
8137 if (GET_MODE (operands[1]) == DFmode)
8138 {
8139 REAL_VALUE_TYPE r;
8140 long l[2];
8141
8142 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8143 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8144 otherops[1] = GEN_INT (l[1]);
8145 operands[1] = GEN_INT (l[0]);
8146 }
8147 else if (GET_MODE (operands[1]) != VOIDmode)
8148 abort ();
8149 else if (WORDS_BIG_ENDIAN)
8150 {
8151 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8152 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8153 }
8154 else
8155 {
8156 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8157 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8158 }
8159
8160 output_mov_immediate (operands);
8161 output_mov_immediate (otherops);
8162 }
8163 else if (code1 == CONST_INT)
8164 {
8165 #if HOST_BITS_PER_WIDE_INT > 32
8166 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8167 what the upper word is. */
8168 if (WORDS_BIG_ENDIAN)
8169 {
8170 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8171 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8172 }
8173 else
8174 {
8175 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8176 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8177 }
8178 #else
8179 /* Sign extend the intval into the high-order word. */
8180 if (WORDS_BIG_ENDIAN)
8181 {
8182 otherops[1] = operands[1];
8183 operands[1] = (INTVAL (operands[1]) < 0
8184 ? constm1_rtx : const0_rtx);
8185 }
8186 else
8187 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8188 #endif
8189 output_mov_immediate (otherops);
8190 output_mov_immediate (operands);
8191 }
8192 else if (code1 == MEM)
8193 {
8194 switch (GET_CODE (XEXP (operands[1], 0)))
8195 {
8196 case REG:
8197 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8198 break;
8199
8200 case PRE_INC:
8201 if (!TARGET_LDRD)
8202 abort (); /* Should never happen now. */
8203 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8204 break;
8205
8206 case PRE_DEC:
8207 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8208 break;
8209
8210 case POST_INC:
8211 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8212 break;
8213
8214 case POST_DEC:
8215 if (!TARGET_LDRD)
8216 abort (); /* Should never happen now. */
8217 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8218 break;
8219
8220 case PRE_MODIFY:
8221 case POST_MODIFY:
8222 otherops[0] = operands[0];
8223 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8224 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8225
8226 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8227 {
8228 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8229 {
8230 /* Registers overlap so split out the increment. */
8231 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8232 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8233 }
8234 else
8235 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8236 }
8237 else
8238 {
8239 /* We only allow constant increments, so this is safe. */
8240 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8241 }
8242 break;
8243
8244 case LABEL_REF:
8245 case CONST:
8246 output_asm_insn ("adr%?\t%0, %1", operands);
8247 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8248 break;
8249
8250 default:
8251 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8252 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8253 {
8254 otherops[0] = operands[0];
8255 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8256 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8257
8258 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8259 {
8260 if (GET_CODE (otherops[2]) == CONST_INT)
8261 {
8262 switch ((int) INTVAL (otherops[2]))
8263 {
8264 case -8:
8265 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8266 return "";
8267 case -4:
8268 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8269 return "";
8270 case 4:
8271 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8272 return "";
8273 }
8274 }
8275 if (TARGET_LDRD
8276 && (GET_CODE (otherops[2]) == REG
8277 || (GET_CODE (otherops[2]) == CONST_INT
8278 && INTVAL (otherops[2]) > -256
8279 && INTVAL (otherops[2]) < 256)))
8280 {
8281 if (reg_overlap_mentioned_p (otherops[0],
8282 otherops[2]))
8283 {
8284 /* Swap base and index registers over to
8285 avoid a conflict. */
8286 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8287 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8288
8289 }
8290 /* If both registers conflict, it will usually
8291 have been fixed by a splitter. */
8292 if (reg_overlap_mentioned_p (otherops[0],
8293 otherops[2]))
8294 {
8295 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8296 output_asm_insn ("ldr%?d\t%0, [%1]",
8297 otherops);
8298 return "";
8299 }
8300 else
8301 {
8302 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8303 otherops);
8304 return "";
8305 }
8306 }
8307 if (GET_CODE (otherops[2]) == CONST_INT)
8308 {
8309 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8310 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8311 else
8312 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8313 }
8314 else
8315 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8316 }
8317 else
8318 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8319
8320 return "ldm%?ia\t%0, %M0";
8321 }
8322 else
8323 {
8324 otherops[1] = adjust_address (operands[1], SImode, 4);
8325 /* Take care of overlapping base/data reg. */
8326 if (reg_mentioned_p (operands[0], operands[1]))
8327 {
8328 output_asm_insn ("ldr%?\t%0, %1", otherops);
8329 output_asm_insn ("ldr%?\t%0, %1", operands);
8330 }
8331 else
8332 {
8333 output_asm_insn ("ldr%?\t%0, %1", operands);
8334 output_asm_insn ("ldr%?\t%0, %1", otherops);
8335 }
8336 }
8337 }
8338 }
8339 else
8340 abort (); /* Constraints should prevent this. */
8341 }
8342 else if (code0 == MEM && code1 == REG)
8343 {
8344 if (REGNO (operands[1]) == IP_REGNUM)
8345 abort ();
8346
8347 switch (GET_CODE (XEXP (operands[0], 0)))
8348 {
8349 case REG:
8350 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8351 break;
8352
8353 case PRE_INC:
8354 if (!TARGET_LDRD)
8355 abort (); /* Should never happen now. */
8356 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8357 break;
8358
8359 case PRE_DEC:
8360 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8361 break;
8362
8363 case POST_INC:
8364 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8365 break;
8366
8367 case POST_DEC:
8368 if (!TARGET_LDRD)
8369 abort (); /* Should never happen now. */
8370 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8371 break;
8372
8373 case PRE_MODIFY:
8374 case POST_MODIFY:
8375 otherops[0] = operands[1];
8376 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8377 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8378
8379 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8380 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8381 else
8382 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8383 break;
8384
8385 case PLUS:
8386 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8387 if (GET_CODE (otherops[2]) == CONST_INT)
8388 {
8389 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8390 {
8391 case -8:
8392 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8393 return "";
8394
8395 case -4:
8396 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8397 return "";
8398
8399 case 4:
8400 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8401 return "";
8402 }
8403 }
8404 if (TARGET_LDRD
8405 && (GET_CODE (otherops[2]) == REG
8406 || (GET_CODE (otherops[2]) == CONST_INT
8407 && INTVAL (otherops[2]) > -256
8408 && INTVAL (otherops[2]) < 256)))
8409 {
8410 otherops[0] = operands[1];
8411 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8412 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8413 return "";
8414 }
8415 /* Fall through */
8416
8417 default:
8418 otherops[0] = adjust_address (operands[0], SImode, 4);
8419 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8420 output_asm_insn ("str%?\t%1, %0", operands);
8421 output_asm_insn ("str%?\t%1, %0", otherops);
8422 }
8423 }
8424 else
8425 /* Constraints should prevent this. */
8426 abort ();
8427
8428 return "";
8429 }
8430
8431
8432 /* Output an arbitrary MOV reg, #n.
8433 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
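/* For example (hypothetical operands r0 and 0x10004, which is not a valid
   ARM immediate): the constant is split into byte-sized chunks, giving

	mov	r0, #4
	orr	r0, r0, #65536

   Constants whose complement is a valid immediate use a single mvn
   instead.  */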
8434 const char *
8435 output_mov_immediate (rtx *operands)
8436 {
8437 HOST_WIDE_INT n = INTVAL (operands[1]);
8438
8439 /* Try to use one MOV. */
8440 if (const_ok_for_arm (n))
8441 output_asm_insn ("mov%?\t%0, %1", operands);
8442
8443 /* Try to use one MVN. */
8444 else if (const_ok_for_arm (~n))
8445 {
8446 operands[1] = GEN_INT (~n);
8447 output_asm_insn ("mvn%?\t%0, %1", operands);
8448 }
8449 else
8450 {
8451 int n_ones = 0;
8452 int i;
8453
8454 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8455 for (i = 0; i < 32; i++)
8456 if (n & 1 << i)
8457 n_ones++;
8458
8459 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8460 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8461 else
8462 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8463 }
8464
8465 return "";
8466 }
8467
8468 /* Output an ADD r, s, #n where n may be too big for one instruction.
8469    If n is zero and the source and destination registers are the same,
        output nothing.  */
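/* For example (hypothetical operands r0, r1 and #4100): since 4100 is not
   a valid ARM immediate it is split as

	add	r0, r1, #4
	add	r0, r0, #4096

   and negative values use sub in the same way.  */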
8470 const char *
8471 output_add_immediate (rtx *operands)
8472 {
8473 HOST_WIDE_INT n = INTVAL (operands[2]);
8474
8475 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8476 {
8477 if (n < 0)
8478 output_multi_immediate (operands,
8479 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8480 -n);
8481 else
8482 output_multi_immediate (operands,
8483 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8484 n);
8485 }
8486
8487 return "";
8488 }
8489
8490 /* Output a multiple immediate operation.
8491 OPERANDS is the vector of operands referred to in the output patterns.
8492 INSTR1 is the output pattern to use for the first constant.
8493 INSTR2 is the output pattern to use for subsequent constants.
8494 IMMED_OP is the index of the constant slot in OPERANDS.
8495 N is the constant value. */
8496 static const char *
8497 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8498 int immed_op, HOST_WIDE_INT n)
8499 {
8500 #if HOST_BITS_PER_WIDE_INT > 32
8501 n &= 0xffffffff;
8502 #endif
8503
8504 if (n == 0)
8505 {
8506 /* Quick and easy output. */
8507 operands[immed_op] = const0_rtx;
8508 output_asm_insn (instr1, operands);
8509 }
8510 else
8511 {
8512 int i;
8513 const char * instr = instr1;
8514
8515 /* Note that n is never zero here (which would give no output). */
8516 for (i = 0; i < 32; i += 2)
8517 {
8518 if (n & (3 << i))
8519 {
8520 operands[immed_op] = GEN_INT (n & (255 << i));
8521 output_asm_insn (instr, operands);
8522 instr = instr2;
8523 i += 6;
8524 }
8525 }
8526 }
8527
8528 return "";
8529 }
8530
8531 /* Return the appropriate ARM instruction for the operation code.
8532 The returned result should not be overwritten. OP is the rtx of the
8533 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8534 was shifted. */
8535 const char *
8536 arithmetic_instr (rtx op, int shift_first_arg)
8537 {
8538 switch (GET_CODE (op))
8539 {
8540 case PLUS:
8541 return "add";
8542
8543 case MINUS:
8544 return shift_first_arg ? "rsb" : "sub";
8545
8546 case IOR:
8547 return "orr";
8548
8549 case XOR:
8550 return "eor";
8551
8552 case AND:
8553 return "and";
8554
8555 default:
8556 abort ();
8557 }
8558 }
8559
8560 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8561 for the operation code. The returned result should not be overwritten.
8562    OP is the rtx of the shift.
8563    On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8564    constant shift amount otherwise.  */
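/* For example (illustrative): for (ashiftrt x (const_int 3)) this returns
   "asr" with *AMOUNTP = 3, and for (mult x (const_int 8)) it returns "asl"
   with *AMOUNTP = 3, i.e. the log2 of the multiplier.  */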
8565 static const char *
8566 shift_op (rtx op, HOST_WIDE_INT *amountp)
8567 {
8568 const char * mnem;
8569 enum rtx_code code = GET_CODE (op);
8570
8571 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8572 *amountp = -1;
8573 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8574 *amountp = INTVAL (XEXP (op, 1));
8575 else
8576 abort ();
8577
8578 switch (code)
8579 {
8580 case ASHIFT:
8581 mnem = "asl";
8582 break;
8583
8584 case ASHIFTRT:
8585 mnem = "asr";
8586 break;
8587
8588 case LSHIFTRT:
8589 mnem = "lsr";
8590 break;
8591
8592 case ROTATE:
8593 if (*amountp == -1)
8594 abort ();
8595 *amountp = 32 - *amountp;
8596
8597 /* Fall through. */
8598
8599 case ROTATERT:
8600 mnem = "ror";
8601 break;
8602
8603 case MULT:
8604 /* We never have to worry about the amount being other than a
8605 power of 2, since this case can never be reloaded from a reg. */
8606 if (*amountp != -1)
8607 *amountp = int_log2 (*amountp);
8608 else
8609 abort ();
8610 return "asl";
8611
8612 default:
8613 abort ();
8614 }
8615
8616 if (*amountp != -1)
8617 {
8618 /* This is not 100% correct, but follows from the desire to merge
8619 multiplication by a power of 2 with the recognizer for a
8620 shift. >=32 is not a valid shift for "asl", so we must try and
8621 output a shift that produces the correct arithmetical result.
8622 Using lsr #32 is identical except for the fact that the carry bit
8623 is not set correctly if we set the flags; but we never use the
8624 carry bit from such an operation, so we can ignore that. */
8625 if (code == ROTATERT)
8626 /* Rotate is just modulo 32. */
8627 *amountp &= 31;
8628 else if (*amountp != (*amountp & 31))
8629 {
8630 if (code == ASHIFT)
8631 mnem = "lsr";
8632 *amountp = 32;
8633 }
8634
8635 /* Shifts of 0 are no-ops. */
8636 if (*amountp == 0)
8637 return NULL;
8638 }
8639
8640 return mnem;
8641 }
8642
8643 /* Obtain the shift count corresponding to POWER, which must be a power of two.  */
8644
8645 static HOST_WIDE_INT
8646 int_log2 (HOST_WIDE_INT power)
8647 {
8648 HOST_WIDE_INT shift = 0;
8649
8650 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8651 {
8652 if (shift > 31)
8653 abort ();
8654 shift++;
8655 }
8656
8657 return shift;
8658 }
8659
8660 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8661 /bin/as is horribly restrictive. */
8662 #define MAX_ASCII_LEN 51
8663
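/* For example, a hypothetical input of the three bytes 'O', 'K', '\n'
   produces the line

	.ascii	"OK\n"

   splitting onto a new .ascii directive whenever MAX_ASCII_LEN is
   reached.  */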
8664 void
8665 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8666 {
8667 int i;
8668 int len_so_far = 0;
8669
8670 fputs ("\t.ascii\t\"", stream);
8671
8672 for (i = 0; i < len; i++)
8673 {
8674 int c = p[i];
8675
8676 if (len_so_far >= MAX_ASCII_LEN)
8677 {
8678 fputs ("\"\n\t.ascii\t\"", stream);
8679 len_so_far = 0;
8680 }
8681
8682 switch (c)
8683 {
8684 case TARGET_TAB:
8685 fputs ("\\t", stream);
8686 len_so_far += 2;
8687 break;
8688
8689 case TARGET_FF:
8690 fputs ("\\f", stream);
8691 len_so_far += 2;
8692 break;
8693
8694 case TARGET_BS:
8695 fputs ("\\b", stream);
8696 len_so_far += 2;
8697 break;
8698
8699 case TARGET_CR:
8700 fputs ("\\r", stream);
8701 len_so_far += 2;
8702 break;
8703
8704 case TARGET_NEWLINE:
8705 fputs ("\\n", stream);
8706 c = p [i + 1];
8707 if ((c >= ' ' && c <= '~')
8708 || c == TARGET_TAB)
8709 /* This is a good place for a line break. */
8710 len_so_far = MAX_ASCII_LEN;
8711 else
8712 len_so_far += 2;
8713 break;
8714
8715 case '\"':
8716 case '\\':
8717 putc ('\\', stream);
8718 len_so_far++;
8719 /* Drop through. */
8720
8721 default:
8722 if (c >= ' ' && c <= '~')
8723 {
8724 putc (c, stream);
8725 len_so_far++;
8726 }
8727 else
8728 {
8729 fprintf (stream, "\\%03o", c);
8730 len_so_far += 4;
8731 }
8732 break;
8733 }
8734 }
8735
8736 fputs ("\"\n", stream);
8737 }
8738 \f
8739 /* Compute the register save mask for registers 0 through 12
8740 inclusive. This code is used by arm_compute_save_reg_mask. */
8741 static unsigned long
8742 arm_compute_save_reg0_reg12_mask (void)
8743 {
8744 unsigned long func_type = arm_current_func_type ();
8745 unsigned int save_reg_mask = 0;
8746 unsigned int reg;
8747
8748 if (IS_INTERRUPT (func_type))
8749 {
8750 unsigned int max_reg;
8751 /* Interrupt functions must not corrupt any registers,
8752 even call clobbered ones. If this is a leaf function
8753 we can just examine the registers used by the RTL, but
8754 otherwise we have to assume that whatever function is
8755 called might clobber anything, and so we have to save
8756 all the call-clobbered registers as well. */
8757 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8758 	/* FIQ handlers have registers r8 - r12 banked, so
8759 	   we only need to check r0 - r7.  Normal ISRs only
8760 	   bank r13 and r14, so we must check up to r12.
8761 r13 is the stack pointer which is always preserved,
8762 so we do not need to consider it here. */
8763 max_reg = 7;
8764 else
8765 max_reg = 12;
8766
8767 for (reg = 0; reg <= max_reg; reg++)
8768 if (regs_ever_live[reg]
8769 || (! current_function_is_leaf && call_used_regs [reg]))
8770 save_reg_mask |= (1 << reg);
8771
8772 /* Also save the pic base register if necessary. */
8773 if (flag_pic
8774 && !TARGET_SINGLE_PIC_BASE
8775 && current_function_uses_pic_offset_table)
8776 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8777 }
8778 else
8779 {
8780 /* In the normal case we only need to save those registers
8781 which are call saved and which are used by this function. */
8782 for (reg = 0; reg <= 10; reg++)
8783 if (regs_ever_live[reg] && ! call_used_regs [reg])
8784 save_reg_mask |= (1 << reg);
8785
8786 /* Handle the frame pointer as a special case. */
8787 if (! TARGET_APCS_FRAME
8788 && ! frame_pointer_needed
8789 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8790 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8791 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8792
8793 /* If we aren't loading the PIC register,
8794 don't stack it even though it may be live. */
8795 if (flag_pic
8796 && !TARGET_SINGLE_PIC_BASE
8797 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8798 || current_function_uses_pic_offset_table))
8799 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8800 }
8801
8802 /* Save registers so the exception handler can modify them. */
8803 if (current_function_calls_eh_return)
8804 {
8805 unsigned int i;
8806
8807 for (i = 0; ; i++)
8808 {
8809 reg = EH_RETURN_DATA_REGNO (i);
8810 if (reg == INVALID_REGNUM)
8811 break;
8812 save_reg_mask |= 1 << reg;
8813 }
8814 }
8815
8816 return save_reg_mask;
8817 }
8818
8819 /* Compute a bit mask of which registers need to be
8820 saved on the stack for the current function. */
8821
8822 static unsigned long
8823 arm_compute_save_reg_mask (void)
8824 {
8825 unsigned int save_reg_mask = 0;
8826 unsigned long func_type = arm_current_func_type ();
8827
8828 if (IS_NAKED (func_type))
8829 /* This should never really happen. */
8830 return 0;
8831
8832 /* If we are creating a stack frame, then we must save the frame pointer,
8833 IP (which will hold the old stack pointer), LR and the PC. */
8834 if (frame_pointer_needed)
8835 save_reg_mask |=
8836 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8837 | (1 << IP_REGNUM)
8838 | (1 << LR_REGNUM)
8839 | (1 << PC_REGNUM);
8840
8841 /* Volatile functions do not return, so there
8842 is no need to save any other registers. */
8843 if (IS_VOLATILE (func_type))
8844 return save_reg_mask;
8845
8846 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8847
8848 /* Decide if we need to save the link register.
8849 Interrupt routines have their own banked link register,
8850 so they never need to save it.
8851 Otherwise if we do not use the link register we do not need to save
8852 it. If we are pushing other registers onto the stack however, we
8853 can save an instruction in the epilogue by pushing the link register
8854 now and then popping it back into the PC. This incurs extra memory
8855 accesses though, so we only do it when optimizing for size, and only
8856 if we know that we will not need a fancy return sequence. */
8857 if (regs_ever_live [LR_REGNUM]
8858 || (save_reg_mask
8859 && optimize_size
8860 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8861 && !current_function_calls_eh_return))
8862 save_reg_mask |= 1 << LR_REGNUM;
8863
8864 if (cfun->machine->lr_save_eliminated)
8865 save_reg_mask &= ~ (1 << LR_REGNUM);
8866
8867 if (TARGET_REALLY_IWMMXT
8868 && ((bit_count (save_reg_mask)
8869 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8870 {
8871 unsigned int reg;
8872
8873 /* The total number of registers that are going to be pushed
8874 onto the stack is odd. We need to ensure that the stack
8875 is 64-bit aligned before we start to save iWMMXt registers,
8876 and also before we start to create locals. (A local variable
8877 might be a double or long long which we will load/store using
8878 an iWMMXt instruction). Therefore we need to push another
8879 ARM register, so that the stack will be 64-bit aligned. We
8880 try to avoid using the arg registers (r0 -r3) as they might be
8881 used to pass values in a tail call. */
8882 for (reg = 4; reg <= 12; reg++)
8883 if ((save_reg_mask & (1 << reg)) == 0)
8884 break;
8885
8886 if (reg <= 12)
8887 save_reg_mask |= (1 << reg);
8888 else
8889 {
8890 cfun->machine->sibcall_blocked = 1;
8891 save_reg_mask |= (1 << 3);
8892 }
8893 }
8894
8895 return save_reg_mask;
8896 }
8897
8898
8899 /* Compute a bit mask of which registers need to be
8900 saved on the stack for the current function. */
8901 static unsigned long
8902 thumb_compute_save_reg_mask (void)
8903 {
8904 unsigned long mask;
8905 int reg;
8906
8907 mask = 0;
8908 for (reg = 0; reg < 12; reg ++)
8909 {
8910 if (regs_ever_live[reg] && !call_used_regs[reg])
8911 mask |= 1 << reg;
8912 }
8913
8914 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8915 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8916 if (TARGET_SINGLE_PIC_BASE)
8917 mask &= ~(1 << arm_pic_register);
8918 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8919 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8920 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8921
8922 /* lr will also be pushed if any lo regs are pushed. */
8923 if (mask & 0xff || thumb_force_lr_save ())
8924 mask |= (1 << LR_REGNUM);
8925
8926 /* Make sure we have a low work register if we need one. */
8927 if (((mask & 0xff) == 0 && regs_ever_live[LAST_ARG_REGNUM])
8928 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8929 mask |= 1 << LAST_LO_REGNUM;
8930
8931 return mask;
8932 }
8933
8934
8935 /* Return the number of bytes required to save VFP registers. */
8936 static int
8937 arm_get_vfp_saved_size (void)
8938 {
8939 unsigned int regno;
8940 int count;
8941 int saved;
8942
8943 saved = 0;
8944 /* Space for saved VFP registers. */
8945 if (TARGET_HARD_FLOAT && TARGET_VFP)
8946 {
8947 count = 0;
8948 for (regno = FIRST_VFP_REGNUM;
8949 regno < LAST_VFP_REGNUM;
8950 regno += 2)
8951 {
8952 if ((!regs_ever_live[regno] || call_used_regs[regno])
8953 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8954 {
8955 if (count > 0)
8956 {
8957 /* Workaround ARM10 VFPr1 bug. */
8958 if (count == 2 && !arm_arch6)
8959 count++;
8960 saved += count * 8 + 4;
8961 }
8962 count = 0;
8963 }
8964 else
8965 count++;
8966 }
8967 if (count > 0)
8968 {
8969 if (count == 2 && !arm_arch6)
8970 count++;
8971 saved += count * 8 + 4;
8972 }
8973 }
8974 return saved;
8975 }
8976
8977
8978 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8979 everything bar the final return instruction. */
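/* For a hypothetical simple function that saved r4 and lr, with no special
   exit requirements, the saved registers are popped straight into the pc:

	ldmfd	sp!, {r4, pc}

   (illustrative; interworking and interrupt returns differ, see below).  */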
8980 const char *
8981 output_return_instruction (rtx operand, int really_return, int reverse)
8982 {
8983 char conditional[10];
8984 char instr[100];
8985 int reg;
8986 unsigned long live_regs_mask;
8987 unsigned long func_type;
8988 arm_stack_offsets *offsets;
8989
8990 func_type = arm_current_func_type ();
8991
8992 if (IS_NAKED (func_type))
8993 return "";
8994
8995 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8996 {
8997 /* If this function was declared non-returning, and we have
8998 found a tail call, then we have to trust that the called
8999 function won't return. */
9000 if (really_return)
9001 {
9002 rtx ops[2];
9003
9004 /* Otherwise, trap an attempted return by aborting. */
9005 ops[0] = operand;
9006 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9007 : "abort");
9008 assemble_external_libcall (ops[1]);
9009 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9010 }
9011
9012 return "";
9013 }
9014
9015 if (current_function_calls_alloca && !really_return)
9016 abort ();
9017
9018 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9019
9020 return_used_this_function = 1;
9021
9022 live_regs_mask = arm_compute_save_reg_mask ();
9023
9024 if (live_regs_mask)
9025 {
9026 const char * return_reg;
9027
9028 /* If we do not have any special requirements for function exit
9029 (e.g. interworking, or ISR) then we can load the return address
9030 directly into the PC. Otherwise we must load it into LR. */
9031 if (really_return
9032 && ! TARGET_INTERWORK)
9033 return_reg = reg_names[PC_REGNUM];
9034 else
9035 return_reg = reg_names[LR_REGNUM];
9036
9037 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9038 {
9039 /* There are three possible reasons for the IP register
9040 being saved. 1) a stack frame was created, in which case
9041 IP contains the old stack pointer, or 2) an ISR routine
9042 corrupted it, or 3) it was saved to align the stack on
9043 iWMMXt. In case 1, restore IP into SP, otherwise just
9044 restore IP. */
9045 if (frame_pointer_needed)
9046 {
9047 live_regs_mask &= ~ (1 << IP_REGNUM);
9048 live_regs_mask |= (1 << SP_REGNUM);
9049 }
9050 else
9051 {
9052 if (! IS_INTERRUPT (func_type)
9053 && ! TARGET_REALLY_IWMMXT)
9054 abort ();
9055 }
9056 }
9057
9058 /* On some ARM architectures it is faster to use LDR rather than
9059 LDM to load a single register. On other architectures, the
9060 cost is the same. In 26 bit mode, or for exception handlers,
9061 we have to use LDM to load the PC so that the CPSR is also
9062 restored. */
9063 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9064 {
9065 if (live_regs_mask == (unsigned int)(1 << reg))
9066 break;
9067 }
9068 if (reg <= LAST_ARM_REGNUM
9069 && (reg != LR_REGNUM
9070 || ! really_return
9071 || ! IS_INTERRUPT (func_type)))
9072 {
9073 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9074 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9075 }
9076 else
9077 {
9078 char *p;
9079 int first = 1;
9080
9081 /* Generate the load multiple instruction to restore the
9082 registers. Note we can get here, even if
9083 frame_pointer_needed is true, but only if sp already
9084 points to the base of the saved core registers. */
9085 if (live_regs_mask & (1 << SP_REGNUM))
9086 {
9087 unsigned HOST_WIDE_INT stack_adjust;
9088
9089 offsets = arm_get_frame_offsets ();
9090 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9091 if (stack_adjust != 0 && stack_adjust != 4)
9092 abort ();
9093
9094 if (stack_adjust && arm_arch5)
9095 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9096 else
9097 {
9098 /* If we can't use ldmib (SA110 bug), then try to pop r3
9099 instead. */
9100 if (stack_adjust)
9101 live_regs_mask |= 1 << 3;
9102 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9103 }
9104 }
9105 else
9106 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9107
9108 p = instr + strlen (instr);
9109
9110 for (reg = 0; reg <= SP_REGNUM; reg++)
9111 if (live_regs_mask & (1 << reg))
9112 {
9113 int l = strlen (reg_names[reg]);
9114
9115 if (first)
9116 first = 0;
9117 else
9118 {
9119 memcpy (p, ", ", 2);
9120 p += 2;
9121 }
9122
9123 memcpy (p, "%|", 2);
9124 memcpy (p + 2, reg_names[reg], l);
9125 p += l + 2;
9126 }
9127
9128 if (live_regs_mask & (1 << LR_REGNUM))
9129 {
9130 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9131 /* If returning from an interrupt, restore the CPSR. */
9132 if (IS_INTERRUPT (func_type))
9133 strcat (p, "^");
9134 }
9135 else
9136 strcpy (p, "}");
9137 }
9138
9139 output_asm_insn (instr, & operand);
9140
9141 /* See if we need to generate an extra instruction to
9142 perform the actual function return. */
9143 if (really_return
9144 && func_type != ARM_FT_INTERWORKED
9145 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9146 {
9147 /* The return has already been handled
9148 by loading the LR into the PC. */
9149 really_return = 0;
9150 }
9151 }
9152
9153 if (really_return)
9154 {
9155 switch ((int) ARM_FUNC_TYPE (func_type))
9156 {
9157 case ARM_FT_ISR:
9158 case ARM_FT_FIQ:
9159 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9160 break;
9161
9162 case ARM_FT_INTERWORKED:
9163 sprintf (instr, "bx%s\t%%|lr", conditional);
9164 break;
9165
9166 case ARM_FT_EXCEPTION:
9167 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9168 break;
9169
9170 default:
9171 /* Use bx if it's available. */
9172 if (arm_arch5 || arm_arch4t)
9173 sprintf (instr, "bx%s\t%%|lr", conditional);
9174 else
9175 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9176 break;
9177 }
9178
9179 output_asm_insn (instr, & operand);
9180 }
9181
9182 return "";
9183 }
9184
9185 /* Write the function name into the code section, directly preceding
9186 the function prologue.
9187
9188 Code will be output similar to this:
9189 t0
9190 .ascii "arm_poke_function_name", 0
9191 .align
9192 t1
9193 .word 0xff000000 + (t1 - t0)
9194 arm_poke_function_name
9195 mov ip, sp
9196 stmfd sp!, {fp, ip, lr, pc}
9197 sub fp, ip, #4
9198
9199 When performing a stack backtrace, code can inspect the value
9200 of 'pc' stored at 'fp' + 0. If the trace function then looks
9201 at location pc - 12 and the top 8 bits are set, then we know
9202 that there is a function name embedded immediately preceding this
9203    location, whose length is ((pc[-3]) & ~0xff000000).
9204
9205 We assume that pc is declared as a pointer to an unsigned long.
9206
9207 It is of no benefit to output the function name if we are assembling
9208 a leaf function. These function types will not contain a stack
9209 backtrace structure, therefore it is not possible to determine the
9210 function name. */
9211 void
9212 arm_poke_function_name (FILE *stream, const char *name)
9213 {
9214 unsigned long alignlength;
9215 unsigned long length;
9216 rtx x;
9217
9218 length = strlen (name) + 1;
9219 alignlength = ROUND_UP_WORD (length);
9220
9221 ASM_OUTPUT_ASCII (stream, name, length);
9222 ASM_OUTPUT_ALIGN (stream, 2);
9223 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9224 assemble_aligned_integer (UNITS_PER_WORD, x);
9225 }
9226
9227 /* Place some comments into the assembler stream
9228 describing the current function. */
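/* The annotations written here look like (illustrative values):

	@ args = 0, pretend = 0, frame = 8
	@ frame_needed = 1, uses_anonymous_args = 0

   plus one line describing the function type when it is not normal.  */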
9229 static void
9230 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9231 {
9232 unsigned long func_type;
9233
9234 if (!TARGET_ARM)
9235 {
9236 thumb_output_function_prologue (f, frame_size);
9237 return;
9238 }
9239
9240 /* Sanity check. */
9241 if (arm_ccfsm_state || arm_target_insn)
9242 abort ();
9243
9244 func_type = arm_current_func_type ();
9245
9246 switch ((int) ARM_FUNC_TYPE (func_type))
9247 {
9248 default:
9249 case ARM_FT_NORMAL:
9250 break;
9251 case ARM_FT_INTERWORKED:
9252 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9253 break;
9254 case ARM_FT_ISR:
9255 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9256 break;
9257 case ARM_FT_FIQ:
9258 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9259 break;
9260 case ARM_FT_EXCEPTION:
9261 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9262 break;
9263 }
9264
9265 if (IS_NAKED (func_type))
9266 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9267
9268 if (IS_VOLATILE (func_type))
9269 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9270
9271 if (IS_NESTED (func_type))
9272 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9273
9274 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9275 current_function_args_size,
9276 current_function_pretend_args_size, frame_size);
9277
9278 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9279 frame_pointer_needed,
9280 cfun->machine->uses_anonymous_args);
9281
9282 if (cfun->machine->lr_save_eliminated)
9283 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9284
9285 if (current_function_calls_eh_return)
9286 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9287
9288 #ifdef AOF_ASSEMBLER
9289 if (flag_pic)
9290 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9291 #endif
9292
9293 return_used_this_function = 0;
9294 }
9295
9296 const char *
9297 arm_output_epilogue (rtx sibling)
9298 {
9299 int reg;
9300 unsigned long saved_regs_mask;
9301 unsigned long func_type;
9302 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9303 frame that is $fp + 4 for a non-variadic function. */
9304 int floats_offset = 0;
9305 rtx operands[3];
9306 FILE * f = asm_out_file;
9307 unsigned int lrm_count = 0;
9308 int really_return = (sibling == NULL);
9309 int start_reg;
9310 arm_stack_offsets *offsets;
9311
9312 /* If we have already generated the return instruction
9313 then it is futile to generate anything else. */
9314 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9315 return "";
9316
9317 func_type = arm_current_func_type ();
9318
9319 if (IS_NAKED (func_type))
9320 /* Naked functions don't have epilogues. */
9321 return "";
9322
9323 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9324 {
9325 rtx op;
9326
9327 /* A volatile function should never return. Call abort. */
9328 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9329 assemble_external_libcall (op);
9330 output_asm_insn ("bl\t%a0", &op);
9331
9332 return "";
9333 }
9334
9335 if (current_function_calls_eh_return
9336 && ! really_return)
9337 /* If we are throwing an exception, then we really must
9338 be doing a return, so we can't tail-call. */
9339 abort ();
9340
9341 offsets = arm_get_frame_offsets ();
9342 saved_regs_mask = arm_compute_save_reg_mask ();
9343
9344 if (TARGET_IWMMXT)
9345 lrm_count = bit_count (saved_regs_mask);
9346
9347 floats_offset = offsets->saved_args;
9348 /* Compute how far away the floats will be. */
9349 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9350 if (saved_regs_mask & (1 << reg))
9351 floats_offset += 4;
9352
9353 if (frame_pointer_needed)
9354 {
9355 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9356 int vfp_offset = offsets->frame;
9357
9358 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9359 {
9360 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9361 if (regs_ever_live[reg] && !call_used_regs[reg])
9362 {
9363 floats_offset += 12;
9364 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9365 reg, FP_REGNUM, floats_offset - vfp_offset);
9366 }
9367 }
9368 else
9369 {
9370 start_reg = LAST_FPA_REGNUM;
9371
9372 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9373 {
9374 if (regs_ever_live[reg] && !call_used_regs[reg])
9375 {
9376 floats_offset += 12;
9377
9378 /* We can't unstack more than four registers at once. */
9379 if (start_reg - reg == 3)
9380 {
9381 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9382 reg, FP_REGNUM, floats_offset - vfp_offset);
9383 start_reg = reg - 1;
9384 }
9385 }
9386 else
9387 {
9388 if (reg != start_reg)
9389 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9390 reg + 1, start_reg - reg,
9391 FP_REGNUM, floats_offset - vfp_offset);
9392 start_reg = reg - 1;
9393 }
9394 }
9395
9396 /* Just in case the last register checked also needs unstacking. */
9397 if (reg != start_reg)
9398 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9399 reg + 1, start_reg - reg,
9400 FP_REGNUM, floats_offset - vfp_offset);
9401 }
9402
9403 if (TARGET_HARD_FLOAT && TARGET_VFP)
9404 {
9405 int saved_size;
9406
9407 /* The fldmx insn does not have base+offset addressing modes,
9408 so we use IP to hold the address. */
9409 saved_size = arm_get_vfp_saved_size ();
9410
9411 if (saved_size > 0)
9412 {
9413 floats_offset += saved_size;
9414 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9415 FP_REGNUM, floats_offset - vfp_offset);
9416 }
9417 start_reg = FIRST_VFP_REGNUM;
9418 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9419 {
9420 if ((!regs_ever_live[reg] || call_used_regs[reg])
9421 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9422 {
9423 if (start_reg != reg)
9424 arm_output_fldmx (f, IP_REGNUM,
9425 (start_reg - FIRST_VFP_REGNUM) / 2,
9426 (reg - start_reg) / 2);
9427 start_reg = reg + 2;
9428 }
9429 }
9430 if (start_reg != reg)
9431 arm_output_fldmx (f, IP_REGNUM,
9432 (start_reg - FIRST_VFP_REGNUM) / 2,
9433 (reg - start_reg) / 2);
9434 }
9435
9436 if (TARGET_IWMMXT)
9437 {
9438 /* The frame pointer is guaranteed to be non-double-word aligned.
9439 This is because it is set to (old_stack_pointer - 4) and the
9440 old_stack_pointer was double word aligned. Thus the offset to
9441 the iWMMXt registers to be loaded must also be non-double-word
9442 sized, so that the resultant address *is* double-word aligned.
9443 We can ignore floats_offset since that was already included in
9444 the live_regs_mask. */
9445 lrm_count += (lrm_count % 2 ? 2 : 1);
9446
9447 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9448 if (regs_ever_live[reg] && !call_used_regs[reg])
9449 {
9450 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9451 reg, FP_REGNUM, lrm_count * 4);
9452 lrm_count += 2;
9453 }
9454 }
9455
9456 /* saved_regs_mask should contain the IP, which at the time of stack
9457 frame generation actually contains the old stack pointer. So a
9458 	 quick way to unwind the stack is just to pop the IP register directly
9459 into the stack pointer. */
9460 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9461 abort ();
9462 saved_regs_mask &= ~ (1 << IP_REGNUM);
9463 saved_regs_mask |= (1 << SP_REGNUM);
9464
9465 /* There are two registers left in saved_regs_mask - LR and PC. We
9466 only need to restore the LR register (the return address), but to
9467 save time we can load it directly into the PC, unless we need a
9468 special function exit sequence, or we are not really returning. */
9469 if (really_return
9470 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9471 && !current_function_calls_eh_return)
9472 /* Delete the LR from the register mask, so that the LR on
9473 the stack is loaded into the PC in the register mask. */
9474 saved_regs_mask &= ~ (1 << LR_REGNUM);
9475 else
9476 saved_regs_mask &= ~ (1 << PC_REGNUM);
9477
9478 /* We must use SP as the base register, because SP is one of the
9479 registers being restored. If an interrupt or page fault
9480 happens in the ldm instruction, the SP might or might not
9481 have been restored. That would be bad, as then SP will no
9482 longer indicate the safe area of stack, and we can get stack
9483 corruption. Using SP as the base register means that it will
9484 be reset correctly to the original value, should an interrupt
9485 occur. If the stack pointer already points at the right
9486 place, then omit the subtraction. */
9487 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9488 || current_function_calls_alloca)
9489 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9490 4 * bit_count (saved_regs_mask));
9491 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9492
9493 if (IS_INTERRUPT (func_type))
9494 /* Interrupt handlers will have pushed the
9495 IP onto the stack, so restore it now. */
9496 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9497 }
9498 else
9499 {
9500 /* Restore stack pointer if necessary. */
9501 if (offsets->outgoing_args != offsets->saved_regs)
9502 {
9503 operands[0] = operands[1] = stack_pointer_rtx;
9504 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9505 output_add_immediate (operands);
9506 }
9507
9508 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9509 {
9510 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9511 if (regs_ever_live[reg] && !call_used_regs[reg])
9512 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9513 reg, SP_REGNUM);
9514 }
9515 else
9516 {
9517 start_reg = FIRST_FPA_REGNUM;
9518
9519 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9520 {
9521 if (regs_ever_live[reg] && !call_used_regs[reg])
9522 {
9523 if (reg - start_reg == 3)
9524 {
9525 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9526 start_reg, SP_REGNUM);
9527 start_reg = reg + 1;
9528 }
9529 }
9530 else
9531 {
9532 if (reg != start_reg)
9533 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9534 start_reg, reg - start_reg,
9535 SP_REGNUM);
9536
9537 start_reg = reg + 1;
9538 }
9539 }
9540
9541 /* Just in case the last register checked also needs unstacking. */
9542 if (reg != start_reg)
9543 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9544 start_reg, reg - start_reg, SP_REGNUM);
9545 }
9546
9547 if (TARGET_HARD_FLOAT && TARGET_VFP)
9548 {
9549 start_reg = FIRST_VFP_REGNUM;
9550 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9551 {
9552 if ((!regs_ever_live[reg] || call_used_regs[reg])
9553 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9554 {
9555 if (start_reg != reg)
9556 arm_output_fldmx (f, SP_REGNUM,
9557 (start_reg - FIRST_VFP_REGNUM) / 2,
9558 (reg - start_reg) / 2);
9559 start_reg = reg + 2;
9560 }
9561 }
9562 if (start_reg != reg)
9563 arm_output_fldmx (f, SP_REGNUM,
9564 (start_reg - FIRST_VFP_REGNUM) / 2,
9565 (reg - start_reg) / 2);
9566 }
9567 if (TARGET_IWMMXT)
9568 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9569 if (regs_ever_live[reg] && !call_used_regs[reg])
9570 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9571
9572 /* If we can, restore the LR into the PC. */
9573 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9574 && really_return
9575 && current_function_pretend_args_size == 0
9576 && saved_regs_mask & (1 << LR_REGNUM)
9577 && !current_function_calls_eh_return)
9578 {
9579 saved_regs_mask &= ~ (1 << LR_REGNUM);
9580 saved_regs_mask |= (1 << PC_REGNUM);
9581 }
9582
9583 /* Load the registers off the stack. If we only have one register
9584 to load use the LDR instruction - it is faster. */
9585 if (saved_regs_mask == (1 << LR_REGNUM))
9586 {
9587 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9588 }
9589 else if (saved_regs_mask)
9590 {
9591 if (saved_regs_mask & (1 << SP_REGNUM))
9592 /* Note - write back to the stack register is not enabled
9593 (i.e. we do not emit "ldmfd sp!..."). We know that the stack
9594 pointer is in the list of registers, and if we added writeback
9595 the instruction would become UNPREDICTABLE. */
9596 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9597 else
9598 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9599 }
9600
9601 if (current_function_pretend_args_size)
9602 {
9603 /* Unwind the pre-pushed regs. */
9604 operands[0] = operands[1] = stack_pointer_rtx;
9605 operands[2] = GEN_INT (current_function_pretend_args_size);
9606 output_add_immediate (operands);
9607 }
9608 }
9609
9610 /* We may have already restored PC directly from the stack. */
9611 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9612 return "";
9613
9614 /* Stack adjustment for exception handler. */
9615 if (current_function_calls_eh_return)
9616 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9617 ARM_EH_STACKADJ_REGNUM);
9618
9619 /* Generate the return instruction. */
9620 switch ((int) ARM_FUNC_TYPE (func_type))
9621 {
9622 case ARM_FT_ISR:
9623 case ARM_FT_FIQ:
9624 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9625 break;
9626
9627 case ARM_FT_EXCEPTION:
9628 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9629 break;
9630
9631 case ARM_FT_INTERWORKED:
9632 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9633 break;
9634
9635 default:
9636 if (arm_arch5 || arm_arch4t)
9637 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9638 else
9639 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9640 break;
9641 }
9642
9643 return "";
9644 }
9645
9646 static void
9647 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9648 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9649 {
9650 arm_stack_offsets *offsets;
9651
9652 if (TARGET_THUMB)
9653 {
9654 int regno;
9655
9656 /* Emit any call-via-reg trampolines that are needed for v4t support
9657 of call_reg and call_value_reg type insns. */
9658 for (regno = 0; regno < SP_REGNUM; regno++)
9659 {
9660 rtx label = cfun->machine->call_via[regno];
9661
9662 if (label != NULL)
9663 {
9664 function_section (current_function_decl);
9665 targetm.asm_out.internal_label (asm_out_file, "L",
9666 CODE_LABEL_NUMBER (label));
9667 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9668 }
9669 }
9670
9671 /* ??? Probably not safe to set this here, since it assumes that a
9672 function will be emitted as assembly immediately after we generate
9673 RTL for it. This does not happen for inline functions. */
9674 return_used_this_function = 0;
9675 }
9676 else
9677 {
9678 /* We need to take into account any stack-frame rounding. */
9679 offsets = arm_get_frame_offsets ();
9680
9681 if (use_return_insn (FALSE, NULL)
9682 && return_used_this_function
9683 && offsets->saved_regs != offsets->outgoing_args
9684 && !frame_pointer_needed)
9685 abort ();
9686
9687 /* Reset the ARM-specific per-function variables. */
9688 after_arm_reorg = 0;
9689 }
9690 }
9691
9692 /* Generate and emit an insn that we will recognize as a push_multi.
9693 Unfortunately, since this insn does not reflect the actual semantics
9694 of the operation very well, we need to annotate the insn for the benefit
9695 of DWARF2 frame unwind information. */
9696 static rtx
9697 emit_multi_reg_push (int mask)
9698 {
9699 int num_regs = 0;
9700 int num_dwarf_regs;
9701 int i, j;
9702 rtx par;
9703 rtx dwarf;
9704 int dwarf_par_index;
9705 rtx tmp, reg;
9706
9707 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9708 if (mask & (1 << i))
9709 num_regs++;
9710
9711 if (num_regs == 0 || num_regs > 16)
9712 abort ();
9713
9714 /* We don't record the PC in the dwarf frame information. */
9715 num_dwarf_regs = num_regs;
9716 if (mask & (1 << PC_REGNUM))
9717 num_dwarf_regs--;
9718
9719 /* For the body of the insn we are going to generate an UNSPEC in
9720 parallel with several USEs. This allows the insn to be recognized
9721 by the push_multi pattern in the arm.md file. The insn looks
9722 something like this:
9723
9724 (parallel [
9725 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9726 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9727 (use (reg:SI 11 fp))
9728 (use (reg:SI 12 ip))
9729 (use (reg:SI 14 lr))
9730 (use (reg:SI 15 pc))
9731 ])
9732
9733 For the frame note however, we try to be more explicit and actually
9734 show each register being stored into the stack frame, plus a (single)
9735 decrement of the stack pointer. We do it this way in order to be
9736 friendly to the stack unwinding code, which only wants to see a single
9737 stack decrement per instruction. The RTL we generate for the note looks
9738 something like this:
9739
9740 (sequence [
9741 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9742 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9743 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9744 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9745 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9746 ])
9747
9748 This sequence is used both by the code to support stack unwinding for
9749 exception handlers and the code to generate dwarf2 frame debugging. */
9750
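/* A hypothetical illustration (not taken from actual compiler output): for a
   MASK of {r4, fp, ip, lr, pc} the insn built below is eventually printed by
   the push_multi pattern as a single store-multiple such as

       stmfd	sp!, {r4, fp, ip, lr, pc}

   while the attached frame note describes the same operation as one stack
   decrement of 20 bytes plus four individual register stores (the PC slot,
   as noted above, is not described).  */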
9751 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9752 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9753 dwarf_par_index = 1;
9754
9755 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9756 {
9757 if (mask & (1 << i))
9758 {
9759 reg = gen_rtx_REG (SImode, i);
9760
9761 XVECEXP (par, 0, 0)
9762 = gen_rtx_SET (VOIDmode,
9763 gen_rtx_MEM (BLKmode,
9764 gen_rtx_PRE_DEC (BLKmode,
9765 stack_pointer_rtx)),
9766 gen_rtx_UNSPEC (BLKmode,
9767 gen_rtvec (1, reg),
9768 UNSPEC_PUSH_MULT));
9769
9770 if (i != PC_REGNUM)
9771 {
9772 tmp = gen_rtx_SET (VOIDmode,
9773 gen_rtx_MEM (SImode, stack_pointer_rtx),
9774 reg);
9775 RTX_FRAME_RELATED_P (tmp) = 1;
9776 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9777 dwarf_par_index++;
9778 }
9779
9780 break;
9781 }
9782 }
9783
9784 for (j = 1, i++; j < num_regs; i++)
9785 {
9786 if (mask & (1 << i))
9787 {
9788 reg = gen_rtx_REG (SImode, i);
9789
9790 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9791
9792 if (i != PC_REGNUM)
9793 {
9794 tmp = gen_rtx_SET (VOIDmode,
9795 gen_rtx_MEM (SImode,
9796 plus_constant (stack_pointer_rtx,
9797 4 * j)),
9798 reg);
9799 RTX_FRAME_RELATED_P (tmp) = 1;
9800 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9801 }
9802
9803 j++;
9804 }
9805 }
9806
9807 par = emit_insn (par);
9808
9809 tmp = gen_rtx_SET (SImode,
9810 stack_pointer_rtx,
9811 gen_rtx_PLUS (SImode,
9812 stack_pointer_rtx,
9813 GEN_INT (-4 * num_regs)));
9814 RTX_FRAME_RELATED_P (tmp) = 1;
9815 XVECEXP (dwarf, 0, 0) = tmp;
9816
9817 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9818 REG_NOTES (par));
9819 return par;
9820 }
9821
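/* A descriptive note added for clarity (an assumption inferred from the code
   below, not original documentation): emit_sfm builds the RTL for a single
   FPA store-multiple of COUNT registers starting at BASE_REG, pushed below
   the stack pointer (printed later as an "sfmfd"-style instruction), and
   attaches a frame note that breaks it into one stack decrement of
   12 * COUNT bytes plus COUNT individual 12-byte stores.  */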
9822 static rtx
9823 emit_sfm (int base_reg, int count)
9824 {
9825 rtx par;
9826 rtx dwarf;
9827 rtx tmp, reg;
9828 int i;
9829
9830 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9831 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9832
9833 reg = gen_rtx_REG (XFmode, base_reg++);
9834
9835 XVECEXP (par, 0, 0)
9836 = gen_rtx_SET (VOIDmode,
9837 gen_rtx_MEM (BLKmode,
9838 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9839 gen_rtx_UNSPEC (BLKmode,
9840 gen_rtvec (1, reg),
9841 UNSPEC_PUSH_MULT));
9842 tmp = gen_rtx_SET (VOIDmode,
9843 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9844 RTX_FRAME_RELATED_P (tmp) = 1;
9845 XVECEXP (dwarf, 0, 1) = tmp;
9846
9847 for (i = 1; i < count; i++)
9848 {
9849 reg = gen_rtx_REG (XFmode, base_reg++);
9850 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9851
9852 tmp = gen_rtx_SET (VOIDmode,
9853 gen_rtx_MEM (XFmode,
9854 plus_constant (stack_pointer_rtx,
9855 i * 12)),
9856 reg);
9857 RTX_FRAME_RELATED_P (tmp) = 1;
9858 XVECEXP (dwarf, 0, i + 1) = tmp;
9859 }
9860
9861 tmp = gen_rtx_SET (VOIDmode,
9862 stack_pointer_rtx,
9863 gen_rtx_PLUS (SImode,
9864 stack_pointer_rtx,
9865 GEN_INT (-12 * count)));
9866 RTX_FRAME_RELATED_P (tmp) = 1;
9867 XVECEXP (dwarf, 0, 0) = tmp;
9868
9869 par = emit_insn (par);
9870 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9871 REG_NOTES (par));
9872 return par;
9873 }
9874
9875
9876 /* Return true if the current function needs to save/restore LR. */
9877
9878 static bool
9879 thumb_force_lr_save (void)
9880 {
9881 return !cfun->machine->lr_save_eliminated
9882 && (!leaf_function_p ()
9883 || thumb_far_jump_used_p ()
9884 || regs_ever_live [LR_REGNUM]);
9885 }
9886
9887
9888 /* Compute the distance from register FROM to register TO.
9889 These can be the arg pointer (26), the soft frame pointer (25),
9890 the stack pointer (13) or the hard frame pointer (11).
9891 In thumb mode r7 is used as the soft frame pointer, if needed.
9892 Typical stack layout looks like this:
9893
9894 old stack pointer -> | |
9895 ----
9896 | | \
9897 | | saved arguments for
9898 | | vararg functions
9899 | | /
9900 --
9901 hard FP & arg pointer -> | | \
9902 | | stack
9903 | | frame
9904 | | /
9905 --
9906 | | \
9907 | | call saved
9908 | | registers
9909 soft frame pointer -> | | /
9910 --
9911 | | \
9912 | | local
9913 | | variables
9914 | | /
9915 --
9916 | | \
9917 | | outgoing
9918 | | arguments
9919 current stack pointer -> | | /
9920 --
9921
9922 For a given function some or all of these stack components
9923 may not be needed, giving rise to the possibility of
9924 eliminating some of the registers.
9925
9926 The values returned by this function must reflect the behavior
9927 of arm_expand_prologue() and arm_compute_save_reg_mask().
9928
9929 The sign of the number returned reflects the direction of stack
9930 growth, so the values are positive for all eliminations except
9931 from the soft frame pointer to the hard frame pointer.
9932
9933 SFP may point just inside the local variables block to ensure correct
9934 alignment. */
9935
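/* A hypothetical worked example of the layout above (the numbers are chosen
   purely for illustration): an ARM function with no pretend args that saves
   {r4, r5, fp, lr} (16 bytes) and has 8 bytes of locals and no outgoing
   arguments would get, from arm_get_frame_offsets() below, saved_args = 0,
   frame = 4, saved_regs = 16, soft_frame = 16 (assuming no caller
   interworking slot) and outgoing_args = 24.  */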
9936
9937 /* Calculate stack offsets. These are used to calculate register elimination
9938 offsets and in prologue/epilogue code. */
9939
9940 static arm_stack_offsets *
9941 arm_get_frame_offsets (void)
9942 {
9943 struct arm_stack_offsets *offsets;
9944 unsigned long func_type;
9945 int leaf;
9946 int saved;
9947 HOST_WIDE_INT frame_size;
9948
9949 offsets = &cfun->machine->stack_offsets;
9950
9951 /* We need to know if we are a leaf function. Unfortunately, it
9952 is possible to be called after start_sequence has been called,
9953 which causes get_insns to return the insns for the sequence,
9954 not the function, which will cause leaf_function_p to return
9955 the incorrect result.
9956
9957 To work around this, we cache the computed frame size. This only happens during epilogue generation, so there is no need to know about leaf functions once reload has completed, and the
9958 frame size cannot be changed after that time, so we can safely
9959 use the cached value. */
9960
9961 if (reload_completed)
9962 return offsets;
9963
9964 /* Initially this is the size of the local variables. It will be translated
9965 into an offset once we have determined the size of preceding data. */
9966 frame_size = ROUND_UP_WORD (get_frame_size ());
9967
9968 leaf = leaf_function_p ();
9969
9970 /* Space for variadic functions. */
9971 offsets->saved_args = current_function_pretend_args_size;
9972
9973 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9974
9975 if (TARGET_ARM)
9976 {
9977 unsigned int regno;
9978
9979 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9980
9981 /* We know that SP will be doubleword aligned on entry, and we must
9982 preserve that condition at any subroutine call. We also require the
9983 soft frame pointer to be doubleword aligned. */
9984
9985 if (TARGET_REALLY_IWMMXT)
9986 {
9987 /* Check for the call-saved iWMMXt registers. */
9988 for (regno = FIRST_IWMMXT_REGNUM;
9989 regno <= LAST_IWMMXT_REGNUM;
9990 regno++)
9991 if (regs_ever_live [regno] && ! call_used_regs [regno])
9992 saved += 8;
9993 }
9994
9995 func_type = arm_current_func_type ();
9996 if (! IS_VOLATILE (func_type))
9997 {
9998 /* Space for saved FPA registers. */
9999 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10000 if (regs_ever_live[regno] && ! call_used_regs[regno])
10001 saved += 12;
10002
10003 /* Space for saved VFP registers. */
10004 if (TARGET_HARD_FLOAT && TARGET_VFP)
10005 saved += arm_get_vfp_saved_size ();
10006 }
10007 }
10008 else /* TARGET_THUMB */
10009 {
10010 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10011 if (TARGET_BACKTRACE)
10012 saved += 16;
10013 }
10014
10015 /* Saved registers include the stack frame. */
10016 offsets->saved_regs = offsets->saved_args + saved;
10017 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10018 /* A leaf function does not need any stack alignment if it has nothing
10019 on the stack. */
10020 if (leaf && frame_size == 0)
10021 {
10022 offsets->outgoing_args = offsets->soft_frame;
10023 return offsets;
10024 }
10025
10026 /* Ensure SFP has the correct alignment. */
10027 if (ARM_DOUBLEWORD_ALIGN
10028 && (offsets->soft_frame & 7))
10029 offsets->soft_frame += 4;
10030
10031 offsets->outgoing_args = offsets->soft_frame + frame_size
10032 + current_function_outgoing_args_size;
10033
10034 if (ARM_DOUBLEWORD_ALIGN)
10035 {
10036 /* Ensure SP remains doubleword aligned. */
10037 if (offsets->outgoing_args & 7)
10038 offsets->outgoing_args += 4;
10039 if (offsets->outgoing_args & 7)
10040 abort ();
10041 }
10042
10043 return offsets;
10044 }
10045
10046
10047 /* Calculate the relative offsets for the different stack pointers. Positive
10048 offsets are in the direction of stack growth. */
10049
10050 HOST_WIDE_INT
10051 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10052 {
10053 arm_stack_offsets *offsets;
10054
10055 offsets = arm_get_frame_offsets ();
10056
10057 /* OK, now we have enough information to compute the distances.
10058 There must be an entry in these switch tables for each pair
10059 of registers in ELIMINABLE_REGS, even if some of the entries
10060 seem to be redundant or useless. */
10061 switch (from)
10062 {
10063 case ARG_POINTER_REGNUM:
10064 switch (to)
10065 {
10066 case THUMB_HARD_FRAME_POINTER_REGNUM:
10067 return 0;
10068
10069 case FRAME_POINTER_REGNUM:
10070 /* This is the reverse of the soft frame pointer
10071 to hard frame pointer elimination below. */
10072 return offsets->soft_frame - offsets->saved_args;
10073
10074 case ARM_HARD_FRAME_POINTER_REGNUM:
10075 /* If there is no stack frame then the hard
10076 frame pointer and the arg pointer coincide. */
10077 if (offsets->frame == offsets->saved_regs)
10078 return 0;
10079 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10080 return (frame_pointer_needed
10081 && cfun->static_chain_decl != NULL
10082 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10083
10084 case STACK_POINTER_REGNUM:
10085 /* If nothing has been pushed on the stack at all
10086 then this will return -4. This *is* correct! */
10087 return offsets->outgoing_args - (offsets->saved_args + 4);
10088
10089 default:
10090 abort ();
10091 }
10092 break;
10093
10094 case FRAME_POINTER_REGNUM:
10095 switch (to)
10096 {
10097 case THUMB_HARD_FRAME_POINTER_REGNUM:
10098 return 0;
10099
10100 case ARM_HARD_FRAME_POINTER_REGNUM:
10101 /* The hard frame pointer points to the top entry in the
10102 stack frame. The soft frame pointer points to the bottom entry
10103 in the stack frame. If there is no stack frame at all,
10104 then they are identical. */
10105
10106 return offsets->frame - offsets->soft_frame;
10107
10108 case STACK_POINTER_REGNUM:
10109 return offsets->outgoing_args - offsets->soft_frame;
10110
10111 default:
10112 abort ();
10113 }
10114 break;
10115
10116 default:
10117 /* You cannot eliminate from the stack pointer.
10118 In theory you could eliminate from the hard frame
10119 pointer to the stack pointer, but this will never
10120 happen, since if a stack frame is not needed the
10121 hard frame pointer will never be used. */
10122 abort ();
10123 }
10124 }
10125
10126
10127 /* Generate the prologue instructions for entry into an ARM function. */
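/* A hypothetical example of what this function emits (illustrative only;
   the exact code depends on the target options and the function): for a
   normal function that needs a frame pointer and saves r4 and lr, the
   prologue assembles to roughly

       mov	ip, sp
       stmfd	sp!, {r4, fp, ip, lr, pc}
       sub	fp, ip, #4
       sub	sp, sp, #<locals + outgoing args>
*/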
10128 void
10129 arm_expand_prologue (void)
10130 {
10131 int reg;
10132 rtx amount;
10133 rtx insn;
10134 rtx ip_rtx;
10135 unsigned long live_regs_mask;
10136 unsigned long func_type;
10137 int fp_offset = 0;
10138 int saved_pretend_args = 0;
10139 int saved_regs = 0;
10140 unsigned HOST_WIDE_INT args_to_push;
10141 arm_stack_offsets *offsets;
10142
10143 func_type = arm_current_func_type ();
10144
10145 /* Naked functions don't have prologues. */
10146 if (IS_NAKED (func_type))
10147 return;
10148
10149 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10150 args_to_push = current_function_pretend_args_size;
10151
10152 /* Compute which registers we will have to save onto the stack. */
10153 live_regs_mask = arm_compute_save_reg_mask ();
10154
10155 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10156
10157 if (frame_pointer_needed)
10158 {
10159 if (IS_INTERRUPT (func_type))
10160 {
10161 /* Interrupt functions must not corrupt any registers.
10162 Creating a frame pointer, however, corrupts the IP
10163 register, so we must push it first. */
10164 insn = emit_multi_reg_push (1 << IP_REGNUM);
10165
10166 /* Do not set RTX_FRAME_RELATED_P on this insn.
10167 The dwarf stack unwinding code only wants to see one
10168 stack decrement per function, and this is not it. If
10169 this instruction is labeled as being part of the frame
10170 creation sequence then dwarf2out_frame_debug_expr will
10171 abort when it encounters the assignment of IP to FP
10172 later on, since the use of SP here establishes SP as
10173 the CFA register and not IP.
10174
10175 Anyway this instruction is not really part of the stack
10176 frame creation although it is part of the prologue. */
10177 }
10178 else if (IS_NESTED (func_type))
10179 {
10180 /* The static chain register is the same as the IP register,
10181 which is used as a scratch register during stack frame creation.
10182 To get around this we need to find somewhere to store IP
10183 whilst the frame is being created. We try the following
10184 places in order:
10185
10186 1. The last argument register.
10187 2. A slot on the stack above the frame. (This only
10188 works if the function is not a varargs function).
10189 3. Register r3, after pushing the argument registers
10190 onto the stack.
10191
10192 Note - we only need to tell the dwarf2 backend about the SP
10193 adjustment in the second variant; the static chain register
10194 doesn't need to be unwound, as it doesn't contain a value
10195 inherited from the caller. */
10196
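/* Illustrative assembly for the three alternatives above (hypothetical;
   the register numbers are only examples):

     1.  mov	r3, ip          @ r3 is free, so park IP there
     2.  str	ip, [sp, #-4]!  @ no pretend args: spill IP just above the frame
     3.  stmfd	sp!, {r0-r3}    @ varargs: push the arg regs, then mov r3, ip
*/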
10197 if (regs_ever_live[3] == 0)
10198 {
10199 insn = gen_rtx_REG (SImode, 3);
10200 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10201 insn = emit_insn (insn);
10202 }
10203 else if (args_to_push == 0)
10204 {
10205 rtx dwarf;
10206 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10207 insn = gen_rtx_MEM (SImode, insn);
10208 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10209 insn = emit_insn (insn);
10210
10211 fp_offset = 4;
10212
10213 /* Just tell the dwarf backend that we adjusted SP. */
10214 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10215 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10216 GEN_INT (-fp_offset)));
10217 RTX_FRAME_RELATED_P (insn) = 1;
10218 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10219 dwarf, REG_NOTES (insn));
10220 }
10221 else
10222 {
10223 /* Store the args on the stack. */
10224 if (cfun->machine->uses_anonymous_args)
10225 insn = emit_multi_reg_push
10226 ((0xf0 >> (args_to_push / 4)) & 0xf);
10227 else
10228 insn = emit_insn
10229 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10230 GEN_INT (- args_to_push)));
10231
10232 RTX_FRAME_RELATED_P (insn) = 1;
10233
10234 saved_pretend_args = 1;
10235 fp_offset = args_to_push;
10236 args_to_push = 0;
10237
10238 /* Now reuse r3 to preserve IP. */
10239 insn = gen_rtx_REG (SImode, 3);
10240 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10241 (void) emit_insn (insn);
10242 }
10243 }
10244
10245 if (fp_offset)
10246 {
10247 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10248 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10249 }
10250 else
10251 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10252
10253 insn = emit_insn (insn);
10254 RTX_FRAME_RELATED_P (insn) = 1;
10255 }
10256
10257 if (args_to_push)
10258 {
10259 /* Push the argument registers, or reserve space for them. */
10260 if (cfun->machine->uses_anonymous_args)
10261 insn = emit_multi_reg_push
10262 ((0xf0 >> (args_to_push / 4)) & 0xf);
10263 else
10264 insn = emit_insn
10265 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10266 GEN_INT (- args_to_push)));
10267 RTX_FRAME_RELATED_P (insn) = 1;
10268 }
10269
10270 /* If this is an interrupt service routine, and the link register
10271 is going to be pushed, and we are not creating a stack frame,
10272 (which would involve an extra push of IP and a pop in the epilogue)
10273 subtracting four from LR now will mean that the function return
10274 can be done with a single instruction. */
10275 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10276 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10277 && ! frame_pointer_needed)
10278 emit_insn (gen_rtx_SET (SImode,
10279 gen_rtx_REG (SImode, LR_REGNUM),
10280 gen_rtx_PLUS (SImode,
10281 gen_rtx_REG (SImode, LR_REGNUM),
10282 GEN_INT (-4))));
10283
10284 if (live_regs_mask)
10285 {
10286 insn = emit_multi_reg_push (live_regs_mask);
10287 saved_regs += bit_count (live_regs_mask) * 4;
10288 RTX_FRAME_RELATED_P (insn) = 1;
10289 }
10290
10291 if (TARGET_IWMMXT)
10292 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10293 if (regs_ever_live[reg] && ! call_used_regs [reg])
10294 {
10295 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10296 insn = gen_rtx_MEM (V2SImode, insn);
10297 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10298 gen_rtx_REG (V2SImode, reg)));
10299 RTX_FRAME_RELATED_P (insn) = 1;
10300 saved_regs += 8;
10301 }
10302
10303 if (! IS_VOLATILE (func_type))
10304 {
10305 int start_reg;
10306
10307 /* Save any floating point call-saved registers used by this
10308 function. */
10309 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10310 {
10311 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10312 if (regs_ever_live[reg] && !call_used_regs[reg])
10313 {
10314 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10315 insn = gen_rtx_MEM (XFmode, insn);
10316 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10317 gen_rtx_REG (XFmode, reg)));
10318 RTX_FRAME_RELATED_P (insn) = 1;
10319 saved_regs += 12;
10320 }
10321 }
10322 else
10323 {
10324 start_reg = LAST_FPA_REGNUM;
10325
10326 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10327 {
10328 if (regs_ever_live[reg] && !call_used_regs[reg])
10329 {
10330 if (start_reg - reg == 3)
10331 {
10332 insn = emit_sfm (reg, 4);
10333 RTX_FRAME_RELATED_P (insn) = 1;
10334 saved_regs += 48;
10335 start_reg = reg - 1;
10336 }
10337 }
10338 else
10339 {
10340 if (start_reg != reg)
10341 {
10342 insn = emit_sfm (reg + 1, start_reg - reg);
10343 RTX_FRAME_RELATED_P (insn) = 1;
10344 saved_regs += (start_reg - reg) * 12;
10345 }
10346 start_reg = reg - 1;
10347 }
10348 }
10349
10350 if (start_reg != reg)
10351 {
10352 insn = emit_sfm (reg + 1, start_reg - reg);
10353 saved_regs += (start_reg - reg) * 12;
10354 RTX_FRAME_RELATED_P (insn) = 1;
10355 }
10356 }
10357 if (TARGET_HARD_FLOAT && TARGET_VFP)
10358 {
10359 start_reg = FIRST_VFP_REGNUM;
10360
10361 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10362 {
10363 if ((!regs_ever_live[reg] || call_used_regs[reg])
10364 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10365 {
10366 if (start_reg != reg)
10367 saved_regs += vfp_emit_fstmx (start_reg,
10368 (reg - start_reg) / 2);
10369 start_reg = reg + 2;
10370 }
10371 }
10372 if (start_reg != reg)
10373 saved_regs += vfp_emit_fstmx (start_reg,
10374 (reg - start_reg) / 2);
10375 }
10376 }
10377
10378 if (frame_pointer_needed)
10379 {
10380 /* Create the new frame pointer. */
10381 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10382 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10383 RTX_FRAME_RELATED_P (insn) = 1;
10384
10385 if (IS_NESTED (func_type))
10386 {
10387 /* Recover the static chain register. */
10388 if (regs_ever_live [3] == 0
10389 || saved_pretend_args)
10390 insn = gen_rtx_REG (SImode, 3);
10391 else /* if (current_function_pretend_args_size == 0) */
10392 {
10393 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10394 GEN_INT (4));
10395 insn = gen_rtx_MEM (SImode, insn);
10396 }
10397
10398 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10399 /* Add a USE to stop propagate_one_insn() from barfing. */
10400 emit_insn (gen_prologue_use (ip_rtx));
10401 }
10402 }
10403
10404 offsets = arm_get_frame_offsets ();
10405 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10406 {
10407 /* This add can produce multiple insns for a large constant, so we
10408 need to get tricky. */
10409 rtx last = get_last_insn ();
10410
10411 amount = GEN_INT (offsets->saved_args + saved_regs
10412 - offsets->outgoing_args);
10413
10414 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10415 amount));
10416 do
10417 {
10418 last = last ? NEXT_INSN (last) : get_insns ();
10419 RTX_FRAME_RELATED_P (last) = 1;
10420 }
10421 while (last != insn);
10422
10423 /* If the frame pointer is needed, emit a special barrier that
10424 will prevent the scheduler from moving stores to the frame
10425 before the stack adjustment. */
10426 if (frame_pointer_needed)
10427 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10428 hard_frame_pointer_rtx));
10429 }
10430
10431
10432 if (flag_pic)
10433 arm_load_pic_register (INVALID_REGNUM);
10434
10435 /* If we are profiling, make sure no instructions are scheduled before
10436 the call to mcount. Similarly if the user has requested no
10437 scheduling in the prolog. */
10438 if (current_function_profile || TARGET_NO_SCHED_PRO)
10439 emit_insn (gen_blockage ());
10440
10441 /* If the link register is being kept alive, with the return address in it,
10442 then make sure that it does not get reused by the ce2 pass. */
10443 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10444 {
10445 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10446 cfun->machine->lr_save_eliminated = 1;
10447 }
10448 }
10449 \f
10450 /* If CODE is 'd', then X is a condition operand and the instruction
10451 should only be executed if the condition is true.
10452 If CODE is 'D', then X is a condition operand and the instruction
10453 should only be executed if the condition is false: however, if the mode
10454 of the comparison is CCFPEmode, then always execute the instruction -- we
10455 do this because in these circumstances !GE does not necessarily imply LT;
10456 in these cases the instruction pattern will take care to make sure that
10457 an instruction containing %d will follow, thereby undoing the effects of
10458 doing this instruction unconditionally.
10459 If CODE is 'N' then X is a floating point operand that must be negated
10460 before output.
10461 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10462 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10463 void
10464 arm_print_operand (FILE *stream, rtx x, int code)
10465 {
10466 switch (code)
10467 {
10468 case '@':
10469 fputs (ASM_COMMENT_START, stream);
10470 return;
10471
10472 case '_':
10473 fputs (user_label_prefix, stream);
10474 return;
10475
10476 case '|':
10477 fputs (REGISTER_PREFIX, stream);
10478 return;
10479
10480 case '?':
10481 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10482 {
10483 if (TARGET_THUMB)
10484 {
10485 output_operand_lossage ("predicated Thumb instruction");
10486 break;
10487 }
10488 if (current_insn_predicate != NULL)
10489 {
10490 output_operand_lossage
10491 ("predicated instruction in conditional sequence");
10492 break;
10493 }
10494
10495 fputs (arm_condition_codes[arm_current_cc], stream);
10496 }
10497 else if (current_insn_predicate)
10498 {
10499 enum arm_cond_code code;
10500
10501 if (TARGET_THUMB)
10502 {
10503 output_operand_lossage ("predicated Thumb instruction");
10504 break;
10505 }
10506
10507 code = get_arm_condition_code (current_insn_predicate);
10508 fputs (arm_condition_codes[code], stream);
10509 }
10510 return;
10511
10512 case 'N':
10513 {
10514 REAL_VALUE_TYPE r;
10515 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10516 r = REAL_VALUE_NEGATE (r);
10517 fprintf (stream, "%s", fp_const_from_val (&r));
10518 }
10519 return;
10520
10521 case 'B':
10522 if (GET_CODE (x) == CONST_INT)
10523 {
10524 HOST_WIDE_INT val;
10525 val = ARM_SIGN_EXTEND (~INTVAL (x));
10526 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10527 }
10528 else
10529 {
10530 putc ('~', stream);
10531 output_addr_const (stream, x);
10532 }
10533 return;
10534
10535 case 'i':
10536 fprintf (stream, "%s", arithmetic_instr (x, 1));
10537 return;
10538
10539 /* Truncate Cirrus shift counts. */
10540 case 's':
10541 if (GET_CODE (x) == CONST_INT)
10542 {
10543 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10544 return;
10545 }
10546 arm_print_operand (stream, x, 0);
10547 return;
10548
10549 case 'I':
10550 fprintf (stream, "%s", arithmetic_instr (x, 0));
10551 return;
10552
10553 case 'S':
10554 {
10555 HOST_WIDE_INT val;
10556 const char * shift = shift_op (x, &val);
10557
10558 if (shift)
10559 {
10560 fprintf (stream, ", %s ", shift_op (x, &val));
10561 if (val == -1)
10562 arm_print_operand (stream, XEXP (x, 1), 0);
10563 else
10564 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10565 }
10566 }
10567 return;
10568
10569 /* An explanation of the 'Q', 'R' and 'H' register operands:
10570
10571 In a pair of registers containing a DI or DF value the 'Q'
10572 operand returns the register number of the register containing
10573 the least significant part of the value. The 'R' operand returns
10574 the register number of the register containing the most
10575 significant part of the value.
10576
10577 The 'H' operand returns the higher of the two register numbers.
10578 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10579 same as the 'Q' operand, since the most significant part of the
10580 value is held in the lower number register. The reverse is true
10581 on systems where WORDS_BIG_ENDIAN is false.
10582
10583 The purpose of these operands is to distinguish between cases
10584 where the endian-ness of the values is important (for example
10585 when they are added together), and cases where the endian-ness
10586 is irrelevant, but the order of register operations is important.
10587 For example when loading a value from memory into a register
10588 pair, the endian-ness does not matter. Provided that the value
10589 from the lower memory address is put into the lower numbered
10590 register, and the value from the higher address is put into the
10591 higher numbered register, the load will work regardless of whether
10592 the value being loaded is big-wordian or little-wordian. The
10593 order of the two register loads can matter however, if the address
10594 of the memory location is actually held in one of the registers
10595 being overwritten by the load. */
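/* A concrete illustration (assuming a little-endian, little-wordian target):
   for a DImode value held in the pair r0/r1, '%Q' prints r0 (the least
   significant word), '%R' prints r1 (the most significant word) and '%H'
   prints r1 (the higher-numbered register). With WORDS_BIG_ENDIAN the
   roles of '%Q' and '%R' swap, while '%H' still prints r1.  */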
10596 case 'Q':
10597 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10598 {
10599 output_operand_lossage ("invalid operand for code '%c'", code);
10600 return;
10601 }
10602
10603 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10604 return;
10605
10606 case 'R':
10607 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10608 {
10609 output_operand_lossage ("invalid operand for code '%c'", code);
10610 return;
10611 }
10612
10613 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10614 return;
10615
10616 case 'H':
10617 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10618 {
10619 output_operand_lossage ("invalid operand for code '%c'", code);
10620 return;
10621 }
10622
10623 asm_fprintf (stream, "%r", REGNO (x) + 1);
10624 return;
10625
10626 case 'm':
10627 asm_fprintf (stream, "%r",
10628 GET_CODE (XEXP (x, 0)) == REG
10629 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10630 return;
10631
10632 case 'M':
10633 asm_fprintf (stream, "{%r-%r}",
10634 REGNO (x),
10635 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10636 return;
10637
10638 case 'd':
10639 /* CONST_TRUE_RTX means always -- that's the default. */
10640 if (x == const_true_rtx)
10641 return;
10642
10643 if (!COMPARISON_P (x))
10644 {
10645 output_operand_lossage ("invalid operand for code '%c'", code);
10646 return;
10647 }
10648
10649 fputs (arm_condition_codes[get_arm_condition_code (x)],
10650 stream);
10651 return;
10652
10653 case 'D':
10654 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10655 want to do that. */
10656 if (x == const_true_rtx)
10657 {
10658 output_operand_lossage ("instruction never executed");
10659 return;
10660 }
10661 if (!COMPARISON_P (x))
10662 {
10663 output_operand_lossage ("invalid operand for code '%c'", code);
10664 return;
10665 }
10666
10667 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10668 (get_arm_condition_code (x))],
10669 stream);
10670 return;
10671
10672 /* Cirrus registers can be accessed in a variety of ways:
10673 single floating point (f)
10674 double floating point (d)
10675 32bit integer (fx)
10676 64bit integer (dx). */
10677 case 'W': /* Cirrus register in F mode. */
10678 case 'X': /* Cirrus register in D mode. */
10679 case 'Y': /* Cirrus register in FX mode. */
10680 case 'Z': /* Cirrus register in DX mode. */
10681 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10682 abort ();
10683
10684 fprintf (stream, "mv%s%s",
10685 code == 'W' ? "f"
10686 : code == 'X' ? "d"
10687 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10688
10689 return;
10690
10691 /* Print a Cirrus register using the format implied by the register's mode. */
10692 case 'V':
10693 {
10694 int mode = GET_MODE (x);
10695
10696 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10697 {
10698 output_operand_lossage ("invalid operand for code '%c'", code);
10699 return;
10700 }
10701
10702 fprintf (stream, "mv%s%s",
10703 mode == DFmode ? "d"
10704 : mode == SImode ? "fx"
10705 : mode == DImode ? "dx"
10706 : "f", reg_names[REGNO (x)] + 2);
10707
10708 return;
10709 }
10710
10711 case 'U':
10712 if (GET_CODE (x) != REG
10713 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10714 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10715 /* Bad value for wCG register number. */
10716 {
10717 output_operand_lossage ("invalid operand for code '%c'", code);
10718 return;
10719 }
10720
10721 else
10722 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10723 return;
10724
10725 /* Print an iWMMXt control register name. */
10726 case 'w':
10727 if (GET_CODE (x) != CONST_INT
10728 || INTVAL (x) < 0
10729 || INTVAL (x) >= 16)
10730 /* Bad value for wC register number. */
10731 {
10732 output_operand_lossage ("invalid operand for code '%c'", code);
10733 return;
10734 }
10735
10736 else
10737 {
10738 static const char * wc_reg_names [16] =
10739 {
10740 "wCID", "wCon", "wCSSF", "wCASF",
10741 "wC4", "wC5", "wC6", "wC7",
10742 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10743 "wC12", "wC13", "wC14", "wC15"
10744 };
10745
10746 fprintf (stream, wc_reg_names [INTVAL (x)]);
10747 }
10748 return;
10749
10750 /* Print a VFP double precision register name. */
10751 case 'P':
10752 {
10753 int mode = GET_MODE (x);
10754 int num;
10755
10756 if (mode != DImode && mode != DFmode)
10757 {
10758 output_operand_lossage ("invalid operand for code '%c'", code);
10759 return;
10760 }
10761
10762 if (GET_CODE (x) != REG
10763 || !IS_VFP_REGNUM (REGNO (x)))
10764 {
10765 output_operand_lossage ("invalid operand for code '%c'", code);
10766 return;
10767 }
10768
10769 num = REGNO(x) - FIRST_VFP_REGNUM;
10770 if (num & 1)
10771 {
10772 output_operand_lossage ("invalid operand for code '%c'", code);
10773 return;
10774 }
10775
10776 fprintf (stream, "d%d", num >> 1);
10777 }
10778 return;
10779
10780 default:
10781 if (x == 0)
10782 {
10783 output_operand_lossage ("missing operand");
10784 return;
10785 }
10786
10787 if (GET_CODE (x) == REG)
10788 asm_fprintf (stream, "%r", REGNO (x));
10789 else if (GET_CODE (x) == MEM)
10790 {
10791 output_memory_reference_mode = GET_MODE (x);
10792 output_address (XEXP (x, 0));
10793 }
10794 else if (GET_CODE (x) == CONST_DOUBLE)
10795 fprintf (stream, "#%s", fp_immediate_constant (x));
10796 else if (GET_CODE (x) == NEG)
10797 abort (); /* This should never happen now. */
10798 else
10799 {
10800 fputc ('#', stream);
10801 output_addr_const (stream, x);
10802 }
10803 }
10804 }
10805 \f
10806 #ifndef AOF_ASSEMBLER
10807 /* Target hook for assembling integer objects. The ARM version needs to
10808 handle word-sized values specially. */
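/* An illustrative example (the symbol name is hypothetical): when emitting a
   word-sized constant-table entry for a local symbol under -fpic, the code
   below writes something like

       .word	.LC0(GOTOFF)

   a global symbol gets "(GOT)" instead; non-PIC code emits just the plain
   ".word", and other sizes fall back to the generic integer output.  */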
10809 static bool
10810 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10811 {
10812 if (size == UNITS_PER_WORD && aligned_p)
10813 {
10814 fputs ("\t.word\t", asm_out_file);
10815 output_addr_const (asm_out_file, x);
10816
10817 /* Mark symbols as position independent. We only do this in the
10818 .text segment, not in the .data segment. */
10819 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10820 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10821 {
10822 if (GET_CODE (x) == SYMBOL_REF
10823 && (CONSTANT_POOL_ADDRESS_P (x)
10824 || SYMBOL_REF_LOCAL_P (x)))
10825 fputs ("(GOTOFF)", asm_out_file);
10826 else if (GET_CODE (x) == LABEL_REF)
10827 fputs ("(GOTOFF)", asm_out_file);
10828 else
10829 fputs ("(GOT)", asm_out_file);
10830 }
10831 fputc ('\n', asm_out_file);
10832 return true;
10833 }
10834
10835 if (arm_vector_mode_supported_p (GET_MODE (x)))
10836 {
10837 int i, units;
10838
10839 if (GET_CODE (x) != CONST_VECTOR)
10840 abort ();
10841
10842 units = CONST_VECTOR_NUNITS (x);
10843
10844 switch (GET_MODE (x))
10845 {
10846 case V2SImode: size = 4; break;
10847 case V4HImode: size = 2; break;
10848 case V8QImode: size = 1; break;
10849 default:
10850 abort ();
10851 }
10852
10853 for (i = 0; i < units; i++)
10854 {
10855 rtx elt;
10856
10857 elt = CONST_VECTOR_ELT (x, i);
10858 assemble_integer
10859 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10860 }
10861
10862 return true;
10863 }
10864
10865 return default_assemble_integer (x, size, aligned_p);
10866 }
10867 #endif
10868 \f
10869 /* A finite state machine takes care of noticing whether or not instructions
10870 can be conditionally executed, and thus decreases execution time and code
10871 size by deleting branch instructions. The fsm is controlled by
10872 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10873
10874 /* The states of the fsm controlling condition codes are:
10875 0: normal, do nothing special
10876 1: make ASM_OUTPUT_OPCODE not output this instruction
10877 2: make ASM_OUTPUT_OPCODE not output this instruction
10878 3: make instructions conditional
10879 4: make instructions conditional
10880
10881 State transitions (state->state by whom under condition):
10882 0 -> 1 final_prescan_insn if the `target' is a label
10883 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10884 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10885 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10886 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10887 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10888 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10889 (the target insn is arm_target_insn).
10890
10891 If the jump clobbers the conditions then we use states 2 and 4.
10892
10893 A similar thing can be done with conditional return insns.
10894
10895 XXX In case the `target' is an unconditional branch, this conditionalising
10896 of the instructions always reduces code size, but not always execution
10897 time. But then, I want to reduce the code size to somewhere near what
10898 /bin/cc produces. */
10899
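/* An illustrative example of the transformation this machinery performs
   (hypothetical code, not produced from this file):

       cmp	r0, #0            cmp	r0, #0
       beq	.L1         ==>   addne	r1, r1, #1
       add	r1, r1, #1
     .L1:

   The conditional branch is deleted and the skipped instruction is emitted
   with the inverse condition instead.  */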
10900 /* Returns the index of the ARM condition code string in
10901 `arm_condition_codes'. COMPARISON should be an rtx like
10902 `(eq (...) (...))'. */
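/* Two small examples of the mapping below (the comparison modes are chosen
   purely for illustration): a CCmode (ge ...) comparison returns ARM_GE
   ("ge"), whereas the same (ge ...) in CC_SWPmode, where the operands were
   swapped when the flags were set, returns ARM_LE ("le").  */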
10903 static enum arm_cond_code
10904 get_arm_condition_code (rtx comparison)
10905 {
10906 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10907 int code;
10908 enum rtx_code comp_code = GET_CODE (comparison);
10909
10910 if (GET_MODE_CLASS (mode) != MODE_CC)
10911 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10912 XEXP (comparison, 1));
10913
10914 switch (mode)
10915 {
10916 case CC_DNEmode: code = ARM_NE; goto dominance;
10917 case CC_DEQmode: code = ARM_EQ; goto dominance;
10918 case CC_DGEmode: code = ARM_GE; goto dominance;
10919 case CC_DGTmode: code = ARM_GT; goto dominance;
10920 case CC_DLEmode: code = ARM_LE; goto dominance;
10921 case CC_DLTmode: code = ARM_LT; goto dominance;
10922 case CC_DGEUmode: code = ARM_CS; goto dominance;
10923 case CC_DGTUmode: code = ARM_HI; goto dominance;
10924 case CC_DLEUmode: code = ARM_LS; goto dominance;
10925 case CC_DLTUmode: code = ARM_CC;
10926
10927 dominance:
10928 if (comp_code != EQ && comp_code != NE)
10929 abort ();
10930
10931 if (comp_code == EQ)
10932 return ARM_INVERSE_CONDITION_CODE (code);
10933 return code;
10934
10935 case CC_NOOVmode:
10936 switch (comp_code)
10937 {
10938 case NE: return ARM_NE;
10939 case EQ: return ARM_EQ;
10940 case GE: return ARM_PL;
10941 case LT: return ARM_MI;
10942 default: abort ();
10943 }
10944
10945 case CC_Zmode:
10946 switch (comp_code)
10947 {
10948 case NE: return ARM_NE;
10949 case EQ: return ARM_EQ;
10950 default: abort ();
10951 }
10952
10953 case CC_Nmode:
10954 switch (comp_code)
10955 {
10956 case NE: return ARM_MI;
10957 case EQ: return ARM_PL;
10958 default: abort ();
10959 }
10960
10961 case CCFPEmode:
10962 case CCFPmode:
10963 /* These encodings assume that AC=1 in the FPA system control
10964 byte. This allows us to handle all cases except UNEQ and
10965 LTGT. */
10966 switch (comp_code)
10967 {
10968 case GE: return ARM_GE;
10969 case GT: return ARM_GT;
10970 case LE: return ARM_LS;
10971 case LT: return ARM_MI;
10972 case NE: return ARM_NE;
10973 case EQ: return ARM_EQ;
10974 case ORDERED: return ARM_VC;
10975 case UNORDERED: return ARM_VS;
10976 case UNLT: return ARM_LT;
10977 case UNLE: return ARM_LE;
10978 case UNGT: return ARM_HI;
10979 case UNGE: return ARM_PL;
10980 /* UNEQ and LTGT do not have a representation. */
10981 case UNEQ: /* Fall through. */
10982 case LTGT: /* Fall through. */
10983 default: abort ();
10984 }
10985
10986 case CC_SWPmode:
10987 switch (comp_code)
10988 {
10989 case NE: return ARM_NE;
10990 case EQ: return ARM_EQ;
10991 case GE: return ARM_LE;
10992 case GT: return ARM_LT;
10993 case LE: return ARM_GE;
10994 case LT: return ARM_GT;
10995 case GEU: return ARM_LS;
10996 case GTU: return ARM_CC;
10997 case LEU: return ARM_CS;
10998 case LTU: return ARM_HI;
10999 default: abort ();
11000 }
11001
11002 case CC_Cmode:
11003 switch (comp_code)
11004 {
11005 case LTU: return ARM_CS;
11006 case GEU: return ARM_CC;
11007 default: abort ();
11008 }
11009
11010 case CCmode:
11011 switch (comp_code)
11012 {
11013 case NE: return ARM_NE;
11014 case EQ: return ARM_EQ;
11015 case GE: return ARM_GE;
11016 case GT: return ARM_GT;
11017 case LE: return ARM_LE;
11018 case LT: return ARM_LT;
11019 case GEU: return ARM_CS;
11020 case GTU: return ARM_HI;
11021 case LEU: return ARM_LS;
11022 case LTU: return ARM_CC;
11023 default: abort ();
11024 }
11025
11026 default: abort ();
11027 }
11028
11029 abort ();
11030 }
11031
11032 void
11033 arm_final_prescan_insn (rtx insn)
11034 {
11035 /* BODY will hold the body of INSN. */
11036 rtx body = PATTERN (insn);
11037
11038 /* This will be 1 if trying to repeat the trick, and things need to be
11039 reversed if it appears to fail. */
11040 int reverse = 0;
11041
11042 /* JUMP_CLOBBERS will be one if the condition codes are clobbered when
11043 the branch is taken, even if the rtl suggests otherwise. It also
11044 means that we have to grub around within the jump expression to find
11045 out what the conditions are when the jump isn't taken. */
11046 int jump_clobbers = 0;
11047
11048 /* If we start with a return insn, we only succeed if we find another one. */
11049 int seeking_return = 0;
11050
11051 /* START_INSN will hold the insn from where we start looking. This is the
11052 first insn after the following code_label if REVERSE is true. */
11053 rtx start_insn = insn;
11054
11055 /* If in state 4, check if the target branch is reached, in order to
11056 change back to state 0. */
11057 if (arm_ccfsm_state == 4)
11058 {
11059 if (insn == arm_target_insn)
11060 {
11061 arm_target_insn = NULL;
11062 arm_ccfsm_state = 0;
11063 }
11064 return;
11065 }
11066
11067 /* If in state 3, it is possible to repeat the trick, if this insn is an
11068 unconditional branch to a label, and immediately following this branch
11069 is the previous target label which is only used once, and the label this
11070 branch jumps to is not too far off. */
11071 if (arm_ccfsm_state == 3)
11072 {
11073 if (simplejump_p (insn))
11074 {
11075 start_insn = next_nonnote_insn (start_insn);
11076 if (GET_CODE (start_insn) == BARRIER)
11077 {
11078 /* XXX Isn't this always a barrier? */
11079 start_insn = next_nonnote_insn (start_insn);
11080 }
11081 if (GET_CODE (start_insn) == CODE_LABEL
11082 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11083 && LABEL_NUSES (start_insn) == 1)
11084 reverse = TRUE;
11085 else
11086 return;
11087 }
11088 else if (GET_CODE (body) == RETURN)
11089 {
11090 start_insn = next_nonnote_insn (start_insn);
11091 if (GET_CODE (start_insn) == BARRIER)
11092 start_insn = next_nonnote_insn (start_insn);
11093 if (GET_CODE (start_insn) == CODE_LABEL
11094 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11095 && LABEL_NUSES (start_insn) == 1)
11096 {
11097 reverse = TRUE;
11098 seeking_return = 1;
11099 }
11100 else
11101 return;
11102 }
11103 else
11104 return;
11105 }
11106
11107 if (arm_ccfsm_state != 0 && !reverse)
11108 abort ();
11109 if (GET_CODE (insn) != JUMP_INSN)
11110 return;
11111
11112 /* This jump might be paralleled with a clobber of the condition codes;
11113 the jump should always come first. */
11114 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11115 body = XVECEXP (body, 0, 0);
11116
11117 if (reverse
11118 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11119 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11120 {
11121 int insns_skipped;
11122 int fail = FALSE, succeed = FALSE;
11123 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11124 int then_not_else = TRUE;
11125 rtx this_insn = start_insn, label = 0;
11126
11127 /* If the jump cannot be done with one instruction, we cannot
11128 conditionally execute the instruction in the inverse case. */
11129 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11130 {
11131 jump_clobbers = 1;
11132 return;
11133 }
11134
11135 /* Register the insn jumped to. */
11136 if (reverse)
11137 {
11138 if (!seeking_return)
11139 label = XEXP (SET_SRC (body), 0);
11140 }
11141 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11142 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11143 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11144 {
11145 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11146 then_not_else = FALSE;
11147 }
11148 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11149 seeking_return = 1;
11150 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11151 {
11152 seeking_return = 1;
11153 then_not_else = FALSE;
11154 }
11155 else
11156 abort ();
11157
11158 /* See how many insns this branch skips, and what kind of insns. If all
11159 insns are okay, and the label or unconditional branch to the same
11160 label is not too far away, succeed. */
11161 for (insns_skipped = 0;
11162 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11163 {
11164 rtx scanbody;
11165
11166 this_insn = next_nonnote_insn (this_insn);
11167 if (!this_insn)
11168 break;
11169
11170 switch (GET_CODE (this_insn))
11171 {
11172 case CODE_LABEL:
11173 /* Succeed if it is the target label, otherwise fail since
11174 control falls in from somewhere else. */
11175 if (this_insn == label)
11176 {
11177 if (jump_clobbers)
11178 {
11179 arm_ccfsm_state = 2;
11180 this_insn = next_nonnote_insn (this_insn);
11181 }
11182 else
11183 arm_ccfsm_state = 1;
11184 succeed = TRUE;
11185 }
11186 else
11187 fail = TRUE;
11188 break;
11189
11190 case BARRIER:
11191 /* Succeed if the following insn is the target label.
11192 Otherwise fail.
11193 If return insns are used then the last insn in a function
11194 will be a barrier. */
11195 this_insn = next_nonnote_insn (this_insn);
11196 if (this_insn && this_insn == label)
11197 {
11198 if (jump_clobbers)
11199 {
11200 arm_ccfsm_state = 2;
11201 this_insn = next_nonnote_insn (this_insn);
11202 }
11203 else
11204 arm_ccfsm_state = 1;
11205 succeed = TRUE;
11206 }
11207 else
11208 fail = TRUE;
11209 break;
11210
11211 case CALL_INSN:
11212 /* The AAPCS says that conditional calls should not be
11213 used since they make interworking inefficient (the
11214 linker can't transform BL<cond> into BLX). That's
11215 only a problem if the machine has BLX. */
11216 if (arm_arch5)
11217 {
11218 fail = TRUE;
11219 break;
11220 }
11221
11222 /* Succeed if the following insn is the target label, or
11223 if the following two insns are a barrier and the
11224 target label. */
11225 this_insn = next_nonnote_insn (this_insn);
11226 if (this_insn && GET_CODE (this_insn) == BARRIER)
11227 this_insn = next_nonnote_insn (this_insn);
11228
11229 if (this_insn && this_insn == label
11230 && insns_skipped < max_insns_skipped)
11231 {
11232 if (jump_clobbers)
11233 {
11234 arm_ccfsm_state = 2;
11235 this_insn = next_nonnote_insn (this_insn);
11236 }
11237 else
11238 arm_ccfsm_state = 1;
11239 succeed = TRUE;
11240 }
11241 else
11242 fail = TRUE;
11243 break;
11244
11245 case JUMP_INSN:
11246 /* If this is an unconditional branch to the same label, succeed.
11247 If it is to another label, do nothing. If it is conditional,
11248 fail. */
11249 /* XXX Probably, the tests for SET and the PC are
11250 unnecessary. */
11251
11252 scanbody = PATTERN (this_insn);
11253 if (GET_CODE (scanbody) == SET
11254 && GET_CODE (SET_DEST (scanbody)) == PC)
11255 {
11256 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11257 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11258 {
11259 arm_ccfsm_state = 2;
11260 succeed = TRUE;
11261 }
11262 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11263 fail = TRUE;
11264 }
11265 /* Fail if a conditional return is undesirable (e.g. on a
11266 StrongARM), but still allow this if optimizing for size. */
11267 else if (GET_CODE (scanbody) == RETURN
11268 && !use_return_insn (TRUE, NULL)
11269 && !optimize_size)
11270 fail = TRUE;
11271 else if (GET_CODE (scanbody) == RETURN
11272 && seeking_return)
11273 {
11274 arm_ccfsm_state = 2;
11275 succeed = TRUE;
11276 }
11277 else if (GET_CODE (scanbody) == PARALLEL)
11278 {
11279 switch (get_attr_conds (this_insn))
11280 {
11281 case CONDS_NOCOND:
11282 break;
11283 default:
11284 fail = TRUE;
11285 break;
11286 }
11287 }
11288 else
11289 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11290
11291 break;
11292
11293 case INSN:
11294 /* Instructions using or affecting the condition codes make it
11295 fail. */
11296 scanbody = PATTERN (this_insn);
11297 if (!(GET_CODE (scanbody) == SET
11298 || GET_CODE (scanbody) == PARALLEL)
11299 || get_attr_conds (this_insn) != CONDS_NOCOND)
11300 fail = TRUE;
11301
11302 /* A conditional Cirrus instruction must be followed by
11303 a non-Cirrus instruction. However, since we
11304 conditionalize instructions in this function, and since by
11305 the time we get here we can no longer add instructions
11306 (nops) because shorten_branches() has already been
11307 called, we disable conditionalizing Cirrus
11308 instructions altogether, to be safe. */
11309 if (GET_CODE (scanbody) != USE
11310 && GET_CODE (scanbody) != CLOBBER
11311 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11312 fail = TRUE;
11313 break;
11314
11315 default:
11316 break;
11317 }
11318 }
11319 if (succeed)
11320 {
11321 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11322 arm_target_label = CODE_LABEL_NUMBER (label);
11323 else if (seeking_return || arm_ccfsm_state == 2)
11324 {
11325 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11326 {
11327 this_insn = next_nonnote_insn (this_insn);
11328 if (this_insn && (GET_CODE (this_insn) == BARRIER
11329 || GET_CODE (this_insn) == CODE_LABEL))
11330 abort ();
11331 }
11332 if (!this_insn)
11333 {
11334 /* Oh, dear! We ran off the end... give up. */
11335 recog (PATTERN (insn), insn, NULL);
11336 arm_ccfsm_state = 0;
11337 arm_target_insn = NULL;
11338 return;
11339 }
11340 arm_target_insn = this_insn;
11341 }
11342 else
11343 abort ();
11344 if (jump_clobbers)
11345 {
11346 if (reverse)
11347 abort ();
11348 arm_current_cc =
11349 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11350 0), 0), 1));
11351 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11352 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11353 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11354 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11355 }
11356 else
11357 {
11358 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11359 what it was. */
11360 if (!reverse)
11361 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11362 0));
11363 }
11364
11365 if (reverse || then_not_else)
11366 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11367 }
11368
11369 /* Restore recog_data (getting the attributes of other insns can
11370 destroy this array, but final.c assumes that it remains intact
11371 across this call; since the insn has been recognized already we
11372 call recog directly). */
11373 recog (PATTERN (insn), insn, NULL);
11374 }
11375 }
11376
11377 /* Returns true if REGNO is a valid register
11378 for holding a quantity of type MODE. */
11379 int
11380 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11381 {
11382 if (GET_MODE_CLASS (mode) == MODE_CC)
11383 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11384
11385 if (TARGET_THUMB)
11386 /* For the Thumb we only allow values bigger than SImode in
11387 registers 0 - 6, so that there is always a second low
11388 register available to hold the upper part of the value.
11389 We probably ought to ensure that the register is the
11390 start of an even-numbered register pair. */
11391 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11392
11393 if (IS_CIRRUS_REGNUM (regno))
11394 /* We have outlawed SI values in Cirrus registers because they
11395 reside in the lower 32 bits, but SF values reside in the
11396 upper 32 bits. This causes gcc all sorts of grief. We can't
11397 even split the registers into pairs because Cirrus SI values
11398 get sign extended to 64bits-- aldyh. */
11399 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11400
11401 if (IS_VFP_REGNUM (regno))
11402 {
11403 if (mode == SFmode || mode == SImode)
11404 return TRUE;
11405
11406 /* DFmode values are only valid in even register pairs. */
11407 if (mode == DFmode)
11408 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11409 return FALSE;
11410 }
11411
11412 if (IS_IWMMXT_GR_REGNUM (regno))
11413 return mode == SImode;
11414
11415 if (IS_IWMMXT_REGNUM (regno))
11416 return VALID_IWMMXT_REG_MODE (mode);
11417
11418 /* We allow any value to be stored in the general registers.
11419 Restrict doubleword quantities to even register pairs so that we can
11420 use ldrd. */
11421 if (regno <= LAST_ARM_REGNUM)
11422 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11423
11424 if ( regno == FRAME_POINTER_REGNUM
11425 || regno == ARG_POINTER_REGNUM)
11426 /* We only allow integers in the fake hard registers. */
11427 return GET_MODE_CLASS (mode) == MODE_INT;
11428
11429 /* The only registers left are the FPA registers
11430 which we only allow to hold FP values. */
11431 return GET_MODE_CLASS (mode) == MODE_FLOAT
11432 && regno >= FIRST_FPA_REGNUM
11433 && regno <= LAST_FPA_REGNUM;
11434 }
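
/* Some concrete cases of the rules above: on Thumb a DImode value (which
   needs two registers) is accepted in r0-r6 but rejected in r7 and above;
   a VFP DFmode value is accepted only when (regno - FIRST_VFP_REGNUM) is
   even; and with TARGET_LDRD a doubleword value in the core registers must
   start on an even register so that ldrd/strd can be used.  */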
11435
11436 int
11437 arm_regno_class (int regno)
11438 {
11439 if (TARGET_THUMB)
11440 {
11441 if (regno == STACK_POINTER_REGNUM)
11442 return STACK_REG;
11443 if (regno == CC_REGNUM)
11444 return CC_REG;
11445 if (regno < 8)
11446 return LO_REGS;
11447 return HI_REGS;
11448 }
11449
11450 if ( regno <= LAST_ARM_REGNUM
11451 || regno == FRAME_POINTER_REGNUM
11452 || regno == ARG_POINTER_REGNUM)
11453 return GENERAL_REGS;
11454
11455 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11456 return NO_REGS;
11457
11458 if (IS_CIRRUS_REGNUM (regno))
11459 return CIRRUS_REGS;
11460
11461 if (IS_VFP_REGNUM (regno))
11462 return VFP_REGS;
11463
11464 if (IS_IWMMXT_REGNUM (regno))
11465 return IWMMXT_REGS;
11466
11467 if (IS_IWMMXT_GR_REGNUM (regno))
11468 return IWMMXT_GR_REGS;
11469
11470 return FPA_REGS;
11471 }
11472
11473 /* Handle a special case when computing the offset
11474 of an argument from the frame pointer. */
11475 int
11476 arm_debugger_arg_offset (int value, rtx addr)
11477 {
11478 rtx insn;
11479
11480 /* We are only interested if dbxout_parms() failed to compute the offset. */
11481 if (value != 0)
11482 return 0;
11483
11484 /* We can only cope with the case where the address is held in a register. */
11485 if (GET_CODE (addr) != REG)
11486 return 0;
11487
11488 /* If we are using the frame pointer to point at the argument, then
11489 an offset of 0 is correct. */
11490 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11491 return 0;
11492
11493 /* If we are using the stack pointer to point at the
11494 argument, then an offset of 0 is correct. */
11495 if ((TARGET_THUMB || !frame_pointer_needed)
11496 && REGNO (addr) == SP_REGNUM)
11497 return 0;
11498
11499 /* Oh dear. The argument is pointed to by a register rather
11500 than being held in a register, or being stored at a known
11501 offset from the frame pointer. Since GDB only understands
11502 those two kinds of argument we must translate the address
11503 held in the register into an offset from the frame pointer.
11504 We do this by searching through the insns for the function
11505 looking to see where this register gets its value. If the
11506 register is initialized from the frame pointer plus an offset
11507 then we are in luck and we can continue, otherwise we give up.
11508
11509 This code is exercised by producing debugging information
11510 for a function with arguments like this:
11511
11512 double func (double a, double b, int c, double d) {return d;}
11513
11514 Without this code the stab for parameter 'd' will be set to
11515 an offset of 0 from the frame pointer, rather than 8. */
11516
11517 /* The if() statement says:
11518
11519 If the insn is a normal instruction
11520 and if the insn is setting the value in a register
11521 and if the register being set is the register holding the address of the argument
11522           and if the address is computed by an addition
11523 that involves adding to a register
11524 which is the frame pointer
11525 a constant integer
11526
11527 then... */
11528
11529 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11530 {
11531 if ( GET_CODE (insn) == INSN
11532 && GET_CODE (PATTERN (insn)) == SET
11533 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11534 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11535 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11536 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11537 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11538 )
11539 {
11540 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11541
11542 break;
11543 }
11544 }
11545
11546 if (value == 0)
11547 {
11548 debug_rtx (addr);
11549 warning ("unable to compute real location of stacked parameter");
11550 value = 8; /* XXX magic hack */
11551 }
11552
11553 return value;
11554 }
11555 \f
11556 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11557 do \
11558 { \
11559 if ((MASK) & insn_flags) \
11560 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11561 BUILT_IN_MD, NULL, NULL_TREE); \
11562 } \
11563 while (0)
11564
11565 struct builtin_description
11566 {
11567 const unsigned int mask;
11568 const enum insn_code icode;
11569 const char * const name;
11570 const enum arm_builtins code;
11571 const enum rtx_code comparison;
11572 const unsigned int flag;
11573 };
11574
11575 static const struct builtin_description bdesc_2arg[] =
11576 {
11577 #define IWMMXT_BUILTIN(code, string, builtin) \
11578 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11579 ARM_BUILTIN_##builtin, 0, 0 },
11580
11581 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11582 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11583 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11584 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11585 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11586 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11587 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11588 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11589 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11590 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11591 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11592 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11593 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11594 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11595 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11596 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11597 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11598 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11599 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11600 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11601 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11602 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11603 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11604 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11605 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11606 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11607 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11608 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11609 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11610 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11611 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11612 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11613 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11614 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11615 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11616 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11617 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11618 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11619 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11620 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11621 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11622 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11623 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11624 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11625 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11626 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11627 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11628 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11629 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11630 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11631 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11632 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11633 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11634 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11635 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11636 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11637 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11638 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11639
11640 #define IWMMXT_BUILTIN2(code, builtin) \
11641 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11642
11643 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11644 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11645 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11646 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11647 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11648 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11649 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11650 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11651 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11652 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11653 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11654 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11655 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11656 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11657 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11658 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11659 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11660 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11661 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11662 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11663 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11664 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11665 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11666 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11667 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11668 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11669 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11670 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11671 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11672 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11673 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11674 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11675 };
11676
11677 static const struct builtin_description bdesc_1arg[] =
11678 {
11679 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11680 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11681 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11682 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11683 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11684 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11685 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11686 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11687 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11688 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11689 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11690 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11691 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11692 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11693 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11694 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11695 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11696 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11697 };
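
/* A rough sketch of how one of the two-operand builtins tabulated above
   might be used from C, assuming the GCC vector_size extension for the
   element types; the typedef and function names here are purely
   illustrative.

     typedef signed char v8qi __attribute__ ((vector_size (8)));

     v8qi
     add_bytes (v8qi a, v8qi b)
     {
       return __builtin_arm_waddb (a, b);
     }
*/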
11698
11699 /* Set up all the iWMMXt builtins. This is
11700 not called if TARGET_IWMMXT is zero. */
11701
11702 static void
11703 arm_init_iwmmxt_builtins (void)
11704 {
11705 const struct builtin_description * d;
11706 size_t i;
11707 tree endlink = void_list_node;
11708
11709 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11710 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11711 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11712
11713 tree int_ftype_int
11714 = build_function_type (integer_type_node,
11715 tree_cons (NULL_TREE, integer_type_node, endlink));
11716 tree v8qi_ftype_v8qi_v8qi_int
11717 = build_function_type (V8QI_type_node,
11718 tree_cons (NULL_TREE, V8QI_type_node,
11719 tree_cons (NULL_TREE, V8QI_type_node,
11720 tree_cons (NULL_TREE,
11721 integer_type_node,
11722 endlink))));
11723 tree v4hi_ftype_v4hi_int
11724 = build_function_type (V4HI_type_node,
11725 tree_cons (NULL_TREE, V4HI_type_node,
11726 tree_cons (NULL_TREE, integer_type_node,
11727 endlink)));
11728 tree v2si_ftype_v2si_int
11729 = build_function_type (V2SI_type_node,
11730 tree_cons (NULL_TREE, V2SI_type_node,
11731 tree_cons (NULL_TREE, integer_type_node,
11732 endlink)));
11733 tree v2si_ftype_di_di
11734 = build_function_type (V2SI_type_node,
11735 tree_cons (NULL_TREE, long_long_integer_type_node,
11736 tree_cons (NULL_TREE, long_long_integer_type_node,
11737 endlink)));
11738 tree di_ftype_di_int
11739 = build_function_type (long_long_integer_type_node,
11740 tree_cons (NULL_TREE, long_long_integer_type_node,
11741 tree_cons (NULL_TREE, integer_type_node,
11742 endlink)));
11743 tree di_ftype_di_int_int
11744 = build_function_type (long_long_integer_type_node,
11745 tree_cons (NULL_TREE, long_long_integer_type_node,
11746 tree_cons (NULL_TREE, integer_type_node,
11747 tree_cons (NULL_TREE,
11748 integer_type_node,
11749 endlink))));
11750 tree int_ftype_v8qi
11751 = build_function_type (integer_type_node,
11752 tree_cons (NULL_TREE, V8QI_type_node,
11753 endlink));
11754 tree int_ftype_v4hi
11755 = build_function_type (integer_type_node,
11756 tree_cons (NULL_TREE, V4HI_type_node,
11757 endlink));
11758 tree int_ftype_v2si
11759 = build_function_type (integer_type_node,
11760 tree_cons (NULL_TREE, V2SI_type_node,
11761 endlink));
11762 tree int_ftype_v8qi_int
11763 = build_function_type (integer_type_node,
11764 tree_cons (NULL_TREE, V8QI_type_node,
11765 tree_cons (NULL_TREE, integer_type_node,
11766 endlink)));
11767 tree int_ftype_v4hi_int
11768 = build_function_type (integer_type_node,
11769 tree_cons (NULL_TREE, V4HI_type_node,
11770 tree_cons (NULL_TREE, integer_type_node,
11771 endlink)));
11772 tree int_ftype_v2si_int
11773 = build_function_type (integer_type_node,
11774 tree_cons (NULL_TREE, V2SI_type_node,
11775 tree_cons (NULL_TREE, integer_type_node,
11776 endlink)));
11777 tree v8qi_ftype_v8qi_int_int
11778 = build_function_type (V8QI_type_node,
11779 tree_cons (NULL_TREE, V8QI_type_node,
11780 tree_cons (NULL_TREE, integer_type_node,
11781 tree_cons (NULL_TREE,
11782 integer_type_node,
11783 endlink))));
11784 tree v4hi_ftype_v4hi_int_int
11785 = build_function_type (V4HI_type_node,
11786 tree_cons (NULL_TREE, V4HI_type_node,
11787 tree_cons (NULL_TREE, integer_type_node,
11788 tree_cons (NULL_TREE,
11789 integer_type_node,
11790 endlink))));
11791 tree v2si_ftype_v2si_int_int
11792 = build_function_type (V2SI_type_node,
11793 tree_cons (NULL_TREE, V2SI_type_node,
11794 tree_cons (NULL_TREE, integer_type_node,
11795 tree_cons (NULL_TREE,
11796 integer_type_node,
11797 endlink))));
11798 /* Miscellaneous. */
11799 tree v8qi_ftype_v4hi_v4hi
11800 = build_function_type (V8QI_type_node,
11801 tree_cons (NULL_TREE, V4HI_type_node,
11802 tree_cons (NULL_TREE, V4HI_type_node,
11803 endlink)));
11804 tree v4hi_ftype_v2si_v2si
11805 = build_function_type (V4HI_type_node,
11806 tree_cons (NULL_TREE, V2SI_type_node,
11807 tree_cons (NULL_TREE, V2SI_type_node,
11808 endlink)));
11809 tree v2si_ftype_v4hi_v4hi
11810 = build_function_type (V2SI_type_node,
11811 tree_cons (NULL_TREE, V4HI_type_node,
11812 tree_cons (NULL_TREE, V4HI_type_node,
11813 endlink)));
11814 tree v2si_ftype_v8qi_v8qi
11815 = build_function_type (V2SI_type_node,
11816 tree_cons (NULL_TREE, V8QI_type_node,
11817 tree_cons (NULL_TREE, V8QI_type_node,
11818 endlink)));
11819 tree v4hi_ftype_v4hi_di
11820 = build_function_type (V4HI_type_node,
11821 tree_cons (NULL_TREE, V4HI_type_node,
11822 tree_cons (NULL_TREE,
11823 long_long_integer_type_node,
11824 endlink)));
11825 tree v2si_ftype_v2si_di
11826 = build_function_type (V2SI_type_node,
11827 tree_cons (NULL_TREE, V2SI_type_node,
11828 tree_cons (NULL_TREE,
11829 long_long_integer_type_node,
11830 endlink)));
11831 tree void_ftype_int_int
11832 = build_function_type (void_type_node,
11833 tree_cons (NULL_TREE, integer_type_node,
11834 tree_cons (NULL_TREE, integer_type_node,
11835 endlink)));
11836 tree di_ftype_void
11837 = build_function_type (long_long_unsigned_type_node, endlink);
11838 tree di_ftype_v8qi
11839 = build_function_type (long_long_integer_type_node,
11840 tree_cons (NULL_TREE, V8QI_type_node,
11841 endlink));
11842 tree di_ftype_v4hi
11843 = build_function_type (long_long_integer_type_node,
11844 tree_cons (NULL_TREE, V4HI_type_node,
11845 endlink));
11846 tree di_ftype_v2si
11847 = build_function_type (long_long_integer_type_node,
11848 tree_cons (NULL_TREE, V2SI_type_node,
11849 endlink));
11850 tree v2si_ftype_v4hi
11851 = build_function_type (V2SI_type_node,
11852 tree_cons (NULL_TREE, V4HI_type_node,
11853 endlink));
11854 tree v4hi_ftype_v8qi
11855 = build_function_type (V4HI_type_node,
11856 tree_cons (NULL_TREE, V8QI_type_node,
11857 endlink));
11858
11859 tree di_ftype_di_v4hi_v4hi
11860 = build_function_type (long_long_unsigned_type_node,
11861 tree_cons (NULL_TREE,
11862 long_long_unsigned_type_node,
11863 tree_cons (NULL_TREE, V4HI_type_node,
11864 tree_cons (NULL_TREE,
11865 V4HI_type_node,
11866 endlink))));
11867
11868 tree di_ftype_v4hi_v4hi
11869 = build_function_type (long_long_unsigned_type_node,
11870 tree_cons (NULL_TREE, V4HI_type_node,
11871 tree_cons (NULL_TREE, V4HI_type_node,
11872 endlink)));
11873
11874 /* Normal vector binops. */
11875 tree v8qi_ftype_v8qi_v8qi
11876 = build_function_type (V8QI_type_node,
11877 tree_cons (NULL_TREE, V8QI_type_node,
11878 tree_cons (NULL_TREE, V8QI_type_node,
11879 endlink)));
11880 tree v4hi_ftype_v4hi_v4hi
11881 = build_function_type (V4HI_type_node,
11882 tree_cons (NULL_TREE, V4HI_type_node,
11883 tree_cons (NULL_TREE, V4HI_type_node,
11884 endlink)));
11885 tree v2si_ftype_v2si_v2si
11886 = build_function_type (V2SI_type_node,
11887 tree_cons (NULL_TREE, V2SI_type_node,
11888 tree_cons (NULL_TREE, V2SI_type_node,
11889 endlink)));
11890 tree di_ftype_di_di
11891 = build_function_type (long_long_unsigned_type_node,
11892 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11893 tree_cons (NULL_TREE,
11894 long_long_unsigned_type_node,
11895 endlink)));
11896
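/* These type nodes describe the C prototypes of the builtins registered
   below; for example, v8qi_ftype_v8qi_v8qi corresponds to
   "v8qi f (v8qi, v8qi)", int_ftype_v4hi_int to "int f (v4hi, int)", and
   di_ftype_di_int_int to "long long f (long long, int, int)".  */
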
11897 /* Add all builtins that are more or less simple operations on two
11898 operands. */
11899 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11900 {
11901 /* Use one of the operands; the target can have a different mode for
11902 mask-generating compares. */
11903 enum machine_mode mode;
11904 tree type;
11905
11906 if (d->name == 0)
11907 continue;
11908
11909 mode = insn_data[d->icode].operand[1].mode;
11910
11911 switch (mode)
11912 {
11913 case V8QImode:
11914 type = v8qi_ftype_v8qi_v8qi;
11915 break;
11916 case V4HImode:
11917 type = v4hi_ftype_v4hi_v4hi;
11918 break;
11919 case V2SImode:
11920 type = v2si_ftype_v2si_v2si;
11921 break;
11922 case DImode:
11923 type = di_ftype_di_di;
11924 break;
11925
11926 default:
11927 abort ();
11928 }
11929
11930 def_mbuiltin (d->mask, d->name, type, d->code);
11931 }
11932
11933 /* Add the remaining MMX insns with somewhat more complicated types. */
11934 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11935 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11936 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11937
11938 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11939 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11940 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11941 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11942 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11943 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11944
11945 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11946 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11947 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11948 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11949 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11950 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11951
11952 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11953 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11954 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11955 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11956 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11957 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11958
11959 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11960 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11962 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11965
11966 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11967
11968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11972
11973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11974 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11976 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11981 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11982
11983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11986
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11990
11991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11995 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11997
11998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11999 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12006 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12010
12011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12013 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12015
12016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12019 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12020 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
12023 }
12024
12025 static void
12026 arm_init_builtins (void)
12027 {
12028 if (TARGET_REALLY_IWMMXT)
12029 arm_init_iwmmxt_builtins ();
12030 }
12031
12032 /* Errors in the source file can cause expand_expr to return const0_rtx
12033 where we expect a vector. To avoid crashing, use one of the vector
12034 clear instructions. */
12035
12036 static rtx
12037 safe_vector_operand (rtx x, enum machine_mode mode)
12038 {
12039 if (x != const0_rtx)
12040 return x;
12041 x = gen_reg_rtx (mode);
12042
12043 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12044 : gen_rtx_SUBREG (DImode, x, 0)));
12045 return x;
12046 }
12047
12048 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12049
12050 static rtx
12051 arm_expand_binop_builtin (enum insn_code icode,
12052 tree arglist, rtx target)
12053 {
12054 rtx pat;
12055 tree arg0 = TREE_VALUE (arglist);
12056 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12057 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12058 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12059 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12060 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12061 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12062
12063 if (VECTOR_MODE_P (mode0))
12064 op0 = safe_vector_operand (op0, mode0);
12065 if (VECTOR_MODE_P (mode1))
12066 op1 = safe_vector_operand (op1, mode1);
12067
12068 if (! target
12069 || GET_MODE (target) != tmode
12070 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12071 target = gen_reg_rtx (tmode);
12072
12073 /* In case the insn wants input operands in modes different from
12074 the result, abort. */
12075 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12076 abort ();
12077
12078 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12079 op0 = copy_to_mode_reg (mode0, op0);
12080 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12081 op1 = copy_to_mode_reg (mode1, op1);
12082
12083 pat = GEN_FCN (icode) (target, op0, op1);
12084 if (! pat)
12085 return 0;
12086 emit_insn (pat);
12087 return target;
12088 }
12089
12090 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12091
12092 static rtx
12093 arm_expand_unop_builtin (enum insn_code icode,
12094 tree arglist, rtx target, int do_load)
12095 {
12096 rtx pat;
12097 tree arg0 = TREE_VALUE (arglist);
12098 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12099 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12100 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12101
12102 if (! target
12103 || GET_MODE (target) != tmode
12104 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12105 target = gen_reg_rtx (tmode);
12106 if (do_load)
12107 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12108 else
12109 {
12110 if (VECTOR_MODE_P (mode0))
12111 op0 = safe_vector_operand (op0, mode0);
12112
12113 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12114 op0 = copy_to_mode_reg (mode0, op0);
12115 }
12116
12117 pat = GEN_FCN (icode) (target, op0);
12118 if (! pat)
12119 return 0;
12120 emit_insn (pat);
12121 return target;
12122 }
12123
12124 /* Expand an expression EXP that calls a built-in function,
12125 with result going to TARGET if that's convenient
12126 (and in mode MODE if that's convenient).
12127 SUBTARGET may be used as the target for computing one of EXP's operands.
12128 IGNORE is nonzero if the value is to be ignored. */
12129
12130 static rtx
12131 arm_expand_builtin (tree exp,
12132 rtx target,
12133 rtx subtarget ATTRIBUTE_UNUSED,
12134 enum machine_mode mode ATTRIBUTE_UNUSED,
12135 int ignore ATTRIBUTE_UNUSED)
12136 {
12137 const struct builtin_description * d;
12138 enum insn_code icode;
12139 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12140 tree arglist = TREE_OPERAND (exp, 1);
12141 tree arg0;
12142 tree arg1;
12143 tree arg2;
12144 rtx op0;
12145 rtx op1;
12146 rtx op2;
12147 rtx pat;
12148 int fcode = DECL_FUNCTION_CODE (fndecl);
12149 size_t i;
12150 enum machine_mode tmode;
12151 enum machine_mode mode0;
12152 enum machine_mode mode1;
12153 enum machine_mode mode2;
12154
12155 switch (fcode)
12156 {
12157 case ARM_BUILTIN_TEXTRMSB:
12158 case ARM_BUILTIN_TEXTRMUB:
12159 case ARM_BUILTIN_TEXTRMSH:
12160 case ARM_BUILTIN_TEXTRMUH:
12161 case ARM_BUILTIN_TEXTRMSW:
12162 case ARM_BUILTIN_TEXTRMUW:
12163 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12164 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12165 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12166 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12167 : CODE_FOR_iwmmxt_textrmw);
12168
12169 arg0 = TREE_VALUE (arglist);
12170 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12171 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12172 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12173 tmode = insn_data[icode].operand[0].mode;
12174 mode0 = insn_data[icode].operand[1].mode;
12175 mode1 = insn_data[icode].operand[2].mode;
12176
12177 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12178 op0 = copy_to_mode_reg (mode0, op0);
12179 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12180 {
12181 /* @@@ better error message */
12182 error ("selector must be an immediate");
12183 return gen_reg_rtx (tmode);
12184 }
12185 if (target == 0
12186 || GET_MODE (target) != tmode
12187 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12188 target = gen_reg_rtx (tmode);
12189 pat = GEN_FCN (icode) (target, op0, op1);
12190 if (! pat)
12191 return 0;
12192 emit_insn (pat);
12193 return target;
12194
12195 case ARM_BUILTIN_TINSRB:
12196 case ARM_BUILTIN_TINSRH:
12197 case ARM_BUILTIN_TINSRW:
12198 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12199 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12200 : CODE_FOR_iwmmxt_tinsrw);
12201 arg0 = TREE_VALUE (arglist);
12202 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12203 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12204 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12205 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12206 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12207 tmode = insn_data[icode].operand[0].mode;
12208 mode0 = insn_data[icode].operand[1].mode;
12209 mode1 = insn_data[icode].operand[2].mode;
12210 mode2 = insn_data[icode].operand[3].mode;
12211
12212 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12213 op0 = copy_to_mode_reg (mode0, op0);
12214 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12215 op1 = copy_to_mode_reg (mode1, op1);
12216 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12217 {
12218 /* @@@ better error message */
12219 error ("selector must be an immediate");
12220 return const0_rtx;
12221 }
12222 if (target == 0
12223 || GET_MODE (target) != tmode
12224 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12225 target = gen_reg_rtx (tmode);
12226 pat = GEN_FCN (icode) (target, op0, op1, op2);
12227 if (! pat)
12228 return 0;
12229 emit_insn (pat);
12230 return target;
12231
12232 case ARM_BUILTIN_SETWCX:
12233 arg0 = TREE_VALUE (arglist);
12234 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12235 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12236 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12237 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12238 return 0;
12239
12240 case ARM_BUILTIN_GETWCX:
12241 arg0 = TREE_VALUE (arglist);
12242 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12243 target = gen_reg_rtx (SImode);
12244 emit_insn (gen_iwmmxt_tmrc (target, op0));
12245 return target;
12246
12247 case ARM_BUILTIN_WSHUFH:
12248 icode = CODE_FOR_iwmmxt_wshufh;
12249 arg0 = TREE_VALUE (arglist);
12250 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12251 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12252 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12253 tmode = insn_data[icode].operand[0].mode;
12254 mode1 = insn_data[icode].operand[1].mode;
12255 mode2 = insn_data[icode].operand[2].mode;
12256
12257 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12258 op0 = copy_to_mode_reg (mode1, op0);
12259 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12260 {
12261 /* @@@ better error message */
12262 error ("mask must be an immediate");
12263 return const0_rtx;
12264 }
12265 if (target == 0
12266 || GET_MODE (target) != tmode
12267 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12268 target = gen_reg_rtx (tmode);
12269 pat = GEN_FCN (icode) (target, op0, op1);
12270 if (! pat)
12271 return 0;
12272 emit_insn (pat);
12273 return target;
12274
12275 case ARM_BUILTIN_WSADB:
12276 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12277 case ARM_BUILTIN_WSADH:
12278 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12279 case ARM_BUILTIN_WSADBZ:
12280 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12281 case ARM_BUILTIN_WSADHZ:
12282 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12283
12284 /* Several three-argument builtins. */
12285 case ARM_BUILTIN_WMACS:
12286 case ARM_BUILTIN_WMACU:
12287 case ARM_BUILTIN_WALIGN:
12288 case ARM_BUILTIN_TMIA:
12289 case ARM_BUILTIN_TMIAPH:
12290 case ARM_BUILTIN_TMIATT:
12291 case ARM_BUILTIN_TMIATB:
12292 case ARM_BUILTIN_TMIABT:
12293 case ARM_BUILTIN_TMIABB:
12294 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12295 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12296 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12297 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12298 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12299 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12300 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12301 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12302 : CODE_FOR_iwmmxt_walign);
12303 arg0 = TREE_VALUE (arglist);
12304 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12305 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12306 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12307 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12308 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12309 tmode = insn_data[icode].operand[0].mode;
12310 mode0 = insn_data[icode].operand[1].mode;
12311 mode1 = insn_data[icode].operand[2].mode;
12312 mode2 = insn_data[icode].operand[3].mode;
12313
12314 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12315 op0 = copy_to_mode_reg (mode0, op0);
12316 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12317 op1 = copy_to_mode_reg (mode1, op1);
12318 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12319 op2 = copy_to_mode_reg (mode2, op2);
12320 if (target == 0
12321 || GET_MODE (target) != tmode
12322 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12323 target = gen_reg_rtx (tmode);
12324 pat = GEN_FCN (icode) (target, op0, op1, op2);
12325 if (! pat)
12326 return 0;
12327 emit_insn (pat);
12328 return target;
12329
12330 case ARM_BUILTIN_WZERO:
12331 target = gen_reg_rtx (DImode);
12332 emit_insn (gen_iwmmxt_clrdi (target));
12333 return target;
12334
12335 default:
12336 break;
12337 }
12338
12339 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12340 if (d->code == (const enum arm_builtins) fcode)
12341 return arm_expand_binop_builtin (d->icode, arglist, target);
12342
12343 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12344 if (d->code == (const enum arm_builtins) fcode)
12345 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12346
12347 /* @@@ Should really do something sensible here. */
12348 return NULL_RTX;
12349 }
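
/* A rough sketch of the immediate-selector requirement enforced above,
   again assuming the vector_size extension; extract_byte is a hypothetical
   user function.

     typedef signed char v8qi __attribute__ ((vector_size (8)));

     int
     extract_byte (v8qi x)
     {
       return __builtin_arm_textrmub (x, 3);
     }

   Here the literal 3 satisfies the immediate predicate; passing a run-time
   variable as the second argument would instead produce the "selector must
   be an immediate" error above.  */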
12350 \f
12351 /* Recursively search through all of the blocks in a function
12352 checking to see if any of the variables created in that
12353 function match the RTX called 'orig'. If they do then
12354 replace them with the RTX called 'new'. */
12355 static void
12356 replace_symbols_in_block (tree block, rtx orig, rtx new)
12357 {
12358 for (; block; block = BLOCK_CHAIN (block))
12359 {
12360 tree sym;
12361
12362 if (!TREE_USED (block))
12363 continue;
12364
12365 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12366 {
12367 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12368 || DECL_IGNORED_P (sym)
12369 || TREE_CODE (sym) != VAR_DECL
12370 || DECL_EXTERNAL (sym)
12371 || !rtx_equal_p (DECL_RTL (sym), orig)
12372 )
12373 continue;
12374
12375 SET_DECL_RTL (sym, new);
12376 }
12377
12378 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12379 }
12380 }
12381
12382 /* Return the number (counting from 0) of
12383 the least significant set bit in MASK. */
12384
12385 inline static int
12386 number_of_first_bit_set (int mask)
12387 {
12388 int bit;
12389
12390 for (bit = 0;
12391 (mask & (1 << bit)) == 0;
12392 ++bit)
12393 continue;
12394
12395 return bit;
12396 }
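
/* For example, number_of_first_bit_set (0x18) returns 3, since bit 3 is
   the lowest bit set in 0x18.  The loop assumes MASK has at least one bit
   set.  */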
12397
12398 /* Generate code to return from a thumb function.
12399 If 'reg_containing_return_addr' is -1, then the return address is
12400 actually on the stack, at the stack pointer. */
12401 static void
12402 thumb_exit (FILE *f, int reg_containing_return_addr)
12403 {
12404 unsigned regs_available_for_popping;
12405 unsigned regs_to_pop;
12406 int pops_needed;
12407 unsigned available;
12408 unsigned required;
12409 int mode;
12410 int size;
12411 int restore_a4 = FALSE;
12412
12413 /* Compute the registers we need to pop. */
12414 regs_to_pop = 0;
12415 pops_needed = 0;
12416
12417 if (reg_containing_return_addr == -1)
12418 {
12419 regs_to_pop |= 1 << LR_REGNUM;
12420 ++pops_needed;
12421 }
12422
12423 if (TARGET_BACKTRACE)
12424 {
12425 /* Restore the (ARM) frame pointer and stack pointer. */
12426 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12427 pops_needed += 2;
12428 }
12429
12430 /* If there is nothing to pop then just emit the BX instruction and
12431 return. */
12432 if (pops_needed == 0)
12433 {
12434 if (current_function_calls_eh_return)
12435 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12436
12437 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12438 return;
12439 }
12440 /* Otherwise if we are not supporting interworking and we have not created
12441 a backtrace structure and the function was not entered in ARM mode then
12442 just pop the return address straight into the PC. */
12443 else if (!TARGET_INTERWORK
12444 && !TARGET_BACKTRACE
12445 && !is_called_in_ARM_mode (current_function_decl)
12446 && !current_function_calls_eh_return)
12447 {
12448 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12449 return;
12450 }
12451
12452 /* Find out how many of the (return) argument registers we can corrupt. */
12453 regs_available_for_popping = 0;
12454
12455 /* If returning via __builtin_eh_return, the bottom three registers
12456 all contain information needed for the return. */
12457 if (current_function_calls_eh_return)
12458 size = 12;
12459 else
12460 {
12461       /* We can deduce the registers used from the function's
12462          return value.  This is more reliable than examining
12463 regs_ever_live[] because that will be set if the register is
12464 ever used in the function, not just if the register is used
12465 to hold a return value. */
12466
12467 if (current_function_return_rtx != 0)
12468 mode = GET_MODE (current_function_return_rtx);
12469 else
12470 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12471
12472 size = GET_MODE_SIZE (mode);
12473
12474 if (size == 0)
12475 {
12476 /* In a void function we can use any argument register.
12477 In a function that returns a structure on the stack
12478 we can use the second and third argument registers. */
12479 if (mode == VOIDmode)
12480 regs_available_for_popping =
12481 (1 << ARG_REGISTER (1))
12482 | (1 << ARG_REGISTER (2))
12483 | (1 << ARG_REGISTER (3));
12484 else
12485 regs_available_for_popping =
12486 (1 << ARG_REGISTER (2))
12487 | (1 << ARG_REGISTER (3));
12488 }
12489 else if (size <= 4)
12490 regs_available_for_popping =
12491 (1 << ARG_REGISTER (2))
12492 | (1 << ARG_REGISTER (3));
12493 else if (size <= 8)
12494 regs_available_for_popping =
12495 (1 << ARG_REGISTER (3));
12496 }
12497
12498 /* Match registers to be popped with registers into which we pop them. */
12499 for (available = regs_available_for_popping,
12500 required = regs_to_pop;
12501 required != 0 && available != 0;
12502 available &= ~(available & - available),
12503 required &= ~(required & - required))
12504 -- pops_needed;
12505
12506 /* If we have any popping registers left over, remove them. */
12507 if (available > 0)
12508 regs_available_for_popping &= ~available;
12509
12510 /* Otherwise if we need another popping register we can use
12511 the fourth argument register. */
12512 else if (pops_needed)
12513 {
12514 /* If we have not found any free argument registers and
12515 reg a4 contains the return address, we must move it. */
12516 if (regs_available_for_popping == 0
12517 && reg_containing_return_addr == LAST_ARG_REGNUM)
12518 {
12519 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12520 reg_containing_return_addr = LR_REGNUM;
12521 }
12522 else if (size > 12)
12523 {
12524 /* Register a4 is being used to hold part of the return value,
12525 but we have dire need of a free, low register. */
12526 restore_a4 = TRUE;
12527
12528 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12529 }
12530
12531 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12532 {
12533 /* The fourth argument register is available. */
12534 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12535
12536 --pops_needed;
12537 }
12538 }
12539
12540 /* Pop as many registers as we can. */
12541 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12542 regs_available_for_popping);
12543
12544 /* Process the registers we popped. */
12545 if (reg_containing_return_addr == -1)
12546 {
12547 /* The return address was popped into the lowest numbered register. */
12548 regs_to_pop &= ~(1 << LR_REGNUM);
12549
12550 reg_containing_return_addr =
12551 number_of_first_bit_set (regs_available_for_popping);
12552
12553       /* Remove this register from the mask of available registers, so that
12554 the return address will not be corrupted by further pops. */
12555 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12556 }
12557
12558 /* If we popped other registers then handle them here. */
12559 if (regs_available_for_popping)
12560 {
12561 int frame_pointer;
12562
12563 /* Work out which register currently contains the frame pointer. */
12564 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12565
12566 /* Move it into the correct place. */
12567 asm_fprintf (f, "\tmov\t%r, %r\n",
12568 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12569
12570 /* (Temporarily) remove it from the mask of popped registers. */
12571 regs_available_for_popping &= ~(1 << frame_pointer);
12572 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12573
12574 if (regs_available_for_popping)
12575 {
12576 int stack_pointer;
12577
12578 	  /* We popped the stack pointer as well;
12579 	     find the register that contains it.  */
12580 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12581
12582 /* Move it into the stack register. */
12583 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12584
12585 /* At this point we have popped all necessary registers, so
12586 do not worry about restoring regs_available_for_popping
12587 to its correct value:
12588
12589 assert (pops_needed == 0)
12590 assert (regs_available_for_popping == (1 << frame_pointer))
12591 assert (regs_to_pop == (1 << STACK_POINTER)) */
12592 }
12593 else
12594 {
12595 	  /* Since we have just moved the popped value into the frame
12596 pointer, the popping register is available for reuse, and
12597 we know that we still have the stack pointer left to pop. */
12598 regs_available_for_popping |= (1 << frame_pointer);
12599 }
12600 }
12601
12602 /* If we still have registers left on the stack, but we no longer have
12603 any registers into which we can pop them, then we must move the return
12604 address into the link register and make available the register that
12605 contained it. */
12606 if (regs_available_for_popping == 0 && pops_needed > 0)
12607 {
12608 regs_available_for_popping |= 1 << reg_containing_return_addr;
12609
12610 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12611 reg_containing_return_addr);
12612
12613 reg_containing_return_addr = LR_REGNUM;
12614 }
12615
12616 /* If we have registers left on the stack then pop some more.
12617 We know that at most we will want to pop FP and SP. */
12618 if (pops_needed > 0)
12619 {
12620 int popped_into;
12621 int move_to;
12622
12623 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12624 regs_available_for_popping);
12625
12626 /* We have popped either FP or SP.
12627 Move whichever one it is into the correct register. */
12628 popped_into = number_of_first_bit_set (regs_available_for_popping);
12629 move_to = number_of_first_bit_set (regs_to_pop);
12630
12631 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12632
12633 regs_to_pop &= ~(1 << move_to);
12634
12635 --pops_needed;
12636 }
12637
12638 /* If we still have not popped everything then we must have only
12639 had one register available to us and we are now popping the SP. */
12640 if (pops_needed > 0)
12641 {
12642 int popped_into;
12643
12644 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12645 regs_available_for_popping);
12646
12647 popped_into = number_of_first_bit_set (regs_available_for_popping);
12648
12649 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12650 /*
12651 assert (regs_to_pop == (1 << STACK_POINTER))
12652 assert (pops_needed == 1)
12653 */
12654 }
12655
12656 /* If necessary restore the a4 register. */
12657 if (restore_a4)
12658 {
12659 if (reg_containing_return_addr != LR_REGNUM)
12660 {
12661 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12662 reg_containing_return_addr = LR_REGNUM;
12663 }
12664
12665 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12666 }
12667
12668 if (current_function_calls_eh_return)
12669 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12670
12671 /* Return to caller. */
12672 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12673 }
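
/* In the simplest case, where nothing needs to be popped and the return
   address is still in LR, the code above emits just "bx lr"; a
   non-interworking return with the return address on the stack is
   typically emitted as "pop {pc}".  */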
12674
12675 /* Emit code to push or pop registers to or from the stack. F is the
12676 assembly file. MASK is the registers to push or pop. PUSH is
12677 nonzero if we should push, and zero if we should pop. For debugging
12678 output, if pushing, adjust CFA_OFFSET by the amount of space added
12679 to the stack. REAL_REGS should have the same number of bits set as
12680 MASK, and will be used instead (in the same order) to describe which
12681 registers were saved - this is used to mark the save slots when we
12682 push high registers after moving them to low registers. */
12683 static void
12684 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12685 {
12686 int regno;
12687 int lo_mask = mask & 0xFF;
12688 int pushed_words = 0;
12689
12690 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12691 {
12692       /* Special case.  Do not generate a POP PC statement here; do it in
12693 	 thumb_exit ().  */
12694 thumb_exit (f, -1);
12695 return;
12696 }
12697
12698 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12699
12700 /* Look at the low registers first. */
12701 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12702 {
12703 if (lo_mask & 1)
12704 {
12705 asm_fprintf (f, "%r", regno);
12706
12707 if ((lo_mask & ~1) != 0)
12708 fprintf (f, ", ");
12709
12710 pushed_words++;
12711 }
12712 }
12713
12714 if (push && (mask & (1 << LR_REGNUM)))
12715 {
12716 /* Catch pushing the LR. */
12717 if (mask & 0xFF)
12718 fprintf (f, ", ");
12719
12720 asm_fprintf (f, "%r", LR_REGNUM);
12721
12722 pushed_words++;
12723 }
12724 else if (!push && (mask & (1 << PC_REGNUM)))
12725 {
12726 /* Catch popping the PC. */
12727 if (TARGET_INTERWORK || TARGET_BACKTRACE
12728 || current_function_calls_eh_return)
12729 {
12730 	  /* The PC is never popped directly; instead
12731 	     it is popped into r3 and then BX is used.  */
12732 fprintf (f, "}\n");
12733
12734 thumb_exit (f, -1);
12735
12736 return;
12737 }
12738 else
12739 {
12740 if (mask & 0xFF)
12741 fprintf (f, ", ");
12742
12743 asm_fprintf (f, "%r", PC_REGNUM);
12744 }
12745 }
12746
12747 fprintf (f, "}\n");
12748
12749 if (push && pushed_words && dwarf2out_do_frame ())
12750 {
12751 char *l = dwarf2out_cfi_label ();
12752 int pushed_mask = real_regs;
12753
12754 *cfa_offset += pushed_words * 4;
12755 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12756
12757 pushed_words = 0;
12758 pushed_mask = real_regs;
12759 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12760 {
12761 if (pushed_mask & 1)
12762 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
12763 }
12764 }
12765 }
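
/* For example, a call with PUSH nonzero and
   MASK = (1 << 4) | (1 << 5) | (1 << LR_REGNUM) emits roughly

     push {r4, r5, lr}

   plus, when dwarf2out_do_frame () is true, the CFA adjustment and the
   register save notes for the three pushed words.  */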
12766 \f
12767 void
12768 thumb_final_prescan_insn (rtx insn)
12769 {
12770 if (flag_print_asm_name)
12771 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12772 INSN_ADDRESSES (INSN_UID (insn)));
12773 }
12774
12775 int
12776 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12777 {
12778 unsigned HOST_WIDE_INT mask = 0xff;
12779 int i;
12780
12781 if (val == 0) /* XXX */
12782 return 0;
12783
12784 for (i = 0; i < 25; i++)
12785 if ((val & (mask << i)) == val)
12786 return 1;
12787
12788 return 0;
12789 }
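
/* For example, thumb_shiftable_const returns 1 for 0x00ff0000 (0xff
   shifted left by 16) and 0 for 0x101, whose set bits do not fit in any
   single 8-bit window.  */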
12790
12791 /* Returns nonzero if the current function contains,
12792 or might contain a far jump. */
12793 static int
12794 thumb_far_jump_used_p (void)
12795 {
12796 rtx insn;
12797
12798 /* This test is only important for leaf functions. */
12799 /* assert (!leaf_function_p ()); */
12800
12801 /* If we have already decided that far jumps may be used,
12802 do not bother checking again, and always return true even if
12803 it turns out that they are not being used. Once we have made
12804 the decision that far jumps are present (and that hence the link
12805 register will be pushed onto the stack) we cannot go back on it. */
12806 if (cfun->machine->far_jump_used)
12807 return 1;
12808
12809 /* If this function is not being called from the prologue/epilogue
12810 generation code then it must be being called from the
12811 INITIAL_ELIMINATION_OFFSET macro. */
12812 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12813 {
12814 /* In this case we know that we are being asked about the elimination
12815 of the arg pointer register. If that register is not being used,
12816 then there are no arguments on the stack, and we do not have to
12817 worry that a far jump might force the prologue to push the link
12818 register, changing the stack offsets. In this case we can just
12819 return false, since the presence of far jumps in the function will
12820 not affect stack offsets.
12821
12822 If the arg pointer is live (or if it was live, but has now been
12823 eliminated and so set to dead) then we do have to test to see if
12824 the function might contain a far jump. This test can lead to some
12825 	 false negatives, since before reload is completed the length of
12826 branch instructions is not known, so gcc defaults to returning their
12827 longest length, which in turn sets the far jump attribute to true.
12828
12829 A false negative will not result in bad code being generated, but it
12830 will result in a needless push and pop of the link register. We
12831 hope that this does not occur too often.
12832
12833 If we need doubleword stack alignment this could affect the other
12834 elimination offsets so we can't risk getting it wrong. */
12835 if (regs_ever_live [ARG_POINTER_REGNUM])
12836 cfun->machine->arg_pointer_live = 1;
12837 else if (!cfun->machine->arg_pointer_live)
12838 return 0;
12839 }
12840
12841 /* Check to see if the function contains a branch
12842 insn with the far jump attribute set. */
12843 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12844 {
12845 if (GET_CODE (insn) == JUMP_INSN
12846 /* Ignore tablejump patterns. */
12847 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12848 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12849 && get_attr_far_jump (insn) == FAR_JUMP_YES
12850 )
12851 {
12852 /* Record the fact that we have decided that
12853 the function does use far jumps. */
12854 cfun->machine->far_jump_used = 1;
12855 return 1;
12856 }
12857 }
12858
12859 return 0;
12860 }
12861
12862 /* Return nonzero if FUNC must be entered in ARM mode. */
12863 int
12864 is_called_in_ARM_mode (tree func)
12865 {
12866 if (TREE_CODE (func) != FUNCTION_DECL)
12867 abort ();
12868
12869   /* Ignore the problem about functions whose address is taken.  */
12870 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12871 return TRUE;
12872
12873 #ifdef ARM_PE
12874 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12875 #else
12876 return FALSE;
12877 #endif
12878 }
12879
12880 /* The bits which aren't usefully expanded as rtl. */
12881 const char *
12882 thumb_unexpanded_epilogue (void)
12883 {
12884 int regno;
12885 int live_regs_mask = 0;
12886 int high_regs_pushed = 0;
12887 int had_to_push_lr;
12888 int size;
12889 int mode;
12890
12891 if (return_used_this_function)
12892 return "";
12893
12894 if (IS_NAKED (arm_current_func_type ()))
12895 return "";
12896
12897 live_regs_mask = thumb_compute_save_reg_mask ();
12898 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12899
12900   /* We can deduce the registers used from the function's return value.
12901      This is more reliable than examining regs_ever_live[] because that
12902 will be set if the register is ever used in the function, not just if
12903 the register is used to hold a return value. */
12904
12905 if (current_function_return_rtx != 0)
12906 mode = GET_MODE (current_function_return_rtx);
12907 else
12908 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12909
12910 size = GET_MODE_SIZE (mode);
12911
12912   /* The prologue may have pushed some high registers to use as
12913      work registers.  For example, the testsuite file:
12914 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12915 compiles to produce:
12916 push {r4, r5, r6, r7, lr}
12917 mov r7, r9
12918 mov r6, r8
12919 push {r6, r7}
12920      as part of the prologue.  We have to undo that pushing here.  */
12921
12922 if (high_regs_pushed)
12923 {
12924 int mask = live_regs_mask & 0xff;
12925 int next_hi_reg;
12926
12927 /* The available low registers depend on the size of the value we are
12928 returning. */
12929 if (size <= 12)
12930 mask |= 1 << 3;
12931 if (size <= 8)
12932 mask |= 1 << 2;
12933
12934 if (mask == 0)
12935 /* Oh dear! We have no low registers into which we can pop
12936 high registers! */
12937 internal_error
12938 ("no low registers available for popping high registers");
12939
12940 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12941 if (live_regs_mask & (1 << next_hi_reg))
12942 break;
12943
12944 while (high_regs_pushed)
12945 {
12946 /* Find lo register(s) into which the high register(s) can
12947 be popped. */
12948 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12949 {
12950 if (mask & (1 << regno))
12951 high_regs_pushed--;
12952 if (high_regs_pushed == 0)
12953 break;
12954 }
12955
12956 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12957
12958 /* Pop the values into the low register(s). */
12959 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12960
12961 /* Move the value(s) into the high registers. */
12962 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12963 {
12964 if (mask & (1 << regno))
12965 {
12966 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12967 regno);
12968
12969 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12970 if (live_regs_mask & (1 << next_hi_reg))
12971 break;
12972 }
12973 }
12974 }
12975 live_regs_mask &= ~0x0f00;
12976 }
12977
12978 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12979 live_regs_mask &= 0xff;
12980
12981 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12982 {
12983 /* Pop the return address into the PC. */
12984 if (had_to_push_lr)
12985 live_regs_mask |= 1 << PC_REGNUM;
12986
12987 /* Either no argument registers were pushed or a backtrace
12988 structure was created which includes an adjusted stack
12989 pointer, so just pop everything. */
12990 if (live_regs_mask)
12991 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12992 live_regs_mask);
12993
12994 /* We have either just popped the return address into the
12995 PC or it was kept in LR for the entire function. */
12996 if (!had_to_push_lr)
12997 thumb_exit (asm_out_file, LR_REGNUM);
12998 }
12999 else
13000 {
13001 /* Pop everything but the return address. */
13002 if (live_regs_mask)
13003 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13004 live_regs_mask);
13005
13006 if (had_to_push_lr)
13007 {
13008 if (size > 12)
13009 {
13010 /* We have no free low regs, so save one. */
13011 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13012 LAST_ARG_REGNUM);
13013 }
13014
13015 /* Get the return address into a temporary register. */
13016 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13017 1 << LAST_ARG_REGNUM);
13018
13019 if (size > 12)
13020 {
13021 /* Move the return address to lr. */
13022 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13023 LAST_ARG_REGNUM);
13024 /* Restore the low register. */
13025 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13026 IP_REGNUM);
13027 regno = LR_REGNUM;
13028 }
13029 else
13030 regno = LAST_ARG_REGNUM;
13031 }
13032 else
13033 regno = LR_REGNUM;
13034
13035 /* Remove the argument registers that were pushed onto the stack. */
13036 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13037 SP_REGNUM, SP_REGNUM,
13038 current_function_pretend_args_size);
13039
13040 thumb_exit (asm_out_file, regno);
13041 }
13042
13043 return "";
13044 }
13045
13046 /* Functions to save and restore machine-specific function data. */
13047 static struct machine_function *
13048 arm_init_machine_status (void)
13049 {
13050 struct machine_function *machine;
13051 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13052
13053 #if ARM_FT_UNKNOWN != 0
13054 machine->func_type = ARM_FT_UNKNOWN;
13055 #endif
13056 return machine;
13057 }
13058
13059 /* Return an RTX indicating where the return address to the
13060 calling function can be found. */
13061 rtx
13062 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13063 {
13064 if (count != 0)
13065 return NULL_RTX;
13066
13067 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13068 }
13069
13070 /* Do anything needed before RTL is emitted for each function. */
13071 void
13072 arm_init_expanders (void)
13073 {
13074 /* Arrange to initialize and mark the machine per-function status. */
13075 init_machine_status = arm_init_machine_status;
13076
13077 /* This is to stop the combine pass optimizing away the alignment
13078 adjustment of va_arg. */
13079 /* ??? It is claimed that this should not be necessary. */
13080 if (cfun)
13081 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13082 }
13083
13084
13085 /* Like arm_compute_initial_elimination_offset. Simpler because
13086 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13087
13088 HOST_WIDE_INT
13089 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13090 {
13091 arm_stack_offsets *offsets;
13092
13093 offsets = arm_get_frame_offsets ();
13094
13095 switch (from)
13096 {
13097 case ARG_POINTER_REGNUM:
13098 switch (to)
13099 {
13100 case STACK_POINTER_REGNUM:
13101 return offsets->outgoing_args - offsets->saved_args;
13102
13103 case FRAME_POINTER_REGNUM:
13104 return offsets->soft_frame - offsets->saved_args;
13105
13106 case THUMB_HARD_FRAME_POINTER_REGNUM:
13107 case ARM_HARD_FRAME_POINTER_REGNUM:
13108 return offsets->saved_regs - offsets->saved_args;
13109
13110 default:
13111 abort();
13112 }
13113 break;
13114
13115 case FRAME_POINTER_REGNUM:
13116 switch (to)
13117 {
13118 case STACK_POINTER_REGNUM:
13119 return offsets->outgoing_args - offsets->soft_frame;
13120
13121 case THUMB_HARD_FRAME_POINTER_REGNUM:
13122 case ARM_HARD_FRAME_POINTER_REGNUM:
13123 return offsets->saved_regs - offsets->soft_frame;
13124
13125 default:
13126 abort();
13127 }
13128 break;
13129
13130 default:
13131 abort ();
13132 }
13133 }
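/* For example, eliminating ARG_POINTER to STACK_POINTER spans the
   whole frame (offsets->outgoing_args - offsets->saved_args), while
   eliminating to the hard frame pointer only spans the register save
   area (offsets->saved_regs - offsets->saved_args).  */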
13134
13135
13136 /* Generate the rest of a function's prologue. */
13137 void
13138 thumb_expand_prologue (void)
13139 {
13140 rtx insn, dwarf;
13141
13142 HOST_WIDE_INT amount;
13143 arm_stack_offsets *offsets;
13144 unsigned long func_type;
13145 int regno;
13146 unsigned long live_regs_mask;
13147
13148 func_type = arm_current_func_type ();
13149
13150 /* Naked functions don't have prologues. */
13151 if (IS_NAKED (func_type))
13152 return;
13153
13154 if (IS_INTERRUPT (func_type))
13155 {
13156 error ("interrupt Service Routines cannot be coded in Thumb mode");
13157 return;
13158 }
13159
13160 live_regs_mask = thumb_compute_save_reg_mask ();
13161 /* Load the pic register before setting the frame pointer, so we can use r7
13162 as a temporary work register. */
13163 if (flag_pic)
13164 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13165
13166 offsets = arm_get_frame_offsets ();
13167
13168 if (frame_pointer_needed)
13169 {
13170 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13171 stack_pointer_rtx));
13172 RTX_FRAME_RELATED_P (insn) = 1;
13173 }
13174 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13175 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13176 stack_pointer_rtx);
13177
13178 amount = offsets->outgoing_args - offsets->saved_regs;
13179 if (amount)
13180 {
13181 if (amount < 512)
13182 {
13183 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13184 GEN_INT (- amount)));
13185 RTX_FRAME_RELATED_P (insn) = 1;
13186 }
13187 else
13188 {
13189 rtx reg;
13190
13191 /* The stack decrement is too big for an immediate value in a single
13192 insn. In theory we could issue multiple subtracts, but after
13193 three of them it becomes more space efficient to place the full
13194 value in the constant pool and load into a register. (Also the
13195 ARM debugger really likes to see only one stack decrement per
13196 function). So instead we look for a scratch register into which
13197 we can load the decrement, and then we subtract this from the
13198 stack pointer. Unfortunately on the thumb the only available
13199 scratch registers are the argument registers, and we cannot use
13200 these as they may hold arguments to the function. Instead we
13201 attempt to locate a call preserved register which is used by this
13202 function. If we can find one, then we know that it will have
13203 been pushed at the start of the prologue and so we can corrupt
13204 it now. */
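/* For instance, with amount == 2048 and r4 among the call-saved
   registers pushed by the prologue, the code below emits roughly:
   ldr r4, .LCn @ .LCn holds the constant -2048
   add sp, sp, r4
   and in the unlikely case that no such register exists it borrows
   r7, preserving it in the high scratch register ip:
   mov ip, r7
   ldr r7, .LCn
   add sp, sp, r7
   mov r7, ip
   (An illustrative sketch only; the constant may also be synthesized
   without a literal pool load.)  */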
13205 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13206 if (live_regs_mask & (1 << regno)
13207 && !(frame_pointer_needed
13208 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13209 break;
13210
13211 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13212 {
13213 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13214
13215 /* Choose an arbitrary, non-argument low register. */
13216 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13217
13218 /* Save it by copying it into a high, scratch register. */
13219 emit_insn (gen_movsi (spare, reg));
13220 /* Add a USE to stop propagate_one_insn() from barfing. */
13221 emit_insn (gen_prologue_use (spare));
13222
13223 /* Decrement the stack. */
13224 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13225 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13226 stack_pointer_rtx, reg));
13227 RTX_FRAME_RELATED_P (insn) = 1;
13228 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13229 plus_constant (stack_pointer_rtx,
13230 -amount));
13231 RTX_FRAME_RELATED_P (dwarf) = 1;
13232 REG_NOTES (insn)
13233 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13234 REG_NOTES (insn));
13235
13236 /* Restore the low register's original value. */
13237 emit_insn (gen_movsi (reg, spare));
13238
13239 /* Emit a USE of the restored scratch register, so that flow
13240 analysis will not consider the restore redundant. The
13241 register won't be used again in this function and isn't
13242 restored by the epilogue. */
13243 emit_insn (gen_prologue_use (reg));
13244 }
13245 else
13246 {
13247 reg = gen_rtx_REG (SImode, regno);
13248
13249 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13250
13251 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13252 stack_pointer_rtx, reg));
13253 RTX_FRAME_RELATED_P (insn) = 1;
13254 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13255 plus_constant (stack_pointer_rtx,
13256 -amount));
13257 RTX_FRAME_RELATED_P (dwarf) = 1;
13258 REG_NOTES (insn)
13259 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13260 REG_NOTES (insn));
13261 }
13262 }
13263 /* If the frame pointer is needed, emit a special barrier that
13264 will prevent the scheduler from moving stores to the frame
13265 before the stack adjustment. */
13266 if (frame_pointer_needed)
13267 emit_insn (gen_stack_tie (stack_pointer_rtx,
13268 hard_frame_pointer_rtx));
13269 }
13270
13271 if (current_function_profile || TARGET_NO_SCHED_PRO)
13272 emit_insn (gen_blockage ());
13273
13274 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13275 if (live_regs_mask & 0xff)
13276 cfun->machine->lr_save_eliminated = 0;
13277
13278 /* If the link register is being kept alive, with the return address in it,
13279 then make sure that it does not get reused by the ce2 pass. */
13280 if (cfun->machine->lr_save_eliminated)
13281 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13282 }
13283
13284
13285 void
13286 thumb_expand_epilogue (void)
13287 {
13288 HOST_WIDE_INT amount;
13289 arm_stack_offsets *offsets;
13290 int regno;
13291
13292 /* Naked functions don't have epilogues. */
13293 if (IS_NAKED (arm_current_func_type ()))
13294 return;
13295
13296 offsets = arm_get_frame_offsets ();
13297 amount = offsets->outgoing_args - offsets->saved_regs;
13298
13299 if (frame_pointer_needed)
13300 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13301 else if (amount)
13302 {
13303 if (amount < 512)
13304 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13305 GEN_INT (amount)));
13306 else
13307 {
13308 /* r3 is always free in the epilogue. */
13309 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13310
13311 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13312 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13313 }
13314 }
13315
13316 /* Emit a USE (stack_pointer_rtx), so that
13317 the stack adjustment will not be deleted. */
13318 emit_insn (gen_prologue_use (stack_pointer_rtx));
13319
13320 if (current_function_profile || TARGET_NO_SCHED_PRO)
13321 emit_insn (gen_blockage ());
13322
13323 /* Emit a clobber for each register that will be restored in the epilogue,
13324 so that flow2 will get register lifetimes correct. */
13325 for (regno = 0; regno < 13; regno++)
13326 if (regs_ever_live[regno] && !call_used_regs[regno])
13327 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13328
13329 if (! regs_ever_live[LR_REGNUM])
13330 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13331 }
13332
13333 static void
13334 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13335 {
13336 int live_regs_mask = 0;
13337 int l_mask;
13338 int high_regs_pushed = 0;
13339 int cfa_offset = 0;
13340 int regno;
13341
13342 if (IS_NAKED (arm_current_func_type ()))
13343 return;
13344
13345 if (is_called_in_ARM_mode (current_function_decl))
13346 {
13347 const char * name;
13348
13349 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13350 abort ();
13351 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13352 abort ();
13353 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13354
13355 /* Generate code sequence to switch us into Thumb mode. */
13356 /* The .code 32 directive has already been emitted by
13357 ASM_DECLARE_FUNCTION_NAME. */
13358 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13359 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13360
13361 /* Generate a label, so that the debugger will notice the
13362 change in instruction sets. This label is also used by
13363 the assembler to bypass the ARM code when this function
13364 is called from a Thumb encoded function elsewhere in the
13365 same file. Hence the definition of STUB_NAME here must
13366 agree with the definition in gas/config/tc-arm.c. */
13367
13368 #define STUB_NAME ".real_start_of"
13369
13370 fprintf (f, "\t.code\t16\n");
13371 #ifdef ARM_PE
13372 if (arm_dllexport_name_p (name))
13373 name = arm_strip_name_encoding (name);
13374 #endif
13375 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13376 fprintf (f, "\t.thumb_func\n");
13377 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13378 }
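/* Together with the label already emitted by
   ASM_DECLARE_FUNCTION_NAME, the stub above comes out roughly as:
   .code 32
   foo:
   orr ip, pc, #1
   bx ip
   .code 16
   .globl .real_start_of_foo
   .thumb_func
   .real_start_of_foo:
   In ARM state a read of PC yields the address of the current
   instruction plus 8, i.e. the address of the Thumb code following
   the BX, so setting bit zero and branching through IP enters Thumb
   state at exactly the right place.  (The label spelling is
   illustrative; %U inserts the user label prefix.)  */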
13379
13380 if (current_function_pretend_args_size)
13381 {
13382 if (cfun->machine->uses_anonymous_args)
13383 {
13384 int num_pushes;
13385
13386 fprintf (f, "\tpush\t{");
13387
13388 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13389
13390 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13391 regno <= LAST_ARG_REGNUM;
13392 regno++)
13393 asm_fprintf (f, "%r%s", regno,
13394 regno == LAST_ARG_REGNUM ? "" : ", ");
13395
13396 fprintf (f, "}\n");
13397 }
13398 else
13399 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13400 SP_REGNUM, SP_REGNUM,
13401 current_function_pretend_args_size);
13402
13403 /* We don't need to record the stores for unwinding (would it
13404 help the debugger any if we did?), but record the change in
13405 the stack pointer. */
13406 if (dwarf2out_do_frame ())
13407 {
13408 char *l = dwarf2out_cfi_label ();
13409 cfa_offset = cfa_offset + current_function_pretend_args_size;
13410 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13411 }
13412 }
13413
13414 live_regs_mask = thumb_compute_save_reg_mask ();
13415 /* Just low regs and lr. */
13416 l_mask = live_regs_mask & 0x40ff;
13417
13418 if (TARGET_BACKTRACE)
13419 {
13420 int offset;
13421 int work_register;
13422
13423 /* We have been asked to create a stack backtrace structure.
13424 The code looks like this:
13425
13426 0 .align 2
13427 0 func:
13428 0 sub SP, #16 Reserve space for 4 registers.
13429 2 push {R7} Push low registers.
13430 4 add R7, SP, #20 Get the stack pointer before the push.
13431 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13432 8 mov R7, PC Get hold of the start of this code plus 12.
13433 10 str R7, [SP, #16] Store it.
13434 12 mov R7, FP Get hold of the current frame pointer.
13435 14 str R7, [SP, #4] Store it.
13436 16 mov R7, LR Get hold of the current return address.
13437 18 str R7, [SP, #12] Store it.
13438 20 add R7, SP, #16 Point at the start of the backtrace structure.
13439 22 mov FP, R7 Put this value into the frame pointer. */
13440
13441 work_register = thumb_find_work_register (live_regs_mask);
13442
13443 asm_fprintf
13444 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13445 SP_REGNUM, SP_REGNUM);
13446
13447 if (dwarf2out_do_frame ())
13448 {
13449 char *l = dwarf2out_cfi_label ();
13450 cfa_offset = cfa_offset + 16;
13451 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13452 }
13453
13454 if (l_mask)
13455 {
13456 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13457 offset = bit_count (l_mask);
13458 }
13459 else
13460 offset = 0;
13461
13462 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13463 offset + 16 + current_function_pretend_args_size);
13464
13465 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13466 offset + 4);
13467
13468 /* Make sure that the instruction fetching the PC is in the right place
13469 to calculate "start of backtrace creation code + 12". */
13470 if (l_mask)
13471 {
13472 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13473 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13474 offset + 12);
13475 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13476 ARM_HARD_FRAME_POINTER_REGNUM);
13477 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13478 offset);
13479 }
13480 else
13481 {
13482 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13483 ARM_HARD_FRAME_POINTER_REGNUM);
13484 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13485 offset);
13486 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13487 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13488 offset + 12);
13489 }
13490
13491 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13492 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13493 offset + 8);
13494 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13495 offset + 12);
13496 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13497 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13498 }
13499 else if (l_mask)
13500 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13501
13502 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13503
13504 if (high_regs_pushed)
13505 {
13506 int pushable_regs = 0;
13507 int next_hi_reg;
13508
13509 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13510 if (live_regs_mask & (1 << next_hi_reg))
13511 break;
13512
13513 pushable_regs = l_mask & 0xff;
13514
13515 if (pushable_regs == 0)
13516 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13517
13518 while (high_regs_pushed > 0)
13519 {
13520 int real_regs_mask = 0;
13521
13522 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13523 {
13524 if (pushable_regs & (1 << regno))
13525 {
13526 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13527
13528 high_regs_pushed--;
13529 real_regs_mask |= (1 << next_hi_reg);
13530
13531 if (high_regs_pushed)
13532 {
13533 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13534 next_hi_reg--)
13535 if (live_regs_mask & (1 << next_hi_reg))
13536 break;
13537 }
13538 else
13539 {
13540 pushable_regs &= ~((1 << regno) - 1);
13541 break;
13542 }
13543 }
13544 }
13545
13546 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13547 }
13548 }
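/* For example, with r8 and r9 live and r6/r7 among the pushable low
   registers, the loop above emits:
   mov r7, r9
   mov r6, r8
   push {r6, r7}
   which is exactly the prologue sequence that
   thumb_unexpanded_epilogue later undoes.  */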
13549 }
13550
13551 /* Handle the case of a double word load into a low register from
13552 a computed memory address. The computed address may involve a
13553 register which is overwritten by the load. */
13554 const char *
13555 thumb_load_double_from_address (rtx *operands)
13556 {
13557 rtx addr;
13558 rtx base;
13559 rtx offset;
13560 rtx arg1;
13561 rtx arg2;
13562
13563 if (GET_CODE (operands[0]) != REG)
13564 abort ();
13565
13566 if (GET_CODE (operands[1]) != MEM)
13567 abort ();
13568
13569 /* Get the memory address. */
13570 addr = XEXP (operands[1], 0);
13571
13572 /* Work out how the memory address is computed. */
13573 switch (GET_CODE (addr))
13574 {
13575 case REG:
13576 operands[2] = gen_rtx_MEM (SImode,
13577 plus_constant (XEXP (operands[1], 0), 4));
13578
13579 if (REGNO (operands[0]) == REGNO (addr))
13580 {
13581 output_asm_insn ("ldr\t%H0, %2", operands);
13582 output_asm_insn ("ldr\t%0, %1", operands);
13583 }
13584 else
13585 {
13586 output_asm_insn ("ldr\t%0, %1", operands);
13587 output_asm_insn ("ldr\t%H0, %2", operands);
13588 }
13589 break;
13590
13591 case CONST:
13592 /* Compute <address> + 4 for the high order load. */
13593 operands[2] = gen_rtx_MEM (SImode,
13594 plus_constant (XEXP (operands[1], 0), 4));
13595
13596 output_asm_insn ("ldr\t%0, %1", operands);
13597 output_asm_insn ("ldr\t%H0, %2", operands);
13598 break;
13599
13600 case PLUS:
13601 arg1 = XEXP (addr, 0);
13602 arg2 = XEXP (addr, 1);
13603
13604 if (CONSTANT_P (arg1))
13605 base = arg2, offset = arg1;
13606 else
13607 base = arg1, offset = arg2;
13608
13609 if (GET_CODE (base) != REG)
13610 abort ();
13611
13612 /* Catch the case of <address> = <reg> + <reg> */
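/* For example, loading a doubleword into r2/r3 from [r0 + r1] emits:
   add r3, r0, r1
   ldr r2, [r3, #0]
   ldr r3, [r3, #4]
   The address is formed in the high half of the destination first so
   that neither the base nor the offset register is clobbered before
   it has been used.  (Register numbers are illustrative.)  */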
13613 if (GET_CODE (offset) == REG)
13614 {
13615 int reg_offset = REGNO (offset);
13616 int reg_base = REGNO (base);
13617 int reg_dest = REGNO (operands[0]);
13618
13619 /* Add the base and offset registers together into the
13620 higher destination register. */
13621 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r\n",
13622 reg_dest + 1, reg_base, reg_offset);
13623
13624 /* Load the lower destination register from the address in
13625 the higher destination register. */
13626 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]\n",
13627 reg_dest, reg_dest + 1);
13628
13629 /* Load the higher destination register from its own address
13630 plus 4. */
13631 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]\n",
13632 reg_dest + 1, reg_dest + 1);
13633 }
13634 else
13635 {
13636 /* Compute <address> + 4 for the high order load. */
13637 operands[2] = gen_rtx_MEM (SImode,
13638 plus_constant (XEXP (operands[1], 0), 4));
13639
13640 /* If the computed address is held in the low order register
13641 then load the high order register first, otherwise always
13642 load the low order register first. */
13643 if (REGNO (operands[0]) == REGNO (base))
13644 {
13645 output_asm_insn ("ldr\t%H0, %2", operands);
13646 output_asm_insn ("ldr\t%0, %1", operands);
13647 }
13648 else
13649 {
13650 output_asm_insn ("ldr\t%0, %1", operands);
13651 output_asm_insn ("ldr\t%H0, %2", operands);
13652 }
13653 }
13654 break;
13655
13656 case LABEL_REF:
13657 /* With no registers to worry about we can just load the value
13658 directly. */
13659 operands[2] = gen_rtx_MEM (SImode,
13660 plus_constant (XEXP (operands[1], 0), 4));
13661
13662 output_asm_insn ("ldr\t%H0, %2", operands);
13663 output_asm_insn ("ldr\t%0, %1", operands);
13664 break;
13665
13666 default:
13667 abort ();
13668 break;
13669 }
13670
13671 return "";
13672 }
13673
13674 const char *
13675 thumb_output_move_mem_multiple (int n, rtx *operands)
13676 {
13677 rtx tmp;
13678
13679 switch (n)
13680 {
13681 case 2:
13682 if (REGNO (operands[4]) > REGNO (operands[5]))
13683 {
13684 tmp = operands[4];
13685 operands[4] = operands[5];
13686 operands[5] = tmp;
13687 }
13688 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13689 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13690 break;
13691
13692 case 3:
13693 if (REGNO (operands[4]) > REGNO (operands[5]))
13694 {
13695 tmp = operands[4];
13696 operands[4] = operands[5];
13697 operands[5] = tmp;
13698 }
13699 if (REGNO (operands[5]) > REGNO (operands[6]))
13700 {
13701 tmp = operands[5];
13702 operands[5] = operands[6];
13703 operands[6] = tmp;
13704 }
13705 if (REGNO (operands[4]) > REGNO (operands[5]))
13706 {
13707 tmp = operands[4];
13708 operands[4] = operands[5];
13709 operands[5] = tmp;
13710 }
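/* The three compare-and-swap steps above sort operands[4..6] into
   ascending register order, so that the ldmia/stmia register lists
   below are printed lowest-numbered first, as the assembler expects.  */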
13711
13712 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13713 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13714 break;
13715
13716 default:
13717 abort ();
13718 }
13719
13720 return "";
13721 }
13722
13723 /* Output a call-via instruction for thumb state. */
13724 const char *
13725 thumb_call_via_reg (rtx reg)
13726 {
13727 int regno = REGNO (reg);
13728 rtx *labelp;
13729
13730 gcc_assert (regno < SP_REGNUM);
13731
13732 /* If we are in the normal text section we can use a single instance
13733 per compilation unit. If we are doing function sections, then we need
13734 an entry per section, since we can't rely on reachability. */
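/* The call site below becomes "bl .Ln", and the matching stub
   ".Ln: bx rN" is emitted later (see arm_file_end, or once per
   section when function sections are in use).  This gives pre-ARMv5
   Thumb code, which has no "blx <reg>" instruction, a way to call
   through a register.  */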
13735 if (in_text_section ())
13736 {
13737 thumb_call_reg_needed = 1;
13738
13739 if (thumb_call_via_label[regno] == NULL)
13740 thumb_call_via_label[regno] = gen_label_rtx ();
13741 labelp = thumb_call_via_label + regno;
13742 }
13743 else
13744 {
13745 if (cfun->machine->call_via[regno] == NULL)
13746 cfun->machine->call_via[regno] = gen_label_rtx ();
13747 labelp = cfun->machine->call_via + regno;
13748 }
13749
13750 output_asm_insn ("bl\t%a0", labelp);
13751 return "";
13752 }
13753
13754 /* Routines for generating rtl. */
13755 void
13756 thumb_expand_movmemqi (rtx *operands)
13757 {
13758 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13759 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13760 HOST_WIDE_INT len = INTVAL (operands[2]);
13761 HOST_WIDE_INT offset = 0;
13762
13763 while (len >= 12)
13764 {
13765 emit_insn (gen_movmem12b (out, in, out, in));
13766 len -= 12;
13767 }
13768
13769 if (len >= 8)
13770 {
13771 emit_insn (gen_movmem8b (out, in, out, in));
13772 len -= 8;
13773 }
13774
13775 if (len >= 4)
13776 {
13777 rtx reg = gen_reg_rtx (SImode);
13778 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13779 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13780 len -= 4;
13781 offset += 4;
13782 }
13783
13784 if (len >= 2)
13785 {
13786 rtx reg = gen_reg_rtx (HImode);
13787 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13788 plus_constant (in, offset))));
13789 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13790 reg));
13791 len -= 2;
13792 offset += 2;
13793 }
13794
13795 if (len)
13796 {
13797 rtx reg = gen_reg_rtx (QImode);
13798 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13799 plus_constant (in, offset))));
13800 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13801 reg));
13802 }
13803 }
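/* As an example, a 23-byte copy comes out as one 12-byte and one
   8-byte load/store-multiple block (each of which advances both
   pointers), followed by a halfword move at offset 0 and a byte move
   at offset 2 relative to the updated pointers.  */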
13804
13805 void
13806 thumb_reload_out_hi (rtx *operands)
13807 {
13808 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13809 }
13810
13811 /* Handle reading a half-word from memory during reload. */
13812 void
13813 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13814 {
13815 abort ();
13816 }
13817
13818 /* Return the length of a function name prefix
13819 that starts with the character 'c'. */
13820 static int
13821 arm_get_strip_length (int c)
13822 {
13823 switch (c)
13824 {
13825 ARM_NAME_ENCODING_LENGTHS
13826 default: return 0;
13827 }
13828 }
13829
13830 /* Return a pointer to a function's name with any
13831 and all prefix encodings stripped from it. */
13832 const char *
13833 arm_strip_name_encoding (const char *name)
13834 {
13835 int skip;
13836
13837 while ((skip = arm_get_strip_length (* name)))
13838 name += skip;
13839
13840 return name;
13841 }
13842
13843 /* If there is a '*' anywhere in the name's prefix, then
13844 emit the stripped name verbatim, otherwise prepend an
13845 underscore if leading underscores are being used. */
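/* For instance, assuming '*' is one of the prefix characters listed
   in ARM_NAME_ENCODING_LENGTHS, an encoded name such as "*foo" is
   emitted verbatim as "foo", while a name with no '*' in its prefix
   goes through %U and so picks up the user label prefix ("_foo" on
   targets that use a leading underscore).  */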
13846 void
13847 arm_asm_output_labelref (FILE *stream, const char *name)
13848 {
13849 int skip;
13850 int verbatim = 0;
13851
13852 while ((skip = arm_get_strip_length (* name)))
13853 {
13854 verbatim |= (*name == '*');
13855 name += skip;
13856 }
13857
13858 if (verbatim)
13859 fputs (name, stream);
13860 else
13861 asm_fprintf (stream, "%U%s", name);
13862 }
13863
13864 static void
13865 arm_file_end (void)
13866 {
13867 int regno;
13868
13869 if (! thumb_call_reg_needed)
13870 return;
13871
13872 text_section ();
13873 asm_fprintf (asm_out_file, "\t.code 16\n");
13874 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13875
13876 for (regno = 0; regno < SP_REGNUM; regno++)
13877 {
13878 rtx label = thumb_call_via_label[regno];
13879
13880 if (label != 0)
13881 {
13882 targetm.asm_out.internal_label (asm_out_file, "L",
13883 CODE_LABEL_NUMBER (label));
13884 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13885 }
13886 }
13887 }
13888
13889 rtx aof_pic_label;
13890
13891 #ifdef AOF_ASSEMBLER
13892 /* Special functions only needed when producing AOF syntax assembler. */
13893
13894 struct pic_chain
13895 {
13896 struct pic_chain * next;
13897 const char * symname;
13898 };
13899
13900 static struct pic_chain * aof_pic_chain = NULL;
13901
13902 rtx
13903 aof_pic_entry (rtx x)
13904 {
13905 struct pic_chain ** chainp;
13906 int offset;
13907
13908 if (aof_pic_label == NULL_RTX)
13909 {
13910 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13911 }
13912
13913 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13914 offset += 4, chainp = &(*chainp)->next)
13915 if ((*chainp)->symname == XSTR (x, 0))
13916 return plus_constant (aof_pic_label, offset);
13917
13918 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13919 (*chainp)->next = NULL;
13920 (*chainp)->symname = XSTR (x, 0);
13921 return plus_constant (aof_pic_label, offset);
13922 }
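/* Each distinct symbol is given the next 4-byte slot: the first lives
   at x$adcons + 0, the second at x$adcons + 4, and so on, matching
   the order in which aof_dump_pic_table emits the DCD entries.  */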
13923
13924 void
13925 aof_dump_pic_table (FILE *f)
13926 {
13927 struct pic_chain * chain;
13928
13929 if (aof_pic_chain == NULL)
13930 return;
13931
13932 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13933 PIC_OFFSET_TABLE_REGNUM,
13934 PIC_OFFSET_TABLE_REGNUM);
13935 fputs ("|x$adcons|\n", f);
13936
13937 for (chain = aof_pic_chain; chain; chain = chain->next)
13938 {
13939 fputs ("\tDCD\t", f);
13940 assemble_name (f, chain->symname);
13941 fputs ("\n", f);
13942 }
13943 }
13944
13945 int arm_text_section_count = 1;
13946
13947 char *
13948 aof_text_section (void)
13949 {
13950 static char buf[100];
13951 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13952 arm_text_section_count++);
13953 if (flag_pic)
13954 strcat (buf, ", PIC, REENTRANT");
13955 return buf;
13956 }
13957
13958 static int arm_data_section_count = 1;
13959
13960 char *
13961 aof_data_section (void)
13962 {
13963 static char buf[100];
13964 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13965 return buf;
13966 }
13967
13968 /* The AOF assembler is religiously strict about declarations of
13969 imported and exported symbols, so that it is impossible to declare
13970 a function as imported near the beginning of the file, and then to
13971 export it later on. It is, however, possible to delay the decision
13972 until all the functions in the file have been compiled. To get
13973 around this, we maintain a list of the imports and exports, and
13974 delete from it any that are subsequently defined. At the end of
13975 compilation we spit the remainder of the list out before the END
13976 directive. */
13977
13978 struct import
13979 {
13980 struct import * next;
13981 const char * name;
13982 };
13983
13984 static struct import * imports_list = NULL;
13985
13986 void
13987 aof_add_import (const char *name)
13988 {
13989 struct import * new;
13990
13991 for (new = imports_list; new; new = new->next)
13992 if (new->name == name)
13993 return;
13994
13995 new = (struct import *) xmalloc (sizeof (struct import));
13996 new->next = imports_list;
13997 imports_list = new;
13998 new->name = name;
13999 }
14000
14001 void
14002 aof_delete_import (const char *name)
14003 {
14004 struct import ** old;
14005
14006 for (old = &imports_list; *old; old = & (*old)->next)
14007 {
14008 if ((*old)->name == name)
14009 {
14010 *old = (*old)->next;
14011 return;
14012 }
14013 }
14014 }
14015
14016 int arm_main_function = 0;
14017
14018 static void
14019 aof_dump_imports (FILE *f)
14020 {
14021 /* The AOF assembler needs this to cause the startup code to be extracted
14022 from the library. Bringing in __main causes the whole thing to work
14023 automagically. */
14024 if (arm_main_function)
14025 {
14026 text_section ();
14027 fputs ("\tIMPORT __main\n", f);
14028 fputs ("\tDCD __main\n", f);
14029 }
14030
14031 /* Now dump the remaining imports. */
14032 while (imports_list)
14033 {
14034 fprintf (f, "\tIMPORT\t");
14035 assemble_name (f, imports_list->name);
14036 fputc ('\n', f);
14037 imports_list = imports_list->next;
14038 }
14039 }
14040
14041 static void
14042 aof_globalize_label (FILE *stream, const char *name)
14043 {
14044 default_globalize_label (stream, name);
14045 if (! strcmp (name, "main"))
14046 arm_main_function = 1;
14047 }
14048
14049 static void
14050 aof_file_start (void)
14051 {
14052 fputs ("__r0\tRN\t0\n", asm_out_file);
14053 fputs ("__a1\tRN\t0\n", asm_out_file);
14054 fputs ("__a2\tRN\t1\n", asm_out_file);
14055 fputs ("__a3\tRN\t2\n", asm_out_file);
14056 fputs ("__a4\tRN\t3\n", asm_out_file);
14057 fputs ("__v1\tRN\t4\n", asm_out_file);
14058 fputs ("__v2\tRN\t5\n", asm_out_file);
14059 fputs ("__v3\tRN\t6\n", asm_out_file);
14060 fputs ("__v4\tRN\t7\n", asm_out_file);
14061 fputs ("__v5\tRN\t8\n", asm_out_file);
14062 fputs ("__v6\tRN\t9\n", asm_out_file);
14063 fputs ("__sl\tRN\t10\n", asm_out_file);
14064 fputs ("__fp\tRN\t11\n", asm_out_file);
14065 fputs ("__ip\tRN\t12\n", asm_out_file);
14066 fputs ("__sp\tRN\t13\n", asm_out_file);
14067 fputs ("__lr\tRN\t14\n", asm_out_file);
14068 fputs ("__pc\tRN\t15\n", asm_out_file);
14069 fputs ("__f0\tFN\t0\n", asm_out_file);
14070 fputs ("__f1\tFN\t1\n", asm_out_file);
14071 fputs ("__f2\tFN\t2\n", asm_out_file);
14072 fputs ("__f3\tFN\t3\n", asm_out_file);
14073 fputs ("__f4\tFN\t4\n", asm_out_file);
14074 fputs ("__f5\tFN\t5\n", asm_out_file);
14075 fputs ("__f6\tFN\t6\n", asm_out_file);
14076 fputs ("__f7\tFN\t7\n", asm_out_file);
14077 text_section ();
14078 }
14079
14080 static void
14081 aof_file_end (void)
14082 {
14083 if (flag_pic)
14084 aof_dump_pic_table (asm_out_file);
14085 arm_file_end ();
14086 aof_dump_imports (asm_out_file);
14087 fputs ("\tEND\n", asm_out_file);
14088 }
14089 #endif /* AOF_ASSEMBLER */
14090
14091 #ifndef ARM_PE
14092 /* Symbols in the text segment can be accessed without indirecting via the
14093 constant pool; it may take an extra binary operation, but this is still
14094 faster than indirecting via memory. Don't do this when not optimizing,
14095 since we won't be calculating all of the offsets necessary to do this
14096 simplification. */
14097
14098 static void
14099 arm_encode_section_info (tree decl, rtx rtl, int first)
14100 {
14101 /* This doesn't work with AOF syntax, since the string table may be in
14102 a different AREA. */
14103 #ifndef AOF_ASSEMBLER
14104 if (optimize > 0 && TREE_CONSTANT (decl))
14105 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14106 #endif
14107
14108 /* If we are referencing a function that is weak then encode a long call
14109 flag in the function name, otherwise if the function is static or
14110 known to be defined in this file then encode a short call flag.
14111 if (first && DECL_P (decl))
14112 {
14113 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14114 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14115 else if (! TREE_PUBLIC (decl))
14116 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14117 }
14118 }
14119 #endif /* !ARM_PE */
14120
14121 static void
14122 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14123 {
14124 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14125 && !strcmp (prefix, "L"))
14126 {
14127 arm_ccfsm_state = 0;
14128 arm_target_insn = NULL;
14129 }
14130 default_internal_label (stream, prefix, labelno);
14131 }
14132
14133 /* Output code to add DELTA to the first argument, and then jump
14134 to FUNCTION. Used for C++ multiple inheritance. */
14135 static void
14136 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14137 HOST_WIDE_INT delta,
14138 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14139 tree function)
14140 {
14141 static int thunk_label = 0;
14142 char label[256];
14143 int mi_delta = delta;
14144 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14145 int shift = 0;
14146 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14147 ? 1 : 0);
14148 if (mi_delta < 0)
14149 mi_delta = - mi_delta;
14150 if (TARGET_THUMB)
14151 {
14152 int labelno = thunk_label++;
14153 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14154 fputs ("\tldr\tr12, ", file);
14155 assemble_name (file, label);
14156 fputc ('\n', file);
14157 }
14158 while (mi_delta != 0)
14159 {
14160 if ((mi_delta & (3 << shift)) == 0)
14161 shift += 2;
14162 else
14163 {
14164 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14165 mi_op, this_regno, this_regno,
14166 mi_delta & (0xff << shift));
14167 mi_delta &= ~(0xff << shift);
14168 shift += 8;
14169 }
14170 }
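/* The loop above splits the displacement into chunks that each fit an
   ARM immediate (an 8-bit value at an even shift position).  A delta
   of 0x10004 applied to r0, for example, becomes:
   add r0, r0, #4
   add r0, r0, #65536
   (with "sub" instead of "add" for negative deltas).  */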
14171 if (TARGET_THUMB)
14172 {
14173 fprintf (file, "\tbx\tr12\n");
14174 ASM_OUTPUT_ALIGN (file, 2);
14175 assemble_name (file, label);
14176 fputs (":\n", file);
14177 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14178 }
14179 else
14180 {
14181 fputs ("\tb\t", file);
14182 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14183 if (NEED_PLT_RELOC)
14184 fputs ("(PLT)", file);
14185 fputc ('\n', file);
14186 }
14187 }
14188
14189 int
14190 arm_emit_vector_const (FILE *file, rtx x)
14191 {
14192 int i;
14193 const char * pattern;
14194
14195 if (GET_CODE (x) != CONST_VECTOR)
14196 abort ();
14197
14198 switch (GET_MODE (x))
14199 {
14200 case V2SImode: pattern = "%08x"; break;
14201 case V4HImode: pattern = "%04x"; break;
14202 case V8QImode: pattern = "%02x"; break;
14203 default: abort ();
14204 }
14205
14206 fprintf (file, "0x");
14207 for (i = CONST_VECTOR_NUNITS (x); i--;)
14208 {
14209 rtx element;
14210
14211 element = CONST_VECTOR_ELT (x, i);
14212 fprintf (file, pattern, INTVAL (element));
14213 }
14214
14215 return 1;
14216 }
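/* For example a V4HImode vector with elements 1, 2, 3 and 4 prints as
   "0x0004000300020001": the elements are emitted from the
   highest-numbered one downwards, each zero-padded to its width.  */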
14217
14218 const char *
14219 arm_output_load_gr (rtx *operands)
14220 {
14221 rtx reg;
14222 rtx offset;
14223 rtx wcgr;
14224 rtx sum;
14225
14226 if (GET_CODE (operands [1]) != MEM
14227 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14228 || GET_CODE (reg = XEXP (sum, 0)) != REG
14229 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14230 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14231 return "wldrw%?\t%0, %1";
14232
14233 /* Fix up an out-of-range load of a GR register. */
14234 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14235 wcgr = operands[0];
14236 operands[0] = reg;
14237 output_asm_insn ("ldr%?\t%0, %1", operands);
14238
14239 operands[0] = wcgr;
14240 operands[1] = reg;
14241 output_asm_insn ("tmcr%?\t%0, %1", operands);
14242 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14243
14244 return "";
14245 }
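/* For a load of, say, wcgr0 from [r1, #2048] the expansion above
   comes out roughly as:
   str r1, [sp, #-4]! @ Start of GR load expansion
   ldr r1, [r1, #2048]
   tmcr wcgr0, r1
   ldr r1, [sp], #4 @ End of GR load expansion
   using the base register as a scratch for the out-of-range offset.
   (Register names are illustrative.)  */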
14246
14247 static rtx
14248 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14249 int incoming ATTRIBUTE_UNUSED)
14250 {
14251 #if 0
14252 /* FIXME: The ARM backend has special code to handle structure
14253 returns, and will reserve its own hidden first argument. So
14254 if this macro is enabled a *second* hidden argument will be
14255 reserved, which will break binary compatibility with old
14256 toolchains and also thunk handling. One day this should be
14257 fixed. */
14258 return 0;
14259 #else
14260 /* Register in which address to store a structure value
14261 is passed to a function. */
14262 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14263 #endif
14264 }
14265
14266 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14267
14268 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14269 named arg and all anonymous args onto the stack.
14270 XXX I know the prologue shouldn't be pushing registers, but it is faster
14271 that way. */
14272
14273 static void
14274 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14275 enum machine_mode mode ATTRIBUTE_UNUSED,
14276 tree type ATTRIBUTE_UNUSED,
14277 int *pretend_size,
14278 int second_time ATTRIBUTE_UNUSED)
14279 {
14280 cfun->machine->uses_anonymous_args = 1;
14281 if (cum->nregs < NUM_ARG_REGS)
14282 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14283 }
14284
14285 /* Return nonzero if the CONSUMER instruction (a store) does not need
14286 PRODUCER's value to calculate the address. */
14287
14288 int
14289 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14290 {
14291 rtx value = PATTERN (producer);
14292 rtx addr = PATTERN (consumer);
14293
14294 if (GET_CODE (value) == COND_EXEC)
14295 value = COND_EXEC_CODE (value);
14296 if (GET_CODE (value) == PARALLEL)
14297 value = XVECEXP (value, 0, 0);
14298 value = XEXP (value, 0);
14299 if (GET_CODE (addr) == COND_EXEC)
14300 addr = COND_EXEC_CODE (addr);
14301 if (GET_CODE (addr) == PARALLEL)
14302 addr = XVECEXP (addr, 0, 0);
14303 addr = XEXP (addr, 0);
14304
14305 return !reg_overlap_mentioned_p (value, addr);
14306 }
14307
14308 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14309 have an early register shift value or amount dependency on the
14310 result of PRODUCER. */
14311
14312 int
14313 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14314 {
14315 rtx value = PATTERN (producer);
14316 rtx op = PATTERN (consumer);
14317 rtx early_op;
14318
14319 if (GET_CODE (value) == COND_EXEC)
14320 value = COND_EXEC_CODE (value);
14321 if (GET_CODE (value) == PARALLEL)
14322 value = XVECEXP (value, 0, 0);
14323 value = XEXP (value, 0);
14324 if (GET_CODE (op) == COND_EXEC)
14325 op = COND_EXEC_CODE (op);
14326 if (GET_CODE (op) == PARALLEL)
14327 op = XVECEXP (op, 0, 0);
14328 op = XEXP (op, 1);
14329
14330 early_op = XEXP (op, 0);
14331 /* This is either an actual independent shift, or a shift applied to
14332 the first operand of another operation. We want the whole shift
14333 operation. */
14334 if (GET_CODE (early_op) == REG)
14335 early_op = op;
14336
14337 return !reg_overlap_mentioned_p (value, early_op);
14338 }
14339
14340 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14341 have an early register shift value dependency on the result of
14342 PRODUCER. */
14343
14344 int
14345 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14346 {
14347 rtx value = PATTERN (producer);
14348 rtx op = PATTERN (consumer);
14349 rtx early_op;
14350
14351 if (GET_CODE (value) == COND_EXEC)
14352 value = COND_EXEC_CODE (value);
14353 if (GET_CODE (value) == PARALLEL)
14354 value = XVECEXP (value, 0, 0);
14355 value = XEXP (value, 0);
14356 if (GET_CODE (op) == COND_EXEC)
14357 op = COND_EXEC_CODE (op);
14358 if (GET_CODE (op) == PARALLEL)
14359 op = XVECEXP (op, 0, 0);
14360 op = XEXP (op, 1);
14361
14362 early_op = XEXP (op, 0);
14363
14364 /* This is either an actual independent shift, or a shift applied to
14365 the first operand of another operation. We want the value being
14366 shifted, in either case. */
14367 if (GET_CODE (early_op) != REG)
14368 early_op = XEXP (early_op, 0);
14369
14370 return !reg_overlap_mentioned_p (value, early_op);
14371 }
14372
14373 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14374 have an early register mult dependency on the result of
14375 PRODUCER. */
14376
14377 int
14378 arm_no_early_mul_dep (rtx producer, rtx consumer)
14379 {
14380 rtx value = PATTERN (producer);
14381 rtx op = PATTERN (consumer);
14382
14383 if (GET_CODE (value) == COND_EXEC)
14384 value = COND_EXEC_CODE (value);
14385 if (GET_CODE (value) == PARALLEL)
14386 value = XVECEXP (value, 0, 0);
14387 value = XEXP (value, 0);
14388 if (GET_CODE (op) == COND_EXEC)
14389 op = COND_EXEC_CODE (op);
14390 if (GET_CODE (op) == PARALLEL)
14391 op = XVECEXP (op, 0, 0);
14392 op = XEXP (op, 1);
14393
14394 return (GET_CODE (op) == PLUS
14395 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14396 }
14397
14398
14399 /* We can't rely on the caller doing the proper promotion when
14400 using APCS or ATPCS. */
14401
14402 static bool
14403 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14404 {
14405 return !TARGET_AAPCS_BASED;
14406 }
14407
14408
14409 /* AAPCS based ABIs use short enums by default. */
14410
14411 static bool
14412 arm_default_short_enums (void)
14413 {
14414 return TARGET_AAPCS_BASED;
14415 }
14416
14417
14418 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14419
14420 static bool
14421 arm_align_anon_bitfield (void)
14422 {
14423 return TARGET_AAPCS_BASED;
14424 }
14425
14426
14427 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14428
14429 static tree
14430 arm_cxx_guard_type (void)
14431 {
14432 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14433 }
14434
14435
14436 /* The EABI says test the least significant bit of a guard variable. */
14437
14438 static bool
14439 arm_cxx_guard_mask_bit (void)
14440 {
14441 return TARGET_AAPCS_BASED;
14442 }
14443
14444
14445 /* The EABI specifies that all array cookies are 8 bytes long. */
14446
14447 static tree
14448 arm_get_cookie_size (tree type)
14449 {
14450 tree size;
14451
14452 if (!TARGET_AAPCS_BASED)
14453 return default_cxx_get_cookie_size (type);
14454
14455 size = build_int_cst (sizetype, 8);
14456 return size;
14457 }
14458
14459
14460 /* The EABI says that array cookies should also contain the element size. */
14461
14462 static bool
14463 arm_cookie_has_size (void)
14464 {
14465 return TARGET_AAPCS_BASED;
14466 }
14467
14468
14469 /* The EABI says constructors and destructors should return a pointer to
14470 the object constructed/destroyed. */
14471
14472 static bool
14473 arm_cxx_cdtor_returns_this (void)
14474 {
14475 return TARGET_AAPCS_BASED;
14476 }
14477
14478 /* The EABI says that an inline function may never be the key
14479 method. */
14480
14481 static bool
14482 arm_cxx_key_method_may_be_inline (void)
14483 {
14484 return !TARGET_AAPCS_BASED;
14485 }
14486
14487 /* The EABI says that the virtual table, etc., for a class must be
14488 exported if it has a key method. The EABI does not specify the
14489 behavior if there is no key method, but there is no harm in
14490 exporting the class data in that case too. */
14491
14492 static bool
14493 arm_cxx_export_class_data (void)
14494 {
14495 return TARGET_AAPCS_BASED;
14496 }
14497
14498 void
14499 arm_set_return_address (rtx source, rtx scratch)
14500 {
14501 arm_stack_offsets *offsets;
14502 HOST_WIDE_INT delta;
14503 rtx addr;
14504 unsigned long saved_regs;
14505
14506 saved_regs = arm_compute_save_reg_mask ();
14507
14508 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14509 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14510 else
14511 {
14512 if (frame_pointer_needed)
14513 addr = plus_constant(hard_frame_pointer_rtx, -4);
14514 else
14515 {
14516 /* LR will be the first saved register. */
14517 offsets = arm_get_frame_offsets ();
14518 delta = offsets->outgoing_args - (offsets->frame + 4);
14519
14520
14521 if (delta >= 4096)
14522 {
14523 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14524 GEN_INT (delta & ~4095)));
14525 addr = scratch;
14526 delta &= 4095;
14527 }
14528 else
14529 addr = stack_pointer_rtx;
14530
14531 addr = plus_constant (addr, delta);
14532 }
14533 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14534 }
14535 }
14536
14537
14538 void
14539 thumb_set_return_address (rtx source, rtx scratch)
14540 {
14541 arm_stack_offsets *offsets;
14542 HOST_WIDE_INT delta;
14543 int reg;
14544 rtx addr;
14545 unsigned long mask;
14546
14547 emit_insn (gen_rtx_USE (VOIDmode, source));
14548
14549 mask = thumb_compute_save_reg_mask ();
14550 if (mask & (1 << LR_REGNUM))
14551 {
14552 offsets = arm_get_frame_offsets ();
14553
14554 /* Find the saved regs. */
14555 if (frame_pointer_needed)
14556 {
14557 delta = offsets->soft_frame - offsets->saved_args;
14558 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14559 }
14560 else
14561 {
14562 delta = offsets->outgoing_args - offsets->saved_args;
14563 reg = SP_REGNUM;
14564 }
14565 /* Allow for the stack frame. */
14566 if (TARGET_BACKTRACE)
14567 delta -= 16;
14568 /* The link register is always the first saved register. */
14569 delta -= 4;
14570
14571 /* Construct the address. */
14572 addr = gen_rtx_REG (SImode, reg);
14573 if ((reg != SP_REGNUM && delta >= 128)
14574 || delta >= 1024)
14575 {
14576 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14577 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14578 addr = scratch;
14579 }
14580 else
14581 addr = plus_constant (addr, delta);
14582
14583 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14584 }
14585 else
14586 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14587 }
14588
14589 /* Implements target hook vector_mode_supported_p. */
14590 bool
14591 arm_vector_mode_supported_p (enum machine_mode mode)
14592 {
14593 if ((mode == V2SImode)
14594 || (mode == V4HImode)
14595 || (mode == V8QImode))
14596 return true;
14597
14598 return false;
14599 }
14600
14601 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14602 ARM insns and therefore guarantee that the shift count is modulo 256.
14603 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14604 guarantee no particular behavior for out-of-range counts. */
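/* In other words, for SImode the middle end may rewrite a variable
   shift count N as (N & 255), so a count of 257 behaves like a count
   of 1, while for DImode no such truncation is promised.  */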
14605
14606 static unsigned HOST_WIDE_INT
14607 arm_shift_truncation_mask (enum machine_mode mode)
14608 {
14609 return mode == SImode ? 255 : 0;
14610 }