1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54
55 /* Forward definitions of types. */
56 typedef struct minipool_node Mnode;
57 typedef struct minipool_fixup Mfix;
58
59 const struct attribute_spec arm_attribute_table[];
60
61 /* Forward function declarations. */
62 static void arm_add_gc_roots (void);
63 static int arm_gen_constant (enum rtx_code, enum machine_mode, HOST_WIDE_INT,
64 rtx, rtx, int, int);
65 static unsigned bit_count (unsigned long);
66 static int arm_address_register_rtx_p (rtx, int);
67 static int arm_legitimate_index_p (enum machine_mode, rtx, int);
68 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
69 inline static int thumb_index_register_rtx_p (rtx, int);
70 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
71 static rtx emit_multi_reg_push (int);
72 static rtx emit_sfm (int, int);
73 #ifndef AOF_ASSEMBLER
74 static bool arm_assemble_integer (rtx, unsigned int, int);
75 #endif
76 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
77 static arm_cc get_arm_condition_code (rtx);
78 static void init_fpa_table (void);
79 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
80 static rtx is_jump_table (rtx);
81 static const char *output_multi_immediate (rtx *, const char *, const char *,
82 int, HOST_WIDE_INT);
83 static void print_multi_reg (FILE *, const char *, int, int);
84 static const char *shift_op (rtx, HOST_WIDE_INT *);
85 static struct machine_function *arm_init_machine_status (void);
86 static int number_of_first_bit_set (int);
87 static void replace_symbols_in_block (tree, rtx, rtx);
88 static void thumb_exit (FILE *, int, rtx);
89 static void thumb_pushpop (FILE *, int, int, int *, int);
90 static rtx is_jump_table (rtx);
91 static HOST_WIDE_INT get_jump_table_size (rtx);
92 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
93 static Mnode *add_minipool_forward_ref (Mfix *);
94 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
95 static Mnode *add_minipool_backward_ref (Mfix *);
96 static void assign_minipool_offsets (Mfix *);
97 static void arm_print_value (FILE *, rtx);
98 static void dump_minipool (rtx);
99 static int arm_barrier_cost (rtx);
100 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
101 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
102 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
103 rtx);
104 static void arm_reorg (void);
105 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
106 static int current_file_function_operand (rtx);
107 static unsigned long arm_compute_save_reg0_reg12_mask (void);
108 static unsigned long arm_compute_save_reg_mask (void);
109 static unsigned long arm_isr_value (tree);
110 static unsigned long arm_compute_func_type (void);
111 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
112 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
113 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
114 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
115 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
116 static int arm_comp_type_attributes (tree, tree);
117 static void arm_set_default_type_attributes (tree);
118 static int arm_adjust_cost (rtx, rtx, rtx, int);
119 static int arm_use_dfa_pipeline_interface (void);
120 static int count_insns_for_constant (HOST_WIDE_INT, int);
121 static int arm_get_strip_length (int);
122 static bool arm_function_ok_for_sibcall (tree, tree);
123 static void arm_internal_label (FILE *, const char *, unsigned long);
124 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
125 tree);
126 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
127 static bool arm_rtx_costs (rtx, int, int, int *);
128 static int arm_address_cost (rtx);
129 static bool arm_memory_load_p (rtx);
130 static bool arm_cirrus_insn_p (rtx);
131 static void cirrus_reorg (rtx);
132 static void arm_init_builtins (void);
133 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
134 static void arm_init_iwmmxt_builtins (void);
135 static rtx safe_vector_operand (rtx, enum machine_mode);
136 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
137 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
138 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
139
140 #ifdef OBJECT_FORMAT_ELF
141 static void arm_elf_asm_named_section (const char *, unsigned int);
142 #endif
143 #ifndef ARM_PE
144 static void arm_encode_section_info (tree, rtx, int);
145 #endif
146 #ifdef AOF_ASSEMBLER
147 static void aof_globalize_label (FILE *, const char *);
148 static void aof_dump_imports (FILE *);
149 static void aof_dump_pic_table (FILE *);
150 static void aof_file_start (void);
151 static void aof_file_end (void);
152 #endif
153
154 \f
155 /* Initialize the GCC target structure. */
156 #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
157 #undef TARGET_MERGE_DECL_ATTRIBUTES
158 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
159 #endif
160
161 #undef TARGET_ATTRIBUTE_TABLE
162 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
163
164 #ifdef AOF_ASSEMBLER
165 #undef TARGET_ASM_BYTE_OP
166 #define TARGET_ASM_BYTE_OP "\tDCB\t"
167 #undef TARGET_ASM_ALIGNED_HI_OP
168 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
169 #undef TARGET_ASM_ALIGNED_SI_OP
170 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
171 #undef TARGET_ASM_GLOBALIZE_LABEL
172 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
173 #undef TARGET_ASM_FILE_START
174 #define TARGET_ASM_FILE_START aof_file_start
175 #undef TARGET_ASM_FILE_END
176 #define TARGET_ASM_FILE_END aof_file_end
177 #else
178 #undef TARGET_ASM_ALIGNED_SI_OP
179 #define TARGET_ASM_ALIGNED_SI_OP NULL
180 #undef TARGET_ASM_INTEGER
181 #define TARGET_ASM_INTEGER arm_assemble_integer
182 #endif
183
184 #undef TARGET_ASM_FUNCTION_PROLOGUE
185 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
186
187 #undef TARGET_ASM_FUNCTION_EPILOGUE
188 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
189
190 #undef TARGET_COMP_TYPE_ATTRIBUTES
191 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
192
193 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
194 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
195
196 #undef TARGET_SCHED_ADJUST_COST
197 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
198
199 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
200 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE arm_use_dfa_pipeline_interface
201
202 #undef TARGET_ENCODE_SECTION_INFO
203 #ifdef ARM_PE
204 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
205 #else
206 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
207 #endif
208
209 #undef TARGET_STRIP_NAME_ENCODING
210 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
211
212 #undef TARGET_ASM_INTERNAL_LABEL
213 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
214
215 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
216 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
217
218 #undef TARGET_ASM_OUTPUT_MI_THUNK
219 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
220 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
221 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
222
223 #undef TARGET_RTX_COSTS
224 #define TARGET_RTX_COSTS arm_rtx_costs
225 #undef TARGET_ADDRESS_COST
226 #define TARGET_ADDRESS_COST arm_address_cost
227
228 #undef TARGET_MACHINE_DEPENDENT_REORG
229 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
230
231 #undef TARGET_INIT_BUILTINS
232 #define TARGET_INIT_BUILTINS arm_init_builtins
233 #undef TARGET_EXPAND_BUILTIN
234 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
235
236 struct gcc_target targetm = TARGET_INITIALIZER;
237 \f
238 /* Obstack for minipool constant handling. */
239 static struct obstack minipool_obstack;
240 static char * minipool_startobj;
241
242 /* The maximum number of insns skipped which
243 will be conditionalised if possible. */
244 static int max_insns_skipped = 5;
245
246 extern FILE * asm_out_file;
247
248 /* True if we are currently building a constant table. */
249 int making_const_table;
250
251 /* Define the information needed to generate branch insns. This is
252 stored from the compare operation. */
253 rtx arm_compare_op0, arm_compare_op1;
254
255 /* What type of floating point are we tuning for? */
256 enum fputype arm_fpu_tune;
257
258 /* What type of floating point instructions are available? */
259 enum fputype arm_fpu_arch;
260
261 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode. */
262 enum prog_mode_type arm_prgmode;
263
264 /* Set by the -mfp=... option. */
265 const char * target_fp_name = NULL;
266
267 /* Used to parse -mstructure_size_boundary command line option. */
268 const char * structure_size_string = NULL;
269 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
270
271 /* Bit values used to identify processor capabilities. */
272 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
273 #define FL_FAST_MULT (1 << 1) /* Fast multiply */
274 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
275 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
276 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
277 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
278 #define FL_THUMB (1 << 6) /* Thumb aware */
279 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
280 #define FL_STRONG (1 << 8) /* StrongARM */
281 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
282 #define FL_XSCALE (1 << 10) /* XScale */
283 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
284 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
285 #define FL_ARCH6J (1 << 12) /* Architecture rel 6. Adds
286 media instructions. */
287 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
288
289 /* The bits in this mask specify which
290 instructions we are allowed to generate. */
291 static unsigned long insn_flags = 0;
292
293 /* The bits in this mask specify which instruction scheduling options should
294 be used. Note - there is an overlap with the FL_FAST_MULT. For some
295 hardware we want to be able to generate the multiply instructions, but to
296 tune as if they were not present in the architecture. */
297 static unsigned long tune_flags = 0;
298
299 /* The following are used in the arm.md file as equivalents to bits
300 in the above two flag variables. */
301
302 /* Nonzero if this is an "M" variant of the processor. */
303 int arm_fast_multiply = 0;
304
305 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
306 int arm_arch4 = 0;
307
308 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
309 int arm_arch5 = 0;
310
311 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
312 int arm_arch5e = 0;
313
314 /* Nonzero if this chip can benefit from load scheduling. */
315 int arm_ld_sched = 0;
316
317 /* Nonzero if this chip is a StrongARM. */
318 int arm_is_strong = 0;
319
320 /* Nonzero if this chip supports Intel Wireless MMX technology. */
321 int arm_arch_iwmmxt = 0;
322
323 /* Nonzero if this chip is an XScale. */
324 int arm_arch_xscale = 0;
325
326 /* Nonzero if tuning for XScale */
327 int arm_tune_xscale = 0;
328
329 /* Nonzero if this chip is an ARM6 or an ARM7. */
330 int arm_is_6_or_7 = 0;
331
332 /* Nonzero if this chip is a Cirrus/DSP. */
333 int arm_is_cirrus = 0;
334
335 /* Nonzero if generating Thumb instructions. */
336 int thumb_code = 0;
337
338 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
339 must report the mode of the memory reference from PRINT_OPERAND to
340 PRINT_OPERAND_ADDRESS. */
341 enum machine_mode output_memory_reference_mode;
342
343 /* The register number to be used for the PIC offset register. */
344 const char * arm_pic_register_string = NULL;
345 int arm_pic_register = INVALID_REGNUM;
346
347 /* Set to 1 when a return insn is output; this means that the epilogue
348 is not needed. */
349 int return_used_this_function;
350
351 /* Set to 1 after arm_reorg has started. Reset to zero at the start of
352 the next function. */
353 static int after_arm_reorg = 0;
354
355 /* The maximum number of insns to be used when loading a constant. */
356 static int arm_constant_limit = 3;
357
358 /* For an explanation of these variables, see final_prescan_insn below. */
359 int arm_ccfsm_state;
360 enum arm_cond_code arm_current_cc;
361 rtx arm_target_insn;
362 int arm_target_label;
363
364 /* The condition codes of the ARM, and the inverse function. */
365 static const char * const arm_condition_codes[] =
366 {
367 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
368 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
369 };
370
371 #define streq(string1, string2) (strcmp (string1, string2) == 0)
372 \f
373 /* Initialization code. */
374
375 struct processors
376 {
377 const char *const name;
378 const unsigned long flags;
379 };
380
381 /* Not all of these give usefully different compilation alternatives,
382 but there is no simple way of generalizing them. */
383 static const struct processors all_cores[] =
384 {
385 /* ARM Cores */
386
387 {"arm2", FL_CO_PROC | FL_MODE26 },
388 {"arm250", FL_CO_PROC | FL_MODE26 },
389 {"arm3", FL_CO_PROC | FL_MODE26 },
390 {"arm6", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
391 {"arm60", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
392 {"arm600", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
393 {"arm610", FL_MODE26 | FL_MODE32 },
394 {"arm620", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
395 {"arm7", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
396 /* arm7m doesn't exist on its own, but only with D (and I); since
397 those don't alter the code, arm7m is sometimes used. */
398 {"arm7m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
399 {"arm7d", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
400 {"arm7dm", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
401 {"arm7di", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
402 {"arm7dmi", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
403 {"arm70", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
404 {"arm700", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
405 {"arm700i", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
406 {"arm710", FL_MODE26 | FL_MODE32 },
407 {"arm720", FL_MODE26 | FL_MODE32 },
408 {"arm710c", FL_MODE26 | FL_MODE32 },
409 {"arm7100", FL_MODE26 | FL_MODE32 },
410 {"arm7500", FL_MODE26 | FL_MODE32 },
411 /* Doesn't have an external co-proc, but does have embedded fpa. */
412 {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
413 /* V4 Architecture Processors */
414 {"arm7tdmi", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
415 {"arm710t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
416 {"arm720t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
417 {"arm740t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
418 {"arm8", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
419 {"arm810", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
420 {"arm9", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
421 {"arm920", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
422 {"arm920t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
423 {"arm940t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
424 {"arm9tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
425 {"arm9e", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
426 {"ep9312", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS },
427 {"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
428 {"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
429 {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
430 {"strongarm1110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
431 /* V5 Architecture Processors */
432 {"arm10tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
433 {"arm1020t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
434 {"arm926ejs", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
435 {"arm1026ejs", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
436 {"xscale", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE },
437 {"iwmmxt", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT },
438 /* V6 Architecture Processors */
439 {"arm1136js", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6J },
440 {"arm1136jfs", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6J | FL_VFPV2 },
441 {NULL, 0}
442 };
443
444 static const struct processors all_architectures[] =
445 {
446 /* ARM Architectures */
447
448 { "armv2", FL_CO_PROC | FL_MODE26 },
449 { "armv2a", FL_CO_PROC | FL_MODE26 },
450 { "armv3", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
451 { "armv3m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
452 { "armv4", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
453 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
454 implementations that support it, so we will leave it out for now. */
455 { "armv4t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
456 { "armv5", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
457 { "armv5t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
458 { "armv5te", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
459 { "armv6j", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6J },
460 { "ep9312", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS },
461 {"iwmmxt", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT },
462 { NULL, 0 }
463 };
464
465 /* This is a magic structure. The 'string' field is magically filled in
466 with a pointer to the value specified by the user on the command line
467 assuming that the user has specified such a value. */
468
469 struct arm_cpu_select arm_select[] =
470 {
471 /* string name processors */
472 { NULL, "-mcpu=", all_cores },
473 { NULL, "-march=", all_architectures },
474 { NULL, "-mtune=", all_cores }
475 };
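
/* For example (illustrative): compiling with "-mcpu=arm920t -mtune=xscale"
   should leave arm_select[0].string pointing at "arm920t" and
   arm_select[2].string at "xscale"; arm_override_options below then looks
   each name up in the corresponding processors table.  */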
476
477 /* Return the number of bits set in VALUE. */
478 static unsigned
479 bit_count (unsigned long value)
480 {
481 unsigned long count = 0;
482
483 while (value)
484 {
485 count++;
486 value &= value - 1; /* Clear the least-significant set bit. */
487 }
488
489 return count;
490 }
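
/* As an illustration, bit_count (0x29) steps the value through
   0x29 -> 0x28 -> 0x20 -> 0 and returns 3, since each iteration of the
   loop above clears exactly one set bit.  */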
491
492 /* Fix up any incompatible options that the user has specified.
493 This has now turned into a maze. */
494 void
495 arm_override_options (void)
496 {
497 unsigned i;
498
499 /* Set up the flags based on the cpu/architecture selected by the user. */
500 for (i = ARRAY_SIZE (arm_select); i--;)
501 {
502 struct arm_cpu_select * ptr = arm_select + i;
503
504 if (ptr->string != NULL && ptr->string[0] != '\0')
505 {
506 const struct processors * sel;
507
508 for (sel = ptr->processors; sel->name != NULL; sel++)
509 if (streq (ptr->string, sel->name))
510 {
511 if (i == 2)
512 tune_flags = sel->flags;
513 else
514 {
515 /* If we have been given an architecture and a processor
516 make sure that they are compatible. We only generate
517 a warning though, and we prefer the CPU over the
518 architecture. */
519 if (insn_flags != 0 && (insn_flags ^ sel->flags))
520 warning ("switch -mcpu=%s conflicts with -march= switch",
521 ptr->string);
522
523 insn_flags = sel->flags;
524 }
525
526 break;
527 }
528
529 if (sel->name == NULL)
530 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
531 }
532 }
533
534 /* If the user did not specify a processor, choose one for them. */
535 if (insn_flags == 0)
536 {
537 const struct processors * sel;
538 unsigned int sought;
539 static const struct cpu_default
540 {
541 const int cpu;
542 const char *const name;
543 }
544 cpu_defaults[] =
545 {
546 { TARGET_CPU_arm2, "arm2" },
547 { TARGET_CPU_arm6, "arm6" },
548 { TARGET_CPU_arm610, "arm610" },
549 { TARGET_CPU_arm710, "arm710" },
550 { TARGET_CPU_arm7m, "arm7m" },
551 { TARGET_CPU_arm7500fe, "arm7500fe" },
552 { TARGET_CPU_arm7tdmi, "arm7tdmi" },
553 { TARGET_CPU_arm8, "arm8" },
554 { TARGET_CPU_arm810, "arm810" },
555 { TARGET_CPU_arm9, "arm9" },
556 { TARGET_CPU_strongarm, "strongarm" },
557 { TARGET_CPU_xscale, "xscale" },
558 { TARGET_CPU_ep9312, "ep9312" },
559 { TARGET_CPU_iwmmxt, "iwmmxt" },
560 { TARGET_CPU_arm926ej_s, "arm926ej-s" },
561 { TARGET_CPU_arm1026ej_s, "arm1026ej-s" },
562 { TARGET_CPU_arm1136j_s, "arm1136j_s" },
563 { TARGET_CPU_arm1136jf_s, "arm1136jf_s" },
564 { TARGET_CPU_generic, "arm" },
565 { 0, 0 }
566 };
567 const struct cpu_default * def;
568
569 /* Find the default. */
570 for (def = cpu_defaults; def->name; def++)
571 if (def->cpu == TARGET_CPU_DEFAULT)
572 break;
573
574 /* Make sure we found the default CPU. */
575 if (def->name == NULL)
576 abort ();
577
578 /* Find the default CPU's flags. */
579 for (sel = all_cores; sel->name != NULL; sel++)
580 if (streq (def->name, sel->name))
581 break;
582
583 if (sel->name == NULL)
584 abort ();
585
586 insn_flags = sel->flags;
587
588 /* Now check to see if the user has specified some command line
589 switches that require certain abilities from the cpu. */
590 sought = 0;
591
592 if (TARGET_INTERWORK || TARGET_THUMB)
593 {
594 sought |= (FL_THUMB | FL_MODE32);
595
596 /* Force apcs-32 to be used for interworking. */
597 target_flags |= ARM_FLAG_APCS_32;
598
599 /* There are no ARM processors that support both APCS-26 and
600 interworking. Therefore we force FL_MODE26 to be removed
601 from insn_flags here (if it was set), so that the search
602 below will always be able to find a compatible processor. */
603 insn_flags &= ~FL_MODE26;
604 }
605 else if (!TARGET_APCS_32)
606 sought |= FL_MODE26;
607
608 if (sought != 0 && ((sought & insn_flags) != sought))
609 {
610 /* Try to locate a CPU type that supports all of the abilities
611 of the default CPU, plus the extra abilities requested by
612 the user. */
613 for (sel = all_cores; sel->name != NULL; sel++)
614 if ((sel->flags & sought) == (sought | insn_flags))
615 break;
616
617 if (sel->name == NULL)
618 {
619 unsigned current_bit_count = 0;
620 const struct processors * best_fit = NULL;
621
622 /* Ideally we would like to issue an error message here
623 saying that it was not possible to find a CPU compatible
624 with the default CPU, but which also supports the command
625 line options specified by the programmer, and so they
626 ought to use the -mcpu=<name> command line option to
627 override the default CPU type.
628
629 Unfortunately this does not work with multilibing. We
630 need to be able to support multilibs for -mapcs-26 and for
631 -mthumb-interwork and there is no CPU that can support both
632 options. Instead if we cannot find a cpu that has both the
633 characteristics of the default cpu and the given command line
634 options we scan the array again looking for a best match. */
635 for (sel = all_cores; sel->name != NULL; sel++)
636 if ((sel->flags & sought) == sought)
637 {
638 unsigned count;
639
640 count = bit_count (sel->flags & insn_flags);
641
642 if (count >= current_bit_count)
643 {
644 best_fit = sel;
645 current_bit_count = count;
646 }
647 }
648
649 if (best_fit == NULL)
650 abort ();
651 else
652 sel = best_fit;
653 }
654
655 insn_flags = sel->flags;
656 }
657 }
658
659 /* If tuning has not been specified, tune for whichever processor or
660 architecture has been selected. */
661 if (tune_flags == 0)
662 tune_flags = insn_flags;
663
664 /* Make sure that the processor choice does not conflict with any of the
665 other command line choices. */
666 if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
667 {
668 /* If APCS-32 was not the default then it must have been set by the
669 user, so issue a warning message. If the user has specified
670 "-mapcs-32 -mcpu=arm2" then we loose here. */
671 if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
672 warning ("target CPU does not support APCS-32" );
673 target_flags &= ~ARM_FLAG_APCS_32;
674 }
675 else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
676 {
677 warning ("target CPU does not support APCS-26" );
678 target_flags |= ARM_FLAG_APCS_32;
679 }
680
681 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
682 {
683 warning ("target CPU does not support interworking" );
684 target_flags &= ~ARM_FLAG_INTERWORK;
685 }
686
687 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
688 {
689 warning ("target CPU does not support THUMB instructions");
690 target_flags &= ~ARM_FLAG_THUMB;
691 }
692
693 if (TARGET_APCS_FRAME && TARGET_THUMB)
694 {
695 /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
696 target_flags &= ~ARM_FLAG_APCS_FRAME;
697 }
698
699 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
700 from here where no function is being compiled currently. */
701 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
702 && TARGET_ARM)
703 warning ("enabling backtrace support is only meaningful when compiling for the Thumb");
704
705 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
706 warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");
707
708 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
709 warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");
710
711 /* If interworking is enabled then APCS-32 must be selected as well. */
712 if (TARGET_INTERWORK)
713 {
714 if (!TARGET_APCS_32)
715 warning ("interworking forces APCS-32 to be used" );
716 target_flags |= ARM_FLAG_APCS_32;
717 }
718
719 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
720 {
721 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
722 target_flags |= ARM_FLAG_APCS_FRAME;
723 }
724
725 if (TARGET_POKE_FUNCTION_NAME)
726 target_flags |= ARM_FLAG_APCS_FRAME;
727
728 if (TARGET_APCS_REENT && flag_pic)
729 error ("-fpic and -mapcs-reent are incompatible");
730
731 if (TARGET_APCS_REENT)
732 warning ("APCS reentrant code not supported. Ignored");
733
734 /* If this target is normally configured to use APCS frames, warn if they
735 are turned off and debugging is turned on. */
736 if (TARGET_ARM
737 && write_symbols != NO_DEBUG
738 && !TARGET_APCS_FRAME
739 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
740 warning ("-g with -mno-apcs-frame may not give sensible debugging");
741
742 /* If stack checking is disabled, we can use r10 as the PIC register,
743 which keeps r9 available. */
744 if (flag_pic)
745 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
746
747 if (TARGET_APCS_FLOAT)
748 warning ("passing floating point arguments in fp regs not yet supported");
749
750 /* Initialize boolean versions of the flags, for use in the arm.md file. */
751 arm_fast_multiply = (insn_flags & FL_FAST_MULT) != 0;
752 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
753 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
754 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
755 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
756
757 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
758 arm_is_strong = (tune_flags & FL_STRONG) != 0;
759 thumb_code = (TARGET_ARM == 0);
760 arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
761 && !(tune_flags & FL_ARCH4))) != 0;
762 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
763 arm_is_cirrus = (tune_flags & FL_CIRRUS) != 0;
764 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
765
766 if (TARGET_IWMMXT && (! TARGET_ATPCS))
767 target_flags |= ARM_FLAG_ATPCS;
768
769 if (arm_is_cirrus)
770 {
771 arm_fpu_tune = FPUTYPE_MAVERICK;
772
773 /* Ignore -mhard-float if -mcpu=ep9312. */
774 if (TARGET_HARD_FLOAT)
775 target_flags ^= ARM_FLAG_SOFT_FLOAT;
776 }
777 else
778 /* Default value for floating point code... if no co-processor
779 bus, then schedule for emulated floating point. Otherwise,
780 assume the user has an FPA.
781 Note: this does not prevent use of floating point instructions,
782 -msoft-float does that. */
783 arm_fpu_tune = (tune_flags & FL_CO_PROC) ? FPUTYPE_FPA : FPUTYPE_FPA_EMU3;
784
785 if (target_fp_name)
786 {
787 if (streq (target_fp_name, "2"))
788 arm_fpu_arch = FPUTYPE_FPA_EMU2;
789 else if (streq (target_fp_name, "3"))
790 arm_fpu_arch = FPUTYPE_FPA_EMU3;
791 else
792 error ("invalid floating point emulation option: -mfpe-%s",
793 target_fp_name);
794 }
795 else
796 arm_fpu_arch = FPUTYPE_DEFAULT;
797
798 if (TARGET_FPE)
799 {
800 if (arm_fpu_tune == FPUTYPE_FPA_EMU3)
801 arm_fpu_tune = FPUTYPE_FPA_EMU2;
802 else if (arm_fpu_tune == FPUTYPE_MAVERICK)
803 warning ("-mfpe switch not supported by ep9312 target cpu - ignored.");
804 else if (arm_fpu_tune != FPUTYPE_FPA)
805 arm_fpu_tune = FPUTYPE_FPA_EMU2;
806 }
807
808 /* For arm2/3 there is no need to do any scheduling if there is only
809 a floating point emulator, or we are doing software floating-point. */
810 if ((TARGET_SOFT_FLOAT || arm_fpu_tune != FPUTYPE_FPA)
811 && (tune_flags & FL_MODE32) == 0)
812 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
813
814 arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
815
816 if (structure_size_string != NULL)
817 {
818 int size = strtol (structure_size_string, NULL, 0);
819
820 if (size == 8 || size == 32)
821 arm_structure_size_boundary = size;
822 else
823 warning ("structure size boundary can only be set to 8 or 32");
824 }
825
826 if (arm_pic_register_string != NULL)
827 {
828 int pic_register = decode_reg_name (arm_pic_register_string);
829
830 if (!flag_pic)
831 warning ("-mpic-register= is useless without -fpic");
832
833 /* Prevent the user from choosing an obviously stupid PIC register. */
834 else if (pic_register < 0 || call_used_regs[pic_register]
835 || pic_register == HARD_FRAME_POINTER_REGNUM
836 || pic_register == STACK_POINTER_REGNUM
837 || pic_register >= PC_REGNUM)
838 error ("unable to use '%s' for PIC register", arm_pic_register_string);
839 else
840 arm_pic_register = pic_register;
841 }
842
843 if (TARGET_THUMB && flag_schedule_insns)
844 {
845 /* Don't warn since it's on by default in -O2. */
846 flag_schedule_insns = 0;
847 }
848
849 if (optimize_size)
850 {
851 /* There's some dispute as to whether this should be 1 or 2. However,
852 experiments seem to show that in pathological cases a setting of
853 1 degrades less severely than a setting of 2. This could change if
854 other parts of the compiler change their behavior. */
855 arm_constant_limit = 1;
856
857 /* If optimizing for size, bump the number of instructions that we
858 are prepared to conditionally execute (even on a StrongARM). */
859 max_insns_skipped = 6;
860 }
861 else
862 {
863 /* For processors with load scheduling, it never costs more than
864 2 cycles to load a constant, and the load scheduler may well
865 reduce that to 1. */
866 if (tune_flags & FL_LDSCHED)
867 arm_constant_limit = 1;
868
869 /* On XScale the longer latency of a load makes it more difficult
870 to achieve a good schedule, so it's faster to synthesize
871 constants that can be done in two insns. */
872 if (arm_tune_xscale)
873 arm_constant_limit = 2;
874
875 /* StrongARM has early execution of branches, so a sequence
876 that is worth skipping is shorter. */
877 if (arm_is_strong)
878 max_insns_skipped = 3;
879 }
880
881 /* Register global variables with the garbage collector. */
882 arm_add_gc_roots ();
883 }
884
885 static void
886 arm_add_gc_roots (void)
887 {
888 gcc_obstack_init(&minipool_obstack);
889 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
890 }
891 \f
892 /* A table of known ARM exception types.
893 For use with the interrupt function attribute. */
894
895 typedef struct
896 {
897 const char *const arg;
898 const unsigned long return_value;
899 }
900 isr_attribute_arg;
901
902 static const isr_attribute_arg isr_attribute_args [] =
903 {
904 { "IRQ", ARM_FT_ISR },
905 { "irq", ARM_FT_ISR },
906 { "FIQ", ARM_FT_FIQ },
907 { "fiq", ARM_FT_FIQ },
908 { "ABORT", ARM_FT_ISR },
909 { "abort", ARM_FT_ISR },
910 { "ABORT", ARM_FT_ISR },
911 { "abort", ARM_FT_ISR },
912 { "UNDEF", ARM_FT_EXCEPTION },
913 { "undef", ARM_FT_EXCEPTION },
914 { "SWI", ARM_FT_EXCEPTION },
915 { "swi", ARM_FT_EXCEPTION },
916 { NULL, ARM_FT_NORMAL }
917 };
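
/* For example, a handler declared (illustratively) as

     void my_fiq_handler (void) __attribute__ ((interrupt ("FIQ")));

   is mapped by arm_isr_value below onto ARM_FT_FIQ; if the attribute is
   given with no argument the handler defaults to ARM_FT_ISR (an IRQ
   handler).  */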
918
919 /* Returns the (interrupt) function type of the current
920 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
921
922 static unsigned long
923 arm_isr_value (tree argument)
924 {
925 const isr_attribute_arg * ptr;
926 const char * arg;
927
928 /* No argument - default to IRQ. */
929 if (argument == NULL_TREE)
930 return ARM_FT_ISR;
931
932 /* Get the value of the argument. */
933 if (TREE_VALUE (argument) == NULL_TREE
934 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
935 return ARM_FT_UNKNOWN;
936
937 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
938
939 /* Check it against the list of known arguments. */
940 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
941 if (streq (arg, ptr->arg))
942 return ptr->return_value;
943
944 /* An unrecognized interrupt type. */
945 return ARM_FT_UNKNOWN;
946 }
947
948 /* Computes the type of the current function. */
949
950 static unsigned long
951 arm_compute_func_type (void)
952 {
953 unsigned long type = ARM_FT_UNKNOWN;
954 tree a;
955 tree attr;
956
957 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
958 abort ();
959
960 /* Decide if the current function is volatile. Such functions
961 never return, and many memory cycles can be saved by not storing
962 register values that will never be needed again. This optimization
963 was added to speed up context switching in a kernel application. */
964 if (optimize > 0
965 && current_function_nothrow
966 && TREE_THIS_VOLATILE (current_function_decl))
967 type |= ARM_FT_VOLATILE;
968
969 if (current_function_needs_context)
970 type |= ARM_FT_NESTED;
971
972 attr = DECL_ATTRIBUTES (current_function_decl);
973
974 a = lookup_attribute ("naked", attr);
975 if (a != NULL_TREE)
976 type |= ARM_FT_NAKED;
977
978 if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
979 type |= ARM_FT_EXCEPTION_HANDLER;
980 else
981 {
982 a = lookup_attribute ("isr", attr);
983 if (a == NULL_TREE)
984 a = lookup_attribute ("interrupt", attr);
985
986 if (a == NULL_TREE)
987 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
988 else
989 type |= arm_isr_value (TREE_VALUE (a));
990 }
991
992 return type;
993 }
994
995 /* Returns the type of the current function. */
996
997 unsigned long
998 arm_current_func_type (void)
999 {
1000 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1001 cfun->machine->func_type = arm_compute_func_type ();
1002
1003 return cfun->machine->func_type;
1004 }
1005 \f
1006 /* Return 1 if it is possible to return using a single instruction.
1007 If SIBLING is non-null, this is a test for a return before a sibling
1008 call. SIBLING is the call insn, so we can examine its register usage. */
1009
1010 int
1011 use_return_insn (int iscond, rtx sibling)
1012 {
1013 int regno;
1014 unsigned int func_type;
1015 unsigned long saved_int_regs;
1016 unsigned HOST_WIDE_INT stack_adjust;
1017
1018 /* Never use a return instruction before reload has run. */
1019 if (!reload_completed)
1020 return 0;
1021
1022 func_type = arm_current_func_type ();
1023
1024 /* Naked functions and volatile functions need special
1025 consideration. */
1026 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1027 return 0;
1028
1029 /* So do interrupt functions that use the frame pointer. */
1030 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1031 return 0;
1032
1033 stack_adjust = arm_get_frame_size () + current_function_outgoing_args_size;
1034
1035 /* As do variadic functions. */
1036 if (current_function_pretend_args_size
1037 || cfun->machine->uses_anonymous_args
1038 /* Or if the function calls __builtin_eh_return () */
1039 || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
1040 /* Or if the function calls alloca */
1041 || current_function_calls_alloca
1042 /* Or if there is a stack adjustment. However, if the stack pointer
1043 is saved on the stack, we can use a pre-incrementing stack load. */
1044 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1045 return 0;
1046
1047 saved_int_regs = arm_compute_save_reg_mask ();
1048
1049 /* Unfortunately, the insn
1050
1051 ldmib sp, {..., sp, ...}
1052
1053 triggers a bug on most SA-110 based devices, such that the stack
1054 pointer won't be correctly restored if the instruction takes a
1055 page fault. We work around this problem by popping r3 along with
1056 the other registers, since that is never slower than executing
1057 another instruction.
1058
1059 We test for !arm_arch5 here, because code for any architecture
1060 less than this could potentially be run on one of the buggy
1061 chips. */
1062 if (stack_adjust == 4 && !arm_arch5)
1063 {
1064 /* Validate that r3 is a call-clobbered register (always true in
1065 the default abi) ... */
1066 if (!call_used_regs[3])
1067 return 0;
1068
1069 /* ... that it isn't being used for a return value (always true
1070 until we implement return-in-regs), or for a tail-call
1071 argument ... */
1072 if (sibling)
1073 {
1074 if (GET_CODE (sibling) != CALL_INSN)
1075 abort ();
1076
1077 if (find_regno_fusage (sibling, USE, 3))
1078 return 0;
1079 }
1080
1081 /* ... and that there are no call-saved registers in r0-r2
1082 (always true in the default ABI). */
1083 if (saved_int_regs & 0x7)
1084 return 0;
1085 }
1086
1087 /* Can't be done if interworking with Thumb, and any registers have been
1088 stacked. */
1089 if (TARGET_INTERWORK && saved_int_regs != 0)
1090 return 0;
1091
1092 /* On StrongARM, conditional returns are expensive if they aren't
1093 taken and multiple registers have been stacked. */
1094 if (iscond && arm_is_strong)
1095 {
1096 /* Conditional return when just the LR is stored is a simple
1097 conditional-load instruction, that's not expensive. */
1098 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1099 return 0;
1100
1101 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1102 return 0;
1103 }
1104
1105 /* If there are saved registers but the LR isn't saved, then we need
1106 two instructions for the return. */
1107 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1108 return 0;
1109
1110 /* Can't be done if any of the FPA regs are pushed,
1111 since this also requires an insn. */
1112 if (TARGET_HARD_FLOAT)
1113 for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
1114 if (regs_ever_live[regno] && !call_used_regs[regno])
1115 return 0;
1116
1117 if (TARGET_REALLY_IWMMXT)
1118 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1119 if (regs_ever_live[regno] && ! call_used_regs [regno])
1120 return 0;
1121
1122 return 1;
1123 }
1124
1125 /* Return TRUE if int I is a valid immediate ARM constant. */
1126
1127 int
1128 const_ok_for_arm (HOST_WIDE_INT i)
1129 {
1130 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1131
1132 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1133 be all zero, or all one. */
1134 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1135 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1136 != ((~(unsigned HOST_WIDE_INT) 0)
1137 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1138 return FALSE;
1139
1140 /* Fast return for 0 and powers of 2 */
1141 if ((i & (i - 1)) == 0)
1142 return TRUE;
1143
1144 do
1145 {
1146 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1147 return TRUE;
1148 mask =
1149 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1150 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1151 }
1152 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1153
1154 return FALSE;
1155 }
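
/* The test above accepts exactly those constants that fit in an 8-bit
   field rotated right by an even number of bit positions within a
   32-bit word.  For example, 0x000000ff, 0x0000ff00 and 0xff000000 are
   all valid, as is 0xf000000f (0xff rotated right by 4, wrapping around
   the word), whereas 0x00000101 is not, since no single rotated 8-bit
   field can cover both of its set bits.  */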
1156
1157 /* Return true if I is a valid constant for the operation CODE. */
1158 static int
1159 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1160 {
1161 if (const_ok_for_arm (i))
1162 return 1;
1163
1164 switch (code)
1165 {
1166 case PLUS:
1167 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1168
1169 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1170 case XOR:
1171 case IOR:
1172 return 0;
1173
1174 case AND:
1175 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1176
1177 default:
1178 abort ();
1179 }
1180 }
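
/* For example, PLUS with the constant -0x3fc is rejected by
   const_ok_for_arm but accepted here, because 0x3fc (0xff rotated right
   by 30) is a valid immediate and the operation can be done with a
   subtract; likewise AND with 0xffffff00 is accepted because its
   complement, 0xff, is valid and can be used by a bit-clear (BIC)
   pattern.  */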
1181
1182 /* Emit a sequence of insns to handle a large constant.
1183 CODE is the code of the operation required, it can be any of SET, PLUS,
1184 IOR, AND, XOR, MINUS;
1185 MODE is the mode in which the operation is being performed;
1186 VAL is the integer to operate on;
1187 SOURCE is the other operand (a register, or a null-pointer for SET);
1188 SUBTARGETS means it is safe to create scratch registers if that will
1189 either produce a simpler sequence, or we will want to cse the values.
1190 Return value is the number of insns emitted. */
1191
1192 int
1193 arm_split_constant (enum rtx_code code, enum machine_mode mode,
1194 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1195 {
1196 if (subtargets || code == SET
1197 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1198 && REGNO (target) != REGNO (source)))
1199 {
1200 /* After arm_reorg has been called, we can't fix up expensive
1201 constants by pushing them into memory so we must synthesize
1202 them in-line, regardless of the cost. This is only likely to
1203 be more costly on chips that have load delay slots and we are
1204 compiling without running the scheduler (so no splitting
1205 occurred before the final instruction emission).
1206
1207 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1208 */
1209 if (!after_arm_reorg
1210 && (arm_gen_constant (code, mode, val, target, source, 1, 0)
1211 > arm_constant_limit + (code != SET)))
1212 {
1213 if (code == SET)
1214 {
1215 /* Currently SET is the only monadic value for CODE; all
1216 the rest are dyadic. */
1217 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1218 return 1;
1219 }
1220 else
1221 {
1222 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1223
1224 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1225 /* For MINUS, the constant is the minuend (VAL - SOURCE), since
1226 (SOURCE - VAL) is always passed as (SOURCE + (-VAL)). */
1227 if (code == MINUS)
1228 emit_insn (gen_rtx_SET (VOIDmode, target,
1229 gen_rtx_MINUS (mode, temp, source)));
1230 else
1231 emit_insn (gen_rtx_SET (VOIDmode, target,
1232 gen_rtx (code, mode, source, temp)));
1233 return 2;
1234 }
1235 }
1236 }
1237
1238 return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
1239 }
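
/* For example, a SET of 0x0ff00ff0 can be synthesized in two insns
   (each half is a valid rotated 8-bit immediate), which is within the
   default arm_constant_limit, so it is emitted in line, roughly as

     mov  rD, #0x0ff00000
     add  rD, rD, #0x00000ff0

   (rD here is just an illustrative destination register).  A constant
   that would need more insns than the limit is instead left as a plain
   SET before arm_reorg runs, and picked up later by the minipool
   code.  */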
1240
1241 static int
1242 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1243 {
1244 HOST_WIDE_INT temp1;
1245 int num_insns = 0;
1246 do
1247 {
1248 int end;
1249
1250 if (i <= 0)
1251 i += 32;
1252 if (remainder & (3 << (i - 2)))
1253 {
1254 end = i - 8;
1255 if (end < 0)
1256 end += 32;
1257 temp1 = remainder & ((0x0ff << end)
1258 | ((i < end) ? (0xff >> (32 - end)) : 0));
1259 remainder &= ~temp1;
1260 num_insns++;
1261 i -= 6;
1262 }
1263 i -= 2;
1264 } while (remainder);
1265 return num_insns;
1266 }
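
/* For example, count_insns_for_constant (0x0000f00f, 0) returns 2: the
   set bits fall into two separate 8-bit windows (0xf000 and 0x000f),
   each of which is a valid rotated immediate, so two data-processing
   insns are enough.  */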
1267
1268 /* As above, but extra parameter GENERATE which, if clear, suppresses
1269 RTL generation. */
1270
1271 static int
1272 arm_gen_constant (enum rtx_code code, enum machine_mode mode,
1273 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1274 int generate)
1275 {
1276 int can_invert = 0;
1277 int can_negate = 0;
1278 int can_negate_initial = 0;
1279 int can_shift = 0;
1280 int i;
1281 int num_bits_set = 0;
1282 int set_sign_bit_copies = 0;
1283 int clear_sign_bit_copies = 0;
1284 int clear_zero_bit_copies = 0;
1285 int set_zero_bit_copies = 0;
1286 int insns = 0;
1287 unsigned HOST_WIDE_INT temp1, temp2;
1288 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1289
1290 /* Find out which operations are safe for a given CODE. Also do a quick
1291 check for degenerate cases; these can occur when DImode operations
1292 are split. */
1293 switch (code)
1294 {
1295 case SET:
1296 can_invert = 1;
1297 can_shift = 1;
1298 can_negate = 1;
1299 break;
1300
1301 case PLUS:
1302 can_negate = 1;
1303 can_negate_initial = 1;
1304 break;
1305
1306 case IOR:
1307 if (remainder == 0xffffffff)
1308 {
1309 if (generate)
1310 emit_insn (gen_rtx_SET (VOIDmode, target,
1311 GEN_INT (ARM_SIGN_EXTEND (val))));
1312 return 1;
1313 }
1314 if (remainder == 0)
1315 {
1316 if (reload_completed && rtx_equal_p (target, source))
1317 return 0;
1318 if (generate)
1319 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1320 return 1;
1321 }
1322 break;
1323
1324 case AND:
1325 if (remainder == 0)
1326 {
1327 if (generate)
1328 emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
1329 return 1;
1330 }
1331 if (remainder == 0xffffffff)
1332 {
1333 if (reload_completed && rtx_equal_p (target, source))
1334 return 0;
1335 if (generate)
1336 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1337 return 1;
1338 }
1339 can_invert = 1;
1340 break;
1341
1342 case XOR:
1343 if (remainder == 0)
1344 {
1345 if (reload_completed && rtx_equal_p (target, source))
1346 return 0;
1347 if (generate)
1348 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1349 return 1;
1350 }
1351 if (remainder == 0xffffffff)
1352 {
1353 if (generate)
1354 emit_insn (gen_rtx_SET (VOIDmode, target,
1355 gen_rtx_NOT (mode, source)));
1356 return 1;
1357 }
1358
1359 /* We don't know how to handle this yet below. */
1360 abort ();
1361
1362 case MINUS:
1363 /* We treat MINUS as (val - source), since (source - val) is always
1364 passed as (source + (-val)). */
1365 if (remainder == 0)
1366 {
1367 if (generate)
1368 emit_insn (gen_rtx_SET (VOIDmode, target,
1369 gen_rtx_NEG (mode, source)));
1370 return 1;
1371 }
1372 if (const_ok_for_arm (val))
1373 {
1374 if (generate)
1375 emit_insn (gen_rtx_SET (VOIDmode, target,
1376 gen_rtx_MINUS (mode, GEN_INT (val),
1377 source)));
1378 return 1;
1379 }
1380 can_negate = 1;
1381
1382 break;
1383
1384 default:
1385 abort ();
1386 }
1387
1388 /* If we can do it in one insn get out quickly. */
1389 if (const_ok_for_arm (val)
1390 || (can_negate_initial && const_ok_for_arm (-val))
1391 || (can_invert && const_ok_for_arm (~val)))
1392 {
1393 if (generate)
1394 emit_insn (gen_rtx_SET (VOIDmode, target,
1395 (source ? gen_rtx (code, mode, source,
1396 GEN_INT (val))
1397 : GEN_INT (val))));
1398 return 1;
1399 }
1400
1401 /* Calculate a few attributes that may be useful for specific
1402 optimizations. */
1403 for (i = 31; i >= 0; i--)
1404 {
1405 if ((remainder & (1 << i)) == 0)
1406 clear_sign_bit_copies++;
1407 else
1408 break;
1409 }
1410
1411 for (i = 31; i >= 0; i--)
1412 {
1413 if ((remainder & (1 << i)) != 0)
1414 set_sign_bit_copies++;
1415 else
1416 break;
1417 }
1418
1419 for (i = 0; i <= 31; i++)
1420 {
1421 if ((remainder & (1 << i)) == 0)
1422 clear_zero_bit_copies++;
1423 else
1424 break;
1425 }
1426
1427 for (i = 0; i <= 31; i++)
1428 {
1429 if ((remainder & (1 << i)) != 0)
1430 set_zero_bit_copies++;
1431 else
1432 break;
1433 }
1434
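  /* For instance, with remainder == 0xffff0003 the four loops above
     leave clear_sign_bit_copies == 0, set_sign_bit_copies == 16,
     clear_zero_bit_copies == 0 and set_zero_bit_copies == 2.  */
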
1435 switch (code)
1436 {
1437 case SET:
1438 /* See if we can do this by sign_extending a constant that is known
1439 to be negative. This is a good way of doing it, since the shift
1440 may well merge into a subsequent insn. */
1441 if (set_sign_bit_copies > 1)
1442 {
1443 if (const_ok_for_arm
1444 (temp1 = ARM_SIGN_EXTEND (remainder
1445 << (set_sign_bit_copies - 1))))
1446 {
1447 if (generate)
1448 {
1449 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1450 emit_insn (gen_rtx_SET (VOIDmode, new_src,
1451 GEN_INT (temp1)));
1452 emit_insn (gen_ashrsi3 (target, new_src,
1453 GEN_INT (set_sign_bit_copies - 1)));
1454 }
1455 return 2;
1456 }
1457 /* For an inverted constant, we will need to set the low bits;
1458 these will be shifted out of harm's way. */
1459 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1460 if (const_ok_for_arm (~temp1))
1461 {
1462 if (generate)
1463 {
1464 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1465 emit_insn (gen_rtx_SET (VOIDmode, new_src,
1466 GEN_INT (temp1)));
1467 emit_insn (gen_ashrsi3 (target, new_src,
1468 GEN_INT (set_sign_bit_copies - 1)));
1469 }
1470 return 2;
1471 }
1472 }
1473
1474 /* See if we can generate this by setting the bottom (or the top)
1475 16 bits, and then shifting these into the other half of the
1476 word. We only look for the simplest cases, to do more would cost
1477 too much. Be careful, however, not to generate this when the
1478 alternative would take fewer insns. */
1479 if (val & 0xffff0000)
1480 {
1481 temp1 = remainder & 0xffff0000;
1482 temp2 = remainder & 0x0000ffff;
1483
1484 /* Overlaps outside this range are best done using other methods. */
1485 for (i = 9; i < 24; i++)
1486 {
1487 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1488 && !const_ok_for_arm (temp2))
1489 {
1490 rtx new_src = (subtargets
1491 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1492 : target);
1493 insns = arm_gen_constant (code, mode, temp2, new_src,
1494 source, subtargets, generate);
1495 source = new_src;
1496 if (generate)
1497 emit_insn (gen_rtx_SET
1498 (VOIDmode, target,
1499 gen_rtx_IOR (mode,
1500 gen_rtx_ASHIFT (mode, source,
1501 GEN_INT (i)),
1502 source)));
1503 return insns + 1;
1504 }
1505 }
1506
1507 /* Don't duplicate cases already considered. */
1508 for (i = 17; i < 24; i++)
1509 {
1510 if (((temp1 | (temp1 >> i)) == remainder)
1511 && !const_ok_for_arm (temp1))
1512 {
1513 rtx new_src = (subtargets
1514 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1515 : target);
1516 insns = arm_gen_constant (code, mode, temp1, new_src,
1517 source, subtargets, generate);
1518 source = new_src;
1519 if (generate)
1520 emit_insn
1521 (gen_rtx_SET (VOIDmode, target,
1522 gen_rtx_IOR
1523 (mode,
1524 gen_rtx_LSHIFTRT (mode, source,
1525 GEN_INT (i)),
1526 source)));
1527 return insns + 1;
1528 }
1529 }
1530 }
1531 break;
1532
1533 case IOR:
1534 case XOR:
1535 /* If we have IOR or XOR, and the constant can be loaded in a
1536 single instruction, and we can find a temporary to put it in,
1537 then this can be done in two instructions instead of 3-4. */
1538 if (subtargets
1539 /* TARGET can't be NULL if SUBTARGETS is 0 */
1540 || (reload_completed && !reg_mentioned_p (target, source)))
1541 {
1542 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1543 {
1544 if (generate)
1545 {
1546 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1547
1548 emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
1549 emit_insn (gen_rtx_SET (VOIDmode, target,
1550 gen_rtx (code, mode, source, sub)));
1551 }
1552 return 2;
1553 }
1554 }
1555
1556 if (code == XOR)
1557 break;
1558
1559 if (set_sign_bit_copies > 8
1560 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1561 {
1562 if (generate)
1563 {
1564 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1565 rtx shift = GEN_INT (set_sign_bit_copies);
1566
1567 emit_insn (gen_rtx_SET (VOIDmode, sub,
1568 gen_rtx_NOT (mode,
1569 gen_rtx_ASHIFT (mode,
1570 source,
1571 shift))));
1572 emit_insn (gen_rtx_SET (VOIDmode, target,
1573 gen_rtx_NOT (mode,
1574 gen_rtx_LSHIFTRT (mode, sub,
1575 shift))));
1576 }
1577 return 2;
1578 }
1579
1580 if (set_zero_bit_copies > 8
1581 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1582 {
1583 if (generate)
1584 {
1585 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1586 rtx shift = GEN_INT (set_zero_bit_copies);
1587
1588 emit_insn (gen_rtx_SET (VOIDmode, sub,
1589 gen_rtx_NOT (mode,
1590 gen_rtx_LSHIFTRT (mode,
1591 source,
1592 shift))));
1593 emit_insn (gen_rtx_SET (VOIDmode, target,
1594 gen_rtx_NOT (mode,
1595 gen_rtx_ASHIFT (mode, sub,
1596 shift))));
1597 }
1598 return 2;
1599 }
1600
1601 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1602 {
1603 if (generate)
1604 {
1605 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1606 emit_insn (gen_rtx_SET (VOIDmode, sub,
1607 gen_rtx_NOT (mode, source)));
1608 source = sub;
1609 if (subtargets)
1610 sub = gen_reg_rtx (mode);
1611 emit_insn (gen_rtx_SET (VOIDmode, sub,
1612 gen_rtx_AND (mode, source,
1613 GEN_INT (temp1))));
1614 emit_insn (gen_rtx_SET (VOIDmode, target,
1615 gen_rtx_NOT (mode, sub)));
1616 }
1617 return 3;
1618 }
1619 break;
1620
1621 case AND:
1622 /* See if two shifts will do 2 or more insn's worth of work. */
1623 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1624 {
1625 HOST_WIDE_INT shift_mask = ((0xffffffff
1626 << (32 - clear_sign_bit_copies))
1627 & 0xffffffff);
1628
1629 if ((remainder | shift_mask) != 0xffffffff)
1630 {
1631 if (generate)
1632 {
1633 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1634 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1635 new_src, source, subtargets, 1);
1636 source = new_src;
1637 }
1638 else
1639 {
1640 rtx targ = subtargets ? NULL_RTX : target;
1641 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1642 targ, source, subtargets, 0);
1643 }
1644 }
1645
1646 if (generate)
1647 {
1648 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1649 rtx shift = GEN_INT (clear_sign_bit_copies);
1650
1651 emit_insn (gen_ashlsi3 (new_src, source, shift));
1652 emit_insn (gen_lshrsi3 (target, new_src, shift));
1653 }
1654
1655 return insns + 2;
1656 }
1657
1658 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
1659 {
1660 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
1661
1662 if ((remainder | shift_mask) != 0xffffffff)
1663 {
1664 if (generate)
1665 {
1666 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1667
1668 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1669 new_src, source, subtargets, 1);
1670 source = new_src;
1671 }
1672 else
1673 {
1674 rtx targ = subtargets ? NULL_RTX : target;
1675
1676 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1677 targ, source, subtargets, 0);
1678 }
1679 }
1680
1681 if (generate)
1682 {
1683 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1684 rtx shift = GEN_INT (clear_zero_bit_copies);
1685
1686 emit_insn (gen_lshrsi3 (new_src, source, shift));
1687 emit_insn (gen_ashlsi3 (target, new_src, shift));
1688 }
1689
1690 return insns + 2;
1691 }
1692
1693 break;
1694
1695 default:
1696 break;
1697 }
1698
1699 for (i = 0; i < 32; i++)
1700 if (remainder & (1 << i))
1701 num_bits_set++;
1702
1703 if (code == AND || (can_invert && num_bits_set > 16))
1704 remainder = (~remainder) & 0xffffffff;
1705 else if (code == PLUS && num_bits_set > 16)
1706 remainder = (-remainder) & 0xffffffff;
1707 else
1708 {
1709 can_invert = 0;
1710 can_negate = 0;
1711 }
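
  /* For example, an AND with 0xfffcfff0 is not caught by the shift
     tricks above; the remainder is inverted here to 0x0003000f and the
     loop below emits two chunks, each complemented again so that the
     resulting AND matches the bic (AND-with-complement) patterns,
     giving roughly

	bic  rD, rS, #0x00030000
	bic  rD, rD, #0x0000000f

     (rD and rS are illustrative register names).  */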
1712
1713 /* Now try to find a way of doing the job in either two or three
1714 instructions.
1715 We start by looking for the largest block of zeros that is aligned on
1716 a 2-bit boundary; we then fill up the temps, wrapping around to the
1717 top of the word when we drop off the bottom.
1718 In the worst case this code should produce no more than four insns. */
1719 {
1720 int best_start = 0;
1721 int best_consecutive_zeros = 0;
1722
1723 for (i = 0; i < 32; i += 2)
1724 {
1725 int consecutive_zeros = 0;
1726
1727 if (!(remainder & (3 << i)))
1728 {
1729 while ((i < 32) && !(remainder & (3 << i)))
1730 {
1731 consecutive_zeros += 2;
1732 i += 2;
1733 }
1734 if (consecutive_zeros > best_consecutive_zeros)
1735 {
1736 best_consecutive_zeros = consecutive_zeros;
1737 best_start = i - consecutive_zeros;
1738 }
1739 i -= 2;
1740 }
1741 }
1742
1743 /* So long as it won't require any more insns to do so, it's
1744 desirable to emit a small constant (in bits 0...9) in the last
1745 insn. This way there is more chance that it can be combined with
1746 a later addressing insn to form a pre-indexed load or store
1747 operation. Consider:
1748
1749 *((volatile int *)0xe0000100) = 1;
1750 *((volatile int *)0xe0000110) = 2;
1751
1752 We want this to wind up as:
1753
1754 mov rA, #0xe0000000
1755 mov rB, #1
1756 str rB, [rA, #0x100]
1757 mov rB, #2
1758 str rB, [rA, #0x110]
1759
1760 rather than having to synthesize both large constants from scratch.
1761
1762 Therefore, we calculate how many insns would be required to emit
1763 the constant starting from `best_start', and also starting from
1764 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
1765 yield a shorter sequence, we may as well use zero. */
1766 if (best_start != 0
1767 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
1768 && (count_insns_for_constant (remainder, 0) <=
1769 count_insns_for_constant (remainder, best_start)))
1770 best_start = 0;
1771
1772 /* Now start emitting the insns. */
1773 i = best_start;
1774 do
1775 {
1776 int end;
1777
1778 if (i <= 0)
1779 i += 32;
1780 if (remainder & (3 << (i - 2)))
1781 {
1782 end = i - 8;
1783 if (end < 0)
1784 end += 32;
1785 temp1 = remainder & ((0x0ff << end)
1786 | ((i < end) ? (0xff >> (32 - end)) : 0));
1787 remainder &= ~temp1;
1788
1789 if (generate)
1790 {
1791 rtx new_src, temp1_rtx;
1792
1793 if (code == SET || code == MINUS)
1794 {
1795 new_src = (subtargets ? gen_reg_rtx (mode) : target);
1796 if (can_invert && code != MINUS)
1797 temp1 = ~temp1;
1798 }
1799 else
1800 {
1801 if (remainder && subtargets)
1802 new_src = gen_reg_rtx (mode);
1803 else
1804 new_src = target;
1805 if (can_invert)
1806 temp1 = ~temp1;
1807 else if (can_negate)
1808 temp1 = -temp1;
1809 }
1810
1811 temp1 = trunc_int_for_mode (temp1, mode);
1812 temp1_rtx = GEN_INT (temp1);
1813
1814 if (code == SET)
1815 ;
1816 else if (code == MINUS)
1817 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
1818 else
1819 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
1820
1821 emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
1822 source = new_src;
1823 }
1824
1825 if (code == SET)
1826 {
1827 can_invert = 0;
1828 code = PLUS;
1829 }
1830 else if (code == MINUS)
1831 code = PLUS;
1832
1833 insns++;
1834 i -= 6;
1835 }
1836 i -= 2;
1837 }
1838 while (remainder);
1839 }
1840
1841 return insns;
1842 }
1843
1844 /* Canonicalize a comparison so that we are more likely to recognize it.
1845 This can be done for a few constant compares, where we can make the
1846 immediate value easier to load. */
1847
1848 enum rtx_code
1849 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
1850 {
1851 unsigned HOST_WIDE_INT i = INTVAL (*op1);
1852
1853 switch (code)
1854 {
1855 case EQ:
1856 case NE:
1857 return code;
1858
1859 case GT:
1860 case LE:
1861 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
1862 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1863 {
1864 *op1 = GEN_INT (i + 1);
1865 return code == GT ? GE : LT;
1866 }
1867 break;
1868
1869 case GE:
1870 case LT:
1871 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
1872 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1873 {
1874 *op1 = GEN_INT (i - 1);
1875 return code == GE ? GT : LE;
1876 }
1877 break;
1878
1879 case GTU:
1880 case LEU:
1881 if (i != ~((unsigned HOST_WIDE_INT) 0)
1882 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1883 {
1884 *op1 = GEN_INT (i + 1);
1885 return code == GTU ? GEU : LTU;
1886 }
1887 break;
1888
1889 case GEU:
1890 case LTU:
1891 if (i != 0
1892 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1893 {
1894 *op1 = GEN_INT (i - 1);
1895 return code == GEU ? GTU : LEU;
1896 }
1897 break;
1898
1899 default:
1900 abort ();
1901 }
1902
1903 return code;
1904 }
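/* Editor's sketch, not part of the original source: a hypothetical use of
   the canonicalization above.  0xfff is not a valid ARM rotated immediate,
   but 0x1000 is, so (GT x 0xfff) becomes (GE x 0x1000). */
#if 0
{
  rtx op1 = GEN_INT (0xfff);
  enum rtx_code code = arm_canonicalize_comparison (GT, &op1);
  /* code == GE, INTVAL (op1) == 0x1000, which fits a single CMP. */
}
#endif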
1905
1906 /* Decide whether a type should be returned in memory (true)
1907 or in a register (false). This is called by the macro
1908 RETURN_IN_MEMORY. */
1909 int
1910 arm_return_in_memory (tree type)
1911 {
1912 HOST_WIDE_INT size;
1913
1914 if (!AGGREGATE_TYPE_P (type))
1915 /* All simple types are returned in registers. */
1916 return 0;
1917
1918 size = int_size_in_bytes (type);
1919
1920 if (TARGET_ATPCS)
1921 {
1922 /* ATPCS returns aggregate types in memory only if they are
1923 larger than a word (or are variable size). */
1924 return (size < 0 || size > UNITS_PER_WORD);
1925 }
1926
1927 /* For the arm-wince targets we choose to be compatible with Microsoft's
1928 ARM and Thumb compilers, which always return aggregates in memory. */
1929 #ifndef ARM_WINCE
1930 /* All structures/unions bigger than one word are returned in memory.
1931 Also catch the case where int_size_in_bytes returns -1. In this case
1932 the aggregate is either huge or of variable size, and in either case
1933 we will want to return it via memory and not in a register. */
1934 if (size < 0 || size > UNITS_PER_WORD)
1935 return 1;
1936
1937 if (TREE_CODE (type) == RECORD_TYPE)
1938 {
1939 tree field;
1940
1941 /* For a struct the APCS says that we only return in a register
1942 if the type is 'integer like' and every addressable element
1943 has an offset of zero. For practical purposes this means
1944 that the structure can have at most one non bit-field element
1945 and that this element must be the first one in the structure. */
1946
1947 /* Find the first field, ignoring non FIELD_DECL things which will
1948 have been created by C++. */
1949 for (field = TYPE_FIELDS (type);
1950 field && TREE_CODE (field) != FIELD_DECL;
1951 field = TREE_CHAIN (field))
1952 continue;
1953
1954 if (field == NULL)
1955 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
1956
1957 /* Check that the first field is valid for returning in a register. */
1958
1959 /* ... Floats are not allowed */
1960 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1961 return 1;
1962
1963 /* ... Aggregates that are not themselves valid for returning in
1964 a register are not allowed. */
1965 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
1966 return 1;
1967
1968 /* Now check the remaining fields, if any. Only bitfields are allowed,
1969 since they are not addressable. */
1970 for (field = TREE_CHAIN (field);
1971 field;
1972 field = TREE_CHAIN (field))
1973 {
1974 if (TREE_CODE (field) != FIELD_DECL)
1975 continue;
1976
1977 if (!DECL_BIT_FIELD_TYPE (field))
1978 return 1;
1979 }
1980
1981 return 0;
1982 }
1983
1984 if (TREE_CODE (type) == UNION_TYPE)
1985 {
1986 tree field;
1987
1988 /* Unions can be returned in registers if every element is
1989 integral, or can be returned in an integer register. */
1990 for (field = TYPE_FIELDS (type);
1991 field;
1992 field = TREE_CHAIN (field))
1993 {
1994 if (TREE_CODE (field) != FIELD_DECL)
1995 continue;
1996
1997 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1998 return 1;
1999
2000 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2001 return 1;
2002 }
2003
2004 return 0;
2005 }
2006 #endif /* not ARM_WINCE */
2007
2008 /* Return all other types in memory. */
2009 return 1;
2010 }
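/* Editor's note (illustrative, hypothetical types, the non-ATPCS /
   non-WinCE path): under the rules above

   struct { int x; }                  is returned in a register (one word, one field);
   struct { int x : 12; int y : 4; }  is returned in a register (extra members are bit-fields);
   struct { float f; }                goes in memory (first field is a float);
   struct { short a; short b; }       goes in memory (second field is addressable, not a bit-field);
   struct { int a; int b; }           goes in memory (larger than UNITS_PER_WORD). */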
2011
2012 /* Indicate whether or not words of a double are in big-endian order. */
2013
2014 int
2015 arm_float_words_big_endian (void)
2016 {
2017 if (TARGET_CIRRUS)
2018 return 0;
2019
2020 /* For FPA, float words are always big-endian. For VFP, float words
2021 follow the memory system mode. */
2022
2023 if (TARGET_HARD_FLOAT)
2024 {
2025 /* FIXME: TARGET_HARD_FLOAT currently implies FPA. */
2026 return 1;
2027 }
2028
2029 if (TARGET_VFP)
2030 return (TARGET_BIG_END ? 1 : 0);
2031
2032 return 1;
2033 }
2034
2035 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2036 for a call to a function whose data type is FNTYPE.
2037 For a library call, FNTYPE is NULL. */
2038 void
2039 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2040 rtx libname ATTRIBUTE_UNUSED,
2041 tree fndecl ATTRIBUTE_UNUSED)
2042 {
2043 /* On the ARM, the offset starts at 0. */
2044 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2045 pcum->iwmmxt_nregs = 0;
2046
2047 pcum->call_cookie = CALL_NORMAL;
2048
2049 if (TARGET_LONG_CALLS)
2050 pcum->call_cookie = CALL_LONG;
2051
2052 /* Check for long call/short call attributes. The attributes
2053 override any command line option. */
2054 if (fntype)
2055 {
2056 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2057 pcum->call_cookie = CALL_SHORT;
2058 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2059 pcum->call_cookie = CALL_LONG;
2060 }
2061
2062 /* Varargs vectors are treated the same as long long.
2063 named_count avoids having to change the way arm handles 'named' */
2064 pcum->named_count = 0;
2065 pcum->nargs = 0;
2066
2067 if (TARGET_REALLY_IWMMXT && fntype)
2068 {
2069 tree fn_arg;
2070
2071 for (fn_arg = TYPE_ARG_TYPES (fntype);
2072 fn_arg;
2073 fn_arg = TREE_CHAIN (fn_arg))
2074 pcum->named_count += 1;
2075
2076 if (! pcum->named_count)
2077 pcum->named_count = INT_MAX;
2078 }
2079 }
2080
2081 /* Determine where to put an argument to a function.
2082 Value is zero to push the argument on the stack,
2083 or a hard register in which to store the argument.
2084
2085 MODE is the argument's machine mode.
2086 TYPE is the data type of the argument (as a tree).
2087 This is null for libcalls where that information may
2088 not be available.
2089 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2090 the preceding args and about the function being called.
2091 NAMED is nonzero if this argument is a named parameter
2092 (otherwise it is an extra parameter matching an ellipsis). */
2093
2094 rtx
2095 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2096 tree type ATTRIBUTE_UNUSED, int named)
2097 {
2098 if (TARGET_REALLY_IWMMXT)
2099 {
2100 if (VECTOR_MODE_SUPPORTED_P (mode))
2101 {
2102 /* varargs vectors are treated the same as long long.
2103 named_count avoids having to change the way arm handles 'named' */
2104 if (pcum->named_count <= pcum->nargs + 1)
2105 {
2106 if (pcum->nregs == 1)
2107 pcum->nregs += 1;
2108 if (pcum->nregs <= 2)
2109 return gen_rtx_REG (mode, pcum->nregs);
2110 else
2111 return NULL_RTX;
2112 }
2113 else if (pcum->iwmmxt_nregs <= 9)
2114 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2115 else
2116 return NULL_RTX;
2117 }
2118 else if ((mode == DImode || mode == DFmode) && pcum->nregs & 1)
2119 pcum->nregs += 1;
2120 }
2121
2122 if (mode == VOIDmode)
2123 /* Compute operand 2 of the call insn. */
2124 return GEN_INT (pcum->call_cookie);
2125
2126 if (!named || pcum->nregs >= NUM_ARG_REGS)
2127 return NULL_RTX;
2128
2129 return gen_rtx_REG (mode, pcum->nregs);
2130 }
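/* Editor's illustration (hypothetical C, core-register case): with four
   argument registers (NUM_ARG_REGS), a call such as

   int f (int a, int b, int c, int d, int e);

   passes a..d in r0-r3 and e on the stack; the routine above returns
   NULL_RTX for e once pcum->nregs reaches NUM_ARG_REGS. */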
2131
2132 /* Variable sized types are passed by reference. This is a GCC
2133 extension to the ARM ABI. */
2134
2135 int
2136 arm_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2137 enum machine_mode mode ATTRIBUTE_UNUSED,
2138 tree type, int named ATTRIBUTE_UNUSED)
2139 {
2140 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2141 }
2142
2143 /* Implement va_arg. */
2144
2145 rtx
2146 arm_va_arg (tree valist, tree type)
2147 {
2148 /* Variable sized types are passed by reference. */
2149 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
2150 {
2151 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
2152 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
2153 }
2154
2155 if (FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), NULL) == IWMMXT_ALIGNMENT)
2156 {
2157 tree minus_eight;
2158 tree t;
2159
2160 /* Maintain 64-bit alignment of the valist pointer by
2161 constructing: valist = ((valist + (8 - 1)) & -8). */
2162 minus_eight = build_int_2 (- (IWMMXT_ALIGNMENT / BITS_PER_UNIT), -1);
2163 t = build_int_2 ((IWMMXT_ALIGNMENT / BITS_PER_UNIT) - 1, 0);
2164 t = build (PLUS_EXPR, TREE_TYPE (valist), valist, t);
2165 t = build (BIT_AND_EXPR, TREE_TYPE (t), t, minus_eight);
2166 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
2167 TREE_SIDE_EFFECTS (t) = 1;
2168 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2169
2170 /* This is to stop the combine pass optimizing
2171 away the alignment adjustment. */
2172 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
2173 }
2174
2175 return std_expand_builtin_va_arg (valist, type);
2176 }
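/* Editor's worked example (not part of the original source): the
   alignment expression above rounds valist up to the next 8-byte
   boundary, e.g. (0x1004 + 7) & ~7 == 0x1008, while an already aligned
   0x1008 is left unchanged. */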
2177 \f
2178 /* Encode the current state of the #pragma [no_]long_calls. */
2179 typedef enum
2180 {
2181 OFF, /* No #pragma [no_]long_calls is in effect. */
2182 LONG, /* #pragma long_calls is in effect. */
2183 SHORT /* #pragma no_long_calls is in effect. */
2184 } arm_pragma_enum;
2185
2186 static arm_pragma_enum arm_pragma_long_calls = OFF;
2187
2188 void
2189 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2190 {
2191 arm_pragma_long_calls = LONG;
2192 }
2193
2194 void
2195 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2196 {
2197 arm_pragma_long_calls = SHORT;
2198 }
2199
2200 void
2201 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2202 {
2203 arm_pragma_long_calls = OFF;
2204 }
2205 \f
2206 /* Table of machine attributes. */
2207 const struct attribute_spec arm_attribute_table[] =
2208 {
2209 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2210 /* Function calls made to this symbol must be done indirectly, because
2211 it may lie outside of the 26 bit addressing range of a normal function
2212 call. */
2213 { "long_call", 0, 0, false, true, true, NULL },
2214 /* These functions, by contrast, are always known to reside within the 26 bit
2215 addressing range. */
2216 { "short_call", 0, 0, false, true, true, NULL },
2217 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2218 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2219 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2220 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2221 #ifdef ARM_PE
2222 /* ARM/PE has three new attributes:
2223 interfacearm - ?
2224 dllexport - for exporting a function/variable that will live in a dll
2225 dllimport - for importing a function/variable from a dll
2226
2227 Microsoft allows multiple declspecs in one __declspec, separating
2228 them with spaces. We do NOT support this. Instead, use __declspec
2229 multiple times.
2230 */
2231 { "dllimport", 0, 0, true, false, false, NULL },
2232 { "dllexport", 0, 0, true, false, false, NULL },
2233 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2234 #endif
2235 { NULL, 0, 0, false, false, false, NULL }
2236 };
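/* Editor's illustration of how these attributes and pragmas appear in
   user code (hypothetical declarations):

   void far_func (void) __attribute__ ((long_call));
   void near_func (void) __attribute__ ((short_call));
   void irq_handler (void) __attribute__ ((interrupt ("IRQ")));

   #pragma long_calls, #pragma no_long_calls and #pragma long_calls_off
   select the default for declarations that follow them. */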
2237
2238 /* Handle an attribute requiring a FUNCTION_DECL;
2239 arguments as in struct attribute_spec.handler. */
2240 static tree
2241 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2242 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2243 {
2244 if (TREE_CODE (*node) != FUNCTION_DECL)
2245 {
2246 warning ("`%s' attribute only applies to functions",
2247 IDENTIFIER_POINTER (name));
2248 *no_add_attrs = true;
2249 }
2250
2251 return NULL_TREE;
2252 }
2253
2254 /* Handle an "interrupt" or "isr" attribute;
2255 arguments as in struct attribute_spec.handler. */
2256 static tree
2257 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2258 bool *no_add_attrs)
2259 {
2260 if (DECL_P (*node))
2261 {
2262 if (TREE_CODE (*node) != FUNCTION_DECL)
2263 {
2264 warning ("`%s' attribute only applies to functions",
2265 IDENTIFIER_POINTER (name));
2266 *no_add_attrs = true;
2267 }
2268 /* FIXME: the argument if any is checked for type attributes;
2269 should it be checked for decl ones? */
2270 }
2271 else
2272 {
2273 if (TREE_CODE (*node) == FUNCTION_TYPE
2274 || TREE_CODE (*node) == METHOD_TYPE)
2275 {
2276 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2277 {
2278 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2279 *no_add_attrs = true;
2280 }
2281 }
2282 else if (TREE_CODE (*node) == POINTER_TYPE
2283 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2284 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2285 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2286 {
2287 *node = build_type_copy (*node);
2288 TREE_TYPE (*node) = build_type_attribute_variant
2289 (TREE_TYPE (*node),
2290 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2291 *no_add_attrs = true;
2292 }
2293 else
2294 {
2295 /* Possibly pass this attribute on from the type to a decl. */
2296 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2297 | (int) ATTR_FLAG_FUNCTION_NEXT
2298 | (int) ATTR_FLAG_ARRAY_NEXT))
2299 {
2300 *no_add_attrs = true;
2301 return tree_cons (name, args, NULL_TREE);
2302 }
2303 else
2304 {
2305 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2306 }
2307 }
2308 }
2309
2310 return NULL_TREE;
2311 }
2312
2313 /* Return 0 if the attributes for two types are incompatible, 1 if they
2314 are compatible, and 2 if they are nearly compatible (which causes a
2315 warning to be generated). */
2316 static int
2317 arm_comp_type_attributes (tree type1, tree type2)
2318 {
2319 int l1, l2, s1, s2;
2320
2321 /* Check for mismatch of non-default calling convention. */
2322 if (TREE_CODE (type1) != FUNCTION_TYPE)
2323 return 1;
2324
2325 /* Check for mismatched call attributes. */
2326 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2327 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2328 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2329 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2330
2331 /* Only bother to check if an attribute is defined. */
2332 if (l1 | l2 | s1 | s2)
2333 {
2334 /* If one type has an attribute, the other must have the same attribute. */
2335 if ((l1 != l2) || (s1 != s2))
2336 return 0;
2337
2338 /* Disallow mixed attributes. */
2339 if ((l1 & s2) || (l2 & s1))
2340 return 0;
2341 }
2342
2343 /* Check for mismatched ISR attribute. */
2344 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2345 if (! l1)
2346 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2347 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2348 if (! l2)
2349 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2350 if (l1 != l2)
2351 return 0;
2352
2353 return 1;
2354 }
2355
2356 /* Encode long_call or short_call attribute by prefixing
2357 symbol name in DECL with a special character FLAG. */
2358 void
2359 arm_encode_call_attribute (tree decl, int flag)
2360 {
2361 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2362 int len = strlen (str);
2363 char * newstr;
2364
2365 /* Do not allow weak functions to be treated as short call. */
2366 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2367 return;
2368
2369 newstr = alloca (len + 2);
2370 newstr[0] = flag;
2371 strcpy (newstr + 1, str);
2372
2373 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2374 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2375 }
2376
2377 /* Assigns default attributes to a newly defined type. This is used to
2378 set short_call/long_call attributes for function types of
2379 functions defined inside corresponding #pragma scopes. */
2380 static void
2381 arm_set_default_type_attributes (tree type)
2382 {
2383 /* Add __attribute__ ((long_call)) to all functions when
2384 inside #pragma long_calls, or __attribute__ ((short_call))
2385 when inside #pragma no_long_calls. */
2386 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2387 {
2388 tree type_attr_list, attr_name;
2389 type_attr_list = TYPE_ATTRIBUTES (type);
2390
2391 if (arm_pragma_long_calls == LONG)
2392 attr_name = get_identifier ("long_call");
2393 else if (arm_pragma_long_calls == SHORT)
2394 attr_name = get_identifier ("short_call");
2395 else
2396 return;
2397
2398 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2399 TYPE_ATTRIBUTES (type) = type_attr_list;
2400 }
2401 }
2402 \f
2403 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2404 defined within the current compilation unit. If this cannot be
2405 determined, then 0 is returned. */
2406 static int
2407 current_file_function_operand (rtx sym_ref)
2408 {
2409 /* This is a bit of a fib. A function will have a short call flag
2410 applied to its name if it has the short call attribute, or it has
2411 already been defined within the current compilation unit. */
2412 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2413 return 1;
2414
2415 /* The current function is always defined within the current compilation
2416 unit.  If it is a weak definition, however, then this may not be the real
2417 definition of the function, and so we have to say no. */
2418 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2419 && !DECL_WEAK (current_function_decl))
2420 return 1;
2421
2422 /* We cannot make the determination - default to returning 0. */
2423 return 0;
2424 }
2425
2426 /* Return nonzero if a 32 bit "long_call" should be generated for
2427 this call. We generate a long_call if the function:
2428
2429 a. has an __attribute__ ((long_call))
2430 or b. is within the scope of a #pragma long_calls
2431 or c. the -mlong-calls command line switch has been specified
2432
2433 However we do not generate a long call if the function:
2434
2435 d. has an __attribute__ ((short_call))
2436 or e. is inside the scope of a #pragma no_long_calls
2437 or f. has an __attribute__ ((section))
2438 or g. is defined within the current compilation unit.
2439
2440 This function will be called by C fragments contained in the machine
2441 description file. CALL_REF and CALL_COOKIE correspond to the matched
2442 rtl operands. CALL_SYMBOL is used to distinguish between
2443 two different callers of the function. It is set to 1 in the
2444 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2445 and "call_value" patterns. This is because of the difference in the
2446 SYM_REFs passed by these patterns. */
2447 int
2448 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2449 {
2450 if (!call_symbol)
2451 {
2452 if (GET_CODE (sym_ref) != MEM)
2453 return 0;
2454
2455 sym_ref = XEXP (sym_ref, 0);
2456 }
2457
2458 if (GET_CODE (sym_ref) != SYMBOL_REF)
2459 return 0;
2460
2461 if (call_cookie & CALL_SHORT)
2462 return 0;
2463
2464 if (TARGET_LONG_CALLS && flag_function_sections)
2465 return 1;
2466
2467 if (current_file_function_operand (sym_ref))
2468 return 0;
2469
2470 return (call_cookie & CALL_LONG)
2471 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2472 || TARGET_LONG_CALLS;
2473 }
2474
2475 /* Return nonzero if it is ok to make a tail-call to DECL. */
2476 static bool
2477 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2478 {
2479 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2480
2481 if (cfun->machine->sibcall_blocked)
2482 return false;
2483
2484 /* Never tailcall something for which we have no decl, or if we
2485 are in Thumb mode. */
2486 if (decl == NULL || TARGET_THUMB)
2487 return false;
2488
2489 /* Get the calling method. */
2490 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2491 call_type = CALL_SHORT;
2492 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2493 call_type = CALL_LONG;
2494
2495 /* Cannot tail-call to long calls, since these are out of range of
2496 a branch instruction. However, if not compiling PIC, we know
2497 we can reach the symbol if it is in this compilation unit. */
2498 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2499 return false;
2500
2501 /* If we are interworking and the function is not declared static
2502 then we can't tail-call it unless we know that it exists in this
2503 compilation unit (since it might be a Thumb routine). */
2504 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2505 return false;
2506
2507 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2508 if (IS_INTERRUPT (arm_current_func_type ()))
2509 return false;
2510
2511 /* Everything else is ok. */
2512 return true;
2513 }
2514
2515 \f
2516 /* Addressing mode support functions. */
2517
2518 /* Return nonzero if X is a legitimate immediate operand when compiling
2519 for PIC. */
2520 int
2521 legitimate_pic_operand_p (rtx x)
2522 {
2523 if (CONSTANT_P (x)
2524 && flag_pic
2525 && (GET_CODE (x) == SYMBOL_REF
2526 || (GET_CODE (x) == CONST
2527 && GET_CODE (XEXP (x, 0)) == PLUS
2528 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2529 return 0;
2530
2531 return 1;
2532 }
2533
2534 rtx
2535 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2536 {
2537 if (GET_CODE (orig) == SYMBOL_REF
2538 || GET_CODE (orig) == LABEL_REF)
2539 {
2540 #ifndef AOF_ASSEMBLER
2541 rtx pic_ref, address;
2542 #endif
2543 rtx insn;
2544 int subregs = 0;
2545
2546 if (reg == 0)
2547 {
2548 if (no_new_pseudos)
2549 abort ();
2550 else
2551 reg = gen_reg_rtx (Pmode);
2552
2553 subregs = 1;
2554 }
2555
2556 #ifdef AOF_ASSEMBLER
2557 /* The AOF assembler can generate relocations for these directly, and
2558 understands that the PIC register has to be added into the offset. */
2559 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2560 #else
2561 if (subregs)
2562 address = gen_reg_rtx (Pmode);
2563 else
2564 address = reg;
2565
2566 if (TARGET_ARM)
2567 emit_insn (gen_pic_load_addr_arm (address, orig));
2568 else
2569 emit_insn (gen_pic_load_addr_thumb (address, orig));
2570
2571 if ((GET_CODE (orig) == LABEL_REF
2572 || (GET_CODE (orig) == SYMBOL_REF &&
2573 SYMBOL_REF_LOCAL_P (orig)))
2574 && NEED_GOT_RELOC)
2575 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2576 else
2577 {
2578 pic_ref = gen_rtx_MEM (Pmode,
2579 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2580 address));
2581 RTX_UNCHANGING_P (pic_ref) = 1;
2582 }
2583
2584 insn = emit_move_insn (reg, pic_ref);
2585 #endif
2586 current_function_uses_pic_offset_table = 1;
2587 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2588 by the loop optimizer. */
2589 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2590 REG_NOTES (insn));
2591 return reg;
2592 }
2593 else if (GET_CODE (orig) == CONST)
2594 {
2595 rtx base, offset;
2596
2597 if (GET_CODE (XEXP (orig, 0)) == PLUS
2598 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2599 return orig;
2600
2601 if (reg == 0)
2602 {
2603 if (no_new_pseudos)
2604 abort ();
2605 else
2606 reg = gen_reg_rtx (Pmode);
2607 }
2608
2609 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2610 {
2611 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2612 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2613 base == reg ? 0 : reg);
2614 }
2615 else
2616 abort ();
2617
2618 if (GET_CODE (offset) == CONST_INT)
2619 {
2620 /* The base register doesn't really matter, we only want to
2621 test the index for the appropriate mode. */
2622 if (!arm_legitimate_index_p (mode, offset, 0))
2623 {
2624 if (!no_new_pseudos)
2625 offset = force_reg (Pmode, offset);
2626 else
2627 abort ();
2628 }
2629
2630 if (GET_CODE (offset) == CONST_INT)
2631 return plus_constant (base, INTVAL (offset));
2632 }
2633
2634 if (GET_MODE_SIZE (mode) > 4
2635 && (GET_MODE_CLASS (mode) == MODE_INT
2636 || TARGET_SOFT_FLOAT))
2637 {
2638 emit_insn (gen_addsi3 (reg, base, offset));
2639 return reg;
2640 }
2641
2642 return gen_rtx_PLUS (Pmode, base, offset);
2643 }
2644
2645 return orig;
2646 }
2647
2648 /* Generate code to load the PIC register. PROLOGUE is true if
2649 called from arm_expand_prologue (in which case we want the
2650 generated insns at the start of the function); false if called
2651 by an exception receiver that needs the PIC register reloaded
2652 (in which case the insns are just dumped at the current location). */
2653 void
2654 arm_finalize_pic (int prologue ATTRIBUTE_UNUSED)
2655 {
2656 #ifndef AOF_ASSEMBLER
2657 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2658 rtx global_offset_table;
2659
2660 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2661 return;
2662
2663 if (!flag_pic)
2664 abort ();
2665
2666 start_sequence ();
2667 l1 = gen_label_rtx ();
2668
2669 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2670 /* On the ARM the PC register contains 'dot + 8' at the time of the
2671 addition, on the Thumb it is 'dot + 4'. */
2672 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2673 if (GOT_PCREL)
2674 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2675 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2676 else
2677 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2678
2679 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2680
2681 if (TARGET_ARM)
2682 {
2683 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2684 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2685 }
2686 else
2687 {
2688 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2689 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2690 }
2691
2692 seq = get_insns ();
2693 end_sequence ();
2694 if (prologue)
2695 emit_insn_after (seq, get_insns ());
2696 else
2697 emit_insn (seq);
2698
2699 /* Need to emit this whether or not we obey regdecls,
2700 since setjmp/longjmp can cause life info to screw up. */
2701 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2702 #endif /* AOF_ASSEMBLER */
2703 }
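/* Editor's sketch (illustrative only, typical ELF configuration; labels
   and register choice are hypothetical) of the ARM-state sequence built
   above:

   ldr sl, .Lgot
   .LPIC: add sl, pc, sl  @ pc reads as .LPIC + 8 here
   ...
   .Lgot: .word _GLOBAL_OFFSET_TABLE_ - (.LPIC + 8)

   so that the PIC register ends up holding the address of the GOT. */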
2704
2705 /* Return nonzero if X is valid as an ARM state addressing register. */
2706 static int
2707 arm_address_register_rtx_p (rtx x, int strict_p)
2708 {
2709 int regno;
2710
2711 if (GET_CODE (x) != REG)
2712 return 0;
2713
2714 regno = REGNO (x);
2715
2716 if (strict_p)
2717 return ARM_REGNO_OK_FOR_BASE_P (regno);
2718
2719 return (regno <= LAST_ARM_REGNUM
2720 || regno >= FIRST_PSEUDO_REGISTER
2721 || regno == FRAME_POINTER_REGNUM
2722 || regno == ARG_POINTER_REGNUM);
2723 }
2724
2725 /* Return nonzero if X is a valid ARM state address operand. */
2726 int
2727 arm_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
2728 {
2729 if (arm_address_register_rtx_p (x, strict_p))
2730 return 1;
2731
2732 else if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
2733 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
2734
2735 else if ((GET_CODE (x) == POST_MODIFY || GET_CODE (x) == PRE_MODIFY)
2736 && GET_MODE_SIZE (mode) <= 4
2737 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2738 && GET_CODE (XEXP (x, 1)) == PLUS
2739 && XEXP (XEXP (x, 1), 0) == XEXP (x, 0))
2740 return arm_legitimate_index_p (mode, XEXP (XEXP (x, 1), 1), strict_p);
2741
2742 /* After reload constants split into minipools will have addresses
2743 from a LABEL_REF. */
2744 else if (reload_completed
2745 && (GET_CODE (x) == LABEL_REF
2746 || (GET_CODE (x) == CONST
2747 && GET_CODE (XEXP (x, 0)) == PLUS
2748 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2749 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2750 return 1;
2751
2752 else if (mode == TImode)
2753 return 0;
2754
2755 else if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
2756 {
2757 if (GET_CODE (x) == PLUS
2758 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2759 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2760 {
2761 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2762
2763 if (val == 4 || val == -4 || val == -8)
2764 return 1;
2765 }
2766 }
2767
2768 else if (GET_CODE (x) == PLUS)
2769 {
2770 rtx xop0 = XEXP (x, 0);
2771 rtx xop1 = XEXP (x, 1);
2772
2773 return ((arm_address_register_rtx_p (xop0, strict_p)
2774 && arm_legitimate_index_p (mode, xop1, strict_p))
2775 || (arm_address_register_rtx_p (xop1, strict_p)
2776 && arm_legitimate_index_p (mode, xop0, strict_p)));
2777 }
2778
2779 #if 0
2780 /* Reload currently can't handle MINUS, so disable this for now */
2781 else if (GET_CODE (x) == MINUS)
2782 {
2783 rtx xop0 = XEXP (x, 0);
2784 rtx xop1 = XEXP (x, 1);
2785
2786 return (arm_address_register_rtx_p (xop0, strict_p)
2787 && arm_legitimate_index_p (mode, xop1, strict_p));
2788 }
2789 #endif
2790
2791 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
2792 && GET_CODE (x) == SYMBOL_REF
2793 && CONSTANT_POOL_ADDRESS_P (x)
2794 && ! (flag_pic
2795 && symbol_mentioned_p (get_pool_constant (x))))
2796 return 1;
2797
2798 else if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_DEC)
2799 && (GET_MODE_SIZE (mode) <= 4)
2800 && arm_address_register_rtx_p (XEXP (x, 0), strict_p))
2801 return 1;
2802
2803 return 0;
2804 }
2805
2806 /* Return nonzero if INDEX is valid for an address index operand in
2807 ARM state. */
2808 static int
2809 arm_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
2810 {
2811 HOST_WIDE_INT range;
2812 enum rtx_code code = GET_CODE (index);
2813
2814 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
2815 return (code == CONST_INT && INTVAL (index) < 1024
2816 && INTVAL (index) > -1024
2817 && (INTVAL (index) & 3) == 0);
2818
2819 if (TARGET_CIRRUS
2820 && (GET_MODE_CLASS (mode) == MODE_FLOAT || mode == DImode))
2821 return (code == CONST_INT
2822 && INTVAL (index) < 255
2823 && INTVAL (index) > -255);
2824
2825 if (arm_address_register_rtx_p (index, strict_p)
2826 && GET_MODE_SIZE (mode) <= 4)
2827 return 1;
2828
2829 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
2830 return (code == CONST_INT
2831 && INTVAL (index) < 256
2832 && INTVAL (index) > -256);
2833
2834 /* XXX What about ldrsb? */
2835 if (GET_MODE_SIZE (mode) <= 4 && code == MULT
2836 && (!arm_arch4 || (mode) != HImode))
2837 {
2838 rtx xiop0 = XEXP (index, 0);
2839 rtx xiop1 = XEXP (index, 1);
2840
2841 return ((arm_address_register_rtx_p (xiop0, strict_p)
2842 && power_of_two_operand (xiop1, SImode))
2843 || (arm_address_register_rtx_p (xiop1, strict_p)
2844 && power_of_two_operand (xiop0, SImode)));
2845 }
2846
2847 if (GET_MODE_SIZE (mode) <= 4
2848 && (code == LSHIFTRT || code == ASHIFTRT
2849 || code == ASHIFT || code == ROTATERT)
2850 && (!arm_arch4 || (mode) != HImode))
2851 {
2852 rtx op = XEXP (index, 1);
2853
2854 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
2855 && GET_CODE (op) == CONST_INT
2856 && INTVAL (op) > 0
2857 && INTVAL (op) <= 31);
2858 }
2859
2860 /* XXX For ARM v4 we may be doing a sign-extend operation during the
2861 load, but that has a restricted addressing range and we are unable
2862 to tell here whether that is the case. To be safe we restrict all
2863 loads to that range. */
2864 range = ((mode) == HImode || (mode) == QImode)
2865 ? (arm_arch4 ? 256 : 4095) : 4096;
2866
2867 return (code == CONST_INT
2868 && INTVAL (index) < range
2869 && INTVAL (index) > -range);
2870 }
2871
2872 /* Return nonzero if X is valid as a Thumb state base register. */
2873 static int
2874 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
2875 {
2876 int regno;
2877
2878 if (GET_CODE (x) != REG)
2879 return 0;
2880
2881 regno = REGNO (x);
2882
2883 if (strict_p)
2884 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
2885
2886 return (regno <= LAST_LO_REGNUM
2887 || regno > LAST_VIRTUAL_REGISTER
2888 || regno == FRAME_POINTER_REGNUM
2889 || (GET_MODE_SIZE (mode) >= 4
2890 && (regno == STACK_POINTER_REGNUM
2891 || regno >= FIRST_PSEUDO_REGISTER
2892 || x == hard_frame_pointer_rtx
2893 || x == arg_pointer_rtx)));
2894 }
2895
2896 /* Return nonzero if x is a legitimate index register. This is the case
2897 for any base register that can access a QImode object. */
2898 inline static int
2899 thumb_index_register_rtx_p (rtx x, int strict_p)
2900 {
2901 return thumb_base_register_rtx_p (x, QImode, strict_p);
2902 }
2903
2904 /* Return nonzero if x is a legitimate Thumb-state address.
2905
2906 The AP may be eliminated to either the SP or the FP, so we use the
2907 least common denominator, e.g. SImode, and offsets from 0 to 64.
2908
2909 ??? Verify whether the above is the right approach.
2910
2911 ??? Also, the FP may be eliminated to the SP, so perhaps that
2912 needs special handling also.
2913
2914 ??? Look at how the mips16 port solves this problem. It probably uses
2915 better ways to solve some of these problems.
2916
2917 Although it is not incorrect, we don't accept QImode and HImode
2918 addresses based on the frame pointer or arg pointer until the
2919 reload pass starts. This is so that eliminating such addresses
2920 into stack based ones won't produce impossible code. */
2921 int
2922 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
2923 {
2924 /* ??? Not clear if this is right. Experiment. */
2925 if (GET_MODE_SIZE (mode) < 4
2926 && !(reload_in_progress || reload_completed)
2927 && (reg_mentioned_p (frame_pointer_rtx, x)
2928 || reg_mentioned_p (arg_pointer_rtx, x)
2929 || reg_mentioned_p (virtual_incoming_args_rtx, x)
2930 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
2931 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
2932 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
2933 return 0;
2934
2935 /* Accept any base register. SP only in SImode or larger. */
2936 else if (thumb_base_register_rtx_p (x, mode, strict_p))
2937 return 1;
2938
2939 /* This is PC relative data before arm_reorg runs. */
2940 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
2941 && GET_CODE (x) == SYMBOL_REF
2942 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
2943 return 1;
2944
2945 /* This is PC relative data after arm_reorg runs. */
2946 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
2947 && (GET_CODE (x) == LABEL_REF
2948 || (GET_CODE (x) == CONST
2949 && GET_CODE (XEXP (x, 0)) == PLUS
2950 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2951 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2952 return 1;
2953
2954 /* Post-inc indexing only supported for SImode and larger. */
2955 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
2956 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
2957 return 1;
2958
2959 else if (GET_CODE (x) == PLUS)
2960 {
2961 /* REG+REG address can be any two index registers. */
2962 /* We disallow FRAME+REG addressing since we know that FRAME
2963 will be replaced with STACK, and SP relative addressing only
2964 permits SP+OFFSET. */
2965 if (GET_MODE_SIZE (mode) <= 4
2966 && XEXP (x, 0) != frame_pointer_rtx
2967 && XEXP (x, 1) != frame_pointer_rtx
2968 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
2969 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
2970 return 1;
2971
2972 /* REG+const has 5-7 bit offset for non-SP registers. */
2973 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
2974 || XEXP (x, 0) == arg_pointer_rtx)
2975 && GET_CODE (XEXP (x, 1)) == CONST_INT
2976 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
2977 return 1;
2978
2979 /* REG+const has 10 bit offset for SP, but only SImode and
2980 larger is supported. */
2981 /* ??? Should probably check for DI/DFmode overflow here
2982 just like GO_IF_LEGITIMATE_OFFSET does. */
2983 else if (GET_CODE (XEXP (x, 0)) == REG
2984 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
2985 && GET_MODE_SIZE (mode) >= 4
2986 && GET_CODE (XEXP (x, 1)) == CONST_INT
2987 && INTVAL (XEXP (x, 1)) >= 0
2988 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
2989 && (INTVAL (XEXP (x, 1)) & 3) == 0)
2990 return 1;
2991
2992 else if (GET_CODE (XEXP (x, 0)) == REG
2993 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
2994 && GET_MODE_SIZE (mode) >= 4
2995 && GET_CODE (XEXP (x, 1)) == CONST_INT
2996 && (INTVAL (XEXP (x, 1)) & 3) == 0)
2997 return 1;
2998 }
2999
3000 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3001 && GET_MODE_SIZE (mode) == 4
3002 && GET_CODE (x) == SYMBOL_REF
3003 && CONSTANT_POOL_ADDRESS_P (x)
3004 && !(flag_pic
3005 && symbol_mentioned_p (get_pool_constant (x))))
3006 return 1;
3007
3008 return 0;
3009 }
3010
3011 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3012 instruction of mode MODE. */
3013 int
3014 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3015 {
3016 switch (GET_MODE_SIZE (mode))
3017 {
3018 case 1:
3019 return val >= 0 && val < 32;
3020
3021 case 2:
3022 return val >= 0 && val < 64 && (val & 1) == 0;
3023
3024 default:
3025 return (val >= 0
3026 && (val + GET_MODE_SIZE (mode)) <= 128
3027 && (val & 3) == 0);
3028 }
3029 }
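/* Editor's note (illustrative): these limits match the Thumb load/store
   immediate encodings, e.g. the largest accepted offsets are

   ldrb r0, [r1, #31]
   ldrh r0, [r1, #62]
   ldr  r0, [r1, #124]

   for byte, halfword and word accesses respectively. */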
3030
3031 /* Try machine-dependent ways of modifying an illegitimate address
3032 to be legitimate. If we find one, return the new, valid address. */
3033 rtx
3034 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3035 {
3036 if (GET_CODE (x) == PLUS)
3037 {
3038 rtx xop0 = XEXP (x, 0);
3039 rtx xop1 = XEXP (x, 1);
3040
3041 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3042 xop0 = force_reg (SImode, xop0);
3043
3044 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3045 xop1 = force_reg (SImode, xop1);
3046
3047 if (ARM_BASE_REGISTER_RTX_P (xop0)
3048 && GET_CODE (xop1) == CONST_INT)
3049 {
3050 HOST_WIDE_INT n, low_n;
3051 rtx base_reg, val;
3052 n = INTVAL (xop1);
3053
3054 if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
3055 {
3056 low_n = n & 0x0f;
3057 n &= ~0x0f;
3058 if (low_n > 4)
3059 {
3060 n += 16;
3061 low_n -= 16;
3062 }
3063 }
3064 else
3065 {
3066 low_n = ((mode) == TImode ? 0
3067 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3068 n -= low_n;
3069 }
3070
3071 base_reg = gen_reg_rtx (SImode);
3072 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3073 GEN_INT (n)), NULL_RTX);
3074 emit_move_insn (base_reg, val);
3075 x = (low_n == 0 ? base_reg
3076 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3077 }
3078 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3079 x = gen_rtx_PLUS (SImode, xop0, xop1);
3080 }
3081
3082 /* XXX We don't allow MINUS any more -- see comment in
3083 arm_legitimate_address_p (). */
3084 else if (GET_CODE (x) == MINUS)
3085 {
3086 rtx xop0 = XEXP (x, 0);
3087 rtx xop1 = XEXP (x, 1);
3088
3089 if (CONSTANT_P (xop0))
3090 xop0 = force_reg (SImode, xop0);
3091
3092 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3093 xop1 = force_reg (SImode, xop1);
3094
3095 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3096 x = gen_rtx_MINUS (SImode, xop0, xop1);
3097 }
3098
3099 if (flag_pic)
3100 {
3101 /* We need to find and carefully transform any SYMBOL and LABEL
3102 references; so go back to the original address expression. */
3103 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3104
3105 if (new_x != orig_x)
3106 x = new_x;
3107 }
3108
3109 return x;
3110 }
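/* Editor's worked example (illustrative, not part of the original source):
   for an SImode access at base + 0x12345 the PLUS case above splits the
   offset into a part the 12-bit immediate field can hold and a remainder
   that is added into a scratch register first:

   add Rt, Rbase, #0x12000  @ 0x12000 is itself a valid rotated immediate
   ldr Rd, [Rt, #0x345]

   i.e. low_n == 0x345 and n == 0x12000 in the code above. */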
3111
3112 \f
3113
3114 #define REG_OR_SUBREG_REG(X) \
3115 (GET_CODE (X) == REG \
3116 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3117
3118 #define REG_OR_SUBREG_RTX(X) \
3119 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3120
3121 #ifndef COSTS_N_INSNS
3122 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3123 #endif
3124 /* Worker routine for arm_rtx_costs. */
3125 static inline int
3126 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3127 {
3128 enum machine_mode mode = GET_MODE (x);
3129 enum rtx_code subcode;
3130 int extra_cost;
3131
3132 if (TARGET_THUMB)
3133 {
3134 switch (code)
3135 {
3136 case ASHIFT:
3137 case ASHIFTRT:
3138 case LSHIFTRT:
3139 case ROTATERT:
3140 case PLUS:
3141 case MINUS:
3142 case COMPARE:
3143 case NEG:
3144 case NOT:
3145 return COSTS_N_INSNS (1);
3146
3147 case MULT:
3148 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3149 {
3150 int cycles = 0;
3151 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3152
3153 while (i)
3154 {
3155 i >>= 2;
3156 cycles++;
3157 }
3158 return COSTS_N_INSNS (2) + cycles;
3159 }
3160 return COSTS_N_INSNS (1) + 16;
3161
3162 case SET:
3163 return (COSTS_N_INSNS (1)
3164 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3165 + (GET_CODE (SET_DEST (x)) == MEM)));
3166
3167 case CONST_INT:
3168 if (outer == SET)
3169 {
3170 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3171 return 0;
3172 if (thumb_shiftable_const (INTVAL (x)))
3173 return COSTS_N_INSNS (2);
3174 return COSTS_N_INSNS (3);
3175 }
3176 else if ((outer == PLUS || outer == COMPARE)
3177 && INTVAL (x) < 256 && INTVAL (x) > -256)
3178 return 0;
3179 else if (outer == AND
3180 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3181 return COSTS_N_INSNS (1);
3182 else if (outer == ASHIFT || outer == ASHIFTRT
3183 || outer == LSHIFTRT)
3184 return 0;
3185 return COSTS_N_INSNS (2);
3186
3187 case CONST:
3188 case CONST_DOUBLE:
3189 case LABEL_REF:
3190 case SYMBOL_REF:
3191 return COSTS_N_INSNS (3);
3192
3193 case UDIV:
3194 case UMOD:
3195 case DIV:
3196 case MOD:
3197 return 100;
3198
3199 case TRUNCATE:
3200 return 99;
3201
3202 case AND:
3203 case XOR:
3204 case IOR:
3205 /* XXX guess. */
3206 return 8;
3207
3208 case ADDRESSOF:
3209 case MEM:
3210 /* XXX another guess. */
3211 /* Memory costs quite a lot for the first word, but subsequent words
3212 load at the equivalent of a single insn each. */
3213 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3214 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3215 ? 4 : 0));
3216
3217 case IF_THEN_ELSE:
3218 /* XXX a guess. */
3219 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3220 return 14;
3221 return 2;
3222
3223 case ZERO_EXTEND:
3224 /* XXX still guessing. */
3225 switch (GET_MODE (XEXP (x, 0)))
3226 {
3227 case QImode:
3228 return (1 + (mode == DImode ? 4 : 0)
3229 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3230
3231 case HImode:
3232 return (4 + (mode == DImode ? 4 : 0)
3233 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3234
3235 case SImode:
3236 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3237
3238 default:
3239 return 99;
3240 }
3241
3242 default:
3243 return 99;
3244 }
3245 }
3246
3247 switch (code)
3248 {
3249 case MEM:
3250 /* Memory costs quite a lot for the first word, but subsequent words
3251 load at the equivalent of a single insn each. */
3252 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3253 + (GET_CODE (x) == SYMBOL_REF
3254 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3255
3256 case DIV:
3257 case MOD:
3258 case UDIV:
3259 case UMOD:
3260 return optimize_size ? COSTS_N_INSNS (2) : 100;
3261
3262 case ROTATE:
3263 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3264 return 4;
3265 /* Fall through */
3266 case ROTATERT:
3267 if (mode != SImode)
3268 return 8;
3269 /* Fall through */
3270 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3271 if (mode == DImode)
3272 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3273 + ((GET_CODE (XEXP (x, 0)) == REG
3274 || (GET_CODE (XEXP (x, 0)) == SUBREG
3275 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3276 ? 0 : 8));
3277 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3278 || (GET_CODE (XEXP (x, 0)) == SUBREG
3279 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3280 ? 0 : 4)
3281 + ((GET_CODE (XEXP (x, 1)) == REG
3282 || (GET_CODE (XEXP (x, 1)) == SUBREG
3283 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3284 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3285 ? 0 : 4));
3286
3287 case MINUS:
3288 if (mode == DImode)
3289 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3290 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3291 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3292 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3293 ? 0 : 8));
3294
3295 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3296 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3297 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3298 && const_double_rtx_ok_for_fpa (XEXP (x, 1))))
3299 ? 0 : 8)
3300 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3301 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3302 && const_double_rtx_ok_for_fpa (XEXP (x, 0))))
3303 ? 0 : 8));
3304
3305 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3306 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3307 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3308 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3309 || subcode == ASHIFTRT || subcode == LSHIFTRT
3310 || subcode == ROTATE || subcode == ROTATERT
3311 || (subcode == MULT
3312 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3313 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3314 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3315 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3316 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3317 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3318 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3319 return 1;
3320 /* Fall through */
3321
3322 case PLUS:
3323 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3324 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3325 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3326 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3327 && const_double_rtx_ok_for_fpa (XEXP (x, 1))))
3328 ? 0 : 8));
3329
3330 /* Fall through */
3331 case AND: case XOR: case IOR:
3332 extra_cost = 0;
3333
3334 /* Normally the frame registers will be spilt into reg+const during
3335 reload, so it is a bad idea to combine them with other instructions,
3336 since then they might not be moved outside of loops. As a compromise
3337 we allow integration with ops that have a constant as their second
3338 operand. */
3339 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3340 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3341 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3342 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3343 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3344 extra_cost = 4;
3345
3346 if (mode == DImode)
3347 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3348 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3349 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3350 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3351 ? 0 : 8));
3352
3353 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3354 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3355 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3356 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3357 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3358 ? 0 : 4));
3359
3360 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3361 return (1 + extra_cost
3362 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3363 || subcode == LSHIFTRT || subcode == ASHIFTRT
3364 || subcode == ROTATE || subcode == ROTATERT
3365 || (subcode == MULT
3366 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3367 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3368 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3369 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3370 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3371 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3372 ? 0 : 4));
3373
3374 return 8;
3375
3376 case MULT:
3377 /* There is no point basing this on the tuning, since it is always the
3378 fast variant if it exists at all. */
3379 if (arm_fast_multiply && mode == DImode
3380 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3381 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3382 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3383 return 8;
3384
3385 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3386 || mode == DImode)
3387 return 30;
3388
3389 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3390 {
3391 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3392 & (unsigned HOST_WIDE_INT) 0xffffffff);
3393 int cost, const_ok = const_ok_for_arm (i);
3394 int j, booth_unit_size;
3395
3396 if (arm_tune_xscale)
3397 {
3398 unsigned HOST_WIDE_INT masked_const;
3399
3400 /* The cost will be related to two insns.
3401 First a load of the constant (MOV or LDR), then a multiply. */
3402 cost = 2;
3403 if (! const_ok)
3404 cost += 1; /* LDR is probably more expensive because
3405 of longer result latency. */
3406 masked_const = i & 0xffff8000;
3407 if (masked_const != 0 && masked_const != 0xffff8000)
3408 {
3409 masked_const = i & 0xf8000000;
3410 if (masked_const == 0 || masked_const == 0xf8000000)
3411 cost += 1;
3412 else
3413 cost += 2;
3414 }
3415 return cost;
3416 }
3417
3418 /* Tune as appropriate. */
3419 cost = const_ok ? 4 : 8;
3420 booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
3421 for (j = 0; i && j < 32; j += booth_unit_size)
3422 {
3423 i >>= booth_unit_size;
3424 cost += 2;
3425 }
3426
3427 return cost;
3428 }
3429
3430 return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
3431 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3432 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
3433
3434 case TRUNCATE:
3435 if (arm_fast_multiply && mode == SImode
3436 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3437 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3438 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3439 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3440 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3441 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3442 return 8;
3443 return 99;
3444
3445 case NEG:
3446 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3447 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3448 /* Fall through */
3449 case NOT:
3450 if (mode == DImode)
3451 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3452
3453 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3454
3455 case IF_THEN_ELSE:
3456 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3457 return 14;
3458 return 2;
3459
3460 case COMPARE:
3461 return 1;
3462
3463 case ABS:
3464 return 4 + (mode == DImode ? 4 : 0);
3465
3466 case SIGN_EXTEND:
3467 if (GET_MODE (XEXP (x, 0)) == QImode)
3468 return (4 + (mode == DImode ? 4 : 0)
3469 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3470 /* Fall through */
3471 case ZERO_EXTEND:
3472 switch (GET_MODE (XEXP (x, 0)))
3473 {
3474 case QImode:
3475 return (1 + (mode == DImode ? 4 : 0)
3476 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3477
3478 case HImode:
3479 return (4 + (mode == DImode ? 4 : 0)
3480 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3481
3482 case SImode:
3483 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3484
3485 case V8QImode:
3486 case V4HImode:
3487 case V2SImode:
3488 case V4QImode:
3489 case V2HImode:
3490 return 1;
3491
3492 default:
3493 break;
3494 }
3495 abort ();
3496
3497 case CONST_INT:
3498 if (const_ok_for_arm (INTVAL (x)))
3499 return outer == SET ? 2 : -1;
3500 else if (outer == AND
3501 && const_ok_for_arm (~INTVAL (x)))
3502 return -1;
3503 else if ((outer == COMPARE
3504 || outer == PLUS || outer == MINUS)
3505 && const_ok_for_arm (-INTVAL (x)))
3506 return -1;
3507 else
3508 return 5;
3509
3510 case CONST:
3511 case LABEL_REF:
3512 case SYMBOL_REF:
3513 return 6;
3514
3515 case CONST_DOUBLE:
3516 if (const_double_rtx_ok_for_fpa (x))
3517 return outer == SET ? 2 : -1;
3518 else if ((outer == COMPARE || outer == PLUS)
3519 && neg_const_double_rtx_ok_for_fpa (x))
3520 return -1;
3521 return 7;
3522
3523 default:
3524 return 99;
3525 }
3526 }
3527
3528 static bool
3529 arm_rtx_costs (rtx x, int code, int outer_code, int *total)
3530 {
3531 *total = arm_rtx_costs_1 (x, code, outer_code);
3532 return true;
3533 }
3534
3535 /* All address computations that can be done are free, but rtx cost returns
3536 the same for practically all of them. So we weight the different types
3537 of address here in the order (most pref first):
3538 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
3539 static inline int
3540 arm_arm_address_cost (rtx x)
3541 {
3542 enum rtx_code c = GET_CODE (x);
3543
3544 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
3545 return 0;
3546 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
3547 return 10;
3548
3549 if (c == PLUS || c == MINUS)
3550 {
3551 char cl0 = GET_RTX_CLASS (GET_CODE (XEXP (x, 0)));
3552 char cl1 = GET_RTX_CLASS (GET_CODE (XEXP (x, 1)));
3553
3554 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
3555 return 2;
3556
3557 if (cl0 == '2' || cl0 == 'c' || cl1 == '2' || cl1 == 'c')
3558 return 3;
3559
3560 return 4;
3561 }
3562
3563 return 6;
3564 }
3565
3566 static inline int
3567 arm_thumb_address_cost (rtx x)
3568 {
3569 enum rtx_code c = GET_CODE (x);
3570
3571 if (c == REG)
3572 return 1;
3573 if (c == PLUS
3574 && GET_CODE (XEXP (x, 0)) == REG
3575 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3576 return 1;
3577
3578 return 2;
3579 }
3580
3581 static int
3582 arm_address_cost (rtx x)
3583 {
3584 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
3585 }
3586
3587 static int
3588 arm_use_dfa_pipeline_interface (void)
3589 {
3590 return true;
3591 }
3592
3593 static int
3594 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
3595 {
3596 rtx i_pat, d_pat;
3597
3598 /* Some true dependencies can have a higher cost depending
3599 on precisely how certain input operands are used. */
3600 if (arm_tune_xscale
3601 && REG_NOTE_KIND (link) == 0
3602 && recog_memoized (insn) >= 0
3603 && recog_memoized (dep) >= 0)
3604 {
3605 int shift_opnum = get_attr_shift (insn);
3606 enum attr_type attr_type = get_attr_type (dep);
3607
3608 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
3609 operand for INSN. If we have a shifted input operand and the
3610 instruction we depend on is another ALU instruction, then we may
3611 have to account for an additional stall. */
3612 if (shift_opnum != 0 && attr_type == TYPE_NORMAL)
3613 {
3614 rtx shifted_operand;
3615 int opno;
3616
3617 /* Get the shifted operand. */
3618 extract_insn (insn);
3619 shifted_operand = recog_data.operand[shift_opnum];
3620
3621 /* Iterate over all the operands in DEP. If we write an operand
3623 that overlaps with SHIFTED_OPERAND, then we have to increase the
3623 cost of this dependency. */
3624 extract_insn (dep);
3625 preprocess_constraints ();
3626 for (opno = 0; opno < recog_data.n_operands; opno++)
3627 {
3628 /* We can ignore strict inputs. */
3629 if (recog_data.operand_type[opno] == OP_IN)
3630 continue;
3631
3632 if (reg_overlap_mentioned_p (recog_data.operand[opno],
3633 shifted_operand))
3634 return 2;
3635 }
3636 }
3637 }
3638
3639 /* XXX This is not strictly true for the FPA. */
3640 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
3641 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
3642 return 0;
3643
3644 /* Call insns don't incur a stall, even if they follow a load. */
3645 if (REG_NOTE_KIND (link) == 0
3646 && GET_CODE (insn) == CALL_INSN)
3647 return 1;
3648
3649 if ((i_pat = single_set (insn)) != NULL
3650 && GET_CODE (SET_SRC (i_pat)) == MEM
3651 && (d_pat = single_set (dep)) != NULL
3652 && GET_CODE (SET_DEST (d_pat)) == MEM)
3653 {
3654 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
3655 /* This is a load after a store; there is no conflict if the load reads
3656 from a cached area. Assume that loads from the stack, and from the
3657 constant pool are cached, and that others will miss. This is a
3658 hack. */
3659
3660 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
3661 || reg_mentioned_p (stack_pointer_rtx, src_mem)
3662 || reg_mentioned_p (frame_pointer_rtx, src_mem)
3663 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
3664 return 1;
3665 }
3666
3667 return cost;
3668 }
3669
3670 static int fpa_consts_inited = 0;
3671
3672 static const char * const strings_fpa[8] =
3673 {
3674 "0", "1", "2", "3",
3675 "4", "5", "0.5", "10"
3676 };
3677
3678 static REAL_VALUE_TYPE values_fpa[8];
3679
3680 static void
3681 init_fpa_table (void)
3682 {
3683 int i;
3684 REAL_VALUE_TYPE r;
3685
3686 for (i = 0; i < 8; i++)
3687 {
3688 r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
3689 values_fpa[i] = r;
3690 }
3691
3692 fpa_consts_inited = 1;
3693 }
3694
3695 /* Return TRUE if rtx X is a valid immediate FPA constant. */
3696 int
3697 const_double_rtx_ok_for_fpa (rtx x)
3698 {
3699 REAL_VALUE_TYPE r;
3700 int i;
3701
3702 if (!fpa_consts_inited)
3703 init_fpa_table ();
3704
3705 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3706 if (REAL_VALUE_MINUS_ZERO (r))
3707 return 0;
3708
3709 for (i = 0; i < 8; i++)
3710 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3711 return 1;
3712
3713 return 0;
3714 }
3715
3716 /* Return TRUE if rtx X is a valid immediate FPA constant when negated. */
3717 int
3718 neg_const_double_rtx_ok_for_fpa (rtx x)
3719 {
3720 REAL_VALUE_TYPE r;
3721 int i;
3722
3723 if (!fpa_consts_inited)
3724 init_fpa_table ();
3725
3726 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3727 r = REAL_VALUE_NEGATE (r);
3728 if (REAL_VALUE_MINUS_ZERO (r))
3729 return 0;
3730
3731 for (i = 0; i < 8; i++)
3732 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3733 return 1;
3734
3735 return 0;
3736 }
3737 \f
3738 /* Predicates for `match_operand' and `match_operator'. */
3739
3740 /* s_register_operand is the same as register_operand, but it doesn't accept
3741 (SUBREG (MEM)...).
3742
3743 This function exists because at the time it was put in it led to better
3744 code. SUBREG(MEM) always needs a reload in the places where
3745 s_register_operand is used, and this seemed to lead to excessive
3746 reloading. */
3747 int
3748 s_register_operand (rtx op, enum machine_mode mode)
3749 {
3750 if (GET_MODE (op) != mode && mode != VOIDmode)
3751 return 0;
3752
3753 if (GET_CODE (op) == SUBREG)
3754 op = SUBREG_REG (op);
3755
3756 /* We don't consider registers whose class is NO_REGS
3757 to be a register operand. */
3758 /* XXX might have to check for lo regs only for thumb ??? */
3759 return (GET_CODE (op) == REG
3760 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3761 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3762 }
3763
3764 /* A hard register operand (even before reload). */
3765 int
3766 arm_hard_register_operand (rtx op, enum machine_mode mode)
3767 {
3768 if (GET_MODE (op) != mode && mode != VOIDmode)
3769 return 0;
3770
3771 return (GET_CODE (op) == REG
3772 && REGNO (op) < FIRST_PSEUDO_REGISTER);
3773 }
3774
3775 /* Only accept reg, subreg(reg), const_int. */
3776 int
3777 reg_or_int_operand (rtx op, enum machine_mode mode)
3778 {
3779 if (GET_CODE (op) == CONST_INT)
3780 return 1;
3781
3782 if (GET_MODE (op) != mode && mode != VOIDmode)
3783 return 0;
3784
3785 if (GET_CODE (op) == SUBREG)
3786 op = SUBREG_REG (op);
3787
3788 /* We don't consider registers whose class is NO_REGS
3789 to be a register operand. */
3790 return (GET_CODE (op) == REG
3791 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3792 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3793 }
3794
3795 /* Return 1 if OP is an item in memory, given that we are in reload. */
3796 int
3797 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3798 {
3799 int regno = true_regnum (op);
3800
3801 return (!CONSTANT_P (op)
3802 && (regno == -1
3803 || (GET_CODE (op) == REG
3804 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
3805 }
3806
3807 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
3808 memory access (architecture V4).
3809 MODE is QImode if called when computing constraints, or VOIDmode when
3810 emitting patterns. In this latter case we cannot use memory_operand()
3811 because it will fail on badly formed MEMs, which is precisely what we are
3812 trying to catch. */
3813 int
3814 bad_signed_byte_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3815 {
3816 if (GET_CODE (op) != MEM)
3817 return 0;
3818
3819 op = XEXP (op, 0);
3820
3821 /* A sum of anything more complex than reg + reg or reg + const is bad. */
3822 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3823 && (!s_register_operand (XEXP (op, 0), VOIDmode)
3824 || (!s_register_operand (XEXP (op, 1), VOIDmode)
3825 && GET_CODE (XEXP (op, 1)) != CONST_INT)))
3826 return 1;
3827
3828 /* Big constants are also bad. */
3829 if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
3830 && (INTVAL (XEXP (op, 1)) > 0xff
3831 || -INTVAL (XEXP (op, 1)) > 0xff))
3832 return 1;
3833
3834 /* Everything else is good, or can automatically be made so. */
3835 return 0;
3836 }
3837
3838 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
3839 int
3840 arm_rhs_operand (rtx op, enum machine_mode mode)
3841 {
3842 return (s_register_operand (op, mode)
3843 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
3844 }
3845
3846 /* Return TRUE for valid operands for the
3847 rhs of an ARM instruction, or a load. */
3848 int
3849 arm_rhsm_operand (rtx op, enum machine_mode mode)
3850 {
3851 return (s_register_operand (op, mode)
3852 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
3853 || memory_operand (op, mode));
3854 }
3855
3856 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a
3857 constant that is valid when negated. */
3858 int
3859 arm_add_operand (rtx op, enum machine_mode mode)
3860 {
3861 if (TARGET_THUMB)
3862 return thumb_cmp_operand (op, mode);
3863
3864 return (s_register_operand (op, mode)
3865 || (GET_CODE (op) == CONST_INT
3866 && (const_ok_for_arm (INTVAL (op))
3867 || const_ok_for_arm (-INTVAL (op)))));
3868 }
3869
3870 /* Return TRUE for valid ARM constants (or when valid if negated). */
3871 int
3872 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3873 {
3874 return (GET_CODE (op) == CONST_INT
3875 && (const_ok_for_arm (INTVAL (op))
3876 || const_ok_for_arm (-INTVAL (op))));
3877 }
3878
3879 int
3880 arm_not_operand (rtx op, enum machine_mode mode)
3881 {
3882 return (s_register_operand (op, mode)
3883 || (GET_CODE (op) == CONST_INT
3884 && (const_ok_for_arm (INTVAL (op))
3885 || const_ok_for_arm (~INTVAL (op)))));
3886 }
3887
3888 /* Return TRUE if the operand is a memory reference which contains an
3889 offsettable address. */
3890 int
3891 offsettable_memory_operand (rtx op, enum machine_mode mode)
3892 {
3893 if (mode == VOIDmode)
3894 mode = GET_MODE (op);
3895
3896 return (mode == GET_MODE (op)
3897 && GET_CODE (op) == MEM
3898 && offsettable_address_p (reload_completed | reload_in_progress,
3899 mode, XEXP (op, 0)));
3900 }
3901
3902 /* Return TRUE if the operand is a memory reference which is, or can be
3903 made word aligned by adjusting the offset. */
3904 int
3905 alignable_memory_operand (rtx op, enum machine_mode mode)
3906 {
3907 rtx reg;
3908
3909 if (mode == VOIDmode)
3910 mode = GET_MODE (op);
3911
3912 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
3913 return 0;
3914
3915 op = XEXP (op, 0);
3916
3917 return ((GET_CODE (reg = op) == REG
3918 || (GET_CODE (op) == SUBREG
3919 && GET_CODE (reg = SUBREG_REG (op)) == REG)
3920 || (GET_CODE (op) == PLUS
3921 && GET_CODE (XEXP (op, 1)) == CONST_INT
3922 && (GET_CODE (reg = XEXP (op, 0)) == REG
3923 || (GET_CODE (XEXP (op, 0)) == SUBREG
3924 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
3925 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
3926 }
3927
3928 /* Similar to s_register_operand, but does not allow hard integer
3929 registers. */
3930 int
3931 f_register_operand (rtx op, enum machine_mode mode)
3932 {
3933 if (GET_MODE (op) != mode && mode != VOIDmode)
3934 return 0;
3935
3936 if (GET_CODE (op) == SUBREG)
3937 op = SUBREG_REG (op);
3938
3939 /* We don't consider registers whose class is NO_REGS
3940 to be a register operand. */
3941 return (GET_CODE (op) == REG
3942 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3943 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
3944 }
3945
3946 /* Return TRUE for valid operands for the rhs of an FPA instruction. */
3947 int
3948 fpa_rhs_operand (rtx op, enum machine_mode mode)
3949 {
3950 if (s_register_operand (op, mode))
3951 return TRUE;
3952
3953 if (GET_MODE (op) != mode && mode != VOIDmode)
3954 return FALSE;
3955
3956 if (GET_CODE (op) == CONST_DOUBLE)
3957 return const_double_rtx_ok_for_fpa (op);
3958
3959 return FALSE;
3960 }
3961
3962 int
3963 fpa_add_operand (rtx op, enum machine_mode mode)
3964 {
3965 if (s_register_operand (op, mode))
3966 return TRUE;
3967
3968 if (GET_MODE (op) != mode && mode != VOIDmode)
3969 return FALSE;
3970
3971 if (GET_CODE (op) == CONST_DOUBLE)
3972 return (const_double_rtx_ok_for_fpa (op)
3973 || neg_const_double_rtx_ok_for_fpa (op));
3974
3975 return FALSE;
3976 }
3977
3978 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
3979 int
3980 cirrus_memory_offset (rtx op)
3981 {
3982 /* Reject eliminable registers. */
3983 if (! (reload_in_progress || reload_completed)
3984 && ( reg_mentioned_p (frame_pointer_rtx, op)
3985 || reg_mentioned_p (arg_pointer_rtx, op)
3986 || reg_mentioned_p (virtual_incoming_args_rtx, op)
3987 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
3988 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
3989 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
3990 return 0;
3991
3992 if (GET_CODE (op) == MEM)
3993 {
3994 rtx ind;
3995
3996 ind = XEXP (op, 0);
3997
3998 /* Match: (mem (reg)). */
3999 if (GET_CODE (ind) == REG)
4000 return 1;
4001
4002 /* Match:
4003 (mem (plus (reg)
4004 (const))). */
4005 if (GET_CODE (ind) == PLUS
4006 && GET_CODE (XEXP (ind, 0)) == REG
4007 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4008 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4009 return 1;
4010 }
4011
4012 return 0;
4013 }
4014
4015 /* Return nonzero if OP is a Cirrus or general register. */
4016 int
4017 cirrus_register_operand (rtx op, enum machine_mode mode)
4018 {
4019 if (GET_MODE (op) != mode && mode != VOIDmode)
4020 return FALSE;
4021
4022 if (GET_CODE (op) == SUBREG)
4023 op = SUBREG_REG (op);
4024
4025 return (GET_CODE (op) == REG
4026 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4027 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4028 }
4029
4030 /* Return nonzero if OP is a Cirrus FP register. */
4031 int
4032 cirrus_fp_register (rtx op, enum machine_mode mode)
4033 {
4034 if (GET_MODE (op) != mode && mode != VOIDmode)
4035 return FALSE;
4036
4037 if (GET_CODE (op) == SUBREG)
4038 op = SUBREG_REG (op);
4039
4040 return (GET_CODE (op) == REG
4041 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4042 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4043 }
4044
4045 /* Return nonzero if OP is a 6-bit constant (0..63). */
4046 int
4047 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4048 {
4049 return (GET_CODE (op) == CONST_INT
4050 && INTVAL (op) >= 0
4051 && INTVAL (op) < 64);
4052 }
4053
4054 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4055 Used by the Cirrus Maverick code which has to work around
4056 a hardware bug triggered by such instructions. */
4057 static bool
4058 arm_memory_load_p (rtx insn)
4059 {
4060 rtx body, lhs, rhs;
4061
4062 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4063 return false;
4064
4065 body = PATTERN (insn);
4066
4067 if (GET_CODE (body) != SET)
4068 return false;
4069
4070 lhs = XEXP (body, 0);
4071 rhs = XEXP (body, 1);
4072
4073 lhs = REG_OR_SUBREG_RTX (lhs);
4074
4075 /* If the destination is not a general purpose
4076 register we do not have to worry. */
4077 if (GET_CODE (lhs) != REG
4078 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4079 return false;
4080
4081 /* As well as loads from memory we also have to react
4082 to loads of invalid constants which will be turned
4083 into loads from the minipool. */
4084 return (GET_CODE (rhs) == MEM
4085 || GET_CODE (rhs) == SYMBOL_REF
4086 || note_invalid_constants (insn, -1, false));
4087 }
4088
4089 /* Return TRUE if INSN is a Cirrus instruction. */
4090 static bool
4091 arm_cirrus_insn_p (rtx insn)
4092 {
4093 enum attr_cirrus attr;
4094
4095 /* get_attr aborts on USE and CLOBBER. */
4096 if (!insn
4097 || GET_CODE (insn) != INSN
4098 || GET_CODE (PATTERN (insn)) == USE
4099 || GET_CODE (PATTERN (insn)) == CLOBBER)
4100 return 0;
4101
4102 attr = get_attr_cirrus (insn);
4103
4104 return attr != CIRRUS_NOT;
4105 }
4106
4107 /* Cirrus reorg for invalid instruction combinations. */
4108 static void
4109 cirrus_reorg (rtx first)
4110 {
4111 enum attr_cirrus attr;
4112 rtx body = PATTERN (first);
4113 rtx t;
4114 int nops;
4115
4116 /* Any branch must be followed by 2 non-Cirrus instructions. */
4117 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4118 {
4119 nops = 0;
4120 t = next_nonnote_insn (first);
4121
4122 if (arm_cirrus_insn_p (t))
4123 ++ nops;
4124
4125 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4126 ++ nops;
4127
4128 while (nops --)
4129 emit_insn_after (gen_nop (), first);
4130
4131 return;
4132 }
4133
4134 /* (float (blah)) is in parallel with a clobber. */
4135 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4136 body = XVECEXP (body, 0, 0);
4137
4138 if (GET_CODE (body) == SET)
4139 {
4140 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4141
4142 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4143 be followed by a non-Cirrus insn. */
4144 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4145 {
4146 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4147 emit_insn_after (gen_nop (), first);
4148
4149 return;
4150 }
4151 else if (arm_memory_load_p (first))
4152 {
4153 unsigned int arm_regno;
4154
4155 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4156 ldr/cfmv64hr combination where the Rd field is the same
4157 in both instructions must be split with a non-Cirrus
4158 insn. Example:
4159
4160 ldr r0, blah
4161 nop
4162 cfmvsr mvf0, r0. */
4163
4164 /* Get Arm register number for ldr insn. */
4165 if (GET_CODE (lhs) == REG)
4166 arm_regno = REGNO (lhs);
4167 else if (GET_CODE (rhs) == REG)
4168 arm_regno = REGNO (rhs);
4169 else
4170 abort ();
4171
4172 /* Next insn. */
4173 first = next_nonnote_insn (first);
4174
4175 if (! arm_cirrus_insn_p (first))
4176 return;
4177
4178 body = PATTERN (first);
4179
4180 /* (float (blah)) is in parallel with a clobber. */
4181 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4182 body = XVECEXP (body, 0, 0);
4183
4184 if (GET_CODE (body) == FLOAT)
4185 body = XEXP (body, 0);
4186
4187 if (get_attr_cirrus (first) == CIRRUS_MOVE
4188 && GET_CODE (XEXP (body, 1)) == REG
4189 && arm_regno == REGNO (XEXP (body, 1)))
4190 emit_insn_after (gen_nop (), first);
4191
4192 return;
4193 }
4194 }
4195
4196 /* get_attr aborts on USE and CLOBBER. */
4197 if (!first
4198 || GET_CODE (first) != INSN
4199 || GET_CODE (PATTERN (first)) == USE
4200 || GET_CODE (PATTERN (first)) == CLOBBER)
4201 return;
4202
4203 attr = get_attr_cirrus (first);
4204
4205 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4206 must be followed by a non-coprocessor instruction. */
4207 if (attr == CIRRUS_COMPARE)
4208 {
4209 nops = 0;
4210
4211 t = next_nonnote_insn (first);
4212
4213 if (arm_cirrus_insn_p (t))
4214 ++ nops;
4215
4216 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4217 ++ nops;
4218
4219 while (nops --)
4220 emit_insn_after (gen_nop (), first);
4221
4222 return;
4223 }
4224 }
4225
4226 /* Return nonzero if OP is a constant power of two. */
4227 int
4228 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4229 {
4230 if (GET_CODE (op) == CONST_INT)
4231 {
4232 HOST_WIDE_INT value = INTVAL (op);
4233
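      /* A power of two has exactly one bit set, so clearing the lowest
	 set bit with VALUE & (VALUE - 1) must give zero; for example
	 8 & 7 == 0, whereas 12 & 11 == 8.  */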
4234 return value != 0 && (value & (value - 1)) == 0;
4235 }
4236
4237 return FALSE;
4238 }
4239
4240 /* Return TRUE for a valid operand of a DImode operation.
4241 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4242 Note that this disallows MEM(REG+REG), but allows
4243 MEM(PRE/POST_INC/DEC(REG)). */
4244 int
4245 di_operand (rtx op, enum machine_mode mode)
4246 {
4247 if (s_register_operand (op, mode))
4248 return TRUE;
4249
4250 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4251 return FALSE;
4252
4253 if (GET_CODE (op) == SUBREG)
4254 op = SUBREG_REG (op);
4255
4256 switch (GET_CODE (op))
4257 {
4258 case CONST_DOUBLE:
4259 case CONST_INT:
4260 return TRUE;
4261
4262 case MEM:
4263 return memory_address_p (DImode, XEXP (op, 0));
4264
4265 default:
4266 return FALSE;
4267 }
4268 }
4269
4270 /* Like di_operand, but don't accept constants. */
4271 int
4272 nonimmediate_di_operand (rtx op, enum machine_mode mode)
4273 {
4274 if (s_register_operand (op, mode))
4275 return TRUE;
4276
4277 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4278 return FALSE;
4279
4280 if (GET_CODE (op) == SUBREG)
4281 op = SUBREG_REG (op);
4282
4283 if (GET_CODE (op) == MEM)
4284 return memory_address_p (DImode, XEXP (op, 0));
4285
4286 return FALSE;
4287 }
4288
4289 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
4290 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4291 Note that this disallows MEM(REG+REG), but allows
4292 MEM(PRE/POST_INC/DEC(REG)). */
4293 int
4294 soft_df_operand (rtx op, enum machine_mode mode)
4295 {
4296 if (s_register_operand (op, mode))
4297 return TRUE;
4298
4299 if (mode != VOIDmode && GET_MODE (op) != mode)
4300 return FALSE;
4301
4302 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
4303 return FALSE;
4304
4305 if (GET_CODE (op) == SUBREG)
4306 op = SUBREG_REG (op);
4307
4308 switch (GET_CODE (op))
4309 {
4310 case CONST_DOUBLE:
4311 return TRUE;
4312
4313 case MEM:
4314 return memory_address_p (DFmode, XEXP (op, 0));
4315
4316 default:
4317 return FALSE;
4318 }
4319 }
4320
4321 /* Like soft_df_operand, but don't accept constants. */
4322 int
4323 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
4324 {
4325 if (s_register_operand (op, mode))
4326 return TRUE;
4327
4328 if (mode != VOIDmode && GET_MODE (op) != mode)
4329 return FALSE;
4330
4331 if (GET_CODE (op) == SUBREG)
4332 op = SUBREG_REG (op);
4333
4334 if (GET_CODE (op) == MEM)
4335 return memory_address_p (DFmode, XEXP (op, 0));
4336 return FALSE;
4337 }
4338
4339 /* Return TRUE for valid index operands. */
4340 int
4341 index_operand (rtx op, enum machine_mode mode)
4342 {
4343 return (s_register_operand (op, mode)
4344 || (immediate_operand (op, mode)
4345 && (GET_CODE (op) != CONST_INT
4346 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
4347 }
4348
4349 /* Return TRUE for valid shifts by a constant. This also accepts any
4350 power of two on the (somewhat overly relaxed) assumption that the
4351 shift operator in this case was a mult. */
4352 int
4353 const_shift_operand (rtx op, enum machine_mode mode)
4354 {
4355 return (power_of_two_operand (op, mode)
4356 || (immediate_operand (op, mode)
4357 && (GET_CODE (op) != CONST_INT
4358 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
4359 }
4360
4361 /* Return TRUE for arithmetic operators which can be combined with a multiply
4362 (shift). */
4363 int
4364 shiftable_operator (rtx x, enum machine_mode mode)
4365 {
4366 enum rtx_code code;
4367
4368 if (GET_MODE (x) != mode)
4369 return FALSE;
4370
4371 code = GET_CODE (x);
4372
4373 return (code == PLUS || code == MINUS
4374 || code == IOR || code == XOR || code == AND);
4375 }
4376
4377 /* Return TRUE for binary logical operators. */
4378 int
4379 logical_binary_operator (rtx x, enum machine_mode mode)
4380 {
4381 enum rtx_code code;
4382
4383 if (GET_MODE (x) != mode)
4384 return FALSE;
4385
4386 code = GET_CODE (x);
4387
4388 return (code == IOR || code == XOR || code == AND);
4389 }
4390
4391 /* Return TRUE for shift operators. */
4392 int
4393 shift_operator (rtx x, enum machine_mode mode)
4394 {
4395 enum rtx_code code;
4396
4397 if (GET_MODE (x) != mode)
4398 return FALSE;
4399
4400 code = GET_CODE (x);
4401
4402 if (code == MULT)
4403 return power_of_two_operand (XEXP (x, 1), mode);
4404
4405 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
4406 || code == ROTATERT);
4407 }
4408
4409 /* Return TRUE if x is EQ or NE. */
4410 int
4411 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
4412 {
4413 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
4414 }
4415
4416 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
4417 int
4418 arm_comparison_operator (rtx x, enum machine_mode mode)
4419 {
4420 return (comparison_operator (x, mode)
4421 && GET_CODE (x) != LTGT
4422 && GET_CODE (x) != UNEQ);
4423 }
4424
4425 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
4426 int
4427 minmax_operator (rtx x, enum machine_mode mode)
4428 {
4429 enum rtx_code code = GET_CODE (x);
4430
4431 if (GET_MODE (x) != mode)
4432 return FALSE;
4433
4434 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
4435 }
4436
4437 /* Return TRUE if this is the condition code register; if we aren't given
4438 a mode, accept any register whose mode is in class MODE_CC. */
4439 int
4440 cc_register (rtx x, enum machine_mode mode)
4441 {
4442 if (mode == VOIDmode)
4443 {
4444 mode = GET_MODE (x);
4445
4446 if (GET_MODE_CLASS (mode) != MODE_CC)
4447 return FALSE;
4448 }
4449
4450 if ( GET_MODE (x) == mode
4451 && GET_CODE (x) == REG
4452 && REGNO (x) == CC_REGNUM)
4453 return TRUE;
4454
4455 return FALSE;
4456 }
4457
4458 /* Return TRUE if this is the condition code register; if we aren't given
4459 a mode, accept any register whose mode is a CC mode indicating a
4460 dominance expression. */
4461 int
4462 dominant_cc_register (rtx x, enum machine_mode mode)
4463 {
4464 if (mode == VOIDmode)
4465 {
4466 mode = GET_MODE (x);
4467
4468 if (GET_MODE_CLASS (mode) != MODE_CC)
4469 return FALSE;
4470 }
4471
4472 if (mode != CC_DNEmode && mode != CC_DEQmode
4473 && mode != CC_DLEmode && mode != CC_DLTmode
4474 && mode != CC_DGEmode && mode != CC_DGTmode
4475 && mode != CC_DLEUmode && mode != CC_DLTUmode
4476 && mode != CC_DGEUmode && mode != CC_DGTUmode)
4477 return FALSE;
4478
4479 return cc_register (x, mode);
4480 }
4481
4482 /* Return TRUE if X references a SYMBOL_REF. */
4483 int
4484 symbol_mentioned_p (rtx x)
4485 {
4486 const char * fmt;
4487 int i;
4488
4489 if (GET_CODE (x) == SYMBOL_REF)
4490 return 1;
4491
4492 fmt = GET_RTX_FORMAT (GET_CODE (x));
4493
4494 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4495 {
4496 if (fmt[i] == 'E')
4497 {
4498 int j;
4499
4500 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4501 if (symbol_mentioned_p (XVECEXP (x, i, j)))
4502 return 1;
4503 }
4504 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
4505 return 1;
4506 }
4507
4508 return 0;
4509 }
4510
4511 /* Return TRUE if X references a LABEL_REF. */
4512 int
4513 label_mentioned_p (rtx x)
4514 {
4515 const char * fmt;
4516 int i;
4517
4518 if (GET_CODE (x) == LABEL_REF)
4519 return 1;
4520
4521 fmt = GET_RTX_FORMAT (GET_CODE (x));
4522 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4523 {
4524 if (fmt[i] == 'E')
4525 {
4526 int j;
4527
4528 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4529 if (label_mentioned_p (XVECEXP (x, i, j)))
4530 return 1;
4531 }
4532 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
4533 return 1;
4534 }
4535
4536 return 0;
4537 }
4538
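/* Return the comparison code for which the first operand of a MIN/MAX
   expression is selected: (smax a b) yields A when A >= B, so SMAX maps
   to GE; similarly SMIN maps to LE, UMIN to LEU and UMAX to GEU.  */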
4539 enum rtx_code
4540 minmax_code (rtx x)
4541 {
4542 enum rtx_code code = GET_CODE (x);
4543
4544 if (code == SMAX)
4545 return GE;
4546 else if (code == SMIN)
4547 return LE;
4548 else if (code == UMIN)
4549 return LEU;
4550 else if (code == UMAX)
4551 return GEU;
4552
4553 abort ();
4554 }
4555
4556 /* Return 1 if memory locations are adjacent. */
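/* For instance, (mem (plus (reg r4) (const_int 8))) and
   (mem (plus (reg r4) (const_int 12))) are adjacent: they use the same
   base register and their offsets differ by exactly one word.  */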
4557 int
4558 adjacent_mem_locations (rtx a, rtx b)
4559 {
4560 if ((GET_CODE (XEXP (a, 0)) == REG
4561 || (GET_CODE (XEXP (a, 0)) == PLUS
4562 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
4563 && (GET_CODE (XEXP (b, 0)) == REG
4564 || (GET_CODE (XEXP (b, 0)) == PLUS
4565 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
4566 {
4567 int val0 = 0, val1 = 0;
4568 int reg0, reg1;
4569
4570 if (GET_CODE (XEXP (a, 0)) == PLUS)
4571 {
4572 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
4573 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
4574 }
4575 else
4576 reg0 = REGNO (XEXP (a, 0));
4577
4578 if (GET_CODE (XEXP (b, 0)) == PLUS)
4579 {
4580 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
4581 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
4582 }
4583 else
4584 reg1 = REGNO (XEXP (b, 0));
4585
4586 /* Don't accept any offset that will require multiple
4587 instructions to handle, since this would cause the
4588 arith_adjacentmem pattern to output an overlong sequence. */
4589 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
4590 return 0;
4591
4592 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
4593 }
4594 return 0;
4595 }
4596
4597 /* Return 1 if OP is a load multiple operation. It is known to be
4598 parallel and the first section will be tested. */
4599 int
4600 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4601 {
4602 HOST_WIDE_INT count = XVECLEN (op, 0);
4603 int dest_regno;
4604 rtx src_addr;
4605 HOST_WIDE_INT i = 1, base = 0;
4606 rtx elt;
4607
4608 if (count <= 1
4609 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
4610 return 0;
4611
4612 /* Check to see if this might be a write-back. */
4613 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
4614 {
4615 i++;
4616 base = 1;
4617
4618 /* Now check it more carefully. */
4619 if (GET_CODE (SET_DEST (elt)) != REG
4620 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
4621 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
4622 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
4623 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
4624 return 0;
4625 }
4626
4627 /* Perform a quick check so we don't blow up below. */
4628 if (count <= i
4629 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
4630 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
4631 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
4632 return 0;
4633
4634 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
4635 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
4636
4637 for (; i < count; i++)
4638 {
4639 elt = XVECEXP (op, 0, i);
4640
4641 if (GET_CODE (elt) != SET
4642 || GET_CODE (SET_DEST (elt)) != REG
4643 || GET_MODE (SET_DEST (elt)) != SImode
4644 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
4645 || GET_CODE (SET_SRC (elt)) != MEM
4646 || GET_MODE (SET_SRC (elt)) != SImode
4647 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
4648 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
4649 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
4650 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
4651 return 0;
4652 }
4653
4654 return 1;
4655 }
4656
4657 /* Return 1 if OP is a store multiple operation. It is known to be
4658 parallel and the first section will be tested. */
4659 int
4660 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4661 {
4662 HOST_WIDE_INT count = XVECLEN (op, 0);
4663 int src_regno;
4664 rtx dest_addr;
4665 HOST_WIDE_INT i = 1, base = 0;
4666 rtx elt;
4667
4668 if (count <= 1
4669 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
4670 return 0;
4671
4672 /* Check to see if this might be a write-back. */
4673 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
4674 {
4675 i++;
4676 base = 1;
4677
4678 /* Now check it more carefully. */
4679 if (GET_CODE (SET_DEST (elt)) != REG
4680 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
4681 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
4682 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
4683 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
4684 return 0;
4685 }
4686
4687 /* Perform a quick check so we don't blow up below. */
4688 if (count <= i
4689 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
4690 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
4691 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
4692 return 0;
4693
4694 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
4695 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
4696
4697 for (; i < count; i++)
4698 {
4699 elt = XVECEXP (op, 0, i);
4700
4701 if (GET_CODE (elt) != SET
4702 || GET_CODE (SET_SRC (elt)) != REG
4703 || GET_MODE (SET_SRC (elt)) != SImode
4704 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
4705 || GET_CODE (SET_DEST (elt)) != MEM
4706 || GET_MODE (SET_DEST (elt)) != SImode
4707 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
4708 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
4709 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
4710 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
4711 return 0;
4712 }
4713
4714 return 1;
4715 }
4716
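/* Analyze a candidate load-multiple sequence of NOPS register loads.  On
   success the sorted destination registers are returned in REGS, the base
   register in BASE and the lowest offset in LOAD_OFFSET.  The return value
   selects the addressing mode used by emit_ldm_seq: 0 means the operands
   cannot be combined, 1..4 correspond to ldmia, ldmib, ldmda and ldmdb,
   and 5 means the offset must first be added to (or subtracted from) the
   base before an ldmia can be used.  */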
4717 int
4718 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
4719 HOST_WIDE_INT *load_offset)
4720 {
4721 int unsorted_regs[4];
4722 HOST_WIDE_INT unsorted_offsets[4];
4723 int order[4];
4724 int base_reg = -1;
4725 int i;
4726
4727 /* Can only handle 2, 3, or 4 insns at present,
4728 though could be easily extended if required. */
4729 if (nops < 2 || nops > 4)
4730 abort ();
4731
4732 /* Loop over the operands and check that the memory references are
4733 suitable (i.e. immediate offsets from the same base register). At
4734 the same time, extract the target register, and the memory
4735 offsets. */
4736 for (i = 0; i < nops; i++)
4737 {
4738 rtx reg;
4739 rtx offset;
4740
4741 /* Convert a subreg of a mem into the mem itself. */
4742 if (GET_CODE (operands[nops + i]) == SUBREG)
4743 operands[nops + i] = alter_subreg (operands + (nops + i));
4744
4745 if (GET_CODE (operands[nops + i]) != MEM)
4746 abort ();
4747
4748 /* Don't reorder volatile memory references; it doesn't seem worth
4749 looking for the case where the order is ok anyway. */
4750 if (MEM_VOLATILE_P (operands[nops + i]))
4751 return 0;
4752
4753 offset = const0_rtx;
4754
4755 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
4756 || (GET_CODE (reg) == SUBREG
4757 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4758 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
4759 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
4760 == REG)
4761 || (GET_CODE (reg) == SUBREG
4762 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4763 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
4764 == CONST_INT)))
4765 {
4766 if (i == 0)
4767 {
4768 base_reg = REGNO (reg);
4769 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
4770 ? REGNO (operands[i])
4771 : REGNO (SUBREG_REG (operands[i])));
4772 order[0] = 0;
4773 }
4774 else
4775 {
4776 if (base_reg != (int) REGNO (reg))
4777 /* Not addressed from the same base register. */
4778 return 0;
4779
4780 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
4781 ? REGNO (operands[i])
4782 : REGNO (SUBREG_REG (operands[i])));
4783 if (unsorted_regs[i] < unsorted_regs[order[0]])
4784 order[0] = i;
4785 }
4786
4787 /* If it isn't an integer register, or if it overwrites the
4788 base register but isn't the last insn in the list, then
4789 we can't do this. */
4790 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
4791 || (i != nops - 1 && unsorted_regs[i] == base_reg))
4792 return 0;
4793
4794 unsorted_offsets[i] = INTVAL (offset);
4795 }
4796 else
4797 /* Not a suitable memory address. */
4798 return 0;
4799 }
4800
4801 /* All the useful information has now been extracted from the
4802 operands into unsorted_regs and unsorted_offsets; additionally,
4803 order[0] has been set to the lowest numbered register in the
4804 list. Sort the registers into order, and check that the memory
4805 offsets are ascending and adjacent. */
4806
4807 for (i = 1; i < nops; i++)
4808 {
4809 int j;
4810
4811 order[i] = order[i - 1];
4812 for (j = 0; j < nops; j++)
4813 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
4814 && (order[i] == order[i - 1]
4815 || unsorted_regs[j] < unsorted_regs[order[i]]))
4816 order[i] = j;
4817
4818 /* Have we found a suitable register? If not, one must be used more
4819 than once. */
4820 if (order[i] == order[i - 1])
4821 return 0;
4822
4823 /* Is the memory address adjacent and ascending? */
4824 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
4825 return 0;
4826 }
4827
4828 if (base)
4829 {
4830 *base = base_reg;
4831
4832 for (i = 0; i < nops; i++)
4833 regs[i] = unsorted_regs[order[i]];
4834
4835 *load_offset = unsorted_offsets[order[0]];
4836 }
4837
4838 if (unsorted_offsets[order[0]] == 0)
4839 return 1; /* ldmia */
4840
4841 if (unsorted_offsets[order[0]] == 4)
4842 return 2; /* ldmib */
4843
4844 if (unsorted_offsets[order[nops - 1]] == 0)
4845 return 3; /* ldmda */
4846
4847 if (unsorted_offsets[order[nops - 1]] == -4)
4848 return 4; /* ldmdb */
4849
4850 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
4851 if the offset isn't small enough. The reason 2 ldrs are faster
4852 is because these ARMs are able to do more than one cache access
4853 in a single cycle. The ARM9 and StrongARM have Harvard caches,
4854 whilst the ARM8 has a double bandwidth cache. This means that
4855 these cores can do both an instruction fetch and a data fetch in
4856 a single cycle, so the trick of calculating the address into a
4857 scratch register (one of the result regs) and then doing a load
4858 multiple actually becomes slower (and no smaller in code size).
4859 That is the transformation
4860
4861 ldr rd1, [rbase + offset]
4862 ldr rd2, [rbase + offset + 4]
4863
4864 to
4865
4866 add rd1, rbase, offset
4867 ldmia rd1, {rd1, rd2}
4868
4869 produces worse code -- '3 cycles + any stalls on rd2' instead of
4870 '2 cycles + any stalls on rd2'. On ARMs with only one cache
4871 access per cycle, the first sequence could never complete in less
4872 than 6 cycles, whereas the ldm sequence would only take 5 and
4873 would make better use of sequential accesses if not hitting the
4874 cache.
4875
4876 We cheat here and test 'arm_ld_sched' which we currently know to
4877 only be true for the ARM8, ARM9 and StrongARM. If this ever
4878 changes, then the test below needs to be reworked. */
4879 if (nops == 2 && arm_ld_sched)
4880 return 0;
4881
4882 /* Can't do it without setting up the offset, only do this if it takes
4883 no more than one insn. */
4884 return (const_ok_for_arm (unsorted_offsets[order[0]])
4885 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
4886 }
4887
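/* Output the assembler for the load-multiple peephole described by
   OPERANDS, using the classification from load_multiple_sequence.  */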
4888 const char *
4889 emit_ldm_seq (rtx *operands, int nops)
4890 {
4891 int regs[4];
4892 int base_reg;
4893 HOST_WIDE_INT offset;
4894 char buf[100];
4895 int i;
4896
4897 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
4898 {
4899 case 1:
4900 strcpy (buf, "ldm%?ia\t");
4901 break;
4902
4903 case 2:
4904 strcpy (buf, "ldm%?ib\t");
4905 break;
4906
4907 case 3:
4908 strcpy (buf, "ldm%?da\t");
4909 break;
4910
4911 case 4:
4912 strcpy (buf, "ldm%?db\t");
4913 break;
4914
4915 case 5:
4916 if (offset >= 0)
4917 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4918 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4919 (long) offset);
4920 else
4921 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4922 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4923 (long) -offset);
4924 output_asm_insn (buf, operands);
4925 base_reg = regs[0];
4926 strcpy (buf, "ldm%?ia\t");
4927 break;
4928
4929 default:
4930 abort ();
4931 }
4932
4933 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
4934 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
4935
4936 for (i = 1; i < nops; i++)
4937 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
4938 reg_names[regs[i]]);
4939
4940 strcat (buf, "}\t%@ phole ldm");
4941
4942 output_asm_insn (buf, operands);
4943 return "";
4944 }
4945
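/* Analyze a candidate store-multiple sequence of NOPS register stores;
   the analysis mirrors load_multiple_sequence.  Returns 0 if the operands
   cannot be combined, or 1..4 for stmia, stmib, stmda and stmdb.  */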
4946 int
4947 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
4948 HOST_WIDE_INT * load_offset)
4949 {
4950 int unsorted_regs[4];
4951 HOST_WIDE_INT unsorted_offsets[4];
4952 int order[4];
4953 int base_reg = -1;
4954 int i;
4955
4956 /* Can only handle 2, 3, or 4 insns at present, though could be easily
4957 extended if required. */
4958 if (nops < 2 || nops > 4)
4959 abort ();
4960
4961 /* Loop over the operands and check that the memory references are
4962 suitable (i.e. immediate offsets from the same base register). At
4963 the same time, extract the target register, and the memory
4964 offsets. */
4965 for (i = 0; i < nops; i++)
4966 {
4967 rtx reg;
4968 rtx offset;
4969
4970 /* Convert a subreg of a mem into the mem itself. */
4971 if (GET_CODE (operands[nops + i]) == SUBREG)
4972 operands[nops + i] = alter_subreg (operands + (nops + i));
4973
4974 if (GET_CODE (operands[nops + i]) != MEM)
4975 abort ();
4976
4977 /* Don't reorder volatile memory references; it doesn't seem worth
4978 looking for the case where the order is ok anyway. */
4979 if (MEM_VOLATILE_P (operands[nops + i]))
4980 return 0;
4981
4982 offset = const0_rtx;
4983
4984 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
4985 || (GET_CODE (reg) == SUBREG
4986 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4987 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
4988 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
4989 == REG)
4990 || (GET_CODE (reg) == SUBREG
4991 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4992 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
4993 == CONST_INT)))
4994 {
4995 if (i == 0)
4996 {
4997 base_reg = REGNO (reg);
4998 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
4999 ? REGNO (operands[i])
5000 : REGNO (SUBREG_REG (operands[i])));
5001 order[0] = 0;
5002 }
5003 else
5004 {
5005 if (base_reg != (int) REGNO (reg))
5006 /* Not addressed from the same base register. */
5007 return 0;
5008
5009 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5010 ? REGNO (operands[i])
5011 : REGNO (SUBREG_REG (operands[i])));
5012 if (unsorted_regs[i] < unsorted_regs[order[0]])
5013 order[0] = i;
5014 }
5015
5016 /* If it isn't an integer register, then we can't do this. */
5017 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5018 return 0;
5019
5020 unsorted_offsets[i] = INTVAL (offset);
5021 }
5022 else
5023 /* Not a suitable memory address. */
5024 return 0;
5025 }
5026
5027 /* All the useful information has now been extracted from the
5028 operands into unsorted_regs and unsorted_offsets; additionally,
5029 order[0] has been set to the lowest numbered register in the
5030 list. Sort the registers into order, and check that the memory
5031 offsets are ascending and adjacent. */
5032
5033 for (i = 1; i < nops; i++)
5034 {
5035 int j;
5036
5037 order[i] = order[i - 1];
5038 for (j = 0; j < nops; j++)
5039 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5040 && (order[i] == order[i - 1]
5041 || unsorted_regs[j] < unsorted_regs[order[i]]))
5042 order[i] = j;
5043
5044 /* Have we found a suitable register? If not, one must be used more
5045 than once. */
5046 if (order[i] == order[i - 1])
5047 return 0;
5048
5049 /* Is the memory address adjacent and ascending? */
5050 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5051 return 0;
5052 }
5053
5054 if (base)
5055 {
5056 *base = base_reg;
5057
5058 for (i = 0; i < nops; i++)
5059 regs[i] = unsorted_regs[order[i]];
5060
5061 *load_offset = unsorted_offsets[order[0]];
5062 }
5063
5064 if (unsorted_offsets[order[0]] == 0)
5065 return 1; /* stmia */
5066
5067 if (unsorted_offsets[order[0]] == 4)
5068 return 2; /* stmib */
5069
5070 if (unsorted_offsets[order[nops - 1]] == 0)
5071 return 3; /* stmda */
5072
5073 if (unsorted_offsets[order[nops - 1]] == -4)
5074 return 4; /* stmdb */
5075
5076 return 0;
5077 }
5078
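/* Output the assembler for the store-multiple peephole described by
   OPERANDS, using the classification from store_multiple_sequence.  */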
5079 const char *
5080 emit_stm_seq (rtx *operands, int nops)
5081 {
5082 int regs[4];
5083 int base_reg;
5084 HOST_WIDE_INT offset;
5085 char buf[100];
5086 int i;
5087
5088 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5089 {
5090 case 1:
5091 strcpy (buf, "stm%?ia\t");
5092 break;
5093
5094 case 2:
5095 strcpy (buf, "stm%?ib\t");
5096 break;
5097
5098 case 3:
5099 strcpy (buf, "stm%?da\t");
5100 break;
5101
5102 case 4:
5103 strcpy (buf, "stm%?db\t");
5104 break;
5105
5106 default:
5107 abort ();
5108 }
5109
5110 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5111 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5112
5113 for (i = 1; i < nops; i++)
5114 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5115 reg_names[regs[i]]);
5116
5117 strcat (buf, "}\t%@ phole stm");
5118
5119 output_asm_insn (buf, operands);
5120 return "";
5121 }
5122
5123 int
5124 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5125 {
5126 if (GET_CODE (op) != PARALLEL
5127 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5128 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5129 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5130 return 0;
5131
5132 return 1;
5133 }
5134 \f
5135 /* Routines for use in generating RTL. */
5136
5137 rtx
5138 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5139 int write_back, int unchanging_p, int in_struct_p,
5140 int scalar_p)
5141 {
5142 int i = 0, j;
5143 rtx result;
5144 int sign = up ? 1 : -1;
5145 rtx mem;
5146
5147 /* XScale has load-store double instructions, but they have stricter
5148 alignment requirements than load-store multiple, so we cannot
5149 use them.
5150
5151 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5152 the pipeline until completion.
5153
5154 NREGS CYCLES
5155 1 3
5156 2 4
5157 3 5
5158 4 6
5159
5160 An ldr instruction takes 1-3 cycles, but does not block the
5161 pipeline.
5162
5163 NREGS CYCLES
5164 1 1-3
5165 2 2-6
5166 3 3-9
5167 4 4-12
5168
5169 Best case ldr will always win. However, the more ldr instructions
5170 we issue, the less likely we are to be able to schedule them well.
5171 Using ldr instructions also increases code size.
5172
5173 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5174 for counts of 3 or 4 regs. */
5175 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5176 {
5177 rtx seq;
5178
5179 start_sequence ();
5180
5181 for (i = 0; i < count; i++)
5182 {
5183 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5184 RTX_UNCHANGING_P (mem) = unchanging_p;
5185 MEM_IN_STRUCT_P (mem) = in_struct_p;
5186 MEM_SCALAR_P (mem) = scalar_p;
5187 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5188 }
5189
5190 if (write_back)
5191 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5192
5193 seq = get_insns ();
5194 end_sequence ();
5195
5196 return seq;
5197 }
5198
5199 result = gen_rtx_PARALLEL (VOIDmode,
5200 rtvec_alloc (count + (write_back ? 1 : 0)));
5201 if (write_back)
5202 {
5203 XVECEXP (result, 0, 0)
5204 = gen_rtx_SET (GET_MODE (from), from,
5205 plus_constant (from, count * 4 * sign));
5206 i = 1;
5207 count++;
5208 }
5209
5210 for (j = 0; i < count; i++, j++)
5211 {
5212 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5213 RTX_UNCHANGING_P (mem) = unchanging_p;
5214 MEM_IN_STRUCT_P (mem) = in_struct_p;
5215 MEM_SCALAR_P (mem) = scalar_p;
5216 XVECEXP (result, 0, i)
5217 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5218 }
5219
5220 return result;
5221 }
5222
5223 rtx
5224 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5225 int write_back, int unchanging_p, int in_struct_p,
5226 int scalar_p)
5227 {
5228 int i = 0, j;
5229 rtx result;
5230 int sign = up ? 1 : -1;
5231 rtx mem;
5232
5233 /* See arm_gen_load_multiple for discussion of
5234 the pros/cons of ldm/stm usage for XScale. */
5235 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5236 {
5237 rtx seq;
5238
5239 start_sequence ();
5240
5241 for (i = 0; i < count; i++)
5242 {
5243 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
5244 RTX_UNCHANGING_P (mem) = unchanging_p;
5245 MEM_IN_STRUCT_P (mem) = in_struct_p;
5246 MEM_SCALAR_P (mem) = scalar_p;
5247 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5248 }
5249
5250 if (write_back)
5251 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5252
5253 seq = get_insns ();
5254 end_sequence ();
5255
5256 return seq;
5257 }
5258
5259 result = gen_rtx_PARALLEL (VOIDmode,
5260 rtvec_alloc (count + (write_back ? 1 : 0)));
5261 if (write_back)
5262 {
5263 XVECEXP (result, 0, 0)
5264 = gen_rtx_SET (GET_MODE (to), to,
5265 plus_constant (to, count * 4 * sign));
5266 i = 1;
5267 count++;
5268 }
5269
5270 for (j = 0; i < count; i++, j++)
5271 {
5272 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
5273 RTX_UNCHANGING_P (mem) = unchanging_p;
5274 MEM_IN_STRUCT_P (mem) = in_struct_p;
5275 MEM_SCALAR_P (mem) = scalar_p;
5276
5277 XVECEXP (result, 0, i)
5278 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5279 }
5280
5281 return result;
5282 }
5283
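/* Expand a block copy (used by the movstrqi pattern).  OPERANDS[0] and
   OPERANDS[1] are the destination and source memory references,
   OPERANDS[2] the length in bytes and OPERANDS[3] the alignment.
   Returns 1 if the copy was expanded inline, or 0 to make the caller
   fall back to a library call (lengths above 64 bytes, non-constant
   lengths or alignments, or alignments that are not a multiple of
   four bytes).  */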
5284 int
5285 arm_gen_movstrqi (rtx *operands)
5286 {
5287 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5288 int i;
5289 rtx src, dst;
5290 rtx st_src, st_dst, fin_src, fin_dst;
5291 rtx part_bytes_reg = NULL;
5292 rtx mem;
5293 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
5294 int dst_scalar_p, src_scalar_p;
5295
5296 if (GET_CODE (operands[2]) != CONST_INT
5297 || GET_CODE (operands[3]) != CONST_INT
5298 || INTVAL (operands[2]) > 64
5299 || INTVAL (operands[3]) & 3)
5300 return 0;
5301
5302 st_dst = XEXP (operands[0], 0);
5303 st_src = XEXP (operands[1], 0);
5304
5305 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
5306 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
5307 dst_scalar_p = MEM_SCALAR_P (operands[0]);
5308 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
5309 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
5310 src_scalar_p = MEM_SCALAR_P (operands[1]);
5311
5312 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
5313 fin_src = src = copy_to_mode_reg (SImode, st_src);
5314
5315 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5316 out_words_to_go = INTVAL (operands[2]) / 4;
5317 last_bytes = INTVAL (operands[2]) & 3;
5318
5319 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5320 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5321
5322 for (i = 0; in_words_to_go >= 2; i+=4)
5323 {
5324 if (in_words_to_go > 4)
5325 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5326 src_unchanging_p,
5327 src_in_struct_p,
5328 src_scalar_p));
5329 else
5330 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5331 FALSE, src_unchanging_p,
5332 src_in_struct_p, src_scalar_p));
5333
5334 if (out_words_to_go)
5335 {
5336 if (out_words_to_go > 4)
5337 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5338 dst_unchanging_p,
5339 dst_in_struct_p,
5340 dst_scalar_p));
5341 else if (out_words_to_go != 1)
5342 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5343 dst, TRUE,
5344 (last_bytes == 0
5345 ? FALSE : TRUE),
5346 dst_unchanging_p,
5347 dst_in_struct_p,
5348 dst_scalar_p));
5349 else
5350 {
5351 mem = gen_rtx_MEM (SImode, dst);
5352 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5353 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5354 MEM_SCALAR_P (mem) = dst_scalar_p;
5355 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5356 if (last_bytes != 0)
5357 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5358 }
5359 }
5360
5361 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5362 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5363 }
5364
5365 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5366 if (out_words_to_go)
5367 {
5368 rtx sreg;
5369
5370 mem = gen_rtx_MEM (SImode, src);
5371 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5372 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5373 MEM_SCALAR_P (mem) = src_scalar_p;
5374 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
5375 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
5376
5377 mem = gen_rtx_MEM (SImode, dst);
5378 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5379 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5380 MEM_SCALAR_P (mem) = dst_scalar_p;
5381 emit_move_insn (mem, sreg);
5382 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
5383 in_words_to_go--;
5384
5385 if (in_words_to_go) /* Sanity check */
5386 abort ();
5387 }
5388
5389 if (in_words_to_go)
5390 {
5391 if (in_words_to_go < 0)
5392 abort ();
5393
5394 mem = gen_rtx_MEM (SImode, src);
5395 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5396 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5397 MEM_SCALAR_P (mem) = src_scalar_p;
5398 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5399 }
5400
5401 if (last_bytes && part_bytes_reg == NULL)
5402 abort ();
5403
5404 if (BYTES_BIG_ENDIAN && last_bytes)
5405 {
5406 rtx tmp = gen_reg_rtx (SImode);
5407
5408 /* The bytes we want are in the top end of the word. */
5409 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5410 GEN_INT (8 * (4 - last_bytes))));
5411 part_bytes_reg = tmp;
5412
5413 while (last_bytes)
5414 {
5415 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
5416 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5417 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5418 MEM_SCALAR_P (mem) = dst_scalar_p;
5419 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5420
5421 if (--last_bytes)
5422 {
5423 tmp = gen_reg_rtx (SImode);
5424 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5425 part_bytes_reg = tmp;
5426 }
5427 }
5428
5429 }
5430 else
5431 {
5432 if (last_bytes > 1)
5433 {
5434 mem = gen_rtx_MEM (HImode, dst);
5435 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5436 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5437 MEM_SCALAR_P (mem) = dst_scalar_p;
5438 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5439 last_bytes -= 2;
5440 if (last_bytes)
5441 {
5442 rtx tmp = gen_reg_rtx (SImode);
5443
5444 emit_insn (gen_addsi3 (dst, dst, GEN_INT (2)));
5445 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5446 part_bytes_reg = tmp;
5447 }
5448 }
5449
5450 if (last_bytes)
5451 {
5452 mem = gen_rtx_MEM (QImode, dst);
5453 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5454 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5455 MEM_SCALAR_P (mem) = dst_scalar_p;
5456 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5457 }
5458 }
5459
5460 return 1;
5461 }
5462
5463 /* Generate a memory reference for a half word, such that it will be loaded
5464 into the top 16 bits of the word. We can assume that the address is
5465 known to be alignable and of the form reg, or plus (reg, const). */
5466
5467 rtx
5468 arm_gen_rotated_half_load (rtx memref)
5469 {
5470 HOST_WIDE_INT offset = 0;
5471 rtx base = XEXP (memref, 0);
5472
5473 if (GET_CODE (base) == PLUS)
5474 {
5475 offset = INTVAL (XEXP (base, 1));
5476 base = XEXP (base, 0);
5477 }
5478
5479 /* If we aren't allowed to generate unaligned addresses, then fail. */
5480 if (TARGET_MMU_TRAPS
5481 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
5482 return NULL;
5483
5484 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5485
5486 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5487 return base;
5488
5489 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5490 }
5491
5492 /* Select a dominance comparison mode if possible for a test of the general
5493 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5494 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5495 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5496 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5497 In all cases OP will be either EQ or NE, but we don't need to know which
5498 here. If we are unable to support a dominance comparison we return
5499 CC mode. This will then fail to match for the RTL expressions that
5500 generate this call. */
5501 enum machine_mode
5502 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5503 {
5504 enum rtx_code cond1, cond2;
5505 int swapped = 0;
5506
5507 /* Currently we will probably get the wrong result if the individual
5508 comparisons are not simple. This also ensures that it is safe to
5509 reverse a comparison if necessary. */
5510 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5511 != CCmode)
5512 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5513 != CCmode))
5514 return CCmode;
5515
5516 /* The if_then_else variant of this tests the second condition if the
5517 first passes, but is true if the first fails. Reverse the first
5518 condition to get a true "inclusive-or" expression. */
5519 if (cond_or == DOM_CC_NX_OR_Y)
5520 cond1 = reverse_condition (cond1);
5521
5522 /* If the comparisons are not equal, and one doesn't dominate the other,
5523 then we can't do this. */
5524 if (cond1 != cond2
5525 && !comparison_dominates_p (cond1, cond2)
5526 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5527 return CCmode;
5528
5529 if (swapped)
5530 {
5531 enum rtx_code temp = cond1;
5532 cond1 = cond2;
5533 cond2 = temp;
5534 }
5535
5536 switch (cond1)
5537 {
5538 case EQ:
5539 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5540 return CC_DEQmode;
5541
5542 switch (cond2)
5543 {
5544 case LE: return CC_DLEmode;
5545 case LEU: return CC_DLEUmode;
5546 case GE: return CC_DGEmode;
5547 case GEU: return CC_DGEUmode;
5548 default: break;
5549 }
5550
5551 break;
5552
5553 case LT:
5554 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
5555 return CC_DLTmode;
5556 if (cond2 == LE)
5557 return CC_DLEmode;
5558 if (cond2 == NE)
5559 return CC_DNEmode;
5560 break;
5561
5562 case GT:
5563 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
5564 return CC_DGTmode;
5565 if (cond2 == GE)
5566 return CC_DGEmode;
5567 if (cond2 == NE)
5568 return CC_DNEmode;
5569 break;
5570
5571 case LTU:
5572 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
5573 return CC_DLTUmode;
5574 if (cond2 == LEU)
5575 return CC_DLEUmode;
5576 if (cond2 == NE)
5577 return CC_DNEmode;
5578 break;
5579
5580 case GTU:
5581 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
5582 return CC_DGTUmode;
5583 if (cond2 == GEU)
5584 return CC_DGEUmode;
5585 if (cond2 == NE)
5586 return CC_DNEmode;
5587 break;
5588
5589 /* The remaining cases only occur when both comparisons are the
5590 same. */
5591 case NE:
5592 return CC_DNEmode;
5593
5594 case LE:
5595 return CC_DLEmode;
5596
5597 case GE:
5598 return CC_DGEmode;
5599
5600 case LEU:
5601 return CC_DLEUmode;
5602
5603 case GEU:
5604 return CC_DGEUmode;
5605
5606 default:
5607 break;
5608 }
5609
5610 abort ();
5611 }
5612
5613 enum machine_mode
5614 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
5615 {
5616 /* All floating point compares return CCFP if it is an equality
5617 comparison, and CCFPE otherwise. */
5618 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5619 {
5620 switch (op)
5621 {
5622 case EQ:
5623 case NE:
5624 case UNORDERED:
5625 case ORDERED:
5626 case UNLT:
5627 case UNLE:
5628 case UNGT:
5629 case UNGE:
5630 case UNEQ:
5631 case LTGT:
5632 return CCFPmode;
5633
5634 case LT:
5635 case LE:
5636 case GT:
5637 case GE:
5638 if (TARGET_CIRRUS)
5639 return CCFPmode;
5640 return CCFPEmode;
5641
5642 default:
5643 abort ();
5644 }
5645 }
5646
5647 /* A compare with a shifted operand. Because of canonicalization, the
5648 comparison will have to be swapped when we emit the assembler. */
5649 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
5650 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
5651 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
5652 || GET_CODE (x) == ROTATERT))
5653 return CC_SWPmode;
5654
5655 /* This is a special case that is used by combine to allow a
5656 comparison of a shifted byte load to be split into a zero-extend
5657 followed by a comparison of the shifted integer (only valid for
5658 equalities and unsigned inequalities). */
5659 if (GET_MODE (x) == SImode
5660 && GET_CODE (x) == ASHIFT
5661 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
5662 && GET_CODE (XEXP (x, 0)) == SUBREG
5663 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
5664 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
5665 && (op == EQ || op == NE
5666 || op == GEU || op == GTU || op == LTU || op == LEU)
5667 && GET_CODE (y) == CONST_INT)
5668 return CC_Zmode;
5669
5670 /* A construct for a conditional compare: if the false arm contains
5671 0, then both conditions must be true; otherwise either condition
5672 must be true. Not all conditions are possible, so CCmode is
5673 returned if it can't be done. */
5674 if (GET_CODE (x) == IF_THEN_ELSE
5675 && (XEXP (x, 2) == const0_rtx
5676 || XEXP (x, 2) == const1_rtx)
5677 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
5678 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
5679 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5680 INTVAL (XEXP (x, 2)));
5681
5682 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
5683 if (GET_CODE (x) == AND
5684 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
5685 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
5686 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5687 DOM_CC_X_AND_Y);
5688
5689 if (GET_CODE (x) == IOR
5690 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
5691 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
5692 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5693 DOM_CC_X_OR_Y);
5694
5695 /* An operation (on Thumb) where we want to test for a single bit.
5696 This is done by shifting that bit up into the top bit of a
5697 scratch register; we can then branch on the sign bit. */
5698 if (TARGET_THUMB
5699 && GET_MODE (x) == SImode
5700 && (op == EQ || op == NE)
5701 && (GET_CODE (x) == ZERO_EXTRACT))
5702 return CC_Nmode;
5703
5704 /* For an operation that sets the condition codes as a side-effect, the
5705 V flag is not set correctly, so we can only use comparisons where
5706 this doesn't matter. (For LT and GE we can use "mi" and "pl"
5707 instead.) */
5708 if (GET_MODE (x) == SImode
5709 && y == const0_rtx
5710 && (op == EQ || op == NE || op == LT || op == GE)
5711 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
5712 || GET_CODE (x) == AND || GET_CODE (x) == IOR
5713 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
5714 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
5715 || GET_CODE (x) == LSHIFTRT
5716 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
5717 || GET_CODE (x) == ROTATERT
5718 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
5719 return CC_NOOVmode;
5720
5721 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
5722 return CC_Zmode;
5723
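  /* A test of the form (ltu (plus a b) a) or (geu (plus a b) a) compares an
     addition against one of its own operands -- the usual unsigned overflow
     idiom "if (a + b < a)" -- so only the carry flag matters and CC_Cmode
     is enough.  */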
5724 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
5725 && GET_CODE (x) == PLUS
5726 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
5727 return CC_Cmode;
5728
5729 return CCmode;
5730 }
5731
5732 /* X and Y are two things to compare using CODE. Emit the compare insn and
5733 return the rtx for register 0 in the proper mode. The mode is chosen by
5734 SELECT_CC_MODE, so floating point compares need no special handling here. */
5735 rtx
5736 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
5737 {
5738 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
5739 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
5740
5741 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
5742 gen_rtx_COMPARE (mode, x, y)));
5743
5744 return cc_reg;
5745 }
5746
5747 /* Generate a sequence of insns that will generate the correct return
5748 address mask depending on the physical architecture that the program
5749 is running on. */
5750 rtx
5751 arm_gen_return_addr_mask (void)
5752 {
5753 rtx reg = gen_reg_rtx (Pmode);
5754
5755 emit_insn (gen_return_addr_mask (reg));
5756 return reg;
5757 }
5758
5759 void
5760 arm_reload_in_hi (rtx *operands)
5761 {
5762 rtx ref = operands[1];
5763 rtx base, scratch;
5764 HOST_WIDE_INT offset = 0;
5765
5766 if (GET_CODE (ref) == SUBREG)
5767 {
5768 offset = SUBREG_BYTE (ref);
5769 ref = SUBREG_REG (ref);
5770 }
5771
5772 if (GET_CODE (ref) == REG)
5773 {
5774 /* We have a pseudo which has been spilt onto the stack; there
5775 are two cases here: the first where there is a simple
5776 stack-slot replacement and a second where the stack-slot is
5777 out of range, or is used as a subreg. */
5778 if (reg_equiv_mem[REGNO (ref)])
5779 {
5780 ref = reg_equiv_mem[REGNO (ref)];
5781 base = find_replacement (&XEXP (ref, 0));
5782 }
5783 else
5784 /* The slot is out of range, or was dressed up in a SUBREG. */
5785 base = reg_equiv_address[REGNO (ref)];
5786 }
5787 else
5788 base = find_replacement (&XEXP (ref, 0));
5789
5790 /* Handle the case where the address is too complex to be offset by 1. */
5791 if (GET_CODE (base) == MINUS
5792 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5793 {
5794 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5795
5796 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5797 base = base_plus;
5798 }
5799 else if (GET_CODE (base) == PLUS)
5800 {
5801 /* The addend must be CONST_INT, or we would have dealt with it above. */
5802 HOST_WIDE_INT hi, lo;
5803
5804 offset += INTVAL (XEXP (base, 1));
5805 base = XEXP (base, 0);
5806
5807 /* Rework the address into a legal sequence of insns. */
5808 /* Valid range for lo is -4095 -> 4095 */
5809 lo = (offset >= 0
5810 ? (offset & 0xfff)
5811 : -((-offset) & 0xfff));
5812
5813 /* Corner case: if lo is the max offset then we would be out of range
5814 once we have added the additional 1 below, so bump the msb into the
5815 pre-loading insn(s). */
5816 if (lo == 4095)
5817 lo &= 0x7ff;
5818
5819 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5820 ^ (HOST_WIDE_INT) 0x80000000)
5821 - (HOST_WIDE_INT) 0x80000000);
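      /* Worked example with illustrative numbers: for offset = 0x1234,
	 lo = 0x234 and hi = 0x1000, so the addsi3 below adds 0x1000 to the
	 base and the two byte loads use offsets 0x234 and 0x235.  For the
	 corner case offset = 4095, lo is reduced from 4095 to 2047 and hi
	 becomes 2048, so that lo + 1 still fits the byte-load range.  */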
5822
5823 if (hi + lo != offset)
5824 abort ();
5825
5826 if (hi != 0)
5827 {
5828 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5829
5830 /* Get the base address; addsi3 knows how to handle constants
5831 that require more than one insn. */
5832 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
5833 base = base_plus;
5834 offset = lo;
5835 }
5836 }
5837
5838 /* Operands[2] may overlap operands[0] (though it won't overlap
5839 operands[1]); that's why we asked for a DImode reg -- so we can
5840 use the half that does not overlap. */
5841 if (REGNO (operands[2]) == REGNO (operands[0]))
5842 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5843 else
5844 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5845
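  /* Roughly speaking, the sequence emitted below is, for little-endian:
	ldrb	scratch, [base, #offset]
	ldrb	op0, [base, #offset + 1]
	orr	op0, scratch, op0, lsl #8
     and the mirror image for big-endian, recombining the two byte loads
     into a half-word in the low bits of operands[0].  */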
5846 emit_insn (gen_zero_extendqisi2 (scratch,
5847 gen_rtx_MEM (QImode,
5848 plus_constant (base,
5849 offset))));
5850 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
5851 gen_rtx_MEM (QImode,
5852 plus_constant (base,
5853 offset + 1))));
5854 if (!BYTES_BIG_ENDIAN)
5855 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
5856 gen_rtx_IOR (SImode,
5857 gen_rtx_ASHIFT
5858 (SImode,
5859 gen_rtx_SUBREG (SImode, operands[0], 0),
5860 GEN_INT (8)),
5861 scratch)));
5862 else
5863 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
5864 gen_rtx_IOR (SImode,
5865 gen_rtx_ASHIFT (SImode, scratch,
5866 GEN_INT (8)),
5867 gen_rtx_SUBREG (SImode, operands[0],
5868 0))));
5869 }
5870
5871 /* Handle storing a half-word to memory during reload by synthesizing as two
5872 byte stores. Take care not to clobber the input values until after we
5873 have moved them somewhere safe. This code assumes that if the DImode
5874 scratch in operands[2] overlaps either the input value or output address
5875 in some way, then that value must die in this insn (we absolutely need
5876 two scratch registers for some corner cases). */
5877 void
5878 arm_reload_out_hi (rtx *operands)
5879 {
5880 rtx ref = operands[0];
5881 rtx outval = operands[1];
5882 rtx base, scratch;
5883 HOST_WIDE_INT offset = 0;
5884
5885 if (GET_CODE (ref) == SUBREG)
5886 {
5887 offset = SUBREG_BYTE (ref);
5888 ref = SUBREG_REG (ref);
5889 }
5890
5891 if (GET_CODE (ref) == REG)
5892 {
5893 /* We have a pseudo which has been spilt onto the stack; there
5894 are two cases here: the first where there is a simple
5895 stack-slot replacement and a second where the stack-slot is
5896 out of range, or is used as a subreg. */
5897 if (reg_equiv_mem[REGNO (ref)])
5898 {
5899 ref = reg_equiv_mem[REGNO (ref)];
5900 base = find_replacement (&XEXP (ref, 0));
5901 }
5902 else
5903 /* The slot is out of range, or was dressed up in a SUBREG. */
5904 base = reg_equiv_address[REGNO (ref)];
5905 }
5906 else
5907 base = find_replacement (&XEXP (ref, 0));
5908
5909 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5910
5911 /* Handle the case where the address is too complex to be offset by 1. */
5912 if (GET_CODE (base) == MINUS
5913 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5914 {
5915 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5916
5917 /* Be careful not to destroy OUTVAL. */
5918 if (reg_overlap_mentioned_p (base_plus, outval))
5919 {
5920 /* Updating base_plus might destroy outval, see if we can
5921 swap the scratch and base_plus. */
5922 if (!reg_overlap_mentioned_p (scratch, outval))
5923 {
5924 rtx tmp = scratch;
5925 scratch = base_plus;
5926 base_plus = tmp;
5927 }
5928 else
5929 {
5930 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5931
5932 /* Be conservative and copy OUTVAL into the scratch now;
5933 this should only be necessary if outval is a subreg
5934 of something larger than a word. */
5935 /* XXX Might this clobber base? I can't see how it can,
5936 since scratch is known to overlap with OUTVAL, and
5937 must be wider than a word. */
5938 emit_insn (gen_movhi (scratch_hi, outval));
5939 outval = scratch_hi;
5940 }
5941 }
5942
5943 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5944 base = base_plus;
5945 }
5946 else if (GET_CODE (base) == PLUS)
5947 {
5948 /* The addend must be CONST_INT, or we would have dealt with it above. */
5949 HOST_WIDE_INT hi, lo;
5950
5951 offset += INTVAL (XEXP (base, 1));
5952 base = XEXP (base, 0);
5953
5954 /* Rework the address into a legal sequence of insns. */
5955 /* Valid range for lo is -4095 -> 4095 */
5956 lo = (offset >= 0
5957 ? (offset & 0xfff)
5958 : -((-offset) & 0xfff));
5959
5960 /* Corner case: if lo is the max offset then we would be out of range
5961 once we have added the additional 1 below, so bump the msb into the
5962 pre-loading insn(s). */
5963 if (lo == 4095)
5964 lo &= 0x7ff;
5965
5966 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5967 ^ (HOST_WIDE_INT) 0x80000000)
5968 - (HOST_WIDE_INT) 0x80000000);
5969
5970 if (hi + lo != offset)
5971 abort ();
5972
5973 if (hi != 0)
5974 {
5975 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5976
5977 /* Be careful not to destroy OUTVAL. */
5978 if (reg_overlap_mentioned_p (base_plus, outval))
5979 {
5980 /* Updating base_plus might destroy outval, see if we
5981 can swap the scratch and base_plus. */
5982 if (!reg_overlap_mentioned_p (scratch, outval))
5983 {
5984 rtx tmp = scratch;
5985 scratch = base_plus;
5986 base_plus = tmp;
5987 }
5988 else
5989 {
5990 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5991
5992 /* Be conservative and copy outval into scratch now;
5993 this should only be necessary if outval is a
5994 subreg of something larger than a word. */
5995 /* XXX Might this clobber base? I can't see how it
5996 can, since scratch is known to overlap with
5997 outval. */
5998 emit_insn (gen_movhi (scratch_hi, outval));
5999 outval = scratch_hi;
6000 }
6001 }
6002
6003 /* Get the base address; addsi3 knows how to handle constants
6004 that require more than one insn. */
6005 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6006 base = base_plus;
6007 offset = lo;
6008 }
6009 }
6010
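  /* The stores below emit, roughly, for the little-endian case:
	strb	outval, [base, #offset]
	mov	scratch, outval, lsr #8
	strb	scratch, [base, #offset + 1]
     i.e. the low byte first and then the shifted-down high byte, with the
     two byte addresses swapped on big-endian targets.  */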
6011 if (BYTES_BIG_ENDIAN)
6012 {
6013 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6014 plus_constant (base, offset + 1)),
6015 gen_lowpart (QImode, outval)));
6016 emit_insn (gen_lshrsi3 (scratch,
6017 gen_rtx_SUBREG (SImode, outval, 0),
6018 GEN_INT (8)));
6019 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6020 gen_lowpart (QImode, scratch)));
6021 }
6022 else
6023 {
6024 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6025 gen_lowpart (QImode, outval)));
6026 emit_insn (gen_lshrsi3 (scratch,
6027 gen_rtx_SUBREG (SImode, outval, 0),
6028 GEN_INT (8)));
6029 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6030 plus_constant (base, offset + 1)),
6031 gen_lowpart (QImode, scratch)));
6032 }
6033 }
6034 \f
6035 /* Print a symbolic form of X to the debug file, F. */
6036 static void
6037 arm_print_value (FILE *f, rtx x)
6038 {
6039 switch (GET_CODE (x))
6040 {
6041 case CONST_INT:
6042 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6043 return;
6044
6045 case CONST_DOUBLE:
6046 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6047 return;
6048
6049 case CONST_VECTOR:
6050 {
6051 int i;
6052
6053 fprintf (f, "<");
6054 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6055 {
6056 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6057 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6058 fputc (',', f);
6059 }
6060 fprintf (f, ">");
6061 }
6062 return;
6063
6064 case CONST_STRING:
6065 fprintf (f, "\"%s\"", XSTR (x, 0));
6066 return;
6067
6068 case SYMBOL_REF:
6069 fprintf (f, "`%s'", XSTR (x, 0));
6070 return;
6071
6072 case LABEL_REF:
6073 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6074 return;
6075
6076 case CONST:
6077 arm_print_value (f, XEXP (x, 0));
6078 return;
6079
6080 case PLUS:
6081 arm_print_value (f, XEXP (x, 0));
6082 fprintf (f, "+");
6083 arm_print_value (f, XEXP (x, 1));
6084 return;
6085
6086 case PC:
6087 fprintf (f, "pc");
6088 return;
6089
6090 default:
6091 fprintf (f, "????");
6092 return;
6093 }
6094 }
6095 \f
6096 /* Routines for manipulation of the constant pool. */
6097
6098 /* Arm instructions cannot load a large constant directly into a
6099 register; they have to come from a pc relative load. The constant
6100 must therefore be placed in the addressable range of the pc
6101 relative load. Depending on the precise pc relative load
6102 instruction the range is somewhere between 256 bytes and 4k. This
6103 means that we often have to dump a constant inside a function, and
6104 generate code to branch around it.
6105
6106 It is important to minimize this, since the branches will slow
6107 things down and make the code larger.
6108
6109 Normally we can hide the table after an existing unconditional
6110 branch so that there is no interruption of the flow, but in the
6111 worst case the code looks like this:
6112
6113 ldr rn, L1
6114 ...
6115 b L2
6116 align
6117 L1: .long value
6118 L2:
6119 ...
6120
6121 ldr rn, L3
6122 ...
6123 b L4
6124 align
6125 L3: .long value
6126 L4:
6127 ...
6128
6129 We fix this by performing a scan after scheduling, which notices
6130 which instructions need to have their operands fetched from the
6131 constant table and builds the table.
6132
6133 The algorithm starts by building a table of all the constants that
6134 need fixing up and all the natural barriers in the function (places
6135 where a constant table can be dropped without breaking the flow).
6136 For each fixup we note how far the pc-relative replacement will be
6137 able to reach and the offset of the instruction into the function.
6138
6139 Having built the table we then group the fixes together to form
6140 tables that are as large as possible (subject to addressing
6141 constraints) and emit each table of constants after the last
6142 barrier that is within range of all the instructions in the group.
6143 If a group does not contain a barrier, then we forcibly create one
6144 by inserting a jump instruction into the flow. Once the table has
6145 been inserted, the insns are then modified to reference the
6146 relevant entry in the pool.
6147
6148 Possible enhancements to the algorithm (not implemented) are:
6149
6150 1) For some processors and object formats, there may be benefit in
6151 aligning the pools to the start of cache lines; this alignment
6152 would need to be taken into account when calculating addressability
6153 of a pool. */
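/* A small worked example (purely illustrative): suppose an insn at
   address 100 needs a constant and its pc-relative load can reach 4092
   bytes forward, and the only unconditional branch in the function is at
   address 600.  The fix records that the pool entry must be placed before
   roughly address 100 + 4092; the natural barrier after the branch at 600
   is within that range, so the constant is dumped there and the insn is
   rewritten to load from the new pool entry.  Had no barrier been in
   range, a branch around a forced barrier would have been created
   instead.  */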
6154
6155 /* These typedefs are located at the start of this file, so that
6156 they can be used in the prototypes there. This comment is to
6157 remind readers of that fact so that the following structures
6158 can be understood more easily.
6159
6160 typedef struct minipool_node Mnode;
6161 typedef struct minipool_fixup Mfix; */
6162
6163 struct minipool_node
6164 {
6165 /* Doubly linked chain of entries. */
6166 Mnode * next;
6167 Mnode * prev;
6168 /* The maximum offset into the code that this entry can be placed. While
6169 pushing fixes for forward references, all entries are sorted in order
6170 of increasing max_address. */
6171 HOST_WIDE_INT max_address;
6172 /* Similarly for an entry inserted for a backwards ref. */
6173 HOST_WIDE_INT min_address;
6174 /* The number of fixes referencing this entry. This can become zero
6175 if we "unpush" an entry. In this case we ignore the entry when we
6176 come to emit the code. */
6177 int refcount;
6178 /* The offset from the start of the minipool. */
6179 HOST_WIDE_INT offset;
6180 /* The value in table. */
6181 rtx value;
6182 /* The mode of value. */
6183 enum machine_mode mode;
6184 /* The size of the value. With iWMMXt enabled
6185 sizes > 4 also imply an alignment of 8 bytes. */
6186 int fix_size;
6187 };
6188
6189 struct minipool_fixup
6190 {
6191 Mfix * next;
6192 rtx insn;
6193 HOST_WIDE_INT address;
6194 rtx * loc;
6195 enum machine_mode mode;
6196 int fix_size;
6197 rtx value;
6198 Mnode * minipool;
6199 HOST_WIDE_INT forwards;
6200 HOST_WIDE_INT backwards;
6201 };
6202
6203 /* Fixes less than a word need padding out to a word boundary. */
6204 #define MINIPOOL_FIX_SIZE(mode) \
6205 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
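/* So, for example, MINIPOOL_FIX_SIZE (HImode) and MINIPOOL_FIX_SIZE (SImode)
   are both 4, while MINIPOOL_FIX_SIZE (DImode) is 8.  */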
6206
6207 static Mnode * minipool_vector_head;
6208 static Mnode * minipool_vector_tail;
6209 static rtx minipool_vector_label;
6210
6211 /* The linked list of all minipool fixes required for this function. */
6212 Mfix * minipool_fix_head;
6213 Mfix * minipool_fix_tail;
6214 /* The fix entry for the current minipool, once it has been placed. */
6215 Mfix * minipool_barrier;
6216
6217 /* Determines if INSN is the start of a jump table. Returns the end
6218 of the TABLE or NULL_RTX. */
6219 static rtx
6220 is_jump_table (rtx insn)
6221 {
6222 rtx table;
6223
6224 if (GET_CODE (insn) == JUMP_INSN
6225 && JUMP_LABEL (insn) != NULL
6226 && ((table = next_real_insn (JUMP_LABEL (insn)))
6227 == next_real_insn (insn))
6228 && table != NULL
6229 && GET_CODE (table) == JUMP_INSN
6230 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6231 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6232 return table;
6233
6234 return NULL_RTX;
6235 }
6236
6237 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6238 #define JUMP_TABLES_IN_TEXT_SECTION 0
6239 #endif
6240
6241 static HOST_WIDE_INT
6242 get_jump_table_size (rtx insn)
6243 {
6244 /* ADDR_VECs only take room if read-only data goes into the text
6245 section. */
6246 if (JUMP_TABLES_IN_TEXT_SECTION
6247 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6248 || 1
6249 #endif
6250 )
6251 {
6252 rtx body = PATTERN (insn);
6253 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6254
6255 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6256 }
6257
6258 return 0;
6259 }
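/* For example (illustrative figures): an SImode ADDR_DIFF_VEC with ten
   entries accounts for 4 * 10 = 40 bytes when jump tables live in the
   text section, and for nothing at all otherwise.  */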
6260
6261 /* Move a minipool fix MP from its current location to before MAX_MP.
6262 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6263 constraints may need updating. */
6264 static Mnode *
6265 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6266 HOST_WIDE_INT max_address)
6267 {
6268 /* This should never be true and the code below assumes these are
6269 different. */
6270 if (mp == max_mp)
6271 abort ();
6272
6273 if (max_mp == NULL)
6274 {
6275 if (max_address < mp->max_address)
6276 mp->max_address = max_address;
6277 }
6278 else
6279 {
6280 if (max_address > max_mp->max_address - mp->fix_size)
6281 mp->max_address = max_mp->max_address - mp->fix_size;
6282 else
6283 mp->max_address = max_address;
6284
6285 /* Unlink MP from its current position. Since max_mp is non-null,
6286 mp->prev must be non-null. */
6287 mp->prev->next = mp->next;
6288 if (mp->next != NULL)
6289 mp->next->prev = mp->prev;
6290 else
6291 minipool_vector_tail = mp->prev;
6292
6293 /* Re-insert it before MAX_MP. */
6294 mp->next = max_mp;
6295 mp->prev = max_mp->prev;
6296 max_mp->prev = mp;
6297
6298 if (mp->prev != NULL)
6299 mp->prev->next = mp;
6300 else
6301 minipool_vector_head = mp;
6302 }
6303
6304 /* Save the new entry. */
6305 max_mp = mp;
6306
6307 /* Scan over the preceding entries and adjust their addresses as
6308 required. */
6309 while (mp->prev != NULL
6310 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6311 {
6312 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6313 mp = mp->prev;
6314 }
6315
6316 return max_mp;
6317 }
6318
6319 /* Add a constant to the minipool for a forward reference. Returns the
6320 node added or NULL if the constant will not fit in this pool. */
6321 static Mnode *
6322 add_minipool_forward_ref (Mfix *fix)
6323 {
6324 /* If set, max_mp is the first pool_entry that has a lower
6325 constraint than the one we are trying to add. */
6326 Mnode * max_mp = NULL;
6327 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6328 Mnode * mp;
6329
6330 /* If this fix's address is greater than the address of the first
6331 entry, then we can't put the fix in this pool. We subtract the
6332 size of the current fix to ensure that if the table is fully
6333 packed we still have enough room to insert this value by shuffling
6334 the other fixes forwards. */
6335 if (minipool_vector_head &&
6336 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6337 return NULL;
6338
6339 /* Scan the pool to see if a constant with the same value has
6340 already been added. While we are doing this, also note the
6341 location where we must insert the constant if it doesn't already
6342 exist. */
6343 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6344 {
6345 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6346 && fix->mode == mp->mode
6347 && (GET_CODE (fix->value) != CODE_LABEL
6348 || (CODE_LABEL_NUMBER (fix->value)
6349 == CODE_LABEL_NUMBER (mp->value)))
6350 && rtx_equal_p (fix->value, mp->value))
6351 {
6352 /* More than one fix references this entry. */
6353 mp->refcount++;
6354 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6355 }
6356
6357 /* Note the insertion point if necessary. */
6358 if (max_mp == NULL
6359 && mp->max_address > max_address)
6360 max_mp = mp;
6361
6362 /* If we are inserting an 8-byte aligned quantity and
6363 we have not already found an insertion point, then
6364 make sure that all such 8-byte aligned quantities are
6365 placed at the start of the pool. */
6366 if (TARGET_REALLY_IWMMXT
6367 && max_mp == NULL
6368 && fix->fix_size == 8
6369 && mp->fix_size != 8)
6370 {
6371 max_mp = mp;
6372 max_address = mp->max_address;
6373 }
6374 }
6375
6376 /* The value is not currently in the minipool, so we need to create
6377 a new entry for it. If MAX_MP is NULL, the entry will be put on
6378 the end of the list since the placement is less constrained than
6379 any existing entry. Otherwise, we insert the new fix before
6380 MAX_MP and, if necessary, adjust the constraints on the other
6381 entries. */
6382 mp = xmalloc (sizeof (* mp));
6383 mp->fix_size = fix->fix_size;
6384 mp->mode = fix->mode;
6385 mp->value = fix->value;
6386 mp->refcount = 1;
6387 /* Not yet required for a backwards ref. */
6388 mp->min_address = -65536;
6389
6390 if (max_mp == NULL)
6391 {
6392 mp->max_address = max_address;
6393 mp->next = NULL;
6394 mp->prev = minipool_vector_tail;
6395
6396 if (mp->prev == NULL)
6397 {
6398 minipool_vector_head = mp;
6399 minipool_vector_label = gen_label_rtx ();
6400 }
6401 else
6402 mp->prev->next = mp;
6403
6404 minipool_vector_tail = mp;
6405 }
6406 else
6407 {
6408 if (max_address > max_mp->max_address - mp->fix_size)
6409 mp->max_address = max_mp->max_address - mp->fix_size;
6410 else
6411 mp->max_address = max_address;
6412
6413 mp->next = max_mp;
6414 mp->prev = max_mp->prev;
6415 max_mp->prev = mp;
6416 if (mp->prev != NULL)
6417 mp->prev->next = mp;
6418 else
6419 minipool_vector_head = mp;
6420 }
6421
6422 /* Save the new entry. */
6423 max_mp = mp;
6424
6425 /* Scan over the preceding entries and adjust their addresses as
6426 required. */
6427 while (mp->prev != NULL
6428 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6429 {
6430 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6431 mp = mp->prev;
6432 }
6433
6434 return max_mp;
6435 }
6436
6437 static Mnode *
6438 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6439 HOST_WIDE_INT min_address)
6440 {
6441 HOST_WIDE_INT offset;
6442
6443 /* This should never be true, and the code below assumes these are
6444 different. */
6445 if (mp == min_mp)
6446 abort ();
6447
6448 if (min_mp == NULL)
6449 {
6450 if (min_address > mp->min_address)
6451 mp->min_address = min_address;
6452 }
6453 else
6454 {
6455 /* We will adjust this below if it is too loose. */
6456 mp->min_address = min_address;
6457
6458 /* Unlink MP from its current position. Since min_mp is non-null,
6459 mp->next must be non-null. */
6460 mp->next->prev = mp->prev;
6461 if (mp->prev != NULL)
6462 mp->prev->next = mp->next;
6463 else
6464 minipool_vector_head = mp->next;
6465
6466 /* Reinsert it after MIN_MP. */
6467 mp->prev = min_mp;
6468 mp->next = min_mp->next;
6469 min_mp->next = mp;
6470 if (mp->next != NULL)
6471 mp->next->prev = mp;
6472 else
6473 minipool_vector_tail = mp;
6474 }
6475
6476 min_mp = mp;
6477
6478 offset = 0;
6479 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6480 {
6481 mp->offset = offset;
6482 if (mp->refcount > 0)
6483 offset += mp->fix_size;
6484
6485 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6486 mp->next->min_address = mp->min_address + mp->fix_size;
6487 }
6488
6489 return min_mp;
6490 }
6491
6492 /* Add a constant to the minipool for a backward reference. Returns the
6493 node added or NULL if the constant will not fit in this pool.
6494
6495 Note that the code for insertion for a backwards reference can be
6496 somewhat confusing because the calculated offsets for each fix do
6497 not take into account the size of the pool (which is still under
6498 construction). */
6499 static Mnode *
6500 add_minipool_backward_ref (Mfix *fix)
6501 {
6502 /* If set, min_mp is the last pool_entry that has a lower constraint
6503 than the one we are trying to add. */
6504 Mnode *min_mp = NULL;
6505 /* This can be negative, since it is only a constraint. */
6506 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6507 Mnode *mp;
6508
6509 /* If we can't reach the current pool from this insn, or if we can't
6510 insert this entry at the end of the pool without pushing other
6511 fixes out of range, then we don't try. This ensures that we
6512 can't fail later on. */
6513 if (min_address >= minipool_barrier->address
6514 || (minipool_vector_tail->min_address + fix->fix_size
6515 >= minipool_barrier->address))
6516 return NULL;
6517
6518 /* Scan the pool to see if a constant with the same value has
6519 already been added. While we are doing this, also note the
6520 location where we must insert the constant if it doesn't already
6521 exist. */
6522 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6523 {
6524 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6525 && fix->mode == mp->mode
6526 && (GET_CODE (fix->value) != CODE_LABEL
6527 || (CODE_LABEL_NUMBER (fix->value)
6528 == CODE_LABEL_NUMBER (mp->value)))
6529 && rtx_equal_p (fix->value, mp->value)
6530 /* Check that there is enough slack to move this entry to the
6531 end of the table (this is conservative). */
6532 && (mp->max_address
6533 > (minipool_barrier->address
6534 + minipool_vector_tail->offset
6535 + minipool_vector_tail->fix_size)))
6536 {
6537 mp->refcount++;
6538 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6539 }
6540
6541 if (min_mp != NULL)
6542 mp->min_address += fix->fix_size;
6543 else
6544 {
6545 /* Note the insertion point if necessary. */
6546 if (mp->min_address < min_address)
6547 {
6548 /* For now, we do not allow the insertion of 8-byte alignment
6549 requiring nodes anywhere but at the start of the pool. */
6550 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8 && mp->fix_size != 8)
6551 return NULL;
6552 else
6553 min_mp = mp;
6554 }
6555 else if (mp->max_address
6556 < minipool_barrier->address + mp->offset + fix->fix_size)
6557 {
6558 /* Inserting before this entry would push the fix beyond
6559 its maximum address (which can happen if we have
6560 re-located a forwards fix); force the new fix to come
6561 after it. */
6562 min_mp = mp;
6563 min_address = mp->min_address + fix->fix_size;
6564 }
6565 /* If we are inserting an 8-byte aligned quantity and
6566 we have not already found an insertion point, then
6567 make sure that all such 8-byte aligned quantities are
6568 placed at the start of the pool. */
6569 else if (TARGET_REALLY_IWMMXT
6570 && min_mp == NULL
6571 && fix->fix_size == 8
6572 && mp->fix_size < 8)
6573 {
6574 min_mp = mp;
6575 min_address = mp->min_address + fix->fix_size;
6576 }
6577 }
6578 }
6579
6580 /* We need to create a new entry. */
6581 mp = xmalloc (sizeof (* mp));
6582 mp->fix_size = fix->fix_size;
6583 mp->mode = fix->mode;
6584 mp->value = fix->value;
6585 mp->refcount = 1;
6586 mp->max_address = minipool_barrier->address + 65536;
6587
6588 mp->min_address = min_address;
6589
6590 if (min_mp == NULL)
6591 {
6592 mp->prev = NULL;
6593 mp->next = minipool_vector_head;
6594
6595 if (mp->next == NULL)
6596 {
6597 minipool_vector_tail = mp;
6598 minipool_vector_label = gen_label_rtx ();
6599 }
6600 else
6601 mp->next->prev = mp;
6602
6603 minipool_vector_head = mp;
6604 }
6605 else
6606 {
6607 mp->next = min_mp->next;
6608 mp->prev = min_mp;
6609 min_mp->next = mp;
6610
6611 if (mp->next != NULL)
6612 mp->next->prev = mp;
6613 else
6614 minipool_vector_tail = mp;
6615 }
6616
6617 /* Save the new entry. */
6618 min_mp = mp;
6619
6620 if (mp->prev)
6621 mp = mp->prev;
6622 else
6623 mp->offset = 0;
6624
6625 /* Scan over the following entries and adjust their offsets. */
6626 while (mp->next != NULL)
6627 {
6628 if (mp->next->min_address < mp->min_address + mp->fix_size)
6629 mp->next->min_address = mp->min_address + mp->fix_size;
6630
6631 if (mp->refcount)
6632 mp->next->offset = mp->offset + mp->fix_size;
6633 else
6634 mp->next->offset = mp->offset;
6635
6636 mp = mp->next;
6637 }
6638
6639 return min_mp;
6640 }
6641
6642 static void
6643 assign_minipool_offsets (Mfix *barrier)
6644 {
6645 HOST_WIDE_INT offset = 0;
6646 Mnode *mp;
6647
6648 minipool_barrier = barrier;
6649
6650 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6651 {
6652 mp->offset = offset;
6653
6654 if (mp->refcount > 0)
6655 offset += mp->fix_size;
6656 }
6657 }
6658
6659 /* Output the literal table. */
6660 static void
6661 dump_minipool (rtx scan)
6662 {
6663 Mnode * mp;
6664 Mnode * nmp;
6665 int align64 = 0;
6666
6667 if (TARGET_REALLY_IWMMXT)
6668 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6669 if (mp->refcount > 0 && mp->fix_size == 8)
6670 {
6671 align64 = 1;
6672 break;
6673 }
6674
6675 if (rtl_dump_file)
6676 fprintf (rtl_dump_file,
6677 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
6678 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
6679
6680 scan = emit_label_after (gen_label_rtx (), scan);
6681 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
6682 scan = emit_label_after (minipool_vector_label, scan);
6683
6684 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
6685 {
6686 if (mp->refcount > 0)
6687 {
6688 if (rtl_dump_file)
6689 {
6690 fprintf (rtl_dump_file,
6691 ";; Offset %u, min %ld, max %ld ",
6692 (unsigned) mp->offset, (unsigned long) mp->min_address,
6693 (unsigned long) mp->max_address);
6694 arm_print_value (rtl_dump_file, mp->value);
6695 fputc ('\n', rtl_dump_file);
6696 }
6697
6698 switch (mp->fix_size)
6699 {
6700 #ifdef HAVE_consttable_1
6701 case 1:
6702 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
6703 break;
6704
6705 #endif
6706 #ifdef HAVE_consttable_2
6707 case 2:
6708 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
6709 break;
6710
6711 #endif
6712 #ifdef HAVE_consttable_4
6713 case 4:
6714 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
6715 break;
6716
6717 #endif
6718 #ifdef HAVE_consttable_8
6719 case 8:
6720 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
6721 break;
6722
6723 #endif
6724 default:
6725 abort ();
6726 break;
6727 }
6728 }
6729
6730 nmp = mp->next;
6731 free (mp);
6732 }
6733
6734 minipool_vector_head = minipool_vector_tail = NULL;
6735 scan = emit_insn_after (gen_consttable_end (), scan);
6736 scan = emit_barrier_after (scan);
6737 }
6738
6739 /* Return the cost of forcibly inserting a barrier after INSN. */
6740 static int
6741 arm_barrier_cost (rtx insn)
6742 {
6743 /* Basing the location of the pool on the loop depth is preferable,
6744 but at the moment, the basic block information seems to be
6745 corrupt by this stage of the compilation. */
6746 int base_cost = 50;
6747 rtx next = next_nonnote_insn (insn);
6748
6749 if (next != NULL && GET_CODE (next) == CODE_LABEL)
6750 base_cost -= 20;
6751
6752 switch (GET_CODE (insn))
6753 {
6754 case CODE_LABEL:
6755 /* It will always be better to place the table before the label, rather
6756 than after it. */
6757 return 50;
6758
6759 case INSN:
6760 case CALL_INSN:
6761 return base_cost;
6762
6763 case JUMP_INSN:
6764 return base_cost - 10;
6765
6766 default:
6767 return base_cost + 10;
6768 }
6769 }
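/* As a concrete illustration: an ordinary INSN whose next non-note insn is
   a CODE_LABEL costs 50 - 20 = 30, a JUMP_INSN in the same position costs
   20, and placing the barrier at a CODE_LABEL itself always costs 50;
   create_fix_barrier below prefers the cheapest position it can reach,
   and among equally cheap positions, the latest one.  */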
6770
6771 /* Find the best place in the insn stream in the range
6772 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
6773 Create the barrier by inserting a jump and add a new fix entry for
6774 it. */
6775 static Mfix *
6776 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
6777 {
6778 HOST_WIDE_INT count = 0;
6779 rtx barrier;
6780 rtx from = fix->insn;
6781 rtx selected = from;
6782 int selected_cost;
6783 HOST_WIDE_INT selected_address;
6784 Mfix * new_fix;
6785 HOST_WIDE_INT max_count = max_address - fix->address;
6786 rtx label = gen_label_rtx ();
6787
6788 selected_cost = arm_barrier_cost (from);
6789 selected_address = fix->address;
6790
6791 while (from && count < max_count)
6792 {
6793 rtx tmp;
6794 int new_cost;
6795
6796 /* This code shouldn't have been called if there was a natural barrier
6797 within range. */
6798 if (GET_CODE (from) == BARRIER)
6799 abort ();
6800
6801 /* Count the length of this insn. */
6802 count += get_attr_length (from);
6803
6804 /* If there is a jump table, add its length. */
6805 tmp = is_jump_table (from);
6806 if (tmp != NULL)
6807 {
6808 count += get_jump_table_size (tmp);
6809
6810 /* Jump tables aren't in a basic block, so base the cost on
6811 the dispatch insn. If we select this location, we will
6812 still put the pool after the table. */
6813 new_cost = arm_barrier_cost (from);
6814
6815 if (count < max_count && new_cost <= selected_cost)
6816 {
6817 selected = tmp;
6818 selected_cost = new_cost;
6819 selected_address = fix->address + count;
6820 }
6821
6822 /* Continue after the dispatch table. */
6823 from = NEXT_INSN (tmp);
6824 continue;
6825 }
6826
6827 new_cost = arm_barrier_cost (from);
6828
6829 if (count < max_count && new_cost <= selected_cost)
6830 {
6831 selected = from;
6832 selected_cost = new_cost;
6833 selected_address = fix->address + count;
6834 }
6835
6836 from = NEXT_INSN (from);
6837 }
6838
6839 /* Create a new JUMP_INSN that branches around a barrier. */
6840 from = emit_jump_insn_after (gen_jump (label), selected);
6841 JUMP_LABEL (from) = label;
6842 barrier = emit_barrier_after (from);
6843 emit_label_after (label, barrier);
6844
6845 /* Create a minipool barrier entry for the new barrier. */
6846 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
6847 new_fix->insn = barrier;
6848 new_fix->address = selected_address;
6849 new_fix->next = fix->next;
6850 fix->next = new_fix;
6851
6852 return new_fix;
6853 }
6854
6855 /* Record that there is a natural barrier in the insn stream at
6856 ADDRESS. */
6857 static void
6858 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
6859 {
6860 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
6861
6862 fix->insn = insn;
6863 fix->address = address;
6864
6865 fix->next = NULL;
6866 if (minipool_fix_head != NULL)
6867 minipool_fix_tail->next = fix;
6868 else
6869 minipool_fix_head = fix;
6870
6871 minipool_fix_tail = fix;
6872 }
6873
6874 /* Record INSN, which will need fixing up to load a value from the
6875 minipool. ADDRESS is the offset of the insn from the start of the
6876 function; LOC is a pointer to the part of the insn which requires
6877 fixing; VALUE is the constant that must be loaded, which is of type
6878 MODE. */
6879 static void
6880 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
6881 enum machine_mode mode, rtx value)
6882 {
6883 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
6884
6885 #ifdef AOF_ASSEMBLER
6886 /* PIC symbol references need to be converted into offsets into the
6887 based area. */
6888 /* XXX This shouldn't be done here. */
6889 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
6890 value = aof_pic_entry (value);
6891 #endif /* AOF_ASSEMBLER */
6892
6893 fix->insn = insn;
6894 fix->address = address;
6895 fix->loc = loc;
6896 fix->mode = mode;
6897 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
6898 fix->value = value;
6899 fix->forwards = get_attr_pool_range (insn);
6900 fix->backwards = get_attr_neg_pool_range (insn);
6901 fix->minipool = NULL;
6902
6903 /* If an insn doesn't have a range defined for it, then it isn't
6904 expecting to be reworked by this code. Better to abort now than
6905 to generate duff assembly code. */
6906 if (fix->forwards == 0 && fix->backwards == 0)
6907 abort ();
6908
6909 /* With iWMMXt enabled, the pool is aligned to an 8-byte boundary.
6910 So there might be an empty word before the start of the pool.
6911 Hence we reduce the forward range by 4 to allow for this
6912 possibility. */
6913 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8)
6914 fix->forwards -= 4;
6915
6916 if (rtl_dump_file)
6917 {
6918 fprintf (rtl_dump_file,
6919 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
6920 GET_MODE_NAME (mode),
6921 INSN_UID (insn), (unsigned long) address,
6922 -1 * (long)fix->backwards, (long)fix->forwards);
6923 arm_print_value (rtl_dump_file, fix->value);
6924 fprintf (rtl_dump_file, "\n");
6925 }
6926
6927 /* Add it to the chain of fixes. */
6928 fix->next = NULL;
6929
6930 if (minipool_fix_head != NULL)
6931 minipool_fix_tail->next = fix;
6932 else
6933 minipool_fix_head = fix;
6934
6935 minipool_fix_tail = fix;
6936 }
6937
6938 /* Scan INSN and note any of its operands that need fixing.
6939 If DO_PUSHES is false we do not actually push any of the fixups
6940 needed. The function returns TRUE if any fixups were needed/pushed.
6941 This is used by arm_memory_load_p() which needs to know about loads
6942 of constants that will be converted into minipool loads. */
6943 static bool
6944 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
6945 {
6946 bool result = false;
6947 int opno;
6948
6949 extract_insn (insn);
6950
6951 if (!constrain_operands (1))
6952 fatal_insn_not_found (insn);
6953
6954 if (recog_data.n_alternatives == 0)
6955 return false;
6956
6957 /* Fill in recog_op_alt with information about the constraints of this insn. */
6958 preprocess_constraints ();
6959
6960 for (opno = 0; opno < recog_data.n_operands; opno++)
6961 {
6962 /* Things we need to fix can only occur in inputs. */
6963 if (recog_data.operand_type[opno] != OP_IN)
6964 continue;
6965
6966 /* If this alternative is a memory reference, then any mention
6967 of constants in this alternative is really to fool reload
6968 into allowing us to accept one there. We need to fix them up
6969 now so that we output the right code. */
6970 if (recog_op_alt[opno][which_alternative].memory_ok)
6971 {
6972 rtx op = recog_data.operand[opno];
6973
6974 if (CONSTANT_P (op))
6975 {
6976 if (do_pushes)
6977 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6978 recog_data.operand_mode[opno], op);
6979 result = true;
6980 }
6981 else if (GET_CODE (op) == MEM
6982 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
6983 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
6984 {
6985 if (do_pushes)
6986 {
6987 rtx cop = avoid_constant_pool_reference (op);
6988
6989 /* Casting the address of something to a mode narrower
6990 than a word can cause avoid_constant_pool_reference()
6991 to return the pool reference itself. That's no good to
6992 us here. Let's just hope that we can use the
6993 constant pool value directly. */
6994 if (op == cop)
6995 cop = get_pool_constant (XEXP (op, 0));
6996
6997 push_minipool_fix (insn, address,
6998 recog_data.operand_loc[opno],
6999 recog_data.operand_mode[opno], cop);
7000 }
7001
7002 result = true;
7003 }
7004 }
7005 }
7006
7007 return result;
7008 }
7009
7010 /* Gcc puts the pool in the wrong place for ARM, since we can only
7011 load addresses a limited distance around the pc. We do some
7012 special munging to move the constant pool values to the correct
7013 point in the code. */
7014 static void
7015 arm_reorg (void)
7016 {
7017 rtx insn;
7018 HOST_WIDE_INT address = 0;
7019 Mfix * fix;
7020
7021 minipool_fix_head = minipool_fix_tail = NULL;
7022
7023 /* The first insn must always be a note, or the code below won't
7024 scan it properly. */
7025 insn = get_insns ();
7026 if (GET_CODE (insn) != NOTE)
7027 abort ();
7028
7029 /* Scan all the insns and record the operands that will need fixing. */
7030 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7031 {
7032 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7033 && (arm_cirrus_insn_p (insn)
7034 || GET_CODE (insn) == JUMP_INSN
7035 || arm_memory_load_p (insn)))
7036 cirrus_reorg (insn);
7037
7038 if (GET_CODE (insn) == BARRIER)
7039 push_minipool_barrier (insn, address);
7040 else if (INSN_P (insn))
7041 {
7042 rtx table;
7043
7044 note_invalid_constants (insn, address, true);
7045 address += get_attr_length (insn);
7046
7047 /* If the insn is a vector jump, add the size of the table
7048 and skip the table. */
7049 if ((table = is_jump_table (insn)) != NULL)
7050 {
7051 address += get_jump_table_size (table);
7052 insn = table;
7053 }
7054 }
7055 }
7056
7057 fix = minipool_fix_head;
7058
7059 /* Now scan the fixups and perform the required changes. */
7060 while (fix)
7061 {
7062 Mfix * ftmp;
7063 Mfix * fdel;
7064 Mfix * last_added_fix;
7065 Mfix * last_barrier = NULL;
7066 Mfix * this_fix;
7067
7068 /* Skip any further barriers before the next fix. */
7069 while (fix && GET_CODE (fix->insn) == BARRIER)
7070 fix = fix->next;
7071
7072 /* No more fixes. */
7073 if (fix == NULL)
7074 break;
7075
7076 last_added_fix = NULL;
7077
7078 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7079 {
7080 if (GET_CODE (ftmp->insn) == BARRIER)
7081 {
7082 if (ftmp->address >= minipool_vector_head->max_address)
7083 break;
7084
7085 last_barrier = ftmp;
7086 }
7087 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7088 break;
7089
7090 last_added_fix = ftmp; /* Keep track of the last fix added. */
7091 }
7092
7093 /* If we found a barrier, drop back to that; any fixes that we
7094 could have reached but come after the barrier will now go in
7095 the next mini-pool. */
7096 if (last_barrier != NULL)
7097 {
7098 /* Reduce the refcount for those fixes that won't go into this
7099 pool after all. */
7100 for (fdel = last_barrier->next;
7101 fdel && fdel != ftmp;
7102 fdel = fdel->next)
7103 {
7104 fdel->minipool->refcount--;
7105 fdel->minipool = NULL;
7106 }
7107
7108 ftmp = last_barrier;
7109 }
7110 else
7111 {
7112 /* ftmp is the first fix that we can't fit into this pool and
7113 there are no natural barriers that we could use. Insert a
7114 new barrier in the code somewhere between the previous
7115 fix and this one, and arrange to jump around it. */
7116 HOST_WIDE_INT max_address;
7117
7118 /* The last item on the list of fixes must be a barrier, so
7119 we can never run off the end of the list of fixes without
7120 last_barrier being set. */
7121 if (ftmp == NULL)
7122 abort ();
7123
7124 max_address = minipool_vector_head->max_address;
7125 /* Check that there isn't another fix that is in range that
7126 we couldn't fit into this pool because the pool was
7127 already too large: we need to put the pool before such an
7128 instruction. */
7129 if (ftmp->address < max_address)
7130 max_address = ftmp->address;
7131
7132 last_barrier = create_fix_barrier (last_added_fix, max_address);
7133 }
7134
7135 assign_minipool_offsets (last_barrier);
7136
7137 while (ftmp)
7138 {
7139 if (GET_CODE (ftmp->insn) != BARRIER
7140 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7141 == NULL))
7142 break;
7143
7144 ftmp = ftmp->next;
7145 }
7146
7147 /* Scan over the fixes we have identified for this pool, fixing them
7148 up and adding the constants to the pool itself. */
7149 for (this_fix = fix; this_fix && ftmp != this_fix;
7150 this_fix = this_fix->next)
7151 if (GET_CODE (this_fix->insn) != BARRIER)
7152 {
7153 rtx addr
7154 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7155 minipool_vector_label),
7156 this_fix->minipool->offset);
7157 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7158 }
7159
7160 dump_minipool (last_barrier->insn);
7161 fix = ftmp;
7162 }
7163
7164 /* From now on we must synthesize any constants that we can't handle
7165 directly. This can happen if the RTL gets split during final
7166 instruction generation. */
7167 after_arm_reorg = 1;
7168
7169 /* Free the minipool memory. */
7170 obstack_free (&minipool_obstack, minipool_startobj);
7171 }
7172 \f
7173 /* Routines to output assembly language. */
7174
7175 /* If the rtx is one of the valid FPA immediate constants, return the
7176 string representing that number. In this way we can ensure that valid
7177 double constants are generated even when cross compiling. */
7178 const char *
7179 fp_immediate_constant (rtx x)
7180 {
7181 REAL_VALUE_TYPE r;
7182 int i;
7183
7184 if (!fpa_consts_inited)
7185 init_fpa_table ();
7186
7187 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7188 for (i = 0; i < 8; i++)
7189 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
7190 return strings_fpa[i];
7191
7192 abort ();
7193 }
7194
7195 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7196 static const char *
7197 fp_const_from_val (REAL_VALUE_TYPE *r)
7198 {
7199 int i;
7200
7201 if (!fpa_consts_inited)
7202 init_fpa_table ();
7203
7204 for (i = 0; i < 8; i++)
7205 if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
7206 return strings_fpa[i];
7207
7208 abort ();
7209 }
7210
7211 /* Output the operands of a LDM/STM instruction to STREAM.
7212 MASK is the ARM register set mask of which only bits 0-15 are important.
7213 REG is the base register, either the frame pointer or the stack pointer,
7214 INSTR is the possibly suffixed load or store instruction. */
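/* For example, a hypothetical call
     print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 0x4030)
   would output "\tldmfd\tsp!, {r4, r5, lr}", since bits 4, 5 and 14 of the
   mask are set (the format string here is illustrative only).  */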
7215 static void
7216 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7217 {
7218 int i;
7219 int not_first = FALSE;
7220
7221 fputc ('\t', stream);
7222 asm_fprintf (stream, instr, reg);
7223 fputs (", {", stream);
7224
7225 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7226 if (mask & (1 << i))
7227 {
7228 if (not_first)
7229 fprintf (stream, ", ");
7230
7231 asm_fprintf (stream, "%r", i);
7232 not_first = TRUE;
7233 }
7234
7235 fprintf (stream, "}");
7236
7237 /* Add a ^ character for the 26-bit ABI, but only if we were loading
7238 the PC. Otherwise we would generate an UNPREDICTABLE instruction.
7239 Strictly speaking the instruction would be unpredictable only if
7240 we were writing back the base register as well, but since we never
7241 want to generate an LDM type 2 instruction (register bank switching)
7242 which is what you get if the PC is not being loaded, we do not need
7243 to check for writeback. */
7244 if (! TARGET_APCS_32
7245 && ((mask & (1 << PC_REGNUM)) != 0))
7246 fprintf (stream, "^");
7247
7248 fprintf (stream, "\n");
7249 }
7250
7251 /* Output a 'call' insn. */
7252 const char *
7253 output_call (rtx *operands)
7254 {
7255 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7256
7257 if (REGNO (operands[0]) == LR_REGNUM)
7258 {
7259 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7260 output_asm_insn ("mov%?\t%0, %|lr", operands);
7261 }
7262
7263 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7264
7265 if (TARGET_INTERWORK)
7266 output_asm_insn ("bx%?\t%0", operands);
7267 else
7268 output_asm_insn ("mov%?\t%|pc, %0", operands);
7269
7270 return "";
7271 }
7272
7273 /* Output a 'call' insn that is a reference in memory. */
7274 const char *
7275 output_call_mem (rtx *operands)
7276 {
7277 if (TARGET_INTERWORK)
7278 {
7279 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7280 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7281 output_asm_insn ("bx%?\t%|ip", operands);
7282 }
7283 else if (regno_use_in (LR_REGNUM, operands[0]))
7284 {
7285 /* LR is used in the memory address. We load the address in the
7286 first instruction. It's safe to use IP as the target of the
7287 load since the call will kill it anyway. */
7288 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7289 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7290 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7291 }
7292 else
7293 {
7294 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7295 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7296 }
7297
7298 return "";
7299 }
7300
7301
7302 /* Output a move from arm registers to an fpa register.
7303 OPERANDS[0] is an fpa register.
7304 OPERANDS[1] is the first register of an arm register pair. */
7305 const char *
7306 output_mov_long_double_fpa_from_arm (rtx *operands)
7307 {
7308 int arm_reg0 = REGNO (operands[1]);
7309 rtx ops[3];
7310
7311 if (arm_reg0 == IP_REGNUM)
7312 abort ();
7313
7314 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7315 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7316 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7317
7318 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7319 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7320
7321 return "";
7322 }
7323
7324 /* Output a move from an fpa register to arm registers.
7325 OPERANDS[0] is the first register of an arm register pair.
7326 OPERANDS[1] is an fpa register. */
7327 const char *
7328 output_mov_long_double_arm_from_fpa (rtx *operands)
7329 {
7330 int arm_reg0 = REGNO (operands[0]);
7331 rtx ops[3];
7332
7333 if (arm_reg0 == IP_REGNUM)
7334 abort ();
7335
7336 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7337 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7338 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7339
7340 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7341 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7342 return "";
7343 }
7344
7345 /* Output a move from arm registers to arm registers of a long double.
7346 OPERANDS[0] is the destination.
7347 OPERANDS[1] is the source. */
7348 const char *
7349 output_mov_long_double_arm_from_arm (rtx *operands)
7350 {
7351 /* We have to be careful here because the two might overlap. */
7352 int dest_start = REGNO (operands[0]);
7353 int src_start = REGNO (operands[1]);
7354 rtx ops[2];
7355 int i;
7356
7357 if (dest_start < src_start)
7358 {
7359 for (i = 0; i < 3; i++)
7360 {
7361 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7362 ops[1] = gen_rtx_REG (SImode, src_start + i);
7363 output_asm_insn ("mov%?\t%0, %1", ops);
7364 }
7365 }
7366 else
7367 {
7368 for (i = 2; i >= 0; i--)
7369 {
7370 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7371 ops[1] = gen_rtx_REG (SImode, src_start + i);
7372 output_asm_insn ("mov%?\t%0, %1", ops);
7373 }
7374 }
7375
7376 return "";
7377 }
7378
7379
7380 /* Output a move from arm registers to an fpa register.
7381 OPERANDS[0] is an fpa register.
7382 OPERANDS[1] is the first register of an arm register pair. */
7383 const char *
7384 output_mov_double_fpa_from_arm (rtx *operands)
7385 {
7386 int arm_reg0 = REGNO (operands[1]);
7387 rtx ops[2];
7388
7389 if (arm_reg0 == IP_REGNUM)
7390 abort ();
7391
7392 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7393 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7394 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
7395 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
7396 return "";
7397 }
7398
7399 /* Output a move from an fpa register to arm registers.
7400 OPERANDS[0] is the first register of an arm register pair.
7401 OPERANDS[1] is an fpa register. */
7402 const char *
7403 output_mov_double_arm_from_fpa (rtx *operands)
7404 {
7405 int arm_reg0 = REGNO (operands[0]);
7406 rtx ops[2];
7407
7408 if (arm_reg0 == IP_REGNUM)
7409 abort ();
7410
7411 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7412 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7413 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
7414 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
7415 return "";
7416 }
7417
7418 /* Output a move between double words.
7419 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
7420 or MEM<-REG and all MEMs must be offsettable addresses. */
7421 const char *
7422 output_move_double (rtx *operands)
7423 {
7424 enum rtx_code code0 = GET_CODE (operands[0]);
7425 enum rtx_code code1 = GET_CODE (operands[1]);
7426 rtx otherops[3];
7427
7428 if (code0 == REG)
7429 {
7430 int reg0 = REGNO (operands[0]);
7431
7432 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
7433
7434 if (code1 == REG)
7435 {
7436 int reg1 = REGNO (operands[1]);
7437 if (reg1 == IP_REGNUM)
7438 abort ();
7439
7440 /* Ensure the second source is not overwritten. */
7441 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
7442 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
7443 else
7444 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
7445 }
7446 else if (code1 == CONST_VECTOR)
7447 {
7448 HOST_WIDE_INT hint = 0;
7449
7450 switch (GET_MODE (operands[1]))
7451 {
7452 case V2SImode:
7453 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
7454 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
7455 break;
7456
7457 case V4HImode:
7458 if (BYTES_BIG_ENDIAN)
7459 {
7460 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7461 hint <<= 16;
7462 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7463 }
7464 else
7465 {
7466 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7467 hint <<= 16;
7468 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7469 }
7470
7471 otherops[1] = GEN_INT (hint);
7472 hint = 0;
7473
7474 if (BYTES_BIG_ENDIAN)
7475 {
7476 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7477 hint <<= 16;
7478 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7479 }
7480 else
7481 {
7482 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7483 hint <<= 16;
7484 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7485 }
7486
7487 operands[1] = GEN_INT (hint);
7488 break;
7489
7490 case V8QImode:
7491 if (BYTES_BIG_ENDIAN)
7492 {
7493 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7494 hint <<= 8;
7495 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7496 hint <<= 8;
7497 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7498 hint <<= 8;
7499 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7500 }
7501 else
7502 {
7503 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7504 hint <<= 8;
7505 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7506 hint <<= 8;
7507 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7508 hint <<= 8;
7509 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7510 }
7511
7512 otherops[1] = GEN_INT (hint);
7513 hint = 0;
7514
7515 if (BYTES_BIG_ENDIAN)
7516 {
7517 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7518 hint <<= 8;
7519 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7520 hint <<= 8;
7521 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7522 hint <<= 8;
7523 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7524 }
7525 else
7526 {
7527 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7528 hint <<= 8;
7529 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7530 hint <<= 8;
7531 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7532 hint <<= 8;
7533 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7534 }
7535
7536 operands[1] = GEN_INT (hint);
7537 break;
7538
7539 default:
7540 abort ();
7541 }
7542 output_mov_immediate (operands);
7543 output_mov_immediate (otherops);
7544 }
7545 else if (code1 == CONST_DOUBLE)
7546 {
7547 if (GET_MODE (operands[1]) == DFmode)
7548 {
7549 REAL_VALUE_TYPE r;
7550 long l[2];
7551
7552 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
7553 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
7554 otherops[1] = GEN_INT (l[1]);
7555 operands[1] = GEN_INT (l[0]);
7556 }
7557 else if (GET_MODE (operands[1]) != VOIDmode)
7558 abort ();
7559 else if (WORDS_BIG_ENDIAN)
7560 {
7561 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
7562 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
7563 }
7564 else
7565 {
7566 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
7567 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
7568 }
7569
7570 output_mov_immediate (operands);
7571 output_mov_immediate (otherops);
7572 }
7573 else if (code1 == CONST_INT)
7574 {
7575 #if HOST_BITS_PER_WIDE_INT > 32
7576 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
7577 what the upper word is. */
7578 if (WORDS_BIG_ENDIAN)
7579 {
7580 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
7581 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
7582 }
7583 else
7584 {
7585 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
7586 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
7587 }
7588 #else
7589 /* Sign extend the intval into the high-order word. */
7590 if (WORDS_BIG_ENDIAN)
7591 {
7592 otherops[1] = operands[1];
7593 operands[1] = (INTVAL (operands[1]) < 0
7594 ? constm1_rtx : const0_rtx);
7595 }
7596 else
7597 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
7598 #endif
7599 output_mov_immediate (otherops);
7600 output_mov_immediate (operands);
7601 }
7602 else if (code1 == MEM)
7603 {
7604 switch (GET_CODE (XEXP (operands[1], 0)))
7605 {
7606 case REG:
7607 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
7608 break;
7609
7610 case PRE_INC:
7611 abort (); /* Should never happen now. */
7612 break;
7613
7614 case PRE_DEC:
7615 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
7616 break;
7617
7618 case POST_INC:
7619 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
7620 break;
7621
7622 case POST_DEC:
7623 abort (); /* Should never happen now. */
7624 break;
7625
7626 case LABEL_REF:
7627 case CONST:
7628 output_asm_insn ("adr%?\t%0, %1", operands);
7629 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
7630 break;
7631
7632 default:
7633 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
7634 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
7635 {
7636 otherops[0] = operands[0];
7637 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
7638 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
7639
7640 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
7641 {
7642 if (GET_CODE (otherops[2]) == CONST_INT)
7643 {
7644 switch ((int) INTVAL (otherops[2]))
7645 {
7646 case -8:
7647 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
7648 return "";
7649 case -4:
7650 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
7651 return "";
7652 case 4:
7653 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
7654 return "";
7655 }
7656
7657 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
7658 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
7659 else
7660 output_asm_insn ("add%?\t%0, %1, %2", otherops);
7661 }
7662 else
7663 output_asm_insn ("add%?\t%0, %1, %2", otherops);
7664 }
7665 else
7666 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
7667
7668 return "ldm%?ia\t%0, %M0";
7669 }
7670 else
7671 {
7672 otherops[1] = adjust_address (operands[1], SImode, 4);
7673 /* Take care of overlapping base/data reg. */
7674 if (reg_mentioned_p (operands[0], operands[1]))
7675 {
7676 output_asm_insn ("ldr%?\t%0, %1", otherops);
7677 output_asm_insn ("ldr%?\t%0, %1", operands);
7678 }
7679 else
7680 {
7681 output_asm_insn ("ldr%?\t%0, %1", operands);
7682 output_asm_insn ("ldr%?\t%0, %1", otherops);
7683 }
7684 }
7685 }
7686 }
7687 else
7688 abort (); /* Constraints should prevent this. */
7689 }
7690 else if (code0 == MEM && code1 == REG)
7691 {
7692 if (REGNO (operands[1]) == IP_REGNUM)
7693 abort ();
7694
7695 switch (GET_CODE (XEXP (operands[0], 0)))
7696 {
7697 case REG:
7698 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
7699 break;
7700
7701 case PRE_INC:
7702 abort (); /* Should never happen now. */
7703 break;
7704
7705 case PRE_DEC:
7706 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
7707 break;
7708
7709 case POST_INC:
7710 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
7711 break;
7712
7713 case POST_DEC:
7714 abort (); /* Should never happen now. */
7715 break;
7716
7717 case PLUS:
7718 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
7719 {
7720 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
7721 {
7722 case -8:
7723 output_asm_insn ("stm%?db\t%m0, %M1", operands);
7724 return "";
7725
7726 case -4:
7727 output_asm_insn ("stm%?da\t%m0, %M1", operands);
7728 return "";
7729
7730 case 4:
7731 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
7732 return "";
7733 }
7734 }
7735 /* Fall through */
7736
7737 default:
7738 otherops[0] = adjust_address (operands[0], SImode, 4);
7739 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
7740 output_asm_insn ("str%?\t%1, %0", operands);
7741 output_asm_insn ("str%?\t%1, %0", otherops);
7742 }
7743 }
7744 else
7745 /* Constraints should prevent this. */
7746 abort ();
7747
7748 return "";
7749 }
7750
7751
7752 /* Output an arbitrary MOV reg, #n.
7753 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
7754 const char *
7755 output_mov_immediate (rtx *operands)
7756 {
7757 HOST_WIDE_INT n = INTVAL (operands[1]);
7758
7759 /* Try to use one MOV. */
7760 if (const_ok_for_arm (n))
7761 output_asm_insn ("mov%?\t%0, %1", operands);
7762
7763 /* Try to use one MVN. */
7764 else if (const_ok_for_arm (~n))
7765 {
7766 operands[1] = GEN_INT (~n);
7767 output_asm_insn ("mvn%?\t%0, %1", operands);
7768 }
7769 else
7770 {
7771 int n_ones = 0;
7772 int i;
7773
7774 /* If all else fails, make it out of ORRs or BICs as appropriate. */
7775 for (i = 0; i < 32; i++)
7776 if (n & 1 << i)
7777 n_ones++;
7778
7779 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
7780 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
7781 else
7782 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
7783 }
7784
7785 return "";
7786 }
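/* Illustrative examples (assuming r0 as the destination and decimal
   immediate printing; a sketch, not an exhaustive list):

     0x000000fe  valid ARM immediate            ->  mov r0, #254
     0xffffff00  invalid, but complement valid  ->  mvn r0, #255
     0x00ff00ff  needs splitting                ->  mov r0, #255
                                                    orr r0, r0, #16711680

   where 16711680 is 0x00ff0000; the split into byte-sized, evenly rotated
   chunks is performed by output_multi_immediate below.  */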
7787
7788 /* Output an ADD r, s, #n where n may be too big for one instruction.
7789 If adding zero to one register, output nothing. */
7790 const char *
7791 output_add_immediate (rtx *operands)
7792 {
7793 HOST_WIDE_INT n = INTVAL (operands[2]);
7794
7795 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
7796 {
7797 if (n < 0)
7798 output_multi_immediate (operands,
7799 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
7800 -n);
7801 else
7802 output_multi_immediate (operands,
7803 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
7804 n);
7805 }
7806
7807 return "";
7808 }
7809
7810 /* Output a multiple immediate operation.
7811 OPERANDS is the vector of operands referred to in the output patterns.
7812 INSTR1 is the output pattern to use for the first constant.
7813 INSTR2 is the output pattern to use for subsequent constants.
7814 IMMED_OP is the index of the constant slot in OPERANDS.
7815 N is the constant value. */
7816 static const char *
7817 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
7818 int immed_op, HOST_WIDE_INT n)
7819 {
7820 #if HOST_BITS_PER_WIDE_INT > 32
7821 n &= 0xffffffff;
7822 #endif
7823
7824 if (n == 0)
7825 {
7826 /* Quick and easy output. */
7827 operands[immed_op] = const0_rtx;
7828 output_asm_insn (instr1, operands);
7829 }
7830 else
7831 {
7832 int i;
7833 const char * instr = instr1;
7834
7835 /* Note that n is never zero here (which would give no output). */
7836 for (i = 0; i < 32; i += 2)
7837 {
7838 if (n & (3 << i))
7839 {
7840 operands[immed_op] = GEN_INT (n & (255 << i));
7841 output_asm_insn (instr, operands);
7842 instr = instr2;
7843 i += 6;
7844 }
7845 }
7846 }
7847
7848 return "";
7849 }
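/* Illustrative sketch (plain C, assuming a 32-bit unsigned int; not meant to
   be compiled as part of this file): the chunking rule used above scans bit
   pairs and grabs an aligned byte whenever a pair is occupied, which
   guarantees each chunk is a valid ARM immediate (8 bits at an even rotation):

       unsigned int n = 0x00ff00ff, chunk;
       int i;
       for (i = 0; i < 32; i += 2)
         if (n & (3u << i))
           {
             chunk = n & (255u << i);
             i += 6;
           }

   For this N the loop produces the chunks 0x000000ff and 0x00ff0000, i.e.
   the MOV/ORR split shown for output_mov_immediate above.  */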
7850
7851 /* Return the appropriate ARM instruction for the operation code.
7852 The returned result should not be overwritten. OP is the rtx of the
7853 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
7854 was shifted. */
7855 const char *
7856 arithmetic_instr (rtx op, int shift_first_arg)
7857 {
7858 switch (GET_CODE (op))
7859 {
7860 case PLUS:
7861 return "add";
7862
7863 case MINUS:
7864 return shift_first_arg ? "rsb" : "sub";
7865
7866 case IOR:
7867 return "orr";
7868
7869 case XOR:
7870 return "eor";
7871
7872 case AND:
7873 return "and";
7874
7875 default:
7876 abort ();
7877 }
7878 }
7879
7880 /* Ensure valid constant shifts and return the appropriate shift mnemonic
7881 for the operation code. The returned result should not be overwritten.
7882    OP is the rtx of the shift operation.
7883    On exit, *AMOUNTP will be -1 if the shift is by a register, or the
7884    constant shift amount otherwise.  */
7885 static const char *
7886 shift_op (rtx op, HOST_WIDE_INT *amountp)
7887 {
7888 const char * mnem;
7889 enum rtx_code code = GET_CODE (op);
7890
7891 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
7892 *amountp = -1;
7893 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
7894 *amountp = INTVAL (XEXP (op, 1));
7895 else
7896 abort ();
7897
7898 switch (code)
7899 {
7900 case ASHIFT:
7901 mnem = "asl";
7902 break;
7903
7904 case ASHIFTRT:
7905 mnem = "asr";
7906 break;
7907
7908 case LSHIFTRT:
7909 mnem = "lsr";
7910 break;
7911
7912 case ROTATERT:
7913 mnem = "ror";
7914 break;
7915
7916 case MULT:
7917 /* We never have to worry about the amount being other than a
7918 power of 2, since this case can never be reloaded from a reg. */
7919 if (*amountp != -1)
7920 *amountp = int_log2 (*amountp);
7921 else
7922 abort ();
7923 return "asl";
7924
7925 default:
7926 abort ();
7927 }
7928
7929 if (*amountp != -1)
7930 {
7931 /* This is not 100% correct, but follows from the desire to merge
7932 multiplication by a power of 2 with the recognizer for a
7933 shift. >=32 is not a valid shift for "asl", so we must try and
7934 output a shift that produces the correct arithmetical result.
7935 Using lsr #32 is identical except for the fact that the carry bit
7936 is not set correctly if we set the flags; but we never use the
7937 carry bit from such an operation, so we can ignore that. */
7938 if (code == ROTATERT)
7939 /* Rotate is just modulo 32. */
7940 *amountp &= 31;
7941 else if (*amountp != (*amountp & 31))
7942 {
7943 if (code == ASHIFT)
7944 mnem = "lsr";
7945 *amountp = 32;
7946 }
7947
7948 /* Shifts of 0 are no-ops. */
7949 if (*amountp == 0)
7950 return NULL;
7951 }
7952
7953 return mnem;
7954 }
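/* Illustrative summary of shift_op above (a recap, assuming constant
   operands where shown):

     (ashift x (const_int 3))     ->  "asl",  *amountp = 3
     (mult x (const_int 8))       ->  "asl",  *amountp = 3   (log2 of 8)
     (ashift x (const_int 33))    ->  "lsr",  *amountp = 32  (clamped)
     (rotatert x (const_int 33))  ->  "ror",  *amountp = 1   (modulo 32)
     (lshiftrt x (reg))           ->  "lsr",  *amountp = -1
     a shift by 0                 ->  NULL (the shift is a no-op).  */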
7955
7956 /* Obtain the shift count from the POWER of two.  */
7957
7958 static HOST_WIDE_INT
7959 int_log2 (HOST_WIDE_INT power)
7960 {
7961 HOST_WIDE_INT shift = 0;
7962
7963 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
7964 {
7965 if (shift > 31)
7966 abort ();
7967 shift++;
7968 }
7969
7970 return shift;
7971 }
7972
7973 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
7974 /bin/as is horribly restrictive. */
7975 #define MAX_ASCII_LEN 51
7976
7977 void
7978 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
7979 {
7980 int i;
7981 int len_so_far = 0;
7982
7983 fputs ("\t.ascii\t\"", stream);
7984
7985 for (i = 0; i < len; i++)
7986 {
7987 int c = p[i];
7988
7989 if (len_so_far >= MAX_ASCII_LEN)
7990 {
7991 fputs ("\"\n\t.ascii\t\"", stream);
7992 len_so_far = 0;
7993 }
7994
7995 switch (c)
7996 {
7997 case TARGET_TAB:
7998 fputs ("\\t", stream);
7999 len_so_far += 2;
8000 break;
8001
8002 case TARGET_FF:
8003 fputs ("\\f", stream);
8004 len_so_far += 2;
8005 break;
8006
8007 case TARGET_BS:
8008 fputs ("\\b", stream);
8009 len_so_far += 2;
8010 break;
8011
8012 case TARGET_CR:
8013 fputs ("\\r", stream);
8014 len_so_far += 2;
8015 break;
8016
8017 case TARGET_NEWLINE:
8018 fputs ("\\n", stream);
8019 c = p [i + 1];
8020 if ((c >= ' ' && c <= '~')
8021 || c == TARGET_TAB)
8022 /* This is a good place for a line break. */
8023 len_so_far = MAX_ASCII_LEN;
8024 else
8025 len_so_far += 2;
8026 break;
8027
8028 case '\"':
8029 case '\\':
8030 putc ('\\', stream);
8031 len_so_far++;
8032 /* Drop through. */
8033
8034 default:
8035 if (c >= ' ' && c <= '~')
8036 {
8037 putc (c, stream);
8038 len_so_far++;
8039 }
8040 else
8041 {
8042 fprintf (stream, "\\%03o", c);
8043 len_so_far += 4;
8044 }
8045 break;
8046 }
8047 }
8048
8049 fputs ("\"\n", stream);
8050 }
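/* Illustrative example (assumed input bytes, not a real caller): for the
   three bytes 'A', '"' and 0x07 the routine above emits

       .ascii  "A\"\007"

   Printable characters are copied as-is, quote and backslash are escaped,
   and anything else becomes a three-digit octal escape.  Strings longer
   than MAX_ASCII_LEN are split across several .ascii directives.  */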
8051 \f
8052 /* Compute the register save mask for registers 0 through 12
8053 inclusive. This code is used by both arm_compute_save_reg_mask
8054 and arm_compute_initial_elimination_offset. */
8055 static unsigned long
8056 arm_compute_save_reg0_reg12_mask (void)
8057 {
8058 unsigned long func_type = arm_current_func_type ();
8059 unsigned int save_reg_mask = 0;
8060 unsigned int reg;
8061
8062 if (IS_INTERRUPT (func_type))
8063 {
8064 unsigned int max_reg;
8065 /* Interrupt functions must not corrupt any registers,
8066 even call clobbered ones. If this is a leaf function
8067 we can just examine the registers used by the RTL, but
8068 otherwise we have to assume that whatever function is
8069 called might clobber anything, and so we have to save
8070 all the call-clobbered registers as well. */
8071 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8072 /* FIQ handlers have registers r8 - r12 banked, so
8073 we only need to check r0 - r7.  Normal ISRs only
8074 bank r14 and r15, so we must check up to r12.
8075 r13 is the stack pointer which is always preserved,
8076 so we do not need to consider it here. */
8077 max_reg = 7;
8078 else
8079 max_reg = 12;
8080
8081 for (reg = 0; reg <= max_reg; reg++)
8082 if (regs_ever_live[reg]
8083 || (! current_function_is_leaf && call_used_regs [reg]))
8084 save_reg_mask |= (1 << reg);
8085 }
8086 else
8087 {
8088 /* In the normal case we only need to save those registers
8089 which are call saved and which are used by this function. */
8090 for (reg = 0; reg <= 10; reg++)
8091 if (regs_ever_live[reg] && ! call_used_regs [reg])
8092 save_reg_mask |= (1 << reg);
8093
8094 /* Handle the frame pointer as a special case. */
8095 if (! TARGET_APCS_FRAME
8096 && ! frame_pointer_needed
8097 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8098 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8099 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8100
8101 /* If we aren't loading the PIC register,
8102 don't stack it even though it may be live. */
8103 if (flag_pic
8104 && ! TARGET_SINGLE_PIC_BASE
8105 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
8106 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8107 }
8108
8109 return save_reg_mask;
8110 }
8111
8112 /* Compute a bit mask of which registers need to be
8113 saved on the stack for the current function. */
8114
8115 static unsigned long
8116 arm_compute_save_reg_mask (void)
8117 {
8118 unsigned int save_reg_mask = 0;
8119 unsigned long func_type = arm_current_func_type ();
8120
8121 if (IS_NAKED (func_type))
8122 /* This should never really happen. */
8123 return 0;
8124
8125 /* If we are creating a stack frame, then we must save the frame pointer,
8126 IP (which will hold the old stack pointer), LR and the PC. */
8127 if (frame_pointer_needed)
8128 save_reg_mask |=
8129 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8130 | (1 << IP_REGNUM)
8131 | (1 << LR_REGNUM)
8132 | (1 << PC_REGNUM);
8133
8134 /* Volatile functions do not return, so there
8135 is no need to save any other registers. */
8136 if (IS_VOLATILE (func_type))
8137 return save_reg_mask;
8138
8139 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8140
8141 /* Decide if we need to save the link register.
8142 Interrupt routines have their own banked link register,
8143 so they never need to save it.
8144 Otherwise if we do not use the link register we do not need to save
8145 it. If we are pushing other registers onto the stack however, we
8146 can save an instruction in the epilogue by pushing the link register
8147 now and then popping it back into the PC. This incurs extra memory
8148 accesses though, so we only do it when optimizing for size, and only
8149 if we know that we will not need a fancy return sequence. */
8150 if (regs_ever_live [LR_REGNUM]
8151 || (save_reg_mask
8152 && optimize_size
8153 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
8154 save_reg_mask |= 1 << LR_REGNUM;
8155
8156 if (cfun->machine->lr_save_eliminated)
8157 save_reg_mask &= ~ (1 << LR_REGNUM);
8158
8159 if (TARGET_REALLY_IWMMXT
8160 && ((bit_count (save_reg_mask)
8161 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8162 {
8163 unsigned int reg;
8164
8165 /* The total number of registers that are going to be pushed
8166 onto the stack is odd. We need to ensure that the stack
8167 is 64-bit aligned before we start to save iWMMXt registers,
8168 and also before we start to create locals. (A local variable
8169 might be a double or long long which we will load/store using
8170 an iWMMXt instruction). Therefore we need to push another
8171 ARM register, so that the stack will be 64-bit aligned. We
8172 try to avoid using the arg registers (r0 - r3) as they might be
8173 used to pass values in a tail call. */
8174 for (reg = 4; reg <= 12; reg++)
8175 if ((save_reg_mask & (1 << reg)) == 0)
8176 break;
8177
8178 if (reg <= 12)
8179 save_reg_mask |= (1 << reg);
8180 else
8181 {
8182 cfun->machine->sibcall_blocked = 1;
8183 save_reg_mask |= (1 << 3);
8184 }
8185 }
8186
8187 return save_reg_mask;
8188 }
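/* Illustrative example (assumed register usage) of the iWMMXt alignment
   fix-up above: if the mask so far is {r4-r9, lr} (seven registers) and
   there are no pretend args, the count is odd, so the first free register
   in the r4-r12 range - here r10 - is added to the mask purely to keep the
   stack 64-bit aligned for the iWMMXt saves that follow.  */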
8189
8190 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8191 everything bar the final return instruction. */
8192 const char *
8193 output_return_instruction (rtx operand, int really_return, int reverse)
8194 {
8195 char conditional[10];
8196 char instr[100];
8197 int reg;
8198 unsigned long live_regs_mask;
8199 unsigned long func_type;
8200
8201 func_type = arm_current_func_type ();
8202
8203 if (IS_NAKED (func_type))
8204 return "";
8205
8206 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8207 {
8208 /* If this function was declared non-returning, and we have
8209 found a tail call, then we have to trust that the called
8210 function won't return. */
8211 if (really_return)
8212 {
8213 rtx ops[2];
8214
8215 /* Otherwise, trap an attempted return by aborting. */
8216 ops[0] = operand;
8217 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8218 : "abort");
8219 assemble_external_libcall (ops[1]);
8220 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8221 }
8222
8223 return "";
8224 }
8225
8226 if (current_function_calls_alloca && !really_return)
8227 abort ();
8228
8229 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8230
8231 return_used_this_function = 1;
8232
8233 live_regs_mask = arm_compute_save_reg_mask ();
8234
8235 if (live_regs_mask)
8236 {
8237 const char * return_reg;
8238
8239 /* If we do not have any special requirements for function exit
8240 (e.g. interworking, or an ISR) then we can load the return address
8241 directly into the PC. Otherwise we must load it into LR. */
8242 if (really_return
8243 && ! TARGET_INTERWORK)
8244 return_reg = reg_names[PC_REGNUM];
8245 else
8246 return_reg = reg_names[LR_REGNUM];
8247
8248 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8249 /* There are two possible reasons for the IP register being saved.
8250 Either a stack frame was created, in which case IP contains the
8251 old stack pointer, or an ISR routine corrupted it.  If this is an
8252 ISR routine then just restore IP, otherwise restore IP into SP. */
8253 if (! IS_INTERRUPT (func_type))
8254 {
8255 live_regs_mask &= ~ (1 << IP_REGNUM);
8256 live_regs_mask |= (1 << SP_REGNUM);
8257 }
8258
8259 /* On some ARM architectures it is faster to use LDR rather than
8260 LDM to load a single register. On other architectures, the
8261 cost is the same. In 26 bit mode, or for exception handlers,
8262 we have to use LDM to load the PC so that the CPSR is also
8263 restored. */
8264 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8265 {
8266 if (live_regs_mask == (unsigned int)(1 << reg))
8267 break;
8268 }
8269 if (reg <= LAST_ARM_REGNUM
8270 && (reg != LR_REGNUM
8271 || ! really_return
8272 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
8273 {
8274 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8275 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8276 }
8277 else
8278 {
8279 char *p;
8280 int first = 1;
8281
8282 /* Generate the load multiple instruction to restore the
8283 registers. Note we can get here, even if
8284 frame_pointer_needed is true, but only if sp already
8285 points to the base of the saved core registers. */
8286 if (live_regs_mask & (1 << SP_REGNUM))
8287 {
8288 unsigned HOST_WIDE_INT stack_adjust =
8289 arm_get_frame_size () + current_function_outgoing_args_size;
8290
8291 if (stack_adjust != 0 && stack_adjust != 4)
8292 abort ();
8293
8294 if (stack_adjust && arm_arch5)
8295 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
8296 else
8297 {
8298 /* If we can't use ldmib (SA110 bug), then try to pop r3
8299 instead. */
8300 if (stack_adjust)
8301 live_regs_mask |= 1 << 3;
8302 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
8303 }
8304 }
8305 else
8306 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
8307
8308 p = instr + strlen (instr);
8309
8310 for (reg = 0; reg <= SP_REGNUM; reg++)
8311 if (live_regs_mask & (1 << reg))
8312 {
8313 int l = strlen (reg_names[reg]);
8314
8315 if (first)
8316 first = 0;
8317 else
8318 {
8319 memcpy (p, ", ", 2);
8320 p += 2;
8321 }
8322
8323 memcpy (p, "%|", 2);
8324 memcpy (p + 2, reg_names[reg], l);
8325 p += l + 2;
8326 }
8327
8328 if (live_regs_mask & (1 << LR_REGNUM))
8329 {
8330 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
8331 /* Decide if we need to add the ^ symbol to the end of the
8332 register list. This causes the saved condition codes
8333 register to be copied into the current condition codes
8334 register. We do the copy if we are conforming to the 32-bit
8335 ABI and this is an interrupt function, or if we are
8336 conforming to the 26-bit ABI. There is a special case for
8337 the 26-bit ABI however, which is if we are writing back the
8338 stack pointer but not loading the PC. In this case adding
8339 the ^ symbol would create a type 2 LDM instruction, where
8340 writeback is UNPREDICTABLE. We are safe in leaving the ^
8341 character off in this case however, since the actual return
8342 instruction will be a MOVS which will restore the CPSR. */
8343 if ((TARGET_APCS_32 && IS_INTERRUPT (func_type))
8344 || (! TARGET_APCS_32 && really_return))
8345 strcat (p, "^");
8346 }
8347 else
8348 strcpy (p, "}");
8349 }
8350
8351 output_asm_insn (instr, & operand);
8352
8353 /* See if we need to generate an extra instruction to
8354 perform the actual function return. */
8355 if (really_return
8356 && func_type != ARM_FT_INTERWORKED
8357 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8358 {
8359 /* The return has already been handled
8360 by loading the LR into the PC. */
8361 really_return = 0;
8362 }
8363 }
8364
8365 if (really_return)
8366 {
8367 switch ((int) ARM_FUNC_TYPE (func_type))
8368 {
8369 case ARM_FT_ISR:
8370 case ARM_FT_FIQ:
8371 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
8372 break;
8373
8374 case ARM_FT_INTERWORKED:
8375 sprintf (instr, "bx%s\t%%|lr", conditional);
8376 break;
8377
8378 case ARM_FT_EXCEPTION:
8379 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
8380 break;
8381
8382 default:
8383 /* ARMv5 implementations always provide BX, so interworking
8384 is the default unless APCS-26 is in use. */
8385 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
8386 sprintf (instr, "bx%s\t%%|lr", conditional);
8387 else
8388 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
8389 conditional, TARGET_APCS_32 ? "" : "s");
8390 break;
8391 }
8392
8393 output_asm_insn (instr, & operand);
8394 }
8395
8396 return "";
8397 }
8398
8399 /* Write the function name into the code section, directly preceding
8400 the function prologue.
8401
8402 Code will be output similar to this:
8403 t0
8404 .ascii "arm_poke_function_name", 0
8405 .align
8406 t1
8407 .word 0xff000000 + (t1 - t0)
8408 arm_poke_function_name
8409 mov ip, sp
8410 stmfd sp!, {fp, ip, lr, pc}
8411 sub fp, ip, #4
8412
8413 When performing a stack backtrace, code can inspect the value
8414 of 'pc' stored at 'fp' + 0. If the trace function then looks
8415 at location pc - 12 and the top 8 bits are set, then we know
8416 that there is a function name embedded immediately preceding this
8417 location and has length ((pc[-3]) & 0xff000000).
8418
8419 We assume that pc is declared as a pointer to an unsigned long.
8420
8421 It is of no benefit to output the function name if we are assembling
8422 a leaf function. These function types will not contain a stack
8423 backtrace structure, therefore it is not possible to determine the
8424 function name. */
8425 void
8426 arm_poke_function_name (FILE *stream, const char *name)
8427 {
8428 unsigned long alignlength;
8429 unsigned long length;
8430 rtx x;
8431
8432 length = strlen (name) + 1;
8433 alignlength = ROUND_UP_WORD (length);
8434
8435 ASM_OUTPUT_ASCII (stream, name, length);
8436 ASM_OUTPUT_ALIGN (stream, 2);
8437 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
8438 assemble_aligned_integer (UNITS_PER_WORD, x);
8439 }
8440
8441 /* Place some comments into the assembler stream
8442 describing the current function. */
8443 static void
8444 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
8445 {
8446 unsigned long func_type;
8447
8448 if (!TARGET_ARM)
8449 {
8450 thumb_output_function_prologue (f, frame_size);
8451 return;
8452 }
8453
8454 /* Sanity check. */
8455 if (arm_ccfsm_state || arm_target_insn)
8456 abort ();
8457
8458 func_type = arm_current_func_type ();
8459
8460 switch ((int) ARM_FUNC_TYPE (func_type))
8461 {
8462 default:
8463 case ARM_FT_NORMAL:
8464 break;
8465 case ARM_FT_INTERWORKED:
8466 asm_fprintf (f, "\t%@ Function supports interworking.\n");
8467 break;
8468 case ARM_FT_EXCEPTION_HANDLER:
8469 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
8470 break;
8471 case ARM_FT_ISR:
8472 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
8473 break;
8474 case ARM_FT_FIQ:
8475 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
8476 break;
8477 case ARM_FT_EXCEPTION:
8478 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
8479 break;
8480 }
8481
8482 if (IS_NAKED (func_type))
8483 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
8484
8485 if (IS_VOLATILE (func_type))
8486 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
8487
8488 if (IS_NESTED (func_type))
8489 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
8490
8491 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
8492 current_function_args_size,
8493 current_function_pretend_args_size, frame_size);
8494
8495 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
8496 frame_pointer_needed,
8497 cfun->machine->uses_anonymous_args);
8498
8499 if (cfun->machine->lr_save_eliminated)
8500 asm_fprintf (f, "\t%@ link register save eliminated.\n");
8501
8502 #ifdef AOF_ASSEMBLER
8503 if (flag_pic)
8504 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
8505 #endif
8506
8507 return_used_this_function = 0;
8508 }
8509
8510 const char *
8511 arm_output_epilogue (rtx sibling)
8512 {
8513 int reg;
8514 unsigned long saved_regs_mask;
8515 unsigned long func_type;
8516 /* Floats_offset is the offset from the "virtual" frame. In an APCS
8517 frame that is $fp + 4 for a non-variadic function. */
8518 int floats_offset = 0;
8519 rtx operands[3];
8520 int frame_size = arm_get_frame_size ();
8521 FILE * f = asm_out_file;
8522 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
8523 unsigned int lrm_count = 0;
8524 int really_return = (sibling == NULL);
8525
8526 /* If we have already generated the return instruction
8527 then it is futile to generate anything else. */
8528 if (use_return_insn (FALSE, sibling) && return_used_this_function)
8529 return "";
8530
8531 func_type = arm_current_func_type ();
8532
8533 if (IS_NAKED (func_type))
8534 /* Naked functions don't have epilogues. */
8535 return "";
8536
8537 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8538 {
8539 rtx op;
8540
8541 /* A volatile function should never return. Call abort. */
8542 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
8543 assemble_external_libcall (op);
8544 output_asm_insn ("bl\t%a0", &op);
8545
8546 return "";
8547 }
8548
8549 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
8550 && ! really_return)
8551 /* If we are throwing an exception, then we really must
8552 be doing a return, so we can't tail-call. */
8553 abort ();
8554
8555 saved_regs_mask = arm_compute_save_reg_mask ();
8556
8557 if (TARGET_IWMMXT)
8558 lrm_count = bit_count (saved_regs_mask);
8559
8560 /* XXX We should adjust floats_offset for any anonymous args, and then
8561 re-adjust vfp_offset below to compensate. */
8562
8563 /* Compute how far away the floats will be. */
8564 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8565 if (saved_regs_mask & (1 << reg))
8566 floats_offset += 4;
8567
8568 if (frame_pointer_needed)
8569 {
8570 int vfp_offset = 4;
8571
8572 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
8573 {
8574 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
8575 if (regs_ever_live[reg] && !call_used_regs[reg])
8576 {
8577 floats_offset += 12;
8578 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
8579 reg, FP_REGNUM, floats_offset - vfp_offset);
8580 }
8581 }
8582 else
8583 {
8584 int start_reg = LAST_ARM_FP_REGNUM;
8585
8586 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
8587 {
8588 if (regs_ever_live[reg] && !call_used_regs[reg])
8589 {
8590 floats_offset += 12;
8591
8592 /* We can't unstack more than four registers at once. */
8593 if (start_reg - reg == 3)
8594 {
8595 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
8596 reg, FP_REGNUM, floats_offset - vfp_offset);
8597 start_reg = reg - 1;
8598 }
8599 }
8600 else
8601 {
8602 if (reg != start_reg)
8603 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
8604 reg + 1, start_reg - reg,
8605 FP_REGNUM, floats_offset - vfp_offset);
8606 start_reg = reg - 1;
8607 }
8608 }
8609
8610 /* Just in case the last register checked also needs unstacking. */
8611 if (reg != start_reg)
8612 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
8613 reg + 1, start_reg - reg,
8614 FP_REGNUM, floats_offset - vfp_offset);
8615 }
8616
8617 if (TARGET_IWMMXT)
8618 {
8619 /* The frame pointer is guaranteed to be non-double-word aligned.
8620 This is because it is set to (old_stack_pointer - 4) and the
8621 old_stack_pointer was double word aligned. Thus the offset to
8622 the iWMMXt registers to be loaded must also be non-double-word
8623 sized, so that the resultant address *is* double-word aligned.
8624 We can ignore floats_offset since that was already included in
8625 the live_regs_mask. */
8626 lrm_count += (lrm_count % 2 ? 2 : 1);
8627
8628 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
8629 if (regs_ever_live[reg] && !call_used_regs[reg])
8630 {
8631 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
8632 reg, FP_REGNUM, lrm_count * 4);
8633 lrm_count += 2;
8634 }
8635 }
8636
8637 /* saved_regs_mask should contain the IP, which at the time of stack
8638 frame generation actually contains the old stack pointer. So a
8639 quick way to unwind the stack is just to pop the IP register directly
8640 into the stack pointer. */
8641 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
8642 abort ();
8643 saved_regs_mask &= ~ (1 << IP_REGNUM);
8644 saved_regs_mask |= (1 << SP_REGNUM);
8645
8646 /* There are two registers left in saved_regs_mask - LR and PC. We
8647 only need to restore the LR register (the return address), but to
8648 save time we can load it directly into the PC, unless we need a
8649 special function exit sequence, or we are not really returning. */
8650 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
8651 /* Delete the LR from the register mask, so that the LR on
8652 the stack is loaded into the PC in the register mask. */
8653 saved_regs_mask &= ~ (1 << LR_REGNUM);
8654 else
8655 saved_regs_mask &= ~ (1 << PC_REGNUM);
8656
8657 /* We must use SP as the base register, because SP is one of the
8658 registers being restored. If an interrupt or page fault
8659 happens in the ldm instruction, the SP might or might not
8660 have been restored. That would be bad, as then SP will no
8661 longer indicate the safe area of stack, and we can get stack
8662 corruption. Using SP as the base register means that it will
8663 be reset correctly to the original value, should an interrupt
8664 occur. If the stack pointer already points at the right
8665 place, then omit the subtraction. */
8666 if (((frame_size + current_function_outgoing_args_size + floats_offset)
8667 != 4 * (1 + (int) bit_count (saved_regs_mask)))
8668 || current_function_calls_alloca)
8669 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
8670 4 * bit_count (saved_regs_mask));
8671 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
8672
8673 if (IS_INTERRUPT (func_type))
8674 /* Interrupt handlers will have pushed the
8675 IP onto the stack, so restore it now. */
8676 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
8677 }
8678 else
8679 {
8680 /* Restore stack pointer if necessary. */
8681 if (frame_size + current_function_outgoing_args_size != 0)
8682 {
8683 operands[0] = operands[1] = stack_pointer_rtx;
8684 operands[2] = GEN_INT (frame_size
8685 + current_function_outgoing_args_size);
8686 output_add_immediate (operands);
8687 }
8688
8689 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
8690 {
8691 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
8692 if (regs_ever_live[reg] && !call_used_regs[reg])
8693 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
8694 reg, SP_REGNUM);
8695 }
8696 else
8697 {
8698 int start_reg = FIRST_ARM_FP_REGNUM;
8699
8700 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
8701 {
8702 if (regs_ever_live[reg] && !call_used_regs[reg])
8703 {
8704 if (reg - start_reg == 3)
8705 {
8706 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
8707 start_reg, SP_REGNUM);
8708 start_reg = reg + 1;
8709 }
8710 }
8711 else
8712 {
8713 if (reg != start_reg)
8714 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
8715 start_reg, reg - start_reg,
8716 SP_REGNUM);
8717
8718 start_reg = reg + 1;
8719 }
8720 }
8721
8722 /* Just in case the last register checked also needs unstacking. */
8723 if (reg != start_reg)
8724 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
8725 start_reg, reg - start_reg, SP_REGNUM);
8726 }
8727
8728 if (TARGET_IWMMXT)
8729 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
8730 if (regs_ever_live[reg] && !call_used_regs[reg])
8731 asm_fprintf (f, "\twldrd\t%r, [%r, #+8]!\n", reg, SP_REGNUM);
8732
8733 /* If we can, restore the LR into the PC. */
8734 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8735 && really_return
8736 && current_function_pretend_args_size == 0
8737 && saved_regs_mask & (1 << LR_REGNUM))
8738 {
8739 saved_regs_mask &= ~ (1 << LR_REGNUM);
8740 saved_regs_mask |= (1 << PC_REGNUM);
8741 }
8742
8743 /* Load the registers off the stack. If we only have one register
8744 to load use the LDR instruction - it is faster. */
8745 if (saved_regs_mask == (1 << LR_REGNUM))
8746 {
8747 /* The exception handler ignores the LR, so we do
8748 not really need to load it off the stack. */
8749 if (eh_ofs)
8750 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
8751 else
8752 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
8753 }
8754 else if (saved_regs_mask)
8755 {
8756 if (saved_regs_mask & (1 << SP_REGNUM))
8757 /* Note - write back to the stack register is not enabled
8758 (i.e. we do not emit "ldmfd sp!, ...").  We know that the stack
8759 pointer is in the list of registers, and if we added writeback
8760 the instruction would become UNPREDICTABLE. */
8761 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
8762 else
8763 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
8764 }
8765
8766 if (current_function_pretend_args_size)
8767 {
8768 /* Unwind the pre-pushed regs. */
8769 operands[0] = operands[1] = stack_pointer_rtx;
8770 operands[2] = GEN_INT (current_function_pretend_args_size);
8771 output_add_immediate (operands);
8772 }
8773 }
8774
8775 if (! really_return
8776 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8777 && current_function_pretend_args_size == 0
8778 && saved_regs_mask & (1 << PC_REGNUM)))
8779 return "";
8780
8781 /* Generate the return instruction. */
8782 switch ((int) ARM_FUNC_TYPE (func_type))
8783 {
8784 case ARM_FT_EXCEPTION_HANDLER:
8785 /* Even in 26-bit mode we do a mov (rather than a movs)
8786 because we don't have the PSR bits set in the address. */
8787 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
8788 break;
8789
8790 case ARM_FT_ISR:
8791 case ARM_FT_FIQ:
8792 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
8793 break;
8794
8795 case ARM_FT_EXCEPTION:
8796 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
8797 break;
8798
8799 case ARM_FT_INTERWORKED:
8800 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
8801 break;
8802
8803 default:
8804 if (frame_pointer_needed)
8805 /* If we used the frame pointer then the return address
8806 will have been loaded off the stack directly into the
8807 PC, so there is no need to issue a MOV instruction
8808 here. */
8809 ;
8810 else if (current_function_pretend_args_size == 0
8811 && (saved_regs_mask & (1 << LR_REGNUM)))
8812 /* Similarly we may have been able to load LR into the PC
8813 even if we did not create a stack frame. */
8814 ;
8815 else if (TARGET_APCS_32)
8816 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
8817 else
8818 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
8819 break;
8820 }
8821
8822 return "";
8823 }
8824
8825 static void
8826 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8827 HOST_WIDE_INT frame_size)
8828 {
8829 if (TARGET_THUMB)
8830 {
8831 /* ??? Probably not safe to set this here, since it assumes that a
8832 function will be emitted as assembly immediately after we generate
8833 RTL for it. This does not happen for inline functions. */
8834 return_used_this_function = 0;
8835 }
8836 else
8837 {
8838 /* We need to take into account any stack-frame rounding. */
8839 frame_size = arm_get_frame_size ();
8840
8841 if (use_return_insn (FALSE, NULL)
8842 && return_used_this_function
8843 && (frame_size + current_function_outgoing_args_size) != 0
8844 && !frame_pointer_needed)
8845 abort ();
8846
8847 /* Reset the ARM-specific per-function variables. */
8848 after_arm_reorg = 0;
8849 }
8850 }
8851
8852 /* Generate and emit an insn that we will recognize as a push_multi.
8853 Unfortunately, since this insn does not reflect very well the actual
8854 semantics of the operation, we need to annotate the insn for the benefit
8855 of DWARF2 frame unwind information. */
8856 static rtx
8857 emit_multi_reg_push (int mask)
8858 {
8859 int num_regs = 0;
8860 int num_dwarf_regs;
8861 int i, j;
8862 rtx par;
8863 rtx dwarf;
8864 int dwarf_par_index;
8865 rtx tmp, reg;
8866
8867 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8868 if (mask & (1 << i))
8869 num_regs++;
8870
8871 if (num_regs == 0 || num_regs > 16)
8872 abort ();
8873
8874 /* We don't record the PC in the dwarf frame information. */
8875 num_dwarf_regs = num_regs;
8876 if (mask & (1 << PC_REGNUM))
8877 num_dwarf_regs--;
8878
8879 /* For the body of the insn we are going to generate an UNSPEC in
8880 parallel with several USEs. This allows the insn to be recognized
8881 by the push_multi pattern in the arm.md file. The insn looks
8882 something like this:
8883
8884 (parallel [
8885 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
8886 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
8887 (use (reg:SI 11 fp))
8888 (use (reg:SI 12 ip))
8889 (use (reg:SI 14 lr))
8890 (use (reg:SI 15 pc))
8891 ])
8892
8893 For the frame note however, we try to be more explicit and actually
8894 show each register being stored into the stack frame, plus a (single)
8895 decrement of the stack pointer. We do it this way in order to be
8896 friendly to the stack unwinding code, which only wants to see a single
8897 stack decrement per instruction. The RTL we generate for the note looks
8898 something like this:
8899
8900 (sequence [
8901 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
8902 (set (mem:SI (reg:SI sp)) (reg:SI r4))
8903 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
8904 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
8905 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
8906 ])
8907
8908 This sequence is used both by the code to support stack unwinding for
8909 exception handlers and the code to generate dwarf2 frame debugging. */
8910
8911 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
8912 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
8913 dwarf_par_index = 1;
8914
8915 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8916 {
8917 if (mask & (1 << i))
8918 {
8919 reg = gen_rtx_REG (SImode, i);
8920
8921 XVECEXP (par, 0, 0)
8922 = gen_rtx_SET (VOIDmode,
8923 gen_rtx_MEM (BLKmode,
8924 gen_rtx_PRE_DEC (BLKmode,
8925 stack_pointer_rtx)),
8926 gen_rtx_UNSPEC (BLKmode,
8927 gen_rtvec (1, reg),
8928 UNSPEC_PUSH_MULT));
8929
8930 if (i != PC_REGNUM)
8931 {
8932 tmp = gen_rtx_SET (VOIDmode,
8933 gen_rtx_MEM (SImode, stack_pointer_rtx),
8934 reg);
8935 RTX_FRAME_RELATED_P (tmp) = 1;
8936 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
8937 dwarf_par_index++;
8938 }
8939
8940 break;
8941 }
8942 }
8943
8944 for (j = 1, i++; j < num_regs; i++)
8945 {
8946 if (mask & (1 << i))
8947 {
8948 reg = gen_rtx_REG (SImode, i);
8949
8950 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
8951
8952 if (i != PC_REGNUM)
8953 {
8954 tmp = gen_rtx_SET (VOIDmode,
8955 gen_rtx_MEM (SImode,
8956 plus_constant (stack_pointer_rtx,
8957 4 * j)),
8958 reg);
8959 RTX_FRAME_RELATED_P (tmp) = 1;
8960 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
8961 }
8962
8963 j++;
8964 }
8965 }
8966
8967 par = emit_insn (par);
8968
8969 tmp = gen_rtx_SET (SImode,
8970 stack_pointer_rtx,
8971 gen_rtx_PLUS (SImode,
8972 stack_pointer_rtx,
8973 GEN_INT (-4 * num_regs)));
8974 RTX_FRAME_RELATED_P (tmp) = 1;
8975 XVECEXP (dwarf, 0, 0) = tmp;
8976
8977 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8978 REG_NOTES (par));
8979 return par;
8980 }
8981
8982 static rtx
8983 emit_sfm (int base_reg, int count)
8984 {
8985 rtx par;
8986 rtx dwarf;
8987 rtx tmp, reg;
8988 int i;
8989
8990 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8991 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8992
8993 reg = gen_rtx_REG (XFmode, base_reg++);
8994
8995 XVECEXP (par, 0, 0)
8996 = gen_rtx_SET (VOIDmode,
8997 gen_rtx_MEM (BLKmode,
8998 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8999 gen_rtx_UNSPEC (BLKmode,
9000 gen_rtvec (1, reg),
9001 UNSPEC_PUSH_MULT));
9002 tmp
9003 = gen_rtx_SET (VOIDmode,
9004 gen_rtx_MEM (XFmode,
9005 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9006 reg);
9007 RTX_FRAME_RELATED_P (tmp) = 1;
9008 XVECEXP (dwarf, 0, count - 1) = tmp;
9009
9010 for (i = 1; i < count; i++)
9011 {
9012 reg = gen_rtx_REG (XFmode, base_reg++);
9013 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9014
9015 tmp = gen_rtx_SET (VOIDmode,
9016 gen_rtx_MEM (XFmode,
9017 gen_rtx_PRE_DEC (BLKmode,
9018 stack_pointer_rtx)),
9019 reg);
9020 RTX_FRAME_RELATED_P (tmp) = 1;
9021 XVECEXP (dwarf, 0, count - i - 1) = tmp;
9022 }
9023
9024 par = emit_insn (par);
9025 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9026 REG_NOTES (par));
9027 return par;
9028 }
9029
9030 /* Compute the distance from register FROM to register TO.
9031 These can be the arg pointer (26), the soft frame pointer (25),
9032 the stack pointer (13) or the hard frame pointer (11).
9033 Typical stack layout looks like this:
9034
9035 old stack pointer -> | |
9036 ----
9037 | | \
9038 | | saved arguments for
9039 | | vararg functions
9040 | | /
9041 --
9042 hard FP & arg pointer -> | | \
9043 | | stack
9044 | | frame
9045 | | /
9046 --
9047 | | \
9048 | | call saved
9049 | | registers
9050 soft frame pointer -> | | /
9051 --
9052 | | \
9053 | | local
9054 | | variables
9055 | | /
9056 --
9057 | | \
9058 | | outgoing
9059 | | arguments
9060 current stack pointer -> | | /
9061 --
9062
9063 For a given function some or all of these stack components
9064 may not be needed, giving rise to the possibility of
9065 eliminating some of the registers.
9066
9067 The values returned by this function must reflect the behavior
9068 of arm_expand_prologue() and arm_compute_save_reg_mask().
9069
9070 The sign of the number returned reflects the direction of stack
9071 growth, so the values are positive for all eliminations except
9072 from the soft frame pointer to the hard frame pointer. */
9073 unsigned int
9074 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9075 {
9076 unsigned int local_vars = arm_get_frame_size ();
9077 unsigned int outgoing_args = current_function_outgoing_args_size;
9078 unsigned int stack_frame;
9079 unsigned int call_saved_registers;
9080 unsigned long func_type;
9081
9082 func_type = arm_current_func_type ();
9083
9084 /* Volatile functions never return, so there is
9085 no need to save call saved registers. */
9086 call_saved_registers = 0;
9087 if (! IS_VOLATILE (func_type))
9088 {
9089 unsigned int reg_mask;
9090 unsigned int reg;
9091
9092 /* Make sure that we compute which registers will be saved
9093 on the stack using the same algorithm that is used by
9094 the prologue creation code. */
9095 reg_mask = arm_compute_save_reg_mask ();
9096
9097 /* Now count the number of bits set in save_reg_mask.
9098 If we have already counted the registers in the stack
9099 frame, do not count them again. Non call-saved registers
9100 might be saved in the call-save area of the stack, if
9101 doing so will preserve the stack's alignment. Hence we
9102 must count them here. For each set bit we need 4 bytes
9103 of stack space. */
9104 if (frame_pointer_needed)
9105 reg_mask &= 0x07ff;
9106 call_saved_registers += 4 * bit_count (reg_mask);
9107
9108 /* If the hard floating point registers are going to be
9109 used then they must be saved on the stack as well.
9110 Each register occupies 12 bytes of stack space. */
9111 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
9112 if (regs_ever_live[reg] && ! call_used_regs[reg])
9113 call_saved_registers += 12;
9114
9115 if (TARGET_REALLY_IWMMXT)
9116 /* Check for the call-saved iWMMXt registers. */
9117 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9118 if (regs_ever_live[reg] && ! call_used_regs [reg])
9119 call_saved_registers += 8;
9120 }
9121
9122 /* The stack frame contains 4 registers - the old frame pointer,
9123 the old stack pointer, the return address and PC of the start
9124 of the function. */
9125 stack_frame = frame_pointer_needed ? 16 : 0;
9126
9127 /* OK, now we have enough information to compute the distances.
9128 There must be an entry in these switch tables for each pair
9129 of registers in ELIMINABLE_REGS, even if some of the entries
9130 seem to be redundant or useless. */
9131 switch (from)
9132 {
9133 case ARG_POINTER_REGNUM:
9134 switch (to)
9135 {
9136 case THUMB_HARD_FRAME_POINTER_REGNUM:
9137 return 0;
9138
9139 case FRAME_POINTER_REGNUM:
9140 /* This is the reverse of the soft frame pointer
9141 to hard frame pointer elimination below. */
9142 if (call_saved_registers == 0 && stack_frame == 0)
9143 return 0;
9144 return (call_saved_registers + stack_frame - 4);
9145
9146 case ARM_HARD_FRAME_POINTER_REGNUM:
9147 /* If there is no stack frame then the hard
9148 frame pointer and the arg pointer coincide. */
9149 if (stack_frame == 0 && call_saved_registers != 0)
9150 return 0;
9151 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9152 return (frame_pointer_needed
9153 && current_function_needs_context
9154 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9155
9156 case STACK_POINTER_REGNUM:
9157 /* If nothing has been pushed on the stack at all
9158 then this will return -4. This *is* correct! */
9159 return call_saved_registers + stack_frame + local_vars + outgoing_args - 4;
9160
9161 default:
9162 abort ();
9163 }
9164 break;
9165
9166 case FRAME_POINTER_REGNUM:
9167 switch (to)
9168 {
9169 case THUMB_HARD_FRAME_POINTER_REGNUM:
9170 return 0;
9171
9172 case ARM_HARD_FRAME_POINTER_REGNUM:
9173 /* The hard frame pointer points to the top entry in the
9174 stack frame. The soft frame pointer to the bottom entry
9175 in the stack frame. If there is no stack frame at all,
9176 then they are identical. */
9177 if (call_saved_registers == 0 && stack_frame == 0)
9178 return 0;
9179 return - (call_saved_registers + stack_frame - 4);
9180
9181 case STACK_POINTER_REGNUM:
9182 return local_vars + outgoing_args;
9183
9184 default:
9185 abort ();
9186 }
9187 break;
9188
9189 default:
9190 /* You cannot eliminate from the stack pointer.
9191 In theory you could eliminate from the hard frame
9192 pointer to the stack pointer, but this will never
9193 happen, since if a stack frame is not needed the
9194 hard frame pointer will never be used. */
9195 abort ();
9196 }
9197 }
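/* Worked example (assumed saved-register set, following the arithmetic
   above): with a frame pointer, saved registers {r4, r5, fp, ip, lr, pc}
   and no FPA or iWMMXt saves, the r0-r12 part of the mask leaves r4 and r5,
   so call_saved_registers = 8 and stack_frame = 16.  The offsets are then

     ARG_POINTER   -> ARM_HARD_FRAME_POINTER :  0 (or 4 with a static chain)
     ARG_POINTER   -> FRAME_POINTER          :  8 + 16 - 4 = 20
     FRAME_POINTER -> ARM_HARD_FRAME_POINTER : -(8 + 16 - 4) = -20
     FRAME_POINTER -> STACK_POINTER          :  local_vars + outgoing_args.  */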
9198
9199 /* Calculate the size of the stack frame, taking into account any
9200 padding that is required to ensure stack-alignment. */
9201 HOST_WIDE_INT
9202 arm_get_frame_size (void)
9203 {
9204 int regno;
9205
9206 int base_size = ROUND_UP_WORD (get_frame_size ());
9207 int entry_size = 0;
9208 unsigned long func_type = arm_current_func_type ();
9209 int leaf;
9210
9211 if (! TARGET_ARM)
9212 abort();
9213
9214 if (! TARGET_ATPCS)
9215 return base_size;
9216
9217 /* We need to know if we are a leaf function.  Unfortunately, it
9218 is possible to be called after start_sequence has been called,
9219 which causes get_insns to return the insns for the sequence rather
9220 than those of the function; leaf_function_p would then return the
9221 wrong result.
9222
9223 To work around this, we cache the computed frame size. This
9224 works because we will only be calling RTL expanders that need
9225 to know about leaf functions once reload has completed, and the
9226 frame size cannot be changed after that time, so we can safely
9227 use the cached value. */
9228
9229 if (reload_completed)
9230 return cfun->machine->frame_size;
9231
9232 leaf = leaf_function_p ();
9233
9234 /* A leaf function does not need any stack alignment if it has nothing
9235 on the stack. */
9236 if (leaf && base_size == 0)
9237 {
9238 cfun->machine->frame_size = 0;
9239 return 0;
9240 }
9241
9242 /* We know that SP will be word aligned on entry, and we must
9243 preserve that condition at any subroutine call. But those are
9244 the only constraints. */
9245
9246 /* Space for variadic functions. */
9247 if (current_function_pretend_args_size)
9248 entry_size += current_function_pretend_args_size;
9249
9250 /* Space for saved registers. */
9251 entry_size += bit_count (arm_compute_save_reg_mask ()) * 4;
9252
9253 /* Space for saved FPA registers. */
9254 if (! IS_VOLATILE (func_type))
9255 {
9256 for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
9257 if (regs_ever_live[regno] && ! call_used_regs[regno])
9258 entry_size += 12;
9259 }
9260
9261 if (TARGET_REALLY_IWMMXT)
9262 {
9263 /* Check for the call-saved iWMMXt registers. */
9264 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
9265 if (regs_ever_live [regno] && ! call_used_regs [regno])
9266 entry_size += 8;
9267 }
9268
9269 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
9270 base_size += 4;
9271 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
9272 abort ();
9273
9274 cfun->machine->frame_size = base_size;
9275
9276 return base_size;
9277 }
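/* Worked example (assumed values) of the ATPCS rounding above: with 20
   bytes of locals, saved registers {r4, lr}, no pretend or outgoing args
   and no FPA/iWMMXt saves, entry_size is 8 and 8 + 20 = 28 is not a
   multiple of 8, so base_size is bumped to 24 and the function returns
   (and caches) a frame size of 24.  */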
9278
9279 /* Generate the prologue instructions for entry into an ARM function. */
9280 void
9281 arm_expand_prologue (void)
9282 {
9283 int reg;
9284 rtx amount;
9285 rtx insn;
9286 rtx ip_rtx;
9287 unsigned long live_regs_mask;
9288 unsigned long func_type;
9289 int fp_offset = 0;
9290 int saved_pretend_args = 0;
9291 unsigned int args_to_push;
9292
9293 func_type = arm_current_func_type ();
9294
9295 /* Naked functions don't have prologues. */
9296 if (IS_NAKED (func_type))
9297 return;
9298
9299 /* Make a copy of current_function_pretend_args_size; we may modify it locally. */
9300 args_to_push = current_function_pretend_args_size;
9301
9302 /* Compute which registers we will have to save onto the stack. */
9303 live_regs_mask = arm_compute_save_reg_mask ();
9304
9305 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
9306
9307 if (frame_pointer_needed)
9308 {
9309 if (IS_INTERRUPT (func_type))
9310 {
9311 /* Interrupt functions must not corrupt any registers.
9312 Creating a frame pointer, however, corrupts the IP
9313 register, so we must push it first. */
9314 insn = emit_multi_reg_push (1 << IP_REGNUM);
9315
9316 /* Do not set RTX_FRAME_RELATED_P on this insn.
9317 The dwarf stack unwinding code only wants to see one
9318 stack decrement per function, and this is not it. If
9319 this instruction is labeled as being part of the frame
9320 creation sequence then dwarf2out_frame_debug_expr will
9321 abort when it encounters the assignment of IP to FP
9322 later on, since the use of SP here establishes SP as
9323 the CFA register and not IP.
9324
9325 Anyway this instruction is not really part of the stack
9326 frame creation although it is part of the prologue. */
9327 }
9328 else if (IS_NESTED (func_type))
9329 {
9330 /* The static chain register is the same as the IP register, which
9331 is used as a scratch register during stack frame creation.  To get
9332 around this we need to find somewhere to store IP
9333 whilst the frame is being created.  We try the following
9334 places in order:
9335
9336 1. The last argument register.
9337 2. A slot on the stack above the frame. (This only
9338 works if the function is not a varargs function).
9339 3. Register r3, after pushing the argument registers
9340 onto the stack.
9341
9342 Note - we only need to tell the dwarf2 backend about the SP
9343 adjustment in the second variant; the static chain register
9344 doesn't need to be unwound, as it doesn't contain a value
9345 inherited from the caller. */
9346
9347 if (regs_ever_live[3] == 0)
9348 {
9349 insn = gen_rtx_REG (SImode, 3);
9350 insn = gen_rtx_SET (SImode, insn, ip_rtx);
9351 insn = emit_insn (insn);
9352 }
9353 else if (args_to_push == 0)
9354 {
9355 rtx dwarf;
9356 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
9357 insn = gen_rtx_MEM (SImode, insn);
9358 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
9359 insn = emit_insn (insn);
9360
9361 fp_offset = 4;
9362
9363 /* Just tell the dwarf backend that we adjusted SP. */
9364 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9365 gen_rtx_PLUS (SImode, stack_pointer_rtx,
9366 GEN_INT (-fp_offset)));
9367 RTX_FRAME_RELATED_P (insn) = 1;
9368 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
9369 dwarf, REG_NOTES (insn));
9370 }
9371 else
9372 {
9373 /* Store the args on the stack. */
9374 if (cfun->machine->uses_anonymous_args)
9375 insn = emit_multi_reg_push
9376 ((0xf0 >> (args_to_push / 4)) & 0xf);
9377 else
9378 insn = emit_insn
9379 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
9380 GEN_INT (- args_to_push)));
9381
9382 RTX_FRAME_RELATED_P (insn) = 1;
9383
9384 saved_pretend_args = 1;
9385 fp_offset = args_to_push;
9386 args_to_push = 0;
9387
9388 /* Now reuse r3 to preserve IP. */
9389 insn = gen_rtx_REG (SImode, 3);
9390 insn = gen_rtx_SET (SImode, insn, ip_rtx);
9391 (void) emit_insn (insn);
9392 }
9393 }
9394
9395 if (fp_offset)
9396 {
9397 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
9398 insn = gen_rtx_SET (SImode, ip_rtx, insn);
9399 }
9400 else
9401 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
9402
9403 insn = emit_insn (insn);
9404 RTX_FRAME_RELATED_P (insn) = 1;
9405 }
9406
9407 if (args_to_push)
9408 {
9409 /* Push the argument registers, or reserve space for them. */
9410 if (cfun->machine->uses_anonymous_args)
9411 insn = emit_multi_reg_push
9412 ((0xf0 >> (args_to_push / 4)) & 0xf);
9413 else
9414 insn = emit_insn
9415 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
9416 GEN_INT (- args_to_push)));
9417 RTX_FRAME_RELATED_P (insn) = 1;
9418 }
9419
9420 /* If this is an interrupt service routine, and the link register
9421 is going to be pushed, and we are not creating a stack frame
9422 (which would involve an extra push of IP and a pop in the epilogue),
9423 then subtracting four from LR now means that the function return
9424 can be done with a single instruction. */
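 /* Added background note: on IRQ/FIQ entry the hardware leaves LR
    pointing four bytes past the return address, so the usual return is
    "subs pc, lr, #4". Pre-biasing LR here lets the epilogue pop the
    saved value straight into PC instead. */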
9425 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
9426 && (live_regs_mask & (1 << LR_REGNUM)) != 0
9427 && ! frame_pointer_needed)
9428 emit_insn (gen_rtx_SET (SImode,
9429 gen_rtx_REG (SImode, LR_REGNUM),
9430 gen_rtx_PLUS (SImode,
9431 gen_rtx_REG (SImode, LR_REGNUM),
9432 GEN_INT (-4))));
9433
9434 if (live_regs_mask)
9435 {
9436 insn = emit_multi_reg_push (live_regs_mask);
9437 RTX_FRAME_RELATED_P (insn) = 1;
9438 }
9439
9440 if (TARGET_IWMMXT)
9441 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9442 if (regs_ever_live[reg] && ! call_used_regs [reg])
9443 {
9444 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
9445 insn = gen_rtx_MEM (V2SImode, insn);
9446 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
9447 gen_rtx_REG (V2SImode, reg)));
9448 RTX_FRAME_RELATED_P (insn) = 1;
9449 }
9450
9451 if (! IS_VOLATILE (func_type))
9452 {
9453 /* Save any floating point call-saved registers used by this
9454 function. */
9455 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9456 {
9457 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
9458 if (regs_ever_live[reg] && !call_used_regs[reg])
9459 {
9460 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
9461 insn = gen_rtx_MEM (XFmode, insn);
9462 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
9463 gen_rtx_REG (XFmode, reg)));
9464 RTX_FRAME_RELATED_P (insn) = 1;
9465 }
9466 }
9467 else
9468 {
9469 int start_reg = LAST_ARM_FP_REGNUM;
9470
9471 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
9472 {
9473 if (regs_ever_live[reg] && !call_used_regs[reg])
9474 {
9475 if (start_reg - reg == 3)
9476 {
9477 insn = emit_sfm (reg, 4);
9478 RTX_FRAME_RELATED_P (insn) = 1;
9479 start_reg = reg - 1;
9480 }
9481 }
9482 else
9483 {
9484 if (start_reg != reg)
9485 {
9486 insn = emit_sfm (reg + 1, start_reg - reg);
9487 RTX_FRAME_RELATED_P (insn) = 1;
9488 }
9489 start_reg = reg - 1;
9490 }
9491 }
9492
9493 if (start_reg != reg)
9494 {
9495 insn = emit_sfm (reg + 1, start_reg - reg);
9496 RTX_FRAME_RELATED_P (insn) = 1;
9497 }
9498 }
9499 }
9500
9501 if (frame_pointer_needed)
9502 {
9503 /* Create the new frame pointer. */
9504 insn = GEN_INT (-(4 + args_to_push + fp_offset));
9505 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
9506 RTX_FRAME_RELATED_P (insn) = 1;
9507
9508 if (IS_NESTED (func_type))
9509 {
9510 /* Recover the static chain register. */
9511 if (regs_ever_live [3] == 0
9512 || saved_pretend_args)
9513 insn = gen_rtx_REG (SImode, 3);
9514 else /* if (current_function_pretend_args_size == 0) */
9515 {
9516 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
9517 GEN_INT (4));
9518 insn = gen_rtx_MEM (SImode, insn);
9519 }
9520
9521 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
9522 /* Add a USE to stop propagate_one_insn() from barfing. */
9523 emit_insn (gen_prologue_use (ip_rtx));
9524 }
9525 }
9526
9527 amount = GEN_INT (-(arm_get_frame_size ()
9528 + current_function_outgoing_args_size));
9529
9530 if (amount != const0_rtx)
9531 {
9532 /* This add can produce multiple insns for a large constant, so we
9533 need to get tricky. */
9534 rtx last = get_last_insn ();
9535 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
9536 amount));
9537 do
9538 {
9539 last = last ? NEXT_INSN (last) : get_insns ();
9540 RTX_FRAME_RELATED_P (last) = 1;
9541 }
9542 while (last != insn);
9543
9544 /* If the frame pointer is needed, emit a special barrier that
9545 will prevent the scheduler from moving stores to the frame
9546 before the stack adjustment. */
9547 if (frame_pointer_needed)
9548 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
9549 hard_frame_pointer_rtx));
9550 }
9551
9552 /* If we are profiling, make sure no instructions are scheduled before
9553 the call to mcount. Similarly if the user has requested no
9554 scheduling in the prologue. */
9555 if (current_function_profile || TARGET_NO_SCHED_PRO)
9556 emit_insn (gen_blockage ());
9557
9558 /* If the link register is being kept alive, with the return address in it,
9559 then make sure that it does not get reused by the ce2 pass. */
9560 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
9561 {
9562 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
9563 cfun->machine->lr_save_eliminated = 1;
9564 }
9565 }
9566 \f
9567 /* If CODE is 'd', then X is a condition operand and the instruction
9568 should only be executed if the condition is true.
9569 If CODE is 'D', then X is a condition operand and the instruction
9570 should only be executed if the condition is false: however, if the mode
9571 of the comparison is CCFPEmode, then always execute the instruction -- we
9572 do this because in these circumstances !GE does not necessarily imply LT;
9573 in these cases the instruction pattern will take care to make sure that
9574 an instruction containing %d will follow, thereby undoing the effects of
9575 doing this instruction unconditionally.
9576 If CODE is 'N' then X is a floating point operand that must be negated
9577 before output.
9578 If CODE is 'B' then output a bitwise inverted value of X (a const int).
9579 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
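 /* Two worked examples (added for illustration, derived from the cases
    below): with X = (const_int 5), "%B" prints -6, the bitwise inverse;
    with X a DImode value held in r4, "%M" prints "{r4-r5}". */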
9580 void
9581 arm_print_operand (FILE *stream, rtx x, int code)
9582 {
9583 switch (code)
9584 {
9585 case '@':
9586 fputs (ASM_COMMENT_START, stream);
9587 return;
9588
9589 case '_':
9590 fputs (user_label_prefix, stream);
9591 return;
9592
9593 case '|':
9594 fputs (REGISTER_PREFIX, stream);
9595 return;
9596
9597 case '?':
9598 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
9599 {
9600 if (TARGET_THUMB || current_insn_predicate != NULL)
9601 abort ();
9602
9603 fputs (arm_condition_codes[arm_current_cc], stream);
9604 }
9605 else if (current_insn_predicate)
9606 {
9607 enum arm_cond_code code;
9608
9609 if (TARGET_THUMB)
9610 abort ();
9611
9612 code = get_arm_condition_code (current_insn_predicate);
9613 fputs (arm_condition_codes[code], stream);
9614 }
9615 return;
9616
9617 case 'N':
9618 {
9619 REAL_VALUE_TYPE r;
9620 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
9621 r = REAL_VALUE_NEGATE (r);
9622 fprintf (stream, "%s", fp_const_from_val (&r));
9623 }
9624 return;
9625
9626 case 'B':
9627 if (GET_CODE (x) == CONST_INT)
9628 {
9629 HOST_WIDE_INT val;
9630 val = ARM_SIGN_EXTEND (~INTVAL (x));
9631 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
9632 }
9633 else
9634 {
9635 putc ('~', stream);
9636 output_addr_const (stream, x);
9637 }
9638 return;
9639
9640 case 'i':
9641 fprintf (stream, "%s", arithmetic_instr (x, 1));
9642 return;
9643
9644 /* Truncate Cirrus shift counts. */
9645 case 's':
9646 if (GET_CODE (x) == CONST_INT)
9647 {
9648 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
9649 return;
9650 }
9651 arm_print_operand (stream, x, 0);
9652 return;
9653
9654 case 'I':
9655 fprintf (stream, "%s", arithmetic_instr (x, 0));
9656 return;
9657
9658 case 'S':
9659 {
9660 HOST_WIDE_INT val;
9661 const char * shift = shift_op (x, &val);
9662
9663 if (shift)
9664 {
9665 fprintf (stream, ", %s ", shift);
9666 if (val == -1)
9667 arm_print_operand (stream, XEXP (x, 1), 0);
9668 else
9669 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
9670 }
9671 }
9672 return;
9673
9674 /* An explanation of the 'Q', 'R' and 'H' register operands:
9675
9676 In a pair of registers containing a DI or DF value the 'Q'
9677 operand returns the register number of the register containing
9678 the least significant part of the value. The 'R' operand returns
9679 the register number of the register containing the most
9680 significant part of the value.
9681
9682 The 'H' operand returns the higher of the two register numbers.
9683 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
9684 same as the 'Q' operand, since the most significant part of the
9685 value is held in the lower number register. The reverse is true
9686 on systems where WORDS_BIG_ENDIAN is false.
9687
9688 The purpose of these operands is to distinguish between cases
9689 where the endian-ness of the values is important (for example
9690 when they are added together), and cases where the endian-ness
9691 is irrelevant, but the order of register operations is important.
9692 For example when loading a value from memory into a register
9693 pair, the endian-ness does not matter. Provided that the value
9694 from the lower memory address is put into the lower numbered
9695 register, and the value from the higher address is put into the
9696 higher numbered register, the load will work regardless of whether
9697 the value being loaded is big-wordian or little-wordian. The
9698 order of the two register loads can matter however, if the address
9699 of the memory location is actually held in one of the registers
9700 being overwritten by the load. */
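 /* Concrete example (added for illustration): for a DImode value held
    in the pair r0/r1 on a target where WORDS_BIG_ENDIAN is false, '%Q'
    prints r0 while '%R' and '%H' both print r1; when WORDS_BIG_ENDIAN
    is true, '%Q' and '%H' both print r1 and '%R' prints r0. */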
9701 case 'Q':
9702 if (REGNO (x) > LAST_ARM_REGNUM)
9703 abort ();
9704 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
9705 return;
9706
9707 case 'R':
9708 if (REGNO (x) > LAST_ARM_REGNUM)
9709 abort ();
9710 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
9711 return;
9712
9713 case 'H':
9714 if (REGNO (x) > LAST_ARM_REGNUM)
9715 abort ();
9716 asm_fprintf (stream, "%r", REGNO (x) + 1);
9717 return;
9718
9719 case 'm':
9720 asm_fprintf (stream, "%r",
9721 GET_CODE (XEXP (x, 0)) == REG
9722 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
9723 return;
9724
9725 case 'M':
9726 asm_fprintf (stream, "{%r-%r}",
9727 REGNO (x),
9728 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
9729 return;
9730
9731 case 'd':
9732 /* CONST_TRUE_RTX means always -- that's the default. */
9733 if (x == const_true_rtx)
9734 return;
9735
9736 fputs (arm_condition_codes[get_arm_condition_code (x)],
9737 stream);
9738 return;
9739
9740 case 'D':
9741 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
9742 want to do that. */
9743 if (x == const_true_rtx)
9744 abort ();
9745
9746 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
9747 (get_arm_condition_code (x))],
9748 stream);
9749 return;
9750
9751 /* Cirrus registers can be accessed in a variety of ways:
9752 single floating point (f)
9753 double floating point (d)
9754 32bit integer (fx)
9755 64bit integer (dx). */
9756 case 'W': /* Cirrus register in F mode. */
9757 case 'X': /* Cirrus register in D mode. */
9758 case 'Y': /* Cirrus register in FX mode. */
9759 case 'Z': /* Cirrus register in DX mode. */
9760 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
9761 abort ();
9762
9763 fprintf (stream, "mv%s%s",
9764 code == 'W' ? "f"
9765 : code == 'X' ? "d"
9766 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
9767
9768 return;
9769
9770 /* Print a Cirrus register in the mode specified by the register's mode. */
9771 case 'V':
9772 {
9773 int mode = GET_MODE (x);
9774
9775 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
9776 abort ();
9777
9778 fprintf (stream, "mv%s%s",
9779 mode == DFmode ? "d"
9780 : mode == SImode ? "fx"
9781 : mode == DImode ? "dx"
9782 : "f", reg_names[REGNO (x)] + 2);
9783
9784 return;
9785 }
9786
9787 case 'U':
9788 if (GET_CODE (x) != REG
9789 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
9790 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
9791 /* Bad value for wCG register number. */
9792 abort ();
9793 else
9794 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
9795 return;
9796
9797 /* Print an iWMMXt control register name. */
9798 case 'w':
9799 if (GET_CODE (x) != CONST_INT
9800 || INTVAL (x) < 0
9801 || INTVAL (x) >= 16)
9802 /* Bad value for wC register number. */
9803 abort ();
9804 else
9805 {
9806 static const char * wc_reg_names [16] =
9807 {
9808 "wCID", "wCon", "wCSSF", "wCASF",
9809 "wC4", "wC5", "wC6", "wC7",
9810 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
9811 "wC12", "wC13", "wC14", "wC15"
9812 };
9813
9814 fputs (wc_reg_names [INTVAL (x)], stream);
9815 }
9816 return;
9817
9818 default:
9819 if (x == 0)
9820 abort ();
9821
9822 if (GET_CODE (x) == REG)
9823 asm_fprintf (stream, "%r", REGNO (x));
9824 else if (GET_CODE (x) == MEM)
9825 {
9826 output_memory_reference_mode = GET_MODE (x);
9827 output_address (XEXP (x, 0));
9828 }
9829 else if (GET_CODE (x) == CONST_DOUBLE)
9830 fprintf (stream, "#%s", fp_immediate_constant (x));
9831 else if (GET_CODE (x) == NEG)
9832 abort (); /* This should never happen now. */
9833 else
9834 {
9835 fputc ('#', stream);
9836 output_addr_const (stream, x);
9837 }
9838 }
9839 }
9840 \f
9841 #ifndef AOF_ASSEMBLER
9842 /* Target hook for assembling integer objects. The ARM version needs to
9843 handle word-sized values specially. */
9844 static bool
9845 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
9846 {
9847 if (size == UNITS_PER_WORD && aligned_p)
9848 {
9849 fputs ("\t.word\t", asm_out_file);
9850 output_addr_const (asm_out_file, x);
9851
9852 /* Mark symbols as position independent. We only do this in the
9853 .text segment, not in the .data segment. */
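 /* For instance (illustrative symbol names): a local symbol or
    constant-pool entry comes out as ".word .LC0(GOTOFF)", whereas a
    global symbol comes out as ".word foo(GOT)". */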
9854 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
9855 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
9856 {
9857 if (GET_CODE (x) == SYMBOL_REF
9858 && (CONSTANT_POOL_ADDRESS_P (x)
9859 || SYMBOL_REF_LOCAL_P (x)))
9860 fputs ("(GOTOFF)", asm_out_file);
9861 else if (GET_CODE (x) == LABEL_REF)
9862 fputs ("(GOTOFF)", asm_out_file);
9863 else
9864 fputs ("(GOT)", asm_out_file);
9865 }
9866 fputc ('\n', asm_out_file);
9867 return true;
9868 }
9869
9870 if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
9871 {
9872 int i, units;
9873
9874 if (GET_CODE (x) != CONST_VECTOR)
9875 abort ();
9876
9877 units = CONST_VECTOR_NUNITS (x);
9878
9879 switch (GET_MODE (x))
9880 {
9881 case V2SImode: size = 4; break;
9882 case V4HImode: size = 2; break;
9883 case V8QImode: size = 1; break;
9884 default:
9885 abort ();
9886 }
9887
9888 for (i = 0; i < units; i++)
9889 {
9890 rtx elt;
9891
9892 elt = CONST_VECTOR_ELT (x, i);
9893 assemble_integer
9894 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
9895 }
9896
9897 return true;
9898 }
9899
9900 return default_assemble_integer (x, size, aligned_p);
9901 }
9902 #endif
9903 \f
9904 /* A finite state machine takes care of noticing whether or not instructions
9905 can be conditionally executed, and thus decrease execution time and code
9906 size by deleting branch instructions. The fsm is controlled by
9907 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
9908
9909 /* The states of the fsm controlling condition codes are:
9910 0: normal, do nothing special
9911 1: make ASM_OUTPUT_OPCODE not output this instruction
9912 2: make ASM_OUTPUT_OPCODE not output this instruction
9913 3: make instructions conditional
9914 4: make instructions conditional
9915
9916 State transitions (state->state by whom under condition):
9917 0 -> 1 final_prescan_insn if the `target' is a label
9918 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
9919 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
9920 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
9921 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
9922 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
9923 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
9924 (the target insn is arm_target_insn).
9925
9926 If the jump clobbers the conditions then we use states 2 and 4.
9927
9928 A similar thing can be done with conditional return insns.
9929
9930 XXX In case the `target' is an unconditional branch, this conditionalising
9931 of the instructions always reduces code size, but not always execution
9932 time. But then, I want to reduce the code size to somewhere near what
9933 /bin/cc produces. */
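/* A small worked example of the transformation this fsm enables
   (added illustration):

       cmp     r0, #0                    cmp     r0, #0
       beq     .L1            ==>        addne   r1, r1, #1
       add     r1, r1, #1
   .L1:

   The branch and its label disappear and the skipped instruction is
   executed conditionally instead. */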
9934
9935 /* Returns the index of the ARM condition code string in
9936 `arm_condition_codes'. COMPARISON should be an rtx like
9937 `(eq (...) (...))'. */
9938 static enum arm_cond_code
9939 get_arm_condition_code (rtx comparison)
9940 {
9941 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
9942 int code;
9943 enum rtx_code comp_code = GET_CODE (comparison);
9944
9945 if (GET_MODE_CLASS (mode) != MODE_CC)
9946 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
9947 XEXP (comparison, 1));
9948
9949 switch (mode)
9950 {
9951 case CC_DNEmode: code = ARM_NE; goto dominance;
9952 case CC_DEQmode: code = ARM_EQ; goto dominance;
9953 case CC_DGEmode: code = ARM_GE; goto dominance;
9954 case CC_DGTmode: code = ARM_GT; goto dominance;
9955 case CC_DLEmode: code = ARM_LE; goto dominance;
9956 case CC_DLTmode: code = ARM_LT; goto dominance;
9957 case CC_DGEUmode: code = ARM_CS; goto dominance;
9958 case CC_DGTUmode: code = ARM_HI; goto dominance;
9959 case CC_DLEUmode: code = ARM_LS; goto dominance;
9960 case CC_DLTUmode: code = ARM_CC;
9961
9962 dominance:
9963 if (comp_code != EQ && comp_code != NE)
9964 abort ();
9965
9966 if (comp_code == EQ)
9967 return ARM_INVERSE_CONDITION_CODE (code);
9968 return code;
9969
9970 case CC_NOOVmode:
9971 switch (comp_code)
9972 {
9973 case NE: return ARM_NE;
9974 case EQ: return ARM_EQ;
9975 case GE: return ARM_PL;
9976 case LT: return ARM_MI;
9977 default: abort ();
9978 }
9979
9980 case CC_Zmode:
9981 switch (comp_code)
9982 {
9983 case NE: return ARM_NE;
9984 case EQ: return ARM_EQ;
9985 default: abort ();
9986 }
9987
9988 case CC_Nmode:
9989 switch (comp_code)
9990 {
9991 case NE: return ARM_MI;
9992 case EQ: return ARM_PL;
9993 default: abort ();
9994 }
9995
9996 case CCFPEmode:
9997 case CCFPmode:
9998 /* These encodings assume that AC=1 in the FPA system control
9999 byte. This allows us to handle all cases except UNEQ and
10000 LTGT. */
10001 switch (comp_code)
10002 {
10003 case GE: return ARM_GE;
10004 case GT: return ARM_GT;
10005 case LE: return ARM_LS;
10006 case LT: return ARM_MI;
10007 case NE: return ARM_NE;
10008 case EQ: return ARM_EQ;
10009 case ORDERED: return ARM_VC;
10010 case UNORDERED: return ARM_VS;
10011 case UNLT: return ARM_LT;
10012 case UNLE: return ARM_LE;
10013 case UNGT: return ARM_HI;
10014 case UNGE: return ARM_PL;
10015 /* UNEQ and LTGT do not have a representation. */
10016 case UNEQ: /* Fall through. */
10017 case LTGT: /* Fall through. */
10018 default: abort ();
10019 }
10020
10021 case CC_SWPmode:
10022 switch (comp_code)
10023 {
10024 case NE: return ARM_NE;
10025 case EQ: return ARM_EQ;
10026 case GE: return ARM_LE;
10027 case GT: return ARM_LT;
10028 case LE: return ARM_GE;
10029 case LT: return ARM_GT;
10030 case GEU: return ARM_LS;
10031 case GTU: return ARM_CC;
10032 case LEU: return ARM_CS;
10033 case LTU: return ARM_HI;
10034 default: abort ();
10035 }
10036
10037 case CC_Cmode:
10038 switch (comp_code)
10039 {
10040 case LTU: return ARM_CS;
10041 case GEU: return ARM_CC;
10042 default: abort ();
10043 }
10044
10045 case CCmode:
10046 switch (comp_code)
10047 {
10048 case NE: return ARM_NE;
10049 case EQ: return ARM_EQ;
10050 case GE: return ARM_GE;
10051 case GT: return ARM_GT;
10052 case LE: return ARM_LE;
10053 case LT: return ARM_LT;
10054 case GEU: return ARM_CS;
10055 case GTU: return ARM_HI;
10056 case LEU: return ARM_LS;
10057 case LTU: return ARM_CC;
10058 default: abort ();
10059 }
10060
10061 default: abort ();
10062 }
10063
10064 abort ();
10065 }
10066
10067 void
10068 arm_final_prescan_insn (rtx insn)
10069 {
10070 /* BODY will hold the body of INSN. */
10071 rtx body = PATTERN (insn);
10072
10073 /* This will be 1 if trying to repeat the trick, and things need to be
10074 reversed if it appears to fail. */
10075 int reverse = 0;
10076
10077 /* JUMP_CLOBBERS being one implies that the condition codes are
10078 clobbered if the branch is taken, even if the rtl suggests otherwise.
10079 It also means that we have to grub around within the jump expression
10080 to find out what the conditions are when the jump isn't taken. */
10081 int jump_clobbers = 0;
10082
10083 /* If we start with a return insn, we only succeed if we find another one. */
10084 int seeking_return = 0;
10085
10086 /* START_INSN will hold the insn from where we start looking. This is the
10087 first insn after the following code_label if REVERSE is true. */
10088 rtx start_insn = insn;
10089
10090 /* If in state 4, check if the target branch is reached, in order to
10091 change back to state 0. */
10092 if (arm_ccfsm_state == 4)
10093 {
10094 if (insn == arm_target_insn)
10095 {
10096 arm_target_insn = NULL;
10097 arm_ccfsm_state = 0;
10098 }
10099 return;
10100 }
10101
10102 /* If in state 3, it is possible to repeat the trick, if this insn is an
10103 unconditional branch to a label, and immediately following this branch
10104 is the previous target label which is only used once, and the label this
10105 branch jumps to is not too far off. */
10106 if (arm_ccfsm_state == 3)
10107 {
10108 if (simplejump_p (insn))
10109 {
10110 start_insn = next_nonnote_insn (start_insn);
10111 if (GET_CODE (start_insn) == BARRIER)
10112 {
10113 /* XXX Isn't this always a barrier? */
10114 start_insn = next_nonnote_insn (start_insn);
10115 }
10116 if (GET_CODE (start_insn) == CODE_LABEL
10117 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10118 && LABEL_NUSES (start_insn) == 1)
10119 reverse = TRUE;
10120 else
10121 return;
10122 }
10123 else if (GET_CODE (body) == RETURN)
10124 {
10125 start_insn = next_nonnote_insn (start_insn);
10126 if (GET_CODE (start_insn) == BARRIER)
10127 start_insn = next_nonnote_insn (start_insn);
10128 if (GET_CODE (start_insn) == CODE_LABEL
10129 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10130 && LABEL_NUSES (start_insn) == 1)
10131 {
10132 reverse = TRUE;
10133 seeking_return = 1;
10134 }
10135 else
10136 return;
10137 }
10138 else
10139 return;
10140 }
10141
10142 if (arm_ccfsm_state != 0 && !reverse)
10143 abort ();
10144 if (GET_CODE (insn) != JUMP_INSN)
10145 return;
10146
10147 /* This jump might be paralleled with a clobber of the condition codes;
10148 the jump should always come first. */
10149 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10150 body = XVECEXP (body, 0, 0);
10151
10152 if (reverse
10153 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10154 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10155 {
10156 int insns_skipped;
10157 int fail = FALSE, succeed = FALSE;
10158 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10159 int then_not_else = TRUE;
10160 rtx this_insn = start_insn, label = 0;
10161
10162 /* If the jump cannot be done with one instruction, we cannot
10163 conditionally execute the instruction in the inverse case. */
10164 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10165 {
10166 jump_clobbers = 1;
10167 return;
10168 }
10169
10170 /* Register the insn jumped to. */
10171 if (reverse)
10172 {
10173 if (!seeking_return)
10174 label = XEXP (SET_SRC (body), 0);
10175 }
10176 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10177 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10178 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10179 {
10180 label = XEXP (XEXP (SET_SRC (body), 2), 0);
10181 then_not_else = FALSE;
10182 }
10183 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
10184 seeking_return = 1;
10185 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
10186 {
10187 seeking_return = 1;
10188 then_not_else = FALSE;
10189 }
10190 else
10191 abort ();
10192
10193 /* See how many insns this branch skips, and what kind of insns. If all
10194 insns are okay, and the label or unconditional branch to the same
10195 label is not too far away, succeed. */
10196 for (insns_skipped = 0;
10197 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
10198 {
10199 rtx scanbody;
10200
10201 this_insn = next_nonnote_insn (this_insn);
10202 if (!this_insn)
10203 break;
10204
10205 switch (GET_CODE (this_insn))
10206 {
10207 case CODE_LABEL:
10208 /* Succeed if it is the target label, otherwise fail since
10209 control falls in from somewhere else. */
10210 if (this_insn == label)
10211 {
10212 if (jump_clobbers)
10213 {
10214 arm_ccfsm_state = 2;
10215 this_insn = next_nonnote_insn (this_insn);
10216 }
10217 else
10218 arm_ccfsm_state = 1;
10219 succeed = TRUE;
10220 }
10221 else
10222 fail = TRUE;
10223 break;
10224
10225 case BARRIER:
10226 /* Succeed if the following insn is the target label.
10227 Otherwise fail.
10228 If return insns are used then the last insn in a function
10229 will be a barrier. */
10230 this_insn = next_nonnote_insn (this_insn);
10231 if (this_insn && this_insn == label)
10232 {
10233 if (jump_clobbers)
10234 {
10235 arm_ccfsm_state = 2;
10236 this_insn = next_nonnote_insn (this_insn);
10237 }
10238 else
10239 arm_ccfsm_state = 1;
10240 succeed = TRUE;
10241 }
10242 else
10243 fail = TRUE;
10244 break;
10245
10246 case CALL_INSN:
10247 /* If using 32-bit addresses the cc is not preserved over
10248 calls. */
10249 if (TARGET_APCS_32)
10250 {
10251 /* Succeed if the following insn is the target label,
10252 or if the following two insns are a barrier and
10253 the target label. */
10254 this_insn = next_nonnote_insn (this_insn);
10255 if (this_insn && GET_CODE (this_insn) == BARRIER)
10256 this_insn = next_nonnote_insn (this_insn);
10257
10258 if (this_insn && this_insn == label
10259 && insns_skipped < max_insns_skipped)
10260 {
10261 if (jump_clobbers)
10262 {
10263 arm_ccfsm_state = 2;
10264 this_insn = next_nonnote_insn (this_insn);
10265 }
10266 else
10267 arm_ccfsm_state = 1;
10268 succeed = TRUE;
10269 }
10270 else
10271 fail = TRUE;
10272 }
10273 break;
10274
10275 case JUMP_INSN:
10276 /* If this is an unconditional branch to the same label, succeed.
10277 If it is to another label, do nothing. If it is conditional,
10278 fail. */
10279 /* XXX Probably, the tests for SET and the PC are
10280 unnecessary. */
10281
10282 scanbody = PATTERN (this_insn);
10283 if (GET_CODE (scanbody) == SET
10284 && GET_CODE (SET_DEST (scanbody)) == PC)
10285 {
10286 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
10287 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
10288 {
10289 arm_ccfsm_state = 2;
10290 succeed = TRUE;
10291 }
10292 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
10293 fail = TRUE;
10294 }
10295 /* Fail if a conditional return is undesirable (eg on a
10296 StrongARM), but still allow this if optimizing for size. */
10297 else if (GET_CODE (scanbody) == RETURN
10298 && !use_return_insn (TRUE, NULL)
10299 && !optimize_size)
10300 fail = TRUE;
10301 else if (GET_CODE (scanbody) == RETURN
10302 && seeking_return)
10303 {
10304 arm_ccfsm_state = 2;
10305 succeed = TRUE;
10306 }
10307 else if (GET_CODE (scanbody) == PARALLEL)
10308 {
10309 switch (get_attr_conds (this_insn))
10310 {
10311 case CONDS_NOCOND:
10312 break;
10313 default:
10314 fail = TRUE;
10315 break;
10316 }
10317 }
10318 else
10319 fail = TRUE; /* Unrecognized jump (eg epilogue). */
10320
10321 break;
10322
10323 case INSN:
10324 /* Instructions using or affecting the condition codes make it
10325 fail. */
10326 scanbody = PATTERN (this_insn);
10327 if (!(GET_CODE (scanbody) == SET
10328 || GET_CODE (scanbody) == PARALLEL)
10329 || get_attr_conds (this_insn) != CONDS_NOCOND)
10330 fail = TRUE;
10331
10332 /* A conditional Cirrus instruction must be followed by
10333 a non-Cirrus instruction. However, since this function
10334 conditionalizes instructions, and by the time we get here
10335 we can no longer add instructions (nops) because
10336 shorten_branches () has already been called, we simply
10337 disable the conditionalizing of Cirrus instructions
10338 to be safe. */
10339 if (GET_CODE (scanbody) != USE
10340 && GET_CODE (scanbody) != CLOBBER
10341 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
10342 fail = TRUE;
10343 break;
10344
10345 default:
10346 break;
10347 }
10348 }
10349 if (succeed)
10350 {
10351 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
10352 arm_target_label = CODE_LABEL_NUMBER (label);
10353 else if (seeking_return || arm_ccfsm_state == 2)
10354 {
10355 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
10356 {
10357 this_insn = next_nonnote_insn (this_insn);
10358 if (this_insn && (GET_CODE (this_insn) == BARRIER
10359 || GET_CODE (this_insn) == CODE_LABEL))
10360 abort ();
10361 }
10362 if (!this_insn)
10363 {
10364 /* Oh, dear! We ran off the end... give up. */
10365 recog (PATTERN (insn), insn, NULL);
10366 arm_ccfsm_state = 0;
10367 arm_target_insn = NULL;
10368 return;
10369 }
10370 arm_target_insn = this_insn;
10371 }
10372 else
10373 abort ();
10374 if (jump_clobbers)
10375 {
10376 if (reverse)
10377 abort ();
10378 arm_current_cc =
10379 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
10380 0), 0), 1));
10381 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
10382 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
10383 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
10384 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
10385 }
10386 else
10387 {
10388 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
10389 what it was. */
10390 if (!reverse)
10391 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
10392 0));
10393 }
10394
10395 if (reverse || then_not_else)
10396 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
10397 }
10398
10399 /* Restore recog_data (getting the attributes of other insns can
10400 destroy this array, but final.c assumes that it remains intact
10401 across this call; since the insn has been recognized already we
10402 call recog direct). */
10403 recog (PATTERN (insn), insn, NULL);
10404 }
10405 }
10406
10407 /* Returns true if REGNO is a valid register
10408 for holding a quantity of type MODE. */
10409 int
10410 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
10411 {
10412 if (GET_MODE_CLASS (mode) == MODE_CC)
10413 return regno == CC_REGNUM;
10414
10415 if (TARGET_THUMB)
10416 /* For the Thumb we only allow values bigger than SImode in
10417 registers 0 - 6, so that there is always a second low
10418 register available to hold the upper part of the value.
10419 We probably ought to ensure that the register is the
10420 start of an even numbered register pair. */
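 /* For example (added illustration): DImode needs two registers, so
    it is allowed in r0-r6 (the pair then ends no higher than r7) but
    not in r7 or above. */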
10421 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
10422
10423 if (IS_CIRRUS_REGNUM (regno))
10424 /* We have outlawed SI values in Cirrus registers because they
10425 reside in the lower 32 bits, but SF values reside in the
10426 upper 32 bits. This causes gcc all sorts of grief. We can't
10427 even split the registers into pairs because Cirrus SI values
10428 get sign extended to 64 bits -- aldyh. */
10429 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
10430
10431 if (IS_IWMMXT_GR_REGNUM (regno))
10432 return mode == SImode;
10433
10434 if (IS_IWMMXT_REGNUM (regno))
10435 return VALID_IWMMXT_REG_MODE (mode);
10436
10437 if (regno <= LAST_ARM_REGNUM)
10438 /* We allow any value to be stored in the general registers. */
10439 return 1;
10440
10441 if ( regno == FRAME_POINTER_REGNUM
10442 || regno == ARG_POINTER_REGNUM)
10443 /* We only allow integers in the fake hard registers. */
10444 return GET_MODE_CLASS (mode) == MODE_INT;
10445
10446 /* The only registers left are the FPA registers
10447 which we only allow to hold FP values. */
10448 return GET_MODE_CLASS (mode) == MODE_FLOAT
10449 && regno >= FIRST_ARM_FP_REGNUM
10450 && regno <= LAST_ARM_FP_REGNUM;
10451 }
10452
10453 int
10454 arm_regno_class (int regno)
10455 {
10456 if (TARGET_THUMB)
10457 {
10458 if (regno == STACK_POINTER_REGNUM)
10459 return STACK_REG;
10460 if (regno == CC_REGNUM)
10461 return CC_REG;
10462 if (regno < 8)
10463 return LO_REGS;
10464 return HI_REGS;
10465 }
10466
10467 if ( regno <= LAST_ARM_REGNUM
10468 || regno == FRAME_POINTER_REGNUM
10469 || regno == ARG_POINTER_REGNUM)
10470 return GENERAL_REGS;
10471
10472 if (regno == CC_REGNUM)
10473 return NO_REGS;
10474
10475 if (IS_CIRRUS_REGNUM (regno))
10476 return CIRRUS_REGS;
10477
10478 if (IS_IWMMXT_REGNUM (regno))
10479 return IWMMXT_REGS;
10480
10481 if (IS_IWMMXT_GR_REGNUM (regno))
10482 return IWMMXT_GR_REGS;
10483
10484 return FPA_REGS;
10485 }
10486
10487 /* Handle a special case when computing the offset
10488 of an argument from the frame pointer. */
10489 int
10490 arm_debugger_arg_offset (int value, rtx addr)
10491 {
10492 rtx insn;
10493
10494 /* We are only interested if dbxout_parms() failed to compute the offset. */
10495 if (value != 0)
10496 return 0;
10497
10498 /* We can only cope with the case where the address is held in a register. */
10499 if (GET_CODE (addr) != REG)
10500 return 0;
10501
10502 /* If we are using the frame pointer to point at the argument, then
10503 an offset of 0 is correct. */
10504 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
10505 return 0;
10506
10507 /* If we are using the stack pointer to point at the
10508 argument, then an offset of 0 is correct. */
10509 if ((TARGET_THUMB || !frame_pointer_needed)
10510 && REGNO (addr) == SP_REGNUM)
10511 return 0;
10512
10513 /* Oh dear. The argument is pointed to by a register rather
10514 than being held in a register, or being stored at a known
10515 offset from the frame pointer. Since GDB only understands
10516 those two kinds of argument we must translate the address
10517 held in the register into an offset from the frame pointer.
10518 We do this by searching through the insns for the function
10519 looking to see where this register gets its value. If the
10520 register is initialized from the frame pointer plus an offset
10521 then we are in luck and we can continue, otherwise we give up.
10522
10523 This code is exercised by producing debugging information
10524 for a function with arguments like this:
10525
10526 double func (double a, double b, int c, double d) {return d;}
10527
10528 Without this code the stab for parameter 'd' will be set to
10529 an offset of 0 from the frame pointer, rather than 8. */
10530
10531 /* The if() statement says:
10532
10533 If the insn is a normal instruction
10534 and if the insn is setting the value in a register
10535 and if the register being set is the register holding the address of the argument
10536 and if the address is computed by an addition
10537 that involves adding to a register
10538 which is the frame pointer
10539 a constant integer
10540
10541 then... */
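 /* In rtl terms the loop below looks for an insn of the shape
    (set (reg ADDR) (plus (reg HARD_FRAME_POINTER_REGNUM) (const_int N)))
    and, if it finds one, uses N as the argument's offset. (Sketch
    added for clarity; ADDR and N are placeholders.) */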
10542
10543 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10544 {
10545 if ( GET_CODE (insn) == INSN
10546 && GET_CODE (PATTERN (insn)) == SET
10547 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
10548 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
10549 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
10550 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
10551 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
10552 )
10553 {
10554 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
10555
10556 break;
10557 }
10558 }
10559
10560 if (value == 0)
10561 {
10562 debug_rtx (addr);
10563 warning ("unable to compute real location of stacked parameter");
10564 value = 8; /* XXX magic hack */
10565 }
10566
10567 return value;
10568 }
10569 \f
10570 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
10571 do \
10572 { \
10573 if ((MASK) & insn_flags) \
10574 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE); \
10575 } \
10576 while (0)
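/* Added note: a call such as
   def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
   ARM_BUILTIN_WZERO);
   registers the builtin only when FL_IWMMXT is set in insn_flags. */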
10577
10578 struct builtin_description
10579 {
10580 const unsigned int mask;
10581 const enum insn_code icode;
10582 const char * const name;
10583 const enum arm_builtins code;
10584 const enum rtx_code comparison;
10585 const unsigned int flag;
10586 };
10587
10588 static const struct builtin_description bdesc_2arg[] =
10589 {
10590 #define IWMMXT_BUILTIN(code, string, builtin) \
10591 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
10592 ARM_BUILTIN_##builtin, 0, 0 },
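/* For example (expansion shown for illustration), the first entry
   below, IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB), expands to
   { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
   ARM_BUILTIN_WADDB, 0, 0 }, */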
10593
10594 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
10595 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
10596 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
10597 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
10598 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
10599 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
10600 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
10601 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
10602 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
10603 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
10604 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
10605 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
10606 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
10607 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
10608 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
10609 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
10610 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
10611 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
10612 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
10613 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsh", WMULSH)
10614 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmuluh", WMULUH)
10615 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
10616 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
10617 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
10618 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
10619 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
10620 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
10621 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
10622 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
10623 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
10624 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
10625 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
10626 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
10627 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
10628 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
10629 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
10630 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
10631 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
10632 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
10633 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
10634 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
10635 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
10636 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
10637 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
10638 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
10639 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
10640 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
10641 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
10642 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
10643 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
10644 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
10645 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
10646 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
10647 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
10648 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
10649 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
10650 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
10651 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
10652
10653 #define IWMMXT_BUILTIN2(code, builtin) \
10654 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
10655
10656 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
10657 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
10658 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
10659 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
10660 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
10661 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
10662 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
10663 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
10664 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
10665 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
10666 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
10667 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
10668 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
10669 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
10670 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
10671 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
10672 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
10673 IWMMXT_BUILTIN2 (lshrdi3, WSRLDI)
10674 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
10675 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
10676 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
10677 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
10678 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
10679 IWMMXT_BUILTIN2 (ashrdi3, WSRADI)
10680 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
10681 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
10682 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
10683 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
10684 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
10685 IWMMXT_BUILTIN2 (rordi3, WRORDI)
10686 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
10687 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
10688 };
10689
10690 static const struct builtin_description bdesc_1arg[] =
10691 {
10692 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
10693 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
10694 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
10695 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
10696 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
10697 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
10698 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
10699 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
10700 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
10701 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
10702 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
10703 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
10704 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
10705 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
10706 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
10707 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
10708 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
10709 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
10710 };
10711
10712 /* Set up all the iWMMXt builtins. This is
10713 not called if TARGET_IWMMXT is zero. */
10714
10715 static void
10716 arm_init_iwmmxt_builtins (void)
10717 {
10718 const struct builtin_description * d;
10719 size_t i;
10720 tree endlink = void_list_node;
10721
10722 tree int_ftype_int
10723 = build_function_type (integer_type_node,
10724 tree_cons (NULL_TREE, integer_type_node, endlink));
10725 tree v8qi_ftype_v8qi_v8qi_int
10726 = build_function_type (V8QI_type_node,
10727 tree_cons (NULL_TREE, V8QI_type_node,
10728 tree_cons (NULL_TREE, V8QI_type_node,
10729 tree_cons (NULL_TREE,
10730 integer_type_node,
10731 endlink))));
10732 tree v4hi_ftype_v4hi_int
10733 = build_function_type (V4HI_type_node,
10734 tree_cons (NULL_TREE, V4HI_type_node,
10735 tree_cons (NULL_TREE, integer_type_node,
10736 endlink)));
10737 tree v2si_ftype_v2si_int
10738 = build_function_type (V2SI_type_node,
10739 tree_cons (NULL_TREE, V2SI_type_node,
10740 tree_cons (NULL_TREE, integer_type_node,
10741 endlink)));
10742 tree v2si_ftype_di_di
10743 = build_function_type (V2SI_type_node,
10744 tree_cons (NULL_TREE, long_long_integer_type_node,
10745 tree_cons (NULL_TREE, long_long_integer_type_node,
10746 endlink)));
10747 tree di_ftype_di_int
10748 = build_function_type (long_long_integer_type_node,
10749 tree_cons (NULL_TREE, long_long_integer_type_node,
10750 tree_cons (NULL_TREE, integer_type_node,
10751 endlink)));
10752 tree di_ftype_di_int_int
10753 = build_function_type (long_long_integer_type_node,
10754 tree_cons (NULL_TREE, long_long_integer_type_node,
10755 tree_cons (NULL_TREE, integer_type_node,
10756 tree_cons (NULL_TREE,
10757 integer_type_node,
10758 endlink))));
10759 tree int_ftype_v8qi
10760 = build_function_type (integer_type_node,
10761 tree_cons (NULL_TREE, V8QI_type_node,
10762 endlink));
10763 tree int_ftype_v4hi
10764 = build_function_type (integer_type_node,
10765 tree_cons (NULL_TREE, V4HI_type_node,
10766 endlink));
10767 tree int_ftype_v2si
10768 = build_function_type (integer_type_node,
10769 tree_cons (NULL_TREE, V2SI_type_node,
10770 endlink));
10771 tree int_ftype_v8qi_int
10772 = build_function_type (integer_type_node,
10773 tree_cons (NULL_TREE, V8QI_type_node,
10774 tree_cons (NULL_TREE, integer_type_node,
10775 endlink)));
10776 tree int_ftype_v4hi_int
10777 = build_function_type (integer_type_node,
10778 tree_cons (NULL_TREE, V4HI_type_node,
10779 tree_cons (NULL_TREE, integer_type_node,
10780 endlink)));
10781 tree int_ftype_v2si_int
10782 = build_function_type (integer_type_node,
10783 tree_cons (NULL_TREE, V2SI_type_node,
10784 tree_cons (NULL_TREE, integer_type_node,
10785 endlink)));
10786 tree v8qi_ftype_v8qi_int_int
10787 = build_function_type (V8QI_type_node,
10788 tree_cons (NULL_TREE, V8QI_type_node,
10789 tree_cons (NULL_TREE, integer_type_node,
10790 tree_cons (NULL_TREE,
10791 integer_type_node,
10792 endlink))));
10793 tree v4hi_ftype_v4hi_int_int
10794 = build_function_type (V4HI_type_node,
10795 tree_cons (NULL_TREE, V4HI_type_node,
10796 tree_cons (NULL_TREE, integer_type_node,
10797 tree_cons (NULL_TREE,
10798 integer_type_node,
10799 endlink))));
10800 tree v2si_ftype_v2si_int_int
10801 = build_function_type (V2SI_type_node,
10802 tree_cons (NULL_TREE, V2SI_type_node,
10803 tree_cons (NULL_TREE, integer_type_node,
10804 tree_cons (NULL_TREE,
10805 integer_type_node,
10806 endlink))));
10807 /* Miscellaneous. */
10808 tree v8qi_ftype_v4hi_v4hi
10809 = build_function_type (V8QI_type_node,
10810 tree_cons (NULL_TREE, V4HI_type_node,
10811 tree_cons (NULL_TREE, V4HI_type_node,
10812 endlink)));
10813 tree v4hi_ftype_v2si_v2si
10814 = build_function_type (V4HI_type_node,
10815 tree_cons (NULL_TREE, V2SI_type_node,
10816 tree_cons (NULL_TREE, V2SI_type_node,
10817 endlink)));
10818 tree v2si_ftype_v4hi_v4hi
10819 = build_function_type (V2SI_type_node,
10820 tree_cons (NULL_TREE, V4HI_type_node,
10821 tree_cons (NULL_TREE, V4HI_type_node,
10822 endlink)));
10823 tree v2si_ftype_v8qi_v8qi
10824 = build_function_type (V2SI_type_node,
10825 tree_cons (NULL_TREE, V8QI_type_node,
10826 tree_cons (NULL_TREE, V8QI_type_node,
10827 endlink)));
10828 tree v4hi_ftype_v4hi_di
10829 = build_function_type (V4HI_type_node,
10830 tree_cons (NULL_TREE, V4HI_type_node,
10831 tree_cons (NULL_TREE,
10832 long_long_integer_type_node,
10833 endlink)));
10834 tree v2si_ftype_v2si_di
10835 = build_function_type (V2SI_type_node,
10836 tree_cons (NULL_TREE, V2SI_type_node,
10837 tree_cons (NULL_TREE,
10838 long_long_integer_type_node,
10839 endlink)));
10840 tree void_ftype_int_int
10841 = build_function_type (void_type_node,
10842 tree_cons (NULL_TREE, integer_type_node,
10843 tree_cons (NULL_TREE, integer_type_node,
10844 endlink)));
10845 tree di_ftype_void
10846 = build_function_type (long_long_unsigned_type_node, endlink);
10847 tree di_ftype_v8qi
10848 = build_function_type (long_long_integer_type_node,
10849 tree_cons (NULL_TREE, V8QI_type_node,
10850 endlink));
10851 tree di_ftype_v4hi
10852 = build_function_type (long_long_integer_type_node,
10853 tree_cons (NULL_TREE, V4HI_type_node,
10854 endlink));
10855 tree di_ftype_v2si
10856 = build_function_type (long_long_integer_type_node,
10857 tree_cons (NULL_TREE, V2SI_type_node,
10858 endlink));
10859 tree v2si_ftype_v4hi
10860 = build_function_type (V2SI_type_node,
10861 tree_cons (NULL_TREE, V4HI_type_node,
10862 endlink));
10863 tree v4hi_ftype_v8qi
10864 = build_function_type (V4HI_type_node,
10865 tree_cons (NULL_TREE, V8QI_type_node,
10866 endlink));
10867
10868 tree di_ftype_di_v4hi_v4hi
10869 = build_function_type (long_long_unsigned_type_node,
10870 tree_cons (NULL_TREE,
10871 long_long_unsigned_type_node,
10872 tree_cons (NULL_TREE, V4HI_type_node,
10873 tree_cons (NULL_TREE,
10874 V4HI_type_node,
10875 endlink))));
10876
10877 tree di_ftype_v4hi_v4hi
10878 = build_function_type (long_long_unsigned_type_node,
10879 tree_cons (NULL_TREE, V4HI_type_node,
10880 tree_cons (NULL_TREE, V4HI_type_node,
10881 endlink)));
10882
10883 /* Normal vector binops. */
10884 tree v8qi_ftype_v8qi_v8qi
10885 = build_function_type (V8QI_type_node,
10886 tree_cons (NULL_TREE, V8QI_type_node,
10887 tree_cons (NULL_TREE, V8QI_type_node,
10888 endlink)));
10889 tree v4hi_ftype_v4hi_v4hi
10890 = build_function_type (V4HI_type_node,
10891 tree_cons (NULL_TREE, V4HI_type_node,
10892 tree_cons (NULL_TREE, V4HI_type_node,
10893 endlink)));
10894 tree v2si_ftype_v2si_v2si
10895 = build_function_type (V2SI_type_node,
10896 tree_cons (NULL_TREE, V2SI_type_node,
10897 tree_cons (NULL_TREE, V2SI_type_node,
10898 endlink)));
10899 tree di_ftype_di_di
10900 = build_function_type (long_long_unsigned_type_node,
10901 tree_cons (NULL_TREE, long_long_unsigned_type_node,
10902 tree_cons (NULL_TREE,
10903 long_long_unsigned_type_node,
10904 endlink)));
10905
10906 /* Add all builtins that are more or less simple operations on two
10907 operands. */
10908 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
10909 {
10910 /* Use one of the operands; the target can have a different mode for
10911 mask-generating compares. */
10912 enum machine_mode mode;
10913 tree type;
10914
10915 if (d->name == 0)
10916 continue;
10917
10918 mode = insn_data[d->icode].operand[1].mode;
10919
10920 switch (mode)
10921 {
10922 case V8QImode:
10923 type = v8qi_ftype_v8qi_v8qi;
10924 break;
10925 case V4HImode:
10926 type = v4hi_ftype_v4hi_v4hi;
10927 break;
10928 case V2SImode:
10929 type = v2si_ftype_v2si_v2si;
10930 break;
10931 case DImode:
10932 type = di_ftype_di_di;
10933 break;
10934
10935 default:
10936 abort ();
10937 }
10938
10939 def_mbuiltin (d->mask, d->name, type, d->code);
10940 }
10941
10942 /* Add the remaining MMX insns with somewhat more complicated types. */
10943 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
10944 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
10945 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
10946
10947 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
10948 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
10949 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
10950 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
10951 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
10952 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
10953
10954 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
10955 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
10956 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
10957 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
10958 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
10959 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
10960
10961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
10962 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
10963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
10964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
10965 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
10966 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
10967
10968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
10969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
10970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
10971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
10972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
10973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
10974
10975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
10976
10977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
10978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
10979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
10980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
10981
10982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
10983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
10984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
10985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
10986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
10987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
10988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
10989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
10990 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
10991
10992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
10993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
10994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
10995
10996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
10997 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
10998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
10999
11000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11006
11007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11013 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11015 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11019
11020 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11023 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11024
11025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11026 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11027 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11028 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11029 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11030 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
11032 }
11033
11034 static void
11035 arm_init_builtins (void)
11036 {
11037 if (TARGET_REALLY_IWMMXT)
11038 arm_init_iwmmxt_builtins ();
11039 }
11040
11041 /* Errors in the source file can cause expand_expr to return const0_rtx
11042 where we expect a vector. To avoid crashing, use one of the vector
11043 clear instructions. */
11044
11045 static rtx
11046 safe_vector_operand (rtx x, enum machine_mode mode)
11047 {
11048 if (x != const0_rtx)
11049 return x;
11050 x = gen_reg_rtx (mode);
11051
11052 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11053 : gen_rtx_SUBREG (DImode, x, 0)));
11054 return x;
11055 }
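
/* For example, if one argument of __builtin_arm_wsadb fails to parse,
   expand_expr can hand back const0_rtx instead of a V8QImode value;
   safe_vector_operand then substitutes a freshly allocated vector
   register, cleared through its DImode subreg, so the expanders below
   still receive a genuine vector operand.  */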
11056
11057 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11058
11059 static rtx
11060 arm_expand_binop_builtin (enum insn_code icode,
11061 tree arglist, rtx target)
11062 {
11063 rtx pat;
11064 tree arg0 = TREE_VALUE (arglist);
11065 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11066 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11067 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11068 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11069 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11070 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11071
11072 if (VECTOR_MODE_P (mode0))
11073 op0 = safe_vector_operand (op0, mode0);
11074 if (VECTOR_MODE_P (mode1))
11075 op1 = safe_vector_operand (op1, mode1);
11076
11077 if (! target
11078 || GET_MODE (target) != tmode
11079 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11080 target = gen_reg_rtx (tmode);
11081
11082 /* In case the insn wants input operands in modes different from
11083 the result, abort. */
11084 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
11085 abort ();
11086
11087 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11088 op0 = copy_to_mode_reg (mode0, op0);
11089 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11090 op1 = copy_to_mode_reg (mode1, op1);
11091
11092 pat = GEN_FCN (icode) (target, op0, op1);
11093 if (! pat)
11094 return 0;
11095 emit_insn (pat);
11096 return target;
11097 }
11098
11099 /* Subroutine of arm_expand_builtin to take care of unop insns. */
11100
11101 static rtx
11102 arm_expand_unop_builtin (enum insn_code icode,
11103 tree arglist, rtx target, int do_load)
11104 {
11105 rtx pat;
11106 tree arg0 = TREE_VALUE (arglist);
11107 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11108 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11109 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11110
11111 if (! target
11112 || GET_MODE (target) != tmode
11113 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11114 target = gen_reg_rtx (tmode);
11115 if (do_load)
11116 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11117 else
11118 {
11119 if (VECTOR_MODE_P (mode0))
11120 op0 = safe_vector_operand (op0, mode0);
11121
11122 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11123 op0 = copy_to_mode_reg (mode0, op0);
11124 }
11125
11126 pat = GEN_FCN (icode) (target, op0);
11127 if (! pat)
11128 return 0;
11129 emit_insn (pat);
11130 return target;
11131 }
11132
11133 /* Expand an expression EXP that calls a built-in function,
11134 with result going to TARGET if that's convenient
11135 (and in mode MODE if that's convenient).
11136 SUBTARGET may be used as the target for computing one of EXP's operands.
11137 IGNORE is nonzero if the value is to be ignored. */
11138
11139 static rtx
11140 arm_expand_builtin (tree exp,
11141 rtx target,
11142 rtx subtarget ATTRIBUTE_UNUSED,
11143 enum machine_mode mode ATTRIBUTE_UNUSED,
11144 int ignore ATTRIBUTE_UNUSED)
11145 {
11146 const struct builtin_description * d;
11147 enum insn_code icode;
11148 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
11149 tree arglist = TREE_OPERAND (exp, 1);
11150 tree arg0;
11151 tree arg1;
11152 tree arg2;
11153 rtx op0;
11154 rtx op1;
11155 rtx op2;
11156 rtx pat;
11157 int fcode = DECL_FUNCTION_CODE (fndecl);
11158 size_t i;
11159 enum machine_mode tmode;
11160 enum machine_mode mode0;
11161 enum machine_mode mode1;
11162 enum machine_mode mode2;
11163
11164 switch (fcode)
11165 {
11166 case ARM_BUILTIN_TEXTRMSB:
11167 case ARM_BUILTIN_TEXTRMUB:
11168 case ARM_BUILTIN_TEXTRMSH:
11169 case ARM_BUILTIN_TEXTRMUH:
11170 case ARM_BUILTIN_TEXTRMSW:
11171 case ARM_BUILTIN_TEXTRMUW:
11172 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
11173 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
11174 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
11175 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
11176 : CODE_FOR_iwmmxt_textrmw);
11177
11178 arg0 = TREE_VALUE (arglist);
11179 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11180 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11181 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11182 tmode = insn_data[icode].operand[0].mode;
11183 mode0 = insn_data[icode].operand[1].mode;
11184 mode1 = insn_data[icode].operand[2].mode;
11185
11186 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11187 op0 = copy_to_mode_reg (mode0, op0);
11188 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11189 {
11190 /* @@@ better error message */
11191 error ("selector must be an immediate");
11192 return gen_reg_rtx (tmode);
11193 }
11194 if (target == 0
11195 || GET_MODE (target) != tmode
11196 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11197 target = gen_reg_rtx (tmode);
11198 pat = GEN_FCN (icode) (target, op0, op1);
11199 if (! pat)
11200 return 0;
11201 emit_insn (pat);
11202 return target;
11203
11204 case ARM_BUILTIN_TINSRB:
11205 case ARM_BUILTIN_TINSRH:
11206 case ARM_BUILTIN_TINSRW:
11207 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
11208 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
11209 : CODE_FOR_iwmmxt_tinsrw);
11210 arg0 = TREE_VALUE (arglist);
11211 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11212 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
11213 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11214 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11215 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
11216 tmode = insn_data[icode].operand[0].mode;
11217 mode0 = insn_data[icode].operand[1].mode;
11218 mode1 = insn_data[icode].operand[2].mode;
11219 mode2 = insn_data[icode].operand[3].mode;
11220
11221 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11222 op0 = copy_to_mode_reg (mode0, op0);
11223 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11224 op1 = copy_to_mode_reg (mode1, op1);
11225 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
11226 {
11227 /* @@@ better error message */
11228 error ("selector must be an immediate");
11229 return const0_rtx;
11230 }
11231 if (target == 0
11232 || GET_MODE (target) != tmode
11233 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11234 target = gen_reg_rtx (tmode);
11235 pat = GEN_FCN (icode) (target, op0, op1, op2);
11236 if (! pat)
11237 return 0;
11238 emit_insn (pat);
11239 return target;
11240
11241 case ARM_BUILTIN_SETWCX:
11242 arg0 = TREE_VALUE (arglist);
11243 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11244 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11245 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11246 emit_insn (gen_iwmmxt_tmcr (op0, op1));
11247 return 0;
11248
11249 case ARM_BUILTIN_GETWCX:
11250 arg0 = TREE_VALUE (arglist);
11251 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11252 target = gen_reg_rtx (SImode);
11253 emit_insn (gen_iwmmxt_tmrc (target, op0));
11254 return target;
11255
11256 case ARM_BUILTIN_WSHUFH:
11257 icode = CODE_FOR_iwmmxt_wshufh;
11258 arg0 = TREE_VALUE (arglist);
11259 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11260 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11261 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11262 tmode = insn_data[icode].operand[0].mode;
11263 mode1 = insn_data[icode].operand[1].mode;
11264 mode2 = insn_data[icode].operand[2].mode;
11265
11266 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
11267 op0 = copy_to_mode_reg (mode1, op0);
11268 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
11269 {
11270 /* @@@ better error message */
11271 error ("mask must be an immediate");
11272 return const0_rtx;
11273 }
11274 if (target == 0
11275 || GET_MODE (target) != tmode
11276 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11277 target = gen_reg_rtx (tmode);
11278 pat = GEN_FCN (icode) (target, op0, op1);
11279 if (! pat)
11280 return 0;
11281 emit_insn (pat);
11282 return target;
11283
11284 case ARM_BUILTIN_WSADB:
11285 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
11286 case ARM_BUILTIN_WSADH:
11287 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
11288 case ARM_BUILTIN_WSADBZ:
11289 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
11290 case ARM_BUILTIN_WSADHZ:
11291 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
11292
11293 /* Several three-argument builtins. */
11294 case ARM_BUILTIN_WMACS:
11295 case ARM_BUILTIN_WMACU:
11296 case ARM_BUILTIN_WALIGN:
11297 case ARM_BUILTIN_TMIA:
11298 case ARM_BUILTIN_TMIAPH:
11299 case ARM_BUILTIN_TMIATT:
11300 case ARM_BUILTIN_TMIATB:
11301 case ARM_BUILTIN_TMIABT:
11302 case ARM_BUILTIN_TMIABB:
11303 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
11304 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
11305 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
11306 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
11307 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
11308 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
11309 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
11310 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
11311 : CODE_FOR_iwmmxt_walign);
11312 arg0 = TREE_VALUE (arglist);
11313 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11314 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
11315 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11316 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11317 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
11318 tmode = insn_data[icode].operand[0].mode;
11319 mode0 = insn_data[icode].operand[1].mode;
11320 mode1 = insn_data[icode].operand[2].mode;
11321 mode2 = insn_data[icode].operand[3].mode;
11322
11323 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11324 op0 = copy_to_mode_reg (mode0, op0);
11325 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11326 op1 = copy_to_mode_reg (mode1, op1);
11327 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
11328 op2 = copy_to_mode_reg (mode2, op2);
11329 if (target == 0
11330 || GET_MODE (target) != tmode
11331 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11332 target = gen_reg_rtx (tmode);
11333 pat = GEN_FCN (icode) (target, op0, op1, op2);
11334 if (! pat)
11335 return 0;
11336 emit_insn (pat);
11337 return target;
11338
11339 case ARM_BUILTIN_WZERO:
11340 target = gen_reg_rtx (DImode);
11341 emit_insn (gen_iwmmxt_clrdi (target));
11342 return target;
11343
11344 default:
11345 break;
11346 }
11347
11348 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11349 if (d->code == (const enum arm_builtins) fcode)
11350 return arm_expand_binop_builtin (d->icode, arglist, target);
11351
11352 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11353 if (d->code == (const enum arm_builtins) fcode)
11354 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
11355
11356 /* @@@ Should really do something sensible here. */
11357 return NULL_RTX;
11358 }
11359 \f
11360 /* Recursively search through all of the blocks in a function
11361 checking to see if any of the variables created in that
11362 function match the RTX called 'orig'. If they do then
11363 replace them with the RTX called 'new'. */
11364 static void
11365 replace_symbols_in_block (tree block, rtx orig, rtx new)
11366 {
11367 for (; block; block = BLOCK_CHAIN (block))
11368 {
11369 tree sym;
11370
11371 if (!TREE_USED (block))
11372 continue;
11373
11374 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
11375 {
11376 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
11377 || DECL_IGNORED_P (sym)
11378 || TREE_CODE (sym) != VAR_DECL
11379 || DECL_EXTERNAL (sym)
11380 || !rtx_equal_p (DECL_RTL (sym), orig)
11381 )
11382 continue;
11383
11384 SET_DECL_RTL (sym, new);
11385 }
11386
11387 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
11388 }
11389 }
11390
11391 /* Return the number (counting from 0) of
11392 the least significant set bit in MASK. */
11393
11394 inline static int
11395 number_of_first_bit_set (int mask)
11396 {
11397 int bit;
11398
11399 for (bit = 0;
11400 (mask & (1 << bit)) == 0;
11401 ++bit)
11402 continue;
11403
11404 return bit;
11405 }
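
/* For example, number_of_first_bit_set (0x18) returns 3, since bit 3 is
   the lowest set bit of 0x18 (binary 11000).  Note that the loop above
   never terminates for a zero mask, so callers must pass a mask with at
   least one bit set.  */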
11406
11407 /* Generate code to return from a thumb function.
11408 If 'reg_containing_return_addr' is -1, then the return address is
11409 actually on the stack, at the stack pointer. */
11410 static void
11411 thumb_exit (FILE *f, int reg_containing_return_addr, rtx eh_ofs)
11412 {
11413 unsigned regs_available_for_popping;
11414 unsigned regs_to_pop;
11415 int pops_needed;
11416 unsigned available;
11417 unsigned required;
11418 int mode;
11419 int size;
11420 int restore_a4 = FALSE;
11421
11422 /* Compute the registers we need to pop. */
11423 regs_to_pop = 0;
11424 pops_needed = 0;
11425
11426 /* There is an assumption here, that if eh_ofs is not NULL, the
11427 normal return address will have been pushed. */
11428 if (reg_containing_return_addr == -1 || eh_ofs)
11429 {
11430 /* When we are generating a return for __builtin_eh_return,
11431 reg_containing_return_addr must specify the return regno. */
11432 if (eh_ofs && reg_containing_return_addr == -1)
11433 abort ();
11434
11435 regs_to_pop |= 1 << LR_REGNUM;
11436 ++pops_needed;
11437 }
11438
11439 if (TARGET_BACKTRACE)
11440 {
11441 /* Restore the (ARM) frame pointer and stack pointer. */
11442 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
11443 pops_needed += 2;
11444 }
11445
11446 /* If there is nothing to pop then just emit the BX instruction and
11447 return. */
11448 if (pops_needed == 0)
11449 {
11450 if (eh_ofs)
11451 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
11452
11453 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
11454 return;
11455 }
11456 /* Otherwise if we are not supporting interworking and we have not created
11457 a backtrace structure and the function was not entered in ARM mode then
11458 just pop the return address straight into the PC. */
11459 else if (!TARGET_INTERWORK
11460 && !TARGET_BACKTRACE
11461 && !is_called_in_ARM_mode (current_function_decl))
11462 {
11463 if (eh_ofs)
11464 {
11465 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
11466 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
11467 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
11468 }
11469 else
11470 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
11471
11472 return;
11473 }
11474
11475 /* Find out how many of the (return) argument registers we can corrupt. */
11476 regs_available_for_popping = 0;
11477
11478 /* If returning via __builtin_eh_return, the bottom three registers
11479 all contain information needed for the return. */
11480 if (eh_ofs)
11481 size = 12;
11482 else
11483 {
11484 #ifdef RTX_CODE
11485 /* If possible, deduce the registers used from the function's
11486 return value.  This is more reliable than examining
11487 regs_ever_live[] because that will be set if the register is
11488 ever used in the function, not just if the register is used
11489 to hold a return value.  */
11490
11491 if (current_function_return_rtx != 0)
11492 mode = GET_MODE (current_function_return_rtx);
11493 else
11494 #endif
11495 mode = DECL_MODE (DECL_RESULT (current_function_decl));
11496
11497 size = GET_MODE_SIZE (mode);
11498
11499 if (size == 0)
11500 {
11501 /* In a void function we can use any argument register.
11502 In a function that returns a structure on the stack
11503 we can use the second and third argument registers. */
11504 if (mode == VOIDmode)
11505 regs_available_for_popping =
11506 (1 << ARG_REGISTER (1))
11507 | (1 << ARG_REGISTER (2))
11508 | (1 << ARG_REGISTER (3));
11509 else
11510 regs_available_for_popping =
11511 (1 << ARG_REGISTER (2))
11512 | (1 << ARG_REGISTER (3));
11513 }
11514 else if (size <= 4)
11515 regs_available_for_popping =
11516 (1 << ARG_REGISTER (2))
11517 | (1 << ARG_REGISTER (3));
11518 else if (size <= 8)
11519 regs_available_for_popping =
11520 (1 << ARG_REGISTER (3));
11521 }
11522
11523 /* Match registers to be popped with registers into which we pop them. */
11524 for (available = regs_available_for_popping,
11525 required = regs_to_pop;
11526 required != 0 && available != 0;
11527 available &= ~(available & - available),
11528 required &= ~(required & - required))
11529 -- pops_needed;
11530
11531 /* If we have any popping registers left over, remove them. */
11532 if (available > 0)
11533 regs_available_for_popping &= ~available;
11534
11535 /* Otherwise if we need another popping register we can use
11536 the fourth argument register. */
11537 else if (pops_needed)
11538 {
11539 /* If we have not found any free argument registers and
11540 reg a4 contains the return address, we must move it. */
11541 if (regs_available_for_popping == 0
11542 && reg_containing_return_addr == LAST_ARG_REGNUM)
11543 {
11544 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
11545 reg_containing_return_addr = LR_REGNUM;
11546 }
11547 else if (size > 12)
11548 {
11549 /* Register a4 is being used to hold part of the return value,
11550 but we have dire need of a free, low register. */
11551 restore_a4 = TRUE;
11552
11553 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
11554 }
11555
11556 if (reg_containing_return_addr != LAST_ARG_REGNUM)
11557 {
11558 /* The fourth argument register is available. */
11559 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
11560
11561 --pops_needed;
11562 }
11563 }
11564
11565 /* Pop as many registers as we can. */
11566 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
11567 regs_available_for_popping);
11568
11569 /* Process the registers we popped. */
11570 if (reg_containing_return_addr == -1)
11571 {
11572 /* The return address was popped into the lowest numbered register. */
11573 regs_to_pop &= ~(1 << LR_REGNUM);
11574
11575 reg_containing_return_addr =
11576 number_of_first_bit_set (regs_available_for_popping);
11577
11578 /* Remove this register from the mask of available registers, so that
11579 the return address will not be corrupted by further pops. */
11580 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
11581 }
11582
11583 /* If we popped other registers then handle them here. */
11584 if (regs_available_for_popping)
11585 {
11586 int frame_pointer;
11587
11588 /* Work out which register currently contains the frame pointer. */
11589 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
11590
11591 /* Move it into the correct place. */
11592 asm_fprintf (f, "\tmov\t%r, %r\n",
11593 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
11594
11595 /* (Temporarily) remove it from the mask of popped registers. */
11596 regs_available_for_popping &= ~(1 << frame_pointer);
11597 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
11598
11599 if (regs_available_for_popping)
11600 {
11601 int stack_pointer;
11602
11603 /* We popped the stack pointer as well,
11604 find the register that contains it. */
11605 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
11606
11607 /* Move it into the stack register. */
11608 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
11609
11610 /* At this point we have popped all necessary registers, so
11611 do not worry about restoring regs_available_for_popping
11612 to its correct value:
11613
11614 assert (pops_needed == 0)
11615 assert (regs_available_for_popping == (1 << frame_pointer))
11616 assert (regs_to_pop == (1 << STACK_POINTER)) */
11617 }
11618 else
11619 {
11620 /* Since we have just moved the popped value into the frame
11621 pointer, the popping register is available for reuse, and
11622 we know that we still have the stack pointer left to pop. */
11623 regs_available_for_popping |= (1 << frame_pointer);
11624 }
11625 }
11626
11627 /* If we still have registers left on the stack, but we no longer have
11628 any registers into which we can pop them, then we must move the return
11629 address into the link register and make available the register that
11630 contained it. */
11631 if (regs_available_for_popping == 0 && pops_needed > 0)
11632 {
11633 regs_available_for_popping |= 1 << reg_containing_return_addr;
11634
11635 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
11636 reg_containing_return_addr);
11637
11638 reg_containing_return_addr = LR_REGNUM;
11639 }
11640
11641 /* If we have registers left on the stack then pop some more.
11642 We know that at most we will want to pop FP and SP. */
11643 if (pops_needed > 0)
11644 {
11645 int popped_into;
11646 int move_to;
11647
11648 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
11649 regs_available_for_popping);
11650
11651 /* We have popped either FP or SP.
11652 Move whichever one it is into the correct register. */
11653 popped_into = number_of_first_bit_set (regs_available_for_popping);
11654 move_to = number_of_first_bit_set (regs_to_pop);
11655
11656 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
11657
11658 regs_to_pop &= ~(1 << move_to);
11659
11660 --pops_needed;
11661 }
11662
11663 /* If we still have not popped everything then we must have only
11664 had one register available to us and we are now popping the SP. */
11665 if (pops_needed > 0)
11666 {
11667 int popped_into;
11668
11669 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
11670 regs_available_for_popping);
11671
11672 popped_into = number_of_first_bit_set (regs_available_for_popping);
11673
11674 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
11675 /*
11676 assert (regs_to_pop == (1 << STACK_POINTER))
11677 assert (pops_needed == 1)
11678 */
11679 }
11680
11681 /* If necessary restore the a4 register. */
11682 if (restore_a4)
11683 {
11684 if (reg_containing_return_addr != LR_REGNUM)
11685 {
11686 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
11687 reg_containing_return_addr = LR_REGNUM;
11688 }
11689
11690 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
11691 }
11692
11693 if (eh_ofs)
11694 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
11695
11696 /* Return to caller. */
11697 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
11698 }
11699
11700 /* Emit code to push or pop registers to or from the stack. F is the
11701 assembly file. MASK is the registers to push or pop. PUSH is
11702 non-zero if we should push, and zero if we should pop. For debugging
11703 output, if pushing, adjust CFA_OFFSET by the amount of space added
11704 to the stack. REAL_REGS should have the same number of bits set as
11705 MASK, and will be used instead (in the same order) to describe which
11706 registers were saved - this is used to mark the save slots when we
11707 push high registers after moving them to low registers. */
11708 static void
11709 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
11710 {
11711 int regno;
11712 int lo_mask = mask & 0xFF;
11713 int pushed_words = 0;
11714
11715 if (lo_mask == 0 && !push && (mask & (1 << 15)))
11716 {
11717 /* Special case. Do not generate a POP PC statement here, do it in
11718 thumb_exit() */
11719 thumb_exit (f, -1, NULL_RTX);
11720 return;
11721 }
11722
11723 fprintf (f, "\t%s\t{", push ? "push" : "pop");
11724
11725 /* Look at the low registers first. */
11726 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
11727 {
11728 if (lo_mask & 1)
11729 {
11730 asm_fprintf (f, "%r", regno);
11731
11732 if ((lo_mask & ~1) != 0)
11733 fprintf (f, ", ");
11734
11735 pushed_words++;
11736 }
11737 }
11738
11739 if (push && (mask & (1 << LR_REGNUM)))
11740 {
11741 /* Catch pushing the LR. */
11742 if (mask & 0xFF)
11743 fprintf (f, ", ");
11744
11745 asm_fprintf (f, "%r", LR_REGNUM);
11746
11747 pushed_words++;
11748 }
11749 else if (!push && (mask & (1 << PC_REGNUM)))
11750 {
11751 /* Catch popping the PC. */
11752 if (TARGET_INTERWORK || TARGET_BACKTRACE)
11753 {
11754 /* The PC is never popped directly; instead
11755 it is popped into r3 and then BX is used.  */
11756 fprintf (f, "}\n");
11757
11758 thumb_exit (f, -1, NULL_RTX);
11759
11760 return;
11761 }
11762 else
11763 {
11764 if (mask & 0xFF)
11765 fprintf (f, ", ");
11766
11767 asm_fprintf (f, "%r", PC_REGNUM);
11768 }
11769 }
11770
11771 fprintf (f, "}\n");
11772
11773 if (push && pushed_words && dwarf2out_do_frame ())
11774 {
11775 char *l = dwarf2out_cfi_label ();
11776 int pushed_mask = real_regs;
11777
11778 *cfa_offset += pushed_words * 4;
11779 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
11780
11781 pushed_words = 0;
11782 pushed_mask = real_regs;
11783 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
11784 {
11785 if (pushed_mask & 1)
11786 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
11787 }
11788 }
11789 }
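
/* As an illustration, calling thumb_pushpop with PUSH nonzero and a MASK
   of (1 << 4) | (1 << 5) | (1 << 6) | (1 << LR_REGNUM) emits

        push    {r4, r5, r6, lr}

   and, when DWARF frame information is wanted, advances *CFA_OFFSET by 16
   (four words) before recording the save slots described by REAL_REGS.  */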
11790 \f
11791 void
11792 thumb_final_prescan_insn (rtx insn)
11793 {
11794 if (flag_print_asm_name)
11795 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
11796 INSN_ADDRESSES (INSN_UID (insn)));
11797 }
11798
11799 int
11800 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
11801 {
11802 unsigned HOST_WIDE_INT mask = 0xff;
11803 int i;
11804
11805 if (val == 0) /* XXX */
11806 return 0;
11807
11808 for (i = 0; i < 25; i++)
11809 if ((val & (mask << i)) == val)
11810 return 1;
11811
11812 return 0;
11813 }
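
/* For example, 0xFF000000 is accepted (it is 0xff shifted left by 24 and
   so can be built with a short move-then-shift sequence), whereas 0x101
   is rejected because its set bits do not fit inside any single 8-bit
   window.  */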
11814
11815 /* Returns nonzero if the current function contains,
11816 or might contain, a far jump.  */
11817 int
11818 thumb_far_jump_used_p (int in_prologue)
11819 {
11820 rtx insn;
11821
11822 /* This test is only important for leaf functions. */
11823 /* assert (!leaf_function_p ()); */
11824
11825 /* If we have already decided that far jumps may be used,
11826 do not bother checking again, and always return true even if
11827 it turns out that they are not being used. Once we have made
11828 the decision that far jumps are present (and that hence the link
11829 register will be pushed onto the stack) we cannot go back on it. */
11830 if (cfun->machine->far_jump_used)
11831 return 1;
11832
11833 /* If this function is not being called from the prologue/epilogue
11834 generation code then it must be being called from the
11835 INITIAL_ELIMINATION_OFFSET macro. */
11836 if (!in_prologue)
11837 {
11838 /* In this case we know that we are being asked about the elimination
11839 of the arg pointer register. If that register is not being used,
11840 then there are no arguments on the stack, and we do not have to
11841 worry that a far jump might force the prologue to push the link
11842 register, changing the stack offsets. In this case we can just
11843 return false, since the presence of far jumps in the function will
11844 not affect stack offsets.
11845
11846 If the arg pointer is live (or if it was live, but has now been
11847 eliminated and so set to dead) then we do have to test to see if
11848 the function might contain a far jump. This test can lead to some
11849 false negatives, since before reload is completed, the length of
11850 branch instructions is not known, so gcc defaults to returning their
11851 longest length, which in turn sets the far jump attribute to true.
11852
11853 A false negative will not result in bad code being generated, but it
11854 will result in a needless push and pop of the link register. We
11855 hope that this does not occur too often. */
11856 if (regs_ever_live [ARG_POINTER_REGNUM])
11857 cfun->machine->arg_pointer_live = 1;
11858 else if (!cfun->machine->arg_pointer_live)
11859 return 0;
11860 }
11861
11862 /* Check to see if the function contains a branch
11863 insn with the far jump attribute set. */
11864 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11865 {
11866 if (GET_CODE (insn) == JUMP_INSN
11867 /* Ignore tablejump patterns. */
11868 && GET_CODE (PATTERN (insn)) != ADDR_VEC
11869 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
11870 && get_attr_far_jump (insn) == FAR_JUMP_YES
11871 )
11872 {
11873 /* Record the fact that we have decided that
11874 the function does use far jumps. */
11875 cfun->machine->far_jump_used = 1;
11876 return 1;
11877 }
11878 }
11879
11880 return 0;
11881 }
11882
11883 /* Return nonzero if FUNC must be entered in ARM mode. */
11884 int
11885 is_called_in_ARM_mode (tree func)
11886 {
11887 if (TREE_CODE (func) != FUNCTION_DECL)
11888 abort ();
11889
11890 /* Ignore the problem about functions whose address is taken.  */
11891 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
11892 return TRUE;
11893
11894 #ifdef ARM_PE
11895 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
11896 #else
11897 return FALSE;
11898 #endif
11899 }
11900
11901 /* The bits which aren't usefully expanded as rtl. */
11902 const char *
11903 thumb_unexpanded_epilogue (void)
11904 {
11905 int regno;
11906 int live_regs_mask = 0;
11907 int high_regs_pushed = 0;
11908 int leaf_function = leaf_function_p ();
11909 int had_to_push_lr;
11910 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
11911
11912 if (return_used_this_function)
11913 return "";
11914
11915 if (IS_NAKED (arm_current_func_type ()))
11916 return "";
11917
11918 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
11919 if (THUMB_REG_PUSHED_P (regno))
11920 live_regs_mask |= 1 << regno;
11921
11922 for (regno = 8; regno < 13; regno++)
11923 if (THUMB_REG_PUSHED_P (regno))
11924 high_regs_pushed++;
11925
11926 /* The prologue may have pushed some high registers to use as
11927 work registers, e.g. the testsuite file:
11928 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
11929 compiles to produce:
11930 push {r4, r5, r6, r7, lr}
11931 mov r7, r9
11932 mov r6, r8
11933 push {r6, r7}
11934 as part of the prologue.  We have to undo that pushing here.  */
11935
11936 if (high_regs_pushed)
11937 {
11938 int mask = live_regs_mask;
11939 int next_hi_reg;
11940 int size;
11941 int mode;
11942
11943 #ifdef RTX_CODE
11944 /* If possible, deduce the registers used from the function's return value.
11945 This is more reliable than examining regs_ever_live[] because that
11946 will be set if the register is ever used in the function, not just if
11947 the register is used to hold a return value.  */
11948
11949 if (current_function_return_rtx != 0)
11950 mode = GET_MODE (current_function_return_rtx);
11951 else
11952 #endif
11953 mode = DECL_MODE (DECL_RESULT (current_function_decl));
11954
11955 size = GET_MODE_SIZE (mode);
11956
11957 /* Unless we are returning a type of size > 12 register r3 is
11958 available. */
11959 if (size < 13)
11960 mask |= 1 << 3;
11961
11962 if (mask == 0)
11963 /* Oh dear! We have no low registers into which we can pop
11964 high registers! */
11965 internal_error
11966 ("no low registers available for popping high registers");
11967
11968 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
11969 if (THUMB_REG_PUSHED_P (next_hi_reg))
11970 break;
11971
11972 while (high_regs_pushed)
11973 {
11974 /* Find lo register(s) into which the high register(s) can
11975 be popped. */
11976 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
11977 {
11978 if (mask & (1 << regno))
11979 high_regs_pushed--;
11980 if (high_regs_pushed == 0)
11981 break;
11982 }
11983
11984 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
11985
11986 /* Pop the values into the low register(s). */
11987 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
11988
11989 /* Move the value(s) into the high registers. */
11990 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
11991 {
11992 if (mask & (1 << regno))
11993 {
11994 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
11995 regno);
11996
11997 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
11998 if (THUMB_REG_PUSHED_P (next_hi_reg))
11999 break;
12000 }
12001 }
12002 }
12003 }
12004
12005 had_to_push_lr = (live_regs_mask || !leaf_function
12006 || thumb_far_jump_used_p (1));
12007
12008 if (TARGET_BACKTRACE
12009 && ((live_regs_mask & 0xFF) == 0)
12010 && regs_ever_live [LAST_ARG_REGNUM] != 0)
12011 {
12012 /* The stack backtrace structure creation code had to
12013 push R7 in order to get a work register, so we pop
12014 it now. */
12015 live_regs_mask |= (1 << LAST_LO_REGNUM);
12016 }
12017
12018 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12019 {
12020 if (had_to_push_lr
12021 && !is_called_in_ARM_mode (current_function_decl)
12022 && !eh_ofs)
12023 live_regs_mask |= 1 << PC_REGNUM;
12024
12025 /* Either no argument registers were pushed or a backtrace
12026 structure was created which includes an adjusted stack
12027 pointer, so just pop everything. */
12028 if (live_regs_mask)
12029 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12030 live_regs_mask);
12031
12032 if (eh_ofs)
12033 thumb_exit (asm_out_file, 2, eh_ofs);
12034 /* We have either just popped the return address into the
12035 PC, or it was kept in LR for the entire function, or
12036 it is still on the stack because we do not want to
12037 return by doing a pop {pc}. */
12038 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
12039 thumb_exit (asm_out_file,
12040 (had_to_push_lr
12041 && is_called_in_ARM_mode (current_function_decl)) ?
12042 -1 : LR_REGNUM, NULL_RTX);
12043 }
12044 else
12045 {
12046 /* Pop everything but the return address. */
12047 live_regs_mask &= ~(1 << PC_REGNUM);
12048
12049 if (live_regs_mask)
12050 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12051 live_regs_mask);
12052
12053 if (had_to_push_lr)
12054 /* Get the return address into a temporary register. */
12055 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12056 1 << LAST_ARG_REGNUM);
12057
12058 /* Remove the argument registers that were pushed onto the stack. */
12059 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12060 SP_REGNUM, SP_REGNUM,
12061 current_function_pretend_args_size);
12062
12063 if (eh_ofs)
12064 thumb_exit (asm_out_file, 2, eh_ofs);
12065 else
12066 thumb_exit (asm_out_file,
12067 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
12068 }
12069
12070 return "";
12071 }
12072
12073 /* Functions to save and restore machine-specific function data. */
12074 static struct machine_function *
12075 arm_init_machine_status (void)
12076 {
12077 struct machine_function *machine;
12078 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12079
12080 #if ARM_FT_UNKNOWN != 0
12081 machine->func_type = ARM_FT_UNKNOWN;
12082 #endif
12083 return machine;
12084 }
12085
12086 /* Return an RTX indicating where the return address to the
12087 calling function can be found. */
12088 rtx
12089 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12090 {
12091 if (count != 0)
12092 return NULL_RTX;
12093
12094 if (TARGET_APCS_32)
12095 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12096 else
12097 {
12098 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
12099 GEN_INT (RETURN_ADDR_MASK26));
12100 return get_func_hard_reg_initial_val (cfun, lr);
12101 }
12102 }
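
/* Illustrative note: in 26-bit APCS mode the LR also holds the condition
   code and mode bits, so the AND above (assuming RETURN_ADDR_MASK26 keeps
   only the word-aligned address bits) strips them before the value is
   recorded as the incoming return address.  */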
12103
12104 /* Do anything needed before RTL is emitted for each function. */
12105 void
12106 arm_init_expanders (void)
12107 {
12108 /* Arrange to initialize and mark the machine per-function status. */
12109 init_machine_status = arm_init_machine_status;
12110 }
12111
12112 HOST_WIDE_INT
12113 thumb_get_frame_size (void)
12114 {
12115 int regno;
12116
12117 int base_size = ROUND_UP_WORD (get_frame_size ());
12118 int count_regs = 0;
12119 int entry_size = 0;
12120 int leaf;
12121
12122 if (! TARGET_THUMB)
12123 abort ();
12124
12125 if (! TARGET_ATPCS)
12126 return base_size;
12127
12128 /* We need to know if we are a leaf function. Unfortunately, it
12129 is possible to be called after start_sequence has been called,
12130 which causes get_insns to return the insns for the sequence,
12131 not the function, which will cause leaf_function_p to return
12132 the incorrect result.
12133
12134 To work around this, we cache the computed frame size. This
12135 works because we will only be calling RTL expanders that need
12136 to know about leaf functions once reload has completed, and the
12137 frame size cannot be changed after that time, so we can safely
12138 use the cached value. */
12139
12140 if (reload_completed)
12141 return cfun->machine->frame_size;
12142
12143 leaf = leaf_function_p ();
12144
12145 /* A leaf function does not need any stack alignment if it has nothing
12146 on the stack. */
12147 if (leaf && base_size == 0)
12148 {
12149 cfun->machine->frame_size = 0;
12150 return 0;
12151 }
12152
12153 /* We know that SP will be word aligned on entry, and we must
12154 preserve that condition at any subroutine call. But those are
12155 the only constraints. */
12156
12157 /* Space for variadic functions. */
12158 if (current_function_pretend_args_size)
12159 entry_size += current_function_pretend_args_size;
12160
12161 /* Space for pushed lo registers. */
12162 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12163 if (THUMB_REG_PUSHED_P (regno))
12164 count_regs++;
12165
12166 /* Space for backtrace structure. */
12167 if (TARGET_BACKTRACE)
12168 {
12169 if (count_regs == 0 && regs_ever_live[LAST_ARG_REGNUM] != 0)
12170 entry_size += 20;
12171 else
12172 entry_size += 16;
12173 }
12174
12175 if (count_regs || !leaf || thumb_far_jump_used_p (1))
12176 count_regs++; /* LR */
12177
12178 entry_size += count_regs * 4;
12179 count_regs = 0;
12180
12181 /* Space for pushed hi regs. */
12182 for (regno = 8; regno < 13; regno++)
12183 if (THUMB_REG_PUSHED_P (regno))
12184 count_regs++;
12185
12186 entry_size += count_regs * 4;
12187
12188 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
12189 base_size += 4;
12190 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
12191 abort ();
12192
12193 cfun->machine->frame_size = base_size;
12194
12195 return base_size;
12196 }
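
/* Worked example of the ATPCS alignment above: a non-leaf function that
   pushes only r4-r7 plus LR (entry_size = 20) and has base_size = 8 of
   locals with no outgoing arguments gives a total of 28 bytes, which is
   not a multiple of 8, so base_size is bumped to 12 and the total
   becomes 32.  */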
12197
12198 /* Generate the rest of a function's prologue. */
12199 void
12200 thumb_expand_prologue (void)
12201 {
12202 rtx insn, dwarf;
12203
12204 HOST_WIDE_INT amount = (thumb_get_frame_size ()
12205 + current_function_outgoing_args_size);
12206 unsigned long func_type;
12207
12208 func_type = arm_current_func_type ();
12209
12210 /* Naked functions don't have prologues. */
12211 if (IS_NAKED (func_type))
12212 return;
12213
12214 if (IS_INTERRUPT (func_type))
12215 {
12216 error ("interrupt Service Routines cannot be coded in Thumb mode");
12217 return;
12218 }
12219
12220 if (frame_pointer_needed)
12221 {
12222 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
12223 RTX_FRAME_RELATED_P (insn) = 1;
12224 }
12225
12226 if (amount)
12227 {
12228 amount = ROUND_UP_WORD (amount);
12229
12230 if (amount < 512)
12231 {
12232 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
12233 GEN_INT (- amount)));
12234 RTX_FRAME_RELATED_P (insn) = 1;
12235 }
12236 else
12237 {
12238 int regno;
12239 rtx reg;
12240
12241 /* The stack decrement is too big for an immediate value in a single
12242 insn. In theory we could issue multiple subtracts, but after
12243 three of them it becomes more space efficient to place the full
12244 value in the constant pool and load into a register. (Also the
12245 ARM debugger really likes to see only one stack decrement per
12246 function). So instead we look for a scratch register into which
12247 we can load the decrement, and then we subtract this from the
12248 stack pointer. Unfortunately on the thumb the only available
12249 scratch registers are the argument registers, and we cannot use
12250 these as they may hold arguments to the function. Instead we
12251 attempt to locate a call preserved register which is used by this
12252 function. If we can find one, then we know that it will have
12253 been pushed at the start of the prologue and so we can corrupt
12254 it now. */
12255 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
12256 if (THUMB_REG_PUSHED_P (regno)
12257 && !(frame_pointer_needed
12258 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
12259 break;
12260
12261 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
12262 {
12263 rtx spare = gen_rtx (REG, SImode, IP_REGNUM);
12264
12265 /* Choose an arbitrary, non-argument low register. */
12266 reg = gen_rtx (REG, SImode, LAST_LO_REGNUM);
12267
12268 /* Save it by copying it into a high, scratch register. */
12269 emit_insn (gen_movsi (spare, reg));
12270 /* Add a USE to stop propagate_one_insn() from barfing. */
12271 emit_insn (gen_prologue_use (spare));
12272
12273 /* Decrement the stack. */
12274 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
12275 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
12276 stack_pointer_rtx, reg));
12277 RTX_FRAME_RELATED_P (insn) = 1;
12278 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
12279 plus_constant (stack_pointer_rtx,
12280 GEN_INT (- amount)));
12281 RTX_FRAME_RELATED_P (dwarf) = 1;
12282 REG_NOTES (insn)
12283 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
12284 REG_NOTES (insn));
12285
12286 /* Restore the low register's original value. */
12287 emit_insn (gen_movsi (reg, spare));
12288
12289 /* Emit a USE of the restored scratch register, so that flow
12290 analysis will not consider the restore redundant. The
12291 register won't be used again in this function and isn't
12292 restored by the epilogue. */
12293 emit_insn (gen_prologue_use (reg));
12294 }
12295 else
12296 {
12297 reg = gen_rtx (REG, SImode, regno);
12298
12299 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
12300
12301 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
12302 stack_pointer_rtx, reg));
12303 RTX_FRAME_RELATED_P (insn) = 1;
12304 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
12305 plus_constant (stack_pointer_rtx,
12306 GEN_INT (- amount)));
12307 RTX_FRAME_RELATED_P (dwarf) = 1;
12308 REG_NOTES (insn)
12309 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
12310 REG_NOTES (insn));
12311 }
12312 }
12313 }
12314
12315 if (current_function_profile || TARGET_NO_SCHED_PRO)
12316 emit_insn (gen_blockage ());
12317 }
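
/* Illustrative output for the two cases above (assuming a call-saved low
   register such as r7 was pushed by the prologue and is not the frame
   pointer): a 24-byte frame becomes a single "sub sp, #24", while a
   1024-byte frame is handled with something like

        ldr     r7, .Lc         @ .Lc holds the value -1024
        add     sp, r7

   r7's original value is recovered by the epilogue's normal pop.  */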
12318
12319 void
12320 thumb_expand_epilogue (void)
12321 {
12322 HOST_WIDE_INT amount = (thumb_get_frame_size ()
12323 + current_function_outgoing_args_size);
12324 int regno;
12325
12326 /* Naked functions don't have epilogues.  */
12327 if (IS_NAKED (arm_current_func_type ()))
12328 return;
12329
12330 if (frame_pointer_needed)
12331 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
12332 else if (amount)
12333 {
12334 amount = ROUND_UP_WORD (amount);
12335
12336 if (amount < 512)
12337 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
12338 GEN_INT (amount)));
12339 else
12340 {
12341 /* r3 is always free in the epilogue. */
12342 rtx reg = gen_rtx (REG, SImode, LAST_ARG_REGNUM);
12343
12344 emit_insn (gen_movsi (reg, GEN_INT (amount)));
12345 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
12346 }
12347 }
12348
12349 /* Emit a USE (stack_pointer_rtx), so that
12350 the stack adjustment will not be deleted. */
12351 emit_insn (gen_prologue_use (stack_pointer_rtx));
12352
12353 if (current_function_profile || TARGET_NO_SCHED_PRO)
12354 emit_insn (gen_blockage ());
12355
12356 /* Emit a clobber for each register that will be restored in the epilogue,
12357 so that flow2 will get register lifetimes correct.  */
12358 for (regno = 0; regno < 13; regno++)
12359 if (regs_ever_live[regno] && !call_used_regs[regno])
12360 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
12361
12362 if (! regs_ever_live[LR_REGNUM])
12363 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
12364 }
12365
12366 static void
12367 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
12368 {
12369 int live_regs_mask = 0;
12370 int high_regs_pushed = 0;
12371 int cfa_offset = 0;
12372 int regno;
12373
12374 if (IS_NAKED (arm_current_func_type ()))
12375 return;
12376
12377 if (is_called_in_ARM_mode (current_function_decl))
12378 {
12379 const char * name;
12380
12381 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
12382 abort ();
12383 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
12384 abort ();
12385 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
12386
12387 /* Generate code sequence to switch us into Thumb mode. */
12388 /* The .code 32 directive has already been emitted by
12389 ASM_DECLARE_FUNCTION_NAME. */
12390 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
12391 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
12392
12393 /* Generate a label, so that the debugger will notice the
12394 change in instruction sets. This label is also used by
12395 the assembler to bypass the ARM code when this function
12396 is called from a Thumb encoded function elsewhere in the
12397 same file. Hence the definition of STUB_NAME here must
12398 agree with the definition in gas/config/tc-arm.c. */
12399
12400 #define STUB_NAME ".real_start_of"
12401
12402 fprintf (f, "\t.code\t16\n");
12403 #ifdef ARM_PE
12404 if (arm_dllexport_name_p (name))
12405 name = arm_strip_name_encoding (name);
12406 #endif
12407 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
12408 fprintf (f, "\t.thumb_func\n");
12409 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
12410 }
12411
12412 if (current_function_pretend_args_size)
12413 {
12414 if (cfun->machine->uses_anonymous_args)
12415 {
12416 int num_pushes;
12417
12418 fprintf (f, "\tpush\t{");
12419
12420 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
12421
12422 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
12423 regno <= LAST_ARG_REGNUM;
12424 regno++)
12425 asm_fprintf (f, "%r%s", regno,
12426 regno == LAST_ARG_REGNUM ? "" : ", ");
12427
12428 fprintf (f, "}\n");
12429 }
12430 else
12431 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
12432 SP_REGNUM, SP_REGNUM,
12433 current_function_pretend_args_size);
12434
12435 /* We don't need to record the stores for unwinding (would it
12436 help the debugger any if we did?), but record the change in
12437 the stack pointer. */
12438 if (dwarf2out_do_frame ())
12439 {
12440 char *l = dwarf2out_cfi_label ();
12441 cfa_offset = cfa_offset + current_function_pretend_args_size;
12442 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
12443 }
12444 }
12445
12446 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12447 if (THUMB_REG_PUSHED_P (regno))
12448 live_regs_mask |= 1 << regno;
12449
12450 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
12451 live_regs_mask |= 1 << LR_REGNUM;
12452
12453 if (TARGET_BACKTRACE)
12454 {
12455 int offset;
12456 int work_register = 0;
12457 int wr;
12458
12459 /* We have been asked to create a stack backtrace structure.
12460 The code looks like this:
12461
12462 0 .align 2
12463 0 func:
12464 0 sub SP, #16 Reserve space for 4 registers.
12465 2 push {R7} Get a work register.
12466 4 add R7, SP, #20 Get the stack pointer before the push.
12467 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
12468 8 mov R7, PC Get hold of the start of this code plus 12.
12469 10 str R7, [SP, #16] Store it.
12470 12 mov R7, FP Get hold of the current frame pointer.
12471 14 str R7, [SP, #4] Store it.
12472 16 mov R7, LR Get hold of the current return address.
12473 18 str R7, [SP, #12] Store it.
12474 20 add R7, SP, #16 Point at the start of the backtrace structure.
12475 22 mov FP, R7 Put this value into the frame pointer. */
12476
12477 if ((live_regs_mask & 0xFF) == 0)
12478 {
12479 /* See if the a4 register is free. */
12480
12481 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
12482 work_register = LAST_ARG_REGNUM;
12483 else /* We must push a register of our own. */
12484 live_regs_mask |= (1 << LAST_LO_REGNUM);
12485 }
12486
12487 if (work_register == 0)
12488 {
12489 /* Select a register from the list that will be pushed to
12490 use as our work register. */
12491 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
12492 if ((1 << work_register) & live_regs_mask)
12493 break;
12494 }
12495
12496 asm_fprintf
12497 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
12498 SP_REGNUM, SP_REGNUM);
12499
12500 if (dwarf2out_do_frame ())
12501 {
12502 char *l = dwarf2out_cfi_label ();
12503 cfa_offset = cfa_offset + 16;
12504 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
12505 }
12506
12507 if (live_regs_mask)
12508 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
12509
12510 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
12511 if (wr & live_regs_mask)
12512 offset += 4;
12513
12514 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
12515 offset + 16 + current_function_pretend_args_size);
12516
12517 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12518 offset + 4);
12519
12520 /* Make sure that the instruction fetching the PC is in the right place
12521 to calculate "start of backtrace creation code + 12". */
12522 if (live_regs_mask)
12523 {
12524 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
12525 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12526 offset + 12);
12527 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
12528 ARM_HARD_FRAME_POINTER_REGNUM);
12529 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12530 offset);
12531 }
12532 else
12533 {
12534 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
12535 ARM_HARD_FRAME_POINTER_REGNUM);
12536 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12537 offset);
12538 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
12539 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12540 offset + 12);
12541 }
12542
12543 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
12544 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12545 offset + 8);
12546 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
12547 offset + 12);
12548 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
12549 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
12550 }
12551 else if (live_regs_mask)
12552 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
12553
12554 for (regno = 8; regno < 13; regno++)
12555 if (THUMB_REG_PUSHED_P (regno))
12556 high_regs_pushed++;
12557
12558 if (high_regs_pushed)
12559 {
12560 int pushable_regs = 0;
12561 int mask = live_regs_mask & 0xff;
12562 int next_hi_reg;
12563
12564 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
12565 if (THUMB_REG_PUSHED_P (next_hi_reg))
12566 break;
12567
12568 pushable_regs = mask;
12569
12570 if (pushable_regs == 0)
12571 {
12572 /* Desperation time -- this probably will never happen. */
12573 if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
12574 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12575 mask = 1 << LAST_ARG_REGNUM;
12576 }
12577
12578 while (high_regs_pushed > 0)
12579 {
12580 int real_regs_mask = 0;
12581
12582 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
12583 {
12584 if (mask & (1 << regno))
12585 {
12586 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
12587
12588 high_regs_pushed--;
12589 real_regs_mask |= (1 << next_hi_reg);
12590
12591 if (high_regs_pushed)
12592 {
12593 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
12594 next_hi_reg--)
12595 if (THUMB_REG_PUSHED_P (next_hi_reg))
12596 break;
12597 }
12598 else
12599 {
12600 mask &= ~((1 << regno) - 1);
12601 break;
12602 }
12603 }
12604 }
12605
12606 thumb_pushpop (f, mask, 1, &cfa_offset, real_regs_mask);
12607 }
12608
12609 if (pushable_regs == 0
12610 && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
12611 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12612 }
12613 }
12614
12615 /* Handle the case of a double word load into a low register from
12616 a computed memory address. The computed address may involve a
12617 register which is overwritten by the load. */
12618 const char *
12619 thumb_load_double_from_address (rtx *operands)
12620 {
12621 rtx addr;
12622 rtx base;
12623 rtx offset;
12624 rtx arg1;
12625 rtx arg2;
12626
12627 if (GET_CODE (operands[0]) != REG)
12628 abort ();
12629
12630 if (GET_CODE (operands[1]) != MEM)
12631 abort ();
12632
12633 /* Get the memory address. */
12634 addr = XEXP (operands[1], 0);
12635
12636 /* Work out how the memory address is computed. */
12637 switch (GET_CODE (addr))
12638 {
12639 case REG:
12640 operands[2] = gen_rtx (MEM, SImode,
12641 plus_constant (XEXP (operands[1], 0), 4));
12642
12643 if (REGNO (operands[0]) == REGNO (addr))
12644 {
12645 output_asm_insn ("ldr\t%H0, %2", operands);
12646 output_asm_insn ("ldr\t%0, %1", operands);
12647 }
12648 else
12649 {
12650 output_asm_insn ("ldr\t%0, %1", operands);
12651 output_asm_insn ("ldr\t%H0, %2", operands);
12652 }
12653 break;
12654
12655 case CONST:
12656 /* Compute <address> + 4 for the high order load. */
12657 operands[2] = gen_rtx (MEM, SImode,
12658 plus_constant (XEXP (operands[1], 0), 4));
12659
12660 output_asm_insn ("ldr\t%0, %1", operands);
12661 output_asm_insn ("ldr\t%H0, %2", operands);
12662 break;
12663
12664 case PLUS:
12665 arg1 = XEXP (addr, 0);
12666 arg2 = XEXP (addr, 1);
12667
12668 if (CONSTANT_P (arg1))
12669 base = arg2, offset = arg1;
12670 else
12671 base = arg1, offset = arg2;
12672
12673 if (GET_CODE (base) != REG)
12674 abort ();
12675
12676 /* Catch the case of <address> = <reg> + <reg> */
12677 if (GET_CODE (offset) == REG)
12678 {
12679 int reg_offset = REGNO (offset);
12680 int reg_base = REGNO (base);
12681 int reg_dest = REGNO (operands[0]);
12682
12683 /* Add the base and offset registers together into the
12684 higher destination register. */
12685 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
12686 reg_dest + 1, reg_base, reg_offset);
12687
12688 /* Load the lower destination register from the address in
12689 the higher destination register. */
12690 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
12691 reg_dest, reg_dest + 1);
12692
12693 /* Load the higher destination register from its own address
12694 plus 4. */
12695 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
12696 reg_dest + 1, reg_dest + 1);
12697 }
12698 else
12699 {
12700 /* Compute <address> + 4 for the high order load. */
12701 operands[2] = gen_rtx (MEM, SImode,
12702 plus_constant (XEXP (operands[1], 0), 4));
12703
12704 /* If the computed address is held in the low order register
12705 then load the high order register first, otherwise always
12706 load the low order register first. */
12707 if (REGNO (operands[0]) == REGNO (base))
12708 {
12709 output_asm_insn ("ldr\t%H0, %2", operands);
12710 output_asm_insn ("ldr\t%0, %1", operands);
12711 }
12712 else
12713 {
12714 output_asm_insn ("ldr\t%0, %1", operands);
12715 output_asm_insn ("ldr\t%H0, %2", operands);
12716 }
12717 }
12718 break;
12719
12720 case LABEL_REF:
12721 /* With no registers to worry about we can just load the value
12722 directly. */
12723 operands[2] = gen_rtx (MEM, SImode,
12724 plus_constant (XEXP (operands[1], 0), 4));
12725
12726 output_asm_insn ("ldr\t%H0, %2", operands);
12727 output_asm_insn ("ldr\t%0, %1", operands);
12728 break;
12729
12730 default:
12731 abort ();
12732 break;
12733 }
12734
12735 return "";
12736 }
12737
12738 const char *
12739 thumb_output_move_mem_multiple (int n, rtx *operands)
12740 {
12741 rtx tmp;
12742
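  /* LDMIA/STMIA transfer the lowest-numbered register in the list to or
     from the lowest address, so the scratch registers in operands[4..6]
     are sorted into ascending order before the register lists are
     printed.  */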
12743 switch (n)
12744 {
12745 case 2:
12746 if (REGNO (operands[4]) > REGNO (operands[5]))
12747 {
12748 tmp = operands[4];
12749 operands[4] = operands[5];
12750 operands[5] = tmp;
12751 }
12752 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
12753 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
12754 break;
12755
12756 case 3:
12757 if (REGNO (operands[4]) > REGNO (operands[5]))
12758 {
12759 tmp = operands[4];
12760 operands[4] = operands[5];
12761 operands[5] = tmp;
12762 }
12763 if (REGNO (operands[5]) > REGNO (operands[6]))
12764 {
12765 tmp = operands[5];
12766 operands[5] = operands[6];
12767 operands[6] = tmp;
12768 }
12769 if (REGNO (operands[4]) > REGNO (operands[5]))
12770 {
12771 tmp = operands[4];
12772 operands[4] = operands[5];
12773 operands[5] = tmp;
12774 }
12775
12776 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
12777 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
12778 break;
12779
12780 default:
12781 abort ();
12782 }
12783
12784 return "";
12785 }
12786
12787 /* Routines for generating rtl. */
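/* Expand a constant-length block copy (movstrqi).  The length in
   operands[2] is consumed greedily: 12- and 8-byte chunks go through the
   movmem12b/movmem8b ldmia/stmia patterns (which advance the pointer
   registers), and any remaining 4-, 2- and 1-byte tails are copied through
   a fresh SImode, HImode or QImode register at an explicit offset.  */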
12788 void
12789 thumb_expand_movstrqi (rtx *operands)
12790 {
12791 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
12792 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
12793 HOST_WIDE_INT len = INTVAL (operands[2]);
12794 HOST_WIDE_INT offset = 0;
12795
12796 while (len >= 12)
12797 {
12798 emit_insn (gen_movmem12b (out, in, out, in));
12799 len -= 12;
12800 }
12801
12802 if (len >= 8)
12803 {
12804 emit_insn (gen_movmem8b (out, in, out, in));
12805 len -= 8;
12806 }
12807
12808 if (len >= 4)
12809 {
12810 rtx reg = gen_reg_rtx (SImode);
12811 emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
12812 emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
12813 len -= 4;
12814 offset += 4;
12815 }
12816
12817 if (len >= 2)
12818 {
12819 rtx reg = gen_reg_rtx (HImode);
12820 emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
12821 plus_constant (in, offset))));
12822 emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
12823 reg));
12824 len -= 2;
12825 offset += 2;
12826 }
12827
12828 if (len)
12829 {
12830 rtx reg = gen_reg_rtx (QImode);
12831 emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
12832 plus_constant (in, offset))));
12833 emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
12834 reg));
12835 }
12836 }
12837
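/* Return nonzero if OP is valid as the second operand of a Thumb compare:
   either a register or a constant in the range 0..255, the 8-bit immediate
   accepted by the Thumb CMP instruction.  */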
12838 int
12839 thumb_cmp_operand (rtx op, enum machine_mode mode)
12840 {
12841 return ((GET_CODE (op) == CONST_INT
12842 && INTVAL (op) < 256
12843 && INTVAL (op) >= 0)
12844 || s_register_operand (op, mode));
12845 }
12846
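/* Return nonzero if OP is a constant in the range -255..-1, i.e. one whose
   negation fits a Thumb 8-bit immediate; the comparison patterns that use
   this predicate handle such constants without a direct CMP.  */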
12847 int
12848 thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
12849 {
12850 return (GET_CODE (op) == CONST_INT
12851 && INTVAL (op) < 0
12852 && INTVAL (op) > -256);
12853 }
12854
12855 /* Return TRUE if a result can be stored in OP without clobbering the
12856 condition code register. Prior to reload we only accept a
12857 register. After reload we have to be able to handle memory as
12858 well, since a pseudo may not get a hard reg and reload cannot
12859 handle output-reloads on jump insns.
12860
12861 We could possibly handle mem before reload as well, but that might
12862 complicate things with the need to handle increment
12863 side-effects. */
12864
12865 int
12866 thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
12867 {
12868 return (s_register_operand (op, mode)
12869 || ((reload_in_progress || reload_completed)
12870 && memory_operand (op, mode)));
12871 }
12872
12873 /* Handle storing a half-word to memory during reload. */
12874 void
12875 thumb_reload_out_hi (rtx *operands)
12876 {
12877 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
12878 }
12879
12880 /* Handle reading a half-word from memory during reload. */
12881 void
12882 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
12883 {
12884 abort ();
12885 }
12886
12887 /* Return the length of a function name prefix
12888 that starts with the character 'c'. */
12889 static int
12890 arm_get_strip_length (int c)
12891 {
12892 switch (c)
12893 {
12894 ARM_NAME_ENCODING_LENGTHS
12895 default: return 0;
12896 }
12897 }
12898
12899 /* Return a pointer to a function's name with any
12900 and all prefix encodings stripped from it. */
12901 const char *
12902 arm_strip_name_encoding (const char *name)
12903 {
12904 int skip;
12905
12906 while ((skip = arm_get_strip_length (* name)))
12907 name += skip;
12908
12909 return name;
12910 }
12911
12912 /* If there is a '*' anywhere in the name's prefix, then
12913 emit the stripped name verbatim, otherwise prepend an
12914 underscore if leading underscores are being used. */
12915 void
12916 arm_asm_output_labelref (FILE *stream, const char *name)
12917 {
12918 int skip;
12919 int verbatim = 0;
12920
12921 while ((skip = arm_get_strip_length (* name)))
12922 {
12923 verbatim |= (*name == '*');
12924 name += skip;
12925 }
12926
12927 if (verbatim)
12928 fputs (name, stream);
12929 else
12930 asm_fprintf (stream, "%U%s", name);
12931 }
12932
12933 rtx aof_pic_label;
12934
12935 #ifdef AOF_ASSEMBLER
12936 /* Special functions only needed when producing AOF syntax assembler. */
12937
12938 struct pic_chain
12939 {
12940 struct pic_chain * next;
12941 const char * symname;
12942 };
12943
12944 static struct pic_chain * aof_pic_chain = NULL;
12945
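/* Return the address of the x$adcons slot that will hold the address of
   symbol X, allocating a new 4-byte entry at the end of the chain the
   first time X is seen.  aof_dump_pic_table later emits one DCD per
   entry.  */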
12946 rtx
12947 aof_pic_entry (rtx x)
12948 {
12949 struct pic_chain ** chainp;
12950 int offset;
12951
12952 if (aof_pic_label == NULL_RTX)
12953 {
12954 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
12955 }
12956
12957 for (offset = 0, chainp = &aof_pic_chain; *chainp;
12958 offset += 4, chainp = &(*chainp)->next)
12959 if ((*chainp)->symname == XSTR (x, 0))
12960 return plus_constant (aof_pic_label, offset);
12961
12962 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
12963 (*chainp)->next = NULL;
12964 (*chainp)->symname = XSTR (x, 0);
12965 return plus_constant (aof_pic_label, offset);
12966 }
12967
12968 void
12969 aof_dump_pic_table (FILE *f)
12970 {
12971 struct pic_chain * chain;
12972
12973 if (aof_pic_chain == NULL)
12974 return;
12975
12976 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
12977 PIC_OFFSET_TABLE_REGNUM,
12978 PIC_OFFSET_TABLE_REGNUM);
12979 fputs ("|x$adcons|\n", f);
12980
12981 for (chain = aof_pic_chain; chain; chain = chain->next)
12982 {
12983 fputs ("\tDCD\t", f);
12984 assemble_name (f, chain->symname);
12985 fputs ("\n", f);
12986 }
12987 }
12988
12989 int arm_text_section_count = 1;
12990
12991 char *
12992 aof_text_section (void)
12993 {
12994 static char buf[100];
12995 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
12996 arm_text_section_count++);
12997 if (flag_pic)
12998 strcat (buf, ", PIC, REENTRANT");
12999 return buf;
13000 }
13001
13002 static int arm_data_section_count = 1;
13003
13004 char *
13005 aof_data_section (void)
13006 {
13007 static char buf[100];
13008 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13009 return buf;
13010 }
13011
13012 /* The AOF assembler is religiously strict about declarations of
13013 imported and exported symbols, so that it is impossible to declare
13014 a function as imported near the beginning of the file, and then to
13015 export it later on. It is, however, possible to delay the decision
13016 until all the functions in the file have been compiled. To get
13017 around this, we maintain a list of the imports and exports, and
13018 delete from it any that are subsequently defined. At the end of
13019 compilation we spit the remainder of the list out before the END
13020 directive. */
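/* The flow, in outline: aof_add_import records a symbol the first time it
   is referenced, aof_delete_import drops it again if the symbol turns out
   to be defined in this file, and aof_dump_imports emits an IMPORT
   directive for whatever is still on the list at the end of
   compilation.  */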
13021
13022 struct import
13023 {
13024 struct import * next;
13025 const char * name;
13026 };
13027
13028 static struct import * imports_list = NULL;
13029
13030 void
13031 aof_add_import (const char *name)
13032 {
13033 struct import * new;
13034
13035 for (new = imports_list; new; new = new->next)
13036 if (new->name == name)
13037 return;
13038
13039 new = (struct import *) xmalloc (sizeof (struct import));
13040 new->next = imports_list;
13041 imports_list = new;
13042 new->name = name;
13043 }
13044
13045 void
13046 aof_delete_import (const char *name)
13047 {
13048 struct import ** old;
13049
13050 for (old = &imports_list; *old; old = & (*old)->next)
13051 {
13052 if ((*old)->name == name)
13053 {
13054 *old = (*old)->next;
13055 return;
13056 }
13057 }
13058 }
13059
13060 int arm_main_function = 0;
13061
13062 static void
13063 aof_dump_imports (FILE *f)
13064 {
13065 /* The AOF assembler needs this to cause the startup code to be extracted
13066 from the library. Bringing in __main causes the whole thing to work
13067 automagically. */
13068 if (arm_main_function)
13069 {
13070 text_section ();
13071 fputs ("\tIMPORT __main\n", f);
13072 fputs ("\tDCD __main\n", f);
13073 }
13074
13075 /* Now dump the remaining imports. */
13076 while (imports_list)
13077 {
13078 fprintf (f, "\tIMPORT\t");
13079 assemble_name (f, imports_list->name);
13080 fputc ('\n', f);
13081 imports_list = imports_list->next;
13082 }
13083 }
13084
13085 static void
13086 aof_globalize_label (FILE *stream, const char *name)
13087 {
13088 default_globalize_label (stream, name);
13089 if (! strcmp (name, "main"))
13090 arm_main_function = 1;
13091 }
13092
13093 static void
13094 aof_file_start (void)
13095 {
13096 fputs ("__r0\tRN\t0\n", asm_out_file);
13097 fputs ("__a1\tRN\t0\n", asm_out_file);
13098 fputs ("__a2\tRN\t1\n", asm_out_file);
13099 fputs ("__a3\tRN\t2\n", asm_out_file);
13100 fputs ("__a4\tRN\t3\n", asm_out_file);
13101 fputs ("__v1\tRN\t4\n", asm_out_file);
13102 fputs ("__v2\tRN\t5\n", asm_out_file);
13103 fputs ("__v3\tRN\t6\n", asm_out_file);
13104 fputs ("__v4\tRN\t7\n", asm_out_file);
13105 fputs ("__v5\tRN\t8\n", asm_out_file);
13106 fputs ("__v6\tRN\t9\n", asm_out_file);
13107 fputs ("__sl\tRN\t10\n", asm_out_file);
13108 fputs ("__fp\tRN\t11\n", asm_out_file);
13109 fputs ("__ip\tRN\t12\n", asm_out_file);
13110 fputs ("__sp\tRN\t13\n", asm_out_file);
13111 fputs ("__lr\tRN\t14\n", asm_out_file);
13112 fputs ("__pc\tRN\t15\n", asm_out_file);
13113 fputs ("__f0\tFN\t0\n", asm_out_file);
13114 fputs ("__f1\tFN\t1\n", asm_out_file);
13115 fputs ("__f2\tFN\t2\n", asm_out_file);
13116 fputs ("__f3\tFN\t3\n", asm_out_file);
13117 fputs ("__f4\tFN\t4\n", asm_out_file);
13118 fputs ("__f5\tFN\t5\n", asm_out_file);
13119 fputs ("__f6\tFN\t6\n", asm_out_file);
13120 fputs ("__f7\tFN\t7\n", asm_out_file);
13121 text_section ();
13122 }
13123
13124 static void
13125 aof_file_end (void)
13126 {
13127 if (flag_pic)
13128 aof_dump_pic_table (asm_out_file);
13129 aof_dump_imports (asm_out_file);
13130 fputs ("\tEND\n", asm_out_file);
13131 }
13132 #endif /* AOF_ASSEMBLER */
13133
13134 #ifdef OBJECT_FORMAT_ELF
13135 /* Switch to an arbitrary section NAME with attributes as specified
13136 by FLAGS. ALIGN specifies any known alignment requirements for
13137 the section; 0 if the default should be used.
13138
13139 Differs from the default elf version only in the prefix character
13140 used before the section type. */
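/* For instance, a hypothetical writable, allocated data section .foo would
   be emitted as
       .section .foo,"aw",%progbits
   using '%' rather than the '@' of the generic ELF code, since '@'
   introduces a comment in ARM assembler syntax.  */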
13141
13142 static void
13143 arm_elf_asm_named_section (const char *name, unsigned int flags)
13144 {
13145 char flagchars[10], *f = flagchars;
13146
13147 if (! named_section_first_declaration (name))
13148 {
13149 fprintf (asm_out_file, "\t.section\t%s\n", name);
13150 return;
13151 }
13152
13153 if (!(flags & SECTION_DEBUG))
13154 *f++ = 'a';
13155 if (flags & SECTION_WRITE)
13156 *f++ = 'w';
13157 if (flags & SECTION_CODE)
13158 *f++ = 'x';
13159 if (flags & SECTION_SMALL)
13160 *f++ = 's';
13161 if (flags & SECTION_MERGE)
13162 *f++ = 'M';
13163 if (flags & SECTION_STRINGS)
13164 *f++ = 'S';
13165 if (flags & SECTION_TLS)
13166 *f++ = 'T';
13167 *f = '\0';
13168
13169 fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
13170
13171 if (!(flags & SECTION_NOTYPE))
13172 {
13173 const char *type;
13174
13175 if (flags & SECTION_BSS)
13176 type = "nobits";
13177 else
13178 type = "progbits";
13179
13180 fprintf (asm_out_file, ",%%%s", type);
13181
13182 if (flags & SECTION_ENTSIZE)
13183 fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
13184 }
13185
13186 putc ('\n', asm_out_file);
13187 }
13188 #endif
13189
13190 #ifndef ARM_PE
13191 /* Symbols in the text segment can be accessed without indirecting via the
13192 constant pool; it may take an extra binary operation, but this is still
13193 faster than indirecting via memory. Don't do this when not optimizing,
13194 since we won't be calculating all of the offsets necessary to do this
13195 simplification. */
13196
13197 static void
13198 arm_encode_section_info (tree decl, rtx rtl, int first)
13199 {
13200 /* This doesn't work with AOF syntax, since the string table may be in
13201 a different AREA. */
13202 #ifndef AOF_ASSEMBLER
13203 if (optimize > 0 && TREE_CONSTANT (decl)
13204 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
13205 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
13206 #endif
13207
13208 /* If we are referencing a function that is weak then encode a long call
13209 flag in the function name, otherwise if the function is static or
13210 known to be defined in this file then encode a short call flag. */
13211 if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
13212 {
13213 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
13214 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
13215 else if (! TREE_PUBLIC (decl))
13216 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
13217 }
13218 }
13219 #endif /* !ARM_PE */
13220
13221 static void
13222 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
13223 {
13224 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
13225 && !strcmp (prefix, "L"))
13226 {
13227 arm_ccfsm_state = 0;
13228 arm_target_insn = NULL;
13229 }
13230 default_internal_label (stream, prefix, labelno);
13231 }
13232
13233 /* Output code to add DELTA to the first argument, and then jump
13234 to FUNCTION. Used for C++ multiple inheritance. */
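/* Sketch of the output for a small positive DELTA such as 4 (the register
   and value are illustrative):
       add     r0, r0, #4
       b       function
   Larger deltas are emitted as a series of add/sub instructions, each
   handling an 8-bit chunk aligned to an even bit position, as the loop
   below shows.  */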
13235 static void
13236 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
13237 HOST_WIDE_INT delta,
13238 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
13239 tree function)
13240 {
13241 int mi_delta = delta;
13242 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
13243 int shift = 0;
13244 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
13245 ? 1 : 0);
13246 if (mi_delta < 0)
13247 mi_delta = - mi_delta;
13248 while (mi_delta != 0)
13249 {
13250 if ((mi_delta & (3 << shift)) == 0)
13251 shift += 2;
13252 else
13253 {
13254 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
13255 mi_op, this_regno, this_regno,
13256 mi_delta & (0xff << shift));
13257 mi_delta &= ~(0xff << shift);
13258 shift += 8;
13259 }
13260 }
13261 fputs ("\tb\t", file);
13262 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
13263 if (NEED_PLT_RELOC)
13264 fputs ("(PLT)", file);
13265 fputc ('\n', file);
13266 }
13267
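/* Output a CONST_VECTOR as a single hexadecimal literal.  The element at
   the highest index is printed first, each zero-padded to its element
   width (8, 4 or 2 hex digits for V2SI, V4HI and V8QI respectively).  */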
13268 int
13269 arm_emit_vector_const (FILE *file, rtx x)
13270 {
13271 int i;
13272 const char * pattern;
13273
13274 if (GET_CODE (x) != CONST_VECTOR)
13275 abort ();
13276
13277 switch (GET_MODE (x))
13278 {
13279 case V2SImode: pattern = "%08x"; break;
13280 case V4HImode: pattern = "%04x"; break;
13281 case V8QImode: pattern = "%02x"; break;
13282 default: abort ();
13283 }
13284
13285 fprintf (file, "0x");
13286 for (i = CONST_VECTOR_NUNITS (x); i--;)
13287 {
13288 rtx element;
13289
13290 element = CONST_VECTOR_ELT (x, i);
13291 fprintf (file, pattern, INTVAL (element));
13292 }
13293
13294 return 1;
13295 }
13296
13297 const char *
13298 arm_output_load_gr (rtx *operands)
13299 {
13300 rtx reg;
13301 rtx offset;
13302 rtx wcgr;
13303 rtx sum;
13304
13305 if (GET_CODE (operands [1]) != MEM
13306 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
13307 || GET_CODE (reg = XEXP (sum, 0)) != REG
13308 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
13309 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
13310 return "wldrw%?\t%0, %1";
13311
13312 /* Fix up an out-of-range load of a GR register. */
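/* The sequence below spills a core register to the stack, loads the value
   with an ordinary LDR (which reaches offsets WLDRW cannot), moves it into
   the wCGR register with TMCR, and then restores the core register.  */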
13313 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
13314 wcgr = operands[0];
13315 operands[0] = reg;
13316 output_asm_insn ("ldr%?\t%0, %1", operands);
13317
13318 operands[0] = wcgr;
13319 operands[1] = reg;
13320 output_asm_insn ("tmcr%?\t%0, %1", operands);
13321 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
13322
13323 return "";
13324 }