/* gcc/config/arm/arm.c — reconstructed from a gcc.gnu.org git-blame listing;
   blame annotations (commit hashes, author initials) have been removed.  */
b36ba79f 1/* Output routines for GCC for ARM.
949d79eb 2 Copyright (C) 1991, 93-98, 1999 Free Software Foundation, Inc.
cce8749e 3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
956d6950 4 and Martin Simmons (@harleqn.co.uk).
b36ba79f 5 More major hacks by Richard Earnshaw (rearnsha@arm.com).
cce8749e
CH
6
7This file is part of GNU CC.
8
9GNU CC is free software; you can redistribute it and/or modify
10it under the terms of the GNU General Public License as published by
11the Free Software Foundation; either version 2, or (at your option)
12any later version.
13
14GNU CC is distributed in the hope that it will be useful,
15but WITHOUT ANY WARRANTY; without even the implied warranty of
16MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17GNU General Public License for more details.
18
19You should have received a copy of the GNU General Public License
20along with GNU CC; see the file COPYING. If not, write to
8fb289e7
RK
21the Free Software Foundation, 59 Temple Place - Suite 330,
22Boston, MA 02111-1307, USA. */
ff9940b0 23
56636818 24#include "config.h"
43cffd11 25#include "system.h"
cce8749e
CH
26#include "rtl.h"
27#include "regs.h"
28#include "hard-reg-set.h"
29#include "real.h"
30#include "insn-config.h"
31#include "conditions.h"
32#include "insn-flags.h"
33#include "output.h"
34#include "insn-attr.h"
35#include "flags.h"
af48348a 36#include "reload.h"
e2c671ba 37#include "tree.h"
bee06f3d 38#include "expr.h"
ad076f4e 39#include "toplev.h"
aec3cfba 40#include "recog.h"
cce8749e
CH
41
42/* The maximum number of insns skipped which will be conditionalised if
43 possible. */
b36ba79f 44static int max_insns_skipped = 5;
cce8749e 45
f5a1b0d2 46extern FILE * asm_out_file;
cce8749e 47/* Some function declarations. */
cce8749e 48
18af7313 49static HOST_WIDE_INT int_log2 PROTO ((HOST_WIDE_INT));
2e943e99 50static char * output_multi_immediate PROTO ((rtx *, char *, char *, int,
18af7313 51 HOST_WIDE_INT));
2b835d68
RE
52static int arm_gen_constant PROTO ((enum rtx_code, enum machine_mode,
53 HOST_WIDE_INT, rtx, rtx, int, int));
18af7313
RE
54static int arm_naked_function_p PROTO ((tree));
55static void init_fpa_table PROTO ((void));
74bbc178
NC
56static enum machine_mode select_dominance_cc_mode PROTO ((rtx, rtx,
57 HOST_WIDE_INT));
949d79eb
RE
58static HOST_WIDE_INT add_minipool_constant PROTO ((rtx, enum machine_mode));
59static void dump_minipool PROTO ((rtx));
18af7313 60static rtx find_barrier PROTO ((rtx, int));
949d79eb
RE
61static void push_minipool_fix PROTO ((rtx, int, rtx *, enum machine_mode,
62 rtx));
63static void push_minipool_barrier PROTO ((rtx, int));
64static void note_invalid_constants PROTO ((rtx, int));
2e943e99 65static char * fp_const_from_val PROTO ((REAL_VALUE_TYPE *));
18af7313 66static int eliminate_lr2ip PROTO ((rtx *));
2e943e99 67static char * shift_op PROTO ((rtx, HOST_WIDE_INT *));
18af7313
RE
68static int pattern_really_clobbers_lr PROTO ((rtx));
69static int function_really_clobbers_lr PROTO ((rtx));
70static void emit_multi_reg_push PROTO ((int));
b111229a 71static void emit_sfm PROTO ((int, int));
18af7313 72static enum arm_cond_code get_arm_condition_code PROTO ((rtx));
3bccbef6 73static int const_ok_for_op RTX_CODE_PROTO ((HOST_WIDE_INT, Rcode));
f3bb6135 74
/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* What type of floating point are we tuning for?  */
enum floating_point_type arm_fpu;

/* What type of floating point instructions are available?  */
enum floating_point_type arm_fpu_arch;

/* What program mode is the cpu running in? 26-bit mode or 32-bit mode.  */
enum prog_mode_type arm_prgmode;

/* Set by the -mfp=... option.  */
const char * target_fp_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = 32; /* Used to be 8 */

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_FAST_MULT  (1 << 1)        /* Fast multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */

/* The bits in this mask specify which instructions we are allowed to
   generate.  */
static int insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  Note - there is an overlap with the FL_FAST_MULT.  For some
   hardware we want to be able to generate the multiply instructions, but to
   tune as if they were not present in the architecture.  */
static int tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this is an "M" variant of the processor.  */
int arm_fast_multiply = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip is a an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* Nonzero if the prologue must setup `fp'.
   NOTE(review): despite that comment, the name suggests this records
   whether the function takes anonymous (variadic) arguments — it is
   tested alongside pretend_args_size in use_return_insn; confirm.  */
int current_function_anonymous_args;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = 9;

/* Set to one if we think that lr is only saved because of subroutine calls,
   but all of these can be `put after' return insns.  */
int lr_save_eliminated;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
static int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
char * arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

/* NOTE(review): old-style redeclaration; duplicates the PROTO
   declaration of get_arm_condition_code given earlier in the file.  */
static enum arm_cond_code get_arm_condition_code ();

/* String equality shorthand.  */
#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

/* One entry in the tables of names accepted by -mcpu= / -march= /
   -mtune=: the name and the FL_* capability flags it implies.  */
struct processors
{
  char *       name;
  unsigned int flags;
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static struct processors all_cores[] =
{
  /* ARM Cores */

  {"arm2",        FL_CO_PROC | FL_MODE26 },
  {"arm250",      FL_CO_PROC | FL_MODE26 },
  {"arm3",        FL_CO_PROC | FL_MODE26 },
  {"arm6",        FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm60",       FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm600",      FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm610",                   FL_MODE26 | FL_MODE32 },
  {"arm620",      FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7",        FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  /* arm7m doesn't exist on its own, but only with D, (and I), but
     those don't alter the code, so arm7m is sometimes used.  */
  {"arm7m",       FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm7d",       FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7dm",      FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm7di",      FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7dmi",     FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm70",       FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm700",      FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm700i",     FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm710",                   FL_MODE26 | FL_MODE32 },
  {"arm710c",                  FL_MODE26 | FL_MODE32 },
  {"arm7100",                  FL_MODE26 | FL_MODE32 },
  {"arm7500",                  FL_MODE26 | FL_MODE32 },
  /* Doesn't have an external co-proc, but does have embedded fpu.  */
  {"arm7500fe",   FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7tdmi",    FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  {"arm8",        FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm810",      FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm9",        FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm920",      FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm920t",     FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm9tdmi",    FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"strongarm",     FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm110",  FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },

  {NULL, 0}
};

static struct processors all_architectures[] =
{
  /* ARM Architectures */

  { "armv2",      FL_CO_PROC | FL_MODE26 },
  { "armv2a",     FL_CO_PROC | FL_MODE26 },
  { "armv3",      FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  { "armv3m",     FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  { "armv4",      FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  { "armv4t",     FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  { "armv5",      FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
  { NULL, 0 }
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,         "-mcpu=",       all_cores  },
  { NULL,         "-march=",      all_architectures },
  { NULL,         "-mtune=",      all_cores }
};
263
/* Return the number of bits set in VALUE.  */
static unsigned int
bit_count (value)
     signed int value;
{
  /* Work on an unsigned copy: the original computed (value & -value),
     and negating INT_MIN is signed overflow (undefined behaviour).
     Unsigned arithmetic is well defined for every input.  */
  unsigned int v = value;
  unsigned int count = 0;

  while (v)
    {
      v &= v - 1;		/* Clear the least significant set bit.  */
      ++ count;
    }

  return count;
}
279
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.

   Called once at startup.  Resolves -mcpu=/-march=/-mtune= into
   insn_flags and tune_flags, chooses a default CPU when none was
   given, reconciles APCS-26/APCS-32/interworking choices (warning and
   forcing flags where they conflict), then derives the boolean
   capability variables, the floating-point configuration, the
   structure-size boundary, the PIC register, and the constant
   synthesis / conditional-execution tuning knobs.  */
void
arm_override_options ()
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.
     Iterates arm_select in REVERSE order (i == 2 is -mtune=).  */
  for (i = sizeof (arm_select) / sizeof (arm_select[0]); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
	{
	  const struct processors * sel;

	  for (sel = ptr->processors; sel->name != NULL; sel ++)
	    if (streq (ptr->string, sel->name))
	      {
		if (i == 2)
		  tune_flags = sel->flags;
		else
		  {
		    /* If we have been given an architecture and a processor
		       make sure that they are compatible.  We only generate
		       a warning though, and we prefer the CPU over the
		       architecture.  */
		    if (insn_flags != 0 && (insn_flags ^ sel->flags))
		      warning ("switch -mcpu=%s conflicts with -march= switch",
			       ptr->string);

		    insn_flags = sel->flags;
		  }

		break;
	      }

	  if (sel->name == NULL)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      struct processors * sel;
      unsigned int        sought;
      static struct cpu_default
      {
	int    cpu;
	char * name;
      }
      cpu_defaults[] =
      {
	{ TARGET_CPU_arm2,      "arm2" },
	{ TARGET_CPU_arm6,      "arm6" },
	{ TARGET_CPU_arm610,    "arm610" },
	{ TARGET_CPU_arm710,	"arm710" },
	{ TARGET_CPU_arm7m,     "arm7m" },
	{ TARGET_CPU_arm7500fe, "arm7500fe" },
	{ TARGET_CPU_arm7tdmi,  "arm7tdmi" },
	{ TARGET_CPU_arm8,      "arm8" },
	{ TARGET_CPU_arm810,    "arm810" },
	{ TARGET_CPU_arm9,      "arm9" },
	{ TARGET_CPU_strongarm, "strongarm" },
	{ TARGET_CPU_generic,   "arm" },
	{ 0, 0 }
      };
      struct cpu_default * def;

      /* Find the default.  */
      for (def = cpu_defaults; def->name; def ++)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;

      /* Make sure we found the default CPU.  */
      if (def->name == NULL)
	abort ();

      /* Find the default CPU's flags.  */
      for (sel = all_cores; sel->name != NULL; sel ++)
	if (streq (def->name, sel->name))
	  break;

      if (sel->name == NULL)
	abort ();

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
	 switch that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK)
	{
	  sought |= (FL_THUMB | FL_MODE32);

	  /* Force apcs-32 to be used for interworking.  */
	  target_flags |= ARM_FLAG_APCS_32;

	  /* There are no ARM processor that supports both APCS-26 and
	     interworking.  Therefore we force FL_MODE26 to be removed
	     from insn_flags here (if it was set), so that the search
	     below will always be able to find a compatible processor.  */
	  insn_flags &= ~ FL_MODE26;
	}

      if (! TARGET_APCS_32)
	sought |= FL_MODE26;

      if (sought != 0 && ((sought & insn_flags) != sought))
	{
	  /* Try to locate a CPU type that supports all of the abilities
	     of the default CPU, plus the extra abilities requested by
	     the user.
	     NOTE(review): this compares against (sought | insn_flags)
	     with only `sought' masked on the left; confirm the intended
	     condition — the best-fit fallback below papers over misses.  */
	  for (sel = all_cores; sel->name != NULL; sel ++)
	    if ((sel->flags & sought) == (sought | insn_flags))
	      break;

	  if (sel->name == NULL)
	    {
	      unsigned int        current_bit_count = 0;
	      struct processors * best_fit = NULL;

	      /* Ideally we would like to issue an error message here
		 saying that it was not possible to find a CPU compatible
		 with the default CPU, but which also supports the command
		 line options specified by the programmer, and so they
		 ought to use the -mcpu=<name> command line option to
		 override the default CPU type.

		 Unfortunately this does not work with multilibing.  We
		 need to be able to support multilibs for -mapcs-26 and for
		 -mthumb-interwork and there is no CPU that can support both
		 options.  Instead if we cannot find a cpu that has both the
		 characteristics of the default cpu and the given command line
		 options we scan the array again looking for a best match.  */
	      for (sel = all_cores; sel->name != NULL; sel ++)
		if ((sel->flags & sought) == sought)
		  {
		    unsigned int count;

		    count = bit_count (sel->flags & insn_flags);

		    if (count >= current_bit_count)
		      {
			best_fit = sel;
			current_bit_count = count;
		      }
		  }

	      if (best_fit == NULL)
		abort ();
	      else
		sel = best_fit;
	    }

	  insn_flags = sel->flags;
	}
    }

  /* If tuning has not been specified, tune for whichever processor or
     architecture has been selected.  */
  if (tune_flags == 0)
    tune_flags = insn_flags;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
    {
      /* If APCS-32 was not the default then it must have been set by the
	 user, so issue a warning message.  If the user has specified
	 "-mapcs-32 -mcpu=arm2" then we loose here.  */
      if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
	warning ("target CPU does not support APCS-32" );
      target_flags &= ~ ARM_FLAG_APCS_32;
    }
  else if (! TARGET_APCS_32 && !(insn_flags & FL_MODE26))
    {
      warning ("target CPU does not support APCS-26" );
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking" );
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  /* If interworking is enabled then APCS-32 must be selected as well.  */
  if (TARGET_INTERWORK)
    {
      if (! TARGET_APCS_32)
	warning ("interworking forces APCS-32 to be used" );
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_APCS_STACK && ! TARGET_APCS)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    fatal ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  if (write_symbols != NO_DEBUG && flag_omit_frame_pointer)
    warning ("-g with -fomit-frame-pointer may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic && ! TARGET_APCS_STACK)
    arm_pic_register = 10;

  if (TARGET_APCS_FLOAT)
    warning ("Passing floating point arguments in fp regs not yet supported");

  /* Initialise boolean versions of the flags, for use in the arm.md file.  */
  arm_fast_multiply = insn_flags & FL_FAST_MULT;
  arm_arch4         = insn_flags & FL_ARCH4;
  arm_arch5         = insn_flags & FL_ARCH5;

  arm_ld_sched  = tune_flags & FL_LDSCHED;
  arm_is_strong = tune_flags & FL_STRONG;
  arm_is_6_or_7 = ((tune_flags & (FL_MODE26 | FL_MODE32))
		   && !(tune_flags & FL_ARCH4));

  /* Default value for floating point code... if no co-processor
     bus, then schedule for emulated floating point.  Otherwise,
     assume the user has an FPA.
     Note: this does not prevent use of floating point instructions,
     -msoft-float does that.  */
  arm_fpu = (tune_flags & FL_CO_PROC) ? FP_HARD : FP_SOFT3;

  if (target_fp_name)
    {
      if (streq (target_fp_name, "2"))
	arm_fpu_arch = FP_SOFT2;
      else if (streq (target_fp_name, "3"))
	arm_fpu_arch = FP_SOFT3;
      else
	fatal ("Invalid floating point emulation option: -mfpe-%s",
	       target_fp_name);
    }
  else
    arm_fpu_arch = FP_DEFAULT;

  if (TARGET_FPE && arm_fpu != FP_HARD)
    arm_fpu = FP_SOFT2;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* NOTE(review): the variable declared near the top of this file is
     spelt `arm_prgmode'; confirm which spelling the header declares.  */
  arm_prog_mode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32)
	arm_structure_size_boundary = size;
      else
	warning ("Structure size boundary can only be set to 8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register;

      if (! flag_pic)
	warning ("-mpic-register= is useless without -fpic");

      pic_register = decode_reg_name (arm_pic_register_string);

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      if (pic_register < 0 || call_used_regs[pic_register]
	  || pic_register == HARD_FRAME_POINTER_REGNUM
	  || pic_register == STACK_POINTER_REGNUM
	  || pic_register >= PC_REGNUM)
	error ("Unable to use '%s' for PIC register", arm_pic_register_string);
      else
	arm_pic_register = pic_register;
    }

  /* If optimizing for space, don't synthesize constants.
     For processors with load scheduling, it never costs more than 2 cycles
     to load a constant, and the load scheduler may well reduce that to 1.  */
  if (optimize_size || (tune_flags & FL_LDSCHED))
    arm_constant_limit = 1;

  /* If optimizing for size, bump the number of instructions that we
     are prepared to conditionally execute (even on a StrongARM).
     Otherwise for the StrongARM, which has early execution of branches,
     a sequence that is worth skipping is shorter.  */
  if (optimize_size)
    max_insns_skipped = 6;
  else if (arm_is_strong)
    max_insns_skipped = 3;
}
cce8749e 588\f
ff9940b0
RE
589/* Return 1 if it is possible to return using a single instruction */
590
591int
b36ba79f
RE
592use_return_insn (iscond)
593 int iscond;
ff9940b0
RE
594{
595 int regno;
596
f5a1b0d2
NC
597 if (!reload_completed
598 || current_function_pretend_args_size
ff9940b0 599 || current_function_anonymous_args
56636818 600 || ((get_frame_size () + current_function_outgoing_args_size != 0)
f5a1b0d2 601 && !(TARGET_APCS && frame_pointer_needed)))
ff9940b0
RE
602 return 0;
603
b111229a 604 /* Can't be done if interworking with Thumb, and any registers have been
b36ba79f
RE
605 stacked. Similarly, on StrongARM, conditional returns are expensive
606 if they aren't taken and registers have been stacked. */
f5a1b0d2 607 if (iscond && arm_is_strong && frame_pointer_needed)
b36ba79f 608 return 0;
f5a1b0d2 609 if ((iscond && arm_is_strong)
6cfc7210 610 || TARGET_INTERWORK)
6ed30148
RE
611 {
612 for (regno = 0; regno < 16; regno++)
613 if (regs_ever_live[regno] && ! call_used_regs[regno])
614 return 0;
615
616 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
b111229a 617 return 0;
6ed30148 618 }
b111229a 619
ff9940b0
RE
620 /* Can't be done if any of the FPU regs are pushed, since this also
621 requires an insn */
b111229a
RE
622 for (regno = 16; regno < 24; regno++)
623 if (regs_ever_live[regno] && ! call_used_regs[regno])
ff9940b0
RE
624 return 0;
625
31fdb4d5
DE
626 /* If a function is naked, don't use the "return" insn. */
627 if (arm_naked_function_p (current_function_decl))
628 return 0;
629
ff9940b0
RE
630 return 1;
631}
632
cce8749e
CH
633/* Return TRUE if int I is a valid immediate ARM constant. */
634
635int
636const_ok_for_arm (i)
ff9940b0 637 HOST_WIDE_INT i;
cce8749e 638{
ed4c4348 639 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
cce8749e 640
56636818
JL
641 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
642 be all zero, or all one. */
643 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
644 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
ed4c4348
RE
645 != ((~(unsigned HOST_WIDE_INT) 0)
646 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
56636818
JL
647 return FALSE;
648
e2c671ba
RE
649 /* Fast return for 0 and powers of 2 */
650 if ((i & (i - 1)) == 0)
651 return TRUE;
652
cce8749e
CH
653 do
654 {
abaa26e5 655 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
f3bb6135 656 return TRUE;
abaa26e5
RE
657 mask =
658 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
659 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT) 0xffffffff);
ed4c4348 660 } while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
cce8749e 661
f3bb6135
RE
662 return FALSE;
663}
cce8749e 664
e2c671ba 665/* Return true if I is a valid constant for the operation CODE. */
74bbc178
NC
666static int
667const_ok_for_op (i, code)
e2c671ba
RE
668 HOST_WIDE_INT i;
669 enum rtx_code code;
e2c671ba
RE
670{
671 if (const_ok_for_arm (i))
672 return 1;
673
674 switch (code)
675 {
676 case PLUS:
677 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
678
679 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
680 case XOR:
681 case IOR:
682 return 0;
683
684 case AND:
685 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
686
687 default:
688 abort ();
689 }
690}
691
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (code, mode, val, target, source, subtargets)
     enum rtx_code code;
     enum machine_mode mode;
     HOST_WIDE_INT val;
     rtx target;
     rtx source;
     int subtargets;
{
  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
	  && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
	 constants by pushing them into memory so we must synthesise
	 them in-line, regardless of the cost.  This is only likely to
	 be more costly on chips that have load delay slots and we are
	 compiling without running the scheduler (so no splitting
	 occurred before the final instruction emission).

	 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      /* The generate==0 call below is a dry run that only costs the
	 sequence; nothing is emitted by it.  */
      if (! after_arm_reorg
	  && (arm_gen_constant (code, mode, val, target, source, 1, 0)
	      > arm_constant_limit + (code != SET)))
	{
	  if (code == SET)
	    {
	      /* Currently SET is the only monadic value for CODE, all
		 the rest are diadic.  */
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
	      return 1;
	    }
	  else
	    {
	      /* Load VAL into a scratch (or the target itself) and then
		 apply the operation against SOURCE.  */
	      rtx temp = subtargets ? gen_reg_rtx (mode) : target;

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
	      /* For MINUS, the value is subtracted from, since we never
		 have subtraction of a constant.  */
	      if (code == MINUS)
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx (code, mode, temp, source)));
	      else
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx (code, mode, source, temp)));
	      return 2;
	    }
	}
    }

  /* Cheap (or unavoidable) case: synthesise the constant in-line.  */
  return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
}
755
756/* As above, but extra parameter GENERATE which, if clear, suppresses
757 RTL generation. */
758int
759arm_gen_constant (code, mode, val, target, source, subtargets, generate)
760 enum rtx_code code;
761 enum machine_mode mode;
762 HOST_WIDE_INT val;
763 rtx target;
764 rtx source;
765 int subtargets;
766 int generate;
e2c671ba 767{
e2c671ba
RE
768 int can_invert = 0;
769 int can_negate = 0;
770 int can_negate_initial = 0;
771 int can_shift = 0;
772 int i;
773 int num_bits_set = 0;
774 int set_sign_bit_copies = 0;
775 int clear_sign_bit_copies = 0;
776 int clear_zero_bit_copies = 0;
777 int set_zero_bit_copies = 0;
778 int insns = 0;
e2c671ba
RE
779 unsigned HOST_WIDE_INT temp1, temp2;
780 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
781
782 /* find out which operations are safe for a given CODE. Also do a quick
783 check for degenerate cases; these can occur when DImode operations
784 are split. */
785 switch (code)
786 {
787 case SET:
788 can_invert = 1;
789 can_shift = 1;
790 can_negate = 1;
791 break;
792
793 case PLUS:
794 can_negate = 1;
795 can_negate_initial = 1;
796 break;
797
798 case IOR:
799 if (remainder == 0xffffffff)
800 {
2b835d68 801 if (generate)
43cffd11
RE
802 emit_insn (gen_rtx_SET (VOIDmode, target,
803 GEN_INT (ARM_SIGN_EXTEND (val))));
e2c671ba
RE
804 return 1;
805 }
806 if (remainder == 0)
807 {
808 if (reload_completed && rtx_equal_p (target, source))
809 return 0;
2b835d68 810 if (generate)
43cffd11 811 emit_insn (gen_rtx_SET (VOIDmode, target, source));
e2c671ba
RE
812 return 1;
813 }
814 break;
815
816 case AND:
817 if (remainder == 0)
818 {
2b835d68 819 if (generate)
43cffd11 820 emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
e2c671ba
RE
821 return 1;
822 }
823 if (remainder == 0xffffffff)
824 {
825 if (reload_completed && rtx_equal_p (target, source))
826 return 0;
2b835d68 827 if (generate)
43cffd11 828 emit_insn (gen_rtx_SET (VOIDmode, target, source));
e2c671ba
RE
829 return 1;
830 }
831 can_invert = 1;
832 break;
833
834 case XOR:
835 if (remainder == 0)
836 {
837 if (reload_completed && rtx_equal_p (target, source))
838 return 0;
2b835d68 839 if (generate)
43cffd11 840 emit_insn (gen_rtx_SET (VOIDmode, target, source));
e2c671ba
RE
841 return 1;
842 }
843 if (remainder == 0xffffffff)
844 {
2b835d68 845 if (generate)
43cffd11
RE
846 emit_insn (gen_rtx_SET (VOIDmode, target,
847 gen_rtx_NOT (mode, source)));
e2c671ba
RE
848 return 1;
849 }
850
851 /* We don't know how to handle this yet below. */
852 abort ();
853
854 case MINUS:
855 /* We treat MINUS as (val - source), since (source - val) is always
856 passed as (source + (-val)). */
857 if (remainder == 0)
858 {
2b835d68 859 if (generate)
43cffd11
RE
860 emit_insn (gen_rtx_SET (VOIDmode, target,
861 gen_rtx_NEG (mode, source)));
e2c671ba
RE
862 return 1;
863 }
864 if (const_ok_for_arm (val))
865 {
2b835d68 866 if (generate)
43cffd11
RE
867 emit_insn (gen_rtx_SET (VOIDmode, target,
868 gen_rtx_MINUS (mode, GEN_INT (val),
869 source)));
e2c671ba
RE
870 return 1;
871 }
872 can_negate = 1;
873
874 break;
875
876 default:
877 abort ();
878 }
879
880 /* If we can do it in one insn get out quickly */
881 if (const_ok_for_arm (val)
882 || (can_negate_initial && const_ok_for_arm (-val))
883 || (can_invert && const_ok_for_arm (~val)))
884 {
2b835d68 885 if (generate)
43cffd11
RE
886 emit_insn (gen_rtx_SET (VOIDmode, target,
887 (source ? gen_rtx (code, mode, source,
888 GEN_INT (val))
889 : GEN_INT (val))));
e2c671ba
RE
890 return 1;
891 }
892
893
894 /* Calculate a few attributes that may be useful for specific
895 optimizations. */
896
897 for (i = 31; i >= 0; i--)
898 {
899 if ((remainder & (1 << i)) == 0)
900 clear_sign_bit_copies++;
901 else
902 break;
903 }
904
905 for (i = 31; i >= 0; i--)
906 {
907 if ((remainder & (1 << i)) != 0)
908 set_sign_bit_copies++;
909 else
910 break;
911 }
912
913 for (i = 0; i <= 31; i++)
914 {
915 if ((remainder & (1 << i)) == 0)
916 clear_zero_bit_copies++;
917 else
918 break;
919 }
920
921 for (i = 0; i <= 31; i++)
922 {
923 if ((remainder & (1 << i)) != 0)
924 set_zero_bit_copies++;
925 else
926 break;
927 }
928
929 switch (code)
930 {
931 case SET:
932 /* See if we can do this by sign_extending a constant that is known
933 to be negative. This is a good, way of doing it, since the shift
934 may well merge into a subsequent insn. */
935 if (set_sign_bit_copies > 1)
936 {
937 if (const_ok_for_arm
938 (temp1 = ARM_SIGN_EXTEND (remainder
939 << (set_sign_bit_copies - 1))))
940 {
2b835d68
RE
941 if (generate)
942 {
d499463f 943 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
43cffd11
RE
944 emit_insn (gen_rtx_SET (VOIDmode, new_src,
945 GEN_INT (temp1)));
2b835d68
RE
946 emit_insn (gen_ashrsi3 (target, new_src,
947 GEN_INT (set_sign_bit_copies - 1)));
948 }
e2c671ba
RE
949 return 2;
950 }
951 /* For an inverted constant, we will need to set the low bits,
952 these will be shifted out of harm's way. */
953 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
954 if (const_ok_for_arm (~temp1))
955 {
2b835d68
RE
956 if (generate)
957 {
d499463f 958 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
43cffd11
RE
959 emit_insn (gen_rtx_SET (VOIDmode, new_src,
960 GEN_INT (temp1)));
2b835d68
RE
961 emit_insn (gen_ashrsi3 (target, new_src,
962 GEN_INT (set_sign_bit_copies - 1)));
963 }
e2c671ba
RE
964 return 2;
965 }
966 }
967
968 /* See if we can generate this by setting the bottom (or the top)
969 16 bits, and then shifting these into the other half of the
970 word. We only look for the simplest cases, to do more would cost
971 too much. Be careful, however, not to generate this when the
972 alternative would take fewer insns. */
973 if (val & 0xffff0000)
974 {
975 temp1 = remainder & 0xffff0000;
976 temp2 = remainder & 0x0000ffff;
977
978 /* Overlaps outside this range are best done using other methods. */
979 for (i = 9; i < 24; i++)
980 {
981 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
982 && ! const_ok_for_arm (temp2))
983 {
d499463f
RE
984 rtx new_src = (subtargets
985 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
986 : target);
987 insns = arm_gen_constant (code, mode, temp2, new_src,
2b835d68 988 source, subtargets, generate);
e2c671ba 989 source = new_src;
2b835d68 990 if (generate)
43cffd11
RE
991 emit_insn (gen_rtx_SET
992 (VOIDmode, target,
993 gen_rtx_IOR (mode,
994 gen_rtx_ASHIFT (mode, source,
995 GEN_INT (i)),
996 source)));
e2c671ba
RE
997 return insns + 1;
998 }
999 }
1000
1001 /* Don't duplicate cases already considered. */
1002 for (i = 17; i < 24; i++)
1003 {
1004 if (((temp1 | (temp1 >> i)) == remainder)
1005 && ! const_ok_for_arm (temp1))
1006 {
d499463f
RE
1007 rtx new_src = (subtargets
1008 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1009 : target);
1010 insns = arm_gen_constant (code, mode, temp1, new_src,
2b835d68 1011 source, subtargets, generate);
e2c671ba 1012 source = new_src;
2b835d68 1013 if (generate)
43cffd11
RE
1014 emit_insn
1015 (gen_rtx_SET (VOIDmode, target,
1016 gen_rtx_IOR
1017 (mode,
1018 gen_rtx_LSHIFTRT (mode, source,
1019 GEN_INT (i)),
1020 source)));
e2c671ba
RE
1021 return insns + 1;
1022 }
1023 }
1024 }
1025 break;
1026
1027 case IOR:
1028 case XOR:
7b64da89
RE
1029 /* If we have IOR or XOR, and the constant can be loaded in a
1030 single instruction, and we can find a temporary to put it in,
e2c671ba
RE
1031 then this can be done in two instructions instead of 3-4. */
1032 if (subtargets
d499463f 1033 /* TARGET can't be NULL if SUBTARGETS is 0 */
e2c671ba
RE
1034 || (reload_completed && ! reg_mentioned_p (target, source)))
1035 {
1036 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val)))
1037 {
2b835d68
RE
1038 if (generate)
1039 {
1040 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
e2c671ba 1041
43cffd11
RE
1042 emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
1043 emit_insn (gen_rtx_SET (VOIDmode, target,
1044 gen_rtx (code, mode, source, sub)));
2b835d68 1045 }
e2c671ba
RE
1046 return 2;
1047 }
1048 }
1049
1050 if (code == XOR)
1051 break;
1052
1053 if (set_sign_bit_copies > 8
1054 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1055 {
2b835d68
RE
1056 if (generate)
1057 {
1058 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1059 rtx shift = GEN_INT (set_sign_bit_copies);
1060
43cffd11
RE
1061 emit_insn (gen_rtx_SET (VOIDmode, sub,
1062 gen_rtx_NOT (mode,
1063 gen_rtx_ASHIFT (mode,
1064 source,
f5a1b0d2 1065 shift))));
43cffd11
RE
1066 emit_insn (gen_rtx_SET (VOIDmode, target,
1067 gen_rtx_NOT (mode,
1068 gen_rtx_LSHIFTRT (mode, sub,
1069 shift))));
2b835d68 1070 }
e2c671ba
RE
1071 return 2;
1072 }
1073
1074 if (set_zero_bit_copies > 8
1075 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1076 {
2b835d68
RE
1077 if (generate)
1078 {
1079 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1080 rtx shift = GEN_INT (set_zero_bit_copies);
1081
43cffd11
RE
1082 emit_insn (gen_rtx_SET (VOIDmode, sub,
1083 gen_rtx_NOT (mode,
1084 gen_rtx_LSHIFTRT (mode,
1085 source,
f5a1b0d2 1086 shift))));
43cffd11
RE
1087 emit_insn (gen_rtx_SET (VOIDmode, target,
1088 gen_rtx_NOT (mode,
1089 gen_rtx_ASHIFT (mode, sub,
f5a1b0d2 1090 shift))));
2b835d68 1091 }
e2c671ba
RE
1092 return 2;
1093 }
1094
1095 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~ val)))
1096 {
2b835d68
RE
1097 if (generate)
1098 {
1099 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
43cffd11
RE
1100 emit_insn (gen_rtx_SET (VOIDmode, sub,
1101 gen_rtx_NOT (mode, source)));
2b835d68
RE
1102 source = sub;
1103 if (subtargets)
1104 sub = gen_reg_rtx (mode);
43cffd11
RE
1105 emit_insn (gen_rtx_SET (VOIDmode, sub,
1106 gen_rtx_AND (mode, source,
1107 GEN_INT (temp1))));
1108 emit_insn (gen_rtx_SET (VOIDmode, target,
1109 gen_rtx_NOT (mode, sub)));
2b835d68 1110 }
e2c671ba
RE
1111 return 3;
1112 }
1113 break;
1114
1115 case AND:
1116 /* See if two shifts will do 2 or more insn's worth of work. */
1117 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1118 {
1119 HOST_WIDE_INT shift_mask = ((0xffffffff
1120 << (32 - clear_sign_bit_copies))
1121 & 0xffffffff);
e2c671ba
RE
1122
1123 if ((remainder | shift_mask) != 0xffffffff)
1124 {
2b835d68
RE
1125 if (generate)
1126 {
d499463f 1127 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2b835d68 1128 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
d499463f
RE
1129 new_src, source, subtargets, 1);
1130 source = new_src;
2b835d68
RE
1131 }
1132 else
d499463f
RE
1133 {
1134 rtx targ = subtargets ? NULL_RTX : target;
1135 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1136 targ, source, subtargets, 0);
1137 }
2b835d68
RE
1138 }
1139
1140 if (generate)
1141 {
d499463f
RE
1142 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1143 rtx shift = GEN_INT (clear_sign_bit_copies);
1144
1145 emit_insn (gen_ashlsi3 (new_src, source, shift));
1146 emit_insn (gen_lshrsi3 (target, new_src, shift));
e2c671ba
RE
1147 }
1148
e2c671ba
RE
1149 return insns + 2;
1150 }
1151
1152 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
1153 {
1154 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
e2c671ba
RE
1155
1156 if ((remainder | shift_mask) != 0xffffffff)
1157 {
2b835d68
RE
1158 if (generate)
1159 {
d499463f
RE
1160 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1161
2b835d68 1162 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
d499463f
RE
1163 new_src, source, subtargets, 1);
1164 source = new_src;
2b835d68
RE
1165 }
1166 else
d499463f
RE
1167 {
1168 rtx targ = subtargets ? NULL_RTX : target;
1169
1170 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1171 targ, source, subtargets, 0);
1172 }
2b835d68
RE
1173 }
1174
1175 if (generate)
1176 {
d499463f
RE
1177 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1178 rtx shift = GEN_INT (clear_zero_bit_copies);
1179
1180 emit_insn (gen_lshrsi3 (new_src, source, shift));
1181 emit_insn (gen_ashlsi3 (target, new_src, shift));
e2c671ba
RE
1182 }
1183
e2c671ba
RE
1184 return insns + 2;
1185 }
1186
1187 break;
1188
1189 default:
1190 break;
1191 }
1192
1193 for (i = 0; i < 32; i++)
1194 if (remainder & (1 << i))
1195 num_bits_set++;
1196
1197 if (code == AND || (can_invert && num_bits_set > 16))
1198 remainder = (~remainder) & 0xffffffff;
1199 else if (code == PLUS && num_bits_set > 16)
1200 remainder = (-remainder) & 0xffffffff;
1201 else
1202 {
1203 can_invert = 0;
1204 can_negate = 0;
1205 }
1206
1207 /* Now try and find a way of doing the job in either two or three
1208 instructions.
1209 We start by looking for the largest block of zeros that are aligned on
1210 a 2-bit boundary, we then fill up the temps, wrapping around to the
1211 top of the word when we drop off the bottom.
1212 In the worst case this code should produce no more than four insns. */
1213 {
1214 int best_start = 0;
1215 int best_consecutive_zeros = 0;
1216
1217 for (i = 0; i < 32; i += 2)
1218 {
1219 int consecutive_zeros = 0;
1220
1221 if (! (remainder & (3 << i)))
1222 {
1223 while ((i < 32) && ! (remainder & (3 << i)))
1224 {
1225 consecutive_zeros += 2;
1226 i += 2;
1227 }
1228 if (consecutive_zeros > best_consecutive_zeros)
1229 {
1230 best_consecutive_zeros = consecutive_zeros;
1231 best_start = i - consecutive_zeros;
1232 }
1233 i -= 2;
1234 }
1235 }
1236
1237 /* Now start emitting the insns, starting with the one with the highest
1238 bit set: we do this so that the smallest number will be emitted last;
1239 this is more likely to be combinable with addressing insns. */
1240 i = best_start;
1241 do
1242 {
1243 int end;
1244
1245 if (i <= 0)
1246 i += 32;
1247 if (remainder & (3 << (i - 2)))
1248 {
1249 end = i - 8;
1250 if (end < 0)
1251 end += 32;
1252 temp1 = remainder & ((0x0ff << end)
1253 | ((i < end) ? (0xff >> (32 - end)) : 0));
1254 remainder &= ~temp1;
1255
d499463f 1256 if (generate)
e2c671ba 1257 {
d499463f
RE
1258 rtx new_src;
1259
1260 if (code == SET)
43cffd11
RE
1261 emit_insn (gen_rtx_SET (VOIDmode,
1262 new_src = (subtargets
1263 ? gen_reg_rtx (mode)
1264 : target),
1265 GEN_INT (can_invert
1266 ? ~temp1 : temp1)));
d499463f 1267 else if (code == MINUS)
43cffd11
RE
1268 emit_insn (gen_rtx_SET (VOIDmode,
1269 new_src = (subtargets
1270 ? gen_reg_rtx (mode)
1271 : target),
1272 gen_rtx (code, mode, GEN_INT (temp1),
1273 source)));
d499463f 1274 else
43cffd11
RE
1275 emit_insn (gen_rtx_SET (VOIDmode,
1276 new_src = (remainder
1277 ? (subtargets
1278 ? gen_reg_rtx (mode)
1279 : target)
1280 : target),
1281 gen_rtx (code, mode, source,
1282 GEN_INT (can_invert ? ~temp1
1283 : (can_negate
1284 ? -temp1
1285 : temp1)))));
d499463f 1286 source = new_src;
e2c671ba
RE
1287 }
1288
d499463f
RE
1289 if (code == SET)
1290 {
1291 can_invert = 0;
1292 code = PLUS;
1293 }
1294 else if (code == MINUS)
1295 code = PLUS;
1296
e2c671ba 1297 insns++;
e2c671ba
RE
1298 i -= 6;
1299 }
1300 i -= 2;
1301 } while (remainder);
1302 }
1303 return insns;
1304}
1305
bd9c7e23
RE
1306/* Canonicalize a comparison so that we are more likely to recognize it.
1307 This can be done for a few constant compares, where we can make the
1308 immediate value easier to load. */
1309enum rtx_code
1310arm_canonicalize_comparison (code, op1)
1311 enum rtx_code code;
62b10bbc 1312 rtx * op1;
bd9c7e23 1313{
ad076f4e 1314 unsigned HOST_WIDE_INT i = INTVAL (*op1);
bd9c7e23
RE
1315
1316 switch (code)
1317 {
1318 case EQ:
1319 case NE:
1320 return code;
1321
1322 case GT:
1323 case LE:
ad076f4e
RE
1324 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
1325 - 1)
bd9c7e23
RE
1326 && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
1327 {
1328 *op1 = GEN_INT (i+1);
1329 return code == GT ? GE : LT;
1330 }
1331 break;
1332
1333 case GE:
1334 case LT:
ad076f4e 1335 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
bd9c7e23
RE
1336 && (const_ok_for_arm (i-1) || const_ok_for_arm (- (i-1))))
1337 {
1338 *op1 = GEN_INT (i-1);
1339 return code == GE ? GT : LE;
1340 }
1341 break;
1342
1343 case GTU:
1344 case LEU:
ad076f4e 1345 if (i != ~((unsigned HOST_WIDE_INT) 0)
bd9c7e23
RE
1346 && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
1347 {
1348 *op1 = GEN_INT (i + 1);
1349 return code == GTU ? GEU : LTU;
1350 }
1351 break;
1352
1353 case GEU:
1354 case LTU:
1355 if (i != 0
1356 && (const_ok_for_arm (i - 1) || const_ok_for_arm (- (i - 1))))
1357 {
1358 *op1 = GEN_INT (i - 1);
1359 return code == GEU ? GTU : LEU;
1360 }
1361 break;
1362
1363 default:
1364 abort ();
1365 }
1366
1367 return code;
1368}
bd9c7e23 1369
f5a1b0d2
NC
1370/* Decide whether a type should be returned in memory (true)
1371 or in a register (false). This is called by the macro
1372 RETURN_IN_MEMORY. */
2b835d68
RE
1373int
1374arm_return_in_memory (type)
1375 tree type;
1376{
f5a1b0d2
NC
1377 if (! AGGREGATE_TYPE_P (type))
1378 {
1379 /* All simple types are returned in registers. */
1380 return 0;
1381 }
1382 else if (int_size_in_bytes (type) > 4)
1383 {
1384 /* All structures/unions bigger than one word are returned in memory. */
1385 return 1;
1386 }
1387 else if (TREE_CODE (type) == RECORD_TYPE)
2b835d68
RE
1388 {
1389 tree field;
1390
f5a1b0d2
NC
1391 /* For a struct the APCS says that we must return in a register if
1392 every addressable element has an offset of zero. For practical
1393 purposes this means that the structure can have at most one non
1394 bit-field element and that this element must be the first one in
1395 the structure. */
1396
1397 /* Find the first field, ignoring non FIELD_DECL things which will
1398 have been created by C++. */
1399 for (field = TYPE_FIELDS (type);
1400 field && TREE_CODE (field) != FIELD_DECL;
1401 field = TREE_CHAIN (field))
1402 continue;
1403
1404 if (field == NULL)
1405 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
1406
1407 /* Now check the remaining fields, if any. */
1408 for (field = TREE_CHAIN (field);
1409 field;
1410 field = TREE_CHAIN (field))
1411 {
1412 if (TREE_CODE (field) != FIELD_DECL)
1413 continue;
1414
1415 if (! DECL_BIT_FIELD_TYPE (field))
1416 return 1;
1417 }
2b835d68
RE
1418
1419 return 0;
1420 }
1421 else if (TREE_CODE (type) == UNION_TYPE)
1422 {
1423 tree field;
1424
1425 /* Unions can be returned in registers if every element is
1426 integral, or can be returned in an integer register. */
f5a1b0d2
NC
1427 for (field = TYPE_FIELDS (type);
1428 field;
1429 field = TREE_CHAIN (field))
2b835d68 1430 {
f5a1b0d2
NC
1431 if (TREE_CODE (field) != FIELD_DECL)
1432 continue;
1433
6cc8c0b3
NC
1434 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1435 return 1;
1436
f5a1b0d2 1437 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2b835d68
RE
1438 return 1;
1439 }
f5a1b0d2 1440
2b835d68
RE
1441 return 0;
1442 }
f5a1b0d2 1443
2b835d68
RE
1444 /* XXX Not sure what should be done for other aggregates, so put them in
1445 memory. */
1446 return 1;
1447}
1448
32de079a
RE
1449int
1450legitimate_pic_operand_p (x)
1451 rtx x;
1452{
1453 if (CONSTANT_P (x) && flag_pic
1454 && (GET_CODE (x) == SYMBOL_REF
1455 || (GET_CODE (x) == CONST
1456 && GET_CODE (XEXP (x, 0)) == PLUS
1457 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
1458 return 0;
1459
1460 return 1;
1461}
1462
1463rtx
1464legitimize_pic_address (orig, mode, reg)
1465 rtx orig;
1466 enum machine_mode mode;
1467 rtx reg;
1468{
1469 if (GET_CODE (orig) == SYMBOL_REF)
1470 {
1471 rtx pic_ref, address;
1472 rtx insn;
1473 int subregs = 0;
1474
1475 if (reg == 0)
1476 {
1477 if (reload_in_progress || reload_completed)
1478 abort ();
1479 else
1480 reg = gen_reg_rtx (Pmode);
1481
1482 subregs = 1;
1483 }
1484
1485#ifdef AOF_ASSEMBLER
1486 /* The AOF assembler can generate relocations for these directly, and
1487 understands that the PIC register has to be added into the offset.
1488 */
1489 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
1490#else
1491 if (subregs)
1492 address = gen_reg_rtx (Pmode);
1493 else
1494 address = reg;
1495
1496 emit_insn (gen_pic_load_addr (address, orig));
1497
43cffd11
RE
1498 pic_ref = gen_rtx_MEM (Pmode,
1499 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
1500 address));
32de079a
RE
1501 RTX_UNCHANGING_P (pic_ref) = 1;
1502 insn = emit_move_insn (reg, pic_ref);
1503#endif
1504 current_function_uses_pic_offset_table = 1;
1505 /* Put a REG_EQUAL note on this insn, so that it can be optimized
1506 by loop. */
43cffd11
RE
1507 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
1508 REG_NOTES (insn));
32de079a
RE
1509 return reg;
1510 }
1511 else if (GET_CODE (orig) == CONST)
1512 {
1513 rtx base, offset;
1514
1515 if (GET_CODE (XEXP (orig, 0)) == PLUS
1516 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
1517 return orig;
1518
1519 if (reg == 0)
1520 {
1521 if (reload_in_progress || reload_completed)
1522 abort ();
1523 else
1524 reg = gen_reg_rtx (Pmode);
1525 }
1526
1527 if (GET_CODE (XEXP (orig, 0)) == PLUS)
1528 {
1529 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
1530 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
1531 base == reg ? 0 : reg);
1532 }
1533 else
1534 abort ();
1535
1536 if (GET_CODE (offset) == CONST_INT)
1537 {
1538 /* The base register doesn't really matter, we only want to
1539 test the index for the appropriate mode. */
1540 GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win);
1541
1542 if (! reload_in_progress && ! reload_completed)
1543 offset = force_reg (Pmode, offset);
1544 else
1545 abort ();
1546
1547 win:
1548 if (GET_CODE (offset) == CONST_INT)
1549 return plus_constant_for_output (base, INTVAL (offset));
1550 }
1551
1552 if (GET_MODE_SIZE (mode) > 4
1553 && (GET_MODE_CLASS (mode) == MODE_INT
1554 || TARGET_SOFT_FLOAT))
1555 {
1556 emit_insn (gen_addsi3 (reg, base, offset));
1557 return reg;
1558 }
1559
43cffd11 1560 return gen_rtx_PLUS (Pmode, base, offset);
32de079a
RE
1561 }
1562 else if (GET_CODE (orig) == LABEL_REF)
1563 current_function_uses_pic_offset_table = 1;
1564
1565 return orig;
1566}
1567
1568static rtx pic_rtx;
1569
1570int
62b10bbc 1571is_pic (x)
32de079a
RE
1572 rtx x;
1573{
1574 if (x == pic_rtx)
1575 return 1;
1576 return 0;
1577}
1578
1579void
1580arm_finalize_pic ()
1581{
1582#ifndef AOF_ASSEMBLER
1583 rtx l1, pic_tmp, pic_tmp2, seq;
1584 rtx global_offset_table;
1585
ed0e6530 1586 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
32de079a
RE
1587 return;
1588
1589 if (! flag_pic)
1590 abort ();
1591
1592 start_sequence ();
1593 l1 = gen_label_rtx ();
1594
43cffd11 1595 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
dfa08768
RE
1596 /* On the ARM the PC register contains 'dot + 8' at the time of the
1597 addition. */
1598 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), 8);
84306176
PB
1599 if (GOT_PCREL)
1600 pic_tmp2 = gen_rtx_CONST (VOIDmode,
43cffd11 1601 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
84306176
PB
1602 else
1603 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
43cffd11
RE
1604
1605 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
f5a1b0d2 1606
32de079a 1607 emit_insn (gen_pic_load_addr (pic_offset_table_rtx, pic_rtx));
dfa08768 1608 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
32de079a
RE
1609
1610 seq = gen_sequence ();
1611 end_sequence ();
1612 emit_insn_after (seq, get_insns ());
1613
1614 /* Need to emit this whether or not we obey regdecls,
1615 since setjmp/longjmp can cause life info to screw up. */
43cffd11 1616 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
32de079a
RE
1617#endif /* AOF_ASSEMBLER */
1618}
1619
e2c671ba
RE
/* Helper predicates used by the rtx cost routines below.
   REG_OR_SUBREG_REG: X is a REG, or a SUBREG of a REG.
   REG_OR_SUBREG_RTX: strip one SUBREG level off X, if present.
   ARM_FRAME_RTX: X is one of the frame-related pointer registers.  */
#define REG_OR_SUBREG_REG(X) \
  (GET_CODE (X) == REG \
   || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))

#define REG_OR_SUBREG_RTX(X) \
  (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))

#define ARM_FRAME_RTX(X) \
  ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
   || (X) == arg_pointer_rtx)
1630
1631int
74bbc178 1632arm_rtx_costs (x, code)
e2c671ba 1633 rtx x;
74bbc178 1634 enum rtx_code code;
e2c671ba
RE
1635{
1636 enum machine_mode mode = GET_MODE (x);
1637 enum rtx_code subcode;
1638 int extra_cost;
1639
1640 switch (code)
1641 {
1642 case MEM:
1643 /* Memory costs quite a lot for the first word, but subsequent words
1644 load at the equivalent of a single insn each. */
1645 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
1646 + (CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
1647
1648 case DIV:
1649 case MOD:
1650 return 100;
1651
1652 case ROTATE:
1653 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
1654 return 4;
1655 /* Fall through */
1656 case ROTATERT:
1657 if (mode != SImode)
1658 return 8;
1659 /* Fall through */
1660 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
1661 if (mode == DImode)
1662 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
1663 + ((GET_CODE (XEXP (x, 0)) == REG
1664 || (GET_CODE (XEXP (x, 0)) == SUBREG
1665 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
1666 ? 0 : 8));
1667 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
1668 || (GET_CODE (XEXP (x, 0)) == SUBREG
1669 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
1670 ? 0 : 4)
1671 + ((GET_CODE (XEXP (x, 1)) == REG
1672 || (GET_CODE (XEXP (x, 1)) == SUBREG
1673 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
1674 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
1675 ? 0 : 4));
1676
1677 case MINUS:
1678 if (mode == DImode)
1679 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
1680 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
1681 || (GET_CODE (XEXP (x, 0)) == CONST_INT
1682 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
1683 ? 0 : 8));
1684
1685 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1686 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
1687 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
1688 && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
1689 ? 0 : 8)
1690 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
1691 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
1692 && const_double_rtx_ok_for_fpu (XEXP (x, 0))))
1693 ? 0 : 8));
1694
1695 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
1696 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
1697 && REG_OR_SUBREG_REG (XEXP (x, 1))))
1698 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
1699 || subcode == ASHIFTRT || subcode == LSHIFTRT
1700 || subcode == ROTATE || subcode == ROTATERT
1701 || (subcode == MULT
1702 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
1703 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
1704 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
1705 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
1706 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
1707 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
1708 && REG_OR_SUBREG_REG (XEXP (x, 0))))
1709 return 1;
1710 /* Fall through */
1711
1712 case PLUS:
1713 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1714 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
1715 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
1716 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
1717 && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
1718 ? 0 : 8));
1719
1720 /* Fall through */
1721 case AND: case XOR: case IOR:
1722 extra_cost = 0;
1723
1724 /* Normally the frame registers will be spilt into reg+const during
1725 reload, so it is a bad idea to combine them with other instructions,
1726 since then they might not be moved outside of loops. As a compromise
1727 we allow integration with ops that have a constant as their second
1728 operand. */
1729 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
1730 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
1731 && GET_CODE (XEXP (x, 1)) != CONST_INT)
1732 || (REG_OR_SUBREG_REG (XEXP (x, 0))
1733 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
1734 extra_cost = 4;
1735
1736 if (mode == DImode)
1737 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
1738 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
1739 || (GET_CODE (XEXP (x, 1)) == CONST_INT
74bbc178 1740 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
e2c671ba
RE
1741 ? 0 : 8));
1742
1743 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
1744 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
1745 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
1746 || (GET_CODE (XEXP (x, 1)) == CONST_INT
74bbc178 1747 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
e2c671ba
RE
1748 ? 0 : 4));
1749
1750 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
1751 return (1 + extra_cost
1752 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
1753 || subcode == LSHIFTRT || subcode == ASHIFTRT
1754 || subcode == ROTATE || subcode == ROTATERT
1755 || (subcode == MULT
1756 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1757 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
ad076f4e 1758 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
e2c671ba
RE
1759 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
1760 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
ad076f4e 1761 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
e2c671ba
RE
1762 ? 0 : 4));
1763
1764 return 8;
1765
1766 case MULT:
b111229a
RE
1767 /* There is no point basing this on the tuning, since it is always the
1768 fast variant if it exists at all */
2b835d68
RE
1769 if (arm_fast_multiply && mode == DImode
1770 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
1771 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
1772 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
1773 return 8;
1774
e2c671ba
RE
1775 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1776 || mode == DImode)
1777 return 30;
1778
1779 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
1780 {
2b835d68
RE
1781 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
1782 & (unsigned HOST_WIDE_INT) 0xffffffff);
e2c671ba
RE
1783 int add_cost = const_ok_for_arm (i) ? 4 : 8;
1784 int j;
b111229a 1785 /* Tune as appropriate */
aec3cfba 1786 int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
2a5307b1 1787
2b835d68 1788 for (j = 0; i && j < 32; j += booth_unit_size)
e2c671ba 1789 {
2b835d68 1790 i >>= booth_unit_size;
e2c671ba
RE
1791 add_cost += 2;
1792 }
1793
1794 return add_cost;
1795 }
1796
aec3cfba 1797 return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
2b835d68 1798 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
e2c671ba
RE
1799 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
1800
56636818
JL
1801 case TRUNCATE:
1802 if (arm_fast_multiply && mode == SImode
1803 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
1804 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
1805 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
1806 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
1807 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
1808 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
1809 return 8;
1810 return 99;
1811
e2c671ba
RE
1812 case NEG:
1813 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1814 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
1815 /* Fall through */
1816 case NOT:
1817 if (mode == DImode)
1818 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
1819
1820 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
1821
1822 case IF_THEN_ELSE:
1823 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
1824 return 14;
1825 return 2;
1826
1827 case COMPARE:
1828 return 1;
1829
1830 case ABS:
1831 return 4 + (mode == DImode ? 4 : 0);
1832
1833 case SIGN_EXTEND:
1834 if (GET_MODE (XEXP (x, 0)) == QImode)
1835 return (4 + (mode == DImode ? 4 : 0)
1836 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
1837 /* Fall through */
1838 case ZERO_EXTEND:
1839 switch (GET_MODE (XEXP (x, 0)))
1840 {
1841 case QImode:
1842 return (1 + (mode == DImode ? 4 : 0)
1843 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
1844
1845 case HImode:
1846 return (4 + (mode == DImode ? 4 : 0)
1847 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
1848
1849 case SImode:
1850 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
ad076f4e
RE
1851
1852 default:
1853 break;
e2c671ba
RE
1854 }
1855 abort ();
1856
1857 default:
1858 return 99;
1859 }
1860}
32de079a
RE
1861
1862int
1863arm_adjust_cost (insn, link, dep, cost)
1864 rtx insn;
1865 rtx link;
1866 rtx dep;
1867 int cost;
1868{
1869 rtx i_pat, d_pat;
1870
b36ba79f
RE
1871 /* XXX This is not strictly true for the FPA. */
1872 if (REG_NOTE_KIND(link) == REG_DEP_ANTI
1873 || REG_NOTE_KIND(link) == REG_DEP_OUTPUT)
1874 return 0;
1875
32de079a
RE
1876 if ((i_pat = single_set (insn)) != NULL
1877 && GET_CODE (SET_SRC (i_pat)) == MEM
1878 && (d_pat = single_set (dep)) != NULL
1879 && GET_CODE (SET_DEST (d_pat)) == MEM)
1880 {
1881 /* This is a load after a store, there is no conflict if the load reads
1882 from a cached area. Assume that loads from the stack, and from the
1883 constant pool are cached, and that others will miss. This is a
1884 hack. */
1885
32de079a
RE
1886 if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat), 0))
1887 || reg_mentioned_p (stack_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
1888 || reg_mentioned_p (frame_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
1889 || reg_mentioned_p (hard_frame_pointer_rtx,
1890 XEXP (SET_SRC (i_pat), 0)))
949d79eb 1891 return 1;
32de079a
RE
1892 }
1893
1894 return cost;
1895}
1896
ff9940b0
RE
1897/* This code has been fixed for cross compilation. */
1898
1899static int fpa_consts_inited = 0;
1900
62b10bbc
NC
1901char * strings_fpa[8] =
1902{
2b835d68
RE
1903 "0", "1", "2", "3",
1904 "4", "5", "0.5", "10"
1905};
ff9940b0
RE
1906
1907static REAL_VALUE_TYPE values_fpa[8];
1908
1909static void
1910init_fpa_table ()
1911{
1912 int i;
1913 REAL_VALUE_TYPE r;
1914
1915 for (i = 0; i < 8; i++)
1916 {
1917 r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
1918 values_fpa[i] = r;
1919 }
f3bb6135 1920
ff9940b0
RE
1921 fpa_consts_inited = 1;
1922}
1923
cce8749e
CH
1924/* Return TRUE if rtx X is a valid immediate FPU constant. */
1925
1926int
1927const_double_rtx_ok_for_fpu (x)
1928 rtx x;
1929{
ff9940b0
RE
1930 REAL_VALUE_TYPE r;
1931 int i;
1932
1933 if (!fpa_consts_inited)
1934 init_fpa_table ();
1935
1936 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
1937 if (REAL_VALUE_MINUS_ZERO (r))
1938 return 0;
f3bb6135 1939
ff9940b0
RE
1940 for (i = 0; i < 8; i++)
1941 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
1942 return 1;
f3bb6135 1943
ff9940b0 1944 return 0;
f3bb6135 1945}
ff9940b0
RE
1946
1947/* Return TRUE if rtx X is a valid immediate FPU constant. */
1948
1949int
1950neg_const_double_rtx_ok_for_fpu (x)
1951 rtx x;
1952{
1953 REAL_VALUE_TYPE r;
1954 int i;
1955
1956 if (!fpa_consts_inited)
1957 init_fpa_table ();
1958
1959 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
1960 r = REAL_VALUE_NEGATE (r);
1961 if (REAL_VALUE_MINUS_ZERO (r))
1962 return 0;
f3bb6135 1963
ff9940b0
RE
1964 for (i = 0; i < 8; i++)
1965 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
1966 return 1;
f3bb6135 1967
ff9940b0 1968 return 0;
f3bb6135 1969}
cce8749e
CH
1970\f
1971/* Predicates for `match_operand' and `match_operator'. */
1972
ff9940b0 1973/* s_register_operand is the same as register_operand, but it doesn't accept
56a38cec
DE
1974 (SUBREG (MEM)...).
1975
1976 This function exists because at the time it was put in it led to better
1977 code. SUBREG(MEM) always needs a reload in the places where
1978 s_register_operand is used, and this seemed to lead to excessive
1979 reloading. */
ff9940b0
RE
1980
1981int
1982s_register_operand (op, mode)
1983 register rtx op;
1984 enum machine_mode mode;
1985{
1986 if (GET_MODE (op) != mode && mode != VOIDmode)
1987 return 0;
1988
1989 if (GET_CODE (op) == SUBREG)
f3bb6135 1990 op = SUBREG_REG (op);
ff9940b0
RE
1991
1992 /* We don't consider registers whose class is NO_REGS
1993 to be a register operand. */
1994 return (GET_CODE (op) == REG
1995 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
1996 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
1997}
1998
e2c671ba
RE
/* Only accept reg, subreg(reg), const_int.  Note that, unlike
   arm_rhs_operand, a CONST_INT of any value is accepted; the mode
   check is skipped for constants since CONST_INT has VOIDmode.  */

int
reg_or_int_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_INT)
    return 1;

  if (GET_MODE (op) != mode && mode != VOIDmode)
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* We don't consider registers whose class is NO_REGS
     to be a register operand.  */
  return (GET_CODE (op) == REG
	  && (REGNO (op) >= FIRST_PSEUDO_REGISTER
	      || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
}
2021
ff9940b0
RE
/* Return 1 if OP is an item in memory, given that we are in reload.
   Accepts anything non-constant that either has no hard register yet
   (true_regnum == -1, e.g. a MEM) or is still a pseudo that will be
   spilled to memory.  */

int
reload_memory_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int regno = true_regnum (op);

  return (! CONSTANT_P (op)
	  && (regno == -1
	      || (GET_CODE (op) == REG
		  && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
}
2036
4d818c85
RE
/* Return 1 if OP is a valid memory address, but not valid for a signed byte
   memory access (architecture V4).  The ldrsb instruction has a more
   restricted addressing mode than plain ldrb.  */
int
bad_signed_byte_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! memory_operand (op, mode) || GET_CODE (op) != MEM)
    return 0;

  op = XEXP (op, 0);

  /* A sum of anything more complex than reg + reg or reg + const is bad.  */
  if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
      && (! s_register_operand (XEXP (op, 0), VOIDmode)
	  || (! s_register_operand (XEXP (op, 1), VOIDmode)
	      && GET_CODE (XEXP (op, 1)) != CONST_INT)))
    return 1;

  /* Big constants are also bad: ldrsb only takes an 8-bit offset.  */
  if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (INTVAL (XEXP (op, 1)) > 0xff
	  || -INTVAL (XEXP (op, 1)) > 0xff))
    return 1;

  /* Everything else is good, or will automatically be made so.  */
  return 0;
}
2065
cce8749e
CH
2066/* Return TRUE for valid operands for the rhs of an ARM instruction. */
2067
2068int
2069arm_rhs_operand (op, mode)
2070 rtx op;
2071 enum machine_mode mode;
2072{
ff9940b0 2073 return (s_register_operand (op, mode)
cce8749e 2074 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
f3bb6135 2075}
cce8749e 2076
ff9940b0
RE
2077/* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
2078 */
2079
2080int
2081arm_rhsm_operand (op, mode)
2082 rtx op;
2083 enum machine_mode mode;
2084{
2085 return (s_register_operand (op, mode)
2086 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
2087 || memory_operand (op, mode));
f3bb6135 2088}
ff9940b0
RE
2089
2090/* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
2091 constant that is valid when negated. */
2092
2093int
2094arm_add_operand (op, mode)
2095 rtx op;
2096 enum machine_mode mode;
2097{
2098 return (s_register_operand (op, mode)
2099 || (GET_CODE (op) == CONST_INT
2100 && (const_ok_for_arm (INTVAL (op))
2101 || const_ok_for_arm (-INTVAL (op)))));
f3bb6135 2102}
ff9940b0
RE
2103
2104int
2105arm_not_operand (op, mode)
2106 rtx op;
2107 enum machine_mode mode;
2108{
2109 return (s_register_operand (op, mode)
2110 || (GET_CODE (op) == CONST_INT
2111 && (const_ok_for_arm (INTVAL (op))
2112 || const_ok_for_arm (~INTVAL (op)))));
f3bb6135 2113}
ff9940b0 2114
5165176d
RE
/* Return TRUE if the operand is a memory reference which contains an
   offsettable address.  A VOIDmode request defers to the operand's own
   mode.  */
int
offsettable_memory_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);

  /* The reload_completed | reload_in_progress flag tells
     offsettable_address_p whether only hard registers may appear.  */
  return (mode == GET_MODE (op)
	  && GET_CODE (op) == MEM
	  && offsettable_address_p (reload_completed | reload_in_progress,
				    mode, XEXP (op, 0)));
}
2130
/* Return TRUE if the operand is a memory reference which is, or can be
   made word aligned by adjusting the offset.  The address must be a
   (possibly SUBREG'd) register, alone or plus a constant, whose pointer
   alignment is known to be at least 4 bytes.  */
int
alignable_memory_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  rtx reg;

  if (mode == VOIDmode)
    mode = GET_MODE (op);

  if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
    return 0;

  op = XEXP (op, 0);

  /* The embedded assignments capture whichever REG forms the base of
     the address, so its alignment can be checked afterwards.  */
  return ((GET_CODE (reg = op) == REG
	   || (GET_CODE (op) == SUBREG
	       && GET_CODE (reg = SUBREG_REG (op)) == REG)
	   || (GET_CODE (op) == PLUS
	       && GET_CODE (XEXP (op, 1)) == CONST_INT
	       && (GET_CODE (reg = XEXP (op, 0)) == REG
		   || (GET_CODE (XEXP (op, 0)) == SUBREG
		       && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
	  && REGNO_POINTER_ALIGN (REGNO (reg)) >= 4);
}
2158
b111229a
RE
/* Similar to s_register_operand, but does not allow hard integer
   registers: only pseudos or hard registers of class FPU_REGS
   qualify.  */
int
f_register_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (GET_MODE (op) != mode && mode != VOIDmode)
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* We don't consider registers whose class is NO_REGS
     to be a register operand.  */
  return (GET_CODE (op) == REG
	  && (REGNO (op) >= FIRST_PSEUDO_REGISTER
	      || REGNO_REG_CLASS (REGNO (op)) == FPU_REGS));
}
2178
cce8749e
CH
2179/* Return TRUE for valid operands for the rhs of an FPU instruction. */
2180
2181int
2182fpu_rhs_operand (op, mode)
2183 rtx op;
2184 enum machine_mode mode;
2185{
ff9940b0 2186 if (s_register_operand (op, mode))
f3bb6135 2187 return TRUE;
cce8749e
CH
2188 else if (GET_CODE (op) == CONST_DOUBLE)
2189 return (const_double_rtx_ok_for_fpu (op));
f3bb6135
RE
2190
2191 return FALSE;
2192}
cce8749e 2193
ff9940b0
RE
2194int
2195fpu_add_operand (op, mode)
2196 rtx op;
2197 enum machine_mode mode;
2198{
2199 if (s_register_operand (op, mode))
f3bb6135 2200 return TRUE;
ff9940b0 2201 else if (GET_CODE (op) == CONST_DOUBLE)
f3bb6135
RE
2202 return (const_double_rtx_ok_for_fpu (op)
2203 || neg_const_double_rtx_ok_for_fpu (op));
2204
2205 return FALSE;
ff9940b0
RE
2206}
2207
cce8749e
CH
2208/* Return nonzero if OP is a constant power of two. */
2209
2210int
2211power_of_two_operand (op, mode)
2212 rtx op;
74bbc178 2213 enum machine_mode mode ATTRIBUTE_UNUSED;
cce8749e
CH
2214{
2215 if (GET_CODE (op) == CONST_INT)
2216 {
f3bb6135
RE
2217 HOST_WIDE_INT value = INTVAL(op);
2218 return value != 0 && (value & (value - 1)) == 0;
cce8749e 2219 }
f3bb6135
RE
2220 return FALSE;
2221}
cce8749e
CH
2222
/* Return TRUE for a valid operand of a DImode operation.
   Either: REG, SUBREG, CONST_DOUBLE, CONST_INT or MEM(DImode_address).
   Note that this disallows MEM(REG+REG), but allows
   MEM(PRE/POST_INC/DEC(REG)).  */

int
di_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (s_register_operand (op, mode))
    return TRUE;

  /* Look through a SUBREG wrapper (e.g. SUBREG of a MEM).  */
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  switch (GET_CODE (op))
    {
    case CONST_DOUBLE:
    case CONST_INT:
      return TRUE;

    case MEM:
      /* The address must be valid for a DImode access.  */
      return memory_address_p (DImode, XEXP (op, 0));

    default:
      return FALSE;
    }
}
cce8749e 2252
/* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
   Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
   Note that this disallows MEM(REG+REG), but allows
   MEM(PRE/POST_INC/DEC(REG)).  */

int
soft_df_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (s_register_operand (op, mode))
    return TRUE;

  /* Look through a SUBREG wrapper (e.g. SUBREG of a MEM).  */
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  switch (GET_CODE (op))
    {
    case CONST_DOUBLE:
      return TRUE;

    case MEM:
      /* The address must be valid for a DFmode access.  */
      return memory_address_p (DFmode, XEXP (op, 0));

    default:
      return FALSE;
    }
}
2281
cce8749e
CH
2282/* Return TRUE for valid index operands. */
2283
2284int
2285index_operand (op, mode)
2286 rtx op;
2287 enum machine_mode mode;
2288{
ff9940b0
RE
2289 return (s_register_operand(op, mode)
2290 || (immediate_operand (op, mode)
2291 && INTVAL (op) < 4096 && INTVAL (op) > -4096));
f3bb6135 2292}
cce8749e 2293
ff9940b0
RE
2294/* Return TRUE for valid shifts by a constant. This also accepts any
2295 power of two on the (somewhat overly relaxed) assumption that the
2296 shift operator in this case was a mult. */
2297
2298int
2299const_shift_operand (op, mode)
2300 rtx op;
2301 enum machine_mode mode;
2302{
2303 return (power_of_two_operand (op, mode)
2304 || (immediate_operand (op, mode)
2305 && (INTVAL (op) < 32 && INTVAL (op) > 0)));
f3bb6135 2306}
ff9940b0 2307
cce8749e
CH
2308/* Return TRUE for arithmetic operators which can be combined with a multiply
2309 (shift). */
2310
2311int
2312shiftable_operator (x, mode)
2313 rtx x;
2314 enum machine_mode mode;
2315{
2316 if (GET_MODE (x) != mode)
2317 return FALSE;
2318 else
2319 {
2320 enum rtx_code code = GET_CODE (x);
2321
2322 return (code == PLUS || code == MINUS
2323 || code == IOR || code == XOR || code == AND);
2324 }
f3bb6135 2325}
cce8749e
CH
2326
2327/* Return TRUE for shift operators. */
2328
2329int
2330shift_operator (x, mode)
2331 rtx x;
2332 enum machine_mode mode;
2333{
2334 if (GET_MODE (x) != mode)
2335 return FALSE;
2336 else
2337 {
2338 enum rtx_code code = GET_CODE (x);
2339
ff9940b0 2340 if (code == MULT)
aec3cfba 2341 return power_of_two_operand (XEXP (x, 1), mode);
f3bb6135 2342
e2c671ba
RE
2343 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
2344 || code == ROTATERT);
cce8749e 2345 }
f3bb6135 2346}
ff9940b0
RE
2347
2348int equality_operator (x, mode)
f3bb6135 2349 rtx x;
74bbc178 2350 enum machine_mode mode ATTRIBUTE_UNUSED;
ff9940b0 2351{
f3bb6135 2352 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
ff9940b0
RE
2353}
2354
2355/* Return TRUE for SMIN SMAX UMIN UMAX operators. */
2356
2357int
2358minmax_operator (x, mode)
2359 rtx x;
2360 enum machine_mode mode;
2361{
2362 enum rtx_code code = GET_CODE (x);
2363
2364 if (GET_MODE (x) != mode)
2365 return FALSE;
f3bb6135 2366
ff9940b0 2367 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
f3bb6135 2368}
ff9940b0
RE
2369
2370/* return TRUE if x is EQ or NE */
2371
/* Return TRUE if this is the condition code register; if we aren't given
   a mode, accept any class CCmode register.  */

int
cc_register (x, mode)
     rtx x;
     enum machine_mode mode;
{
  if (mode == VOIDmode)
    {
      mode = GET_MODE (x);
      if (GET_MODE_CLASS (mode) != MODE_CC)
	return FALSE;
    }

  /* NOTE(review): 24 appears to be the hard register number of the
     condition-code register on this port -- confirm against the
     register definitions in arm.h.  */
  if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
    return TRUE;

  return FALSE;
}
5bbe2d40
RE
2392
/* Return TRUE if this is the condition code register; if we aren't given
   a mode, accept any class CCmode register which indicates a dominance
   expression.  */

int
dominant_cc_register (x, mode)
     rtx x;
     enum machine_mode mode;
{
  if (mode == VOIDmode)
    {
      mode = GET_MODE (x);
      if (GET_MODE_CLASS (mode) != MODE_CC)
	return FALSE;
    }

  /* Only the CC_D* modes (produced when combining two comparisons into
     one dominating comparison) qualify.  */
  if (mode != CC_DNEmode && mode != CC_DEQmode
      && mode != CC_DLEmode && mode != CC_DLTmode
      && mode != CC_DGEmode && mode != CC_DGTmode
      && mode != CC_DLEUmode && mode != CC_DLTUmode
      && mode != CC_DGEUmode && mode != CC_DGTUmode)
    return FALSE;

  /* NOTE(review): 24 appears to be the condition-code hard register on
     this port -- confirm against the register definitions in arm.h.  */
  if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
    return TRUE;

  return FALSE;
}
2421
2b835d68
RE
/* Return TRUE if X references a SYMBOL_REF anywhere in its expression
   tree.  Recurses through every 'e' (expression) and 'E' (vector)
   slot of X.  */
int
symbol_mentioned_p (x)
     rtx x;
{
  register char * fmt;
  register int i;

  if (GET_CODE (x) == SYMBOL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  register int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (symbol_mentioned_p (XVECEXP (x, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
	return 1;
    }

  return 0;
}
2450
/* Return TRUE if X references a LABEL_REF anywhere in its expression
   tree.  Mirrors symbol_mentioned_p but looks for labels instead of
   symbols.  */
int
label_mentioned_p (x)
     rtx x;
{
  register char * fmt;
  register int i;

  if (GET_CODE (x) == LABEL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  register int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (label_mentioned_p (XVECEXP (x, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
	return 1;
    }

  return 0;
}
2479
ff9940b0
RE
2480enum rtx_code
2481minmax_code (x)
f3bb6135 2482 rtx x;
ff9940b0
RE
2483{
2484 enum rtx_code code = GET_CODE (x);
2485
2486 if (code == SMAX)
2487 return GE;
f3bb6135 2488 else if (code == SMIN)
ff9940b0 2489 return LE;
f3bb6135 2490 else if (code == UMIN)
ff9940b0 2491 return LEU;
f3bb6135 2492 else if (code == UMAX)
ff9940b0 2493 return GEU;
f3bb6135 2494
ff9940b0
RE
2495 abort ();
2496}
2497
/* Return 1 if memory locations A and B are adjacent words (4 bytes
   apart, in either order) off the same base register.  Only handles
   addresses of the form REG or PLUS (REG, CONST_INT).  */

int
adjacent_mem_locations (a, b)
     rtx a, b;
{
  int val0 = 0, val1 = 0;
  int reg0, reg1;

  if ((GET_CODE (XEXP (a, 0)) == REG
       || (GET_CODE (XEXP (a, 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
      && (GET_CODE (XEXP (b, 0)) == REG
	  || (GET_CODE (XEXP (b, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
    {
      /* A bare REG address is treated as offset 0.  */
      if (GET_CODE (XEXP (a, 0)) == PLUS)
	{
	  reg0 = REGNO (XEXP (XEXP (a, 0), 0));
	  val0 = INTVAL (XEXP (XEXP (a, 0), 1));
	}
      else
	reg0 = REGNO (XEXP (a, 0));
      if (GET_CODE (XEXP (b, 0)) == PLUS)
	{
	  reg1 = REGNO (XEXP (XEXP (b, 0), 0));
	  val1 = INTVAL (XEXP (XEXP (b, 0), 1));
	}
      else
	reg1 = REGNO (XEXP (b, 0));
      return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
    }
  return 0;
}
2532
/* Return 1 if OP is a load multiple operation.  It is known to be
   parallel and the first section will be tested.  The PARALLEL must be
   a sequence of SETs loading consecutive registers from consecutive
   word offsets of a common base address, optionally preceded by a
   write-back update of the base register.  */

int
load_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  HOST_WIDE_INT count = XVECLEN (op, 0);
  int dest_regno;
  rtx src_addr;
  HOST_WIDE_INT i = 1, base = 0;
  rtx elt;

  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET)
    return 0;

  /* Check to see if this might be a write-back.  */
  if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
    {
      i++;
      base = 1;

      /* Now check it more carefully: the base register must be updated
	 by (count - 2) * 4 and the final element must clobber it.  */
      if (GET_CODE (SET_DEST (elt)) != REG
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
	  || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
	  || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
	  || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
	  || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
	  || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
	  || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
	      != REGNO (SET_DEST (elt)))
	return 0;

      count--;
    }

  /* Perform a quick check so we don't blow up below.  */
  if (count <= i
      || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
    return 0;

  /* The first load fixes the starting register and base address.  */
  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);

  for (; i < count; i++)
    {
      elt = XVECEXP (op, 0, i);

      /* Each subsequent element must load the next register from the
	 next word: (base_addr + (i - base) * 4).  */
      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || REGNO (SET_DEST (elt)) != dest_regno + i - base
	  || GET_CODE (SET_SRC (elt)) != MEM
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
	  || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
	return 0;
    }

  return 1;
}
2601
/* Return 1 if OP is a store multiple operation.  It is known to be
   parallel and the first section will be tested.  Mirror image of
   load_multiple_operation: consecutive registers stored to consecutive
   word offsets of a common base address, with an optional leading
   write-back update of the base register.  */

int
store_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  HOST_WIDE_INT count = XVECLEN (op, 0);
  int src_regno;
  rtx dest_addr;
  HOST_WIDE_INT i = 1, base = 0;
  rtx elt;

  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET)
    return 0;

  /* Check to see if this might be a write-back.  */
  if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
    {
      i++;
      base = 1;

      /* Now check it more carefully: the base register must be updated
	 by (count - 2) * 4 and the final element must clobber it.  */
      if (GET_CODE (SET_DEST (elt)) != REG
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
	  || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
	  || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
	  || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
	  || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
	  || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
	  || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
	      != REGNO (SET_DEST (elt)))
	return 0;

      count--;
    }

  /* Perform a quick check so we don't blow up below.  */
  if (count <= i
      || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
    return 0;

  /* The first store fixes the starting register and base address.  */
  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);

  for (; i < count; i++)
    {
      elt = XVECEXP (op, 0, i);

      /* Each subsequent element must store the next register at the
	 next word: (base_addr + (i - base) * 4).  */
      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_SRC (elt)) != REG
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || REGNO (SET_SRC (elt)) != src_regno + i - base
	  || GET_CODE (SET_DEST (elt)) != MEM
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
	  || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
	return 0;
    }

  return 1;
}
e2c671ba 2670
84ed5e79
RE
/* Examine NOPS register/memory operand pairs (registers in
   OPERANDS[0..NOPS-1], memory references in OPERANDS[NOPS..2*NOPS-1])
   and decide whether they can be combined into a single load-multiple.

   Returns 0 if not possible, otherwise an addressing-mode code:
   1 = ldmia, 2 = ldmib, 3 = ldmda, 4 = ldmdb, 5 = add/sub then ldmia.
   On success, if BASE is non-null, *REGS receives the registers in
   ascending order, *BASE the base register number, and *LOAD_OFFSET
   the lowest memory offset.  */
int
load_multiple_sequence (operands, nops, regs, base, load_offset)
     rtx * operands;
     int nops;
     int * regs;
     int * base;
     HOST_WIDE_INT * load_offset;
{
  int unsorted_regs[4];
  HOST_WIDE_INT unsorted_offsets[4];
  int order[4];
  int base_reg = -1;
  int i;

  /* Can only handle 2, 3, or 4 insns at present, though could be easily
     extended if required.  */
  if (nops < 2 || nops > 4)
    abort ();

  /* Loop over the operands and check that the memory references are
     suitable (ie immediate offsets from the same base register).  At
     the same time, extract the target register, and the memory
     offsets.  */
  for (i = 0; i < nops; i++)
    {
      rtx reg;
      rtx offset;

      /* Convert a subreg of a mem into the mem itself.  */
      if (GET_CODE (operands[nops + i]) == SUBREG)
	operands[nops + i] = alter_subreg(operands[nops + i]);

      if (GET_CODE (operands[nops + i]) != MEM)
	abort ();

      /* Don't reorder volatile memory references; it doesn't seem worth
	 looking for the case where the order is ok anyway.  */
      if (MEM_VOLATILE_P (operands[nops + i]))
	return 0;

      offset = const0_rtx;

      /* Accept (mem reg) or (mem (plus reg const_int)); the embedded
	 assignments capture the base register and the offset.  */
      if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
	   || (GET_CODE (reg) == SUBREG
	       && GET_CODE (reg = SUBREG_REG (reg)) == REG))
	  || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
	      && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
		   == REG)
		  || (GET_CODE (reg) == SUBREG
		      && GET_CODE (reg = SUBREG_REG (reg)) == REG))
	      && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
		  == CONST_INT)))
	{
	  if (i == 0)
	    {
	      base_reg = REGNO(reg);
	      unsorted_regs[0] = (GET_CODE (operands[i]) == REG
				  ? REGNO (operands[i])
				  : REGNO (SUBREG_REG (operands[i])));
	      order[0] = 0;
	    }
	  else
	    {
	      if (base_reg != REGNO (reg))
		/* Not addressed from the same base register.  */
		return 0;

	      unsorted_regs[i] = (GET_CODE (operands[i]) == REG
				  ? REGNO (operands[i])
				  : REGNO (SUBREG_REG (operands[i])));
	      if (unsorted_regs[i] < unsorted_regs[order[0]])
		order[0] = i;
	    }

	  /* If it isn't an integer register, or if it overwrites the
	     base register but isn't the last insn in the list, then
	     we can't do this.  */
	  if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
	      || (i != nops - 1 && unsorted_regs[i] == base_reg))
	    return 0;

	  unsorted_offsets[i] = INTVAL (offset);
	}
      else
	/* Not a suitable memory address.  */
	return 0;
    }

  /* All the useful information has now been extracted from the
     operands into unsorted_regs and unsorted_offsets; additionally,
     order[0] has been set to the lowest numbered register in the
     list.  Sort the registers into order, and check that the memory
     offsets are ascending and adjacent.  */

  for (i = 1; i < nops; i++)
    {
      int j;

      /* Selection sort: find the next-larger register number.  */
      order[i] = order[i - 1];
      for (j = 0; j < nops; j++)
	if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
	    && (order[i] == order[i - 1]
		|| unsorted_regs[j] < unsorted_regs[order[i]]))
	  order[i] = j;

      /* Have we found a suitable register? if not, one must be used more
	 than once.  */
      if (order[i] == order[i - 1])
	return 0;

      /* Is the memory address adjacent and ascending? */
      if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
	return 0;
    }

  if (base)
    {
      *base = base_reg;

      for (i = 0; i < nops; i++)
	regs[i] = unsorted_regs[order[i]];

      *load_offset = unsorted_offsets[order[0]];
    }

  if (unsorted_offsets[order[0]] == 0)
    return 1; /* ldmia */

  if (unsorted_offsets[order[0]] == 4)
    return 2; /* ldmib */

  if (unsorted_offsets[order[nops - 1]] == 0)
    return 3; /* ldmda */

  if (unsorted_offsets[order[nops - 1]] == -4)
    return 4; /* ldmdb */

  /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
     if the offset isn't small enough.  The reason 2 ldrs are faster
     is because these ARMs are able to do more than one cache access
     in a single cycle.  The ARM9 and StrongARM have Harvard caches,
     whilst the ARM8 has a double bandwidth cache.  This means that
     these cores can do both an instruction fetch and a data fetch in
     a single cycle, so the trick of calculating the address into a
     scratch register (one of the result regs) and then doing a load
     multiple actually becomes slower (and no smaller in code size).
     That is the transformation

 	ldr	rd1, [rbase + offset]
 	ldr	rd2, [rbase + offset + 4]

     to

 	add	rd1, rbase, offset
 	ldmia	rd1, {rd1, rd2}

     produces worse code -- '3 cycles + any stalls on rd2' instead of
     '2 cycles + any stalls on rd2'.  On ARMs with only one cache
     access per cycle, the first sequence could never complete in less
     than 6 cycles, whereas the ldm sequence would only take 5 and
     would make better use of sequential accesses if not hitting the
     cache.

     We cheat here and test 'arm_ld_sched' which we currently know to
     only be true for the ARM8, ARM9 and StrongARM.  If this ever
     changes, then the test below needs to be reworked.  */
  if (nops == 2 && arm_ld_sched)
    return 0;

  /* Can't do it without setting up the offset, only do this if it takes
     no more than one insn.  */
  return (const_ok_for_arm (unsorted_offsets[order[0]])
	  || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
}
2845
/* Emit the assembly for a load-multiple covering OPERANDS (as analysed
   by load_multiple_sequence).  Code 5 first emits an add/sub to form
   the base address in the lowest result register, then an ldmia from
   it.  Returns "" since the output is emitted directly.  */
char *
emit_ldm_seq (operands, nops)
     rtx * operands;
     int nops;
{
  int regs[4];
  int base_reg;
  HOST_WIDE_INT offset;
  char buf[100];
  int i;

  switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
    {
    case 1:
      strcpy (buf, "ldm%?ia\t");
      break;

    case 2:
      strcpy (buf, "ldm%?ib\t");
      break;

    case 3:
      strcpy (buf, "ldm%?da\t");
      break;

    case 4:
      strcpy (buf, "ldm%?db\t");
      break;

    case 5:
      /* Materialize base + offset in the first destination register,
	 then load from there with ldmia.  */
      if (offset >= 0)
	sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
		 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
		 (long) offset);
      else
	sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
		 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
		 (long) -offset);
      output_asm_insn (buf, operands);
      base_reg = regs[0];
      strcpy (buf, "ldm%?ia\t");
      break;

    default:
      abort ();
    }

  sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
	   reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);

  for (i = 1; i < nops; i++)
    sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
	     reg_names[regs[i]]);

  strcat (buf, "}\t%@ phole ldm");

  output_asm_insn (buf, operands);
  return "";
}
2905
/* Examine NOPS register/memory operand pairs (registers in
   OPERANDS[0..NOPS-1], memory references in OPERANDS[NOPS..2*NOPS-1])
   and decide whether they can be combined into a single store-multiple.

   Returns 0 if not possible, otherwise an addressing-mode code:
   1 = stmia, 2 = stmib, 3 = stmda, 4 = stmdb.  Unlike the load case,
   there is no add-then-stm fallback (no scratch register is free).
   On success, if BASE is non-null, *REGS receives the registers in
   ascending order, *BASE the base register number, and *LOAD_OFFSET
   the lowest memory offset.  */
int
store_multiple_sequence (operands, nops, regs, base, load_offset)
     rtx * operands;
     int nops;
     int * regs;
     int * base;
     HOST_WIDE_INT * load_offset;
{
  int unsorted_regs[4];
  HOST_WIDE_INT unsorted_offsets[4];
  int order[4];
  int base_reg = -1;
  int i;

  /* Can only handle 2, 3, or 4 insns at present, though could be easily
     extended if required.  */
  if (nops < 2 || nops > 4)
    abort ();

  /* Loop over the operands and check that the memory references are
     suitable (ie immediate offsets from the same base register).  At
     the same time, extract the target register, and the memory
     offsets.  */
  for (i = 0; i < nops; i++)
    {
      rtx reg;
      rtx offset;

      /* Convert a subreg of a mem into the mem itself.  */
      if (GET_CODE (operands[nops + i]) == SUBREG)
	operands[nops + i] = alter_subreg(operands[nops + i]);

      if (GET_CODE (operands[nops + i]) != MEM)
	abort ();

      /* Don't reorder volatile memory references; it doesn't seem worth
	 looking for the case where the order is ok anyway.  */
      if (MEM_VOLATILE_P (operands[nops + i]))
	return 0;

      offset = const0_rtx;

      /* Accept (mem reg) or (mem (plus reg const_int)); the embedded
	 assignments capture the base register and the offset.  */
      if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
	   || (GET_CODE (reg) == SUBREG
	       && GET_CODE (reg = SUBREG_REG (reg)) == REG))
	  || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
	      && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
		   == REG)
		  || (GET_CODE (reg) == SUBREG
		      && GET_CODE (reg = SUBREG_REG (reg)) == REG))
	      && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
		  == CONST_INT)))
	{
	  if (i == 0)
	    {
	      base_reg = REGNO (reg);
	      unsorted_regs[0] = (GET_CODE (operands[i]) == REG
				  ? REGNO (operands[i])
				  : REGNO (SUBREG_REG (operands[i])));
	      order[0] = 0;
	    }
	  else
	    {
	      if (base_reg != REGNO (reg))
		/* Not addressed from the same base register.  */
		return 0;

	      unsorted_regs[i] = (GET_CODE (operands[i]) == REG
				  ? REGNO (operands[i])
				  : REGNO (SUBREG_REG (operands[i])));
	      if (unsorted_regs[i] < unsorted_regs[order[0]])
		order[0] = i;
	    }

	  /* If it isn't an integer register, then we can't do this.  */
	  if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
	    return 0;

	  unsorted_offsets[i] = INTVAL (offset);
	}
      else
	/* Not a suitable memory address.  */
	return 0;
    }

  /* All the useful information has now been extracted from the
     operands into unsorted_regs and unsorted_offsets; additionally,
     order[0] has been set to the lowest numbered register in the
     list.  Sort the registers into order, and check that the memory
     offsets are ascending and adjacent.  */

  for (i = 1; i < nops; i++)
    {
      int j;

      /* Selection sort: find the next-larger register number.  */
      order[i] = order[i - 1];
      for (j = 0; j < nops; j++)
	if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
	    && (order[i] == order[i - 1]
		|| unsorted_regs[j] < unsorted_regs[order[i]]))
	  order[i] = j;

      /* Have we found a suitable register? if not, one must be used more
	 than once.  */
      if (order[i] == order[i - 1])
	return 0;

      /* Is the memory address adjacent and ascending? */
      if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
	return 0;
    }

  if (base)
    {
      *base = base_reg;

      for (i = 0; i < nops; i++)
	regs[i] = unsorted_regs[order[i]];

      *load_offset = unsorted_offsets[order[0]];
    }

  if (unsorted_offsets[order[0]] == 0)
    return 1; /* stmia */

  if (unsorted_offsets[order[0]] == 4)
    return 2; /* stmib */

  if (unsorted_offsets[order[nops - 1]] == 0)
    return 3; /* stmda */

  if (unsorted_offsets[order[nops - 1]] == -4)
    return 4; /* stmdb */

  return 0;
}
3042
/* Emit the assembly for a store-multiple covering OPERANDS (as analysed
   by store_multiple_sequence).  Returns "" since the output is emitted
   directly.  */
char *
emit_stm_seq (operands, nops)
     rtx * operands;
     int nops;
{
  int regs[4];
  int base_reg;
  HOST_WIDE_INT offset;
  char buf[100];
  int i;

  switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
    {
    case 1:
      strcpy (buf, "stm%?ia\t");
      break;

    case 2:
      strcpy (buf, "stm%?ib\t");
      break;

    case 3:
      strcpy (buf, "stm%?da\t");
      break;

    case 4:
      strcpy (buf, "stm%?db\t");
      break;

    default:
      abort ();
    }

  sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
	   reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);

  for (i = 1; i < nops; i++)
    sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
	     reg_names[regs[i]]);

  strcat (buf, "}\t%@ phole stm");

  output_asm_insn (buf, operands);
  return "";
}
3088
e2c671ba
RE
/* Return nonzero if OP is a PARALLEL whose first element is a SET from
   an UNSPEC with index 2.  NOTE(review): index 2 is presumably the
   multi-register-push unspec used by the push_multi pattern in arm.md
   -- confirm against the machine description.  */
int
multi_register_push (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) != PARALLEL
      || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
      || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
      || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != 2))
    return 0;

  return 1;
}
3102
ff9940b0 3103\f
f3bb6135
RE
3104/* Routines for use with attributes */
3105
31fdb4d5
DE
/* Return nonzero if ATTR is a valid attribute for DECL.
   ATTRIBUTES are any existing attributes and ARGS are the arguments
   supplied with ATTR.

   Supported attributes:

   naked: don't output any prologue or epilogue code, the user is assumed
   to do the right thing.  */

int
arm_valid_machine_decl_attribute (decl, attr, args)
     tree decl;
     tree attr;
     tree args;
{
  /* None of the supported attributes take arguments.  */
  if (args != NULL_TREE)
    return 0;

  /* "naked" is only meaningful on function declarations.  */
  if (is_attribute_p ("naked", attr))
    return TREE_CODE (decl) == FUNCTION_DECL;
  return 0;
}
3128
3129/* Return non-zero if FUNC is a naked function. */
3130
3131static int
3132arm_naked_function_p (func)
3133 tree func;
3134{
3135 tree a;
3136
3137 if (TREE_CODE (func) != FUNCTION_DECL)
3138 abort ();
2e943e99 3139
31fdb4d5
DE
3140 a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
3141 return a != NULL_TREE;
3142}
f3bb6135 3143\f
ff9940b0
RE
3144/* Routines for use in generating RTL */
3145
f3bb6135 3146rtx
56636818 3147arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
c6df88cb 3148 in_struct_p, scalar_p)
ff9940b0
RE
3149 int base_regno;
3150 int count;
3151 rtx from;
3152 int up;
3153 int write_back;
56636818
JL
3154 int unchanging_p;
3155 int in_struct_p;
c6df88cb 3156 int scalar_p;
ff9940b0
RE
3157{
3158 int i = 0, j;
3159 rtx result;
3160 int sign = up ? 1 : -1;
56636818 3161 rtx mem;
ff9940b0 3162
43cffd11
RE
3163 result = gen_rtx_PARALLEL (VOIDmode,
3164 rtvec_alloc (count + (write_back ? 2 : 0)));
ff9940b0 3165 if (write_back)
f3bb6135 3166 {
ff9940b0 3167 XVECEXP (result, 0, 0)
43cffd11
RE
3168 = gen_rtx_SET (GET_MODE (from), from,
3169 plus_constant (from, count * 4 * sign));
ff9940b0
RE
3170 i = 1;
3171 count++;
f3bb6135
RE
3172 }
3173
ff9940b0 3174 for (j = 0; i < count; i++, j++)
f3bb6135 3175 {
43cffd11 3176 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
56636818
JL
3177 RTX_UNCHANGING_P (mem) = unchanging_p;
3178 MEM_IN_STRUCT_P (mem) = in_struct_p;
c6df88cb 3179 MEM_SCALAR_P (mem) = scalar_p;
43cffd11
RE
3180 XVECEXP (result, 0, i)
3181 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
f3bb6135
RE
3182 }
3183
ff9940b0 3184 if (write_back)
43cffd11 3185 XVECEXP (result, 0, i) = gen_rtx_CLOBBER (SImode, from);
ff9940b0
RE
3186
3187 return result;
3188}
3189
f3bb6135 3190rtx
56636818 3191arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
c6df88cb 3192 in_struct_p, scalar_p)
ff9940b0
RE
3193 int base_regno;
3194 int count;
3195 rtx to;
3196 int up;
3197 int write_back;
56636818
JL
3198 int unchanging_p;
3199 int in_struct_p;
c6df88cb 3200 int scalar_p;
ff9940b0
RE
3201{
3202 int i = 0, j;
3203 rtx result;
3204 int sign = up ? 1 : -1;
56636818 3205 rtx mem;
ff9940b0 3206
43cffd11
RE
3207 result = gen_rtx_PARALLEL (VOIDmode,
3208 rtvec_alloc (count + (write_back ? 2 : 0)));
ff9940b0 3209 if (write_back)
f3bb6135 3210 {
ff9940b0 3211 XVECEXP (result, 0, 0)
43cffd11
RE
3212 = gen_rtx_SET (GET_MODE (to), to,
3213 plus_constant (to, count * 4 * sign));
ff9940b0
RE
3214 i = 1;
3215 count++;
f3bb6135
RE
3216 }
3217
ff9940b0 3218 for (j = 0; i < count; i++, j++)
f3bb6135 3219 {
43cffd11 3220 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
56636818
JL
3221 RTX_UNCHANGING_P (mem) = unchanging_p;
3222 MEM_IN_STRUCT_P (mem) = in_struct_p;
c6df88cb 3223 MEM_SCALAR_P (mem) = scalar_p;
56636818 3224
43cffd11
RE
3225 XVECEXP (result, 0, i)
3226 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
f3bb6135
RE
3227 }
3228
ff9940b0 3229 if (write_back)
43cffd11 3230 XVECEXP (result, 0, i) = gen_rtx_CLOBBER (SImode, to);
ff9940b0
RE
3231
3232 return result;
3233}
3234
880e2516
RE
3235int
3236arm_gen_movstrqi (operands)
62b10bbc 3237 rtx * operands;
880e2516
RE
3238{
3239 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
ad076f4e 3240 int i;
880e2516 3241 rtx src, dst;
ad076f4e 3242 rtx st_src, st_dst, fin_src, fin_dst;
880e2516 3243 rtx part_bytes_reg = NULL;
56636818
JL
3244 rtx mem;
3245 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
c6df88cb 3246 int dst_scalar_p, src_scalar_p;
880e2516
RE
3247
3248 if (GET_CODE (operands[2]) != CONST_INT
3249 || GET_CODE (operands[3]) != CONST_INT
3250 || INTVAL (operands[2]) > 64
3251 || INTVAL (operands[3]) & 3)
3252 return 0;
3253
3254 st_dst = XEXP (operands[0], 0);
3255 st_src = XEXP (operands[1], 0);
56636818
JL
3256
3257 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
3258 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
c6df88cb 3259 dst_scalar_p = MEM_SCALAR_P (operands[0]);
56636818
JL
3260 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
3261 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
c6df88cb 3262 src_scalar_p = MEM_SCALAR_P (operands[1]);
56636818 3263
880e2516
RE
3264 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
3265 fin_src = src = copy_to_mode_reg (SImode, st_src);
3266
3267 in_words_to_go = (INTVAL (operands[2]) + 3) / 4;
3268 out_words_to_go = INTVAL (operands[2]) / 4;
3269 last_bytes = INTVAL (operands[2]) & 3;
3270
3271 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
43cffd11 3272 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
880e2516
RE
3273
3274 for (i = 0; in_words_to_go >= 2; i+=4)
3275 {
bd9c7e23 3276 if (in_words_to_go > 4)
56636818 3277 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
c6df88cb
MM
3278 src_unchanging_p,
3279 src_in_struct_p,
3280 src_scalar_p));
bd9c7e23
RE
3281 else
3282 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
56636818 3283 FALSE, src_unchanging_p,
c6df88cb 3284 src_in_struct_p, src_scalar_p));
bd9c7e23 3285
880e2516
RE
3286 if (out_words_to_go)
3287 {
bd9c7e23 3288 if (out_words_to_go > 4)
56636818
JL
3289 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
3290 dst_unchanging_p,
c6df88cb
MM
3291 dst_in_struct_p,
3292 dst_scalar_p));
bd9c7e23
RE
3293 else if (out_words_to_go != 1)
3294 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
3295 dst, TRUE,
3296 (last_bytes == 0
56636818
JL
3297 ? FALSE : TRUE),
3298 dst_unchanging_p,
c6df88cb
MM
3299 dst_in_struct_p,
3300 dst_scalar_p));
880e2516
RE
3301 else
3302 {
43cffd11 3303 mem = gen_rtx_MEM (SImode, dst);
56636818
JL
3304 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
3305 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
c6df88cb 3306 MEM_SCALAR_P (mem) = dst_scalar_p;
43cffd11 3307 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
bd9c7e23
RE
3308 if (last_bytes != 0)
3309 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
880e2516
RE
3310 }
3311 }
3312
3313 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
3314 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
3315 }
3316
3317 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
3318 if (out_words_to_go)
62b10bbc
NC
3319 {
3320 rtx sreg;
3321
3322 mem = gen_rtx_MEM (SImode, src);
3323 RTX_UNCHANGING_P (mem) = src_unchanging_p;
3324 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
3325 MEM_SCALAR_P (mem) = src_scalar_p;
3326 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
3327 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
3328
3329 mem = gen_rtx_MEM (SImode, dst);
3330 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
3331 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
3332 MEM_SCALAR_P (mem) = dst_scalar_p;
3333 emit_move_insn (mem, sreg);
3334 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
3335 in_words_to_go--;
3336
3337 if (in_words_to_go) /* Sanity check */
3338 abort ();
3339 }
880e2516
RE
3340
3341 if (in_words_to_go)
3342 {
3343 if (in_words_to_go < 0)
3344 abort ();
3345
43cffd11 3346 mem = gen_rtx_MEM (SImode, src);
56636818
JL
3347 RTX_UNCHANGING_P (mem) = src_unchanging_p;
3348 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
c6df88cb 3349 MEM_SCALAR_P (mem) = src_scalar_p;
56636818 3350 part_bytes_reg = copy_to_mode_reg (SImode, mem);
880e2516
RE
3351 }
3352
3353 if (BYTES_BIG_ENDIAN && last_bytes)
3354 {
3355 rtx tmp = gen_reg_rtx (SImode);
3356
3357 if (part_bytes_reg == NULL)
3358 abort ();
3359
3360 /* The bytes we want are in the top end of the word */
bee06f3d
RE
3361 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
3362 GEN_INT (8 * (4 - last_bytes))));
880e2516
RE
3363 part_bytes_reg = tmp;
3364
3365 while (last_bytes)
3366 {
43cffd11 3367 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
56636818
JL
3368 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
3369 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
c6df88cb 3370 MEM_SCALAR_P (mem) = dst_scalar_p;
43cffd11 3371 emit_move_insn (mem, gen_rtx_SUBREG (QImode, part_bytes_reg, 0));
62b10bbc 3372
880e2516
RE
3373 if (--last_bytes)
3374 {
3375 tmp = gen_reg_rtx (SImode);
3376 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
3377 part_bytes_reg = tmp;
3378 }
3379 }
3380
3381 }
3382 else
3383 {
3384 while (last_bytes)
3385 {
3386 if (part_bytes_reg == NULL)
3387 abort ();
3388
43cffd11 3389 mem = gen_rtx_MEM (QImode, dst);
56636818
JL
3390 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
3391 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
c6df88cb 3392 MEM_SCALAR_P (mem) = dst_scalar_p;
43cffd11 3393 emit_move_insn (mem, gen_rtx_SUBREG (QImode, part_bytes_reg, 0));
62b10bbc 3394
880e2516
RE
3395 if (--last_bytes)
3396 {
3397 rtx tmp = gen_reg_rtx (SImode);
bd9c7e23
RE
3398
3399 emit_insn (gen_addsi3 (dst, dst, const1_rtx));
880e2516
RE
3400 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
3401 part_bytes_reg = tmp;
3402 }
3403 }
3404 }
3405
3406 return 1;
3407}
3408
5165176d
RE
3409/* Generate a memory reference for a half word, such that it will be loaded
3410 into the top 16 bits of the word. We can assume that the address is
3411 known to be alignable and of the form reg, or plus (reg, const). */
3412rtx
3413gen_rotated_half_load (memref)
3414 rtx memref;
3415{
3416 HOST_WIDE_INT offset = 0;
3417 rtx base = XEXP (memref, 0);
3418
3419 if (GET_CODE (base) == PLUS)
3420 {
3421 offset = INTVAL (XEXP (base, 1));
3422 base = XEXP (base, 0);
3423 }
3424
956d6950 3425 /* If we aren't allowed to generate unaligned addresses, then fail. */
5165176d
RE
3426 if (TARGET_SHORT_BY_BYTES
3427 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
3428 return NULL;
3429
43cffd11 3430 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5165176d
RE
3431
3432 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
3433 return base;
3434
43cffd11 3435 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5165176d
RE
3436}
3437
/* Return the CC mode suitable for combining the simple comparisons X
   and Y into one conditional-compare sequence, or CCmode if they
   cannot be combined.  COND_OR nonzero means either comparison may
   hold (the caller passes the IF_THEN_ELSE false arm, const1_rtx);
   zero means both must hold (false arm const0_rtx) — see the caller
   in arm_select_cc_mode.  */
static enum machine_mode
select_dominance_cc_mode (x, y, cond_or)
     rtx x;
     rtx y;
     HOST_WIDE_INT cond_or;
{
  enum rtx_code cond1, cond2;
  int swapped = 0;

  /* Currently we will probably get the wrong result if the individual
     comparisons are not simple.  This also ensures that it is safe to
     reverse a comparison if necessary.  */
  if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
       != CCmode)
      || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
	  != CCmode))
    return CCmode;

  /* For an OR of the conditions, reverse the first so both are ANDs.  */
  if (cond_or)
    cond1 = reverse_condition (cond1);

  /* If the comparisons are not equal, and one doesn't dominate the other,
     then we can't do this.  */
  if (cond1 != cond2
      && ! comparison_dominates_p (cond1, cond2)
      && (swapped = 1, ! comparison_dominates_p (cond2, cond1)))
    return CCmode;

  /* Ensure COND1 dominates COND2 below.  */
  if (swapped)
    {
      enum rtx_code temp = cond1;
      cond1 = cond2;
      cond2 = temp;
    }

  switch (cond1)
    {
    case EQ:
      if (cond2 == EQ || ! cond_or)
	return CC_DEQmode;

      switch (cond2)
	{
	case LE: return CC_DLEmode;
	case LEU: return CC_DLEUmode;
	case GE: return CC_DGEmode;
	case GEU: return CC_DGEUmode;
	default: break;
	}

      break;

    case LT:
      if (cond2 == LT || ! cond_or)
	return CC_DLTmode;
      if (cond2 == LE)
	return CC_DLEmode;
      if (cond2 == NE)
	return CC_DNEmode;
      break;

    case GT:
      if (cond2 == GT || ! cond_or)
	return CC_DGTmode;
      if (cond2 == GE)
	return CC_DGEmode;
      if (cond2 == NE)
	return CC_DNEmode;
      break;

    case LTU:
      if (cond2 == LTU || ! cond_or)
	return CC_DLTUmode;
      if (cond2 == LEU)
	return CC_DLEUmode;
      if (cond2 == NE)
	return CC_DNEmode;
      break;

    case GTU:
      if (cond2 == GTU || ! cond_or)
	return CC_DGTUmode;
      if (cond2 == GEU)
	return CC_DGEUmode;
      if (cond2 == NE)
	return CC_DNEmode;
      break;

    /* The remaining cases only occur when both comparisons are the
       same.  */
    case NE:
      return CC_DNEmode;

    case LE:
      return CC_DLEmode;

    case GE:
      return CC_DGEmode;

    case LEU:
      return CC_DLEUmode;

    case GEU:
      return CC_DGEUmode;

    default:
      break;
    }

  /* Any combination not handled above is a bug in the dominance check.  */
  abort ();
}
3549
/* Return the CC mode to be used when comparing X against Y with
   operator OP.  CCmode is the fully general result; the specialised
   modes record cases where the comparison can be merged with the
   instruction computing X, or only needs particular flags.  */
enum machine_mode
arm_select_cc_mode (op, x, y)
     enum rtx_code op;
     rtx x;
     rtx y;
{
  /* All floating point compares return CCFP if it is an equality
     comparison, and CCFPE otherwise.  */
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return (op == EQ || op == NE) ? CCFPmode : CCFPEmode;

  /* A compare with a shifted operand.  Because of canonicalization, the
     comparison will have to be swapped when we emit the assembler.  */
  if (GET_MODE (y) == SImode && GET_CODE (y) == REG
      && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
	  || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
	  || GET_CODE (x) == ROTATERT))
    return CC_SWPmode;

  /* This is a special case that is used by combine to allow a
     comparison of a shifted byte load to be split into a zero-extend
     followed by a comparison of the shifted integer (only valid for
     equalities and unsigned inequalities).  */
  if (GET_MODE (x) == SImode
      && GET_CODE (x) == ASHIFT
      && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
      && GET_CODE (XEXP (x, 0)) == SUBREG
      && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
      && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
      && (op == EQ || op == NE
	  || op == GEU || op == GTU || op == LTU || op == LEU)
      && GET_CODE (y) == CONST_INT)
    return CC_Zmode;

  /* An operation that sets the condition codes as a side-effect, the
     V flag is not set correctly, so we can only use comparisons where
     this doesn't matter.  (For LT and GE we can use "mi" and "pl"
     instead.)  */
  if (GET_MODE (x) == SImode
      && y == const0_rtx
      && (op == EQ || op == NE || op == LT || op == GE)
      && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
	  || GET_CODE (x) == AND || GET_CODE (x) == IOR
	  || GET_CODE (x) == XOR || GET_CODE (x) == MULT
	  || GET_CODE (x) == NOT || GET_CODE (x) == NEG
	  || GET_CODE (x) == LSHIFTRT
	  || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
	  || GET_CODE (x) == ROTATERT || GET_CODE (x) == ZERO_EXTRACT))
    return CC_NOOVmode;

  /* A construct for a conditional compare, if the false arm contains
     0, then both conditions must be true, otherwise either condition
     must be true.  Not all conditions are possible, so CCmode is
     returned if it can't be done.  */
  if (GET_CODE (x) == IF_THEN_ELSE
      && (XEXP (x, 2) == const0_rtx
	  || XEXP (x, 2) == const1_rtx)
      && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
      && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
    return select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
				     INTVAL (XEXP (x, 2)));

  /* QImode (in)equality only needs the Z flag.  */
  if (GET_MODE (x) == QImode && (op == EQ || op == NE))
    return CC_Zmode;

  /* An unsigned comparison of a sum against one of its own operands
     only needs the carry flag.  */
  if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
      && GET_CODE (x) == PLUS
      && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
    return CC_Cmode;

  return CCmode;
}
3622
ff9940b0
RE
3623/* X and Y are two things to compare using CODE. Emit the compare insn and
3624 return the rtx for register 0 in the proper mode. FP means this is a
3625 floating point compare: I don't think that it is needed on the arm. */
3626
3627rtx
74bbc178 3628gen_compare_reg (code, x, y)
ff9940b0
RE
3629 enum rtx_code code;
3630 rtx x, y;
3631{
3632 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
43cffd11 3633 rtx cc_reg = gen_rtx_REG (mode, 24);
ff9940b0 3634
43cffd11
RE
3635 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
3636 gen_rtx_COMPARE (mode, x, y)));
ff9940b0
RE
3637
3638 return cc_reg;
3639}
3640
/* Handle loading an HImode value from memory during reload by
   synthesising it as two zero-extended byte loads combined with a
   shift and IOR.  operands[0] is the destination register,
   operands[1] the memory (or spilt pseudo) source, and operands[2] a
   DImode scratch whose two SImode halves may be used.  */
void
arm_reload_in_hi (operands)
     rtx * operands;
{
  rtx ref = operands[1];
  rtx base, scratch;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) == SUBREG)
    {
      /* Fold the SUBREG's word offset into a byte offset (adjusted for
	 big-endian narrowing).  */
      offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
      if (BYTES_BIG_ENDIAN)
	offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
		   - MIN (UNITS_PER_WORD,
			  GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
      ref = SUBREG_REG (ref);
    }

  if (GET_CODE (ref) == REG)
    {
      /* We have a pseudo which has been spilt onto the stack; there
	 are two cases here: the first where there is a simple
	 stack-slot replacement and a second where the stack-slot is
	 out of range, or is used as a subreg.  */
      if (reg_equiv_mem[REGNO (ref)])
	{
	  ref = reg_equiv_mem[REGNO (ref)];
	  base = find_replacement (&XEXP (ref, 0));
	}
      else
	/* The slot is out of range, or was dressed up in a SUBREG */
	base = reg_equiv_address[REGNO (ref)];
    }
  else
    base = find_replacement (&XEXP (ref, 0));

  /* Handle the case where the address is too complex to be offset by 1.  */
  if (GET_CODE (base) == MINUS
      || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
    {
      /* Compute the address into the second half of the scratch.  */
      rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

      emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
      base = base_plus;
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* The addend must be CONST_INT, or we would have dealt with it above */
      HOST_WIDE_INT hi, lo;

      offset += INTVAL (XEXP (base, 1));
      base = XEXP (base, 0);

      /* Rework the address into a legal sequence of insns */
      /* Valid range for lo is -4095 -> 4095 */
      lo = (offset >= 0
	    ? (offset & 0xfff)
	    : -((-offset) & 0xfff));

      /* Corner case, if lo is the max offset then we would be out of range
	 once we have added the additional 1 below, so bump the msb into the
	 pre-loading insn(s).  */
      if (lo == 4095)
	lo &= 0x7ff;

      /* hi = offset - lo, sign-extended from 32 bits.  */
      hi = ((((offset - lo) & (HOST_WIDE_INT) 0xFFFFFFFF)
	     ^ (HOST_WIDE_INT) 0x80000000)
	    - (HOST_WIDE_INT) 0x80000000);

      if (hi + lo != offset)
	abort ();

      if (hi != 0)
	{
	  rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

	  /* Get the base address; addsi3 knows how to handle constants
	     that require more than one insn */
	  emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
	  base = base_plus;
	  offset = lo;
	}
    }

  /* Load the two bytes, low address first, then merge them with a
     shift-by-8 and IOR in the order dictated by endianness.  */
  scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
  emit_insn (gen_zero_extendqisi2 (scratch,
				   gen_rtx_MEM (QImode,
						plus_constant (base,
							       offset))));
  emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
				   gen_rtx_MEM (QImode,
						plus_constant (base,
							       offset + 1))));
  if (! BYTES_BIG_ENDIAN)
    emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
			gen_rtx_IOR (SImode,
				     gen_rtx_ASHIFT
				     (SImode,
				      gen_rtx_SUBREG (SImode, operands[0], 0),
				      GEN_INT (8)),
				     scratch)));
  else
    emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
			    gen_rtx_IOR (SImode,
					 gen_rtx_ASHIFT (SImode, scratch,
							 GEN_INT (8)),
					 gen_rtx_SUBREG (SImode, operands[0],
							 0))));
}
3750
/* Handle storing a half-word to memory during reload by synthesising as two
   byte stores.  Take care not to clobber the input values until after we
   have moved them somewhere safe.  This code assumes that if the DImode
   scratch in operands[2] overlaps either the input value or output address
   in some way, then that value must die in this insn (we absolutely need
   two scratch registers for some corner cases).  */
void
arm_reload_out_hi (operands)
     rtx * operands;
{
  rtx ref = operands[0];
  rtx outval = operands[1];
  rtx base, scratch;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) == SUBREG)
    {
      /* Fold the SUBREG's word offset into a byte offset (adjusted for
	 big-endian narrowing).  */
      offset = SUBREG_WORD (ref) * UNITS_PER_WORD;
      if (BYTES_BIG_ENDIAN)
	offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (ref)))
		   - MIN (UNITS_PER_WORD,
			  GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref)))));
      ref = SUBREG_REG (ref);
    }


  if (GET_CODE (ref) == REG)
    {
      /* We have a pseudo which has been spilt onto the stack; there
	 are two cases here: the first where there is a simple
	 stack-slot replacement and a second where the stack-slot is
	 out of range, or is used as a subreg.  */
      if (reg_equiv_mem[REGNO (ref)])
	{
	  ref = reg_equiv_mem[REGNO (ref)];
	  base = find_replacement (&XEXP (ref, 0));
	}
      else
	/* The slot is out of range, or was dressed up in a SUBREG */
	base = reg_equiv_address[REGNO (ref)];
    }
  else
    base = find_replacement (&XEXP (ref, 0));

  scratch = gen_rtx_REG (SImode, REGNO (operands[2]));

  /* Handle the case where the address is too complex to be offset by 1.  */
  if (GET_CODE (base) == MINUS
      || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
    {
      rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

      /* Be careful not to destroy OUTVAL.  */
      if (reg_overlap_mentioned_p (base_plus, outval))
	{
	  /* Updating base_plus might destroy outval, see if we can
	     swap the scratch and base_plus.  */
	  if (! reg_overlap_mentioned_p (scratch, outval))
	    {
	      rtx tmp = scratch;
	      scratch = base_plus;
	      base_plus = tmp;
	    }
	  else
	    {
	      rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));

	      /* Be conservative and copy OUTVAL into the scratch now,
		 this should only be necessary if outval is a subreg
		 of something larger than a word.  */
	      /* XXX Might this clobber base?  I can't see how it can,
		 since scratch is known to overlap with OUTVAL, and
		 must be wider than a word.  */
	      emit_insn (gen_movhi (scratch_hi, outval));
	      outval = scratch_hi;
	    }
	}

      emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
      base = base_plus;
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* The addend must be CONST_INT, or we would have dealt with it above */
      HOST_WIDE_INT hi, lo;

      offset += INTVAL (XEXP (base, 1));
      base = XEXP (base, 0);

      /* Rework the address into a legal sequence of insns */
      /* Valid range for lo is -4095 -> 4095 */
      lo = (offset >= 0
	    ? (offset & 0xfff)
	    : -((-offset) & 0xfff));

      /* Corner case, if lo is the max offset then we would be out of range
	 once we have added the additional 1 below, so bump the msb into the
	 pre-loading insn(s).  */
      if (lo == 4095)
	lo &= 0x7ff;

      /* hi = offset - lo, sign-extended from 32 bits.  */
      hi = ((((offset - lo) & (HOST_WIDE_INT) 0xFFFFFFFF)
	     ^ (HOST_WIDE_INT) 0x80000000)
	    - (HOST_WIDE_INT) 0x80000000);

      if (hi + lo != offset)
	abort ();

      if (hi != 0)
	{
	  rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

	  /* Be careful not to destroy OUTVAL.  */
	  if (reg_overlap_mentioned_p (base_plus, outval))
	    {
	      /* Updating base_plus might destroy outval, see if we
		 can swap the scratch and base_plus.  */
	      if (! reg_overlap_mentioned_p (scratch, outval))
		{
		  rtx tmp = scratch;
		  scratch = base_plus;
		  base_plus = tmp;
		}
	      else
		{
		  rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));

		  /* Be conservative and copy outval into scratch now,
		     this should only be necessary if outval is a
		     subreg of something larger than a word.  */
		  /* XXX Might this clobber base?  I can't see how it
		     can, since scratch is known to overlap with
		     outval.  */
		  emit_insn (gen_movhi (scratch_hi, outval));
		  outval = scratch_hi;
		}
	    }

	  /* Get the base address; addsi3 knows how to handle constants
	     that require more than one insn */
	  emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
	  base = base_plus;
	  offset = lo;
	}
    }

  /* Store the two bytes; the high byte is obtained by shifting the
     value right by 8 into the scratch register.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_movqi (gen_rtx_MEM (QImode,
					 plus_constant (base, offset + 1)),
			    gen_rtx_SUBREG (QImode, outval, 0)));
      emit_insn (gen_lshrsi3 (scratch,
			      gen_rtx_SUBREG (SImode, outval, 0),
			      GEN_INT (8)));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
			    gen_rtx_SUBREG (QImode, scratch, 0)));
    }
  else
    {
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
			    gen_rtx_SUBREG (QImode, outval, 0)));
      emit_insn (gen_lshrsi3 (scratch,
			      gen_rtx_SUBREG (SImode, outval, 0),
			      GEN_INT (8)));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode,
					 plus_constant (base, offset + 1)),
			    gen_rtx_SUBREG (QImode, scratch, 0)));
    }
}
2b835d68
RE
3920\f
3921/* Routines for manipulation of the constant pool. */
2b835d68 3922
949d79eb
RE
3923/* Arm instructions cannot load a large constant directly into a
3924 register; they have to come from a pc relative load. The constant
3925 must therefore be placed in the addressable range of the pc
3926 relative load. Depending on the precise pc relative load
3927 instruction the range is somewhere between 256 bytes and 4k. This
3928 means that we often have to dump a constant inside a function, and
2b835d68
RE
3929 generate code to branch around it.
3930
949d79eb
RE
3931 It is important to minimize this, since the branches will slow
3932 things down and make the code larger.
2b835d68 3933
949d79eb
RE
3934 Normally we can hide the table after an existing unconditional
3935 branch so that there is no interruption of the flow, but in the
3936 worst case the code looks like this:
2b835d68
RE
3937
3938 ldr rn, L1
949d79eb 3939 ...
2b835d68
RE
3940 b L2
3941 align
3942 L1: .long value
3943 L2:
949d79eb 3944 ...
2b835d68 3945
2b835d68 3946 ldr rn, L3
949d79eb 3947 ...
2b835d68
RE
3948 b L4
3949 align
2b835d68
RE
3950 L3: .long value
3951 L4:
949d79eb
RE
3952 ...
3953
3954 We fix this by performing a scan after scheduling, which notices
3955 which instructions need to have their operands fetched from the
3956 constant table and builds the table.
3957
3958 The algorithm starts by building a table of all the constants that
3959 need fixing up and all the natural barriers in the function (places
3960 where a constant table can be dropped without breaking the flow).
3961 For each fixup we note how far the pc-relative replacement will be
3962 able to reach and the offset of the instruction into the function.
3963
3964 Having built the table we then group the fixes together to form
3965 tables that are as large as possible (subject to addressing
3966 constraints) and emit each table of constants after the last
3967 barrier that is within range of all the instructions in the group.
3968 If a group does not contain a barrier, then we forcibly create one
3969 by inserting a jump instruction into the flow. Once the table has
3970 been inserted, the insns are then modified to reference the
3971 relevant entry in the pool.
3972
3973   Possible enhancements to the algorithm (not implemented) are:
3974
3975 1) ARM instructions (but not thumb) can use negative offsets, so we
3976 could reference back to a previous pool rather than forwards to a
3977 new one. For large functions this may reduce the number of pools
3978 required.
3979
3980 2) For some processors and object formats, there may be benefit in
3981 aligning the pools to the start of cache lines; this alignment
3982 would need to be taken into account when calculating addressability
3983 of a pool.
2b835d68
RE
3984
3985 */
3986
/* An entry in the constant pool (minipool).  */
typedef struct
{
  rtx value;			/* Value in table */
  HOST_WIDE_INT next_offset;	/* Offset of the byte just past this entry.  */
  enum machine_mode mode;	/* Mode of value */
} minipool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...4092 bytes and constants are at least 4
   bytes long.  */

#define MAX_MINIPOOL_SIZE (4092/4)
static minipool_node minipool_vector[MAX_MINIPOOL_SIZE];
static int minipool_size;		/* Entries currently in the pool.  */
static rtx minipool_vector_label;	/* Label addressing the pool head.  */
2b835d68 4002
332072db
RE
4003/* Add a constant to the pool and return its offset within the current
4004 pool.
4005
4006 X is the rtx we want to replace. MODE is its mode. On return,
4007 ADDRESS_ONLY will be non-zero if we really want the address of such
4008 a constant, not the constant itself. */
2b835d68 4009static HOST_WIDE_INT
949d79eb 4010add_minipool_constant (x, mode)
2b835d68
RE
4011 rtx x;
4012 enum machine_mode mode;
4013{
4014 int i;
2b835d68 4015 HOST_WIDE_INT offset;
da6558fd 4016
949d79eb
RE
4017 /* First, see if we've already got it. */
4018 for (i = 0; i < minipool_size; i++)
2b835d68 4019 {
949d79eb
RE
4020 if (GET_CODE (x) == minipool_vector[i].value->code
4021 && mode == minipool_vector[i].mode)
2b835d68
RE
4022 {
4023 if (GET_CODE (x) == CODE_LABEL)
4024 {
949d79eb 4025 if (XINT (x, 3) != XINT (minipool_vector[i].value, 3))
2b835d68
RE
4026 continue;
4027 }
949d79eb
RE
4028 if (rtx_equal_p (x, minipool_vector[i].value))
4029 return minipool_vector[i].next_offset - GET_MODE_SIZE (mode);
2b835d68
RE
4030 }
4031 }
4032
4033 /* Need a new one */
949d79eb 4034 minipool_vector[minipool_size].next_offset = GET_MODE_SIZE (mode);
2b835d68 4035 offset = 0;
949d79eb
RE
4036 if (minipool_size == 0)
4037 minipool_vector_label = gen_label_rtx ();
2b835d68 4038 else
949d79eb
RE
4039 minipool_vector[minipool_size].next_offset
4040 += (offset = minipool_vector[minipool_size - 1].next_offset);
2b835d68 4041
949d79eb
RE
4042 minipool_vector[minipool_size].value = x;
4043 minipool_vector[minipool_size].mode = mode;
4044 minipool_size++;
2b835d68
RE
4045 return offset;
4046}
4047
4048/* Output the literal table */
4049static void
949d79eb 4050dump_minipool (scan)
2b835d68
RE
4051 rtx scan;
4052{
4053 int i;
4054
4055 scan = emit_label_after (gen_label_rtx (), scan);
4056 scan = emit_insn_after (gen_align_4 (), scan);
949d79eb 4057 scan = emit_label_after (minipool_vector_label, scan);
2b835d68 4058
949d79eb 4059 for (i = 0; i < minipool_size; i++)
2b835d68 4060 {
949d79eb 4061 minipool_node *p = minipool_vector + i;
2b835d68
RE
4062
4063 switch (GET_MODE_SIZE (p->mode))
4064 {
4065 case 4:
4066 scan = emit_insn_after (gen_consttable_4 (p->value), scan);
4067 break;
4068
4069 case 8:
4070 scan = emit_insn_after (gen_consttable_8 (p->value), scan);
4071 break;
4072
4073 default:
4074 abort ();
4075 break;
4076 }
4077 }
4078
4079 scan = emit_insn_after (gen_consttable_end (), scan);
4080 scan = emit_barrier_after (scan);
949d79eb 4081 minipool_size = 0;
2b835d68
RE
4082}
4083
949d79eb
RE
/* Find the last barrier less than MAX_COUNT bytes from FROM, or
   create one.  */
static rtx
find_barrier (from, max_count)
     rtx from;
     int max_count;
{
  int count = 0;
  rtx found_barrier = 0;
  rtx last = from;

  /* Scan forwards accumulating insn lengths, remembering the most
     recent barrier seen within the byte budget.  */
  while (from && count < max_count)
    {
      rtx tmp;

      if (GET_CODE (from) == BARRIER)
	found_barrier = from;

      /* Count the length of this insn */
      if (GET_CODE (from) == JUMP_INSN
	  && JUMP_LABEL (from) != 0
	  && ((tmp = next_real_insn (JUMP_LABEL (from)))
	      == next_real_insn (from))
	  && tmp != NULL
	  && GET_CODE (tmp) == JUMP_INSN
	  && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
	      || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
	{
	  /* This jump is immediately followed by its dispatch table;
	     add the table's size (one SImode word per element) to the
	     running count.  The vector is element 1 of an
	     ADDR_DIFF_VEC pattern, element 0 of an ADDR_VEC.  */
	  int elt = GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC ? 1 : 0;
	  count += (get_attr_length (from)
		    + GET_MODE_SIZE (SImode) * XVECLEN (PATTERN (tmp), elt));
	  /* Continue after the dispatch table.  */
	  last = from;
	  from = NEXT_INSN (tmp);
	  continue;
	}
      else
	count += get_attr_length (from);

      last = from;
      from = NEXT_INSN (from);
    }

  if (! found_barrier)
    {
      /* We didn't find a barrier in time to
	 dump our stuff, so we'll make one.  */
      rtx label = gen_label_rtx ();

      /* If the scan ran out of budget (FROM non-null), back up to the
	 last insn we fully counted; otherwise append at the end of
	 the function.  */
      if (from)
	from = PREV_INSN (last);
      else
	from = get_last_insn ();

      /* Walk back to be just before any jump.  */
      while (GET_CODE (from) == JUMP_INSN
	     || GET_CODE (from) == NOTE
	     || GET_CODE (from) == CODE_LABEL)
	from = PREV_INSN (from);

      /* Emit an unconditional jump over the new barrier so execution
	 resumes at LABEL rather than falling into the pool that will
	 be dumped after the barrier.  */
      from = emit_jump_insn_after (gen_jump (label), from);
      JUMP_LABEL (from) = label;
      found_barrier = emit_barrier_after (from);
      emit_label_after (label, found_barrier);
    }

  return found_barrier;
}
4152
949d79eb
RE
/* One pending fix: an insn that references a constant which must be
   placed in a minipool, or a barrier that bounds pool placement.
   Entries form a singly-linked list in instruction-stream order.  */
struct minipool_fixup
{
  struct minipool_fixup *next;	/* Next fix, in address order.  */
  rtx insn;			/* The insn (or barrier) concerned.  */
  int address;			/* Byte address of INSN in the function.  */
  rtx *loc;			/* Location within INSN of the constant.  */
  enum machine_mode mode;	/* Mode of the constant.  */
  rtx value;			/* The constant itself.  */
  int range;			/* Pool range attribute of INSN (bytes).  */
};

/* Head and tail of the list of pending fixes.  */
struct minipool_fixup *minipool_fix_head;
struct minipool_fixup *minipool_fix_tail;
4166
4167static void
4168push_minipool_barrier (insn, address)
2b835d68 4169 rtx insn;
949d79eb 4170 int address;
2b835d68 4171{
949d79eb
RE
4172 struct minipool_fixup *fix
4173 = (struct minipool_fixup *) oballoc (sizeof (struct minipool_fixup));
ad076f4e 4174
949d79eb
RE
4175 fix->insn = insn;
4176 fix->address = address;
2b835d68 4177
949d79eb
RE
4178 fix->next = NULL;
4179 if (minipool_fix_head != NULL)
4180 minipool_fix_tail->next = fix;
4181 else
4182 minipool_fix_head = fix;
4183
4184 minipool_fix_tail = fix;
4185}
2b835d68 4186
949d79eb
RE
4187static void
4188push_minipool_fix (insn, address, loc, mode, value)
4189 rtx insn;
4190 int address;
4191 rtx *loc;
4192 enum machine_mode mode;
4193 rtx value;
4194{
4195 struct minipool_fixup *fix
4196 = (struct minipool_fixup *) oballoc (sizeof (struct minipool_fixup));
4197
4198#ifdef AOF_ASSEMBLER
4199 /* PIC symbol refereneces need to be converted into offsets into the
4200 based area. */
4201 if (flag_pic && GET_MODE == SYMBOL_REF)
4202 value = aof_pic_entry (value);
4203#endif /* AOF_ASSEMBLER */
4204
4205 fix->insn = insn;
4206 fix->address = address;
4207 fix->loc = loc;
4208 fix->mode = mode;
4209 fix->value = value;
4210 fix->range = get_attr_pool_range (insn);
4211
4212 /* If an insn doesn't have a range defined for it, then it isn't
4213 expecting to be reworked by this code. Better to abort now than
4214 to generate duff assembly code. */
4215 if (fix->range == 0)
4216 abort ();
4217
4218 /* Add it to the chain of fixes */
4219 fix->next = NULL;
4220 if (minipool_fix_head != NULL)
4221 minipool_fix_tail->next = fix;
4222 else
4223 minipool_fix_head = fix;
4224
4225 minipool_fix_tail = fix;
4226}
4227
/* Scan INSN (at byte ADDRESS) for constant operands that cannot be
   handled in place and queue a minipool fix for each one found.  */
static void
note_invalid_constants (insn, address)
     rtx insn;
     int address;
{
  int opno;

  /* Extract the operands of the insn */
  extract_insn(insn);

  /* If this is an asm, we can't do anything about it (or can we?) */
  if (INSN_CODE (insn) < 0)
    return;

  /* Find the alternative selected */
  if (! constrain_operands (1))
    fatal_insn_not_found (insn);

  /* Preprocess the constraints, to extract some useful information.  */
  preprocess_constraints ();

  for (opno = 0; opno < recog_n_operands; opno++)
    {
      /* Things we need to fix can only occur in inputs */
      if (recog_op_type[opno] != OP_IN)
	continue;

      /* If this alternative is a memory reference, then any mention
	 of constants in this alternative is really to fool reload
	 into allowing us to accept one there.  We need to fix them up
	 now so that we output the right code.  */
      if (recog_op_alt[opno][which_alternative].memory_ok)
	{
	  rtx op = recog_operand[opno];

	  if (CONSTANT_P (op))
	    push_minipool_fix (insn, address, recog_operand_loc[opno],
			       recog_operand_mode[opno], op);
#ifndef AOF_ASSEMBLER
	  /* NOTE(review): UNSPEC 3 appears to be the PIC symbol-ref
	     wrapper; the wrapped symbol is pooled instead — confirm
	     against the UNSPEC numbering in arm.md.  */
	  else if (GET_CODE (op) == UNSPEC && XINT (op, 1) == 3)
	    push_minipool_fix (insn, address, recog_operand_loc[opno],
			       recog_operand_mode[opno], XVECEXP (op, 0, 0));
#endif
	  /* An SImode load from the constant pool: pool the constant
	     itself rather than its address.  */
	  else if (recog_operand_mode[opno] == SImode
		   && GET_CODE (op) == MEM
		   && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
		   && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
	    push_minipool_fix (insn, address, recog_operand_loc[opno],
			       recog_operand_mode[opno],
			       get_pool_constant (XEXP (op, 0)));
	}
    }
}
4281
/* Machine-dependent reorg pass: scan the function for constants that
   must be placed in minipools, group them into pools, and dump each
   pool at a suitable barrier.  FIRST is the first insn.  */
void
arm_reorg (first)
     rtx first;
{
  rtx insn;
  int address = 0;
  struct minipool_fixup *fix;

  minipool_fix_head = minipool_fix_tail = NULL;

  /* The first insn must always be a note, or the code below won't
     scan it properly.  */
  if (GET_CODE (first) != NOTE)
    abort ();

  /* Scan all the insns and record the operands that will need fixing.  */
  for (insn = next_nonnote_insn (first); insn; insn = next_nonnote_insn (insn))
    {

      if (GET_CODE (insn) == BARRIER)
	push_minipool_barrier(insn, address);
      else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN
	       || GET_CODE (insn) == JUMP_INSN)
	{
	  rtx table;

	  note_invalid_constants (insn, address);
	  address += get_attr_length (insn);
	  /* If the insn is a vector jump, add the size of the table
	     and skip the table.  */
	  if (GET_CODE (insn) == JUMP_INSN
	      && JUMP_LABEL (insn) != NULL
	      && ((table = next_real_insn (JUMP_LABEL (insn)))
		  == next_real_insn (insn))
	      && table != NULL
	      && GET_CODE (table) == JUMP_INSN
	      && (GET_CODE (PATTERN (table)) == ADDR_VEC
		  || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
	    {
	      int elt = GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC ? 1 : 0;

	      address += GET_MODE_SIZE (SImode) * XVECLEN (PATTERN (table),
							   elt);
	      insn = table;
	    }
	}
    }

  /* Now scan the fixups and perform the required changes.  */
  for (fix = minipool_fix_head; fix; fix = fix->next)
    {
      struct minipool_fixup *ftmp;
      struct minipool_fixup *last_barrier = NULL;
      int max_range;
      rtx barrier;
      struct minipool_fixup *this_fix;
      int new_minipool_size = 0;

      /* Skip any further barriers before the next fix.  */
      while (fix && GET_CODE (fix->insn) == BARRIER)
	fix = fix->next;

      if (fix == NULL)
	break;

      ftmp = fix;
      /* MAX_RANGE is the furthest address from which the first fix in
	 this group can still reach its pool entry.  */
      max_range = fix->address + fix->range;

      /* Find all the other fixes that can live in the same pool.  */
      while (ftmp->next && ftmp->next->address < max_range
	     && (GET_CODE (ftmp->next->insn) == BARRIER
		 /* Ensure we can reach the constant inside the pool.  */
		 || ftmp->next->range > new_minipool_size))
	{
	  ftmp = ftmp->next;
	  if (GET_CODE (ftmp->insn) == BARRIER)
	    last_barrier = ftmp;
	  else
	    {
	      /* Does this fix constrain the range we can search?  */
	      if (ftmp->address + ftmp->range - new_minipool_size < max_range)
		max_range = ftmp->address + ftmp->range - new_minipool_size;

	      new_minipool_size += GET_MODE_SIZE (ftmp->mode);
	    }
	}

      /* If we found a barrier, drop back to that; any fixes that we could
	 have reached but come after the barrier will now go in the next
	 mini-pool.  */
      if (last_barrier != NULL)
	{
	  barrier = last_barrier->insn;
	  ftmp = last_barrier;
	}
      else
	/* ftmp is last fix that we can fit into this pool and we
	   failed to find a barrier that we could use.  Insert a new
	   barrier in the code and arrange to jump around it.  */
	barrier = find_barrier (ftmp->insn, max_range - ftmp->address);

      /* Scan over the fixes we have identified for this pool, fixing them
	 up and adding the constants to the pool itself.  */
      for (this_fix = fix; this_fix && ftmp->next != this_fix;
	   this_fix = this_fix->next)
	if (GET_CODE (this_fix->insn) != BARRIER)
	  {
	    /* Rewrite the operand to load from the pool entry's
	       label-relative address.  */
	    int offset = add_minipool_constant (this_fix->value,
						this_fix->mode);
	    rtx addr
	      = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
						  minipool_vector_label),
			       offset);
	    *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
	  }

      dump_minipool (barrier);
      fix = ftmp;
    }

  /* From now on we must synthesize any constants that we can't handle
     directly.  This can happen if the RTL gets split during final
     instruction generation.  */
  after_arm_reorg = 1;
}
4407
cce8749e
CH
4408\f
4409/* Routines to output assembly language. */
4410
f3bb6135 4411/* If the rtx is the correct value then return the string of the number.
ff9940b0
RE
4412 In this way we can ensure that valid double constants are generated even
4413 when cross compiling. */
4414char *
4415fp_immediate_constant (x)
b5cc037f 4416 rtx x;
ff9940b0
RE
4417{
4418 REAL_VALUE_TYPE r;
4419 int i;
4420
4421 if (!fpa_consts_inited)
4422 init_fpa_table ();
4423
4424 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4425 for (i = 0; i < 8; i++)
4426 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
4427 return strings_fpa[i];
f3bb6135 4428
ff9940b0
RE
4429 abort ();
4430}
4431
9997d19d
RE
4432/* As for fp_immediate_constant, but value is passed directly, not in rtx. */
4433static char *
4434fp_const_from_val (r)
62b10bbc 4435 REAL_VALUE_TYPE * r;
9997d19d
RE
4436{
4437 int i;
4438
4439 if (! fpa_consts_inited)
4440 init_fpa_table ();
4441
4442 for (i = 0; i < 8; i++)
4443 if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
4444 return strings_fpa[i];
4445
4446 abort ();
4447}
ff9940b0 4448
cce8749e
CH
4449/* Output the operands of a LDM/STM instruction to STREAM.
4450 MASK is the ARM register set mask of which only bits 0-15 are important.
4451 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
4452 must follow the register list. */
4453
4454void
dd18ae56 4455print_multi_reg (stream, instr, reg, mask, hat)
62b10bbc
NC
4456 FILE * stream;
4457 char * instr;
dd18ae56
NC
4458 int reg;
4459 int mask;
4460 int hat;
cce8749e
CH
4461{
4462 int i;
4463 int not_first = FALSE;
4464
1d5473cb 4465 fputc ('\t', stream);
dd18ae56 4466 asm_fprintf (stream, instr, reg);
1d5473cb 4467 fputs (", {", stream);
62b10bbc 4468
cce8749e
CH
4469 for (i = 0; i < 16; i++)
4470 if (mask & (1 << i))
4471 {
4472 if (not_first)
4473 fprintf (stream, ", ");
62b10bbc 4474
dd18ae56 4475 asm_fprintf (stream, "%r", i);
cce8749e
CH
4476 not_first = TRUE;
4477 }
f3bb6135 4478
cce8749e 4479 fprintf (stream, "}%s\n", hat ? "^" : "");
f3bb6135 4480}
cce8749e
CH
4481
4482/* Output a 'call' insn. */
4483
4484char *
4485output_call (operands)
62b10bbc 4486 rtx * operands;
cce8749e 4487{
cce8749e
CH
4488 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
4489
62b10bbc 4490 if (REGNO (operands[0]) == LR_REGNUM)
cce8749e 4491 {
62b10bbc 4492 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
1d5473cb 4493 output_asm_insn ("mov%?\t%0, %|lr", operands);
cce8749e 4494 }
62b10bbc 4495
1d5473cb 4496 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
da6558fd 4497
6cfc7210 4498 if (TARGET_INTERWORK)
da6558fd
NC
4499 output_asm_insn ("bx%?\t%0", operands);
4500 else
4501 output_asm_insn ("mov%?\t%|pc, %0", operands);
4502
f3bb6135
RE
4503 return "";
4504}
cce8749e 4505
ff9940b0
RE
4506static int
4507eliminate_lr2ip (x)
62b10bbc 4508 rtx * x;
ff9940b0
RE
4509{
4510 int something_changed = 0;
62b10bbc 4511 rtx x0 = * x;
ff9940b0
RE
4512 int code = GET_CODE (x0);
4513 register int i, j;
62b10bbc 4514 register char * fmt;
ff9940b0
RE
4515
4516 switch (code)
4517 {
4518 case REG:
62b10bbc 4519 if (REGNO (x0) == LR_REGNUM)
ff9940b0 4520 {
62b10bbc 4521 *x = gen_rtx_REG (SImode, IP_REGNUM);
ff9940b0
RE
4522 return 1;
4523 }
4524 return 0;
4525 default:
4526 /* Scan through the sub-elements and change any references there */
4527 fmt = GET_RTX_FORMAT (code);
62b10bbc 4528
ff9940b0
RE
4529 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4530 if (fmt[i] == 'e')
4531 something_changed |= eliminate_lr2ip (&XEXP (x0, i));
4532 else if (fmt[i] == 'E')
4533 for (j = 0; j < XVECLEN (x0, i); j++)
4534 something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j));
62b10bbc 4535
ff9940b0
RE
4536 return something_changed;
4537 }
4538}
4539
4540/* Output a 'call' insn that is a reference in memory. */
4541
4542char *
4543output_call_mem (operands)
62b10bbc 4544 rtx * operands;
ff9940b0
RE
4545{
4546 operands[0] = copy_rtx (operands[0]); /* Be ultra careful */
4547 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
4548 */
4549 if (eliminate_lr2ip (&operands[0]))
1d5473cb 4550 output_asm_insn ("mov%?\t%|ip, %|lr", operands);
f3bb6135 4551
6cfc7210 4552 if (TARGET_INTERWORK)
da6558fd
NC
4553 {
4554 output_asm_insn ("ldr%?\t%|ip, %0", operands);
4555 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
4556 output_asm_insn ("bx%?\t%|ip", operands);
4557 }
4558 else
4559 {
4560 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
4561 output_asm_insn ("ldr%?\t%|pc, %0", operands);
4562 }
4563
f3bb6135
RE
4564 return "";
4565}
ff9940b0
RE
4566
4567
4568/* Output a move from arm registers to an fpu registers.
4569 OPERANDS[0] is an fpu register.
4570 OPERANDS[1] is the first registers of an arm register pair. */
4571
4572char *
4573output_mov_long_double_fpu_from_arm (operands)
62b10bbc 4574 rtx * operands;
ff9940b0
RE
4575{
4576 int arm_reg0 = REGNO (operands[1]);
4577 rtx ops[3];
4578
62b10bbc
NC
4579 if (arm_reg0 == IP_REGNUM)
4580 abort ();
f3bb6135 4581
43cffd11
RE
4582 ops[0] = gen_rtx_REG (SImode, arm_reg0);
4583 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
4584 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
ff9940b0 4585
1d5473cb
RE
4586 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
4587 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
62b10bbc 4588
f3bb6135
RE
4589 return "";
4590}
ff9940b0
RE
4591
4592/* Output a move from an fpu register to arm registers.
4593 OPERANDS[0] is the first registers of an arm register pair.
4594 OPERANDS[1] is an fpu register. */
4595
4596char *
4597output_mov_long_double_arm_from_fpu (operands)
62b10bbc 4598 rtx * operands;
ff9940b0
RE
4599{
4600 int arm_reg0 = REGNO (operands[0]);
4601 rtx ops[3];
4602
62b10bbc
NC
4603 if (arm_reg0 == IP_REGNUM)
4604 abort ();
f3bb6135 4605
43cffd11
RE
4606 ops[0] = gen_rtx_REG (SImode, arm_reg0);
4607 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
4608 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
ff9940b0 4609
1d5473cb
RE
4610 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
4611 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
f3bb6135
RE
4612 return "";
4613}
ff9940b0
RE
4614
4615/* Output a move from arm registers to arm registers of a long double
4616 OPERANDS[0] is the destination.
4617 OPERANDS[1] is the source. */
4618char *
4619output_mov_long_double_arm_from_arm (operands)
62b10bbc 4620 rtx * operands;
ff9940b0
RE
4621{
4622 /* We have to be careful here because the two might overlap */
4623 int dest_start = REGNO (operands[0]);
4624 int src_start = REGNO (operands[1]);
4625 rtx ops[2];
4626 int i;
4627
4628 if (dest_start < src_start)
4629 {
4630 for (i = 0; i < 3; i++)
4631 {
43cffd11
RE
4632 ops[0] = gen_rtx_REG (SImode, dest_start + i);
4633 ops[1] = gen_rtx_REG (SImode, src_start + i);
9997d19d 4634 output_asm_insn ("mov%?\t%0, %1", ops);
ff9940b0
RE
4635 }
4636 }
4637 else
4638 {
4639 for (i = 2; i >= 0; i--)
4640 {
43cffd11
RE
4641 ops[0] = gen_rtx_REG (SImode, dest_start + i);
4642 ops[1] = gen_rtx_REG (SImode, src_start + i);
9997d19d 4643 output_asm_insn ("mov%?\t%0, %1", ops);
ff9940b0
RE
4644 }
4645 }
f3bb6135 4646
ff9940b0
RE
4647 return "";
4648}
4649
4650
cce8749e
CH
4651/* Output a move from arm registers to an fpu registers.
4652 OPERANDS[0] is an fpu register.
4653 OPERANDS[1] is the first registers of an arm register pair. */
4654
4655char *
4656output_mov_double_fpu_from_arm (operands)
62b10bbc 4657 rtx * operands;
cce8749e
CH
4658{
4659 int arm_reg0 = REGNO (operands[1]);
4660 rtx ops[2];
4661
62b10bbc
NC
4662 if (arm_reg0 == IP_REGNUM)
4663 abort ();
4664
43cffd11
RE
4665 ops[0] = gen_rtx_REG (SImode, arm_reg0);
4666 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
1d5473cb
RE
4667 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
4668 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
f3bb6135
RE
4669 return "";
4670}
cce8749e
CH
4671
4672/* Output a move from an fpu register to arm registers.
4673 OPERANDS[0] is the first registers of an arm register pair.
4674 OPERANDS[1] is an fpu register. */
4675
4676char *
4677output_mov_double_arm_from_fpu (operands)
62b10bbc 4678 rtx * operands;
cce8749e
CH
4679{
4680 int arm_reg0 = REGNO (operands[0]);
4681 rtx ops[2];
4682
62b10bbc
NC
4683 if (arm_reg0 == IP_REGNUM)
4684 abort ();
f3bb6135 4685
43cffd11
RE
4686 ops[0] = gen_rtx_REG (SImode, arm_reg0);
4687 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
1d5473cb
RE
4688 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
4689 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
f3bb6135
RE
4690 return "";
4691}
cce8749e
CH
4692
/* Output a move between double words.
   It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
   or MEM<-REG and all MEMs must be offsettable addresses.  */

char *
output_move_double (operands)
     rtx * operands;
{
  enum rtx_code code0 = GET_CODE (operands[0]);
  enum rtx_code code1 = GET_CODE (operands[1]);
  rtx otherops[3];

  if (code0 == REG)
    {
      int reg0 = REGNO (operands[0]);

      /* The register holding the second (high-address) word.  */
      otherops[0] = gen_rtx_REG (SImode, 1 + reg0);

      if (code1 == REG)
	{
	  int reg1 = REGNO (operands[1]);
	  if (reg1 == IP_REGNUM)
	    abort ();

	  /* Ensure the second source is not overwritten */
	  if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
	    output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
	  else
	    output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
	}
      else if (code1 == CONST_DOUBLE)
	{
	  /* Split the 64-bit constant into two 32-bit immediates
	     (OPERANDS[1] low / OTHEROPS[1] high, byte order permitting)
	     and emit them separately.  */
	  if (GET_MODE (operands[1]) == DFmode)
	    {
	      long l[2];
	      union real_extract u;

	      bcopy ((char *) &CONST_DOUBLE_LOW (operands[1]), (char *) &u,
		     sizeof (u));
	      REAL_VALUE_TO_TARGET_DOUBLE (u.d, l);
	      otherops[1] = GEN_INT(l[1]);
	      operands[1] = GEN_INT(l[0]);
	    }
	  else if (GET_MODE (operands[1]) != VOIDmode)
	    abort ();
	  else if (WORDS_BIG_ENDIAN)
	    {

	      otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
	      operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
	    }
	  else
	    {

	      otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
	      operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
	    }

	  output_mov_immediate (operands);
	  output_mov_immediate (otherops);
	}
      else if (code1 == CONST_INT)
	{
#if HOST_BITS_PER_WIDE_INT > 32
	  /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
	     what the upper word is.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
	      operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
	    }
	  else
	    {
	      otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
	      operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
	    }
#else
	  /* Sign extend the intval into the high-order word */
	  if (WORDS_BIG_ENDIAN)
	    {
	      otherops[1] = operands[1];
	      operands[1] = (INTVAL (operands[1]) < 0
			     ? constm1_rtx : const0_rtx);
	    }
	  else
	    otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
#endif
	  output_mov_immediate (otherops);
	  output_mov_immediate (operands);
	}
      else if (code1 == MEM)
	{
	  /* Dispatch on the addressing mode of the load.  */
	  switch (GET_CODE (XEXP (operands[1], 0)))
	    {
	    case REG:
	      output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
	      break;

	    case PRE_INC:
	      abort (); /* Should never happen now */
	      break;

	    case PRE_DEC:
	      output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
	      break;

	    case POST_INC:
	      output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
	      break;

	    case POST_DEC:
	      abort (); /* Should never happen now */
	      break;

	    case LABEL_REF:
	    case CONST:
	      /* Form the address in the destination register, then
		 load both words from it.  */
	      output_asm_insn ("adr%?\t%0, %1", operands);
	      output_asm_insn ("ldm%?ia\t%0, %M0", operands);
	      break;

	    default:
	      if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
				   GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
		{
		  otherops[0] = operands[0];
		  otherops[1] = XEXP (XEXP (operands[1], 0), 0);
		  otherops[2] = XEXP (XEXP (operands[1], 0), 1);
		  if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
		    {
		      if (GET_CODE (otherops[2]) == CONST_INT)
			{
			  /* Small offsets can be folded into the LDM
			     addressing mode directly.  */
			  switch (INTVAL (otherops[2]))
			    {
			    case -8:
			      output_asm_insn ("ldm%?db\t%1, %M0", otherops);
			      return "";
			    case -4:
			      output_asm_insn ("ldm%?da\t%1, %M0", otherops);
			      return "";
			    case 4:
			      output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
			      return "";
			    }
			  if (!(const_ok_for_arm (INTVAL (otherops[2]))))
			    output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
			  else
			    output_asm_insn ("add%?\t%0, %1, %2", otherops);
			}
		      else
			output_asm_insn ("add%?\t%0, %1, %2", otherops);
		    }
		  else
		    output_asm_insn ("sub%?\t%0, %1, %2", otherops);

		  return "ldm%?ia\t%0, %M0";
		}
	      else
		{
		  otherops[1] = adj_offsettable_operand (operands[1], 4);
		  /* Take care of overlapping base/data reg. */
		  if (reg_mentioned_p (operands[0], operands[1]))
		    {
		      output_asm_insn ("ldr%?\t%0, %1", otherops);
		      output_asm_insn ("ldr%?\t%0, %1", operands);
		    }
		  else
		    {
		      output_asm_insn ("ldr%?\t%0, %1", operands);
		      output_asm_insn ("ldr%?\t%0, %1", otherops);
		    }
		}
	    }
	}
      else
	abort (); /* Constraints should prevent this */
    }
  else if (code0 == MEM && code1 == REG)
    {
      if (REGNO (operands[1]) == IP_REGNUM)
	abort ();

      /* Dispatch on the addressing mode of the store.  */
      switch (GET_CODE (XEXP (operands[0], 0)))
	{
	case REG:
	  output_asm_insn ("stm%?ia\t%m0, %M1", operands);
	  break;

	case PRE_INC:
	  abort (); /* Should never happen now */
	  break;

	case PRE_DEC:
	  output_asm_insn ("stm%?db\t%m0!, %M1", operands);
	  break;

	case POST_INC:
	  output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
	  break;

	case POST_DEC:
	  abort (); /* Should never happen now */
	  break;

	case PLUS:
	  if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
	    {
	      /* Small offsets can be folded into the STM addressing
		 mode directly.  */
	      switch (INTVAL (XEXP (XEXP (operands[0], 0), 1)))
		{
		case -8:
		  output_asm_insn ("stm%?db\t%m0, %M1", operands);
		  return "";

		case -4:
		  output_asm_insn ("stm%?da\t%m0, %M1", operands);
		  return "";

		case 4:
		  output_asm_insn ("stm%?ib\t%m0, %M1", operands);
		  return "";
		}
	    }
	  /* Fall through */

	default:
	  /* Store the two words with a pair of STRs.  */
	  otherops[0] = adj_offsettable_operand (operands[0], 4);
	  otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
	  output_asm_insn ("str%?\t%1, %0", operands);
	  output_asm_insn ("str%?\t%1, %0", otherops);
	}
    }
  else
    abort (); /* Constraints should prevent this */

  return "";
}
cce8749e
CH
4928
4929
4930/* Output an arbitrary MOV reg, #n.
4931 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
4932
4933char *
4934output_mov_immediate (operands)
62b10bbc 4935 rtx * operands;
cce8749e 4936{
f3bb6135 4937 HOST_WIDE_INT n = INTVAL (operands[1]);
cce8749e
CH
4938 int n_ones = 0;
4939 int i;
4940
4941 /* Try to use one MOV */
cce8749e 4942 if (const_ok_for_arm (n))
f3bb6135 4943 {
9997d19d 4944 output_asm_insn ("mov%?\t%0, %1", operands);
f3bb6135
RE
4945 return "";
4946 }
cce8749e
CH
4947
4948 /* Try to use one MVN */
f3bb6135 4949 if (const_ok_for_arm (~n))
cce8749e 4950 {
f3bb6135 4951 operands[1] = GEN_INT (~n);
9997d19d 4952 output_asm_insn ("mvn%?\t%0, %1", operands);
f3bb6135 4953 return "";
cce8749e
CH
4954 }
4955
4956 /* If all else fails, make it out of ORRs or BICs as appropriate. */
4957
4958 for (i=0; i < 32; i++)
4959 if (n & 1 << i)
4960 n_ones++;
4961
4962 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
9997d19d
RE
4963 output_multi_immediate(operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
4964 ~n);
cce8749e 4965 else
9997d19d
RE
4966 output_multi_immediate(operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
4967 n);
f3bb6135
RE
4968
4969 return "";
4970}
cce8749e
CH
4971
4972
4973/* Output an ADD r, s, #n where n may be too big for one instruction. If
4974 adding zero to one register, output nothing. */
4975
4976char *
4977output_add_immediate (operands)
62b10bbc 4978 rtx * operands;
cce8749e 4979{
f3bb6135 4980 HOST_WIDE_INT n = INTVAL (operands[2]);
cce8749e
CH
4981
4982 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
4983 {
4984 if (n < 0)
4985 output_multi_immediate (operands,
9997d19d
RE
4986 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
4987 -n);
cce8749e
CH
4988 else
4989 output_multi_immediate (operands,
9997d19d
RE
4990 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
4991 n);
cce8749e 4992 }
f3bb6135
RE
4993
4994 return "";
4995}
cce8749e 4996
cce8749e
CH
4997/* Output a multiple immediate operation.
4998 OPERANDS is the vector of operands referred to in the output patterns.
4999 INSTR1 is the output pattern to use for the first constant.
5000 INSTR2 is the output pattern to use for subsequent constants.
5001 IMMED_OP is the index of the constant slot in OPERANDS.
5002 N is the constant value. */
5003
18af7313 5004static char *
cce8749e 5005output_multi_immediate (operands, instr1, instr2, immed_op, n)
62b10bbc
NC
5006 rtx * operands;
5007 char * instr1, * instr2;
f3bb6135
RE
5008 int immed_op;
5009 HOST_WIDE_INT n;
cce8749e 5010{
f3bb6135
RE
5011#if HOST_BITS_PER_WIDE_INT > 32
5012 n &= 0xffffffff;
5013#endif
5014
cce8749e
CH
5015 if (n == 0)
5016 {
5017 operands[immed_op] = const0_rtx;
f3bb6135 5018 output_asm_insn (instr1, operands); /* Quick and easy output */
cce8749e
CH
5019 }
5020 else
5021 {
5022 int i;
5023 char *instr = instr1;
5024
5025 /* Note that n is never zero here (which would give no output) */
cce8749e
CH
5026 for (i = 0; i < 32; i += 2)
5027 {
5028 if (n & (3 << i))
5029 {
f3bb6135
RE
5030 operands[immed_op] = GEN_INT (n & (255 << i));
5031 output_asm_insn (instr, operands);
cce8749e
CH
5032 instr = instr2;
5033 i += 6;
5034 }
5035 }
5036 }
f3bb6135 5037 return "";
9997d19d 5038}
cce8749e
CH
5039
5040
5041/* Return the appropriate ARM instruction for the operation code.
5042 The returned result should not be overwritten. OP is the rtx of the
5043 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
5044 was shifted. */
5045
5046char *
5047arithmetic_instr (op, shift_first_arg)
5048 rtx op;
f3bb6135 5049 int shift_first_arg;
cce8749e 5050{
9997d19d 5051 switch (GET_CODE (op))
cce8749e
CH
5052 {
5053 case PLUS:
f3bb6135
RE
5054 return "add";
5055
cce8749e 5056 case MINUS:
f3bb6135
RE
5057 return shift_first_arg ? "rsb" : "sub";
5058
cce8749e 5059 case IOR:
f3bb6135
RE
5060 return "orr";
5061
cce8749e 5062 case XOR:
f3bb6135
RE
5063 return "eor";
5064
cce8749e 5065 case AND:
f3bb6135
RE
5066 return "and";
5067
cce8749e 5068 default:
f3bb6135 5069 abort ();
cce8749e 5070 }
f3bb6135 5071}
cce8749e
CH
5072
5073
/* Ensure valid constant shifts and return the appropriate shift mnemonic
   for the operation code.  The returned result should not be overwritten.
   OP is the rtx code of the shift.
   On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
   shift.  */

static char *
shift_op (op, amountp)
     rtx op;
     HOST_WIDE_INT *amountp;
{
  char * mnem;
  enum rtx_code code = GET_CODE (op);

  /* Record the shift amount: -1 for a register shift, else the
     constant value.  */
  if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
    *amountp = -1;
  else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
    *amountp = INTVAL (XEXP (op, 1));
  else
    abort ();

  switch (code)
    {
    case ASHIFT:
      mnem = "asl";
      break;

    case ASHIFTRT:
      mnem = "asr";
      break;

    case LSHIFTRT:
      mnem = "lsr";
      break;

    case ROTATERT:
      mnem = "ror";
      break;

    case MULT:
      /* We never have to worry about the amount being other than a
	 power of 2, since this case can never be reloaded from a reg.  */
      if (*amountp != -1)
	*amountp = int_log2 (*amountp);
      else
	abort ();
      return "asl";

    default:
      abort ();
    }

  if (*amountp != -1)
    {
      /* This is not 100% correct, but follows from the desire to merge
	 multiplication by a power of 2 with the recognizer for a
	 shift.  >=32 is not a valid shift for "asl", so we must try and
	 output a shift that produces the correct arithmetical result.
	 Using lsr #32 is identical except for the fact that the carry bit
	 is not set correctly if we set the flags; but we never use the
	 carry bit from such an operation, so we can ignore that.  */
      if (code == ROTATERT)
	*amountp &= 31;		/* Rotate is just modulo 32 */
      else if (*amountp != (*amountp & 31))
	{
	  if (code == ASHIFT)
	    mnem = "lsr";
	  *amountp = 32;
	}

      /* Shifts of 0 are no-ops. */
      if (*amountp == 0)
	return NULL;
    }

  return mnem;
}
cce8749e
CH
5151
5152
5153/* Obtain the shift from the POWER of two. */
5154
18af7313 5155static HOST_WIDE_INT
cce8749e 5156int_log2 (power)
f3bb6135 5157 HOST_WIDE_INT power;
cce8749e 5158{
f3bb6135 5159 HOST_WIDE_INT shift = 0;
cce8749e 5160
2b835d68 5161 while (((((HOST_WIDE_INT) 1) << shift) & power) == 0)
cce8749e
CH
5162 {
5163 if (shift > 31)
f3bb6135 5164 abort ();
cce8749e
CH
5165 shift++;
5166 }
f3bb6135
RE
5167
5168 return shift;
5169}
cce8749e 5170
cce8749e
CH
5171/* Output a .ascii pseudo-op, keeping track of lengths. This is because
5172 /bin/as is horribly restrictive. */
6cfc7210 5173#define MAX_ASCII_LEN 51
cce8749e
CH
5174
5175void
5176output_ascii_pseudo_op (stream, p, len)
62b10bbc
NC
5177 FILE * stream;
5178 unsigned char * p;
cce8749e
CH
5179 int len;
5180{
5181 int i;
6cfc7210 5182 int len_so_far = 0;
cce8749e 5183
6cfc7210
NC
5184 fputs ("\t.ascii\t\"", stream);
5185
cce8749e
CH
5186 for (i = 0; i < len; i++)
5187 {
5188 register int c = p[i];
5189
6cfc7210 5190 if (len_so_far >= MAX_ASCII_LEN)
cce8749e 5191 {
6cfc7210 5192 fputs ("\"\n\t.ascii\t\"", stream);
cce8749e 5193 len_so_far = 0;
cce8749e
CH
5194 }
5195
6cfc7210 5196 switch (c)
cce8749e 5197 {
6cfc7210
NC
5198 case TARGET_TAB:
5199 fputs ("\\t", stream);
5200 len_so_far += 2;
5201 break;
5202
5203 case TARGET_FF:
5204 fputs ("\\f", stream);
5205 len_so_far += 2;
5206 break;
5207
5208 case TARGET_BS:
5209 fputs ("\\b", stream);
5210 len_so_far += 2;
5211 break;
5212
5213 case TARGET_CR:
5214 fputs ("\\r", stream);
5215 len_so_far += 2;
5216 break;
5217
5218 case TARGET_NEWLINE:
5219 fputs ("\\n", stream);
5220 c = p [i + 1];
5221 if ((c >= ' ' && c <= '~')
5222 || c == TARGET_TAB)
5223 /* This is a good place for a line break. */
5224 len_so_far = MAX_ASCII_LEN;
5225 else
5226 len_so_far += 2;
5227 break;
5228
5229 case '\"':
5230 case '\\':
5231 putc ('\\', stream);
5232 len_so_far ++;
5233 /* drop through. */
f3bb6135 5234
6cfc7210
NC
5235 default:
5236 if (c >= ' ' && c <= '~')
5237 {
5238 putc (c, stream);
5239 len_so_far ++;
5240 }
5241 else
5242 {
5243 fprintf (stream, "\\%03o", c);
5244 len_so_far += 4;
5245 }
5246 break;
cce8749e 5247 }
cce8749e 5248 }
f3bb6135 5249
cce8749e 5250 fputs ("\"\n", stream);
f3bb6135 5251}
cce8749e 5252\f
ff9940b0
RE
5253
/* Try to determine whether a pattern really clobbers the link register.
   This information is useful when peepholing, so that lr need not be pushed
   if we combine a call followed by a return.
   NOTE: This code does not check for side-effect expressions in a SET_SRC:
   such a check should not be needed because these only update an existing
   value within a register; the register must still be set elsewhere within
   the function.

   Returns nonzero if the rtx X writes the link register.  */

static int
pattern_really_clobbers_lr (x)
     rtx x;
{
  int i;

  switch (GET_CODE (x))
    {
    case SET:
      switch (GET_CODE (SET_DEST (x)))
	{
	case REG:
	  return REGNO (SET_DEST (x)) == LR_REGNUM;

	case SUBREG:
	  /* A subreg of a register: check the underlying register.  */
	  if (GET_CODE (XEXP (SET_DEST (x), 0)) == REG)
	    return REGNO (XEXP (SET_DEST (x), 0)) == LR_REGNUM;

	  /* A subreg of memory stores to memory, never to lr.  */
	  if (GET_CODE (XEXP (SET_DEST (x), 0)) == MEM)
	    return 0;
	  abort ();

	default:
	  return 0;
	}

    case PARALLEL:
      /* Recurse into each element of the parallel.  */
      for (i = 0; i < XVECLEN (x, 0); i++)
	if (pattern_really_clobbers_lr (XVECEXP (x, 0, i)))
	  return 1;
      return 0;

    case CLOBBER:
      switch (GET_CODE (XEXP (x, 0)))
	{
	case REG:
	  return REGNO (XEXP (x, 0)) == LR_REGNUM;

	case SUBREG:
	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG)
	    return REGNO (XEXP (XEXP (x, 0), 0)) == LR_REGNUM;
	  abort ();

	default:
	  return 0;
	}

    case UNSPEC:
      /* We cannot see inside an unspec, so assume the worst.  */
      return 1;

    default:
      return 0;
    }
}
5316
/* Scan the insn chain starting at FIRST and return nonzero if any insn
   can clobber the link register.  Used by output_func_prologue to decide
   whether the push of lr can be eliminated.  Calls are allowed only when
   peepholing can turn call+return into a tail call (i.e. the call is
   immediately followed by a return, possibly with an intervening USE of
   the call's result).  */

static int
function_really_clobbers_lr (first)
     rtx first;
{
  rtx insn, next;

  for (insn = first; insn; insn = next_nonnote_insn (insn))
    {
      switch (GET_CODE (insn))
	{
	case BARRIER:
	case NOTE:
	case CODE_LABEL:
	case JUMP_INSN:		/* Jump insns only change the PC (and conds) */
	case INLINE_HEADER:
	  break;

	case INSN:
	  if (pattern_really_clobbers_lr (PATTERN (insn)))
	    return 1;
	  break;

	case CALL_INSN:
	  /* Don't yet know how to handle those calls that are not to a
	     SYMBOL_REF */
	  if (GET_CODE (PATTERN (insn)) != PARALLEL)
	    abort ();

	  /* The call may appear as a bare CALL or as a SET of the
	     result register; either way the callee must be a direct
	     SYMBOL_REF or we conservatively assume lr is clobbered.  */
	  switch (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)))
	    {
	    case CALL:
	      if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn), 0, 0), 0), 0))
		  != SYMBOL_REF)
		return 1;
	      break;

	    case SET:
	      if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn),
							  0, 0)), 0), 0))
		  != SYMBOL_REF)
		return 1;
	      break;

	    default:	/* Don't recognize it, be safe */
	      return 1;
	    }

	  /* A call can be made (by peepholing) not to clobber lr iff it is
	     followed by a return.  There may, however, be a use insn iff
	     we are returning the result of the call.
	     If we run off the end of the insn chain, then that means the
	     call was at the end of the function.  Unfortunately we don't
	     have a return insn for the peephole to recognize, so we
	     must reject this.  (Can this be fixed by adding our own insn?) */
	  if ((next = next_nonnote_insn (insn)) == NULL)
	    return 1;

	  /* No need to worry about lr if the call never returns */
	  if (GET_CODE (next) == BARRIER)
	    break;

	  /* Skip over a USE of the call's result register, if present.  */
	  if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == USE
	      && (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
	      && (REGNO (SET_DEST (XVECEXP (PATTERN (insn), 0, 0)))
		  == REGNO (XEXP (PATTERN (next), 0))))
	    if ((next = next_nonnote_insn (next)) == NULL)
	      return 1;

	  if (GET_CODE (next) == JUMP_INSN
	      && GET_CODE (PATTERN (next)) == RETURN)
	    break;
	  return 1;

	default:
	  abort ();
	}
    }

  /* We have reached the end of the chain so lr was _not_ clobbered */
  return 0;
}
5398
/* Generate the assembler text for a (possibly conditional) function
   return described by OPERAND.  REALLY_RETURN is zero when the "return"
   is really a tail call and lr must be preserved.  REVERSE selects the
   inverted condition code (%D0 instead of %d0).  Always returns "";
   the instructions are emitted directly with output_asm_insn.  */

char *
output_return_instruction (operand, really_return, reverse)
     rtx operand;
     int really_return;
     int reverse;
{
  char instr[100];
  int reg, live_regs = 0;
  int volatile_func = (optimize > 0
		       && TREE_THIS_VOLATILE (current_function_decl));

  return_used_this_function = 1;

  if (TARGET_ABORT_NORETURN && volatile_func)
    {
      rtx ops[2];
      /* If this function was declared non-returning, and we have found a tail
	 call, then we have to trust that the called function won't return. */
      if (! really_return)
	return "";

      /* Otherwise, trap an attempted return by aborting. */
      ops[0] = operand;
      ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
				   : "abort");
      assemble_external_libcall (ops[1]);
      output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
      return "";
    }

  if (current_function_calls_alloca && ! really_return)
    abort ();

  /* Count the call-saved registers that must be restored.  */
  for (reg = 0; reg <= 10; reg++)
    if (regs_ever_live[reg] && ! call_used_regs[reg])
      live_regs++;

  if (flag_pic && ! TARGET_SINGLE_PIC_BASE
      && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
    live_regs++;

  if (live_regs || (regs_ever_live[LR_REGNUM] && ! lr_save_eliminated))
    live_regs++;

  if (frame_pointer_needed)
    live_regs += 4;

  if (live_regs)
    {
      if (lr_save_eliminated || ! regs_ever_live[LR_REGNUM])
	live_regs++;

      /* Build the ldm instruction a piece at a time: opcode with the
	 (possibly reversed) condition, then the register list.  */
      if (frame_pointer_needed)
	strcpy (instr,
		reverse ? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
      else
	strcpy (instr,
		reverse ? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");

      for (reg = 0; reg <= 10; reg++)
	if (regs_ever_live[reg]
	    && (! call_used_regs[reg]
		|| (flag_pic && ! TARGET_SINGLE_PIC_BASE
		    && reg == PIC_OFFSET_TABLE_REGNUM)))
	  {
	    strcat (instr, "%|");
	    strcat (instr, reg_names[reg]);
	    if (--live_regs)
	      strcat (instr, ", ");
	  }

      if (frame_pointer_needed)
	{
	  /* Restore fp, sp, then either lr (interworking / tail call)
	     or pc (a direct return).  */
	  strcat (instr, "%|");
	  strcat (instr, reg_names[11]);
	  strcat (instr, ", ");
	  strcat (instr, "%|");
	  strcat (instr, reg_names[13]);
	  strcat (instr, ", ");
	  strcat (instr, "%|");
	  strcat (instr, TARGET_INTERWORK || (! really_return)
		  ? reg_names[LR_REGNUM] : reg_names[PC_REGNUM] );
	}
      else
	{
	  /* Interworking returns pop into ip and bx through it below;
	     otherwise pop straight into pc (return) or lr (tail call).  */
	  strcat (instr, "%|");
	  if (TARGET_INTERWORK && really_return)
	    strcat (instr, reg_names[IP_REGNUM]);
	  else
	    strcat (instr, really_return ? reg_names[PC_REGNUM] : reg_names[LR_REGNUM]);
	}
      /* "}^" also restores SPSR->CPSR for 26-bit mode returns.  */
      strcat (instr, (TARGET_APCS_32 || !really_return) ? "}" : "}^");
      output_asm_insn (instr, &operand);

      if (TARGET_INTERWORK && really_return)
	{
	  /* Branch-exchange so a Thumb caller gets its state back.  */
	  strcpy (instr, "bx%?");
	  strcat (instr, reverse ? "%D0" : "%d0");
	  strcat (instr, "\t%|");
	  strcat (instr, frame_pointer_needed ? "lr" : "ip");

	  output_asm_insn (instr, & operand);
	}
    }
  else if (really_return)
    {
      /* Nothing to pop: return directly through lr.  */
      if (TARGET_INTERWORK)
	sprintf (instr, "bx%%?%%%s0\t%%|lr", reverse ? "D" : "d");
      else
	sprintf (instr, "mov%%?%%%s0%s\t%%|pc, %%|lr",
		 reverse ? "D" : "d", TARGET_APCS_32 ? "" : "s");

      output_asm_insn (instr, & operand);
    }

  return "";
}
5516
e82ea128
DE
5517/* Return nonzero if optimizing and the current function is volatile.
5518 Such functions never return, and many memory cycles can be saved
5519 by not storing register values that will never be needed again.
5520 This optimization was added to speed up context switching in a
5521 kernel application. */
a0b2ce4c 5522
e2c671ba
RE
5523int
5524arm_volatile_func ()
5525{
5526 return (optimize > 0 && TREE_THIS_VOLATILE (current_function_decl));
5527}
5528
ef179a26
NC
5529/* Write the function name into the code section, directly preceding
5530 the function prologue.
5531
5532 Code will be output similar to this:
5533 t0
5534 .ascii "arm_poke_function_name", 0
5535 .align
5536 t1
5537 .word 0xff000000 + (t1 - t0)
5538 arm_poke_function_name
5539 mov ip, sp
5540 stmfd sp!, {fp, ip, lr, pc}
5541 sub fp, ip, #4
5542
5543 When performing a stack backtrace, code can inspect the value
5544 of 'pc' stored at 'fp' + 0. If the trace function then looks
5545 at location pc - 12 and the top 8 bits are set, then we know
5546 that there is a function name embedded immediately preceding this
5547 location and has length ((pc[-3]) & 0xff000000).
5548
5549 We assume that pc is declared as a pointer to an unsigned long.
5550
5551 It is of no benefit to output the function name if we are assembling
5552 a leaf function. These function types will not contain a stack
5553 backtrace structure, therefore it is not possible to determine the
5554 function name. */
5555
5556void
5557arm_poke_function_name (stream, name)
5558 FILE * stream;
5559 char * name;
5560{
5561 unsigned long alignlength;
5562 unsigned long length;
5563 rtx x;
5564
949d79eb
RE
5565 length = strlen (name) + 1;
5566 alignlength = (length + 3) & ~3;
ef179a26 5567
949d79eb 5568 ASM_OUTPUT_ASCII (stream, name, length);
ef179a26
NC
5569 ASM_OUTPUT_ALIGN (stream, 2);
5570 x = GEN_INT (0xff000000UL + alignlength);
5571 ASM_OUTPUT_INT (stream, x);
5572}
5573
ff9940b0
RE
/* The amount of stack adjustment that happens here, in output_return and in
   output_epilogue must be exactly the same as was calculated during reload,
   or things will point to the wrong place.  The only time we can safely
   ignore this constraint is when a function has no arguments on the stack,
   no stack frame requirement and no live registers execpt for `lr'.  If we
   can guarantee that by making all function calls into tail calls and that
   lr is not clobbered in any other way, then there is no need to push lr
   onto the stack.

   Emit the assembler comments describing the frame and decide whether
   the save of lr can be eliminated (setting lr_save_eliminated).  */

void
output_func_prologue (f, frame_size)
     FILE * f;
     int frame_size;
{
  int reg, live_regs_mask = 0;
  int volatile_func = (optimize > 0
		       && TREE_THIS_VOLATILE (current_function_decl));

  /* Nonzero if we must stuff some register arguments onto the stack as if
     they were passed there.  */
  int store_arg_regs = 0;

  if (arm_ccfsm_state || arm_target_insn)
    abort ();					/* Sanity check */

  /* Naked functions get no compiler-generated prologue at all.  */
  if (arm_naked_function_p (current_function_decl))
    return;

  return_used_this_function = 0;
  lr_save_eliminated = 0;

  asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %d\n",
	       current_function_args_size,
	       current_function_pretend_args_size, frame_size);
  asm_fprintf (f, "\t%@ frame_needed = %d, current_function_anonymous_args = %d\n",
	       frame_pointer_needed,
	       current_function_anonymous_args);

  if (volatile_func)
    asm_fprintf (f, "\t%@ Volatile function.\n");

  /* NOTE: store_arg_regs is computed here but only the parallel logic
     in arm_expand_prologue actually consumes the equivalent flag.  */
  if (current_function_anonymous_args && current_function_pretend_args_size)
    store_arg_regs = 1;

  /* Collect the call-saved registers that are live.  */
  for (reg = 0; reg <= 10; reg++)
    if (regs_ever_live[reg] && ! call_used_regs[reg])
      live_regs_mask |= (1 << reg);

  if (flag_pic && ! TARGET_SINGLE_PIC_BASE
      && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
    live_regs_mask |= (1 << PIC_OFFSET_TABLE_REGNUM);

  if (frame_pointer_needed)
    /* 0xD800 = fp, ip, lr and pc.  */
    live_regs_mask |= 0xD800;
  else if (regs_ever_live[LR_REGNUM])
    {
      /* lr need not be pushed when nothing else forces it: no stack
	 arguments and no insn actually clobbers lr.  */
      if (! current_function_args_size
	  && ! function_really_clobbers_lr (get_insns ()))
	lr_save_eliminated = 1;
      else
	live_regs_mask |= 1 << LR_REGNUM;
    }

  if (live_regs_mask)
    {
      /* if a di mode load/store multiple is used, and the base register
	 is r3, then r4 can become an ever live register without lr
	 doing so, in this case we need to push lr as well, or we
	 will fail to get a proper return. */

      live_regs_mask |= 1 << LR_REGNUM;
      lr_save_eliminated = 0;

    }

  if (lr_save_eliminated)
    asm_fprintf (f,"\t%@ I don't think this function clobbers lr\n");

#ifdef AOF_ASSEMBLER
  if (flag_pic)
    asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
#endif
}
cce8749e 5657
949d79eb
RE
/* Output the assembler for the function epilogue: restore the FP
   registers (ldfe/lfm), pop the saved integer registers and return.
   Must undo exactly the stack adjustment made by the prologue (see the
   comment above output_func_prologue).  Returns "".  */

char *
arm_output_epilogue ()
{
  int reg;
  int live_regs_mask = 0;
  /* If we need this, then it will always be at least this much */
  int floats_offset = 12;
  rtx operands[3];
  int frame_size = get_frame_size ();
  FILE *f = asm_out_file;
  int volatile_func = (optimize > 0
		       && TREE_THIS_VOLATILE (current_function_decl));

  /* If a return insn was already emitted, nothing is left to do.  */
  if (use_return_insn (FALSE) && return_used_this_function)
    return "";

  /* Naked functions don't have epilogues. */
  if (arm_naked_function_p (current_function_decl))
    return "";

  /* A volatile function should never return.  Call abort. */
  if (TARGET_ABORT_NORETURN && volatile_func)
    {
      rtx op;
      op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
      assemble_external_libcall (op);
      output_asm_insn ("bl\t%a0", &op);
      return "";
    }

  /* Collect the saved integer registers; each one pushes the FP save
     area 4 bytes further from the frame pointer.  */
  for (reg = 0; reg <= 10; reg++)
    if (regs_ever_live[reg] && ! call_used_regs[reg])
      {
	live_regs_mask |= (1 << reg);
	floats_offset += 4;
      }

  /* If we aren't loading the PIC register, don't stack it even though it may
     be live. */
  if (flag_pic && ! TARGET_SINGLE_PIC_BASE
      && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
    {
      live_regs_mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
      floats_offset += 4;
    }

  if (frame_pointer_needed)
    {
      /* FP-relative epilogue: reload the float regs from below the
	 frame pointer, then ldmea through fp.  */
      if (arm_fpu_arch == FP_SOFT2)
	{
	  for (reg = 23; reg > 15; reg--)
	    if (regs_ever_live[reg] && ! call_used_regs[reg])
	      {
		floats_offset += 12;
		asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
			     reg, FP_REGNUM, floats_offset);
	      }
	}
      else
	{
	  int start_reg = 23;

	  for (reg = 23; reg > 15; reg--)
	    {
	      if (regs_ever_live[reg] && ! call_used_regs[reg])
		{
		  floats_offset += 12;

		  /* We can't unstack more than four registers at once */
		  if (start_reg - reg == 3)
		    {
		      asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
				   reg, FP_REGNUM, floats_offset);
		      start_reg = reg - 1;
		    }
		}
	      else
		{
		  if (reg != start_reg)
		    asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
				 reg + 1, start_reg - reg,
				 FP_REGNUM, floats_offset);
		  start_reg = reg - 1;
		}
	    }

	  /* Just in case the last register checked also needs unstacking. */
	  if (reg != start_reg)
	    asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
			 reg + 1, start_reg - reg,
			 FP_REGNUM, floats_offset);
	}

      if (TARGET_INTERWORK)
	{
	  /* 0x6800 = fp, sp, lr; return via bx for interworking.  */
	  live_regs_mask |= 0x6800;
	  print_multi_reg (f, "ldmea\t%r", FP_REGNUM, live_regs_mask, FALSE);
	  asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
	}
      else
	{
	  /* 0xA800 = fp, sp, pc; pop straight into pc to return.  */
	  live_regs_mask |= 0xA800;
	  print_multi_reg (f, "ldmea\t%r", FP_REGNUM, live_regs_mask,
			   TARGET_APCS_32 ? FALSE : TRUE);
	}
    }
  else
    {
      /* Restore stack pointer if necessary. */
      if (frame_size + current_function_outgoing_args_size != 0)
	{
	  operands[0] = operands[1] = stack_pointer_rtx;
	  operands[2] = GEN_INT (frame_size
				 + current_function_outgoing_args_size);
	  output_add_immediate (operands);
	}

      /* SP-relative epilogue: pop the float regs with post-increment.  */
      if (arm_fpu_arch == FP_SOFT2)
	{
	  for (reg = 16; reg < 24; reg++)
	    if (regs_ever_live[reg] && ! call_used_regs[reg])
	      asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
			   reg, SP_REGNUM);
	}
      else
	{
	  int start_reg = 16;

	  for (reg = 16; reg < 24; reg++)
	    {
	      if (regs_ever_live[reg] && ! call_used_regs[reg])
		{
		  if (reg - start_reg == 3)
		    {
		      asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
				   start_reg, SP_REGNUM);
		      start_reg = reg + 1;
		    }
		}
	      else
		{
		  if (reg != start_reg)
		    asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
				 start_reg, reg - start_reg,
				 SP_REGNUM);

		  start_reg = reg + 1;
		}
	    }

	  /* Just in case the last register checked also needs unstacking. */
	  if (reg != start_reg)
	    asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
			 start_reg, reg - start_reg, SP_REGNUM);
	}

      if (current_function_pretend_args_size == 0 && regs_ever_live[LR_REGNUM])
	{
	  /* No pre-pushed arguments to unwind: the pop can return
	     directly (pc in the register list, or bx lr).  */
	  if (TARGET_INTERWORK)
	    {
	      if (! lr_save_eliminated)
		live_regs_mask |= 1 << LR_REGNUM;

	      if (live_regs_mask != 0)
		print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, live_regs_mask, FALSE);

	      asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
	    }
	  else if (lr_save_eliminated)
	    asm_fprintf (f, "\tmov%c\t%r, %r\n",
			 TARGET_APCS_32 ? ' ' : 's',
			 PC_REGNUM, LR_REGNUM);
	  else
	    print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, live_regs_mask | 0x8000,
			     TARGET_APCS_32 ? FALSE : TRUE);
	}
      else
	{
	  if (live_regs_mask || regs_ever_live[LR_REGNUM])
	    {
	      /* Restore the integer regs, and the return address into lr */
	      if (! lr_save_eliminated)
		live_regs_mask |= 1 << LR_REGNUM;

	      if (live_regs_mask != 0)
		print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, live_regs_mask, FALSE);
	    }

	  if (current_function_pretend_args_size)
	    {
	      /* Unwind the pre-pushed regs */
	      operands[0] = operands[1] = stack_pointer_rtx;
	      operands[2] = GEN_INT (current_function_pretend_args_size);
	      output_add_immediate (operands);
	    }
	  /* And finally, go home */
	  if (TARGET_INTERWORK)
	    asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
	  else if (TARGET_APCS_32)
	    asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
	  else
	    asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
	}
    }

  return "";
}
5865
5866void
5867output_func_epilogue (f, frame_size)
5868 FILE *f ATTRIBUTE_UNUSED;
5869 int frame_size;
5870{
5871 if (use_return_insn (FALSE) && return_used_this_function
5872 && (frame_size + current_function_outgoing_args_size) != 0
5873 && ! (frame_pointer_needed && TARGET_APCS))
5874 abort ();
f3bb6135 5875
4b632bf1 5876 /* Reset the ARM-specific per-function variables. */
cce8749e 5877 current_function_anonymous_args = 0;
4b632bf1 5878 after_arm_reorg = 0;
f3bb6135 5879}
e2c671ba
RE
5880
/* Emit a PARALLEL representing a push (stmfd) of every register whose
   bit is set in MASK.  Element 0 is a SET of a pre-decremented stack
   slot from an UNSPEC (2) wrapping the lowest-numbered register; the
   remaining registers appear as USEs.  This exact shape must match the
   store-multiple pattern in the machine description.  */

static void
emit_multi_reg_push (mask)
     int mask;
{
  int num_regs = 0;
  int i, j;
  rtx par;

  /* Count the registers to be pushed.  */
  for (i = 0; i < 16; i++)
    if (mask & (1 << i))
      num_regs++;

  if (num_regs == 0 || num_regs > 16)
    abort ();

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));

  /* The first set register supplies the SET element.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	{
	  XVECEXP (par, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   gen_rtx_MEM (BLKmode,
					gen_rtx_PRE_DEC (BLKmode,
							 stack_pointer_rtx)),
			   gen_rtx_UNSPEC (BLKmode,
					   gen_rtvec (1,
						      gen_rtx_REG (SImode, i)),
					   2));
	  break;
	}
    }

  /* The remaining registers are recorded as USEs; I continues from the
     register after the one consumed above.  */
  for (j = 1, i++; j < num_regs; i++)
    {
      if (mask & (1 << i))
	{
	  XVECEXP (par, 0, j)
	    = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, i));
	  j++;
	}
    }

  emit_insn (par);
}
5927
/* Emit a PARALLEL representing an sfm (store floating multiple) of
   COUNT consecutive FP registers starting at BASE_REG onto the stack.
   The first register forms a SET of a pre-decremented stack slot via
   UNSPEC (2); the rest are USEs — the same shape emit_multi_reg_push
   uses, so the machine-description pattern can recognise it.  */

static void
emit_sfm (base_reg, count)
     int base_reg;
     int count;
{
  rtx par;
  int i;

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));

  XVECEXP (par, 0, 0)
    = gen_rtx_SET (VOIDmode,
		   gen_rtx_MEM (BLKmode,
				gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
		   gen_rtx_UNSPEC (BLKmode,
				   gen_rtvec (1, gen_rtx_REG (XFmode,
							      base_reg++)),
				   2));
  for (i = 1; i < count; i++)
    XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode,
				       gen_rtx_REG (XFmode, base_reg++));

  emit_insn (par);
}
5952
/* Expand the function prologue as RTL: save ip, push pretend args,
   push the live integer registers (plus lr), save the FP registers,
   establish the frame pointer and allocate the local frame.  The
   adjustments must agree exactly with output_func_prologue and the
   epilogue code.  */

void
arm_expand_prologue ()
{
  int reg;
  /* Negative size of the local frame plus outgoing argument area.  */
  rtx amount = GEN_INT (-(get_frame_size ()
			  + current_function_outgoing_args_size));
  int live_regs_mask = 0;
  int store_arg_regs = 0;
  /* If this function doesn't return, then there is no need to push
     the call-saved regs. */
  int volatile_func = (optimize > 0
		       && TREE_THIS_VOLATILE (current_function_decl));

  /* Naked functions don't have prologues. */
  if (arm_naked_function_p (current_function_decl))
    return;

  if (current_function_anonymous_args && current_function_pretend_args_size)
    store_arg_regs = 1;

  if (! volatile_func)
    {
      /* Collect the call-saved registers that must be pushed.  */
      for (reg = 0; reg <= 10; reg++)
	if (regs_ever_live[reg] && ! call_used_regs[reg])
	  live_regs_mask |= 1 << reg;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
	live_regs_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;

      if (regs_ever_live[LR_REGNUM])
	live_regs_mask |= 1 << LR_REGNUM;
    }

  if (frame_pointer_needed)
    {
      /* 0xD800 = fp, ip, lr and pc; ip holds the incoming sp so that
	 fp can be set up after the push.  */
      live_regs_mask |= 0xD800;
      emit_insn (gen_movsi (gen_rtx_REG (SImode, IP_REGNUM),
			    stack_pointer_rtx));
    }

  if (current_function_pretend_args_size)
    {
      /* Either push the unnamed argument registers (r0-r3 as needed)
	 or just reserve space for them.  */
      if (store_arg_regs)
	emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size / 4))
			     & 0xf);
      else
	emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			       GEN_INT (-current_function_pretend_args_size)));
    }

  if (live_regs_mask)
    {
      /* If we have to push any regs, then we must push lr as well, or
	 we won't get a proper return. */
      live_regs_mask |= 1 << LR_REGNUM;
      emit_multi_reg_push (live_regs_mask);
    }

  /* For now the integer regs are still pushed in output_func_epilogue ().  */

  if (! volatile_func)
    {
      /* Save the call-saved FP registers (f4-f7, i.e. 16-23 range).  */
      if (arm_fpu_arch == FP_SOFT2)
	{
	  for (reg = 23; reg > 15; reg--)
	    if (regs_ever_live[reg] && ! call_used_regs[reg])
	      emit_insn (gen_rtx_SET
			 (VOIDmode,
			  gen_rtx_MEM (XFmode,
				       gen_rtx_PRE_DEC (XFmode,
							stack_pointer_rtx)),
			  gen_rtx_REG (XFmode, reg)));
	}
      else
	{
	  int start_reg = 23;

	  /* Batch consecutive live FP registers into sfm instructions
	     of at most four registers.  */
	  for (reg = 23; reg > 15; reg--)
	    {
	      if (regs_ever_live[reg] && ! call_used_regs[reg])
		{
		  if (start_reg - reg == 3)
		    {
		      emit_sfm (reg, 4);
		      start_reg = reg - 1;
		    }
		}
	      else
		{
		  if (start_reg != reg)
		    emit_sfm (reg + 1, start_reg - reg);
		  start_reg = reg - 1;
		}
	    }

	  if (start_reg != reg)
	    emit_sfm (reg + 1, start_reg - reg);
	}
    }

  if (frame_pointer_needed)
    /* fp = ip - 4 - pretend size (ip held the pre-push sp).  */
    emit_insn (gen_addsi3 (hard_frame_pointer_rtx, gen_rtx_REG (SImode, IP_REGNUM),
			   (GEN_INT
			    (-(4 + current_function_pretend_args_size)))));

  if (amount != const0_rtx)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, amount));
      /* Clobber the frame memory so the scheduler cannot move stores
	 to the frame across the stack adjustment.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode,
				  gen_rtx_MEM (BLKmode, stack_pointer_rtx)));
    }

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  Similarly if the user has requested no
     scheduling in the prolog.  */
  if (profile_flag || profile_block_flag || TARGET_NO_SCHED_PRO)
    emit_insn (gen_blockage ());
}
6071
cce8749e 6072\f
9997d19d
RE
6073/* If CODE is 'd', then the X is a condition operand and the instruction
6074 should only be executed if the condition is true.
ddd5a7c1 6075 if CODE is 'D', then the X is a condition operand and the instruction
9997d19d
RE
6076 should only be executed if the condition is false: however, if the mode
6077 of the comparison is CCFPEmode, then always execute the instruction -- we
6078 do this because in these circumstances !GE does not necessarily imply LT;
6079 in these cases the instruction pattern will take care to make sure that
6080 an instruction containing %d will follow, thereby undoing the effects of
ddd5a7c1 6081 doing this instruction unconditionally.
9997d19d
RE
6082 If CODE is 'N' then X is a floating point operand that must be negated
6083 before output.
6084 If CODE is 'B' then output a bitwise inverted value of X (a const int).
6085 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
6086
6087void
6088arm_print_operand (stream, x, code)
62b10bbc 6089 FILE * stream;
9997d19d
RE
6090 rtx x;
6091 int code;
6092{
6093 switch (code)
6094 {
6095 case '@':
f3139301 6096 fputs (ASM_COMMENT_START, stream);
9997d19d
RE
6097 return;
6098
6099 case '|':
f3139301 6100 fputs (REGISTER_PREFIX, stream);
9997d19d
RE
6101 return;
6102
6103 case '?':
6104 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
6105 fputs (arm_condition_codes[arm_current_cc], stream);
6106 return;
6107
6108 case 'N':
6109 {
6110 REAL_VALUE_TYPE r;
6111 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6112 r = REAL_VALUE_NEGATE (r);
6113 fprintf (stream, "%s", fp_const_from_val (&r));
6114 }
6115 return;
6116
6117 case 'B':
6118 if (GET_CODE (x) == CONST_INT)
4bc74ece
NC
6119 {
6120 HOST_WIDE_INT val;
6121 val = ARM_SIGN_EXTEND (~ INTVAL (x));
36ba9cb8 6122 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
4bc74ece 6123 }
9997d19d
RE
6124 else
6125 {
6126 putc ('~', stream);
6127 output_addr_const (stream, x);
6128 }
6129 return;
6130
6131 case 'i':
6132 fprintf (stream, "%s", arithmetic_instr (x, 1));
6133 return;
6134
6135 case 'I':
6136 fprintf (stream, "%s", arithmetic_instr (x, 0));
6137 return;
6138
6139 case 'S':
6140 {
6141 HOST_WIDE_INT val;
4bc74ece 6142 char * shift = shift_op (x, & val);
9997d19d 6143
e2c671ba
RE
6144 if (shift)
6145 {
4bc74ece 6146 fprintf (stream, ", %s ", shift_op (x, & val));
e2c671ba
RE
6147 if (val == -1)
6148 arm_print_operand (stream, XEXP (x, 1), 0);
6149 else
4bc74ece
NC
6150 {
6151 fputc ('#', stream);
36ba9cb8 6152 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
4bc74ece 6153 }
e2c671ba 6154 }
9997d19d
RE
6155 }
6156 return;
6157
c1c2bc04
RE
6158 case 'Q':
6159 if (REGNO (x) > 15)
6160 abort ();
6161 fputs (REGISTER_PREFIX, stream);
6162 fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], stream);
6163 return;
6164
9997d19d
RE
6165 case 'R':
6166 if (REGNO (x) > 15)
6167 abort ();
f3139301 6168 fputs (REGISTER_PREFIX, stream);
c1c2bc04 6169 fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], stream);
9997d19d
RE
6170 return;
6171
6172 case 'm':
f3139301 6173 fputs (REGISTER_PREFIX, stream);
9997d19d
RE
6174 if (GET_CODE (XEXP (x, 0)) == REG)
6175 fputs (reg_names[REGNO (XEXP (x, 0))], stream);
6176 else
6177 fputs (reg_names[REGNO (XEXP (XEXP (x, 0), 0))], stream);
6178 return;
6179
6180 case 'M':
dd18ae56
NC
6181 asm_fprintf (stream, "{%r-%r}",
6182 REGNO (x), REGNO (x) + NUM_REGS (GET_MODE (x)) - 1);
9997d19d
RE
6183 return;
6184
6185 case 'd':
6186 if (x)
6187 fputs (arm_condition_codes[get_arm_condition_code (x)],
6188 stream);
6189 return;
6190
6191 case 'D':
84ed5e79 6192 if (x)
9997d19d
RE
6193 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
6194 (get_arm_condition_code (x))],
6195 stream);
6196 return;
6197
6198 default:
6199 if (x == 0)
6200 abort ();
6201
6202 if (GET_CODE (x) == REG)
1d5473cb 6203 {
f3139301 6204 fputs (REGISTER_PREFIX, stream);
1d5473cb
RE
6205 fputs (reg_names[REGNO (x)], stream);
6206 }
9997d19d
RE
6207 else if (GET_CODE (x) == MEM)
6208 {
6209 output_memory_reference_mode = GET_MODE (x);
6210 output_address (XEXP (x, 0));
6211 }
6212 else if (GET_CODE (x) == CONST_DOUBLE)
6213 fprintf (stream, "#%s", fp_immediate_constant (x));
6214 else if (GET_CODE (x) == NEG)
6215 abort (); /* This should never happen now. */
6216 else
6217 {
6218 fputc ('#', stream);
6219 output_addr_const (stream, x);
6220 }
6221 }
6222}
cce8749e
CH
6223\f
/* A finite state machine takes care of noticing whether or not instructions
   can be conditionally executed, and thus decrease execution time and code
   size by deleting branch instructions.  The fsm is controlled by
   final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE.  */

/* The state of the fsm controlling condition codes are:
   0: normal, do nothing special
   1: make ASM_OUTPUT_OPCODE not output this instruction
   2: make ASM_OUTPUT_OPCODE not output this instruction
   3: make instructions conditional
   4: make instructions conditional

   State transitions (state->state by whom under condition):
   0 -> 1 final_prescan_insn if the `target' is a label
   0 -> 2 final_prescan_insn if the `target' is an unconditional branch
   1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
   2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
   3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
          (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
   4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
          (the target insn is arm_target_insn).

   If the jump clobbers the conditions then we use states 2 and 4.

   A similar thing can be done with conditional return insns.

   XXX In case the `target' is an unconditional branch, this conditionalising
   of the instructions always reduces code size, but not always execution
   time.  But then, I want to reduce the code size to somewhere near what
   /bin/cc produces.  */
6254
cce8749e
CH
6255/* Returns the index of the ARM condition code string in
6256 `arm_condition_codes'. COMPARISON should be an rtx like
6257 `(eq (...) (...))'. */
6258
84ed5e79 6259static enum arm_cond_code
cce8749e
CH
6260get_arm_condition_code (comparison)
6261 rtx comparison;
6262{
5165176d 6263 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
84ed5e79
RE
6264 register int code;
6265 register enum rtx_code comp_code = GET_CODE (comparison);
5165176d
RE
6266
6267 if (GET_MODE_CLASS (mode) != MODE_CC)
84ed5e79 6268 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
5165176d
RE
6269 XEXP (comparison, 1));
6270
6271 switch (mode)
cce8749e 6272 {
84ed5e79
RE
6273 case CC_DNEmode: code = ARM_NE; goto dominance;
6274 case CC_DEQmode: code = ARM_EQ; goto dominance;
6275 case CC_DGEmode: code = ARM_GE; goto dominance;
6276 case CC_DGTmode: code = ARM_GT; goto dominance;
6277 case CC_DLEmode: code = ARM_LE; goto dominance;
6278 case CC_DLTmode: code = ARM_LT; goto dominance;
6279 case CC_DGEUmode: code = ARM_CS; goto dominance;
6280 case CC_DGTUmode: code = ARM_HI; goto dominance;
6281 case CC_DLEUmode: code = ARM_LS; goto dominance;
6282 case CC_DLTUmode: code = ARM_CC;
6283
6284 dominance:
6285 if (comp_code != EQ && comp_code != NE)
6286 abort ();
6287
6288 if (comp_code == EQ)
6289 return ARM_INVERSE_CONDITION_CODE (code);
6290 return code;
6291
5165176d 6292 case CC_NOOVmode:
84ed5e79 6293 switch (comp_code)
5165176d 6294 {
84ed5e79
RE
6295 case NE: return ARM_NE;
6296 case EQ: return ARM_EQ;
6297 case GE: return ARM_PL;
6298 case LT: return ARM_MI;
5165176d
RE
6299 default: abort ();
6300 }
6301
6302 case CC_Zmode:
6303 case CCFPmode:
84ed5e79 6304 switch (comp_code)
5165176d 6305 {
84ed5e79
RE
6306 case NE: return ARM_NE;
6307 case EQ: return ARM_EQ;
5165176d
RE
6308 default: abort ();
6309 }
6310
6311 case CCFPEmode:
84ed5e79
RE
6312 switch (comp_code)
6313 {
6314 case GE: return ARM_GE;
6315 case GT: return ARM_GT;
6316 case LE: return ARM_LS;
6317 case LT: return ARM_MI;
6318 default: abort ();
6319 }
6320
6321 case CC_SWPmode:
6322 switch (comp_code)
6323 {
6324 case NE: return ARM_NE;
6325 case EQ: return ARM_EQ;
6326 case GE: return ARM_LE;
6327 case GT: return ARM_LT;
6328 case LE: return ARM_GE;
6329 case LT: return ARM_GT;
6330 case GEU: return ARM_LS;
6331 case GTU: return ARM_CC;
6332 case LEU: return ARM_CS;
6333 case LTU: return ARM_HI;
6334 default: abort ();
6335 }
6336
bd9c7e23
RE
6337 case CC_Cmode:
6338 switch (comp_code)
6339 {
6340 case LTU: return ARM_CS;
6341 case GEU: return ARM_CC;
6342 default: abort ();
6343 }
6344
5165176d 6345 case CCmode:
84ed5e79 6346 switch (comp_code)
5165176d 6347 {
84ed5e79
RE
6348 case NE: return ARM_NE;
6349 case EQ: return ARM_EQ;
6350 case GE: return ARM_GE;
6351 case GT: return ARM_GT;
6352 case LE: return ARM_LE;
6353 case LT: return ARM_LT;
6354 case GEU: return ARM_CS;
6355 case GTU: return ARM_HI;
6356 case LEU: return ARM_LS;
6357 case LTU: return ARM_CC;
5165176d
RE
6358 default: abort ();
6359 }
6360
cce8749e
CH
6361 default: abort ();
6362 }
84ed5e79
RE
6363
6364 abort ();
f3bb6135 6365}
cce8749e
CH
6366
6367
6368void
74bbc178 6369arm_final_prescan_insn (insn)
cce8749e 6370 rtx insn;
cce8749e
CH
6371{
6372 /* BODY will hold the body of INSN. */
6373 register rtx body = PATTERN (insn);
6374
6375 /* This will be 1 if trying to repeat the trick, and things need to be
6376 reversed if it appears to fail. */
6377 int reverse = 0;
6378
ff9940b0
RE
6379 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
6380 taken are clobbered, even if the rtl suggests otherwise. It also
6381 means that we have to grub around within the jump expression to find
6382 out what the conditions are when the jump isn't taken. */
6383 int jump_clobbers = 0;
6384
6385 /* If we start with a return insn, we only succeed if we find another one. */
6386 int seeking_return = 0;
6387
cce8749e
CH
6388 /* START_INSN will hold the insn from where we start looking. This is the
6389 first insn after the following code_label if REVERSE is true. */
6390 rtx start_insn = insn;
6391
6392 /* If in state 4, check if the target branch is reached, in order to
6393 change back to state 0. */
6394 if (arm_ccfsm_state == 4)
6395 {
6396 if (insn == arm_target_insn)
f5a1b0d2
NC
6397 {
6398 arm_target_insn = NULL;
6399 arm_ccfsm_state = 0;
6400 }
cce8749e
CH
6401 return;
6402 }
6403
6404 /* If in state 3, it is possible to repeat the trick, if this insn is an
6405 unconditional branch to a label, and immediately following this branch
6406 is the previous target label which is only used once, and the label this
6407 branch jumps to is not too far off. */
6408 if (arm_ccfsm_state == 3)
6409 {
6410 if (simplejump_p (insn))
6411 {
6412 start_insn = next_nonnote_insn (start_insn);
6413 if (GET_CODE (start_insn) == BARRIER)
6414 {
6415 /* XXX Isn't this always a barrier? */
6416 start_insn = next_nonnote_insn (start_insn);
6417 }
6418 if (GET_CODE (start_insn) == CODE_LABEL
6419 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
6420 && LABEL_NUSES (start_insn) == 1)
6421 reverse = TRUE;
6422 else
6423 return;
6424 }
ff9940b0
RE
6425 else if (GET_CODE (body) == RETURN)
6426 {
6427 start_insn = next_nonnote_insn (start_insn);
6428 if (GET_CODE (start_insn) == BARRIER)
6429 start_insn = next_nonnote_insn (start_insn);
6430 if (GET_CODE (start_insn) == CODE_LABEL
6431 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
6432 && LABEL_NUSES (start_insn) == 1)
6433 {
6434 reverse = TRUE;
6435 seeking_return = 1;
6436 }
6437 else
6438 return;
6439 }
cce8749e
CH
6440 else
6441 return;
6442 }
6443
6444 if (arm_ccfsm_state != 0 && !reverse)
6445 abort ();
6446 if (GET_CODE (insn) != JUMP_INSN)
6447 return;
6448
ddd5a7c1 6449 /* This jump might be paralleled with a clobber of the condition codes
ff9940b0
RE
6450 the jump should always come first */
6451 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
6452 body = XVECEXP (body, 0, 0);
6453
6454#if 0
6455 /* If this is a conditional return then we don't want to know */
6456 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
6457 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
6458 && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN
6459 || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN))
6460 return;
6461#endif
6462
cce8749e
CH
6463 if (reverse
6464 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
6465 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
6466 {
bd9c7e23
RE
6467 int insns_skipped;
6468 int fail = FALSE, succeed = FALSE;
cce8749e
CH
6469 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
6470 int then_not_else = TRUE;
ff9940b0 6471 rtx this_insn = start_insn, label = 0;
cce8749e 6472
ff9940b0 6473 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
5bbe2d40
RE
6474 {
6475 /* The code below is wrong for these, and I haven't time to
6476 fix it now. So we just do the safe thing and return. This
6477 whole function needs re-writing anyway. */
6478 jump_clobbers = 1;
6479 return;
6480 }
ff9940b0 6481
cce8749e
CH
6482 /* Register the insn jumped to. */
6483 if (reverse)
ff9940b0
RE
6484 {
6485 if (!seeking_return)
6486 label = XEXP (SET_SRC (body), 0);
6487 }
cce8749e
CH
6488 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
6489 label = XEXP (XEXP (SET_SRC (body), 1), 0);
6490 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
6491 {
6492 label = XEXP (XEXP (SET_SRC (body), 2), 0);
6493 then_not_else = FALSE;
6494 }
ff9940b0
RE
6495 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
6496 seeking_return = 1;
6497 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
6498 {
6499 seeking_return = 1;
6500 then_not_else = FALSE;
6501 }
cce8749e
CH
6502 else
6503 abort ();
6504
6505 /* See how many insns this branch skips, and what kind of insns. If all
6506 insns are okay, and the label or unconditional branch to the same
6507 label is not too far away, succeed. */
6508 for (insns_skipped = 0;
b36ba79f 6509 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
cce8749e
CH
6510 {
6511 rtx scanbody;
6512
6513 this_insn = next_nonnote_insn (this_insn);
6514 if (!this_insn)
6515 break;
6516
cce8749e
CH
6517 switch (GET_CODE (this_insn))
6518 {
6519 case CODE_LABEL:
6520 /* Succeed if it is the target label, otherwise fail since
6521 control falls in from somewhere else. */
6522 if (this_insn == label)
6523 {
ff9940b0
RE
6524 if (jump_clobbers)
6525 {
6526 arm_ccfsm_state = 2;
6527 this_insn = next_nonnote_insn (this_insn);
6528 }
6529 else
6530 arm_ccfsm_state = 1;
cce8749e
CH
6531 succeed = TRUE;
6532 }
6533 else
6534 fail = TRUE;
6535 break;
6536
ff9940b0 6537 case BARRIER:
cce8749e 6538 /* Succeed if the following insn is the target label.
ff9940b0
RE
6539 Otherwise fail.
6540 If return insns are used then the last insn in a function
6541 will be a barrier. */
cce8749e 6542 this_insn = next_nonnote_insn (this_insn);
ff9940b0 6543 if (this_insn && this_insn == label)
cce8749e 6544 {
ff9940b0
RE
6545 if (jump_clobbers)
6546 {
6547 arm_ccfsm_state = 2;
6548 this_insn = next_nonnote_insn (this_insn);
6549 }
6550 else
6551 arm_ccfsm_state = 1;
cce8749e
CH
6552 succeed = TRUE;
6553 }
6554 else
6555 fail = TRUE;
6556 break;
6557
ff9940b0 6558 case CALL_INSN:
2b835d68
RE
6559 /* If using 32-bit addresses the cc is not preserved over
6560 calls */
6561 if (TARGET_APCS_32)
bd9c7e23
RE
6562 {
6563 /* Succeed if the following insn is the target label,
6564 or if the following two insns are a barrier and
6565 the target label. */
6566 this_insn = next_nonnote_insn (this_insn);
6567 if (this_insn && GET_CODE (this_insn) == BARRIER)
6568 this_insn = next_nonnote_insn (this_insn);
6569
6570 if (this_insn && this_insn == label
b36ba79f 6571 && insns_skipped < max_insns_skipped)
bd9c7e23
RE
6572 {
6573 if (jump_clobbers)
6574 {
6575 arm_ccfsm_state = 2;
6576 this_insn = next_nonnote_insn (this_insn);
6577 }
6578 else
6579 arm_ccfsm_state = 1;
6580 succeed = TRUE;
6581 }
6582 else
6583 fail = TRUE;
6584 }
ff9940b0 6585 break;
2b835d68 6586
cce8749e
CH
6587 case JUMP_INSN:
6588 /* If this is an unconditional branch to the same label, succeed.
6589 If it is to another label, do nothing. If it is conditional,
6590 fail. */
ed4c4348 6591 /* XXX Probably, the tests for SET and the PC are unnecessary. */
cce8749e 6592
ed4c4348 6593 scanbody = PATTERN (this_insn);
ff9940b0
RE
6594 if (GET_CODE (scanbody) == SET
6595 && GET_CODE (SET_DEST (scanbody)) == PC)
cce8749e
CH
6596 {
6597 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
6598 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
6599 {
6600 arm_ccfsm_state = 2;
6601 succeed = TRUE;
6602 }
6603 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
6604 fail = TRUE;
6605 }
b36ba79f
RE
6606 /* Fail if a conditional return is undesirable (eg on a
6607 StrongARM), but still allow this if optimizing for size. */
6608 else if (GET_CODE (scanbody) == RETURN
6609 && ! use_return_insn (TRUE)
6610 && ! optimize_size)
6611 fail = TRUE;
ff9940b0
RE
6612 else if (GET_CODE (scanbody) == RETURN
6613 && seeking_return)
6614 {
6615 arm_ccfsm_state = 2;
6616 succeed = TRUE;
6617 }
6618 else if (GET_CODE (scanbody) == PARALLEL)
6619 {
6620 switch (get_attr_conds (this_insn))
6621 {
6622 case CONDS_NOCOND:
6623 break;
6624 default:
6625 fail = TRUE;
6626 break;
6627 }
6628 }
cce8749e
CH
6629 break;
6630
6631 case INSN:
ff9940b0
RE
6632 /* Instructions using or affecting the condition codes make it
6633 fail. */
ed4c4348 6634 scanbody = PATTERN (this_insn);
74641843
RE
6635 if (! (GET_CODE (scanbody) == SET
6636 || GET_CODE (scanbody) == PARALLEL)
6637 || get_attr_conds (this_insn) != CONDS_NOCOND)
cce8749e
CH
6638 fail = TRUE;
6639 break;
6640
6641 default:
6642 break;
6643 }
6644 }
6645 if (succeed)
6646 {
ff9940b0 6647 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
cce8749e 6648 arm_target_label = CODE_LABEL_NUMBER (label);
ff9940b0
RE
6649 else if (seeking_return || arm_ccfsm_state == 2)
6650 {
6651 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
6652 {
6653 this_insn = next_nonnote_insn (this_insn);
6654 if (this_insn && (GET_CODE (this_insn) == BARRIER
6655 || GET_CODE (this_insn) == CODE_LABEL))
6656 abort ();
6657 }
6658 if (!this_insn)
6659 {
6660 /* Oh, dear! we ran off the end.. give up */
6661 recog (PATTERN (insn), insn, NULL_PTR);
6662 arm_ccfsm_state = 0;
abaa26e5 6663 arm_target_insn = NULL;
ff9940b0
RE
6664 return;
6665 }
6666 arm_target_insn = this_insn;
6667 }
cce8749e
CH
6668 else
6669 abort ();
ff9940b0
RE
6670 if (jump_clobbers)
6671 {
6672 if (reverse)
6673 abort ();
6674 arm_current_cc =
6675 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
6676 0), 0), 1));
6677 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
6678 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
6679 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
6680 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
6681 }
6682 else
6683 {
6684 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
6685 what it was. */
6686 if (!reverse)
6687 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
6688 0));
6689 }
cce8749e 6690
cce8749e
CH
6691 if (reverse || then_not_else)
6692 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
6693 }
ff9940b0
RE
6694 /* restore recog_operand (getting the attributes of other insns can
6695 destroy this array, but final.c assumes that it remains intact
ddd5a7c1 6696 across this call; since the insn has been recognized already we
ff9940b0
RE
6697 call recog direct). */
6698 recog (PATTERN (insn), insn, NULL_PTR);
cce8749e 6699 }
f3bb6135 6700}
cce8749e 6701
2b835d68
RE
6702#ifdef AOF_ASSEMBLER
6703/* Special functions only needed when producing AOF syntax assembler. */
6704
32de079a
RE
6705rtx aof_pic_label = NULL_RTX;
6706struct pic_chain
6707{
62b10bbc
NC
6708 struct pic_chain * next;
6709 char * symname;
32de079a
RE
6710};
6711
62b10bbc 6712static struct pic_chain * aof_pic_chain = NULL;
32de079a
RE
6713
6714rtx
6715aof_pic_entry (x)
6716 rtx x;
6717{
62b10bbc 6718 struct pic_chain ** chainp;
32de079a
RE
6719 int offset;
6720
6721 if (aof_pic_label == NULL_RTX)
6722 {
6723 /* This needs to persist throughout the compilation. */
6724 end_temporary_allocation ();
43cffd11 6725 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
32de079a
RE
6726 resume_temporary_allocation ();
6727 }
6728
6729 for (offset = 0, chainp = &aof_pic_chain; *chainp;
6730 offset += 4, chainp = &(*chainp)->next)
6731 if ((*chainp)->symname == XSTR (x, 0))
6732 return plus_constant (aof_pic_label, offset);
6733
6734 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
6735 (*chainp)->next = NULL;
6736 (*chainp)->symname = XSTR (x, 0);
6737 return plus_constant (aof_pic_label, offset);
6738}
6739
6740void
6741aof_dump_pic_table (f)
62b10bbc 6742 FILE * f;
32de079a 6743{
62b10bbc 6744 struct pic_chain * chain;
32de079a
RE
6745
6746 if (aof_pic_chain == NULL)
6747 return;
6748
dd18ae56
NC
6749 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
6750 PIC_OFFSET_TABLE_REGNUM,
6751 PIC_OFFSET_TABLE_REGNUM);
32de079a
RE
6752 fputs ("|x$adcons|\n", f);
6753
6754 for (chain = aof_pic_chain; chain; chain = chain->next)
6755 {
6756 fputs ("\tDCD\t", f);
6757 assemble_name (f, chain->symname);
6758 fputs ("\n", f);
6759 }
6760}
6761
2b835d68
RE
6762int arm_text_section_count = 1;
6763
6764char *
84ed5e79 6765aof_text_section ()
2b835d68
RE
6766{
6767 static char buf[100];
2b835d68
RE
6768 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
6769 arm_text_section_count++);
6770 if (flag_pic)
6771 strcat (buf, ", PIC, REENTRANT");
6772 return buf;
6773}
6774
static int arm_data_section_count = 1;

/* Return the assembler directive opening a fresh AOF data AREA.  The
   result lives in a static buffer overwritten by the next call.  */

char *
aof_data_section ()
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
6784
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */
6794
6795struct import
6796{
62b10bbc
NC
6797 struct import * next;
6798 char * name;
2b835d68
RE
6799};
6800
62b10bbc 6801static struct import * imports_list = NULL;
2b835d68
RE
6802
6803void
6804aof_add_import (name)
62b10bbc 6805 char * name;
2b835d68 6806{
62b10bbc 6807 struct import * new;
2b835d68
RE
6808
6809 for (new = imports_list; new; new = new->next)
6810 if (new->name == name)
6811 return;
6812
6813 new = (struct import *) xmalloc (sizeof (struct import));
6814 new->next = imports_list;
6815 imports_list = new;
6816 new->name = name;
6817}
6818
6819void
6820aof_delete_import (name)
62b10bbc 6821 char * name;
2b835d68 6822{
62b10bbc 6823 struct import ** old;
2b835d68
RE
6824
6825 for (old = &imports_list; *old; old = & (*old)->next)
6826 {
6827 if ((*old)->name == name)
6828 {
6829 *old = (*old)->next;
6830 return;
6831 }
6832 }
6833}
6834
6835int arm_main_function = 0;
6836
6837void
6838aof_dump_imports (f)
62b10bbc 6839 FILE * f;
2b835d68
RE
6840{
6841 /* The AOF assembler needs this to cause the startup code to be extracted
6842 from the library. Brining in __main causes the whole thing to work
6843 automagically. */
6844 if (arm_main_function)
6845 {
6846 text_section ();
6847 fputs ("\tIMPORT __main\n", f);
6848 fputs ("\tDCD __main\n", f);
6849 }
6850
6851 /* Now dump the remaining imports. */
6852 while (imports_list)
6853 {
6854 fprintf (f, "\tIMPORT\t");
6855 assemble_name (f, imports_list->name);
6856 fputc ('\n', f);
6857 imports_list = imports_list->next;
6858 }
6859}
6860#endif /* AOF_ASSEMBLER */
This page took 1.452566 seconds and 5 git commands to generate.