]> gcc.gnu.org Git - gcc.git/blob - gcc/config/m32c/m32c.c
Update copyright years.
[gcc.git] / gcc / config / m32c / m32c.c
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Red Hat.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
31 #include "output.h"
32 #include "insn-attr.h"
33 #include "flags.h"
34 #include "recog.h"
35 #include "reload.h"
36 #include "diagnostic-core.h"
37 #include "obstack.h"
38 #include "tree.h"
39 #include "stor-layout.h"
40 #include "varasm.h"
41 #include "calls.h"
42 #include "expr.h"
43 #include "insn-codes.h"
44 #include "optabs.h"
45 #include "except.h"
46 #include "hashtab.h"
47 #include "hash-set.h"
48 #include "vec.h"
49 #include "machmode.h"
50 #include "input.h"
51 #include "function.h"
52 #include "ggc.h"
53 #include "target.h"
54 #include "target-def.h"
55 #include "tm_p.h"
56 #include "langhooks.h"
57 #include "hash-table.h"
58 #include "predict.h"
59 #include "dominance.h"
60 #include "cfg.h"
61 #include "cfgrtl.h"
62 #include "cfganal.h"
63 #include "lcm.h"
64 #include "cfgbuild.h"
65 #include "cfgcleanup.h"
66 #include "basic-block.h"
67 #include "tree-ssa-alias.h"
68 #include "internal-fn.h"
69 #include "gimple-fold.h"
70 #include "tree-eh.h"
71 #include "gimple-expr.h"
72 #include "is-a.h"
73 #include "gimple.h"
74 #include "df.h"
75 #include "tm-constrs.h"
76 #include "builtins.h"
77
78 /* Prototypes */
79
80 /* Used by m32c_pushm_popm. */
81 typedef enum
82 {
83 PP_pushm,
84 PP_popm,
85 PP_justcount
86 } Push_Pop_Type;
87
88 static bool m32c_function_needs_enter (void);
89 static tree interrupt_handler (tree *, tree, tree, int, bool *);
90 static tree function_vector_handler (tree *, tree, tree, int, bool *);
91 static int interrupt_p (tree node);
92 static int bank_switch_p (tree node);
93 static int fast_interrupt_p (tree node);
94 static int interrupt_p (tree node);
95 static bool m32c_asm_integer (rtx, unsigned int, int);
96 static int m32c_comp_type_attributes (const_tree, const_tree);
97 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
98 static struct machine_function *m32c_init_machine_status (void);
99 static void m32c_insert_attributes (tree, tree *);
100 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
101 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
102 static rtx m32c_function_arg (cumulative_args_t, machine_mode,
103 const_tree, bool);
104 static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
105 const_tree, bool);
106 static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
107 const_tree, bool);
108 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
109 static int m32c_pushm_popm (Push_Pop_Type);
110 static bool m32c_strict_argument_naming (cumulative_args_t);
111 static rtx m32c_struct_value_rtx (tree, int);
112 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
113 static int need_to_save (int);
114 static rtx m32c_function_value (const_tree, const_tree, bool);
115 static rtx m32c_libcall_value (machine_mode, const_rtx);
116
117 /* Returns true if an address is specified, else false. */
118 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
119
120 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
121
122 #define streq(a,b) (strcmp ((a), (b)) == 0)
123
/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* First word of each register class's hard-reg mask, for places where
   a plain int mask is more convenient than a HARD_REG_SET.  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;	/* encoded string + write cursor */
static GTY(()) rtx patternr[30];	/* RTX behind each pattern[] char */
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
151 static int
152 far_addr_space_p (rtx x)
153 {
154 if (GET_CODE (x) != MEM)
155 return 0;
156 #if DEBUG0
157 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
158 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
159 #endif
160 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
161 }
162
/* We do most RTX matching by converting the RTX into a string, and
   using string compares.  This vastly simplifies the logic in many of
   the functions in this file.

   On exit, pattern[] has the encoded string (use RTX_IS("...") to
   compare it) and patternr[] has pointers to the nodes in the RTX
   corresponding to each character in the encoded string.  The latter
   is mostly used by print_operand().

   Unrecognized patterns have '?' in them; this shows up when the
   assembler complains about syntax errors.
*/

/* Recursively append the encoding of X to pattern[].  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* The last two bytes are reserved for the overflow marker and the
     NUL written by encode_pattern().  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  /* Record which RTX produced this character; print_operand() indexes
     patternr[] by position within the encoded string.  */
  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* A size-changing subreg gets an extra 'S', as does a PSImode
	 subreg of a register.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU - a MEM is 'm' followed by its (possibly CONST)
	 address expression.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' followed by the unspec number as an ASCII digit.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Unknown codes encode as '?', which never matches.  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
      gcc_unreachable ();
#endif
      break;
    }
}
284
285 static void
286 encode_pattern (rtx x)
287 {
288 patternp = pattern;
289 encode_pattern_1 (x);
290 *patternp = 0;
291 }
292
293 /* Since register names indicate the mode they're used in, we need a
294 way to determine which name to refer to the register with. Called
295 by print_operand(). */
296
297 static const char *
298 reg_name_with_mode (int regno, machine_mode mode)
299 {
300 int mlen = GET_MODE_SIZE (mode);
301 if (regno == R0_REGNO && mlen == 1)
302 return "r0l";
303 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
304 return "r2r0";
305 if (regno == R0_REGNO && mlen == 6)
306 return "r2r1r0";
307 if (regno == R0_REGNO && mlen == 8)
308 return "r3r1r2r0";
309 if (regno == R1_REGNO && mlen == 1)
310 return "r1l";
311 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
312 return "r3r1";
313 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
314 return "a1a0";
315 return reg_names[regno];
316 }
317
318 /* How many bytes a register uses on stack when it's pushed. We need
319 to know this because the push opcode needs to explicitly indicate
320 the size of the register, even though the name of the register
321 already tells it that. Used by m32c_output_reg_{push,pop}, which
322 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
323
324 static int
325 reg_push_size (int regno)
326 {
327 switch (regno)
328 {
329 case R0_REGNO:
330 case R1_REGNO:
331 return 2;
332 case R2_REGNO:
333 case R3_REGNO:
334 case FLG_REGNO:
335 return 2;
336 case A0_REGNO:
337 case A1_REGNO:
338 case SB_REGNO:
339 case FB_REGNO:
340 case SP_REGNO:
341 if (TARGET_A16)
342 return 2;
343 else
344 return 3;
345 default:
346 gcc_unreachable ();
347 }
348 }
349
350 /* Given two register classes, find the largest intersection between
351 them. If there is no intersection, return RETURNED_IF_EMPTY
352 instead. */
353 static reg_class_t
354 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
355 reg_class_t returned_if_empty)
356 {
357 HARD_REG_SET cc;
358 int i;
359 reg_class_t best = NO_REGS;
360 unsigned int best_size = 0;
361
362 if (original_class == limiting_class)
363 return original_class;
364
365 cc = reg_class_contents[original_class];
366 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
367
368 for (i = 0; i < LIM_REG_CLASSES; i++)
369 {
370 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
371 if (best_size < reg_class_size[i])
372 {
373 best = (reg_class_t) i;
374 best_size = reg_class_size[i];
375 }
376
377 }
378 if (best == NO_REGS)
379 return returned_if_empty;
380 return best;
381 }
382
/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  Returns true if any register in RCLASS can
   hold a value of MODE.  */
static bool
class_can_hold_mode (reg_class_t rclass, machine_mode mode)
{
  /* Cache the results:  0=untested  1=no  2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      /* Assume "no" until some register in the class accepts MODE.  */
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
	    && HARD_REGNO_MODE_OK (r, mode))
	  {
	    results[rclass][mode] = 2;
	    break;
	  }
    }

#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[(int) rclass], mode_name[mode],
	   (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}
411
/* Run-time Target Specification.  */

/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by lib1funcs.S.
*/

/* Presumably cleared once it is no longer safe to vary the memreg
   count -- the clearing site is outside this chunk; TODO confirm.  */
int ok_to_change_target_memregs = TRUE;
427
428 /* Implements TARGET_OPTION_OVERRIDE. */
429
430 #undef TARGET_OPTION_OVERRIDE
431 #define TARGET_OPTION_OVERRIDE m32c_option_override
432
433 static void
434 m32c_option_override (void)
435 {
436 /* We limit memregs to 0..16, and provide a default. */
437 if (global_options_set.x_target_memregs)
438 {
439 if (target_memregs < 0 || target_memregs > 16)
440 error ("invalid target memregs value '%d'", target_memregs);
441 }
442 else
443 target_memregs = 16;
444
445 if (TARGET_A24)
446 flag_ivopts = 0;
447
448 /* This target defaults to strict volatile bitfields. */
449 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
450 flag_strict_volatile_bitfields = 1;
451
452 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
453 This is always worse than an absolute call. */
454 if (TARGET_A16)
455 flag_no_function_cse = 1;
456
457 /* This wants to put insns between compares and their jumps. */
458 /* FIXME: The right solution is to properly trace the flags register
459 values, but that is too much work for stage 4. */
460 flag_combine_stack_adjustments = 0;
461 }
462
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

/* Implements TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  Re-apply the
   A16 no-function-cse requirement (see m32c_option_override) after
   per-function option changes.  */
static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
472
/* Defining data structures for per-function information */

/* The usual; we set up our machine_function data.  Returns a
   zero-initialized, GC-allocated machine_function.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
481
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
489
/* Storage Layout */

/* Register Basics */

/* Basic Characteristics of Registers */

/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each entry is the number of hard registers a value of that
   mode class occupies in the given register; 0 means the mode is not
   allowed there at all (see m32c_hard_regno_nregs_1).  Note the
   hardware register order interleaves r0,r2,r1,r3.  */
static struct
{
  char qi_regs;		/* QImode (1 byte) */
  char hi_regs;		/* HImode (2 bytes) */
  char pi_regs;		/* PSImode */
  char si_regs;		/* SImode (4 bytes) */
  char di_regs;		/* DImode (8 bytes) */
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
528
529 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
530 of available memregs, and select which registers need to be preserved
531 across calls based on the chip family. */
532
533 #undef TARGET_CONDITIONAL_REGISTER_USAGE
534 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
535 void
536 m32c_conditional_register_usage (void)
537 {
538 int i;
539
540 if (0 <= target_memregs && target_memregs <= 16)
541 {
542 /* The command line option is bytes, but our "registers" are
543 16-bit words. */
544 for (i = (target_memregs+1)/2; i < 8; i++)
545 {
546 fixed_regs[MEM0_REGNO + i] = 1;
547 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
548 }
549 }
550
551 /* M32CM and M32C preserve more registers across function calls. */
552 if (TARGET_A24)
553 {
554 call_used_regs[R1_REGNO] = 0;
555 call_used_regs[R2_REGNO] = 0;
556 call_used_regs[R3_REGNO] = 0;
557 call_used_regs[A0_REGNO] = 0;
558 call_used_regs[A1_REGNO] = 0;
559 }
560 }
561
/* How Values Fit in Registers */

/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when MODE
   does not fit in REGNO at all.  */
static int
m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
{
  /* The flags register holds the condition code in one register.  */
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudos get the generic round-up-to-words calculation.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words; round the byte size up to words.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  /* Order matters below: narrower sizes are tested first, and the
     PSImode test must precede the generic <=4 test on A24.  */
  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* On A16, SImode in a0 spans the a1a0 pair (two registers).  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  /* Anything larger fits in no hard register.  */
  return 0;
}
592
593 int
594 m32c_hard_regno_nregs (int regno, machine_mode mode)
595 {
596 int rv = m32c_hard_regno_nregs_1 (regno, mode);
597 return rv ? rv : 1;
598 }
599
600 /* Implements HARD_REGNO_MODE_OK. The above function does the work
601 already; just test its return value. */
602 int
603 m32c_hard_regno_ok (int regno, machine_mode mode)
604 {
605 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
606 }
607
/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.  */
int
m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
{
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    return 1;

#if 0
  /* Disabled: would prevent QImode from tying with any other-sized
     mode.  Kept for reference.  */
  if (m1 == QImode || m2 == QImode)
    return 0;
#endif

  /* With the special case disabled, everything ties.  */
  return 1;
}
625
626 /* Register Classes */
627
628 /* Implements REGNO_REG_CLASS. */
629 enum reg_class
630 m32c_regno_reg_class (int regno)
631 {
632 switch (regno)
633 {
634 case R0_REGNO:
635 return R0_REGS;
636 case R1_REGNO:
637 return R1_REGS;
638 case R2_REGNO:
639 return R2_REGS;
640 case R3_REGNO:
641 return R3_REGS;
642 case A0_REGNO:
643 return A0_REGS;
644 case A1_REGNO:
645 return A1_REGS;
646 case SB_REGNO:
647 return SB_REGS;
648 case FB_REGNO:
649 return FB_REGS;
650 case SP_REGNO:
651 return SP_REGS;
652 case FLG_REGNO:
653 return FLG_REGS;
654 default:
655 if (IS_MEM_REGNO (regno))
656 return MEM_REGS;
657 return ALL_REGS;
658 }
659 }
660
661 /* Implements REGNO_OK_FOR_BASE_P. */
662 int
663 m32c_regno_ok_for_base_p (int regno)
664 {
665 if (regno == A0_REGNO
666 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
667 return 1;
668 return 0;
669 }
670
#define DEBUG_RELOAD 0

/* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  */

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class

static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG_RELOAD
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* An empty class: pick a general-register class suited to X's mode.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Classes touching the control registers: QImode goes through the
     byte registers; other modes are left alone.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /* newclass = HI_REGS; */
	  break;
	}
    }

  /* Otherwise, widen the target class for wide values.  */
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  /* Narrow RCLASS to its intersection with NEWCLASS (falling back to
     RCLASS itself if the intersection is empty).  */
  rclass = reduce_class (rclass, newclass, rclass);

  /* QImode must end up in a byte-capable register if possible.  */
  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
726
/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  Output reloads
   use the same preferences as input reloads.  */

#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class

static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
737
/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
   address registers for reloads since they're needed for address
   reloads.  */
int
m32c_limit_reload_class (machine_mode mode, int rclass)
{
#if DEBUG_RELOAD
  fprintf (stderr, "limit_reload_class for %s: %s ->",
	   mode_name[mode], class_names[rclass]);
#endif

  /* First narrow to registers that can hold MODE at all.  */
  if (mode == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);
  else if (mode == HImode)
    rclass = reduce_class (rclass, HI_REGS, rclass);
  else if (mode == SImode)
    rclass = reduce_class (rclass, SI_REGS, rclass);

  /* Then steer reloads away from the address registers, unless the
     class is exactly A_REGS.  */
  if (rclass != A_REGS)
    rclass = reduce_class (rclass, DI_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, " %s\n", class_names[rclass]);
#endif
  return rclass;
}
764
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  /* First-word mask of the class's registers.  */
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode MEM into a class holding nothing beyond r2/r3 needs a real
     byte register (r0l/r1l) as intermediary.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Control registers (sb..sp) go through a general or address
     register of the appropriate width.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
787
788 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
789 reloads. */
790
791 #undef TARGET_CLASS_LIKELY_SPILLED_P
792 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
793
794 static bool
795 m32c_class_likely_spilled_p (reg_class_t regclass)
796 {
797 if (regclass == A_REGS)
798 return true;
799
800 return (reg_class_size[(int) regclass] == 1);
801 }
802
803 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
804 documented meaning, to avoid potential inconsistencies with actual
805 class definitions. */
806
807 #undef TARGET_CLASS_MAX_NREGS
808 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
809
810 static unsigned char
811 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
812 {
813 int rn;
814 unsigned char max = 0;
815
816 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
817 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
818 {
819 unsigned char n = m32c_hard_regno_nregs (rn, mode);
820 if (max < n)
821 max = n;
822 }
823 return max;
824 }
825
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  */
int
m32c_cannot_change_mode_class (machine_mode from,
			       machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* 0x1ffa is the mask of every register other than r0 and r1
     (presumably: it is the complement of the r0/r1 mask below within
     the hard registers -- TODO confirm against the .h register
     ordering).  Only r0/r1 may narrow to QImode.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
858
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

/* TRUE if X is the a0 register or a pseudo.  Note: applies REGNO
   unconditionally, so only safe when X is known to be a REG.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
871
/* Implements matching for constraints (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  Works on the string encoding produced by
   encode_pattern(); patternr[] holds the RTX behind each character.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-space memory: register-indirect (SImode reg), extended
       HImode reg plus offset/symbol, or absolute symbolic.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      if (RTX_IS ("m++rii"))
	{
	  /* fb-relative with a zero inner offset is acceptable.  */
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* sp-relative addressing is excluded from Sd.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* a0/a1-indirect, optionally with a constant offset.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant or symbolic absolute addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* sp-relative addresses.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* fb-relative addresses.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* sb-relative addresses.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
957
958 /* STACK AND CALLING */
959
960 /* Frame Layout */
961
962 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
963 (yes, THREE bytes) onto the stack for the return address, but we
964 don't support pointers bigger than 16 bits on those chips. This
965 will likely wreak havoc with exception unwinding. FIXME. */
966 rtx
967 m32c_return_addr_rtx (int count)
968 {
969 machine_mode mode;
970 int offset;
971 rtx ra_mem;
972
973 if (count)
974 return NULL_RTX;
975 /* we want 2[$fb] */
976
977 if (TARGET_A24)
978 {
979 /* It's four bytes */
980 mode = PSImode;
981 offset = 4;
982 }
983 else
984 {
985 /* FIXME: it's really 3 bytes */
986 mode = HImode;
987 offset = 2;
988 }
989
990 ra_mem =
991 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
992 offset));
993 return copy_to_mode_reg (mode, ra_mem);
994 }
995
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  On entry
   the return address is at the top of stack.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
1003
1004 /* Exception Handling Support */
1005
1006 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1007 pointers. */
1008 int
1009 m32c_eh_return_data_regno (int n)
1010 {
1011 switch (n)
1012 {
1013 case 0:
1014 return MEM0_REGNO;
1015 case 1:
1016 return MEM0_REGNO+4;
1017 default:
1018 return INVALID_REGNUM;
1019 }
1020 }
1021
1022 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1023 m32c_emit_eh_epilogue. */
1024 rtx
1025 m32c_eh_return_stackadj_rtx (void)
1026 {
1027 if (!cfun->machine->eh_stack_adjust)
1028 {
1029 rtx sa;
1030
1031 sa = gen_rtx_REG (Pmode, R0_REGNO);
1032 cfun->machine->eh_stack_adjust = sa;
1033 }
1034 return cfun->machine->eh_stack_adjust;
1035 }
1036
/* Registers That Address the Stack Frame */

/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
   the original spec called for dwarf numbers to vary with register
   width as well, for example, r0l, r0, and r2r0 would each have
   different dwarf numbers.  GCC doesn't support this, and we don't do
   it, and gdb seems to like it this way anyway.  */
unsigned int
m32c_dwarf_frame_regnum (int n)
{
  switch (n)
    {
    case R0_REGNO:
      return 5;
    case R1_REGNO:
      return 6;
    case R2_REGNO:
      return 7;
    case R3_REGNO:
      return 8;
    case A0_REGNO:
      return 9;
    case A1_REGNO:
      return 10;
    case FB_REGNO:
      return 11;
    case SB_REGNO:
      return 19;

    case SP_REGNO:
      return 12;
    case PC_REGNO:
      return 13;
    default:
      /* No DWARF number; return an out-of-range value.  */
      return DWARF_FRAME_REGISTERS + 1;
    }
}
1074
/* The frame looks like this:

   ap -> +------------------------------
	 | Return address (3 or 4 bytes)
	 | Saved FB (2 or 4 bytes)
   fb -> +------------------------------
	 | local vars
	 | register saves fb
	 |   through r0 as needed
   sp -> +------------------------------
*/

/* We use this to wrap all emitted insns in the prologue: mark X as
   frame-related and return it unchanged.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1094
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* hard register number */
  int bit;		/* its bit in the PUSHM/POPM mask */
  int a16_bytes;	/* stack bytes consumed on A16 */
  int a24_bytes;	/* stack bytes consumed on A24 */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in pushm_info.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1117
/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  /* Fixed registers are never saved.  */
  if (fixed_regs[regno])
    return 0;
  /* EH return needs every register restorable.  */
  if (crtl->calls_eh_return)
    return 1;
  /* The frame pointer is handled separately from this set.  */
  if (regno == FP_REGNO)
    return 0;
  /* Interrupt handlers: save everything unless leaf, but always
     save $a0 when the enter/exit sequence is used.  */
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  /* Otherwise save live registers the callee must preserve (or any
     live register inside an interrupt handler).  */
  if (df_regs_ever_live_p (regno)
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
    return 1;
  return 0;
}
1145
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* PUSHM/POPM operand bits accumulated so far.  */
  int byte_count = 0, bytes;	/* Total stack bytes moved; bytes per register.  */
  int i;
  rtx dwarf_set[PUSHM_N];	/* One CFI SET per pushed register.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* pushm_info bits we must NOT save.  */

  /* When the return value lives in registers (a PARALLEL return rtx),
     don't clobber those registers with the epilogue's POPM.  Not
     needed for eh_return or interrupts, which save/restore
     everything.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      /* NOTE(review): the else-branch masks r0..r3 (0xf0) when the
	 first return piece is <= 2 bytes; the comments suggest this
	 encodes the DF-return case — confirm against the return-value
	 ABI before touching this.  */
      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Walk the PUSHM order, building the operand mask, the byte count,
     and (for PP_pushm) the DWARF CFI notes.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record "mem[sp + offset] = reg" for the unwinder.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* Interrupt handlers save via the interrupt prologue instead;
     remember the mask for it and drop it from the normal PUSHM.
     Bit 0x01 ($fb) is excluded here.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers also save the mem0..mem7 pseudo-registers,
     two bytes each, via individual pushes.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      /* n_dwarfs register stores plus one SET for the SP adjustment.  */
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  /* Element 0 of the note is the SP decrement itself.  */
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  /* Attach the CFI sequence so dwarf2out describes the PUSHM.  */
	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* Push any flagged mem0..mem7 registers individually.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Pop in the reverse order of the pushes above.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  /* Size of the save area, in bytes, even for PP_justcount.  */
  return byte_count;
}
1281
1282 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1283 diagrams our call frame. */
1284 int
1285 m32c_initial_elimination_offset (int from, int to)
1286 {
1287 int ofs = 0;
1288
1289 if (from == AP_REGNO)
1290 {
1291 if (TARGET_A16)
1292 ofs += 5;
1293 else
1294 ofs += 8;
1295 }
1296
1297 if (to == SP_REGNO)
1298 {
1299 ofs += m32c_pushm_popm (PP_justcount);
1300 ofs += get_frame_size ();
1301 }
1302
1303 /* Account for push rounding. */
1304 if (TARGET_A24)
1305 ofs = (ofs + 1) & ~1;
1306 #if DEBUG0
1307 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1308 to, ofs);
1309 #endif
1310 return ofs;
1311 }
1312
1313 /* Passing Function Arguments on the Stack */
1314
1315 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1316 M32C has word stacks. */
1317 unsigned int
1318 m32c_push_rounding (int n)
1319 {
1320 if (TARGET_R8C || TARGET_M16C)
1321 return n;
1322 return (n + 1) & ~1;
1323 }
1324
1325 /* Passing Arguments in Registers */
1326
1327 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1328 registers, partly on stack. If our function returns a struct, a
1329 pointer to a buffer for it is at the top of the stack (last thing
1330 pushed). The first few real arguments may be in registers as
1331 follows:
1332
1333 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1334 arg2 in r2 if it's HI (else pushed on stack)
1335 rest on stack
1336 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1337 rest on stack
1338
1339 Structs are not passed in registers, even if they fit. Only
1340 integer and pointer types are passed in registers.
1341
1342 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1343 r2 if it fits. */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m32c_function_arg
/* Returns the rtx for the register holding this argument, or NULL_RTX
   if the argument goes on the stack.  See the big comment above for
   the per-family register assignment rules.  */
static rtx
m32c_function_arg (cumulative_args_t ca_v,
		   machine_mode mode, const_tree type, bool named)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[mode], named);
  debug_tree (type);
#endif

  /* VOIDmode marks the end of the argument list; return a non-NULL
     dummy value.  */
  if (mode == VOIDmode)
    return GEN_INT (0);

  /* Once something has gone to memory (or the argument is unnamed),
     everything after it goes to memory too.  */
  if (ca->force_mem || !named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       named);
#endif
      return NULL_RTX;
    }

  /* NOTE(review): this condition can never be true — a type cannot be
     both integral and a pointer.  The comment above says only integer
     and pointer types go in registers, so !INTEGRAL && !POINTER may
     have been intended; changing it would alter the ABI, so confirm
     before fixing.  */
  if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
    return NULL_RTX;

  /* Structs are never passed in registers, even if they fit.  */
  if (type && AGGREGATE_TYPE_P (type))
    return NULL_RTX;

  switch (ca->parm_num)
    {
    case 1:
      /* First argument: $r1 on A16 parts, $r0 on A24, if it is one or
	 two bytes wide.  */
      if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      /* Second argument: $r2, A16 parts only, HImode-sized only.  */
      if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}
1396
1397 #undef TARGET_PASS_BY_REFERENCE
1398 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1399 static bool
1400 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1401 machine_mode mode ATTRIBUTE_UNUSED,
1402 const_tree type ATTRIBUTE_UNUSED,
1403 bool named ATTRIBUTE_UNUSED)
1404 {
1405 return 0;
1406 }
1407
1408 /* Implements INIT_CUMULATIVE_ARGS. */
1409 void
1410 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1411 tree fntype,
1412 rtx libname ATTRIBUTE_UNUSED,
1413 tree fndecl,
1414 int n_named_args ATTRIBUTE_UNUSED)
1415 {
1416 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1417 ca->force_mem = 1;
1418 else
1419 ca->force_mem = 0;
1420 ca->parm_num = 1;
1421 }
1422
1423 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1424 functions returning structures, so we always reset that. Otherwise,
1425 we only need to know the sequence number of the argument to know what
1426 to do with it. */
1427 #undef TARGET_FUNCTION_ARG_ADVANCE
1428 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1429 static void
1430 m32c_function_arg_advance (cumulative_args_t ca_v,
1431 machine_mode mode ATTRIBUTE_UNUSED,
1432 const_tree type ATTRIBUTE_UNUSED,
1433 bool named ATTRIBUTE_UNUSED)
1434 {
1435 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1436
1437 if (ca->force_mem)
1438 ca->force_mem = 0;
1439 else
1440 ca->parm_num++;
1441 }
1442
1443 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1444 #undef TARGET_FUNCTION_ARG_BOUNDARY
1445 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1446 static unsigned int
1447 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1448 const_tree type ATTRIBUTE_UNUSED)
1449 {
1450 return (TARGET_A16 ? 8 : 16);
1451 }
1452
1453 /* Implements FUNCTION_ARG_REGNO_P. */
1454 int
1455 m32c_function_arg_regno_p (int r)
1456 {
1457 if (TARGET_A24)
1458 return (r == R0_REGNO);
1459 return (r == R1_REGNO || r == R2_REGNO);
1460 }
1461
1462 /* HImode and PSImode are the two "native" modes as far as GCC is
1463 concerned, but the chips also support a 32-bit mode which is used
1464 for some opcodes in R8C/M16C and for reset vectors and such. */
1465 #undef TARGET_VALID_POINTER_MODE
1466 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1467 static bool
1468 m32c_valid_pointer_mode (machine_mode mode)
1469 {
1470 if (mode == HImode
1471 || mode == PSImode
1472 || mode == SImode
1473 )
1474 return 1;
1475 return 0;
1476 }
1477
1478 /* How Scalar Function Values Are Returned */
1479
/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Anything wider than 16 bits comes back in mem0; 8/16-bit values
     come back in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1541
1542 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1543 conventions. */
1544
1545 #undef TARGET_FUNCTION_VALUE
1546 #define TARGET_FUNCTION_VALUE m32c_function_value
1547
1548 static rtx
1549 m32c_function_value (const_tree valtype,
1550 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1551 bool outgoing ATTRIBUTE_UNUSED)
1552 {
1553 /* return reg or parallel */
1554 const machine_mode mode = TYPE_MODE (valtype);
1555 return m32c_libcall_value (mode, NULL_RTX);
1556 }
1557
1558 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1559
1560 #undef TARGET_FUNCTION_VALUE_REGNO_P
1561 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1562
1563 static bool
1564 m32c_function_value_regno_p (const unsigned int regno)
1565 {
1566 return (regno == R0_REGNO || regno == MEM0_REGNO);
1567 }
1568
1569 /* How Large Values Are Returned */
1570
1571 /* We return structures by pushing the address on the stack, even if
1572 we use registers for the first few "real" arguments. */
1573 #undef TARGET_STRUCT_VALUE_RTX
1574 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1575 static rtx
1576 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1577 int incoming ATTRIBUTE_UNUSED)
1578 {
1579 return 0;
1580 }
1581
1582 /* Function Entry and Exit */
1583
1584 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1585 int
1586 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1587 {
1588 if (cfun->machine->is_interrupt)
1589 return 1;
1590 return 0;
1591 }
1592
1593 /* Implementing the Varargs Macros */
1594
1595 #undef TARGET_STRICT_ARGUMENT_NAMING
1596 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1597 static bool
1598 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1599 {
1600 return 1;
1601 }
1602
1603 /* Trampolines for Nested Functions */
1604
1605 /*
1606 m16c:
1607 1 0000 75C43412 mov.w #0x1234,a0
1608 2 0004 FC000000 jmp.a label
1609
1610 m32c:
1611 1 0000 BC563412 mov.l:s #0x123456,a0
1612 2 0004 CC000000 jmp.a label
1613 */
1614
1615 /* Implements TRAMPOLINE_SIZE. */
1616 int
1617 m32c_trampoline_size (void)
1618 {
1619 /* Allocate extra space so we can avoid the messy shifts when we
1620 initialize the trampoline; we just write past the end of the
1621 opcode. */
1622 return TARGET_A16 ? 8 : 10;
1623 }
1624
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines are placed at 2-byte
   (word) boundaries.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1631
/* Implements TARGET_TRAMPOLINE_INIT.  Writes the opcode bytes shown
   in the comment above into the trampoline buffer, splicing in the
   static chain value and the target function's address.  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* A0(m,i) is the trampoline memory at byte offset I, accessed in mode M.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000)); /* mov.w #chain,a0 */
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));     /* jmp.a function */
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));     /* mov.l:s #chain,a0 */
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));     /* jmp.a function */
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1666
1667 /* Addressing Modes */
1668
/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  /* Addresses must be in the pointer mode for the family: HImode (or
     SImode for far-style opcodes) on A16, PSImode on A24.  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-modify addressing is only valid on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  /* encode_pattern fills patternr[] and lets us match address shapes
     with the RTX_IS string patterns below.  */
  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  /* Base register plus constant displacement.  */
  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  /* Base register plus symbolic displacement.  */
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /* case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
1802
1803 /* Implements REG_OK_FOR_BASE_P. */
1804 int
1805 m32c_reg_ok_for_base_p (rtx x, int strict)
1806 {
1807 if (GET_CODE (x) != REG)
1808 return 0;
1809 switch (REGNO (x))
1810 {
1811 case A0_REGNO:
1812 case A1_REGNO:
1813 case SB_REGNO:
1814 case FB_REGNO:
1815 case SP_REGNO:
1816 return 1;
1817 default:
1818 if (IS_PSEUDO (x, strict))
1819 return 1;
1820 return 0;
1821 }
1822 }
1823
1824 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1825 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1826 like this:
1827 EB 4B FF mova -128[$fb],$a0
1828 D8 0C FF FF mov.w:Q #0,-1[$a0]
1829
1830 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1831 displacements:
1832 7B F4 stc $fb,$a0
1833 77 54 00 01 sub #256,$a0
1834 D8 08 01 mov.w:Q #0,1[$a0]
1835
1836 If we don't offset (i.e. offset by zero), we end up with:
1837 7B F4 stc $fb,$a0
1838 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1839
1840 We have to subtract *something* so that we have a PLUS rtx to mark
1841 that we've done this reload. The -128 offset will never result in
1842 an 8-bit aN offset, and the payoff for the second case is five
1843 loads *if* those loads are within 256 bytes of the other end of the
1844 frame, so the third case seems best. Note that we subtract the
1845 zero, but detect that in the addhi3 pattern. */
1846
1847 #define BIG_FB_ADJ 0
1848
1849 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1850 worry about is frame base offsets, as $fb has a limited
1851 displacement range. We deal with this by attempting to reload $fb
1852 itself into an address register; that seems to result in the best
1853 code. */
1854 #undef TARGET_LEGITIMIZE_ADDRESS
1855 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1856 static rtx
1857 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1858 machine_mode mode)
1859 {
1860 #if DEBUG0
1861 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1862 debug_rtx (x);
1863 fprintf (stderr, "\n");
1864 #endif
1865
1866 if (GET_CODE (x) == PLUS
1867 && GET_CODE (XEXP (x, 0)) == REG
1868 && REGNO (XEXP (x, 0)) == FB_REGNO
1869 && GET_CODE (XEXP (x, 1)) == CONST_INT
1870 && (INTVAL (XEXP (x, 1)) < -128
1871 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1872 {
1873 /* reload FB to A_REGS */
1874 rtx temp = gen_reg_rtx (Pmode);
1875 x = copy_rtx (x);
1876 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1877 XEXP (x, 0) = temp;
1878 }
1879
1880 return x;
1881 }
1882
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  Returns
   nonzero if a reload was pushed for *X, zero to let reload handle it
   the default way.  */
int
m32c_legitimize_reload_address (rtx * x,
				machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: ($fb + large const).  Split into
     (($fb + -BIG_FB_ADJ) + adjusted-const) and reload the inner sum
     into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: (($fb + const) + const) — already in the split shape; just
     reload the inner sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
1945
1946 /* Return the appropriate mode for a named address pointer. */
1947 #undef TARGET_ADDR_SPACE_POINTER_MODE
1948 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1949 static machine_mode
1950 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1951 {
1952 switch (addrspace)
1953 {
1954 case ADDR_SPACE_GENERIC:
1955 return TARGET_A24 ? PSImode : HImode;
1956 case ADDR_SPACE_FAR:
1957 return SImode;
1958 default:
1959 gcc_unreachable ();
1960 }
1961 }
1962
1963 /* Return the appropriate mode for a named address address. */
1964 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1965 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1966 static machine_mode
1967 m32c_addr_space_address_mode (addr_space_t addrspace)
1968 {
1969 switch (addrspace)
1970 {
1971 case ADDR_SPACE_GENERIC:
1972 return TARGET_A24 ? PSImode : HImode;
1973 case ADDR_SPACE_FAR:
1974 return SImode;
1975 default:
1976 gcc_unreachable ();
1977 }
1978 }
1979
/* Like m32c_legitimate_address_p, except with named addresses.  */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
  m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* Far addressing is only handled on A16 parts here.  */
      if (TARGET_A24)
	return 0;
      /* encode_pattern fills patternr[] for the RTX_IS matches below.  */
      encode_pattern (x);
      /* A bare register: must be a 32-bit $a0 (or a pseudo).  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus constant offset (far form); only $a0 with a
	 20-bit non-negative offset, or a pseudo.  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus symbol (far form).  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus symbol plus constant (far form).  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* A bare symbol reference is always addressable far.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic address space: defer to the ordinary rules.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2069
2070 /* Like m32c_legitimate_address, except with named address support. */
2071 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2072 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2073 static rtx
2074 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2075 addr_space_t as)
2076 {
2077 if (as != ADDR_SPACE_GENERIC)
2078 {
2079 #if DEBUG0
2080 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2081 debug_rtx (x);
2082 fprintf (stderr, "\n");
2083 #endif
2084
2085 if (GET_CODE (x) != REG)
2086 {
2087 x = force_reg (SImode, x);
2088 }
2089 return x;
2090 }
2091
2092 return m32c_legitimize_address (x, oldx, mode);
2093 }
2094
2095 /* Determine if one named address space is a subset of another. */
2096 #undef TARGET_ADDR_SPACE_SUBSET_P
2097 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2098 static bool
2099 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2100 {
2101 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2102 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2103
2104 if (subset == superset)
2105 return true;
2106
2107 else
2108 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2109 }
2110
2111 #undef TARGET_ADDR_SPACE_CONVERT
2112 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2113 /* Convert from one address space to another. */
2114 static rtx
2115 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2116 {
2117 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2118 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2119 rtx result;
2120
2121 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2122 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2123
2124 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2125 {
2126 /* This is unpredictable, as we're truncating off usable address
2127 bits. */
2128
2129 result = gen_reg_rtx (HImode);
2130 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2131 return result;
2132 }
2133 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2134 {
2135 /* This always works. */
2136 result = gen_reg_rtx (SImode);
2137 emit_insn (gen_zero_extendhisi2 (result, op));
2138 return result;
2139 }
2140 else
2141 gcc_unreachable ();
2142 }
2143
2144 /* Condition Code Status */
2145
2146 #undef TARGET_FIXED_CONDITION_CODE_REGS
2147 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2148 static bool
2149 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2150 {
2151 *p1 = FLG_REGNO;
2152 *p2 = INVALID_REGNUM;
2153 return true;
2154 }
2155
2156 /* Describing Relative Costs of Operations */
2157
/* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
   prohibitively expensive, like trying to put QIs in r2/r3 (there are
   no opcodes to do that).  We also discourage use of mem* registers
   since they're really memory.  */

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost

static int
m32c_register_move_cost (machine_mode mode, reg_class_t from,
			 reg_class_t to)
{
  int cost = COSTS_N_INSNS (3);	/* Baseline cost for any move.  */
  HARD_REG_SET cc;

  /* FIXME: pick real values, but not 2 for now.  */
  /* CC = union of the source and destination classes' hard regs.  */
  COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
  IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);

  /* QImode touching r2/r3: impossible when ONLY r2/r3 are involved,
     merely discouraged when other registers are too.  */
  if (mode == QImode
      && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
    {
      if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
	cost = COSTS_N_INSNS (1000);
      else
	cost = COSTS_N_INSNS (80);
    }

  /* A class that cannot hold the mode at all makes the move
     effectively impossible.  */
  if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
    cost = COSTS_N_INSNS (1000);

  /* Penalize moves through control registers.  */
  if (reg_classes_intersect_p (from, CR_REGS))
    cost += COSTS_N_INSNS (5);

  if (reg_classes_intersect_p (to, CR_REGS))
    cost += COSTS_N_INSNS (5);

  /* mem0..mem7 "registers" are really memory; penalize heavily when
     the class is exactly MEM_REGS, mildly when it merely overlaps.  */
  if (from == MEM_REGS || to == MEM_REGS)
    cost += COSTS_N_INSNS (50);
  else if (reg_classes_intersect_p (from, MEM_REGS)
	   || reg_classes_intersect_p (to, MEM_REGS))
    cost += COSTS_N_INSNS (10);

#if DEBUG0
  fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
	   mode_name[mode], class_names[(int) from], class_names[(int) to],
	   cost);
#endif
  return cost;
}
2208
2209 /* Implements TARGET_MEMORY_MOVE_COST. */
2210
2211 #undef TARGET_MEMORY_MOVE_COST
2212 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2213
2214 static int
2215 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2216 reg_class_t rclass ATTRIBUTE_UNUSED,
2217 bool in ATTRIBUTE_UNUSED)
2218 {
2219 /* FIXME: pick real values. */
2220 return COSTS_N_INSNS (10);
2221 }
2222
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs

/* Implements TARGET_RTX_COSTS.  Accumulates an estimated cost for X
   into *TOTAL.  Returning true stops recursion into sub-expressions;
   returning false lets the generic code cost them as well.  */
static bool
m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case REG:
      /* The mem0..mem7 "registers" are really memory; make them very
	 expensive so they are avoided whenever possible.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* Variable shift count; charge one insn for setting it up:  */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (1);
	  return true;
	}
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* Constant count outside the +/-8 immediate range needs two
	     setup insns:  */
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* Materializing a comparison result into a register (SET) costs
	 extra; comparisons used directly for branching do not.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* Bit insns; cost varies with the addressing mode of the
	   operand being accessed.  NOTE(review): assumes XEXP (x, 0)
	   is a MEM so XEXP (dest, 0) is its address — matches the
	   patterns that generate ZERO_EXTRACT here; confirm.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    /* Absolute address.  */
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  SImode operations cost extra on
	 16-bit-address parts.  */
      if (TARGET_A16 && GET_MODE(x) == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2304
2305 #undef TARGET_ADDRESS_COST
2306 #define TARGET_ADDRESS_COST m32c_address_cost
2307 static int
2308 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2309 addr_space_t as ATTRIBUTE_UNUSED,
2310 bool speed ATTRIBUTE_UNUSED)
2311 {
2312 int i;
2313 /* fprintf(stderr, "\naddress_cost\n");
2314 debug_rtx(addr);*/
2315 switch (GET_CODE (addr))
2316 {
2317 case CONST_INT:
2318 i = INTVAL (addr);
2319 if (i == 0)
2320 return COSTS_N_INSNS(1);
2321 if (0 < i && i <= 255)
2322 return COSTS_N_INSNS(2);
2323 if (0 < i && i <= 65535)
2324 return COSTS_N_INSNS(3);
2325 return COSTS_N_INSNS(4);
2326 case SYMBOL_REF:
2327 return COSTS_N_INSNS(4);
2328 case REG:
2329 return COSTS_N_INSNS(1);
2330 case PLUS:
2331 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2332 {
2333 i = INTVAL (XEXP (addr, 1));
2334 if (i == 0)
2335 return COSTS_N_INSNS(1);
2336 if (0 < i && i <= 255)
2337 return COSTS_N_INSNS(2);
2338 if (0 < i && i <= 65535)
2339 return COSTS_N_INSNS(3);
2340 }
2341 return COSTS_N_INSNS(4);
2342 default:
2343 return 0;
2344 }
2345 }
2346
2347 /* Defining the Output Assembler Language */
2348
2349 /* Output of Data */
2350
2351 /* We may have 24 bit sizes, which is the native address size.
2352 Currently unused, but provided for completeness. */
2353 #undef TARGET_ASM_INTEGER
2354 #define TARGET_ASM_INTEGER m32c_asm_integer
2355 static bool
2356 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2357 {
2358 switch (size)
2359 {
2360 case 3:
2361 fprintf (asm_out_file, "\t.3byte\t");
2362 output_addr_const (asm_out_file, x);
2363 fputc ('\n', asm_out_file);
2364 return true;
2365 case 4:
2366 if (GET_CODE (x) == SYMBOL_REF)
2367 {
2368 fprintf (asm_out_file, "\t.long\t");
2369 output_addr_const (asm_out_file, x);
2370 fputc ('\n', asm_out_file);
2371 return true;
2372 }
2373 break;
2374 }
2375 return default_assemble_integer (x, size, aligned_p);
2376 }
2377
/* Output of Assembler Instructions */

/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Operand-printing conversion table used by m32c_print_operand.
   CODE is the print_operand modifier letter an entry applies to (0
   for none).  PATTERN is compared against the string produced by
   encode_pattern() for the operand.  FORMAT describes the output:
   digits select the corresponding patternr[] entry to print, a
   backslash quotes the next character, 'z' inserts a zero
   displacement when the base register requires one, and every other
   character is emitted literally.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  /* Sentinel: null pattern terminates the scan.  */
  { 0, 0, 0 }
};
2434
/* This is in order according to the bitfield that pushm/popm use:
   index N names the register selected by bit N of the mask (bit 7 =
   r0 ... bit 0 = fb).  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2439
/* Implements TARGET_PRINT_OPERAND. */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND m32c_print_operand

/* Implements TARGET_PRINT_OPERAND.  CODE is the optional modifier
   letter from the insn template.  A number of modifiers are handled
   as special cases up front ('u'/'U' unsigned constants, '!' debug
   trap, '&' PSImode suffix, 'C'/'c' condition names, 'h'/'H' sub-part
   selection); the remaining operands are matched against the
   conversions[] table using encode_pattern (x).  */
static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need.  */
  if (code == 'u')
    {
      unsigned_const = 2;	/* Mask to 16 bits below.  */
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;	/* Mask to 8 bits below.  */
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Classify the operand into the pattern/patternr[] globals, then
     find the matching conversions[] entry and interpret its format
     string.  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Try the constant as-is, then inverted as a
			   16-bit mask, then inverted as an 8-bit mask.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      /* force_sign is set when a '+' separator was
			 consumed earlier; emit it now for non-negative
			 values so the displacement blends with the
			 preceding symbol.  */
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table entry matched: report it and emit a recognizable marker
     into the assembly output.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
2797
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.

   See m32c_print_operand above for descriptions of what these do.  */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  /* Only '&' (PSImode size suffix) and '!' (debug trap) are valid
     punctuation codes.  */
  return c == '&' || c == '!';
}
2813
2814 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2815
2816 #undef TARGET_PRINT_OPERAND_ADDRESS
2817 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2818
2819 static void
2820 m32c_print_operand_address (FILE * stream, rtx address)
2821 {
2822 if (GET_CODE (address) == MEM)
2823 address = XEXP (address, 0);
2824 else
2825 /* cf: gcc.dg/asm-4.c. */
2826 gcc_assert (GET_CODE (address) == REG);
2827
2828 m32c_print_operand (stream, address, 0);
2829 }
2830
2831 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2832 differently than general registers. */
2833 void
2834 m32c_output_reg_push (FILE * s, int regno)
2835 {
2836 if (regno == FLG_REGNO)
2837 fprintf (s, "\tpushc\tflg\n");
2838 else
2839 fprintf (s, "\tpush.%c\t%s\n",
2840 " bwll"[reg_push_size (regno)], reg_names[regno]);
2841 }
2842
2843 /* Likewise for ASM_OUTPUT_REG_POP. */
2844 void
2845 m32c_output_reg_pop (FILE * s, int regno)
2846 {
2847 if (regno == FLG_REGNO)
2848 fprintf (s, "\tpopc\tflg\n");
2849 else
2850 fprintf (s, "\tpop.%c\t%s\n",
2851 " bwll"[reg_push_size (regno)], reg_names[regno]);
2852 }
2853
2854 /* Defining target-specific uses of `__attribute__' */
2855
/* Used to simplify the logic below. Find the attributes wherever
   they may be.  DECL may be a type or a decl; for a decl with no
   attributes of its own, fall back to its type's attributes.  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		: DECL_ATTRIBUTES (decl) \
		  ? (DECL_ATTRIBUTES (decl)) \
		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2863
2864 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2865 static int
2866 interrupt_p (tree node ATTRIBUTE_UNUSED)
2867 {
2868 tree list = M32C_ATTRIBUTES (node);
2869 while (list)
2870 {
2871 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2872 return 1;
2873 list = TREE_CHAIN (list);
2874 }
2875 return fast_interrupt_p (node);
2876 }
2877
2878 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2879 static int
2880 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2881 {
2882 tree list = M32C_ATTRIBUTES (node);
2883 while (list)
2884 {
2885 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2886 return 1;
2887 list = TREE_CHAIN (list);
2888 }
2889 return 0;
2890 }
2891
2892 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2893 static int
2894 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2895 {
2896 tree list = M32C_ATTRIBUTES (node);
2897 while (list)
2898 {
2899 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2900 return 1;
2901 list = TREE_CHAIN (list);
2902 }
2903 return 0;
2904 }
2905
/* Attribute handler shared by "interrupt", "bank_switch" and
   "fast_interrupt" (see m32c_attribute_table).  These attributes take
   no arguments and need no validation, so simply accept them.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2915
2916 /* Returns TRUE if given tree has the "function_vector" attribute. */
2917 int
2918 m32c_special_page_vector_p (tree func)
2919 {
2920 tree list;
2921
2922 if (TREE_CODE (func) != FUNCTION_DECL)
2923 return 0;
2924
2925 list = M32C_ATTRIBUTES (func);
2926 while (list)
2927 {
2928 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2929 return 1;
2930 list = TREE_CHAIN (list);
2931 }
2932 return 0;
2933 }
2934
2935 static tree
2936 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2937 tree name ATTRIBUTE_UNUSED,
2938 tree args ATTRIBUTE_UNUSED,
2939 int flags ATTRIBUTE_UNUSED,
2940 bool * no_add_attrs ATTRIBUTE_UNUSED)
2941 {
2942 if (TARGET_R8C)
2943 {
2944 /* The attribute is not supported for R8C target. */
2945 warning (OPT_Wattributes,
2946 "%qE attribute is not supported for R8C target",
2947 name);
2948 *no_add_attrs = true;
2949 }
2950 else if (TREE_CODE (*node) != FUNCTION_DECL)
2951 {
2952 /* The attribute must be applied to functions only. */
2953 warning (OPT_Wattributes,
2954 "%qE attribute applies only to functions",
2955 name);
2956 *no_add_attrs = true;
2957 }
2958 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2959 {
2960 /* The argument must be a constant integer. */
2961 warning (OPT_Wattributes,
2962 "%qE attribute argument not an integer constant",
2963 name);
2964 *no_add_attrs = true;
2965 }
2966 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2967 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2968 {
2969 /* The argument value must be between 18 to 255. */
2970 warning (OPT_Wattributes,
2971 "%qE attribute argument should be between 18 to 255",
2972 name);
2973 *no_add_attrs = true;
2974 }
2975 return NULL_TREE;
2976 }
2977
2978 /* If the function is assigned the attribute 'function_vector', it
2979 returns the function vector number, otherwise returns zero. */
2980 int
2981 current_function_special_page_vector (rtx x)
2982 {
2983 int num;
2984
2985 if ((GET_CODE(x) == SYMBOL_REF)
2986 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2987 {
2988 tree list;
2989 tree t = SYMBOL_REF_DECL (x);
2990
2991 if (TREE_CODE (t) != FUNCTION_DECL)
2992 return 0;
2993
2994 list = M32C_ATTRIBUTES (t);
2995 while (list)
2996 {
2997 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2998 {
2999 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3000 return num;
3001 }
3002
3003 list = TREE_CHAIN (list);
3004 }
3005
3006 return 0;
3007 }
3008 else
3009 return 0;
3010 }
3011
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Machine attributes.  Fields: name, min args, max args, decl
   required, type required, function type required, handler, affects
   type identity.  "function_vector" takes exactly one argument and
   must be on a decl; the others take none.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true, false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
3022
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  Any two attribute sets are
   considered compatible on this target.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3032
3033 #undef TARGET_INSERT_ATTRIBUTES
3034 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3035 static void
3036 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3037 tree * attr_ptr ATTRIBUTE_UNUSED)
3038 {
3039 unsigned addr;
3040 /* See if we need to make #pragma address variables volatile. */
3041
3042 if (TREE_CODE (node) == VAR_DECL)
3043 {
3044 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3045 if (m32c_get_pragma_address (name, &addr))
3046 {
3047 TREE_THIS_VOLATILE (node) = true;
3048 }
3049 }
3050 }
3051
3052
/* hash_map traits for C-string keys: hash and compare by string
   contents rather than pointer identity.  */
struct pragma_traits : default_hashmap_traits
{
  static hashval_t hash (const char *str) { return htab_hash_string (str); }
  static bool
  equal_keys (const char *a, const char *b)
  {
    return !strcmp (a, b);
  }
};
3062
/* Hash table of pragma info: maps a variable name to the fixed
   address given by #pragma address.  GC-rooted via GTY.  */
static GTY(()) hash_map<const char *, unsigned, pragma_traits> *pragma_htab;
3065
3066 void
3067 m32c_note_pragma_address (const char *varname, unsigned address)
3068 {
3069 if (!pragma_htab)
3070 pragma_htab
3071 = hash_map<const char *, unsigned, pragma_traits>::create_ggc (31);
3072
3073 const char *name = ggc_strdup (varname);
3074 unsigned int *slot = &pragma_htab->get_or_insert (name);
3075 *slot = address;
3076 }
3077
3078 static bool
3079 m32c_get_pragma_address (const char *varname, unsigned *address)
3080 {
3081 if (!pragma_htab)
3082 return false;
3083
3084 unsigned int *slot = pragma_htab->get (varname);
3085 if (slot)
3086 {
3087 *address = *slot;
3088 return true;
3089 }
3090 return false;
3091 }
3092
3093 void
3094 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3095 const char *name,
3096 int size, int align, int global)
3097 {
3098 unsigned address;
3099
3100 if (m32c_get_pragma_address (name, &address))
3101 {
3102 /* We never output these as global. */
3103 assemble_name (stream, name);
3104 fprintf (stream, " = 0x%04x\n", address);
3105 return;
3106 }
3107 if (!global)
3108 {
3109 fprintf (stream, "\t.local\t");
3110 assemble_name (stream, name);
3111 fprintf (stream, "\n");
3112 }
3113 fprintf (stream, "\t.comm\t");
3114 assemble_name (stream, name);
3115 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3116 }
3117
3118 /* Predicates */
3119
/* This is a list of legal subregs of hard regs.  Consulted by
   m32c_illegal_subreg_p: a subreg is legal only if some entry matches
   its outer size, inner size, register and byte offset.  */
static const struct {
  unsigned char outer_mode_size;	/* Size of the subreg, in bytes.  */
  unsigned char inner_mode_size;	/* Size of the underlying reg, in bytes.  */
  unsigned char byte_mask;		/* Bitmask of allowed SUBREG_BYTE offsets.  */
  unsigned char legal_when;		/* 1 = always; 16 = on A16 only; 24 = on A24 only.  */
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
};
3144
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  machine_mode src_mode, dest_mode;

  /* Reject MEMs whose address we can't encode.  */
  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs are mere mode changes and always fine.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* The mem* registers (and pseudos above them) are never rejected
     here.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset into a single bit for matching against
     byte_mask in legal_subregs[].  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    /* Legal on all variants.  */
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No table entry allowed this subreg.  */
  return true;
}
3199
3200 /* Returns TRUE if we support a move between the first two operands.
3201 At the moment, we just want to discourage mem to mem moves until
3202 after reload, because reload has a hard time with our limited
3203 number of address registers, and we can get into a situation where
3204 we need three of them when we only have two. */
3205 bool
3206 m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
3207 {
3208 rtx op0 = operands[0];
3209 rtx op1 = operands[1];
3210
3211 if (TARGET_A24)
3212 return true;
3213
3214 #define DEBUG_MOV_OK 0
3215 #if DEBUG_MOV_OK
3216 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3217 debug_rtx (op0);
3218 debug_rtx (op1);
3219 #endif
3220
3221 if (GET_CODE (op0) == SUBREG)
3222 op0 = XEXP (op0, 0);
3223 if (GET_CODE (op1) == SUBREG)
3224 op1 = XEXP (op1, 0);
3225
3226 if (GET_CODE (op0) == MEM
3227 && GET_CODE (op1) == MEM
3228 && ! reload_completed)
3229 {
3230 #if DEBUG_MOV_OK
3231 fprintf (stderr, " - no, mem to mem\n");
3232 #endif
3233 return false;
3234 }
3235
3236 #if DEBUG_MOV_OK
3237 fprintf (stderr, " - ok\n");
3238 #endif
3239 return true;
3240 }
3241
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.
   Currently disabled: the implementation relied on MEM flags that no
   longer exist, so the optimization is never performed.  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  */
  return false;
}
3253
/* Expanders */

/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Returns an rtx for the BYTE-offset OUTER-mode
   piece of X (which has mode INNER), mapping hard registers through
   the machine's actual register layout.  */
static rtx
m32c_subreg (machine_mode outer,
	     rtx x, machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  /* Non-registers go through the generic simplifier.  */
  if (GET_CODE (x) != REG)
    {
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos, the AP register, and the mem* registers are also left to
     the generic simplifier.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  /* Any combination not listed above is unsupported; fail loudly
     rather than emit wrong code.  */
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3343
3344 /* Used to emit move instructions. We split some moves,
3345 and avoid mem-mem moves. */
3346 int
3347 m32c_prepare_move (rtx * operands, machine_mode mode)
3348 {
3349 if (far_addr_space_p (operands[0])
3350 && CONSTANT_P (operands[1]))
3351 {
3352 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3353 }
3354 if (TARGET_A16 && mode == PSImode)
3355 return m32c_split_move (operands, mode, 1);
3356 if ((GET_CODE (operands[0]) == MEM)
3357 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3358 {
3359 rtx pmv = XEXP (operands[0], 0);
3360 rtx dest_reg = XEXP (pmv, 0);
3361 rtx dest_mod = XEXP (pmv, 1);
3362
3363 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3364 operands[0] = gen_rtx_MEM (mode, dest_reg);
3365 }
3366 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3367 operands[1] = copy_to_mode_reg (mode, operands[1]);
3368 return 0;
3369 }
3370
3371 #define DEBUG_SPLIT 0
3372
3373 /* Returns TRUE if the given PSImode move should be split. We split
3374 for all r8c/m16c moves, since it doesn't support them, and for
3375 POP.L as we can only *push* SImode. */
3376 int
3377 m32c_split_psi_p (rtx * operands)
3378 {
3379 #if DEBUG_SPLIT
3380 fprintf (stderr, "\nm32c_split_psi_p\n");
3381 debug_rtx (operands[0]);
3382 debug_rtx (operands[1]);
3383 #endif
3384 if (TARGET_A16)
3385 {
3386 #if DEBUG_SPLIT
3387 fprintf (stderr, "yes, A16\n");
3388 #endif
3389 return 1;
3390 }
3391 if (GET_CODE (operands[1]) == MEM
3392 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3393 {
3394 #if DEBUG_SPLIT
3395 fprintf (stderr, "yes, pop.l\n");
3396 #endif
3397 return 1;
3398 }
3399 #if DEBUG_SPLIT
3400 fprintf (stderr, "no, default\n");
3401 #endif
3402 return 0;
3403 }
3404
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was split (or, for split_all == 3, if OPERANDS was filled in
   with the split operand pairs).  */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  /* OPI starts at 2 so that, when OPS aliases OPERANDS (the
     define_split API), the original two operands are left intact and
     the split pairs land in operands[2..].  */
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later. */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode. */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register. */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* Always split into exactly two submode pieces (SI->2xHI or
     DI->2xSI); S[] and D[] are sized 4 but only 2 slots are used.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn. */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those. */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these. */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with. */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  Highest
     part first so the stack ends up in memory order.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops, lowest part first.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Emit the pairs in whichever order avoids clobbering a source
	 part before it has been read.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  For split_all == 3
     the caller (define_split) emits them from OPERANDS instead.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3541
3542 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3543 the like. For the R8C they expect one of the addresses to be in
3544 R1L:An so we need to arrange for that. Otherwise, it's just a
3545 matter of picking out the operands we want and emitting the right
3546 pattern for them. All these expanders, which correspond to
3547 patterns in blkmov.md, must return nonzero if they expand the insn,
3548 or zero if they should FAIL. */
3549
3550 /* This is a memset() opcode. All operands are implied, so we need to
3551 arrange for them to be in the right registers. The opcode wants
3552 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3553 the count (HI), and $2 the value (QI). */
3554 int
3555 m32c_expand_setmemhi(rtx *operands)
3556 {
3557 rtx desta, count, val;
3558 rtx desto, counto;
3559
3560 desta = XEXP (operands[0], 0);
3561 count = operands[1];
3562 val = operands[2];
3563
3564 desto = gen_reg_rtx (Pmode);
3565 counto = gen_reg_rtx (HImode);
3566
3567 if (GET_CODE (desta) != REG
3568 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3569 desta = copy_to_mode_reg (Pmode, desta);
3570
3571 /* This looks like an arbitrary restriction, but this is by far the
3572 most common case. For counts 8..14 this actually results in
3573 smaller code with no speed penalty because the half-sized
3574 constant can be loaded with a shorter opcode. */
3575 if (GET_CODE (count) == CONST_INT
3576 && GET_CODE (val) == CONST_INT
3577 && ! (INTVAL (count) & 1)
3578 && (INTVAL (count) > 1)
3579 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3580 {
3581 unsigned v = INTVAL (val) & 0xff;
3582 v = v | (v << 8);
3583 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3584 val = copy_to_mode_reg (HImode, GEN_INT (v));
3585 if (TARGET_A16)
3586 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3587 else
3588 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3589 return 1;
3590 }
3591
3592 /* This is the generalized memset() case. */
3593 if (GET_CODE (val) != REG
3594 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3595 val = copy_to_mode_reg (QImode, val);
3596
3597 if (GET_CODE (count) != REG
3598 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3599 count = copy_to_mode_reg (HImode, count);
3600
3601 if (TARGET_A16)
3602 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3603 else
3604 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3605
3606 return 1;
3607 }
3608
3609 /* This is a memcpy() opcode. All operands are implied, so we need to
3610 arrange for them to be in the right registers. The opcode wants
3611 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3612 is the source (MEM:BLK), and $2 the count (HI). */
3613 int
3614 m32c_expand_movmemhi(rtx *operands)
3615 {
3616 rtx desta, srca, count;
3617 rtx desto, srco, counto;
3618
3619 desta = XEXP (operands[0], 0);
3620 srca = XEXP (operands[1], 0);
3621 count = operands[2];
3622
3623 desto = gen_reg_rtx (Pmode);
3624 srco = gen_reg_rtx (Pmode);
3625 counto = gen_reg_rtx (HImode);
3626
3627 if (GET_CODE (desta) != REG
3628 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3629 desta = copy_to_mode_reg (Pmode, desta);
3630
3631 if (GET_CODE (srca) != REG
3632 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3633 srca = copy_to_mode_reg (Pmode, srca);
3634
3635 /* Similar to setmem, but we don't need to check the value. */
3636 if (GET_CODE (count) == CONST_INT
3637 && ! (INTVAL (count) & 1)
3638 && (INTVAL (count) > 1))
3639 {
3640 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3641 if (TARGET_A16)
3642 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3643 else
3644 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3645 return 1;
3646 }
3647
3648 /* This is the generalized memset() case. */
3649 if (GET_CODE (count) != REG
3650 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3651 count = copy_to_mode_reg (HImode, count);
3652
3653 if (TARGET_A16)
3654 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3655 else
3656 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3657
3658 return 1;
3659 }
3660
3661 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3662 the copy, which should point to the NUL at the end of the string,
3663 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3664 Since our opcode leaves the destination pointing *after* the NUL,
3665 we must emit an adjustment. */
3666 int
3667 m32c_expand_movstr(rtx *operands)
3668 {
3669 rtx desta, srca;
3670 rtx desto, srco;
3671
3672 desta = XEXP (operands[1], 0);
3673 srca = XEXP (operands[2], 0);
3674
3675 desto = gen_reg_rtx (Pmode);
3676 srco = gen_reg_rtx (Pmode);
3677
3678 if (GET_CODE (desta) != REG
3679 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3680 desta = copy_to_mode_reg (Pmode, desta);
3681
3682 if (GET_CODE (srca) != REG
3683 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3684 srca = copy_to_mode_reg (Pmode, srca);
3685
3686 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3687 /* desto ends up being a1, which allows this type of add through MOVA. */
3688 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3689
3690 return 1;
3691 }
3692
3693 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3694 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3695 $2 is the other (MEM:BLK). We must do the comparison, and then
3696 convert the flags to a signed integer result. */
3697 int
3698 m32c_expand_cmpstr(rtx *operands)
3699 {
3700 rtx src1a, src2a;
3701
3702 src1a = XEXP (operands[1], 0);
3703 src2a = XEXP (operands[2], 0);
3704
3705 if (GET_CODE (src1a) != REG
3706 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3707 src1a = copy_to_mode_reg (Pmode, src1a);
3708
3709 if (GET_CODE (src2a) != REG
3710 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3711 src2a = copy_to_mode_reg (Pmode, src2a);
3712
3713 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3714 emit_insn (gen_cond_to_int (operands[0]));
3715
3716 return 1;
3717 }
3718
3719
3720 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3721
3722 static shift_gen_func
3723 shift_gen_func_for (int mode, int code)
3724 {
3725 #define GFF(m,c,f) if (mode == m && code == c) return f
3726 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3727 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3728 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3729 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3730 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3731 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3732 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3733 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3734 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3735 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3736 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3737 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3738 #undef GFF
3739 gcc_unreachable ();
3740 }
3741
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.

   Returns 1 if the shift was fully emitted here, 0 if the caller's
   pattern should proceed with (possibly adjusted) operands.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant count: emit as many maximum-width shifts as needed
	 (chaining through fresh pseudos), then one final shift for
	 the remainder.  MAXC is the hardware per-insn limit.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this. */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value. */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move. */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

	 cmp.b r1h,-16
	 jge.b 1f
	 shl.l -16,dest
	 add.b r1h,16
	 1f: cmp.b r1h,16
	 jle.b 1f
	 shl.l 16,dest
	 sub.b r1h,16
	 1f: shl.l r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons. */

      rtx count;
      rtx label, tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.
	     If count > 16, pre-shift by 16 (two 8-bit shifts) and
	     reduce the count accordingly; the label skips that.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      /* The final shift is done in place on operands[0].  */
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  /* Let the caller's pattern do the shift with the adjusted count.  */
  operands[2] = temp;
  return 0;
}
3851
3852 /* The m32c has a limited range of operations that work on PSImode
3853 values; we have to expand to SI, do the math, and truncate back to
3854 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3855 those cases. */
3856 void
3857 m32c_expand_neg_mulpsi3 (rtx * operands)
3858 {
3859 /* operands: a = b * i */
3860 rtx temp1; /* b as SI */
3861 rtx scale /* i as SI */;
3862 rtx temp2; /* a*b as SI */
3863
3864 temp1 = gen_reg_rtx (SImode);
3865 temp2 = gen_reg_rtx (SImode);
3866 if (GET_CODE (operands[2]) != CONST_INT)
3867 {
3868 scale = gen_reg_rtx (SImode);
3869 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3870 }
3871 else
3872 scale = copy_to_mode_reg (SImode, operands[2]);
3873
3874 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3875 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3876 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3877 }
3878
3879 /* Pattern Output Functions */
3880
3881 int
3882 m32c_expand_movcc (rtx *operands)
3883 {
3884 rtx rel = operands[1];
3885
3886 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3887 return 1;
3888 if (GET_CODE (operands[2]) != CONST_INT
3889 || GET_CODE (operands[3]) != CONST_INT)
3890 return 1;
3891 if (GET_CODE (rel) == NE)
3892 {
3893 rtx tmp = operands[2];
3894 operands[2] = operands[3];
3895 operands[3] = tmp;
3896 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3897 }
3898
3899 emit_move_insn (operands[0],
3900 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3901 rel,
3902 operands[2],
3903 operands[3]));
3904 return 0;
3905 }
3906
3907 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3908 int
3909 m32c_expand_insv (rtx *operands)
3910 {
3911 rtx op0, src0, p;
3912 int mask;
3913
3914 if (INTVAL (operands[1]) != 1)
3915 return 1;
3916
3917 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3918 if (GET_CODE (operands[3]) != CONST_INT)
3919 return 1;
3920 if (INTVAL (operands[3]) != 0
3921 && INTVAL (operands[3]) != 1
3922 && INTVAL (operands[3]) != -1)
3923 return 1;
3924
3925 mask = 1 << INTVAL (operands[2]);
3926
3927 op0 = operands[0];
3928 if (GET_CODE (op0) == SUBREG
3929 && SUBREG_BYTE (op0) == 0)
3930 {
3931 rtx sub = SUBREG_REG (op0);
3932 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3933 op0 = sub;
3934 }
3935
3936 if (!can_create_pseudo_p ()
3937 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3938 src0 = op0;
3939 else
3940 {
3941 src0 = gen_reg_rtx (GET_MODE (op0));
3942 emit_move_insn (src0, op0);
3943 }
3944
3945 if (GET_MODE (op0) == HImode
3946 && INTVAL (operands[2]) >= 8
3947 && GET_CODE (op0) == MEM)
3948 {
3949 /* We are little endian. */
3950 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3951 XEXP (op0, 0), 1));
3952 MEM_COPY_ATTRIBUTES (new_mem, op0);
3953 mask >>= 8;
3954 }
3955
3956 /* First, we generate a mask with the correct polarity. If we are
3957 storing a zero, we want an AND mask, so invert it. */
3958 if (INTVAL (operands[3]) == 0)
3959 {
3960 /* Storing a zero, use an AND mask */
3961 if (GET_MODE (op0) == HImode)
3962 mask ^= 0xffff;
3963 else
3964 mask ^= 0xff;
3965 }
3966 /* Now we need to properly sign-extend the mask in case we need to
3967 fall back to an AND or OR opcode. */
3968 if (GET_MODE (op0) == HImode)
3969 {
3970 if (mask & 0x8000)
3971 mask -= 0x10000;
3972 }
3973 else
3974 {
3975 if (mask & 0x80)
3976 mask -= 0x100;
3977 }
3978
3979 switch ( (INTVAL (operands[3]) ? 4 : 0)
3980 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3981 + (TARGET_A24 ? 1 : 0))
3982 {
3983 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3984 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3985 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3986 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3987 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3988 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3989 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3990 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3991 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
3992 }
3993
3994 emit_insn (p);
3995 return 0;
3996 }
3997
3998 const char *
3999 m32c_scc_pattern(rtx *operands, RTX_CODE code)
4000 {
4001 static char buf[30];
4002 if (GET_CODE (operands[0]) == REG
4003 && REGNO (operands[0]) == R0_REGNO)
4004 {
4005 if (code == EQ)
4006 return "stzx\t#1,#0,r0l";
4007 if (code == NE)
4008 return "stzx\t#0,#1,r0l";
4009 }
4010 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4011 return buf;
4012 }
4013
4014 /* Encode symbol attributes of a SYMBOL_REF into its
4015 SYMBOL_REF_FLAGS. */
4016 static void
4017 m32c_encode_section_info (tree decl, rtx rtl, int first)
4018 {
4019 int extra_flags = 0;
4020
4021 default_encode_section_info (decl, rtl, first);
4022 if (TREE_CODE (decl) == FUNCTION_DECL
4023 && m32c_special_page_vector_p (decl))
4024
4025 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4026
4027 if (extra_flags)
4028 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4029 }
4030
/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn. */
static int
m32c_leaf_function_p (void)
{
  rtx_insn *saved_first, *saved_last;
  struct sequence_stack *seq;
  int rv;

  /* Save the current emit state so it can be restored after
     leaf_function_p () has looked at the function's insn stream.  */
  saved_first = crtl->emit.x_first_insn;
  saved_last = crtl->emit.x_last_insn;
  /* Walk to the last entry on the sequence stack, which holds the
     function's own insn stream if any sequences are open.  */
  for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      /* Temporarily make the function's stream the current one.  */
      crtl->emit.x_first_insn = seq->first;
      crtl->emit.x_last_insn = seq->last;
    }

  rv = leaf_function_p ();

  /* Restore whatever sequence was current on entry.  */
  crtl->emit.x_first_insn = saved_first;
  crtl->emit.x_last_insn = saved_last;
  return rv;
}
4059
4060 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4061 opcodes. If the function doesn't need the frame base or stack
4062 pointer, it can use the simpler RTS opcode. */
4063 static bool
4064 m32c_function_needs_enter (void)
4065 {
4066 rtx_insn *insn;
4067 struct sequence_stack *seq;
4068 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4069 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4070
4071 insn = get_insns ();
4072 for (seq = crtl->emit.sequence_stack;
4073 seq;
4074 insn = seq->first, seq = seq->next);
4075
4076 while (insn)
4077 {
4078 if (reg_mentioned_p (sp, insn))
4079 return true;
4080 if (reg_mentioned_p (fb, insn))
4081 return true;
4082 insn = NEXT_INSN (insn);
4083 }
4084 return false;
4085 }
4086
4087 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4088 frame-related. Return PAR.
4089
4090 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4091 PARALLEL rtx other than the first if they do not have the
4092 FRAME_RELATED flag set on them. So this function is handy for
4093 marking up 'enter' instructions. */
4094 static rtx
4095 m32c_all_frame_related (rtx par)
4096 {
4097 int len = XVECLEN (par, 0);
4098 int i;
4099
4100 for (i = 0; i < len; i++)
4101 F (XVECEXP (par, 0, i));
4102
4103 return par;
4104 }
4105
/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp. */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  /* Nonzero once anything beyond a bare ENTER has been emitted; used
     only to decide whether to emit the prologue_end marker.  */
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* Bytes of call-saved registers, needed to size the local frame.  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  /* Interrupt entry: either switch register banks or push the
     registers the interrupt clobbers.  */
  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  /* No frame and no sp/fb references: a plain RTS return suffices.  */
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  /* ENTER can only reserve up to 254 bytes; reserve the rest with an
     explicit sp adjustment below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  /* Push the remaining call-saved registers.  */
  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging. */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4171
/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that. */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging. */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  /* Restore the call-saved registers pushed by the prologue.  */
  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER. */
      if (!cfun->machine->use_rts)
	{
	  /* Unwind: sp = fp (via a0, as sp can't be loaded from fp
	     directly), then pop the saved frame pointer.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching. */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      /* Undo the interrupt-entry pushm (bank switching restores the
	 registers implicitly instead).  */
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
	 generated only for M32C/M32CM targets (generate the REIT
	 instruction otherwise). */
      if (fast_interrupt_p (cfun->decl))
	{
	  /* Check if fast_attribute is set for M32C or M32CM. */
	  if (TARGET_A24)
	    {
	      emit_jump_insn (gen_epilogue_freit ());
	    }
	  /* If fast_interrupt attribute is set for an R8C or M16C
	     target ignore this attribute and generated REIT
	     instruction. */
	  else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
	}
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  /* Non-interrupt returns: plain RTS when no frame was set up,
     otherwise EXITD to tear down the ENTER frame.  */
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4243
/* Emit the exception-handling epilogue.  RET_ADDR is the address to
   return to; the stack adjustment was stashed in
   cfun->machine->eh_stack_adjust.  The heavy lifting is delegated to
   a libgcc stub via the eh_epilogue pattern.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc. */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}
4254
4255 /* Indicate which flags must be properly set for a given conditional. */
4256 static int
4257 flags_needed_for_conditional (rtx cond)
4258 {
4259 switch (GET_CODE (cond))
4260 {
4261 case LE:
4262 case GT:
4263 return FLAGS_OSZ;
4264 case LEU:
4265 case GTU:
4266 return FLAGS_ZC;
4267 case LT:
4268 case GE:
4269 return FLAGS_OS;
4270 case LTU:
4271 case GEU:
4272 return FLAGS_C;
4273 case EQ:
4274 case NE:
4275 return FLAGS_Z;
4276 default:
4277 return FLAGS_N;
4278 }
4279 }
4280
4281 #define DEBUG_CMP 0
4282
4283 /* Returns true if a compare insn is redundant because it would only
4284 set flags that are already set correctly. */
4285 static bool
4286 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4287 {
4288 int flags_needed;
4289 int pflags;
4290 rtx_insn *prev;
4291 rtx pp, next;
4292 rtx op0, op1;
4293 #if DEBUG_CMP
4294 int prev_icode, i;
4295 #endif
4296
4297 op0 = operands[0];
4298 op1 = operands[1];
4299
4300 #if DEBUG_CMP
4301 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4302 debug_rtx(cmp);
4303 for (i=0; i<2; i++)
4304 {
4305 fprintf(stderr, "operands[%d] = ", i);
4306 debug_rtx(operands[i]);
4307 }
4308 #endif
4309
4310 next = next_nonnote_insn (cmp);
4311 if (!next || !INSN_P (next))
4312 {
4313 #if DEBUG_CMP
4314 fprintf(stderr, "compare not followed by insn\n");
4315 debug_rtx(next);
4316 #endif
4317 return false;
4318 }
4319 if (GET_CODE (PATTERN (next)) == SET
4320 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4321 {
4322 next = XEXP (XEXP (PATTERN (next), 1), 0);
4323 }
4324 else if (GET_CODE (PATTERN (next)) == SET)
4325 {
4326 /* If this is a conditional, flags_needed will be something
4327 other than FLAGS_N, which we test below. */
4328 next = XEXP (PATTERN (next), 1);
4329 }
4330 else
4331 {
4332 #if DEBUG_CMP
4333 fprintf(stderr, "compare not followed by conditional\n");
4334 debug_rtx(next);
4335 #endif
4336 return false;
4337 }
4338 #if DEBUG_CMP
4339 fprintf(stderr, "conditional is: ");
4340 debug_rtx(next);
4341 #endif
4342
4343 flags_needed = flags_needed_for_conditional (next);
4344 if (flags_needed == FLAGS_N)
4345 {
4346 #if DEBUG_CMP
4347 fprintf(stderr, "compare not followed by conditional\n");
4348 debug_rtx(next);
4349 #endif
4350 return false;
4351 }
4352
4353 /* Compare doesn't set overflow and carry the same way that
4354 arithmetic instructions do, so we can't replace those. */
4355 if (flags_needed & FLAGS_OC)
4356 return false;
4357
4358 prev = cmp;
4359 do {
4360 prev = prev_nonnote_insn (prev);
4361 if (!prev)
4362 {
4363 #if DEBUG_CMP
4364 fprintf(stderr, "No previous insn.\n");
4365 #endif
4366 return false;
4367 }
4368 if (!INSN_P (prev))
4369 {
4370 #if DEBUG_CMP
4371 fprintf(stderr, "Previous insn is a non-insn.\n");
4372 #endif
4373 return false;
4374 }
4375 pp = PATTERN (prev);
4376 if (GET_CODE (pp) != SET)
4377 {
4378 #if DEBUG_CMP
4379 fprintf(stderr, "Previous insn is not a SET.\n");
4380 #endif
4381 return false;
4382 }
4383 pflags = get_attr_flags (prev);
4384
4385 /* Looking up attributes of previous insns corrupted the recog
4386 tables. */
4387 INSN_UID (cmp) = -1;
4388 recog (PATTERN (cmp), cmp, 0);
4389
4390 if (pflags == FLAGS_N
4391 && reg_mentioned_p (op0, pp))
4392 {
4393 #if DEBUG_CMP
4394 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4395 debug_rtx(prev);
4396 #endif
4397 return false;
4398 }
4399
4400 /* Check for comparisons against memory - between volatiles and
4401 aliases, we just can't risk this one. */
4402 if (GET_CODE (operands[0]) == MEM
4403 || GET_CODE (operands[0]) == MEM)
4404 {
4405 #if DEBUG_CMP
4406 fprintf(stderr, "comparisons with memory:\n");
4407 debug_rtx(prev);
4408 #endif
4409 return false;
4410 }
4411
4412 /* Check for PREV changing a register that's used to compute a
4413 value in CMP, even if it doesn't otherwise change flags. */
4414 if (GET_CODE (operands[0]) == REG
4415 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4416 {
4417 #if DEBUG_CMP
4418 fprintf(stderr, "sub-value affected, op0:\n");
4419 debug_rtx(prev);
4420 #endif
4421 return false;
4422 }
4423 if (GET_CODE (operands[1]) == REG
4424 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4425 {
4426 #if DEBUG_CMP
4427 fprintf(stderr, "sub-value affected, op1:\n");
4428 debug_rtx(prev);
4429 #endif
4430 return false;
4431 }
4432
4433 } while (pflags == FLAGS_N);
4434 #if DEBUG_CMP
4435 fprintf(stderr, "previous flag-setting insn:\n");
4436 debug_rtx(prev);
4437 debug_rtx(pp);
4438 #endif
4439
4440 if (GET_CODE (pp) == SET
4441 && GET_CODE (XEXP (pp, 0)) == REG
4442 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4443 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4444 {
4445 /* Adjacent cbranches must have the same operands to be
4446 redundant. */
4447 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4448 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4449 #if DEBUG_CMP
4450 fprintf(stderr, "adjacent cbranches\n");
4451 debug_rtx(pop0);
4452 debug_rtx(pop1);
4453 #endif
4454 if (rtx_equal_p (op0, pop0)
4455 && rtx_equal_p (op1, pop1))
4456 return true;
4457 #if DEBUG_CMP
4458 fprintf(stderr, "prev cmp not same\n");
4459 #endif
4460 return false;
4461 }
4462
4463 /* Else the previous insn must be a SET, with either the source or
4464 dest equal to operands[0], and operands[1] must be zero. */
4465
4466 if (!rtx_equal_p (op1, const0_rtx))
4467 {
4468 #if DEBUG_CMP
4469 fprintf(stderr, "operands[1] not const0_rtx\n");
4470 #endif
4471 return false;
4472 }
4473 if (GET_CODE (pp) != SET)
4474 {
4475 #if DEBUG_CMP
4476 fprintf (stderr, "pp not set\n");
4477 #endif
4478 return false;
4479 }
4480 if (!rtx_equal_p (op0, SET_SRC (pp))
4481 && !rtx_equal_p (op0, SET_DEST (pp)))
4482 {
4483 #if DEBUG_CMP
4484 fprintf(stderr, "operands[0] not found in set\n");
4485 #endif
4486 return false;
4487 }
4488
4489 #if DEBUG_CMP
4490 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4491 #endif
4492 if ((pflags & flags_needed) == flags_needed)
4493 return true;
4494
4495 return false;
4496 }
4497
/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.  */
char *
m32c_output_compare (rtx_insn *insn, rtx *operands)
{
  /* Template with a leading ';' (assembler comment marker).  Index 5
     is the size-suffix slot ("cmp.X").  Static and mutated in place,
     so the returned string is only valid until the next call.  */
  static char templ[] = ";cmp.b\t%1,%0";
  /*                           ^ 5  */

  /* Pick 'b'/'w'/'l' from the operand size (1, 2, or 4 bytes);
     index 0 of " bwll" is unused padding.  */
  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      /* Keep the leading ';' so the compare appears commented out.  */
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  /* Skip the ';' to emit a live compare.  */
  return templ + 1;
}
4521
4522 #undef TARGET_ENCODE_SECTION_INFO
4523 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4524
4525 /* If the frame pointer isn't used, we detect it manually. But the
4526 stack pointer doesn't have as flexible addressing as the frame
4527 pointer, so we always assume we have it. */
4528
4529 #undef TARGET_FRAME_POINTER_REQUIRED
4530 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4531
4532 /* The Global `targetm' Variable. */
4533
4534 struct gcc_target targetm = TARGET_INITIALIZER;
4535
4536 #include "gt-m32c.h"
This page took 0.231846 seconds and 5 git commands to generate.