1 /* GCC backend functions for C-SKY targets.
2 Copyright (C) 2018-2021 Free Software Foundation, Inc.
3 Contributed by C-SKY Microsystems and Mentor Graphics.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #define IN_TARGET_CODE 1
25 #include "coretypes.h"
34 #include "stringpool.h"
41 #include "c-family/c-common.h"
43 #include "diagnostic-core.h"
45 #include "fold-const.h"
46 #include "stor-layout.h"
50 #include "insn-attr.h"
56 #include "sched-int.h"
57 #include "common/common-target.h"
58 #include "langhooks.h"
63 #include "target-globals.h"
65 #include "tm-constrs.h"
67 #include "pass_manager.h"
68 #include "tree-pass.h"
71 /* This file should be included last. */
72 #include "target-def.h"
74 /* Stack and register size macros. */
/* Number of machine words needed to hold SIZE bytes, rounding up.  */
76 #define CSKY_NUM_WORDS(SIZE) \
77 (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
/* Number of hard registers needed to hold a value of mode MODE.  */
78 #define CSKY_NUM_REGS(MODE) \
79 CSKY_NUM_WORDS (GET_MODE_SIZE (MODE))
/* SIZE rounded up to a whole number of words, in bytes.  */
80 #define CSKY_STACK_ALIGN(SIZE) \
81 (CSKY_NUM_WORDS (SIZE) * UNITS_PER_WORD)
83 /* Offsets and range macros. */
/* Maximum load/store offsets reachable by the 16-bit encoding
   (5-bit immediate field: 31 slots) and the 32-bit encoding
   (12-bit immediate field: 4095 slots); the hardware scales the
   immediate by the mode size.  */
85 #define CSKY_LD16_MAX_OFFSET(MODE) \
86 (31 * GET_MODE_SIZE (MODE))
87 #define CSKY_LD32_MAX_OFFSET(MODE) \
88 (4095 * GET_MODE_SIZE (MODE))
/* Mask covering the full byte range addressable by a 16-bit
   load/store of MODE (max offset plus the final access size - 1).  */
89 #define CSKY_LD16_OFFSET_MASK(MODE) \
90 (CSKY_LD16_MAX_OFFSET (MODE) + GET_MODE_SIZE (MODE) - 1)
/* Largest immediates accepted by the 16-bit addi/subi forms.  */
92 #define CSKY_ADDI16_MAX_IMM 256
93 #define CSKY_SUBI16_MAX_IMM 256
/* Assembler label prefix used for constant pool (minipool) entries.  */
95 #define CSKY_CONSTPOOL_LABEL_PREFIX "LCP"
97 /* Array of the smallest class containing reg number REGNO, indexed by
98 REGNO. Used by REGNO_REG_CLASS. */
99 enum reg_class regno_reg_class
[FIRST_PSEUDO_REGISTER
] =
101 /* Registers r0-r7. */
102 MINI_REGS
, MINI_REGS
, MINI_REGS
, MINI_REGS
,
103 MINI_REGS
, MINI_REGS
, MINI_REGS
, MINI_REGS
,
104 /* Registers r8-r15. */
105 LOW_REGS
, LOW_REGS
, LOW_REGS
, LOW_REGS
,
106 LOW_REGS
, LOW_REGS
, SP_REGS
, LOW_REGS
,
107 /* Registers r16-r31. */
108 GENERAL_REGS
, GENERAL_REGS
, GENERAL_REGS
, GENERAL_REGS
,
109 GENERAL_REGS
, GENERAL_REGS
, GENERAL_REGS
, GENERAL_REGS
,
110 GENERAL_REGS
, GENERAL_REGS
, GENERAL_REGS
, GENERAL_REGS
,
111 GENERAL_REGS
, GENERAL_REGS
, GENERAL_REGS
, GENERAL_REGS
,
114 /* CC,HI,LO registers. */
115 C_REGS
, HILO_REGS
, HILO_REGS
,
117 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
118 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
119 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
120 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
122 V_REGS
, V_REGS
, V_REGS
, V_REGS
,
123 V_REGS
, V_REGS
, V_REGS
, V_REGS
,
124 V_REGS
, V_REGS
, V_REGS
, V_REGS
,
125 V_REGS
, V_REGS
, V_REGS
, V_REGS
,
127 RESERVE_REGS
, RESERVE_REGS
,
131 V_REGS
, V_REGS
, V_REGS
, V_REGS
,
132 V_REGS
, V_REGS
, V_REGS
, V_REGS
,
133 V_REGS
, V_REGS
, V_REGS
, V_REGS
,
134 V_REGS
, V_REGS
, V_REGS
, V_REGS
,
136 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
137 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
138 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
139 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
141 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
142 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
143 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
144 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
145 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
146 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
147 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
148 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
150 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
151 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
152 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
153 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
154 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
155 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
156 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
157 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
159 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
160 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
161 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
162 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
163 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
164 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
165 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
166 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
,
168 RESERVE_REGS
, RESERVE_REGS
, RESERVE_REGS
171 /* Arrays that map GCC register numbers to debugger register numbers,
172 A value of '-1' means there is no debugger equivalent
173 (INVALID_REGNUM). TODO: document which DWARF/debugger numbering
convention these values follow. */
174 const int csky_dbx_regno
[FIRST_PSEUDO_REGISTER
] =
176 0, 1, 2, 3, 4, 5, 6, 7,
177 8, 9, 10, 11, 12, 13, 14, 15,
178 16, 17, 18, 19, 20, 21, 22, 23,
179 24, 25, 26, 27, 28, 29, 30, 31,
181 75, 79, 83, 87, 91, 95, 99, 103,
182 107, 111, 115, 119, 123, 127, 131, 135,
183 74, 78, 82, 86, 90, 94, 98, 102,
184 106, 110, 114, 118, 122, 126, 130, 134,
187 139, 143, 147, 151, 155, 159, 163, 167,
188 171, 175, 179, 183, 187, 191, 195, 199,
189 138, 142, 146, 150, 154, 158, 162, 166,
190 170, 174, 178, 182, 186, 190, 194, 198,
192 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1,
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, -1, -1, -1, -1,
197 -1, -1, -1, -1, -1, -1, -1, -1,
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1,
202 -1, -1, -1, -1, -1, -1, -1, -1,
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1,
210 /* Table of machine attributes. */
211 static tree
csky_handle_fndecl_attribute (tree
*, tree
, tree
, int, bool *);
212 static tree
csky_handle_isr_attribute (tree
*, tree
, tree
, int, bool *);
213 static const struct attribute_spec csky_attribute_table
[] =
215 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
216 affects_type_identity, handler, exclude } */
217 { "naked", 0, 0, true, false, false, false, csky_handle_fndecl_attribute
, NULL
},
218 /* Interrupt Service Routines have special prologue and epilogue requirements. */
219 { "interrupt", 0, 1, false, false, false, false, csky_handle_isr_attribute
, NULL
},
220 { "isr", 0, 1, false, false, false, false, csky_handle_isr_attribute
, NULL
},
221 { NULL
, 0, 0, false, false, false, false, NULL
, NULL
}
224 /* A C structure for machine-specific, per-function data.
225 This is added to the cfun structure. */
226 typedef struct GTY(()) machine_function
228 /* Records if LR has to be saved for far jumps. */
230 /* Records the type of the current function. */
231 unsigned long func_type
;
232 /* Record if the function has a variable argument list. */
233 int uses_anonymous_args
;
235 /* Stack frame layout information. If frame_init_p is true,
236 these fields have been initialized and don't need to be
238 unsigned int reg_mask
; /* non-volatile reg saves */
239 int arg_size
; /* stdarg spills (bytes) */
240 int reg_size
; /* non-volatile reg saves (bytes) */
241 int local_size
; /* locals */
242 int outbound_size
; /* arg overflow on calls out */
243 int frame_size
; /* total static size of stack frame */
251 /* These macros are for the func_type values above. */
/* Low three bits of func_type encode the base function type (0-7);
   higher bits are independent flags.  */
252 #define CSKY_FT_TYPE_MASK ((1 << 3) - 1)
253 #define CSKY_FT_UNKNOWN 0 /* Type has not been determined yet */
254 #define CSKY_FT_NORMAL 1 /* Normal function */
255 #define CSKY_FT_ISR 4 /* Interrupt service routine */
256 #define CSKY_FT_FIQ 5 /* Fast interrupt service routine */
257 #define CSKY_FT_EXCEPTION 6 /* Exception handler */
/* Bit 2 is set in each of the ISR/FIQ/EXCEPTION values above (4, 5,
   and 6), so testing this bit alone answers "is this any kind of
   interrupt handler?".  */
258 #define CSKY_FT_INTERRUPT (1 << 2) /* overlaps CSKY_FT_ISR/FIQ/EXCEPTION */
/* Flag bit outside the type mask.  */
259 #define CSKY_FT_NAKED (1 << 3) /* No prologue and epilogue */
/* Accessors for the encoding above.  */
260 #define CSKY_FUNCTION_TYPE(t) ((t) & CSKY_FT_TYPE_MASK)
261 #define CSKY_FUNCTION_IS_INTERRUPT(t) ((t) & CSKY_FT_INTERRUPT)
262 #define CSKY_FUNCTION_IS_NAKED(t) ((t) & CSKY_FT_NAKED)
264 struct csky_processors
266 const char *const name
;
267 enum csky_processor_type core
;
269 enum csky_base_architecture base_arch
;
270 enum csky_isa_feature isa_bits
[CSKY_ISA_FEATURE_GET (max
)];
273 static struct csky_processors all_cores
[] =
276 #define CSKY_CORE(NAME, CORE, X, ARCH, ISA) \
277 {NAME, TARGET_CPU_##CORE, #ARCH, CSKY_BASE_ARCH_##ARCH, \
278 {ISA CSKY_ISA_FEATURE_GET (none)}},
279 #include "csky_cores.def"
281 {NULL
, TARGET_CPU_csky_none
, NULL
, CSKY_BASE_ARCH_NONE
, \
282 {CSKY_ISA_FEATURE_GET (none
)}}
285 static struct csky_processors all_architectures
[] =
288 #define CSKY_ARCH(NAME, CORE, ARCH, ISA) \
289 {NAME, TARGET_CPU_##CORE, #ARCH, CSKY_BASE_ARCH_##ARCH, \
290 {ISA CSKY_ISA_FEATURE_GET (none)}},
291 #include "csky_cores.def"
293 {NULL
, TARGET_CPU_csky_none
, NULL
, CSKY_BASE_ARCH_NONE
, \
294 {CSKY_ISA_FEATURE_GET (none
)}}
300 enum csky_isa_feature isa_bits
[CSKY_ISA_FEATURE_GET (max
)];
303 static const struct csky_fpu_desc all_fpus
[] =
306 #define CSKY_FPU(NAME, CNAME, ISA) \
307 {NAME, {ISA CSKY_ISA_FEATURE_GET (none)}},
308 #include "csky_cores.def"
312 /* Active target architecture. */
313 struct csky_build_target
315 /* Name of the target CPU, if known, or NULL if the target CPU was not
316 specified by the user (and inferred from the -march option). */
317 const char *core_name
;
318 /* Name of the target ARCH. NULL if there is a selected CPU. */
319 const char *arch_name
;
320 /* Preprocessor substring (never NULL). */
321 const char *arch_pp_name
;
322 /* CPU identifier for the core we're compiling for (architecturally). */
323 enum csky_processor_type arch_core
;
324 /* The base architecture value. */
325 enum csky_base_architecture base_arch
;
326 /* Bitmap encapsulating the isa_bits for the target environment. */
330 struct csky_build_target csky_active_target
;
332 /* The following are used in the .md file as equivalents to bits. */
333 int csky_arch_isa_features
[CSKY_ISA_FEATURE_GET (max
)] = {0};
335 /* The highest CSKY architecture version supported by the target. */
336 enum csky_base_architecture csky_base_arch
= CSKY_TARGET_ARCH_GET (NONE
);
338 /* Forward definitions of types. */
339 typedef struct minipool_node Mnode
;
340 typedef struct minipool_fixup Mfix
;
342 static GTY(()) int tls_labelno
;
345 /* Maximum constant offset that can be added/subtracted from SP in a
346 single instruction. For ck801, this is for addsp/subsp, otherwise
347 it is the range of addi/subi. */
348 #define CSKY_MAX_SP_ADJUST \
349 (CSKY_TARGET_ARCH (CK801) ? 508 : 4096)
352 /* Implement TARGET_CPU_CPP_BUILTINS. */
354 #define builtin_define(MACRO) cpp_define (pfile, MACRO)
357 csky_cpu_cpp_builtins (cpp_reader
*pfile
)
359 const char *arch_name
= csky_active_target
.arch_pp_name
;
360 char *pp_name
= (char *) alloca (1 + strlen (arch_name
) + 4);
361 sprintf (pp_name
, "__%s__", arch_name
);
362 builtin_define (pp_name
);
364 builtin_define ("__csky__=2");
365 builtin_define ("__CSKY__=2");
366 builtin_define ("__ckcore__=2");
367 builtin_define ("__CKCORE__=2");
369 builtin_define ("__CSKYABIV2__");
370 builtin_define ("__cskyabiv2__");
371 builtin_define ("__CSKYABI__=2");
372 builtin_define ("__cskyabi__=2");
374 if (TARGET_BIG_ENDIAN
)
376 builtin_define ("__ckcoreBE__");
377 builtin_define ("__cskyBE__");
378 builtin_define ("__cskybe__");
379 builtin_define ("__CSKYBE__");
383 builtin_define ("__ckcoreLE__");
384 builtin_define ("__cskyLE__");
385 builtin_define ("__cskyle__");
386 builtin_define ("__CSKYLE__");
389 if (TARGET_HARD_FLOAT
)
391 builtin_define ("__csky_hard_float__");
392 builtin_define ("__CSKY_HARD_FLOAT__");
393 if (TARGET_HARD_FLOAT_ABI
)
395 builtin_define ("__csky_hard_float_abi__");
396 builtin_define ("__CSKY_HARD_FLOAT_ABI__");
398 if (TARGET_SINGLE_FPU
)
400 builtin_define ("__csky_hard_float_fpu_sf__");
401 builtin_define ("__CSKY_HARD_FLOAT_FPU_SF__");
406 builtin_define ("__csky_soft_float__");
407 builtin_define ("__CSKY_SOFT_FLOAT__");
410 if (CSKY_ISA_FEATURE (fpv2_sf
))
412 builtin_define ("__csky_fpuv2__");
413 builtin_define ("__CSKY_FPUV2__");
416 if (TARGET_SUPPORT_FPV3
)
418 builtin_define ("__csky_fpuv3__");
419 builtin_define ("__CSKY_FPUV3__");
424 builtin_define ("__csky_elrw__");
425 builtin_define ("__CSKY_ELRW__");
429 builtin_define ("__csky_istack__");
430 builtin_define ("__CSKY_ISTACK__");
434 builtin_define ("__csky_mp__");
435 builtin_define ("__CSKY_MP__");
439 builtin_define ("__csky_cp__");
440 builtin_define ("__CSKY_CP__");
444 builtin_define ("__csky_cache__");
445 builtin_define ("__CSKY_CACHE__");
449 builtin_define ("__csky_security__");
450 builtin_define ("__CSKY_SECURITY__");
454 builtin_define ("__csky_trust__");
455 builtin_define ("__CSKY_TRUST__");
459 builtin_define ("__csky_dsp__");
460 builtin_define ("__CSKY_DSP__");
464 builtin_define ("__csky_edsp__");
465 builtin_define ("__CSKY_EDSP__");
469 builtin_define ("__csky_vdsp__");
470 builtin_define ("__CSKY_VDSP__");
475 /******************************************************************
477 ******************************************************************/
479 #undef TARGET_PROMOTE_FUNCTION_MODE
480 #define TARGET_PROMOTE_FUNCTION_MODE \
481 default_promote_function_mode_always_promote
483 #undef TARGET_CONSTANT_ALIGNMENT
484 #define TARGET_CONSTANT_ALIGNMENT csky_constant_alignment
486 #undef TARGET_MANGLE_TYPE
487 #define TARGET_MANGLE_TYPE csky_mangle_type
490 /******************************************************************
491 * Stack Layout and Calling Conventions *
492 ******************************************************************/
494 #undef TARGET_CAN_ELIMINATE
495 #define TARGET_CAN_ELIMINATE csky_can_eliminate
497 #undef TARGET_FUNCTION_ARG
498 #define TARGET_FUNCTION_ARG csky_function_arg
500 #undef TARGET_FUNCTION_ARG_ADVANCE
501 #define TARGET_FUNCTION_ARG_ADVANCE csky_function_arg_advance
503 #undef TARGET_FUNCTION_VALUE
504 #define TARGET_FUNCTION_VALUE csky_function_value
506 #undef TARGET_LIBCALL_VALUE
507 #define TARGET_LIBCALL_VALUE csky_libcall_value
509 #undef TARGET_FUNCTION_VALUE_REGNO_P
510 #define TARGET_FUNCTION_VALUE_REGNO_P csky_function_value_regno_p
512 #undef TARGET_SPLIT_COMPLEX_ARG
513 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
515 #undef TARGET_MUST_PASS_IN_STACK
516 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
518 #undef TARGET_ARG_PARTIAL_BYTES
519 #define TARGET_ARG_PARTIAL_BYTES csky_arg_partial_bytes
521 #undef TARGET_PASS_BY_REFERENCE
522 #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
524 #undef TARGET_ASM_OUTPUT_MI_THUNK
525 #define TARGET_ASM_OUTPUT_MI_THUNK csky_output_mi_thunk
527 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
528 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
529 hook_bool_const_tree_hwi_hwi_const_tree_true
531 #undef TARGET_ASM_FUNCTION_PROLOGUE
532 #define TARGET_ASM_FUNCTION_PROLOGUE csky_output_function_prologue
534 #undef TARGET_ASM_FUNCTION_EPILOGUE
535 #define TARGET_ASM_FUNCTION_EPILOGUE csky_output_function_epilogue
537 #undef TARGET_WARN_FUNC_RETURN
538 #define TARGET_WARN_FUNC_RETURN csky_warn_func_return
540 #undef TARGET_RETURN_IN_MEMORY
541 #define TARGET_RETURN_IN_MEMORY csky_return_in_memory
544 /******************************************************************
545 * Implementing the Varargs Macros *
546 ******************************************************************/
549 #undef TARGET_SETUP_INCOMING_VARARGS
550 #define TARGET_SETUP_INCOMING_VARARGS csky_setup_incoming_varargs
553 /******************************************************************
554 * Implicit Calls to Library Routines *
555 ******************************************************************/
558 #undef TARGET_INIT_LIBFUNCS
559 #define TARGET_INIT_LIBFUNCS csky_init_libfuncs
562 /******************************************************************
563 * Dividing the Output into Sections (Texts, Data, . . . ) *
564 ******************************************************************/
567 #undef TARGET_HAVE_TLS
568 #define TARGET_HAVE_TLS TARGET_CSKY_LINUX
571 /******************************************************************
572 * Defining target-specific uses of __attribute__ *
573 ******************************************************************/
576 #undef TARGET_ATTRIBUTE_TABLE
577 #define TARGET_ATTRIBUTE_TABLE csky_attribute_table
579 #undef TARGET_OPTION_OVERRIDE
580 #define TARGET_OPTION_OVERRIDE csky_option_override
583 /* Implement the BRANCH_COST target macro. */
586 csky_default_branch_cost (bool speed_p ATTRIBUTE_UNUSED
,
587 bool predictable_p ATTRIBUTE_UNUSED
)
589 return csky_branch_cost
;
593 csky_default_logical_op_non_short_circuit (void)
595 return BRANCH_COST (optimize_function_for_speed_p (cfun
), false) >= 2;
598 /******************************************************************
600 ******************************************************************/
602 #undef TARGET_HARD_REGNO_NREGS
603 #define TARGET_HARD_REGNO_NREGS csky_hard_regno_nregs
605 #undef TARGET_HARD_REGNO_MODE_OK
606 #define TARGET_HARD_REGNO_MODE_OK csky_hard_regno_mode_ok
608 #undef TARGET_MODES_TIEABLE_P
609 #define TARGET_MODES_TIEABLE_P csky_modes_tieable_p
611 #undef TARGET_CAN_CHANGE_MODE_CLASS
612 #define TARGET_CAN_CHANGE_MODE_CLASS csky_can_change_mode_class
614 #undef TARGET_CONDITIONAL_REGISTER_USAGE
615 #define TARGET_CONDITIONAL_REGISTER_USAGE csky_conditional_register_usage
617 #undef TARGET_CLASS_LIKELY_SPILLED_P
618 #define TARGET_CLASS_LIKELY_SPILLED_P csky_class_likely_spilled_p
620 #undef TARGET_PREFERRED_RELOAD_CLASS
621 #define TARGET_PREFERRED_RELOAD_CLASS csky_preferred_reload_class
623 #undef TARGET_CLASS_MAX_NREGS
624 #define TARGET_CLASS_MAX_NREGS csky_class_max_nregs
626 #undef TARGET_SECONDARY_RELOAD
627 #define TARGET_SECONDARY_RELOAD csky_secondary_reload
629 #undef TARGET_SPILL_CLASS
630 #define TARGET_SPILL_CLASS csky_spill_class
633 /******************************************************************
635 ******************************************************************/
638 #undef TARGET_CANNOT_FORCE_CONST_MEM
639 #define TARGET_CANNOT_FORCE_CONST_MEM csky_cannot_force_const_mem
641 #undef TARGET_LEGITIMATE_CONSTANT_P
642 #define TARGET_LEGITIMATE_CONSTANT_P csky_legitimate_constant_p
644 #undef TARGET_LEGITIMIZE_ADDRESS
645 #define TARGET_LEGITIMIZE_ADDRESS csky_legitimize_address
647 #undef TARGET_LEGITIMATE_ADDRESS_P
648 #define TARGET_LEGITIMATE_ADDRESS_P csky_legitimate_address_p
651 /******************************************************************
653 ******************************************************************/
656 #undef TARGET_CANNOT_COPY_INSN_P
657 #define TARGET_CANNOT_COPY_INSN_P csky_cannot_copy_insn_p
660 /******************************************************************
662 ******************************************************************/
665 #undef TARGET_PRINT_OPERAND
666 #define TARGET_PRINT_OPERAND csky_print_operand
668 #undef TARGET_PRINT_OPERAND_ADDRESS
669 #define TARGET_PRINT_OPERAND_ADDRESS csky_print_operand_address
671 #undef TARGET_ASM_UNALIGNED_HI_OP
672 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
674 #undef TARGET_ASM_UNALIGNED_SI_OP
675 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
677 #undef TARGET_DWARF_REGISTER_SPAN
678 #define TARGET_DWARF_REGISTER_SPAN csky_dwarf_register_span
681 /******************************************************************
682 * Miscellaneous Parameters *
683 ******************************************************************/
686 #undef TARGET_MACHINE_DEPENDENT_REORG
687 #define TARGET_MACHINE_DEPENDENT_REORG csky_reorg
689 #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
690 #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS csky_allocate_stack_slots_for_args
692 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
693 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
696 /******************************************************************
697 * Trampolines for Nested Functions *
698 ******************************************************************/
701 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
702 #define TARGET_ASM_TRAMPOLINE_TEMPLATE csky_asm_trampoline_template
703 #undef TARGET_TRAMPOLINE_INIT
704 #define TARGET_TRAMPOLINE_INIT csky_trampoline_init
706 /* The low bit is ignored by jsr and jmp instructions so is safe to use. */
707 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
708 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
710 /******************************************************************
711 * Describing Relative Costs of Operations *
712 ******************************************************************/
715 #undef TARGET_REGISTER_MOVE_COST
716 #define TARGET_REGISTER_MOVE_COST csky_register_move_cost
718 #undef TARGET_MEMORY_MOVE_COST
719 #define TARGET_MEMORY_MOVE_COST csky_memory_move_cost
721 #undef TARGET_RTX_COSTS
722 #define TARGET_RTX_COSTS csky_rtx_costs
724 #undef TARGET_ADDRESS_COST
725 #define TARGET_ADDRESS_COST csky_address_cost
728 /******************************************************************
730 ******************************************************************/
733 /* FIXME: the maximum anchor offset depends on the mode size; the
734 value below assumes SImode. It is unclear how HImode and
735 QImode should be handled, or whether a minimum offset should
also be defined. */
736 #undef TARGET_MAX_ANCHOR_OFFSET
737 #define TARGET_MAX_ANCHOR_OFFSET \
738 ((TARGET_MINI_REGISTERS && optimize_size) ? 127 : 4095)
741 /******************************************************************
742 * Condition Code Status *
743 ******************************************************************/
746 #undef TARGET_FIXED_CONDITION_CODE_REGS
747 #define TARGET_FIXED_CONDITION_CODE_REGS csky_fixed_condition_code_regs
750 /******************************************************************
751 * Adjusting the Instruction Scheduler *
752 ******************************************************************/
755 #undef TARGET_SCHED_ISSUE_RATE
756 #define TARGET_SCHED_ISSUE_RATE csky_sched_issue_rate
758 #undef TARGET_SCHED_ADJUST_COST
759 #define TARGET_SCHED_ADJUST_COST csky_sched_adjust_cost
762 /******************************************************************
764 ******************************************************************/
767 #undef TARGET_INIT_BUILTINS
768 #define TARGET_INIT_BUILTINS csky_init_builtins
771 /* The declaration of functions. */
772 static void push_csky_minipool_fix (rtx_insn
*, HOST_WIDE_INT
, rtx
*,
774 static void csky_print_operand (FILE *stream
, rtx x
, int code
);
777 /* Define a table to map ISR attribute arguments onto function type
782 const char *const arg
;
783 const unsigned long return_value
;
784 } isr_attribute_entry
;
786 static const isr_attribute_entry isr_attribute_map
[] =
788 {"irq", CSKY_FT_ISR
},
789 {"IRQ", CSKY_FT_ISR
},
790 {"fiq", CSKY_FT_FIQ
},
791 {"FIQ", CSKY_FT_FIQ
},
792 {NULL
, CSKY_FT_NORMAL
}
796 /* Return the function type of the current function, if it has not been
797 determined, return CSKY_FT_UNKNOWN. */
800 get_csky_isr_type (tree argument
)
802 const isr_attribute_entry
*ptr
;
805 /* If ARGUMENT is NULL, default the handler type to ISR. */
806 if (argument
== NULL_TREE
)
809 if (TREE_VALUE (argument
) == NULL_TREE
810 || TREE_CODE (TREE_VALUE (argument
)) != STRING_CST
)
811 return CSKY_FT_UNKNOWN
;
813 arg
= TREE_STRING_POINTER (TREE_VALUE (argument
));
815 for (ptr
= isr_attribute_map
; ptr
->arg
!= NULL
; ptr
++)
816 if (strcmp (arg
, ptr
->arg
) == 0)
817 return ptr
->return_value
;
819 return CSKY_FT_UNKNOWN
;
822 /* Classify cfun as a normal function or some sort of interrupt
823 handler, and set the corresponding bits in cfun->machine->func_type. */
826 get_csky_current_func_type (void)
828 if (CSKY_FUNCTION_TYPE (cfun
->machine
->func_type
) == CSKY_FT_UNKNOWN
)
830 unsigned long type
= CSKY_FT_UNKNOWN
;
834 gcc_assert (TREE_CODE (current_function_decl
) == FUNCTION_DECL
);
836 attr
= DECL_ATTRIBUTES (current_function_decl
);
837 a
= lookup_attribute ("naked", attr
);
839 type
|= CSKY_FT_NAKED
;
840 a
= lookup_attribute ("isr", attr
);
842 a
= lookup_attribute ("interrupt", attr
);
844 type
|= CSKY_FT_NORMAL
;
846 type
|= get_csky_isr_type (TREE_VALUE (a
));
848 cfun
->machine
->func_type
= type
;
851 return cfun
->machine
->func_type
;
854 /* These typedefs are located at the start of this file, so that
855 they can be used in the prototypes there. This comment is to
856 remind readers of that fact so that the following structures
857 can be understood more easily.
859 typedef struct minipool_node Mnode;
860 typedef struct minipool_fixup Mfix; */
864 /* Doubly linked chain of entries. */
867 /* The maximum offset into the code that this entry can be placed. While
868 pushing fixes for forward references, all entries are sorted in order
869 of increasing max_address. */
870 HOST_WIDE_INT max_address
;
871 /* Similarly for an entry inserted for a backwards ref. */
872 HOST_WIDE_INT min_address
;
873 /* The number of fixes referencing this entry. This can become zero
874 if we "unpush" an entry. In this case we ignore the entry when we
875 come to emit the code. */
877 /* The offset from the start of the minipool. */
878 HOST_WIDE_INT offset
;
879 /* The value in table. */
881 /* The mode of value. */
883 /* The size of the value. */
887 struct minipool_fixup
891 HOST_WIDE_INT address
;
897 HOST_WIDE_INT forwards
;
898 HOST_WIDE_INT backwards
;
901 static Mnode
*minipool_vector_head
;
902 static Mnode
*minipool_vector_tail
;
903 static rtx minipool_vector_label
;
904 static HOST_WIDE_INT constpool_label_no
= 0;
906 /* Obstack for minipool constant handling. */
907 static struct obstack minipool_obstack
;
908 static char *minipool_startobj
;
909 /* The linked list of all minipool fixes required for this function. */
910 Mfix
*minipool_fix_head
;
911 Mfix
*minipool_fix_tail
;
912 /* The fix entry for the current minipool, once it has been placed. */
913 Mfix
*minipool_barrier
;
915 /* Allow GC scanning of the minipool obstack. */
918 csky_add_gc_roots (void)
920 gcc_obstack_init (&minipool_obstack
);
921 minipool_startobj
= (char *) obstack_alloc (&minipool_obstack
, 0);
924 /* Implement TARGET_CONSTANT_ALIGNMENT.
925 Make strings word-aligned so strcpy from constants will be faster. */
928 csky_constant_alignment (const_tree exp
, HOST_WIDE_INT align
)
930 if (TREE_CODE (exp
) == STRING_CST
932 && align
< BITS_PER_WORD
)
933 return BITS_PER_WORD
;
937 /* Record that there is a natural barrier in the insn stream at
941 push_csky_minipool_barrier (rtx_insn
*insn
, HOST_WIDE_INT address
)
943 Mfix
*fix
= (Mfix
*) obstack_alloc (&minipool_obstack
, sizeof (*fix
));
946 fix
->address
= address
;
949 if (minipool_fix_head
!= NULL
)
950 minipool_fix_tail
->next
= fix
;
952 minipool_fix_head
= fix
;
954 minipool_fix_tail
= fix
;
957 /* Compute the size of a vector jump table. */
960 get_csky_jump_table_size (rtx insn
)
962 /* ADDR_VECs only take room if read-only data goes into the text
964 if (JUMP_TABLES_IN_TEXT_SECTION
|| readonly_data_section
== text_section
)
966 rtx body
= PATTERN (insn
);
967 int elt
= GET_CODE (body
) == ADDR_DIFF_VEC
? 1 : 0;
969 HOST_WIDE_INT modesize
;
971 modesize
= GET_MODE_SIZE (GET_MODE (body
));
972 size
= modesize
* XVECLEN (body
, elt
);
976 /* Round up size of TBB table to a halfword boundary. */
977 size
= (size
+ 1) & ~(HOST_WIDE_INT
)1;
980 /* No padding necessary for TBH. */
994 /* Scan INSN and note any of its operands that need fixing.
995 If DO_PUSHES is false we do not actually push any of the fixups
996 needed. The function returns TRUE if any fixups were needed/pushed. */
999 note_csky_invalid_constants (rtx_insn
*insn
, HOST_WIDE_INT address
,
1002 bool result
= false;
1005 extract_constrain_insn (insn
);
1007 if (recog_data
.n_alternatives
== 0)
1010 /* Fill in recog_op_alt with information about the constraints of
1012 preprocess_constraints (insn
);
1014 const operand_alternative
*op_alt
= which_op_alt ();
1015 for (opno
= 0; opno
< recog_data
.n_operands
; opno
++)
1017 /* Things we need to fix can only occur in inputs. */
1018 if (recog_data
.operand_type
[opno
] != OP_IN
)
1021 /* If this alternative is a memory reference, then any mention
1022 of constants in this alternative is really to fool reload
1023 into allowing us to accept one there. We need to fix them up
1024 now so that we output the right code. */
1025 if (op_alt
[opno
].memory_ok
)
1027 rtx op
= recog_data
.operand
[opno
];
1029 if (CONSTANT_P (op
))
1032 push_csky_minipool_fix (insn
, address
,
1033 recog_data
.operand_loc
[opno
],
1034 recog_data
.operand_mode
[opno
], op
);
1044 /* Add a constant to the minipool for a forward reference. Returns the
1045 node added or NULL if the constant will not fit in this pool. */
1048 add_csky_minipool_forward_ref (Mfix
*fix
)
1050 /* If set, max_mp is the first pool_entry that has a lower
1051 constraint than the one we are trying to add. */
1052 Mnode
*max_mp
= NULL
;
1053 HOST_WIDE_INT max_address
= fix
->address
+ fix
->forwards
;
1056 /* If the minipool starts before the end of FIX->INSN then this FIX
1057 cannot be placed into the current pool. Furthermore, adding the
1058 new constant pool entry may cause the pool to start FIX_SIZE bytes
1060 if (minipool_vector_head
1061 && (fix
->address
+ get_attr_length (fix
->insn
)
1062 >= minipool_vector_head
->max_address
- fix
->fix_size
))
1065 /* Scan the pool to see if a constant with the same value has
1066 already been added. While we are doing this, also note the
1067 location where we must insert the constant if it doesn't already
1069 for (mp
= minipool_vector_head
; mp
!= NULL
; mp
= mp
->next
)
1071 if (GET_CODE (fix
->value
) == GET_CODE (mp
->value
)
1072 && fix
->mode
== mp
->mode
1073 && (GET_CODE (fix
->value
) != CODE_LABEL
1074 || (CODE_LABEL_NUMBER (fix
->value
)
1075 == CODE_LABEL_NUMBER (mp
->value
)))
1076 && rtx_equal_p (fix
->value
, mp
->value
))
1078 /* More than one fix references this entry. */
1083 /* Note the insertion point if necessary. */
1084 if (max_mp
== NULL
&& mp
->max_address
> max_address
)
1088 /* The value is not currently in the minipool, so we need to create
1089 a new entry for it. If MAX_MP is NULL, the entry will be put on
1090 the end of the list since the placement is less constrained than
1091 any existing entry. Otherwise, we insert the new fix before
1092 MAX_MP and, if necessary, adjust the constraints on the other
1095 mp
->fix_size
= fix
->fix_size
;
1096 mp
->mode
= fix
->mode
;
1097 mp
->value
= fix
->value
;
1099 /* Not yet required for a backwards ref. */
1100 mp
->min_address
= -65536;
1104 mp
->max_address
= max_address
;
1106 mp
->prev
= minipool_vector_tail
;
1108 if (mp
->prev
== NULL
)
1110 minipool_vector_head
= mp
;
1111 minipool_vector_label
1112 = gen_csky_constpool_label (gen_rtx_CONST_INT (VOIDmode
,
1113 constpool_label_no
++));
1116 mp
->prev
->next
= mp
;
1118 minipool_vector_tail
= mp
;
1122 if (max_address
> max_mp
->max_address
- mp
->fix_size
)
1123 mp
->max_address
= max_mp
->max_address
- mp
->fix_size
;
1125 mp
->max_address
= max_address
;
1128 mp
->prev
= max_mp
->prev
;
1130 if (mp
->prev
!= NULL
)
1131 mp
->prev
->next
= mp
;
1133 minipool_vector_head
= mp
;
1136 /* Save the new entry. */
1139 /* Scan over the preceding entries and adjust their addresses as
1141 while (mp
->prev
!= NULL
1142 && mp
->prev
->max_address
> mp
->max_address
- mp
->prev
->fix_size
)
1144 mp
->prev
->max_address
= mp
->max_address
- mp
->prev
->fix_size
;
1152 /* Return the cost of forcibly inserting a barrier after INSN. */
1155 get_csky_barrier_cost (rtx_insn
*insn
)
1157 /* Basing the location of the pool on the loop depth is preferable,
1158 but at the moment, the basic block information seems to be
1159 corrupt by this stage of the compilation. */
1161 rtx next
= next_nonnote_insn (insn
);
1163 if (next
!= NULL
&& GET_CODE (next
) == CODE_LABEL
)
1166 switch (GET_CODE (insn
))
1169 /* It will always be better to place the table before the label, rather
1178 return base_cost
- 10;
1181 return base_cost
+ 10;
1186 /* Find the best place in the insn stream in the range
1187 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
1188 Create the barrier by inserting a jump and add a new fix entry for
1192 create_csky_fix_barrier (Mfix
*fix
, Mfix
*fix_next
,
1193 HOST_WIDE_INT max_address
)
1195 rtx_barrier
*barrier
;
1196 rtx_insn
*from
= (fix
? fix
->insn
: get_insns ());
1197 /* The instruction after which we will insert the jump. */
1198 rtx_insn
*selected
= NULL
;
1200 /* The address at which the jump instruction will be placed. */
1201 HOST_WIDE_INT selected_address
= 0;
1203 HOST_WIDE_INT count
= (fix
? fix
->address
: 0);
1204 HOST_WIDE_INT max_count
= max_address
;
1205 rtx_code_label
*label
= gen_label_rtx ();
1207 selected_cost
= get_csky_barrier_cost (from
);
1209 while (from
&& count
< max_count
)
1212 rtx_jump_table_data
*table
;
1214 /* Count the length of this insn. */
1215 count
+= get_attr_length (from
);
1217 /* If there is a jump table, add its length. */
1218 if (tablejump_p (from
, NULL
, &table
))
1220 count
+= get_csky_jump_table_size (table
);
1222 /* Jump tables aren't in a basic block, so base the cost on
1223 the dispatch insn. If we select this location, we will
1224 still put the pool after the table. */
1225 new_cost
= get_csky_barrier_cost (from
);
1227 if (count
< max_count
1228 && (!selected
|| new_cost
<= selected_cost
))
1231 selected_cost
= new_cost
;
1232 selected_address
= count
;
1235 /* Continue after the dispatch table. */
1236 from
= NEXT_INSN (table
);
1240 new_cost
= get_csky_barrier_cost (from
);
1242 if (count
< max_count
1243 && (!selected
|| new_cost
<= selected_cost
))
1246 selected_cost
= new_cost
;
1247 selected_address
= count
;
1250 from
= NEXT_INSN (from
);
1253 /* Make sure that we found a place to insert the jump. */
1254 gcc_assert (selected
);
1256 /* Create a new JUMP_INSN that branches around a barrier. */
1257 from
= emit_jump_insn_after (gen_jump (label
), selected
);
1258 JUMP_LABEL (from
) = label
;
1259 barrier
= emit_barrier_after (from
);
1260 emit_label_after (label
, barrier
);
1262 /* Create a minipool barrier entry for the new barrier. */
1263 new_fix
= (Mfix
*) obstack_alloc (&minipool_obstack
, sizeof (* new_fix
));
1264 new_fix
->insn
= barrier
;
1265 new_fix
->address
= selected_address
;
1268 new_fix
->next
= fix
->next
;
1269 fix
->next
= new_fix
;
1272 new_fix
->next
= fix_next
;
1278 /* Print a symbolic form of the constant X to the dump file F.
1279 This is used for dump output for -mconstpool in the target-dependent
1283 print_csky_value (FILE *f
, rtx x
)
1285 switch (GET_CODE (x
))
1288 fprintf (f
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (x
));
1292 fprintf (f
, "<0x%lx,0x%lx>", (long)XWINT (x
, 2), (long)XWINT (x
, 3));
1300 for (i
= 0; i
< CONST_VECTOR_NUNITS (x
); i
++)
1302 fprintf (f
, HOST_WIDE_INT_PRINT_HEX
,
1303 INTVAL (CONST_VECTOR_ELT (x
, i
)));
1304 if (i
< (CONST_VECTOR_NUNITS (x
) - 1))
1312 fprintf (f
, "\"%s\"", XSTR (x
, 0));
1316 fprintf (f
, "`%s'", XSTR (x
, 0));
1320 fprintf (f
, "L%d", INSN_UID (XEXP (x
, 0)));
1324 print_csky_value (f
, XEXP (x
, 0));
1328 print_csky_value (f
, XEXP (x
, 0));
1330 print_csky_value (f
, XEXP (x
, 1));
1338 fprintf (f
, "????");
1344 /* Record INSN, which will need fixing up to load a value from the
1345 minipool. ADDRESS is the offset of the insn since the start of the
1346 function; LOC is a pointer to the part of the insn which requires
1347 fixing; VALUE is the constant that must be loaded, which is of type
1351 push_csky_minipool_fix (rtx_insn
*insn
, HOST_WIDE_INT address
, rtx
*loc
,
1352 machine_mode mode
, rtx value
)
1354 #define CSKY_ELRW16_RANGE 1400
1355 #define CSKY_LRW16_RANGE 700
1356 #define CSKY_CONSTANT_POOL_RANGE (TARGET_ELRW ? CSKY_ELRW16_RANGE \
1359 /* Fixes less than a word need padding out to a word boundary. */
1360 #define CSKY_MINIPOOL_FIX_SIZE(mode) \
1361 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
1363 Mfix
*fix
= (Mfix
*) obstack_alloc (&minipool_obstack
, sizeof (*fix
));
1366 fix
->address
= address
;
1369 fix
->fix_size
= CSKY_MINIPOOL_FIX_SIZE (mode
);
1371 fix
->forwards
= CSKY_CONSTANT_POOL_RANGE
;
1373 fix
->minipool
= NULL
;
1375 /* If an insn doesn't have a range defined for it, then it isn't
1376 expecting to be reworked by this code. Better to stop now than
1377 to generate duff assembly code. */
1378 gcc_assert (fix
->forwards
|| fix
->backwards
);
1383 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
1384 GET_MODE_NAME (mode
),
1385 INSN_UID (insn
), (unsigned long) address
,
1386 -1 * (long)fix
->backwards
, (long)fix
->forwards
);
1387 print_csky_value (dump_file
, fix
->value
);
1388 fprintf (dump_file
, "\n");
1391 /* Add it to the chain of fixes. */
1394 if (minipool_fix_head
!= NULL
)
1395 minipool_fix_tail
->next
= fix
;
1397 minipool_fix_head
= fix
;
1399 minipool_fix_tail
= fix
;
1403 /* Fill in the offsets for minipool entries. */
1406 assign_csky_minipool_offsets (Mfix
*barrier
)
1408 HOST_WIDE_INT offset
= 0;
1411 minipool_barrier
= barrier
;
1413 for (mp
= minipool_vector_head
; mp
!= NULL
; mp
= mp
->next
)
1415 mp
->offset
= offset
;
1417 if (mp
->refcount
> 0)
1418 offset
+= mp
->fix_size
;
1423 /* Output the literal table. */
1425 static HOST_WIDE_INT
1426 dump_csky_minipool (rtx_insn
*scan
)
1430 HOST_WIDE_INT pool_length
= 0;
1434 ";; Emitting minipool after insn %u;\
1435 address %ld; align %d (bytes)\n",
1436 INSN_UID (scan
), (unsigned long) minipool_barrier
->address
, 4);
1438 scan
= emit_insn_after (gen_align_4 (), scan
);
1439 scan
= emit_insn_after (minipool_vector_label
, scan
);
1441 for (mp
= minipool_vector_head
; mp
!= NULL
; mp
= nmp
)
1443 if (mp
->refcount
> 0)
1447 fprintf (dump_file
, ";; Offset %u, min %ld, max %ld ",
1448 (unsigned) mp
->offset
, (unsigned long) mp
->min_address
,
1449 (unsigned long) mp
->max_address
);
1450 print_csky_value (dump_file
, mp
->value
);
1451 fputc ('\n', dump_file
);
1454 switch (mp
->fix_size
)
1457 scan
= emit_insn_after (gen_consttable_4 (mp
->value
), scan
);
1461 scan
= emit_insn_after (gen_consttable_8 (mp
->value
), scan
);
1473 minipool_vector_head
= minipool_vector_tail
= NULL
;
1474 scan
= emit_barrier_after (scan
);
1479 /* Return true if INSN is a minipool load or instruction that will be
1480 converted to one. It is assumed that INSN has type attribute "load". */
1483 csky_minipool_load_p (rtx_insn
*insn
)
1487 extract_insn_cached (insn
);
1489 op1
= recog_data
.operand
[1];
1491 /* This is a constant that has not yet been turned into
1493 if (CONSTANT_P (op1
))
1496 /* Constant pool loads are label_refs. */
1497 if (GET_CODE (op1
) == ZERO_EXTEND
|| GET_CODE (op1
) == SIGN_EXTEND
)
1498 op1
= XEXP (op1
, 0);
1499 if (GET_CODE (op1
) != MEM
)
1501 addr
= XEXP (op1
, 0);
1502 if (GET_CODE (addr
) == PLUS
&& CONST_INT_P (XEXP (addr
, 1)))
1503 addr
= XEXP (addr
, 0);
1504 return GET_CODE (addr
) == LABEL_REF
;
1508 /* Compute the attribute "length" of push or pop insn, according to
1509 the registers it uses. */
1512 csky_compute_pushpop_length (rtx
*operands
)
1514 rtx parallel_op
= operands
[2];
1515 /* Initialize to elements number of PARALLEL. */
1516 unsigned indx
= XVECLEN (parallel_op
, 0) - 1;
1517 unsigned first_indx
= 0;
1518 unsigned regno
= REGNO (operands
[1]);
1520 if (regno
> CSKY_LR_REGNUM
)
1523 /* Check each register in the list. */
1524 for (; indx
> first_indx
; indx
--)
1526 regno
= REGNO (XEXP (XVECEXP (parallel_op
, 0, indx
), 0));
1527 /* If a register number higher than 15 is included, a 32-bit insn
1529 if (regno
> CSKY_LR_REGNUM
)
1536 /* Emit constant pools for -mconstpool. */
1539 csky_emit_constant_pools (void)
1542 HOST_WIDE_INT address
= 0;
1545 minipool_fix_head
= minipool_fix_tail
= NULL
;
1547 /* The first insn must always be a note, or the code below won't
1548 scan it properly. */
1549 insn
= get_insns ();
1550 gcc_assert (NOTE_P (insn
));
1552 /* Scan the insns and record the operands that need fixing. */
1553 for (insn
= next_nonnote_insn (insn
); insn
;
1554 insn
= next_nonnote_insn (insn
))
1556 if (BARRIER_P (insn
))
1557 push_csky_minipool_barrier (insn
, address
);
1558 else if (INSN_P (insn
))
1560 rtx_jump_table_data
*table
;
1562 note_csky_invalid_constants (insn
, address
, true);
1563 address
+= get_attr_length (insn
);
1565 /* If the insn is a vector jump, add the size of the table
1566 and skip the table. */
1567 if (tablejump_p (insn
, NULL
, &table
))
1569 address
+= get_csky_jump_table_size (table
);
1575 fix
= minipool_fix_head
;
1577 /* Now scan the fixups and perform the required changes. */
1581 Mfix
*last_added_fix
;
1582 Mfix
*last_barrier
= NULL
;
1585 bool has_pending_const
= false;
1587 /* Check if there is any pending constant not processed. */
1588 for (mp
= minipool_vector_head
; mp
; mp
= mp
->next
)
1589 if (mp
->refcount
> 0)
1591 has_pending_const
= true;
1595 /* If no pending constant, skip over barrier insns. */
1596 if (has_pending_const
== false)
1598 while (fix
&& BARRIER_P (fix
->insn
))
1604 last_added_fix
= NULL
;
1606 for (ftmp
= fix
; ftmp
; ftmp
= ftmp
->next
)
1608 if (BARRIER_P (ftmp
->insn
))
1610 if (minipool_vector_head
1611 && ftmp
->address
>= minipool_vector_head
->max_address
)
1614 last_barrier
= ftmp
;
1618 ftmp
->minipool
= add_csky_minipool_forward_ref (ftmp
);
1619 if (ftmp
->minipool
== NULL
)
1622 last_added_fix
= ftmp
; /* Keep track of the last fix added. */
1625 /* If the last added fix is a barrier, dump minipool after it. */
1626 if (last_added_fix
&& BARRIER_P (last_added_fix
->insn
))
1627 ftmp
= last_barrier
;
1630 /* ftmp is first fix that we can't fit into this pool.
1631 Insert a new barrier in the code somewhere between the previous
1632 fix and this one, and arrange to jump around it. */
1633 HOST_WIDE_INT max_address
;
1635 /* The last item on the list of fixes must be a barrier, so
1636 we can never run off the end of the list of fixes without
1637 last_barrier being set. */
1640 /* Check that there isn't another fix that is in range that
1641 we couldn't fit into this pool because the pool was
1642 already too large: we need to put the pool before such an
1643 instruction. The pool itself may come just after the
1644 fix because create_csky_fix_barrier also allows space for a
1645 jump instruction. */
1646 max_address
= minipool_vector_head
->max_address
;
1647 if (ftmp
->address
< max_address
)
1648 max_address
= ftmp
->address
+ 1;
1649 last_barrier
= create_csky_fix_barrier (last_added_fix
, ftmp
,
1653 assign_csky_minipool_offsets (last_barrier
);
1655 /* Scan over the fixes we have identified for this pool, fixing them
1656 up and adding the constants to the pool itself. */
1657 for (this_fix
= fix
; this_fix
&& ftmp
!= this_fix
;
1658 this_fix
= this_fix
->next
)
1660 if (GET_CODE (this_fix
->insn
) != BARRIER
)
1663 = plus_constant (Pmode
,
1664 gen_rtx_LABEL_REF (VOIDmode
,
1665 minipool_vector_label
),
1666 this_fix
->minipool
->offset
);
1667 rtx insn_body
= PATTERN (this_fix
->insn
);
1668 rtx src
= XEXP (insn_body
, 1);
1669 *this_fix
->loc
= gen_rtx_MEM (this_fix
->mode
, addr
);
1670 if (GET_CODE (this_fix
->value
) == SYMBOL_REF
)
1671 emit_insn_after (gen_rtx_UNSPEC_VOLATILE (VOIDmode
,
1673 VUNSPEC_SYMBOL_REF
),
1677 dump_csky_minipool (last_barrier
->insn
);
1679 if (fix
->next
== NULL
)
1683 /* Free the minipool memory. */
1684 obstack_free (&minipool_obstack
, minipool_startobj
);
1688 /* Implement TARGET_MACHINE_DEPENDENT_REORG. This handles
1689 -mconstpool output. */
1694 if (TARGET_CONSTANT_POOL
)
1695 csky_emit_constant_pools ();
1699 /* Check to see if the current function contains a branch insn with the
1700 far jump attribute set. Such a function uses the LR register. */
1703 csky_far_jump_used_p (void)
1706 if (cfun
->machine
->far_jump_used
)
1709 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
1710 if (GET_CODE (insn
) == JUMP_INSN
1711 /* Ignore tablejump patterns. */
1712 && GET_CODE (PATTERN (insn
)) != ADDR_VEC
1713 && GET_CODE (PATTERN (insn
)) != ADDR_DIFF_VEC
1714 && get_attr_far_jump (insn
) == FAR_JUMP_YES
)
1716 cfun
->machine
->far_jump_used
= 1;
1723 /* Return the mask of registers used by the current function. Set
1724 COUNT to the number of registers used. */
1727 get_csky_live_regs (int *count
)
1730 unsigned int live_regs_mask
= 0;
1733 for (reg
= 0; reg
< CSKY_NGPR_REGS
; reg
++)
1737 /* Ignore unsupported registers. */
1738 if (CSKY_TARGET_ARCH (CK801
) && reg
> 8 && reg
< 13)
1740 if ((CSKY_TARGET_ARCH (CK801
)
1741 || CSKY_TARGET_ARCH (CK802
)
1742 || CSKY_TARGET_ARCH (CK803
))
1746 /* Caller-saved registers marked as used. */
1747 if (df_regs_ever_live_p (reg
) && !call_used_regs
[reg
])
1750 /* Frame pointer marked used. */
1751 else if (frame_pointer_needed
&& reg
== HARD_FRAME_POINTER_REGNUM
)
1754 /* This is required for CK801/802 where FP is a fixed reg, otherwise
1755 we end up with no FP value available to the DWARF-2 unwinder. */
1756 else if (crtl
->calls_eh_return
&& reg
== HARD_FRAME_POINTER_REGNUM
)
1759 /* CK801/802 also need special handling for LR because it's clobbered
1761 else if ((CSKY_TARGET_ARCH (CK801
) || CSKY_TARGET_ARCH (CK802
))
1762 && reg
== CSKY_LR_REGNUM
1763 && (!crtl
->is_leaf
|| csky_far_jump_used_p ()))
1766 /* Register is used for EH data return. */
1767 else if (crtl
->calls_eh_return
1768 && reg
>= CSKY_FIRST_EH_RETDATA_REGNUM
1769 && reg
<= CSKY_LAST_EH_RETDATA_REGNUM
)
1772 /* We need a temporary reg to hold the offset for adjusting the SP
1773 for a large stack frame. */
1774 if (reg
== CSKY_STACKADJUST_REGNUM
1775 && cfun
->machine
->reg_offset
> CSKY_MAX_SP_ADJUST
* 2)
1778 /* Add reg to the mask. */
1782 live_regs_mask
|= (1 << reg
);
1785 return live_regs_mask
;
1788 /* Compute the stack frame layout, storing sizes of the various pieces
1791 Stack frames constructed in the prologue look like:
1792 ... caller's frame ...
1793 incoming SP -> caller's outbound argument overflow
1795 optional FP -> register save
1798 adjusted SP -> outbound argument overflow
1800 with SP/FP pointing at the base (low address) of the respective area,
1801 and each area aligned to a word boundary. */
1804 csky_layout_stack_frame (void)
1806 machine_function
*infp
= cfun
->machine
;
1809 if (infp
->frame_init_p
)
1812 /* Get sizes of local variables & outbound arguments. */
1813 infp
->outbound_size
= CSKY_STACK_ALIGN (crtl
->outgoing_args_size
);
1814 infp
->local_offset
= infp
->outbound_size
;
1815 infp
->local_size
= CSKY_STACK_ALIGN (get_frame_size ());
1816 infp
->reg_offset
= infp
->local_offset
+ infp
->local_size
;
1818 /* Now compute size of argument spill + saved regs. These do not
1819 need explicit alignment since they are already word-sized. */
1820 infp
->reg_mask
= get_csky_live_regs (®_count
);
1821 infp
->reg_size
= reg_count
* UNITS_PER_WORD
;
1822 infp
->arg_offset
= infp
->reg_offset
+ infp
->reg_size
;
1823 infp
->arg_size
= crtl
->args
.pretend_args_size
;
1824 infp
->frame_size
= infp
->arg_offset
+ infp
->arg_size
;
1825 infp
->frame_init_p
= reload_completed
;
1828 /* Implement TARGET_CAN_ELIMINATE. */
1830 csky_can_eliminate (const int from ATTRIBUTE_UNUSED
, const int to
)
1832 if (to
== FRAME_POINTER_REGNUM
)
1833 return from
!= ARG_POINTER_REGNUM
;
1834 if (to
== STACK_POINTER_REGNUM
)
1835 return !frame_pointer_needed
;
1839 /* Worker function for INITIAL_ELIMINATION_OFFSET macro.
1840 Define the offset between two registers, one to be eliminated, and
1841 the other its replacement, at the start of a routine. */
1844 csky_initial_elimination_offset (int from
, int to
)
1848 csky_layout_stack_frame ();
1850 /* Set OFFSET to the offset to the initial stack pointer. */
1853 case FRAME_POINTER_REGNUM
:
1854 case HARD_FRAME_POINTER_REGNUM
:
1855 offset
= cfun
->machine
->reg_offset
;
1858 case ARG_POINTER_REGNUM
:
1859 offset
= cfun
->machine
->arg_offset
;
1866 /* If we are asked for the offset to the frame pointer instead,
1867 then subtract the difference between the frame pointer and stack
1869 if (to
== FRAME_POINTER_REGNUM
|| to
== HARD_FRAME_POINTER_REGNUM
)
1870 offset
-= cfun
->machine
->reg_offset
;
1875 /* Determine where to put an argument to a function.
1876 Value is zero to push the argument on the stack,
1877 or a hard register in which to store the argument.
1879 CUM is a variable of type CUMULATIVE_ARGS which gives info about
1880 the preceding args and about the function being called.
1881 ARG is a description of the argument. */
1884 csky_function_arg (cumulative_args_t pcum_v
, const function_arg_info
&arg
)
1886 CUMULATIVE_ARGS
*pcum
= get_cumulative_args (pcum_v
);
1887 int reg
= pcum
->reg
;
1888 machine_mode mode
= arg
.mode
;
1890 if (FUNCTION_VARG_MODE_P(mode
)
1891 && !pcum
->is_stdarg
)
1895 if (reg
< CSKY_NPARM_FREGS
)
1896 return gen_rtx_REG (mode
, CSKY_FIRST_VFP_REGNUM
+ reg
);
1901 if (reg
< CSKY_NPARM_REGS
)
1902 return gen_rtx_REG (mode
, CSKY_FIRST_PARM_REGNUM
+ reg
);
1908 /* Return the number of registers (words) needed to pass an argument of
1912 csky_num_arg_regs (machine_mode mode
, const_tree type
, bool is_stdarg
)
1916 if (type
&& mode
== BLKmode
)
1917 size
= int_size_in_bytes (type
);
1919 size
= GET_MODE_SIZE (mode
);
1921 if (TARGET_HARD_FLOAT_ABI
1924 if (CSKY_VREG_MODE_P(mode
)
1925 && !TARGET_SINGLE_FPU
)
1926 return ((CSKY_NUM_WORDS (size
) + 1) / 2);
1929 return CSKY_NUM_WORDS (size
);
1933 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
1936 csky_function_arg_advance (cumulative_args_t pcum_v
,
1937 const function_arg_info
&arg
)
1939 CUMULATIVE_ARGS
*pcum
= get_cumulative_args (pcum_v
);
1940 int *reg
= &pcum
->reg
;
1941 machine_mode mode
= arg
.mode
;
1943 int param_size
= csky_num_arg_regs (mode
, arg
.type
, pcum
->is_stdarg
);
1944 int param_regs_nums
= CSKY_NPARM_REGS
;
1946 if (FUNCTION_VARG_MODE_P(mode
)
1947 && !pcum
->is_stdarg
)
1950 param_regs_nums
= CSKY_NPARM_FREGS
;
1953 if (*reg
+ param_size
> param_regs_nums
)
1954 *reg
= param_regs_nums
;
1960 /* Implement TARGET_FUNCTION_VALUE. */
1962 csky_function_value (const_tree type
, const_tree func
,
1963 bool outgoing ATTRIBUTE_UNUSED
)
1966 int unsignedp ATTRIBUTE_UNUSED
;
1969 mode
= TYPE_MODE (type
);
1970 size
= int_size_in_bytes (type
);
1972 if (FUNCTION_VARG_MODE_P(mode
))
1974 mode
= promote_function_mode (type
, mode
, &unsignedp
, func
, 1);
1975 return gen_rtx_REG (mode
, CSKY_FIRST_VFP_REGNUM
);
1978 /* Since we promote return types, we must promote the mode here too. */
1979 if (INTEGRAL_TYPE_P (type
))
1981 mode
= promote_function_mode (type
, mode
, &unsignedp
, func
, 1);
1982 return gen_rtx_REG (mode
, CSKY_FIRST_RET_REGNUM
);
1985 if (mode
== BLKmode
&& size
> UNITS_PER_WORD
1986 && size
<= UNITS_PER_WORD
* 2)
1989 ret_regs
[0] = gen_rtx_EXPR_LIST (SImode
,
1990 gen_rtx_REG (SImode
,
1991 CSKY_FIRST_RET_REGNUM
),
1992 GEN_INT (0 * UNITS_PER_WORD
));
1993 ret_regs
[1] = gen_rtx_EXPR_LIST (SImode
,
1994 gen_rtx_REG (SImode
,
1995 CSKY_FIRST_RET_REGNUM
+ 1),
1996 GEN_INT (1 * UNITS_PER_WORD
));
1998 rtvec vec
= gen_rtvec (2, ret_regs
[0], ret_regs
[1]);
2000 return gen_rtx_PARALLEL (mode
, vec
);
2003 return gen_rtx_REG (mode
, CSKY_FIRST_RET_REGNUM
);
2007 /* Implement TARGET_LIBCALL_VALUE. */
2010 csky_libcall_value (machine_mode mode
,
2011 const_rtx libcall ATTRIBUTE_UNUSED
)
2013 if (FUNCTION_VARG_MODE_P(mode
))
2015 return gen_rtx_REG (mode
, CSKY_FIRST_VFP_REGNUM
);
2017 return gen_rtx_REG (mode
, CSKY_FIRST_RET_REGNUM
);
2021 /* Implement TARGET_FUNCTION_VALUE_REGNO_P.
2022 On C-SKY, only r0 can return results. */
2025 csky_function_value_regno_p (const unsigned int regno
)
2027 if (regno
== CSKY_FIRST_RET_REGNUM
2028 || (TARGET_HARD_FLOAT_ABI
2029 && regno
== CSKY_FIRST_VFP_REGNUM
))
2035 /* Return an RTX indicating where the return address to the
2036 calling function can be found. */
2039 csky_return_addr (int count
, rtx frame ATTRIBUTE_UNUSED
)
2044 return get_hard_reg_initial_val (Pmode
, CSKY_LR_REGNUM
);
2048 /* Implement TARGET_ARG_PARTIAL_BYTES.
2049 Return the number of bytes at the beginning of an argument
2050 that must be put in registers. The value must be zero for arguments
2051 that are passed entirely in registers or
2052 that are entirely pushed on the stack. */
2055 csky_arg_partial_bytes (cumulative_args_t pcum_v
, const function_arg_info
&arg
)
2057 CUMULATIVE_ARGS
*pcum
= get_cumulative_args (pcum_v
);
2058 int param_size
= csky_num_arg_regs (arg
.mode
, arg
.type
, pcum
->is_stdarg
);
2059 int reg
= pcum
->reg
;
2061 if (FUNCTION_VARG_MODE_P(arg
.mode
)
2062 && !pcum
->is_stdarg
)
2065 if (reg
< CSKY_NPARM_REGS
2066 && reg
+ param_size
> CSKY_NPARM_REGS
)
2067 return (CSKY_NPARM_REGS
- reg
) * UNITS_PER_WORD
;
2073 /* Implement TARGET_SETUP_INCOMING_VARARGS.
2074 On C-Sky the copy from the argument registers to the stack is emitted
2075 by the prologue hooks, so here we just have to note how much stack space
2079 csky_setup_incoming_varargs (cumulative_args_t pcum_v
,
2080 const function_arg_info
&arg
,
2082 int second_time ATTRIBUTE_UNUSED
)
2084 CUMULATIVE_ARGS
*pcum
= get_cumulative_args (pcum_v
);
2085 CUMULATIVE_ARGS local_cum
;
2086 cumulative_args_t local_cum_v
= pack_cumulative_args (&local_cum
);
2089 cfun
->machine
->uses_anonymous_args
= 1;
2091 csky_function_arg_advance (local_cum_v
, arg
);
2092 regs_to_push
= CSKY_NPARM_REGS
- local_cum
.reg
;
2094 *pretend_size
= regs_to_push
* UNITS_PER_WORD
;
2098 /* Implement TARGET_ASM_OUTPUT_MI_THUNK.
2099 Output code to add DELTA to the first argument, and then jump
2100 to FUNCTION. Used for C++ multiple inheritance. */
2103 csky_output_mi_thunk (FILE *file
, tree thunk ATTRIBUTE_UNUSED
,
2104 HOST_WIDE_INT delta
,
2105 HOST_WIDE_INT vcall_offset
,
2108 const char *fnname
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk
));
2109 const char *thiz
= "a0";
2110 const char *reg0
= "t0";
2111 const char *reg1
= "t1";
2112 int maxoff
= 4096; /* Constant range for addi/subi. */
2114 assemble_start_function (thunk
, fnname
);
2115 final_start_function (emit_barrier (), file
, 1);
2117 rtx fnaddr
= XEXP (DECL_RTL (function
), 0);
2119 if (CSKY_TARGET_ARCH (CK801
))
2121 /* CK801 can't use t registers and has only 16-bit addi/subi. */
2125 if (vcall_offset
> maxoff
|| vcall_offset
< -maxoff
)
2126 fprintf (file
, "\tpush\tl0, l1\n");
2127 else if (delta
> maxoff
|| delta
< -maxoff
)
2128 fprintf (file
, "\tpush\tl0\n");
2131 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
2134 /* Add delta to this_rtx. */
2137 if (delta
> maxoff
|| delta
< -maxoff
)
2139 fprintf (file
, "\tlrw\t%s, %ld\n", reg0
, (long)delta
);
2140 fprintf (file
, "\taddu\t%s, %s, %s\n", thiz
, thiz
, reg0
);
2143 fprintf (file
, "\t%s\t%s, %s, %ld\n",
2144 (delta
> 0 ? "addi" : "subi"), thiz
, thiz
,
2145 (long)(delta
> 0 ? delta
: -delta
));
2148 /* If needed, add *(*this_rtx + vcall_offset) to this_rtx. */
2149 if (vcall_offset
!= 0)
2151 fprintf (file
, "\tld.w\t%s, (%s, 0)\n", reg0
, thiz
);
2153 if (vcall_offset
> maxoff
|| vcall_offset
< -maxoff
)
2155 fprintf (file
, "\tlrw\t%s, %ld\n", reg1
, (long)vcall_offset
);
2156 fprintf (file
, "\taddu\t%s, %s, %s\n", reg0
, reg0
, reg1
);
2159 fprintf (file
, "\t%s\t%s, %s, %ld\n",
2160 (vcall_offset
> 0 ? "addi" : "subi"), reg0
, reg0
,
2161 (long)(vcall_offset
> 0 ? vcall_offset
: -vcall_offset
));
2163 /* Load the offset and add it to this_rtx */
2164 fprintf (file
, "\tld.w\t%s, (%s, 0)\n", reg0
, reg0
);
2165 fprintf (file
, "\taddu\t%s, %s, %s\n", thiz
, thiz
, reg0
);
2168 /* We must pop the scratch regs individually instead of using the
2169 "pop" insn, which also does a return. */
2170 if (CSKY_TARGET_ARCH (CK801
))
2172 if (vcall_offset
> maxoff
|| vcall_offset
< -maxoff
)
2174 fprintf (file
, "\tld.w\tl0, (sp, 0)\n");
2175 fprintf (file
, "\tld.w\tl1, (sp, 4)\n");
2176 fprintf (file
, "\taddi\t sp, sp, 8\n");
2178 else if (delta
> maxoff
|| delta
< -maxoff
)
2180 fprintf (file
, "\tld.w\tl0, (sp, 0)\n");
2181 fprintf (file
, "\taddi\tsp, sp, 4\n");
2185 fprintf (file
, "\tjbr\t");
2186 output_addr_const (file
, fnaddr
);
2187 fprintf (file
, "\n");
2189 final_end_function ();
2190 assemble_end_function (thunk
, fnname
);
2194 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE.
2195 Conditionally modify five variables fixed_regs, call_used_regs, global_regs,
2196 reg_names, and reg_class_contents, to take into account any dependence of
2197 these register sets on target flags.
2199 CK801 has registers r0-r8 and r13-r15. CK802 and CK803 have registers
2200 r0-r15 (the "low" registers). Other cpus use registers r0-r31 with
2201 -mhigh-registers, otherwise also only r0-r15.
2203 CK801 only has 16-bit instructions, most of which can only reference
2204 r0-r7 (the "mini" registers). So we mark regs outside that range as
2205 fixed. -msmart can be used on other arch variants to force the same
2206 behavior because it results in smaller code size.
2208 TODO: investigate whether it's beneficial to use r8-r13 as a spill
2209 class when TARGET_MINI_REGISTERS instead of making them unusable by
2210 the register allocator. */
2213 csky_conditional_register_usage (void)
2215 /* Only use mini registers in smart mode or 801. */
2216 if (TARGET_MINI_REGISTERS
)
2220 for (i
= (CSKY_LAST_MINI_REGNUM
+ 1); i
< 32; i
++)
2223 call_used_regs
[i
] = 1;
2226 /* For some targets, the high registers are not supported.
2227 CPUs other than ck801/ck802/ck803 use high registers
2228 depending on -mhigh-registers option. */
2229 else if (CSKY_TARGET_ARCH (CK802
)
2230 || CSKY_TARGET_ARCH (CK803
)
2231 || !TARGET_HIGH_REGISTERS
)
2235 for (i
= CSKY_FIRST_HIGH_REGNUM
; i
<= CSKY_LAST_HIGH_REGNUM
; i
++)
2238 call_used_regs
[i
] = 1;
2242 /* On CK801/CK802 we must mark lr as a fixed register because it is
2243 used to implement far jumps.
2244 FIXME: perhaps there should be a command-line option controlling
2245 use of lr for far jumps on ck802 when !TARGET_MINI_REGS, when
2246 you really want lr to be available to the register allocator and
2247 you know there are no far jumps in the code. */
2248 if (CSKY_TARGET_ARCH (CK801
) || CSKY_TARGET_ARCH (CK802
))
2250 fixed_regs
[CSKY_LR_REGNUM
] = 1;
2251 call_used_regs
[CSKY_LR_REGNUM
] = 0;
2254 /* The hi/lo registers are only supported in dsp mode. */
2257 fixed_regs
[CSKY_HI_REGNUM
] = 1;
2258 call_used_regs
[CSKY_HI_REGNUM
] = 1;
2260 fixed_regs
[CSKY_LO_REGNUM
] = 1;
2261 call_used_regs
[CSKY_LO_REGNUM
] = 1;
2264 /* The V_REGS are only supported in hard float mode. */
2265 if (!TARGET_HARD_FLOAT
)
2269 for (regno
= CSKY_FIRST_VFP_REGNUM
;
2270 regno
<= CSKY_LAST_VFP3_REGNUM
; regno
++)
2272 fixed_regs
[regno
] = 1;
2273 call_used_regs
[regno
] = 1;
2277 if (!TARGET_SUPPORT_FPV3
)
2281 for (regno
= CSKY_FIRST_VFP3_REGNUM
;
2282 regno
<= CSKY_LAST_VFP3_REGNUM
; regno
++)
2284 fixed_regs
[regno
] = 1;
2285 call_used_regs
[regno
] = 1;
2289 /* In pic mode, the gb register is not available for register
2290 allocation. Since gb is not clobbered by function
2291 calls, set its call_used_regs to 0. */
2294 fixed_regs
[PIC_OFFSET_TABLE_REGNUM
] = 1;
2295 call_used_regs
[PIC_OFFSET_TABLE_REGNUM
] = 0;
2299 /* Implement TARGET_HARD_REGNO_NREGS. */
2302 csky_hard_regno_nregs (unsigned int regno
, machine_mode mode
)
2304 if (regno
>= CSKY_FIRST_VFP_REGNUM
&& !CSKY_TARGET_ARCH (CK803
))
2307 return CSKY_NUM_REGS (mode
);
2310 /* Implement TARGET_HARD_REGNO_MODE_OK. Return true if REGNO is a
2311 valid register for holding a quantity of type MODE. */
2314 csky_hard_regno_mode_ok (unsigned int regno
, machine_mode mode
)
2316 int nregs
= CSKY_NUM_REGS (mode
);
2318 /* We can't handle more than doubleword sizes for any register. */
2322 /* For general registers, return true if mode is one word size.
2323 When the size is larger than one word size, there should
2324 be two successive hard registers to put the data. */
2325 if (regno
< CSKY_NGPR_REGS
)
2329 else if (TARGET_MINI_REGISTERS
)
2330 return (regno
< CSKY_LAST_MINI_REGNUM
);
2331 else if (CSKY_TARGET_ARCH (CK802
)
2332 || CSKY_TARGET_ARCH (CK803
)
2333 || !TARGET_HIGH_REGISTERS
)
2334 /* Without high register, r15 cannot hold doubleword data. */
2335 return (regno
< (CSKY_SP_REGNUM
- 1));
2337 return (regno
< (CSKY_SP_REGNUM
- 1)
2338 || (regno
>= CSKY_LR_REGNUM
2339 && regno
< CSKY_LAST_HIGH_UNFIXED_REGNUM
));
2341 else if (regno
== CSKY_CC_REGNUM
)
2342 return (mode
== CCmode
);
2343 else if (regno
== CSKY_HI_REGNUM
|| regno
== CSKY_LO_REGNUM
)
2345 /* Don't allocate hi,lo register for float data even
2346 if in dsp mode, because it will cause high cost
2347 to reload data from hi,lo register. */
2348 if (!TARGET_DSP
|| mode
== SFmode
|| mode
== DFmode
)
2350 else if (nregs
== 2)
2351 return (regno
== CSKY_HI_REGNUM
);
2355 else if (CSKY_VREG_P (regno
) && TARGET_HARD_FLOAT
)
2361 /* Implement TARGET_MODES_TIEABLE_P. We can't tie DFmode with other modes
2362 when V_REGs might be in use because those registers mess with the stored
2366 csky_modes_tieable_p (machine_mode mode1
, machine_mode mode2
)
2368 return !(TARGET_HARD_FLOAT
2370 && (mode1
== DFmode
|| mode2
== DFmode
));
2373 /* Implement TARGET_CAN_CHANGE_MODE_CLASS.
2374 V_REG registers can't do subreg as all values are reformatted to
2375 internal precision. */
2378 csky_can_change_mode_class (machine_mode from
,
2382 return (GET_MODE_SIZE (from
) == GET_MODE_SIZE (to
)
2383 || !reg_classes_intersect_p (V_REGS
, rclass
));
2386 /* Implement TARGET_CLASS_LIKELY_SPILLED_P.
2387 We need to define this for MINI_REGS when we only use r0 - r7.
2388 Otherwise we can end up using r0-r4 for function arguments, and don't
2389 have enough left over to do doubleword arithmetic. */
2392 csky_class_likely_spilled_p (reg_class_t rclass
)
2394 if ((TARGET_MINI_REGISTERS
&& rclass
== MINI_REGS
)
2395 || rclass
== C_REGS
)
2402 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
2403 Given an rtx X being reloaded into a reg required to be
2404 in class CLASS, return the class of reg to actually use.
2405 In general this is just CLASS. */
2408 csky_preferred_reload_class (rtx x
, reg_class_t rclass
)
2410 if (TARGET_HARD_FLOAT
2411 && CONST_DOUBLE_P (x
)
2412 && (GET_MODE (x
) == DFmode
|| GET_MODE (x
) == SFmode
)
2413 && rclass
== NO_REGS
)
2414 return GENERAL_REGS
;
2419 /* Implement TARGET_CLASS_MAX_NREGS.
2420 Return the maximum number of consecutive registers of class rclass needed
2421 to hold a value of mode mode.
2422 On the csky, this is the size of MODE in words,
2423 except in the FP regs, where a single reg is always enough. */
2425 static unsigned char
2426 csky_class_max_nregs (reg_class_t rclass
, machine_mode mode
)
2428 if (rclass
== V_REGS
)
2431 return CSKY_NUM_REGS (mode
);
2435 /* Implement TARGET_SECONDARY_RELOAD.
2436 If copying a register of RCLASS from/to X requires an intermediate
2437 register, the hook should return the REGISTER_CLASS required for this
2438 intermediate register.
2439 If no intermediate register is required, it should return NO_REGS.
2440 If more than one intermediate register is required, describe the one
2441 that is closest in the copy chain to the reload register. */
2444 csky_secondary_reload (bool in_p ATTRIBUTE_UNUSED
, rtx x
,
2447 secondary_reload_info
*sri ATTRIBUTE_UNUSED
)
2451 /* Extract the real regno from X. */
2452 if (GET_CODE (x
) == SIGN_EXTEND
)
2459 regno
= true_regnum (x
);
2462 while (GET_CODE (x
) == SUBREG
)
2464 off
+= subreg_regno_offset (REGNO (SUBREG_REG (x
)),
2465 GET_MODE (SUBREG_REG (x
)),
2466 SUBREG_BYTE (x
), GET_MODE (x
));
2470 if (GET_CODE (x
) == REG
)
2471 regno
= REGNO (x
) + off
;
2474 else if (GET_CODE (x
) == REG
|| GET_CODE (x
) == SUBREG
)
2475 regno
= true_regnum (x
);
2477 /* We always require a general register when copying anything to
2478 HI/LO_REGNUM, except when copying an SImode value from HI/LO_REGNUM
2479 to a general register, or when copying from register 0. */
2480 if (rclass
== HILO_REGS
&& !CSKY_GENERAL_REGNO_P (regno
))
2481 return GENERAL_REGS
;
2483 if (rclass
== V_REGS
&& !CSKY_GENERAL_REGNO_P (regno
))
2485 /* Reload between vector reg and memory does not need an
2486 intermediate register. */
2487 if (MEM_P (x
) && (mode
== SFmode
|| mode
== DFmode
))
2490 return GENERAL_REGS
;
2496 /* Implement TARGET_SPILL_CLASS.
2497 Try spilling to a larger register class before spilling to memory. */
2500 csky_spill_class (reg_class_t rclass
, machine_mode mode ATTRIBUTE_UNUSED
)
2502 if ((rclass
== MINI_REGS
&& !TARGET_MINI_REGISTERS
)
2503 || (rclass
== LOW_REGS
&& TARGET_HIGH_REGISTERS
))
2504 return GENERAL_REGS
;
2508 /* Convert a static initializer array of feature bits to sbitmap
2512 csky_initialize_isa (sbitmap isa
, const enum csky_isa_feature
*isa_bits
)
2515 while (*isa_bits
!= CSKY_ISA_FEATURE_GET (none
))
2516 bitmap_set_bit (isa
, *(isa_bits
++));
2520 /* Configure a build target TARGET from the user-specified options OPTS and
2524 csky_configure_build_target (struct csky_build_target
*target
,
2525 struct cl_target_option
*opts
,
2526 struct gcc_options
*opts_set
)
2528 const struct csky_processors
*csky_selected_tune
= NULL
;
2529 struct csky_processors
*csky_selected_cpu
= NULL
;
2530 struct csky_processors
*csky_selected_arch
= NULL
;
2531 sbitmap all_sbits
= sbitmap_alloc (CSKY_ISA_FEATURE_GET (max
));
2532 bitmap_clear (all_sbits
);
2534 bitmap_clear (target
->isa
);
2535 target
->core_name
= NULL
;
2536 target
->arch_name
= NULL
;
2538 if (opts_set
->x_csky_arch_option
)
2539 csky_selected_arch
= &all_architectures
[opts
->x_csky_arch_option
];
2541 if (opts_set
->x_csky_cpu_option
)
2543 csky_selected_cpu
= &all_cores
[opts
->x_csky_cpu_option
];
2544 csky_selected_tune
= &all_cores
[opts
->x_csky_cpu_option
];
2547 if (csky_selected_cpu
)
2549 /* TODO: support combination of features
2550 between different cpu & arch, should based on arch. */
2551 if (csky_selected_arch
2552 && (csky_selected_cpu
->base_arch
!= csky_selected_arch
->base_arch
))
2553 warning (0, "cpu %s is not based on arch %s, ignoring the arch",
2554 csky_selected_cpu
->name
, csky_selected_arch
->name
);
2555 if (!csky_selected_arch
)
2556 csky_selected_arch
= &all_architectures
[csky_selected_cpu
->base_arch
];
2557 csky_initialize_isa (all_sbits
, csky_selected_arch
->isa_bits
);
2558 target
->core_name
= csky_selected_cpu
->name
;
2560 else if (csky_selected_arch
)
2562 csky_selected_cpu
= csky_selected_arch
;
2563 target
->arch_name
= csky_selected_arch
->name
;
2565 else /* If the user did not specify a processor, choose one for them. */
2567 csky_selected_cpu
= &all_cores
[TARGET_CPU_DEFAULT
];
2568 csky_selected_arch
= &all_architectures
[csky_selected_cpu
->base_arch
];
2569 csky_initialize_isa (all_sbits
, csky_selected_arch
->isa_bits
);
2570 target
->core_name
= csky_selected_cpu
->name
;
2573 /* The selected cpu may be an architecture, so lookup tuning by core ID. */
2574 if (!csky_selected_tune
)
2575 csky_selected_tune
= &all_cores
[csky_selected_cpu
->core
];
2576 gcc_assert (csky_selected_tune
);
2578 gcc_assert (csky_selected_arch
);
2579 gcc_assert (csky_selected_cpu
);
2580 csky_initialize_isa (target
->isa
, csky_selected_cpu
->isa_bits
);
2581 bitmap_ior (target
->isa
, target
->isa
, all_sbits
);
2583 /* Finish initializing the target structure. */
2584 target
->arch_pp_name
= csky_selected_cpu
->arch
;
2585 target
->base_arch
= csky_selected_cpu
->base_arch
;
2586 target
->arch_core
= csky_selected_cpu
->core
;
2588 sbitmap_free (all_sbits
);
2592 /* Implement TARGET_OPTION_OVERRIDE. */
2595 csky_option_override (void)
2597 csky_active_target
.isa
= sbitmap_alloc (CSKY_ISA_FEATURE_GET (max
));
2599 /* Create the default target_options structure. We need this early
2600 to configure the overall build target. */
2601 target_option_default_node
= target_option_current_node
2602 = build_target_option_node (&global_options
, &global_options_set
);
2604 csky_configure_build_target (&csky_active_target
,
2605 TREE_TARGET_OPTION (target_option_default_node
),
2606 &global_options_set
);
2608 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2609 SUBTARGET_OVERRIDE_OPTIONS
;
2612 csky_base_arch
= csky_active_target
.base_arch
;
2614 if (flag_pic
&& !(CSKY_TARGET_ARCH (CK807
)
2615 || CSKY_TARGET_ARCH (CK810
)
2616 || CSKY_TARGET_ARCH (CK860
)))
2619 warning (0, "%qs is not supported by arch %s",
2620 "-fPIC", csky_active_target
.arch_pp_name
);
2623 /* Check floating-point options for consistency. */
2624 if (TARGET_HARD_FLOAT
)
2626 const struct csky_fpu_desc
*csky_selected_fpu
= NULL
;
2628 if (csky_fpu_index
== TARGET_FPU_auto
)
2630 const char *target_fpu_name
;
2634 if (csky_active_target
.core_name
!= NULL
2635 && !strchr (csky_active_target
.core_name
, 'f'))
2636 target_fpu_name
= "auto";
2637 else if (CSKY_TARGET_ARCH (CK803
) || !TARGET_DOUBLE_FLOAT
)
2638 target_fpu_name
= "fpv2_sf";
2639 else if (CSKY_TARGET_ARCH (CK860
))
2640 target_fpu_name
= "fpv3";
2641 else if (TARGET_DOUBLE_FLOAT
&& TARGET_FDIVDU
)
2642 target_fpu_name
= "fpv2_divd";
2644 #ifdef CSKY_FPUTYPE_DEFAULT
2645 target_fpu_name
= CSKY_FPUTYPE_DEFAULT
;
2647 target_fpu_name
= "fpv2";
2650 ok
= opt_enum_arg_to_value (OPT_mfpu_
, target_fpu_name
, &fpu_index
,
2653 csky_fpu_index
= (enum csky_fpu_type
) fpu_index
;
2656 if (CSKY_TARGET_ARCH (CK801
) || CSKY_TARGET_ARCH (CK802
))
2657 error ("%qs is not supported by arch %s",
2658 "-mhard-float", csky_active_target
.arch_pp_name
);
2659 else if (csky_fpu_index
== TARGET_FPU_auto
)
2660 error ("%<-mhard-float%> is not supported by the selected CPU");
2663 csky_selected_fpu
= &all_fpus
[csky_fpu_index
];
2664 sbitmap fpu_bits
= sbitmap_alloc (CSKY_ISA_FEATURE_GET (max
));
2665 csky_initialize_isa (fpu_bits
, csky_selected_fpu
->isa_bits
);
2667 bitmap_ior (csky_active_target
.isa
, csky_active_target
.isa
,
2670 sbitmap_free (fpu_bits
);
2675 if (TARGET_DOUBLE_FLOAT
> 0)
2676 warning (0, "%<-mdouble-float%> ignored without %<-mhard-float%>");
2677 TARGET_DOUBLE_FLOAT
= 0;
2678 if (TARGET_FDIVDU
> 0)
2679 warning (0, "%<-mfdivdu%> ignored without %<-mhard-float%>");
2683 /* Initialize boolean versions of the architectural flags, for use
2687 #define CSKY_ISA(IDENT, DESC) \
2689 csky_arch_isa_features[CSKY_ISA_FEATURE_GET (IDENT)] = \
2690 bitmap_bit_p (csky_active_target.isa, CSKY_ISA_FEATURE_GET (IDENT)); \
2692 #include "csky_isa.def"
2695 /* Extended LRW instructions are enabled by default on CK801, disabled
2697 if (TARGET_ELRW
== -1)
2698 TARGET_ELRW
= CSKY_TARGET_ARCH (CK801
);
2700 /* DSP is enabled either by the processor feature or -mdsp
2701 command-line option. There is no -mno-dsp option as the assembler
2702 doesn't take one. */
2704 TARGET_DSP
= CSKY_ISA_FEATURE (dsp
);
2706 /* There's both -mdiv and -mno-div. Take default from processor if
2707 neither is specified explicitly. */
2708 if (TARGET_DIV
== -1)
2709 TARGET_DIV
= CSKY_ISA_FEATURE (div
);
2711 /* TARGET_CONSTANT_POOL is mandatory for CK801 and CK802 and optional
2713 The reason why the compiler has to generate constant pools for CK801/2
2714 instead of deferring to the assembler is that these cores don't have a
2715 long branch instruction other than jbsr, which clobbers lr. So for
2716 the compiler to correctly save/restore lr it has to know whether there
2717 are long branches, which depends on having accurate branch length
2718 counts, which in turn depends on having control over where constant
2719 pools are placed. */
2720 if ((CSKY_TARGET_ARCH (CK801
) || CSKY_TARGET_ARCH (CK802
))
2721 && !TARGET_CONSTANT_POOL
)
2722 error ("%qs is not supported by arch %s",
2723 "-mno-constpool", csky_active_target
.arch_pp_name
);
2724 else if (TARGET_CONSTANT_POOL
== -1)
2725 TARGET_CONSTANT_POOL
= (CSKY_TARGET_ARCH (CK801
)
2726 || CSKY_TARGET_ARCH (CK802
));
2728 /* TARGET_MINI_REGISTERS is mandatory for CK801, the default for CK802,
2729 and optional for other CPUs. TARGET_HIGH_REGISTERS is incompatible
2730 with TARGET_MINI_REGISTERS, is not supported by CK801/802/803,
2731 and is the default for other processors.
2732 See csky_conditional_register_usage. */
2733 if (TARGET_MINI_REGISTERS
> 0 && TARGET_HIGH_REGISTERS
> 0)
2734 error ("%<-msmart%> is incompatible with %<-mhigh-registers%>");
2735 else if (CSKY_TARGET_ARCH (CK801
)
2736 || CSKY_TARGET_ARCH (CK802
)
2737 || CSKY_TARGET_ARCH (CK803
))
2739 if (CSKY_TARGET_ARCH (CK801
)
2740 || (CSKY_TARGET_ARCH (CK802
) && TARGET_MINI_REGISTERS
== -1))
2741 TARGET_MINI_REGISTERS
= 1;
2742 else if (TARGET_MINI_REGISTERS
== -1)
2743 TARGET_MINI_REGISTERS
= 0;
2744 if (TARGET_HIGH_REGISTERS
> 0)
2745 warning (0, "%qs is not supported by arch %s",
2746 "-mhigh-registers", csky_active_target
.arch_pp_name
);
2747 TARGET_HIGH_REGISTERS
= 0;
2751 if (TARGET_MINI_REGISTERS
== -1)
2752 TARGET_MINI_REGISTERS
= 0;
2753 if (TARGET_HIGH_REGISTERS
== -1)
2754 TARGET_HIGH_REGISTERS
= !TARGET_MINI_REGISTERS
;
2757 /* -mmultiple-stld is the default for everything but CK801, which
2758 doesn't support it. */
2759 if (CSKY_TARGET_ARCH (CK801
))
2761 if (TARGET_MULTIPLE_STLD
> 0)
2762 warning (0, "%qs is not supported by arch %s",
2763 "-mmultiple-stld", csky_active_target
.arch_pp_name
);
2764 TARGET_MULTIPLE_STLD
= 0;
2769 /* Resynchronize the saved target options. */
2770 cl_target_option_save (TREE_TARGET_OPTION (target_option_default_node
),
2771 &global_options
, &global_options_set
);
2773 #ifdef ENABLE_TPF_DEBUG
2774 /* Don't emit DWARF4 unless specifically selected. The TPF
2775 debuggers do not yet support DWARF 3/4. */
2776 if (!global_options_set
.x_dwarf_strict
)
2778 if (!global_options_set
.x_dwarf_version
)
2782 /* Don't run the scheduler before reload by default,
2783 since it tends to increase register pressure. */
2784 if (!global_options_set
.x_flag_schedule_insns
)
2785 flag_schedule_insns
= 0;
2787 csky_add_gc_roots ();
2791 /* Return TRUE if X contains any references to TLS symbols. */
2794 csky_tls_referenced_p (rtx x
)
2799 subrtx_iterator::array_type array
;
2800 FOR_EACH_SUBRTX (iter
, array
, x
, ALL
)
2802 const_rtx x
= *iter
;
2803 if (GET_CODE (x
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (x
) != 0)
2806 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
2807 TLS offsets, not real symbol references. */
2808 if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLS
)
2809 iter
.skip_subrtxes ();
2815 /* Implement TARGET_CANNOT_FORCE_CONST_MEM.
2816 Determine if it's legal to put X into the constant pool. This
2817 is not possible for the address of thread-local symbols, which
2818 is checked above. */
2821 csky_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED
,
2824 return csky_tls_referenced_p (x
);
2828 /* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns nonzero if the
2829 constant value X is a legitimate general operand.
2830 It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2833 csky_legitimate_constant_p (machine_mode mode
, rtx x
)
2835 return (!csky_cannot_force_const_mem (mode
, x
)
2840 /* Return true if X is valid as an CSKY addressing register. */
2843 is_csky_address_register_rtx_p (rtx x
, int strict_p
)
2855 return (CSKY_GENERAL_REGNO_P (regno
)
2856 || CSKY_GENERAL_REGNO_P (reg_renumber
[regno
]));
2858 return CSKY_GENERAL_REGNO_P (regno
) || regno
>= FIRST_PSEUDO_REGISTER
;
2862 /* Return TRUE if X is a thread-local symbol. */
2865 csky_tls_symbol_p (rtx x
)
2870 if (GET_CODE (x
) != SYMBOL_REF
)
2873 return SYMBOL_REF_TLS_MODEL (x
) != 0;
2877 /* Handle lazy initialization of __tls_get_addr libfunc. */
2878 static GTY(()) rtx tls_get_addr_libfunc
;
2881 get_tls_get_addr (void)
2883 if (!tls_get_addr_libfunc
)
2884 tls_get_addr_libfunc
= init_one_libfunc ("__tls_get_addr");
2885 return tls_get_addr_libfunc
;
2889 /* Emit a call to __tls_get_addr. */
2892 csky_call_tls_get_addr (rtx x
, rtx reg
, rtx
*valuep
, int reloc
)
2894 rtx label
, labelno
, unspec
, tmp
;
2899 labelno
= GEN_INT (tls_labelno
++);
2900 label
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, labelno
), UNSPEC_TLS_LABEL
);
2901 unspec
= gen_rtx_UNSPEC (Pmode
,
2902 gen_rtvec (3, x
, GEN_INT (reloc
), label
),
2904 tmp
= gen_reg_rtx (SImode
);
2905 emit_move_insn (reg
, unspec
);
2906 emit_move_insn (tmp
, label
);
2907 emit_insn (gen_addsi3 (reg
, reg
, tmp
));
2908 *valuep
= emit_library_call_value (get_tls_get_addr (),
2909 NULL_RTX
, LCT_PURE
, /* LCT_CONST? */
2911 insns
= get_insns ();
2916 /* Helper function for csky_legitimize_address, to handle the TLS cases.
2917 REG is a scratch register and may be null. */
2920 csky_legitimize_tls_address (rtx x
, rtx reg
)
2922 rtx dest
, tp
, label
, labelno
, unspec
, ret
, eqv
, addend
, tmp
;
2924 unsigned int model
= SYMBOL_REF_TLS_MODEL (x
);
2927 reg
= gen_reg_rtx (SImode
);
2931 case TLS_MODEL_GLOBAL_DYNAMIC
:
2932 insns
= csky_call_tls_get_addr (x
, reg
, &ret
, TLS_GD32
);
2933 dest
= gen_reg_rtx (Pmode
);
2934 emit_libcall_block (insns
, dest
, ret
, x
);
2937 case TLS_MODEL_LOCAL_DYNAMIC
:
2938 insns
= csky_call_tls_get_addr (x
, reg
, &ret
, TLS_LDM32
);
2940 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2941 share the LDM result with other LD model accesses. */
2942 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const1_rtx
), UNSPEC_TLS
);
2943 dest
= gen_reg_rtx (Pmode
);
2944 emit_libcall_block (insns
, dest
, ret
, eqv
);
2946 /* Load the addend. */
2947 addend
= gen_rtx_UNSPEC (Pmode
,
2948 gen_rtvec (2, x
, GEN_INT (TLS_LDO32
)),
2950 addend
= force_reg (SImode
, addend
);
2951 return gen_rtx_PLUS (Pmode
, dest
, addend
);
2953 case TLS_MODEL_INITIAL_EXEC
:
2954 labelno
= GEN_INT (tls_labelno
++);
2955 label
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, labelno
), UNSPEC_TLS_LABEL
);
2956 unspec
= gen_rtx_UNSPEC (Pmode
,
2957 gen_rtvec (3, x
, GEN_INT (TLS_IE32
), label
),
2959 tmp
= gen_reg_rtx (SImode
);
2960 emit_move_insn (reg
, unspec
);
2961 emit_move_insn (tmp
, label
);
2962 emit_insn (gen_addsi3 (reg
, reg
, tmp
));
2963 emit_move_insn (reg
, gen_const_mem (Pmode
, reg
));
2964 tp
= gen_rtx_REG (SImode
, CSKY_TLS_REGNUM
);
2965 return gen_rtx_PLUS (Pmode
, tp
, reg
);
2967 case TLS_MODEL_LOCAL_EXEC
:
2968 unspec
= gen_rtx_UNSPEC (Pmode
,
2969 gen_rtvec (2, x
, GEN_INT (TLS_LE32
)),
2971 emit_move_insn (reg
, unspec
);
2972 tp
= gen_rtx_REG (SImode
, CSKY_TLS_REGNUM
);
2973 return gen_rtx_PLUS (Pmode
, tp
, reg
);
2981 /* Implement TARGET_LEGITIMIZE_ADDRESS. */
2984 csky_legitimize_address (rtx x
, rtx orig_x ATTRIBUTE_UNUSED
,
2987 if (csky_tls_symbol_p (x
))
2988 return csky_legitimize_tls_address (x
, NULL_RTX
);
2990 if (GET_CODE (x
) == PLUS
)
2992 rtx xop0
= XEXP (x
, 0);
2993 rtx xop1
= XEXP (x
, 1);
2995 if (is_csky_address_register_rtx_p (xop0
, 0)
2996 && CONST_INT_P (xop1
))
2998 HOST_WIDE_INT offset
= INTVAL (xop1
);
3000 /* Try to replace ld32 rx,(ry, offset), to addi16 rz, oimm8
3001 and ld16 rx,(rz, new_ld_offset) to avoid emitting a
3002 32-bit ld, but this addi has a range limitation. */
3004 && offset
> CSKY_LD16_MAX_OFFSET (mode
)
3005 && offset
<= (CSKY_ADDI16_MAX_IMM
3006 + CSKY_LD16_MAX_OFFSET (mode
)))
3008 HOST_WIDE_INT new_ld_offset
3009 = offset
& CSKY_LD16_OFFSET_MASK (mode
);
3011 xop0
= force_operand (plus_constant (Pmode
, xop0
,
3012 offset
- new_ld_offset
),
3014 x
= plus_constant (Pmode
, xop0
, new_ld_offset
);
3016 else if (offset
< 0 && offset
>= (-CSKY_SUBI16_MAX_IMM
))
3017 x
= force_operand (x
, NULL_RTX
);
3018 else if (offset
> CSKY_LD16_MAX_OFFSET (mode
)
3021 /* For the remaining cases, force the constant into a
3023 xop1
= force_reg (SImode
, xop1
);
3024 x
= gen_rtx_PLUS (SImode
, xop0
, xop1
);
3028 /* If the index is store in register, force the
3029 base to register. */
3030 if (is_csky_address_register_rtx_p (xop1
, 0)
3031 && !is_csky_address_register_rtx_p (xop0
, 0))
3033 xop0
= force_operand (xop0
, NULL_RTX
);
3034 x
= gen_rtx_PLUS (SImode
, xop0
, xop1
);
3037 /* Make sure to take full advantage of the pre-indexed addressing mode
3038 with absolute addresses which often allows for the base register to
3039 be factorized for multiple adjacent memory references, and it might
3040 even allows for the mini pool to be avoided entirely. */
3041 else if (CONST_INT_P (x
) && optimize
> 0)
3043 HOST_WIDE_INT mask
, base
, index
;
3046 mask
= CSKY_LD16_OFFSET_MASK (mode
);
3047 base
= INTVAL (x
) & ~mask
;
3048 index
= INTVAL (x
) & mask
;
3049 base_reg
= force_reg (SImode
, GEN_INT (base
));
3050 x
= plus_constant (Pmode
, base_reg
, index
);
3057 /* Return nonzero if INDEX is valid for an address index operand.
3058 ck801 use 16 bits ld
3059 ck802 use 16 and 32 bits ld
3060 others use ld and ldr. */
3063 ck801_legitimate_index_p (machine_mode mode
, rtx index
,
3064 int strict_p ATTRIBUTE_UNUSED
)
3066 enum rtx_code code
= GET_CODE (index
);
3068 /* When the mode size is larger than 4, we may use two ld instruction
3069 to get data, the index and (index+1) should be valid. */
3070 if (GET_MODE_SIZE (mode
) >= 8)
3071 return (code
== CONST_INT
3072 && INTVAL (index
) < CSKY_LD16_MAX_OFFSET (SImode
)
3073 && INTVAL (index
) >= 0 && (INTVAL (index
) & 3) == 0);
3075 if (code
== CONST_INT
&& GET_MODE_SIZE (mode
) > 0
3076 && INTVAL (index
) <= CSKY_LD16_MAX_OFFSET (mode
)
3077 && INTVAL (index
) >= 0)
3078 return ((INTVAL (index
) % GET_MODE_SIZE (mode
)) == 0);
3085 ck802_legitimate_index_p (machine_mode mode
, rtx index
,
3086 int strict_p ATTRIBUTE_UNUSED
)
3088 enum rtx_code code
= GET_CODE (index
);
3090 /* When the mode size is larger than 4, we may use two ld instruction
3091 to get data, the index and (index+1) should be valid. */
3092 if (GET_MODE_SIZE (mode
) >= 8)
3093 return (code
== CONST_INT
3094 && INTVAL (index
) < CSKY_LD32_MAX_OFFSET (SImode
)
3095 && INTVAL (index
) >= 0 && (INTVAL (index
) & 3) == 0);
3097 if (code
== CONST_INT
&& GET_MODE_SIZE (mode
) > 0
3098 && INTVAL (index
) <= CSKY_LD32_MAX_OFFSET (mode
)
3099 && INTVAL (index
) >= 0)
3100 return ((INTVAL (index
) % GET_MODE_SIZE (mode
)) == 0);
3106 /* The instruction ldr rz, (rx, ry << i), i can be 0,1,2,3.
3107 Check that SHIFT is valid, that the code is MULT, and that
3108 the shift is a power of 2. */
3111 is_ldr_shift_p (HOST_WIDE_INT shift
, enum rtx_code code
)
3114 return (shift
>= 0 && shift
<= 3);
3115 else if (code
== MULT
)
3126 ck810_legitimate_index_p (machine_mode mode
, rtx index
, int strict_p
)
3128 enum rtx_code code
= GET_CODE (index
);
3130 if (code
== CONST_INT
&& TARGET_HARD_FLOAT
&& CSKY_VREG_MODE_P (mode
))
3131 return (INTVAL (index
) < 1024 && INTVAL (index
) >= 0
3132 && (INTVAL (index
) & 3) == 0);
3134 if (code
== CONST_INT
)
3136 /* When the mode size is larger than 4, we may use two ld instruction
3137 to get data, the index and (index+1) should be valid. */
3138 if (GET_MODE_SIZE (mode
) >= 8)
3139 return (INTVAL (index
) < CSKY_LD32_MAX_OFFSET (SImode
)
3140 && INTVAL (index
) >= 0 && (INTVAL (index
) & 3) == 0);
3142 if (GET_MODE_SIZE (mode
) > 0
3143 && INTVAL (index
) <= CSKY_LD32_MAX_OFFSET (mode
)
3144 && INTVAL (index
) >= 0)
3145 return ((INTVAL (index
) % GET_MODE_SIZE (mode
)) == 0);
3147 /* Allow ld.w rx, (gb, sym@got) when -fpic specially. */
3148 else if (code
== UNSPEC
)
3149 return (flag_pic
== 1
3150 && (XINT (index
, 1) == UNSPEC_PIC_SYMBOL_PLT
3151 || XINT (index
, 1) == UNSPEC_PIC_SYMBOL_GOT
));
3152 /* The follow index is for ldr instruction, the ldr cannot
3153 load dword data, so the mode size should not be larger than
3155 else if (GET_MODE_SIZE (mode
) <= 4
3156 || (TARGET_HARD_FLOAT
&& CSKY_VREG_MODE_P (mode
)))
3158 if (is_csky_address_register_rtx_p (index
, strict_p
))
3160 else if (code
== MULT
|| code
== ASHIFT
)
3162 rtx xiop0
= XEXP (index
, 0);
3163 rtx xiop1
= XEXP (index
, 1);
3165 /* FIXME can the xiop1 be the reg and xiop0 be the int when mult? */
3166 return (is_csky_address_register_rtx_p (xiop0
, strict_p
)
3167 && CONST_INT_P (xiop1
)
3168 && is_ldr_shift_p (INTVAL (xiop1
), code
));
3177 csky_legitimate_index_p (machine_mode mode
, rtx index
, int strict_p
)
3179 if (CSKY_TARGET_ARCH (CK801
))
3180 return ck801_legitimate_index_p (mode
, index
, strict_p
);
3181 else if (CSKY_TARGET_ARCH (CK802
))
3182 return ck802_legitimate_index_p (mode
, index
, strict_p
);
3184 return ck810_legitimate_index_p (mode
, index
, strict_p
);
3188 /* Implement TARGET_LEGITIMATE_ADDRESS_P.
3189 Recognizes RTL expressions that are valid memory addresses for an
3190 instruction. The MODE argument is the machine mode for the MEM
3191 expression that wants to use this address.
3193 It only recognizes address in canonical form. LEGITIMIZE_ADDRESS should
3194 convert common non-canonical forms to canonical form so that they will
3198 csky_legitimate_address_p (machine_mode mode
, rtx addr
, bool strict_p
)
3200 enum rtx_code code
= GET_CODE (addr
);
3202 /* Match the RTX form emitted for constant pool references.
3203 After reload constants split into minipools will have addresses
3204 from a LABEL_REF. */
3205 if (reload_completed
3206 && ((code
== LABEL_REF
)
3208 && GET_CODE (XEXP (addr
, 0)) == PLUS
3209 && GET_CODE (XEXP (XEXP (addr
, 0), 0)) == LABEL_REF
3210 && CONST_INT_P (XEXP (XEXP (addr
, 0), 1)))))
3213 if (is_csky_address_register_rtx_p (addr
, strict_p
))
3215 /* It is a pc-relative load, may be generated for constpool. */
3216 else if (GET_CODE (addr
) == LABEL_REF
)
3221 rtx xop0
= XEXP (addr
, 0);
3222 rtx xop1
= XEXP (addr
, 1);
3224 return ((is_csky_address_register_rtx_p (xop0
, strict_p
)
3225 && csky_legitimate_index_p (mode
, xop1
, strict_p
))
3226 || (is_csky_address_register_rtx_p (xop1
, strict_p
)
3227 && csky_legitimate_index_p (mode
, xop0
, strict_p
)));
3234 /* Functions to save and restore machine-specific function data. */
3236 static struct machine_function
*
3237 csky_init_machine_status (void)
3239 struct machine_function
*machine
;
3241 machine
= ggc_cleared_alloc
<machine_function
> ();
3243 #if CSKY_FT_UNKNOWN != 0
3244 machine
->func_type
= CSKY_FT_UNKNOWN
;
3250 /* Implement INIT_EXPANDERS. */
3253 csky_init_expanders (void)
3255 /* Arrange to initialize and mark the machine per-function status. */
3256 init_machine_status
= csky_init_machine_status
;
3260 /* Implement TARGET_CANNOT_COPY_INSN_P.
3261 We must not copy any rtx that uses a pc-relative address. */
3264 csky_cannot_copy_insn_p (rtx_insn
*insn
)
3266 subrtx_iterator::array_type array
;
3267 FOR_EACH_SUBRTX (iter
, array
, PATTERN (insn
), ALL
)
3269 const_rtx x
= *iter
;
3270 if (GET_CODE (x
) == UNSPEC
3271 && (XINT (x
, 1) == UNSPEC_TLS_LABEL
3272 || XINT (x
, 1) == UNSPEC_PIC_SYMBOL_GOTPC_GRS
))
3279 /* Extract the parts of an RTL expression that is a valid memory address
3280 for an instruction. Return FALSE if it is a invalid memory address. */
3284 rtx base
, index
, symbol
, label
, disp
;
3285 HOST_WIDE_INT scale
;
3289 decompose_csky_address (rtx addr
, struct csky_address
*out
)
3291 rtx base
= NULL_RTX
, index
= NULL_RTX
, disp
= NULL_RTX
;
3292 HOST_WIDE_INT scale
= 0;
3293 rtx scale_rtx
= NULL_RTX
;
3296 out
->base
= out
->index
= out
->symbol
= out
->label
= out
->disp
= NULL_RTX
;
3305 if (GET_CODE (addr
) == LABEL_REF
)
3311 if (GET_CODE (addr
) == CONST
)
3312 addr
= XEXP (addr
, 0);
3314 if (GET_CODE (addr
) == PLUS
)
3318 addends
[0] = XEXP (addr
, 0);
3319 addends
[1] = XEXP (addr
, 1);
3321 if (GET_CODE (addends
[0]) == LABEL_REF
&& CONST_INT_P (addends
[1]))
3323 out
->label
= addends
[0];
3324 out
->disp
= addends
[1];
3328 if (!REG_P (addends
[0]))
3329 std::swap (addends
[0], addends
[1]);
3331 for (i
= 0; i
< 2; ++i
)
3334 switch (GET_CODE (op
))
3356 index
= XEXP (op
, 0);
3357 scale_rtx
= XEXP (op
, 1);
3358 if (!CONST_INT_P (index
) && !CONST_INT_P (scale_rtx
))
3360 else if (CONST_INT_P (index
))
3361 std::swap (index
, scale_rtx
);
3362 scale
= INTVAL (scale_rtx
);
3367 index
= XEXP (op
, 0);
3368 scale_rtx
= XEXP (op
, 1);
3369 if (!CONST_INT_P (scale_rtx
))
3371 scale
= 1 << INTVAL (scale_rtx
);
3390 /* Helper function for the csky_simple_mem_operand predicate. Returns
3391 true if OP is an address of the form reg + displacement. */
3394 csky_simple_addr_operand_p (rtx op
)
3396 struct csky_address addr
;
3398 if (!decompose_csky_address (op
, &addr
))
3401 /* FIXME The PIC related code.
3402 Check if load the symbol address from got table. */
3403 if (addr
.disp
&& GET_CODE (addr
.disp
) == UNSPEC
)
3405 if (!addr
.index
&& !addr
.symbol
)
3411 /* Print the UNSPEC operand in X to the STREAM. */
3414 csky_output_pic_addr_const (FILE *stream
, rtx x
, int code
)
3417 if (GET_CODE (x
) != UNSPEC
)
3420 if (UNSPEC_TLS
== XINT (x
, 1))
3422 /* FIXME It is not reached */
3426 csky_print_operand (stream
, XVECEXP (x
, 0, 0), code
);
3428 switch (XINT (x
, 1))
3430 case UNSPEC_PIC_SYMBOL_GOTOFF
:
3431 fputs ("@GOTOFF", stream
);
3433 case UNSPEC_PIC_SYMBOL_PLT
:
3434 fputs ("@PLT", stream
);
3436 case UNSPEC_PIC_SYMBOL_GOT
:
3437 fputs ("@GOT", stream
);
3439 case UNSPEC_PIC_SYMBOL_GOTPC
:
3440 fputs ("@GOTPC", stream
);
3442 case UNSPEC_PIC_SYMBOL_BSR
:
3450 /* Output the constpool label according to the rtx expression X. */
3453 csky_output_constpool_label (FILE *stream
, rtx x
)
3457 gcc_assert (GET_CODE (x
) == LABEL_REF
);
3460 if (GET_CODE (x
) == UNSPEC_VOLATILE
&& XINT (x
, 1) == VUNSPEC_POOL_LABEL
)
3462 ASM_GENERATE_INTERNAL_LABEL (buf
, CSKY_CONSTPOOL_LABEL_PREFIX
,
3463 INTVAL (XVECEXP (x
, 0, 0)));
3464 assemble_name (stream
, buf
);
3469 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
3472 csky_print_operand_address (FILE *stream
,
3473 machine_mode mode ATTRIBUTE_UNUSED
,
3477 struct csky_address addr
;
3479 decompose_csky_address (x
, &addr
);
3481 if (addr
.label
&& addr
.disp
&& GET_CODE (addr
.disp
) == CONST_INT
)
3483 fprintf (stream
, "[");
3484 csky_output_constpool_label (stream
, addr
.label
);
3485 fprintf (stream
, "+%d]", (int) INTVAL (addr
.disp
));
3487 else if (addr
.label
)
3489 fprintf (stream
, "[");
3490 csky_output_constpool_label (stream
, addr
.label
);
3491 fprintf (stream
, "]");
3493 else if (addr
.symbol
&& addr
.disp
&& GET_CODE (addr
.disp
) == CONST_INT
)
3495 fprintf (stream
, "[");
3496 output_addr_const (stream
, addr
.symbol
);
3497 fprintf (stream
, "+%d]", (int) INTVAL (addr
.disp
));
3499 else if (addr
.symbol
)
3501 fprintf (stream
, "[");
3502 output_addr_const (stream
, addr
.symbol
);
3503 fprintf (stream
, "]");
3505 else if (addr
.disp
&& GET_CODE (addr
.disp
) == CONST_INT
)
3506 fprintf (stream
, "(%s, %d)",
3507 reg_names
[REGNO (addr
.base
)], (int) INTVAL (addr
.disp
));
3508 else if (addr
.disp
&& GET_CODE (addr
.disp
) == UNSPEC
)
3510 if (REGNO (addr
.base
) != CSKY_GB_REGNUM
)
3511 fprintf (stream
, "(%s, ", reg_names
[REGNO (addr
.base
)]);
3513 fprintf (stream
, "[");
3514 csky_output_pic_addr_const (stream
, addr
.disp
, 0);
3515 fprintf (stream
, "%s", (REGNO (addr
.base
) != CSKY_GB_REGNUM
)
3518 else if (addr
.index
)
3519 fprintf (stream
, "(%s, %s << %d)",
3520 reg_names
[REGNO (addr
.base
)], reg_names
[REGNO (addr
.index
)],
3521 exact_log2 ((int) (addr
.scale
)));
3523 fprintf (stream
, "(%s, 0)", reg_names
[REGNO (addr
.base
)]);
3527 /* Implement TARGET_PRINT_OPERAND.
3528 Print operand X (an rtx) in assembler syntax to file STREAM
3529 according to modifier CODE.
3531 'N' print the log2(X+1), mainly used for bmaski
3532 'P' print the log2(X)
3533 'Q' print the log2(~X)
3534 'O' print a decimal number
3535 'M' print a decimal number as its negative
3536 'R' print the next register or memory location along, i.e. the lsw in
3538 'H' print the high 16 bits of a constant. */
3541 csky_print_operand (FILE *stream
, rtx x
, int code
)
3546 if ((INTVAL (x
) & 0xffffffff) == 0xffffffff)
3547 fprintf (stream
, "0");
3549 fprintf (stream
, "%d",
3550 (int) exact_log2 ((INTVAL (x
) & 0xffffffff) + 1) % 32);
3553 fprintf (stream
, "%d",
3554 (int) exact_log2 (INTVAL (x
) & 0xffffffff));
3557 fprintf (stream
, "%d",
3558 (int) exact_log2 (~INTVAL (x
) & 0xffffffff));
3561 fprintf (stream
, "%d", (int) INTVAL (x
));
3564 fprintf (stream
, "%d", (int) (-INTVAL (x
)));
3567 /* Next location along in memory or register. */
3568 switch (GET_CODE (x
))
3571 fputs (reg_names
[REGNO (x
) + 1], stream
);
3574 csky_print_operand_address
3575 (stream
, GET_MODE (x
), XEXP (adjust_address (x
, SImode
, 4), 0));
3582 fprintf (stream
, "%ld", (long)((INTVAL (x
) & 0xFFFF0000) >> 16));
3585 switch (GET_CODE (x
))
3588 fputs (reg_names
[REGNO (x
)], stream
);
3591 output_address (GET_MODE (x
), XEXP (x
, 0));
3594 csky_output_pic_addr_const (stream
, x
, code
);
3599 real_to_decimal ( fpstr
, CONST_DOUBLE_REAL_VALUE (x
),
3600 sizeof (fpstr
), 0, 1);
3601 fprintf (stream
, "%s", fpstr
);
3605 output_addr_const (stream
, x
);
3614 /* Implement TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS. */
3617 csky_allocate_stack_slots_for_args (void)
3619 /* Naked functions should not allocate stack slots for arguments. */
3620 return !CSKY_FUNCTION_IS_NAKED (get_csky_current_func_type ());
3624 /* Can we generate a constant with a single instruction, without using
3628 const_ok_for_cskyv2 (HOST_WIDE_INT value
)
3630 /* Try exact power of two. It can be generated by bgeni. */
3631 if (CSKY_CONST_OK_FOR_Ub (value
))
3634 /* Try exact power of two - 1. It can be generated by bmaski. */
3635 if (CSKY_CONST_OK_FOR_Uc (value
) && value
!= -1)
3638 /* Try if it can be generated by movi. */
3639 if (CSKY_CONST_OK_FOR_I (value
))
3642 /* The constant can be generated by movih.
3643 Notice that movih is a 32-bit instruction. */
3644 if (CSKY_CONST_OK_FOR_MOVIH (value
))
/* Tricks for synthesizing constants from values that can be directly
   manipulated by machine instructions.  */

enum csky_inline_const_type
{
  IC_UNINLINABLE = 0, /* Not inlineable */
  IC_SINGLE,	      /* Single instruction */
  IC_APPEND_NOT,      /* Single instruction followed by a not */
  IC_APPEND_ADDI,     /* Single insn followed by an addi */
  IC_APPEND_SUBI,     /* Single insn followed by a subi */
  IC_BGENI_ADDI,      /* Single insn(bgeni) followed by an addi */
  IC_BGENI_SUBI,      /* Single insn(bgeni) followed by a subi */
  IC_APPEND_BSETI,    /* Single insn followed by bseti */
  IC_APPEND_MOVI,     /* Single insn followed by movi */
  IC_APPEND_BCLRI,    /* Single insn followed by bclri */
  IC_APPEND_ROTLI,    /* Single insn followed by rotli */
  IC_APPEND_LSLI,     /* Single insn followed by lsli */
  IC_APPEND_IXH,      /* Single insn followed by ixh */
  IC_APPEND_IXW	      /* Single insn followed by ixw */
};
3673 /* Try tricks to load a constant inline and return the trick number if
3674 success, or IC_UNINLINABLE. */
3676 static enum csky_inline_const_type
3677 try_csky_constant_tricks (HOST_WIDE_INT value
, HOST_WIDE_INT
*x
,
3680 HOST_WIDE_INT i
, value_invert
;
3681 unsigned HOST_WIDE_INT bit
, shf
, rot
, lobits
, hibits
;
3683 value
&= 0xffffffff;
3684 value_invert
= ~value
& 0xffffffff;
3686 if (const_ok_for_cskyv2 (value
))
3692 /* Since movih is 32 bits, do not use it here, better code may
3693 be generated later. */
3694 if (const_ok_for_cskyv2 (value_invert
)
3695 && !CSKY_CONST_OK_FOR_MOVIH (value_invert
))
3698 return IC_APPEND_NOT
;
3701 /* One immediate generate instruction, and one 16-bit subi or addi. */
3702 for (i
= 1; i
<= 32; i
++)
3704 if (const_ok_for_cskyv2 (value
- i
)
3705 && !CSKY_CONST_OK_FOR_MOVIH (value
- i
))
3709 return IC_APPEND_ADDI
;
3712 if (const_ok_for_cskyv2 (value
+ i
)
3713 && !CSKY_CONST_OK_FOR_MOVIH (value
- i
))
3717 return IC_APPEND_SUBI
;
3721 /* Generate bgeni + addi. */
3722 if (CSKY_CONST_OK_FOR_Ub (value
& 0xfffff000))
3724 *x
= (value
& 0xfffff000);
3725 *y
= (value
& 0xfff);
3726 return IC_BGENI_ADDI
;
3729 /* Generate bgeni + subi. */
3730 lobits
= value
& 0xfff;
3731 hibits
= (unsigned HOST_WIDE_INT
)(value
& 0xfffff000) + (1 << 12);
3732 if (exact_log2 (hibits
) >= 1
3733 && exact_log2 (hibits
) <= 30
3737 *y
= (0x1000 - lobits
);
3738 return IC_BGENI_SUBI
;
3741 /* One immediate generate instruction, and one bseti or bclri. */
3742 bit
= 0x80000000ULL
;
3743 for (i
= 0; i
<= 31; i
++)
3745 if (const_ok_for_cskyv2 (value
& ~bit
)
3746 && !CSKY_CONST_OK_FOR_MOVIH (value
& ~bit
))
3749 *x
= (value
& ~bit
);
3750 return IC_APPEND_BSETI
;
3753 if (const_ok_for_cskyv2 (value
| bit
)
3754 && !CSKY_CONST_OK_FOR_MOVIH (value
| bit
))
3756 *y
= ~bit
& 0xffffffff;
3758 return IC_APPEND_BCLRI
;
3764 /* One immediate generate instruction, and one rotli or lsli. */
3767 for (i
= 1; i
< 31; i
++)
3777 if (const_ok_for_cskyv2 (rot
) && !CSKY_CONST_OK_FOR_MOVIH (rot
))
3781 return IC_APPEND_ROTLI
;
3784 /* Can't use logical shift when low order bit is one. */
3790 if (shf
!= 0 && const_ok_for_cskyv2 (shf
)
3791 && !CSKY_CONST_OK_FOR_MOVIH (shf
))
3795 return IC_APPEND_LSLI
;
3799 /* One immediate generate instruction, and one ixh. */
3800 if (CSKY_ISA_FEATURE (E2
)
3802 && const_ok_for_cskyv2 (value
/ 3)
3803 && !CSKY_CONST_OK_FOR_MOVIH (value
/ 3))
3806 return IC_APPEND_IXH
;
3809 /* One immediate generate instruction, and one ixw. */
3810 if (CSKY_ISA_FEATURE (E2
)
3812 && const_ok_for_cskyv2 (value
/ 5)
3813 && !CSKY_CONST_OK_FOR_MOVIH (value
/ 5))
3816 return IC_APPEND_IXW
;
3819 /* Generate movih + bseti. */
3820 if (CSKY_CONST_OK_FOR_Ub (value
& 0xffff))
3822 *x
= value
& 0xffff0000;
3823 *y
= value
& 0xffff;
3824 return IC_APPEND_BSETI
;
3827 /* Generate movih + not. */
3828 if (CSKY_CONST_OK_FOR_MOVIH (value_invert
))
3831 return IC_APPEND_NOT
;
3834 /* One movih, and one 16bits addi or subi. */
3835 for (i
= 1; i
<= 32; i
++)
3837 if (CSKY_CONST_OK_FOR_MOVIH (value
- i
))
3841 return IC_APPEND_ADDI
;
3844 if (CSKY_CONST_OK_FOR_MOVIH (value
+ i
))
3848 return IC_APPEND_SUBI
;
3852 /* One movih, and one bseti or bclri. */
3853 bit
= 0x80000000ULL
;
3854 for (i
= 0; i
<= 31; i
++)
3856 if (CSKY_CONST_OK_FOR_MOVIH (value
& ~bit
))
3860 return IC_APPEND_BSETI
;
3863 if (CSKY_CONST_OK_FOR_MOVIH (value
| bit
))
3865 *y
= ~bit
& 0xffffffff;
3867 return IC_APPEND_BCLRI
;
3873 /* One movih, and one rotli or lsli. */
3876 for (i
= 1; i
< 31; i
++)
3886 if (CSKY_CONST_OK_FOR_MOVIH (rot
))
3890 return IC_APPEND_ROTLI
;
3893 /* Can't use logical shift when low order bit is one. */
3899 if (shf
!= 0 && CSKY_CONST_OK_FOR_MOVIH (shf
))
3903 return IC_APPEND_LSLI
;
3907 return IC_UNINLINABLE
;
3911 /* Actually output a constant using a trick.
3912 FIXME: I think this would be better handled by a splitter than at the
3913 asm output level. */
3916 csky_output_inline_const (machine_mode mode
, rtx operands
[])
3918 HOST_WIDE_INT x
= 0, y
= 0;
3919 enum csky_inline_const_type trick_type
;
3920 rtx out_operands
[3];
3923 const char *dst_fmt
;
3924 HOST_WIDE_INT value
= INTVAL (operands
[1]);
3925 int ivalue
= (int) value
;
3926 unsigned int uvalue
= (unsigned int) value
;
3928 trick_type
= try_csky_constant_tricks (value
, &x
, &y
);
3929 /* lrw's are handled separately: Large inlinable constants never get
3930 turned into lrw's. Our caller uses try_csky_constant_tricks to back
3931 off to an lrw rather than calling this routine. */
3932 gcc_assert (trick_type
!= IC_UNINLINABLE
);
3934 /* Operands: 0 = dst, 1 = load immedate., 2 = adjust immedate. */
3935 out_operands
[0] = operands
[0];
3936 out_operands
[1] = GEN_INT (x
);
3937 if (trick_type
!= IC_SINGLE
&& trick_type
!= IC_APPEND_NOT
)
3938 out_operands
[2] = GEN_INT (y
);
3940 /* Select dst format based on mode. */
3941 if (mode
== DImode
&& TARGET_BIG_ENDIAN
)
3946 /* Try movi16: 0~31,movi32: 0~65535. */
3947 if (CSKY_CONST_OK_FOR_I (x
))
3948 sprintf (load_op
, "movi\t%s, %%1", dst_fmt
);
3949 /* Try exact power of two - 1. */
3950 else if (CSKY_CONST_OK_FOR_Uc (x
))
3951 sprintf (load_op
, "bmaski\t%s, %%N1", dst_fmt
);
3953 else if (CSKY_CONST_OK_FOR_MOVIH (x
))
3954 sprintf (load_op
, "movih\t%s, %%H1", dst_fmt
);
3957 sprintf (load_op
, "BADMOVI-inline_const %s, %%1", dst_fmt
);
3964 strcpy (buf
, load_op
);
3966 /* Add instruction 'not'. */
3968 sprintf (buf
, "%s\n\tnot\t%s, %s\t// %d 0x%x", load_op
, dst_fmt
,
3969 dst_fmt
, ivalue
, uvalue
);
3971 /* Add instruction 'addi'. */
3972 case IC_APPEND_ADDI
:
3973 sprintf (buf
, "%s\n\taddi\t%s, %s, %%2\t// %d 0x%x", load_op
,
3974 dst_fmt
, dst_fmt
, ivalue
, uvalue
);
3976 /* Add instruction 'subi'. */
3977 case IC_APPEND_SUBI
:
3978 sprintf (buf
, "%s\n\tsubi\t%s, %s, %%2\t// %d 0x%x", load_op
,
3979 dst_fmt
, dst_fmt
, ivalue
, uvalue
);
3981 /* Add instruction 'addi', the last instruction is bgeni. */
3983 sprintf (buf
, "%s\n\taddi\t%s, %s, %%2\t// %d 0x%x", load_op
,
3984 dst_fmt
, dst_fmt
, ivalue
, uvalue
);
3986 /* Add instruction 'subi', the last instruction is bgeni. */
3988 sprintf (buf
, "%s\n\tsubi\t%s, %s, %%2\t// %d 0x%x", load_op
,
3989 dst_fmt
, dst_fmt
, ivalue
, uvalue
);
3991 /* Add instruction 'bseti'. */
3992 case IC_APPEND_BSETI
:
3993 sprintf (buf
, "%s\n\tbseti\t%s, %s, %%P2\t// %d 0x%x", load_op
,
3994 dst_fmt
, dst_fmt
, ivalue
, uvalue
);
3996 /* Add instruction 'movi'. */
3997 case IC_APPEND_MOVI
:
3998 sprintf (buf
, "%s\n\tmovi\t%s, %%2\t// %d 0x%x", load_op
, dst_fmt
,
4001 /* Add instruction 'bclri'. */
4002 case IC_APPEND_BCLRI
:
4003 sprintf (buf
, "%s\n\tbclri\t%s, %s, %%Q2\t// %d 0x%x", load_op
,
4004 dst_fmt
, dst_fmt
, ivalue
, uvalue
);
4006 /* Add instruction 'rotli'. */
4007 case IC_APPEND_ROTLI
:
4008 sprintf (buf
, "%s\n\trotli\t%s, %s, %%2\t// %d 0x%x", load_op
,
4009 dst_fmt
, dst_fmt
, ivalue
, uvalue
);
4011 /* Add instruction 'lsli'. */
4012 case IC_APPEND_LSLI
:
4013 sprintf (buf
, "%s\n\tlsli\t%s, %s, %%2\t// %d 0x%x", load_op
,
4014 dst_fmt
, dst_fmt
, ivalue
, uvalue
);
4016 /* Add instruction 'ixh'. */
4018 sprintf (buf
, "%s\n\tixh\t%s, %s, %s\t// %d 0x%x", load_op
,
4019 dst_fmt
, dst_fmt
, dst_fmt
, ivalue
, uvalue
);
4021 /* Add instruction 'ixw'. */
4023 sprintf (buf
, "%s\n\tixw\t%s, %s, %s\t// %d 0x%x", load_op
,
4024 dst_fmt
, dst_fmt
, dst_fmt
, ivalue
, uvalue
);
4030 output_asm_insn (buf
, out_operands
);
4035 /* This is a helper function for the Uo constraint for movsi patterns. */
4038 csky_inlinable_constant (HOST_WIDE_INT value
)
4041 return (!(CSKY_TARGET_ARCH (CK802
) || CSKY_TARGET_ARCH (CK801
))
4042 && try_csky_constant_tricks (value
, &x
, &y
));
4046 /* Return true if the constant VAL can be expressed by an 8-bit constant
4047 with a shift value, filling in *BASE and *SHIFT. */
4050 csky_shifted_imm8_constant (unsigned HOST_WIDE_INT val
,
4051 unsigned int *base
, unsigned int *shift
)
4053 unsigned HOST_WIDE_INT mask
= 0xff;
4055 val
= val
& (unsigned HOST_WIDE_INT
) 0xffffffffu
;
4059 for (i
= 0; i
< 25; i
++)
4060 if ((val
& (mask
<< i
)) == val
)
4063 *base
= (unsigned int) (val
>> i
);
4065 *shift
= (unsigned int) i
;
4073 /* Output a move of a word or less value. */
4076 csky_output_move (rtx insn ATTRIBUTE_UNUSED
, rtx operands
[],
4077 machine_mode mode ATTRIBUTE_UNUSED
)
4079 rtx dst
= operands
[0];
4080 rtx src
= operands
[1];
4081 struct csky_address op0
, op1
;
4085 /* The situation mov reg to reg. */
4088 int dstreg
= REGNO (dst
);
4089 int srcreg
= REGNO (src
);
4091 /* hilo registers exchange their places,
4092 and their order of Dimode as same as other
4093 general registers in LITTLE_ENDIAN mode. */
4094 if (TARGET_BIG_ENDIAN
)
4096 if (dstreg
== CSKY_HI_REGNUM
)
4098 else if (dstreg
== CSKY_LO_REGNUM
)
4100 else if (srcreg
== CSKY_HI_REGNUM
)
4102 else if (srcreg
== CSKY_LO_REGNUM
)
4107 if (dstreg
== CSKY_HI_REGNUM
)
4109 else if (dstreg
== CSKY_LO_REGNUM
)
4111 else if (srcreg
== CSKY_HI_REGNUM
)
4113 else if (srcreg
== CSKY_LO_REGNUM
)
4117 if (CSKY_VREG_P (dstreg
) && CSKY_VREG_P (srcreg
))
4119 if (CSKY_ISA_FEATURE (fpv2_sf
))
4120 return "fmovs\t%0, %1";
4121 else if (CSKY_ISA_FEATURE (fpv3_sf
))
4122 return "fmov.32\t%0, %1";
4126 if (CSKY_VREG_P (dstreg
))
4128 if (CSKY_ISA_FEATURE (fpv2_sf
))
4129 return "fmtvrl\t%0, %1";
4130 else if (CSKY_ISA_FEATURE (fpv3_sf
))
4131 return "fmtvr.32.1\t%0, %1";
4135 if (CSKY_VREG_P (srcreg
))
4137 if (CSKY_ISA_FEATURE (fpv2_sf
))
4138 return "fmfvrl\t%0, %1";
4139 else if (CSKY_ISA_FEATURE (fpv3_sf
))
4140 return "fmfvr.32.1\t%0, %1";
4144 if (REGNO (src
) == CSKY_CC_REGNUM
)
4147 return "mov\t%0, %1";
4149 /* The situation mov memory to reg. */
4150 else if (GET_CODE (src
) == MEM
)
4152 decompose_csky_address (XEXP (src
, 0), &op1
);
4155 switch (GET_MODE (src
))
4159 return "ldr.h\t%0, %1";
4161 return "ldr.b\t%0, %1";
4164 if (CSKY_VREG_P (REGNO (dst
)))
4166 if (CSKY_ISA_FEATURE(fpv2_sf
))
4167 return "fldrs\t%0, %1";
4168 else if (CSKY_ISA_FEATURE(fpv3_sf
))
4169 return "fldr.32\t%0, %1";
4174 return "ldr.w\t%0, %1";
4178 /* Generate lrw rx, [LABEL]. This happens when the compiler
4179 generates constant pool references and uses lrw to get the
4180 constant into memory. */
4182 return "lrw\t%0, %1";
4183 /* Generate lrs.w rx, [symbol@GOT/PLT]. */
4184 else if (flag_pic
== 1 && op1
.disp
&& GET_CODE (op1
.disp
) == UNSPEC
)
4185 return "lrs.w\t%0, %1";
4187 switch (GET_MODE (src
))
4191 return "ld.h\t%0, %1";
4193 return "ld.b\t%0, %1";
4196 if (CSKY_VREG_P (REGNO (dst
)))
4198 if (CSKY_ISA_FEATURE(fpv2_sf
))
4199 return "flds\t%0, %1";
4200 else if (CSKY_ISA_FEATURE(fpv3_sf
))
4201 return "fld.32\t%0, %1";
4206 return "ld.w\t%0, %1";
4211 /* The situation mov integer to reg. */
4212 else if (GET_CODE (src
) == CONST_INT
||
4213 (GET_CODE (src
) == CONST_DOUBLE
&& GET_MODE (src
) == SFmode
))
4216 const REAL_VALUE_TYPE
*d
;
4219 if (GET_CODE (src
) == CONST_DOUBLE
&& GET_MODE (src
) == SFmode
)
4221 d
= CONST_DOUBLE_REAL_VALUE (src
);
4222 REAL_VALUE_TO_TARGET_SINGLE (*d
, l
);
4223 operands
[1] = GEN_INT (l
);
4227 if (try_csky_constant_tricks (INTVAL (src
), &x
, &y
))
4228 return csky_output_inline_const (SImode
, operands
);
4229 /* Return '#' to split it. */
4230 else if (CSKY_CONST_OK_FOR_T (INTVAL (src
)))
4233 return "lrw\t%0, %x1\t";
4235 else if (TARGET_ANCHOR
&& GET_CODE (src
) == SYMBOL_REF
)
4237 if (SYMBOL_REF_FUNCTION_P (src
))
4238 return "lrw\t%0, %1@BTEXT";
4240 return "lrw\t%0, %1@BDATA";
4242 else if (GET_CODE (src
) == UNSPEC
4243 && XINT (src
, 1) == UNSPEC_PIC_SYMBOL_GRS
)
4244 return "grs\t%0, %1";
4246 return "lrw\t%0, %1";
4248 else if (GET_CODE (dst
) == MEM
)
4250 decompose_csky_address (XEXP (dst
, 0), &op0
);
4253 switch (GET_MODE (src
))
4256 return "str.h\t%1, %0";
4258 return "str.b\t%1, %0";
4261 if (CSKY_VREG_P (REGNO (src
)))
4263 if (CSKY_ISA_FEATURE(fpv2_sf
))
4264 return "fstrs\t%1, %0";
4265 else if (CSKY_ISA_FEATURE(fpv3_sf
))
4266 return "fstr.32\t%1, %0";
4271 return "str.w\t%1, %0";
4276 switch (GET_MODE (dst
))
4279 return "st.h\t%1, %0";
4281 return "st.b\t%1, %0";
4284 if (CSKY_VREG_P (REGNO (src
)))
4286 if (CSKY_ISA_FEATURE(fpv2_sf
))
4287 return "fsts\t%1, %0";
4288 else if (CSKY_ISA_FEATURE(fpv3_sf
))
4289 return "fst.32\t%1, %0";
4294 return "st.w\t%1, %0";
4304 /* Output a move of a word or less value. Specific for ck801. */
4307 csky_output_ck801_move (rtx insn ATTRIBUTE_UNUSED
, rtx operands
[],
4308 machine_mode mode ATTRIBUTE_UNUSED
)
4310 rtx dst
= operands
[0];
4311 rtx src
= operands
[1];
4312 struct csky_address op1
;
4317 return "mov\t%0, %1";
4318 else if (GET_CODE (src
) == MEM
)
4320 decompose_csky_address (XEXP (src
, 0), &op1
);
4322 /* Generate lrw rx, [LABEL]. This happens when the compiler
4323 generates constant pool references and uses lrw to get the
4324 constant in memory. */
4326 return "lrw\t%0, %1";
4328 switch (GET_MODE (src
))
4331 return "ld.h\t%0, %1";
4333 return "ld.b\t%0, %1";
4336 return "ld.w\t%0, %1";
4341 else if (GET_CODE (src
) == CONST_INT
)
4343 if (REGNO (dst
) > 7)
4344 return "lrw\t%0, %x1\t";
4345 else if (CSKY_CONST_OK_FOR_N (INTVAL (src
) + 1))
4346 return "movi\t%0, %1";
4347 /* Return '#' to split it. */
4348 else if (CSKY_CONST_OK_FOR_T (INTVAL (src
)))
4350 else if (csky_shifted_imm8_constant (INTVAL (src
), NULL
, NULL
))
4353 return "lrw\t%0, %x1\t";
4355 else if (GET_CODE (src
) == CONST_DOUBLE
&& GET_MODE (src
) == SFmode
)
4357 const REAL_VALUE_TYPE
*d
;
4360 d
= CONST_DOUBLE_REAL_VALUE (src
);
4361 REAL_VALUE_TO_TARGET_SINGLE (*d
, l
);
4362 operands
[1] = GEN_INT (l
);
4365 if (CSKY_CONST_OK_FOR_N (INTVAL (src
) + 1))
4366 return "movi\t%0, %1";
4368 return "lrw\t%0, %x1\t";
4370 else if (TARGET_ANCHOR
&& GET_CODE (src
) == SYMBOL_REF
)
4372 if (SYMBOL_REF_FUNCTION_P (src
))
4373 return "lrw\t%0, %1@BTEXT";
4375 return "lrw\t%0, %1@BDATA";
4378 return "lrw\t%0, %1";
4380 else if (GET_CODE (dst
) == MEM
)
4381 switch (GET_MODE (dst
))
4384 return "st.h\t%1, %0";
4386 return "st.b\t%1, %0";
4389 return "st.w\t%1, %0";
4398 /* Return a sequence of instructions to perform DI or DF move.
4399 Since the CSKY cannot move a DI or DF in one instruction, we have
4400 to take care when we see overlapping source and dest registers. */
4403 csky_output_movedouble (rtx operands
[],
4404 machine_mode mode ATTRIBUTE_UNUSED
)
4406 rtx dst
= operands
[0];
4407 rtx src
= operands
[1];
4413 int dstreg
= REGNO (dst
);
4414 int srcreg
= REGNO (src
);
4416 if (CSKY_HILO_REG_P (srcreg
))
4418 if (TARGET_BIG_ENDIAN
)
4419 return "mfhi\t%0\n\tmflo\t%R0";
4421 return "mfhi\t%R0\n\tmflo\t%0";
4423 else if (CSKY_HILO_REG_P (dstreg
))
4425 if (TARGET_BIG_ENDIAN
)
4426 return "mthi\t%1\n\tmtlo\t%R1";
4428 return "mthi\t%R1\n\tmtlo\t%1";
4430 else if (CSKY_VREG_P (srcreg
) && CSKY_VREG_P (dstreg
))
4432 if (CSKY_ISA_FEATURE(fpv2_df
))
4433 return "fmovd\t%0, %1";
4434 else if (CSKY_ISA_FEATURE(fpv3_df
))
4435 return "fmov.64\t%0, %1";
4439 else if (CSKY_VREG_P (srcreg
))
4441 /* Since the vector registers in fpuv2_soft processors
4442 like ck803f are 32 bits wide, just one insn is needed
4443 to complete the move operation. */
4444 if (TARGET_SOFT_FPU
)
4445 return "fmfvrl\t%0, %1";
4446 else if (TARGET_BIG_ENDIAN
)
4448 if (CSKY_ISA_FEATURE(fpv2_df
))
4449 return "fmfvrh\t%0, %1\n\tfmfvrl\t%R0, %1";
4450 else if (CSKY_ISA_FEATURE(fpv3_df
))
4451 return "fmfvr.64\t%R0, %0, %1";
4457 if (CSKY_ISA_FEATURE(fpv2_df
))
4458 return "fmfvrh\t%R0, %1\n\tfmfvrl\t%0, %1";
4459 else if (CSKY_ISA_FEATURE(fpv3_df
))
4460 return "fmfvr.64\t%0, %R0, %1";
4465 else if (CSKY_VREG_P (dstreg
))
4467 if (TARGET_SOFT_FPU
)
4468 return "fmtvrl\t%0, %1";
4469 else if (TARGET_BIG_ENDIAN
)
4471 if (CSKY_ISA_FEATURE(fpv2_df
))
4472 return "fmtvrh\t%0, %1\n\tfmtvrl\t%0, %R1";
4473 else if (CSKY_ISA_FEATURE(fpv3_df
))
4474 return "fmtvr.64\t%0, %R1, %1";
4480 if (CSKY_ISA_FEATURE(fpv2_df
))
4481 return "fmtvrh\t%0, %R1\n\tfmtvrl\t%0, %1";
4482 else if (CSKY_ISA_FEATURE(fpv3_df
))
4483 return "fmtvr.64\t%0, %1, %R1";
4489 /* Ensure the second source not overwritten. */
4490 if (srcreg
+ 1 == dstreg
)
4491 return "mov\t%R0, %R1\n\tmov\t%0, %1";
4493 return "mov\t%0, %1\n\tmov\t%R0, %R1";
4495 else if (GET_CODE (src
) == MEM
)
4497 rtx memexp
= XEXP (src
, 0);
4498 int dstreg
= REGNO (dst
);
4500 struct csky_address op0
;
4502 decompose_csky_address (XEXP (src
, 0), &op0
);
4504 if (GET_CODE (memexp
) == LABEL_REF
4505 || (GET_CODE (memexp
) == CONST
4506 && GET_CODE (XEXP (memexp
, 0)) == PLUS
4507 && GET_CODE (XEXP (XEXP (memexp
, 0), 0)) == LABEL_REF
))
4508 return "lrw\t%0, [%1]\n\tlrw\t%R0, [%R1]";
4509 else if (GET_CODE (memexp
) == REG
)
4510 basereg
= REGNO (memexp
);
4511 else if (GET_CODE (memexp
) == PLUS
)
4513 if (GET_CODE (XEXP (memexp
, 0)) == REG
)
4514 basereg
= REGNO (XEXP (memexp
, 0));
4515 else if (GET_CODE (XEXP (memexp
, 1)) == REG
)
4516 basereg
= REGNO (XEXP (memexp
, 1));
4525 if (CSKY_VREG_P (dstreg
))
4529 if (CSKY_ISA_FEATURE(fpv2_df
))
4530 return "fldrd\t%0, %1";
4531 else if (CSKY_ISA_FEATURE(fpv3_df
))
4532 return "fldr.64\t%0, %1";
4538 if (CSKY_ISA_FEATURE(fpv2_df
))
4539 return "fldd\t%0, %1";
4540 else if (CSKY_ISA_FEATURE(fpv3_df
))
4541 return "fld.64\t%0, %1";
4546 /* FIXME length attribute is wrong here. */
4547 if (dstreg
== basereg
)
4548 /* Just load them in reverse order. */
4549 return "ld.w\t%R0, %R1\n\tld.w\t%0, %1";
4551 return "ld.w\t%0, %1\n\tld.w\t%R0, %R1";
4553 else if (GET_CODE (src
) == CONST_INT
|| GET_CODE (src
) == CONST_DOUBLE
)
4555 split_double (src
, operands
+ 2, operands
+ 3);
4557 if (CSKY_CONST_OK_FOR_I (INTVAL (operands
[2])))
4558 output_asm_insn ("movi\t%0, %2", operands
);
4559 else if (CSKY_CONST_OK_FOR_Uc (INTVAL (operands
[2])))
4560 output_asm_insn ("bmaski\t%0, %N2", operands
);
4561 else if (CSKY_CONST_OK_FOR_Ub (INTVAL (operands
[2])))
4562 output_asm_insn ("bgeni\t%0, %P2", operands
);
4564 output_asm_insn ("lrw\t%0, %2", operands
);
4566 if (CSKY_CONST_OK_FOR_I (INTVAL (operands
[3])))
4567 output_asm_insn ("movi\t%R0, %3", operands
);
4568 else if (CSKY_CONST_OK_FOR_Uc (INTVAL (operands
[3])))
4569 output_asm_insn ("bmaski\t%R0, %N3", operands
);
4571 else if (CSKY_CONST_OK_FOR_Ub (INTVAL (operands
[3])))
4572 output_asm_insn ("bgeni\t%R0, %P3", operands
);
4574 output_asm_insn ("lrw\t%R0, %3", operands
);
4581 else if (GET_CODE (dst
) == MEM
&& GET_CODE (src
) == REG
)
4583 rtx memexp
= XEXP (dst
, 0);
4584 int srcreg
= REGNO (src
);
4586 struct csky_address op0
;
4588 decompose_csky_address (XEXP (dst
, 0), &op0
);
4590 if (GET_CODE (memexp
) == REG
)
4591 basereg
= REGNO (memexp
);
4592 else if (GET_CODE (memexp
) == PLUS
)
4594 if (GET_CODE (XEXP (memexp
, 0)) == REG
)
4595 basereg
= REGNO (XEXP (memexp
, 0));
4596 else if (GET_CODE (XEXP (memexp
, 1)) == REG
)
4597 basereg
= REGNO (XEXP (memexp
, 1));
4605 if (CSKY_VREG_P (srcreg
))
4609 if (CSKY_ISA_FEATURE(fpv2_df
))
4610 return "fstrd\t%1, %0";
4611 else if (CSKY_ISA_FEATURE(fpv3_df
))
4612 return "fstr.64\t%1, %0";
4618 if (CSKY_ISA_FEATURE(fpv2_df
))
4619 return "fstd\t%1, %0";
4620 else if (CSKY_ISA_FEATURE(fpv3_df
))
4621 return "fst.64\t%1, %0";
4626 /* FIXME length attribute is wrong here. */
4627 if (srcreg
== basereg
)
4628 /* Just load them in reverse order. */
4629 return "st.w\t%R1, %R0\n\tst.w\t%1, %0";
4631 return "st.w\t%1, %0\n\tst.w\t%R1, %R0";
4639 csky_output_ck801_movedouble (rtx operands
[],
4640 machine_mode mode ATTRIBUTE_UNUSED
)
4642 rtx dst
= operands
[0];
4643 rtx src
= operands
[1];
4649 int dstreg
= REGNO (dst
);
4650 int srcreg
= REGNO (src
);
4652 /* Ensure the second source not overwritten. */
4653 if (srcreg
+ 1 == dstreg
)
4654 return "mov\t%R0, %R1\n\tmov\t%0, %1";
4656 return "mov\t%0, %1\n\tmov\t%R0, %R1";
4658 else if (GET_CODE (src
) == MEM
)
4660 rtx memexp
= XEXP (src
, 0);
4661 int dstreg
= REGNO (dst
);
4663 struct csky_address op0
;
4665 decompose_csky_address (XEXP (src
, 0), &op0
);
4667 if (GET_CODE (memexp
) == LABEL_REF
4668 || (GET_CODE (memexp
) == CONST
4669 && GET_CODE (XEXP (memexp
, 0)) == PLUS
4670 && GET_CODE (XEXP (XEXP (memexp
, 0), 0)) == LABEL_REF
))
4671 return "lrw\t%0, [%1]\n\tlrw\t%R0, [%R1]";
4672 else if (GET_CODE (memexp
) == REG
)
4673 basereg
= REGNO (memexp
);
4674 else if (GET_CODE (memexp
) == PLUS
)
4676 if (GET_CODE (XEXP (memexp
, 0)) == REG
)
4677 basereg
= REGNO (XEXP (memexp
, 0));
4678 else if (GET_CODE (XEXP (memexp
, 1)) == REG
)
4679 basereg
= REGNO (XEXP (memexp
, 1));
4686 /* FIXME length attribute is wrong here. */
4687 if (dstreg
== basereg
)
4688 /* Just load them in reverse order. */
4689 return "ld.w\t%R0, %R1\n\tld.w\t%0, %1";
4691 return "ld.w\t%0, %1\n\tld.w\t%R0, %R1";
4693 else if (GET_CODE (src
) == CONST_INT
|| GET_CODE (src
) == CONST_DOUBLE
)
4695 split_double (src
, operands
+ 2, operands
+ 3);
4697 if (REGNO (dst
) <= 7
4698 && CSKY_CONST_OK_FOR_N (INTVAL (operands
[2]) + 1))
4699 output_asm_insn ("movi\t%0, %2", operands
);
4701 output_asm_insn ("lrw\t%0, %2", operands
);
4704 if (REGNO (dst
) <= 6
4705 && CSKY_CONST_OK_FOR_N (INTVAL (operands
[3]) + 1))
4706 output_asm_insn ("movi\t%R0, %3", operands
);
4708 output_asm_insn ("lrw\t%R0, %3", operands
);
4717 else if (GET_CODE (dst
) == MEM
&& GET_CODE (src
) == REG
)
4719 rtx memexp
= XEXP (dst
, 0);
4720 int srcreg
= REGNO (src
);
4722 struct csky_address op0
;
4724 decompose_csky_address (XEXP (dst
, 0), &op0
);
4726 if (GET_CODE (memexp
) == REG
)
4727 basereg
= REGNO (memexp
);
4728 else if (GET_CODE (memexp
) == PLUS
)
4730 if (GET_CODE (XEXP (memexp
, 0)) == REG
)
4731 basereg
= REGNO (XEXP (memexp
, 0));
4732 else if (GET_CODE (XEXP (memexp
, 1)) == REG
)
4733 basereg
= REGNO (XEXP (memexp
, 1));
4740 /* FIXME length attribute is wrong here. */
4741 if (srcreg
== basereg
)
4742 /* Just load them in reverse order. */
4743 return "st.w\t%R1, %R0\n\tst.w\t%1, %0";
4745 return "st.w\t%1, %0\n\tst.w\t%R1, %R0";
4751 /* Calculate the instruction's length for moving double-word data. */
4754 csky_get_movedouble_length(rtx operands
[])
4756 rtx dst
= operands
[0];
4757 rtx src
= operands
[1];
4763 int dstreg
= REGNO (dst
);
4764 int srcreg
= REGNO (src
);
4766 if (CSKY_VREG_P (srcreg
) && CSKY_VREG_P (dstreg
))
4771 else if (GET_CODE (src
) == MEM
)
4773 rtx memexp
= XEXP (src
, 0);
4774 int dstreg
= REGNO (dst
);
4775 struct csky_address op0
;
4776 decompose_csky_address (XEXP (src
, 0), &op0
);
4778 if (GET_CODE (memexp
) == LABEL_REF
)
4780 if (CSKY_VREG_P (dstreg
))
4784 else if (GET_CODE (src
) == CONST_INT
|| GET_CODE (src
) == CONST_DOUBLE
)
4786 split_double (src
, operands
+ 2, operands
+ 3);
4787 if (CSKY_CONST_OK_FOR_N (INTVAL (operands
[2]) + 1)
4788 && CSKY_CONST_OK_FOR_N (INTVAL (operands
[3]) + 1)
4789 && REGNO (operands
[0]) < 6)
4795 else if (GET_CODE (dst
) == MEM
&& GET_CODE (src
) == REG
)
4797 rtx memexp
= XEXP (dst
, 0);
4798 int srcreg
= REGNO (src
);
4800 if (CSKY_VREG_P (srcreg
))
4803 if (GET_CODE (memexp
) == REG
)
4805 else if (GET_CODE (memexp
) == PLUS
)
4807 if (GET_CODE (XEXP (memexp
, 0)) == REG
)
4808 offset
= INTVAL (XEXP (memexp
, 1));
4809 else if (GET_CODE (XEXP (memexp
, 1)) == REG
)
4810 offset
= INTVAL (XEXP (memexp
, 0));
4817 if (srcreg
<= 6 && offset
<= 1020)
4819 else if ((srcreg
== 7 && offset
<= 1024) || (srcreg
<= 7 && offset
== 1024))
4830 /* Output float point load/store instructions for fpuv3. */
4833 fpuv3_output_move (rtx
*operands
)
4835 rtx reg
, mem
, addr
, ops
[2];
4836 bool isload
= REG_P (operands
[0]);
4838 const char *templ
= "f%s%s.%s\t%%0, %%1";
4842 reg
= operands
[isload
? 0 : 1];
4843 mem
= operands
[isload
? 1 : 0];
4845 gcc_assert (REG_P (reg
));
4846 gcc_assert (CSKY_VREG_P (REGNO (reg
)));
4847 gcc_assert (MEM_P (mem
));
4849 mode
= GET_MODE (reg
);
4850 const char *type
= mode
== DFmode
? "64" :
4851 mode
== SFmode
? "32" :
4852 mode
== HFmode
? "16" :
4854 gcc_assert(type
!= NULL
);
4856 addr
= XEXP (mem
, 0);
4857 struct csky_address caddr
;
4858 decompose_csky_address (addr
, &caddr
);
4862 sprintf (buff
, templ
,
4863 isload
? "ld" : "st",
4864 caddr
.index
? "r" : "",
4866 output_asm_insn (buff
, ops
);
4871 /* Check if a const_double can be used by a VFP fmovi instruction. */
4874 fpuv3_const_double_rtx (rtx x
)
4876 REAL_VALUE_TYPE r
, m
;
4877 r
= *CONST_DOUBLE_REAL_VALUE (x
);
4879 /* Fpuv3 doesn't support the following values. */
4880 if (REAL_VALUE_ISINF (r
) || REAL_VALUE_ISNAN (r
) || REAL_VALUE_MINUS_ZERO (r
)
4881 || r
.cl
== rvc_zero
)
4884 /* Extract sign, exponent and mantissa. */
4886 r
= real_value_abs (&r
);
4887 exponent
= REAL_EXP (&r
);
4890 unsigned HOST_WIDE_INT mantissa
, mant_hi
;
4891 unsigned HOST_WIDE_INT mask
;
4892 int point_pos
= 2 * HOST_BITS_PER_WIDE_INT
- 1;
4893 real_ldexp (&m
, &r
, point_pos
- exponent
);
4894 wide_int w
= real_to_integer (&m
, &fail
, HOST_BITS_PER_WIDE_INT
* 2);
4895 mantissa
= w
.elt (0);
4896 mant_hi
= w
.elt (1);
4900 if (!IN_RANGE (exponent
, -4, 11))
4903 /* If there are bits set in the low part of the mantissa, these values are
4908 /* Now, make the mantissa contain the most-significant bits, and the
4909 point_pos indicates the number of these bits. */
4910 point_pos
-= HOST_BITS_PER_WIDE_INT
;
4913 /* We can only allow a mantissa of 9 significant digits, top of which is always 1. */
4914 mask
= ((unsigned HOST_WIDE_INT
)1 << (point_pos
- 9)) - 1;
4915 if ((mantissa
& mask
) != 0)
4922 /* Split operands for an AND expression when OPERANDS[2] is a constant.
4923 Note operands[0] is marked earlyclobber in this case and can be
4924 overwritten. Return true if "DONE", false otherwise. */
4927 csky_split_and (rtx
*operands
)
4929 HOST_WIDE_INT mask
= INTVAL (operands
[2]);
4930 rtx not_value
= GEN_INT (~mask
);
4933 /* All zeros or all ones can be handled by a move instruction. */
4936 emit_move_insn (operands
[0], const0_rtx
);
4941 emit_move_insn (operands
[0], operands
[1]);
4945 /* Check for constants that can be handled directly by the 32-bit andi
4947 if (CSKY_ISA_FEATURE (E2
) && csky_arith_O_operand (operands
[2], SImode
))
4950 /* Try to transform to andni instruction. */
4951 if (CSKY_ISA_FEATURE (E2
) && csky_arith_O_operand (not_value
, SImode
))
4953 emit_insn (gen_cskyv2_andnsi3 (operands
[0], not_value
, operands
[1]));
4957 /* If there are only one or two 0 bits in the constant, we can
4958 replace the operation with bclri instructions on those bits.
4959 Note CK801 has only the 16-bit bclri that operates on a single
4960 register, so we must count a move if we are post-reload. */
4961 if (popcount_hwi (~mask
& 0xffffffff)
4962 <= (reload_completed
&& !CSKY_ISA_FEATURE (E2
) ? 1 : 2))
4964 rtx input
= operands
[1];
4966 if (!CSKY_ISA_FEATURE (E2
))
4968 emit_move_insn (operands
[0], input
);
4969 input
= operands
[0];
4972 for (i
= 0; i
< 32; i
++)
4973 if ((mask
& (1 << i
)) == 0x0)
4975 emit_insn (gen_bclri (operands
[0], input
, GEN_INT (i
)));
4976 input
= operands
[0];
4981 /* If the constant mask is outside the [0, 4095] range for
4982 constraint O, or if constraint O is not allowed (ck801),
4983 maybe the constant is a contiguous bit range that we can
4984 handle by bit extract (low bits) or shifts (high bits). */
4985 for (i
= (CSKY_ISA_FEATURE (E2
) ? 13 : 1); i
< 32; i
++)
4987 if ((((HOST_WIDE_INT
) 1) << i
) - 1 == mask
)
4989 if (CSKY_ISA_FEATURE (2E3
))
4990 emit_insn (gen_cskyv2_extzv (operands
[0], operands
[1],
4991 GEN_INT (i
), const0_rtx
));
4994 rtx shift
= GEN_INT (32 - i
);
4995 rtx reg
= (reload_completed
4996 ? operands
[0] : gen_reg_rtx (SImode
));
4998 emit_insn (gen_ashlsi3 (reg
, operands
[1], shift
));
4999 emit_insn (gen_lshrsi3 (operands
[0], reg
, shift
));
5003 else if ((((HOST_WIDE_INT
) 1) << i
) - 1 == ~mask
)
5005 rtx shift
= GEN_INT (i
);
5006 rtx reg
= (reload_completed
5007 ? operands
[0] : gen_reg_rtx (SImode
));
5009 emit_insn (gen_lshrsi3 (reg
, operands
[1], shift
));
5010 emit_insn (gen_ashlsi3 (operands
[0], reg
, shift
));
5015 /* If the constant is a negative number, it seems better to use
5016 andn and copy the NOT_VALUE to a register instead of the
5017 original value, since the NOT_VALUE is always smaller and thus
5018 more likely to be representable as a small constant.
5019 This transformation can only be done before reload because
5020 it requires a temporary. Hopefully register allocation can get
5021 rid of the extra move required for CK801. */
5022 if (!reload_completed
&& INTVAL (operands
[2]) < 0)
5024 rtx reg
= copy_to_mode_reg (SImode
, not_value
);
5026 if (CSKY_ISA_FEATURE (E2
))
5027 emit_insn (gen_cskyv2_andnsi3 (operands
[0], reg
, operands
[1]));
5030 emit_move_insn (operands
[0], operands
[1]);
5031 emit_insn (gen_ck801_andnsi3 (operands
[0], reg
, operands
[0]));
5036 /* If the above ways are all not working, move the constant
5037 to a register. We can clobber operands[0] as it is
5038 marked earlyclobber in the insn constraints, but then we have to
5039 swap operands 1 and 2 to match the constraints on the 2-operand
5040 16-bit and instruction. */
5041 if (reload_completed
)
5043 emit_move_insn (operands
[0], operands
[2]);
5044 operands
[2] = operands
[1];
5045 operands
[1] = operands
[0];
5048 operands
[2] = copy_to_mode_reg (SImode
, operands
[2]);
5052 /* Split operands for an IOR expression when OPERANDS[2] is a constant.
5053 Note operands[0] is marked earlyclobber in this case and can be
5054 overwritten. Return true if "DONE", false otherwise. */
5057 csky_split_ior (rtx
*operands
)
5059 HOST_WIDE_INT mask
= INTVAL (operands
[2]);
5062 /* All zeros or all ones can be handled by a move instruction. */
5065 emit_move_insn (operands
[0], operands
[1]);
5070 emit_move_insn (operands
[0], gen_int_mode (-1, SImode
));
5074 /* Check for constants that can be handled directly by the 32-bit ori
5076 if (CSKY_ISA_FEATURE (E2
) && csky_literal_I_operand (operands
[2], SImode
))
5079 /* If there are only one or two 1 bits in the value, we can replace
5080 the operation with bseti instructions to set those bits.
5081 Note CK801 has only the 16-bit bclri that operates on a single
5082 register, so we must count a move if we are post-reload. */
5083 if (popcount_hwi (mask
& 0xffffffff)
5084 <= (reload_completed
&& !CSKY_ISA_FEATURE (E2
) ? 1 : 2))
5086 rtx input
= operands
[1];
5088 if (!CSKY_ISA_FEATURE (E2
))
5090 emit_move_insn (operands
[0], input
);
5091 input
= operands
[0];
5094 for (i
= 0; i
< 32; i
++)
5095 if (mask
& (1 << i
))
5097 emit_insn (gen_bseti (operands
[0], input
, GEN_INT (i
)));
5098 input
= operands
[0];
5103 /* If the above ways are all not working, move the constant
5104 to a register. We can clobber operands[0] as it is
5105 marked earlyclobber in the insn constraints, but then we have to
5106 swap operands 1 and 2 to match the constraints on the 2-operand
5107 16-bit ior instruction. */
5108 if (reload_completed
)
5110 emit_move_insn (operands
[0], operands
[2]);
5111 operands
[2] = operands
[1];
5112 operands
[1] = operands
[0];
5115 operands
[2] = copy_to_mode_reg (SImode
, operands
[2]);
5120 /* Split operands for an XOR expression when OPERANDS[2] is a constant.
5121 Note operands[0] is marked earlyclobber in this case and can be
5122 overwritten. Return true if "DONE", false otherwise. */
5125 csky_split_xor (rtx
*operands
)
5127 HOST_WIDE_INT mask
= INTVAL (operands
[2]);
5129 /* All zeros can be turned into move instruction. */
5132 emit_move_insn (operands
[0], operands
[1]);
5136 /* All ones can be turned into a bitwise not. */
5139 if (CSKY_ISA_FEATURE (E2
))
5140 emit_insn (gen_cskyv2_one_cmplsi2 (operands
[0], operands
[1]));
5143 emit_move_insn (operands
[0], operands
[1]);
5144 emit_insn (gen_ck801_one_cmplsi2 (operands
[0], operands
[0]));
5149 /* Check for constants that can be handled directly by the 32-bit xori
5151 if (CSKY_ISA_FEATURE (E2
) && csky_arith_O_operand (operands
[2], SImode
))
5154 /* If the above ways are all not working, move the constant
5155 to a register. We can clobber operands[0] as it is
5156 marked earlyclobber in the insn constraints, but then we have to
5157 swap operands 1 and 2 to match the constraints on the 2-operand
5158 16-bit ior instruction. */
5159 if (reload_completed
)
5161 emit_move_insn (operands
[0], operands
[2]);
5162 operands
[2] = operands
[1];
5163 operands
[1] = operands
[0];
5166 operands
[2] = copy_to_mode_reg (SImode
, operands
[2]);
5171 /* Return true if X is an address form involving a symbol or label ref. */
5174 csky_symbolic_address_p (rtx x
)
5176 switch (GET_CODE (x
))
5183 return ((GET_CODE (XEXP (x
, 0)) == SYMBOL_REF
5184 || GET_CODE (XEXP (x
, 0)) == LABEL_REF
)
5185 && GET_CODE (XEXP (x
, 1)) == CONST_INT
);
/* NOTE(review): this function was damaged in extraction -- statements are
   split across lines, the file's original line numbers are fused into the
   text, and the switch/case labels, braces and return statements are
   missing.  Do not edit logic here; restore it from upstream
   gcc/config/csky/csky.c first.  The visible fragments show: a float
   compare is delegated to csky_emit_compare_float; constant OP1 is
   canonicalized (GTU/LE/GT adjusted to imm+1 forms when the new immediate
   fits J/Uk constraints); non-CONST_INT constants are forced to a
   register; then per-code immediate-range checks force OP1 into a
   register before the final SET of the CC register is emitted.  */
5192 /* Emit a comparison instruction.
5193 Return true if an inverted comparison is generated. */
5196 csky_emit_compare (enum rtx_code code
, rtx op0
, rtx op1
)
5199 rtx cc_reg
= gen_rtx_REG (CCmode
, CSKY_CC_REGNUM
);
/* Floating-point compares take a separate path.  */
5201 if (GET_MODE_CLASS(GET_MODE (op0
)) == MODE_FLOAT
)
5202 return csky_emit_compare_float(code
, op0
, op1
);
5204 if (GET_CODE (op1
) == CONST_INT
)
5206 HOST_WIDE_INT val
= INTVAL (op1
);
5211 /* Unsigned (GTU 0) is the same as (NE 0); everything else is
5212 converted below to LEU (reversed cmphs). */
5215 /* Check whether (GTU A imm) can become (GEU A imm + 1). */
5216 else if (TARGET_MINI_REGISTERS
5217 ? CSKY_CONST_OK_FOR_J (val
+ 1)
5218 : CSKY_CONST_OK_FOR_Uk (val
+ 1))
5220 op1
= GEN_INT (val
+ 1);
5224 /* Check whether (LE A imm) can become (LT A imm + 1),
5225 or (GT A imm) can become (GE A imm + 1). */
5228 if (TARGET_MINI_REGISTERS
5229 ? CSKY_CONST_OK_FOR_J (val
+ 1)
5230 : CSKY_CONST_OK_FOR_Uk (val
+ 1))
5232 op1
= GEN_INT (val
+ 1);
5233 code
= code
== LE
? LT
: GE
;
/* Non-integer constants (symbols etc.) must live in a register.  */
5242 if (CONSTANT_P (op1
) && GET_CODE (op1
) != CONST_INT
)
5243 op1
= force_reg (GET_MODE (op1
), op1
);
5245 /* cmpnei: 0-31 (K immediate)
5246 ti: 1-32 (J immediate, 0 using btsti x,31). */
5250 /* Use inverted condition, cmpne. */
5255 /* Use normal condition, cmpne. */
5257 if (GET_CODE (op1
) == CONST_INT
5258 && (TARGET_MINI_REGISTERS
5259 ? !csky_literal_K_operand (op1
, SImode
)
5260 : !csky_literal_I_operand (op1
, SImode
)))
5261 op1
= force_reg (SImode
, op1
);
5264 /* Use inverted condition, reversed cmplt. */
5269 /* Use normal condition, reversed cmplt. */
5271 if (GET_CODE (op1
) == CONST_INT
)
5272 op1
= force_reg (SImode
, op1
);
5275 /* Use inverted condition, cmplt. */
5280 /* Use normal condition, cmplt. */
5282 /* covered by btsti x,31. */
5283 if (GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) != 0
5284 && (TARGET_MINI_REGISTERS
5285 ? !csky_literal_J_operand (op1
, SImode
)
5286 : !csky_literal_Uk_operand (op1
, SImode
)))
5287 op1
= force_reg (SImode
, op1
);
5290 /* Use inverted condition, cmple. */
5292 /* We coped with unsigned > 0 above. */
5293 gcc_assert (GET_CODE (op1
) != CONST_INT
|| INTVAL (op1
) != 0);
5297 /* Use normal condition, reversed cmphs. */
5299 if (GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) != 0)
5300 op1
= force_reg (SImode
, op1
);
5303 /* Use inverted condition, cmphs. */
5308 /* Use normal condition, cmphs. */
5310 if (GET_CODE (op1
) == CONST_INT
&& INTVAL (op1
) != 0
5311 && (TARGET_MINI_REGISTERS
5312 ? !csky_literal_J_operand (op1
, SImode
)
5313 : !csky_literal_Uk_operand (op1
, SImode
)))
5314 op1
= force_reg (SImode
, op1
);
/* Finally emit (set cc (code:CC op0 op1)); the function presumably
   returns whether an inverted condition was produced -- the returns
   are among the missing lines.  */
5321 emit_insn (gen_rtx_SET (cc_reg
,
5322 gen_rtx_fmt_ee (code
, CCmode
, op0
, op1
)));
5326 /* Return true if push/pop can be used to save/restore all the registers
5327 indicated by MASK. We currently don't attempt to handle situations where
5328 some of the registers could be handled by push/pop and others saved and
5329 restored individually. */
5332 csky_can_use_pushpop (unsigned int mask
)
5337 if (!TARGET_PUSHPOP
)
5343 /* Regs 0-3, 12-14, 18-27, 29-31 cannot be in the mask. */
5344 if (mask
& 0xeffc700f)
5347 /* Regs in the range r4-r11 must be contiguous. */
5348 for (end_reg
= 0, i
= 11; i
>= 4; i
--)
5350 if (!end_reg
&& (mask
& (1 << i
)))
5352 if (end_reg
&& !(mask
& (1 << i
)))
5356 /* Likewise for regs in the range r16-r17. */
5357 for (end_reg
= 0, i
= 17; i
>= 16; i
--)
5359 if (!end_reg
&& (mask
& (1 << i
)))
5361 if (end_reg
&& !(mask
& (1 << i
)))
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, and the tail of the
   function (the register-counting loop body, the assignments through *BR
   and *ER, and the returns) is missing.  Restore from upstream
   gcc/config/csky/csky.c.  The visible fragments scan r4-r11 for set bits
   in MASK and accept only when the count is within
   [CSKY_MIN_MULTIPLE_STLD, CSKY_MAX_MULTIPLE_STLD].  */
5369 /* Return true if store/load multiple instructions can be used to
5370 save/restore at least some of the registers indicated by MASK.
5371 Unlike the push/pop case, this does handle partial ranges.
5372 Set *BR and *ER to the beginning and end (respectively) of the
5373 register range that can be handled. */
5376 csky_can_use_ldstm (int mask
, int *br
, int *er
)
5379 int begin_reg
= 0, end_reg
= 0;
5382 if (!TARGET_MULTIPLE_STLD
)
5385 /* We'll only handle registers in the range 4-11, the contiguous range
5386 of caller-saved registers. Higher-numbered registers are handled
5387 individually in addition to this, but we'll give up on doing ldstm
5388 entirely if we need to save/restore the low-numbered EH registers. */
5392 for (regno
= 4; regno
<= 11; regno
++)
5394 if (mask
& 1 << regno
)
5405 if (count
>= CSKY_MIN_MULTIPLE_STLD
&& count
<= CSKY_MAX_MULTIPLE_STLD
)
/* Return the assembler text for the epilogue return sequence of the
   current function.
   NOTE(review): reconstructed from a line-mangled region; the naked and
   fall-through return strings were among the missing lines -- verify
   against upstream gcc/config/csky/csky.c.  */
const char *
csky_output_return_instruction (void)
{
  unsigned long func_type = get_csky_current_func_type ();

  /* Naked functions supply their own epilogue.  */
  if (CSKY_FUNCTION_IS_NAKED (func_type))
    return "";
  /* Interrupt handlers restore the saved state and return via nir.  */
  if (CSKY_FUNCTION_IS_INTERRUPT (func_type))
    return "ipop\n\tnir\n";
  else
    return "rts\n";
}
5431 /* Adjust the stack pointer by OFFSET bytes. OFFSET is negative if this
5432 is in the prologue, positive if in the epilogue. This may require
5433 multiple instructions and/or use of CSKY_STACKADJUST_REGNUM as
5434 a scratch register. Emit CFA notes as appropriate. */
5436 expand_csky_stack_adjust (int offset
)
5440 int size
= (offset
> 0 ? offset
: -offset
);
5445 /* If OFFSET is too large for addi/subi, load it into
5446 CSKY_STACKADJUST_REGNUM and use a register add/sub instead.
5447 This case is not mentioned in the ABI documentation, but it is
5448 supported by GDB prologue analysis provided that the instruction(s)
5449 to initialize CSKY_STACKADJUST_REGNUM appear directly before
5450 the sub. Depending on the value of OFFSET, this might be a
5451 lrw instruction or the "tricks" used by csky_output_inline_const to
5452 encode special-case integer constants. */
5453 if (size
> CSKY_MAX_SP_ADJUST
* 2)
5457 /* We should have reserved the scratch register already in
5458 csky_layout_stack_frame. */
5459 gcc_assert (cfun
->machine
->reg_size
!= 0
5460 && (cfun
->machine
->reg_mask
5461 & (1 << CSKY_STACKADJUST_REGNUM
)));
5463 /* Prevent the optimizer from reordering these instructions to
5465 if (!flag_sched_prolog
)
5466 emit_insn (gen_blockage ());
5468 tmp
= gen_rtx_REG (SImode
, CSKY_STACKADJUST_REGNUM
);
5469 emit_move_insn (tmp
, GEN_INT (size
));
5472 set
= gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, tmp
);
5474 set
= gen_subsi3 (stack_pointer_rtx
, stack_pointer_rtx
, tmp
);
5475 insn
= emit_insn (set
);
5476 RTX_FRAME_RELATED_P (insn
) = 1;
5477 dwarf
= gen_rtx_SET (stack_pointer_rtx
,
5478 plus_constant (Pmode
, stack_pointer_rtx
, offset
));
5479 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, dwarf
);
5481 /* More make GDB happy. */
5482 if (!flag_sched_prolog
)
5483 emit_insn (gen_blockage ());
5486 /* Use one or two addi or subi insns to adjust stack. */
5490 int delta
= (size
> CSKY_MAX_SP_ADJUST
5491 ? CSKY_MAX_SP_ADJUST
: size
);
5494 set
= gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
5497 set
= gen_subsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
5499 insn
= emit_insn (set
);
5500 RTX_FRAME_RELATED_P (insn
) = 1;
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, and structural lines
   (declarations, braces, the first loop's break) missing.  Restore from
   upstream gcc/config/csky/csky.c before editing.  The visible fragments
   show the standard push_multi pattern: a PARALLEL body with a
   PRE_MODIFY store plus USEs, and a separate SEQUENCE attached as a
   REG_FRAME_RELATED_EXPR note with one SP decrement and per-register
   stores for the DWARF2 unwinder.  */
5506 /* Generate and emit an insn that we will recognize as a push_multi.
5507 Unfortunately, since this insn does not reflect very well the actual
5508 semantics of the operation, we need to annotate the insn for the benefit
5509 of DWARF2 frame unwind information. DWARF_REGS_MASK is a subset of
5510 MASK for registers that should be annotated for DWARF2 frame unwind
5514 emit_csky_regs_push (unsigned long mask
)
5521 int dwarf_par_index
;
/* Count the registers to be pushed.  */
5523 for (i
= 0; i
< CSKY_NGPR_REGS
; i
++)
5525 if (mask
& (1 << i
))
5529 /* The reg range for push is:r4-r11,r15-r17,r28. */
5530 gcc_assert (num_regs
&& num_regs
<= 12)
5532 /* For the body of the insn we are going to generate an UNSPEC in
5533 parallel with several USEs. This allows the insn to be recognized
5534 by the push_multi pattern in the csky.md file.
5536 The body of the insn looks something like this:
5539 (set (mem:BLK (pre_modify:SI (reg:SI sp)
5540 (const_int:SI <num>)))
5541 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSHPOP_MULT))
5547 For the frame note however, we try to be more explicit and actually
5548 show each register being stored into the stack frame, plus a (single)
5549 decrement of the stack pointer. We do it this way in order to be
5550 friendly to the stack unwinding code, which only wants to see a single
5551 stack decrement per instruction. The RTL we generate for the note looks
5552 something like this:
5555 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
5556 (set (mem:SI (reg:SI sp)) (reg:SI r4))
5557 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI XX))
5558 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI YY))
5562 FIXME:: In an ideal world the PRE_MODIFY would not exist and
5563 instead we'd have a parallel expression detailing all
5564 the stores to the various memory addresses so that debug
5565 information is more up-to-date. Remember however while writing
5566 this to take care of the constraints with the push instruction.
5568 Note also that this has to be taken care of for the VFP registers.
5570 For more see PR43399. */
5572 par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (num_regs
));
5573 dwarf
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (num_regs
+ 1));
5574 dwarf_par_index
= 1;
/* First saved register: build the PRE_MODIFY store and the first
   dwarf entry (presumably breaks out after handling it -- the break
   is among the missing lines).  */
5576 for (i
= 0; i
< CSKY_NGPR_REGS
; i
++)
5577 if (mask
& (1 << i
))
5579 rtx reg
= gen_rtx_REG (SImode
, i
);
5580 rtx addr
= plus_constant (Pmode
, stack_pointer_rtx
, -4 * num_regs
);
5581 tmp
= gen_frame_mem (BLKmode
,
5582 gen_rtx_PRE_MODIFY (Pmode
,
5583 stack_pointer_rtx
, addr
));
5586 gen_rtx_UNSPEC (BLKmode
,
5588 UNSPEC_PUSHPOP_MULT
));
5589 tmp
= gen_rtx_SET (gen_frame_mem (SImode
, stack_pointer_rtx
),
5591 RTX_FRAME_RELATED_P (tmp
) = 1;
5592 XVECEXP (dwarf
, 0, dwarf_par_index
++) = tmp
;
/* Remaining saved registers become USEs in the body and explicit
   stores in the dwarf note.  */
5597 for (j
= 1, i
++; j
< num_regs
; i
++)
5598 if (mask
& (1 << i
))
5600 rtx reg
= gen_rtx_REG (SImode
, i
);
5601 rtx addr
= plus_constant (Pmode
, stack_pointer_rtx
, 4 * j
);
5602 tmp
= gen_rtx_SET (gen_frame_mem (SImode
, addr
), reg
);
5603 RTX_FRAME_RELATED_P (tmp
) = 1;
5604 XVECEXP (par
, 0, j
) = gen_rtx_USE (VOIDmode
, reg
);
5605 XVECEXP (dwarf
, 0, dwarf_par_index
++) = tmp
;
5609 par
= emit_insn (par
);
/* Slot 0 of the dwarf SEQUENCE is the single SP decrement.  */
5611 tmp
= gen_rtx_SET (stack_pointer_rtx
,
5612 plus_constant (Pmode
, stack_pointer_rtx
, -4 * num_regs
));
5613 RTX_FRAME_RELATED_P (tmp
) = 1;
5614 XVECEXP (dwarf
, 0, 0) = tmp
;
5616 add_reg_note (par
, REG_FRAME_RELATED_EXPR
, dwarf
);
5617 RTX_FRAME_RELATED_P (par
) = 1;
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, declarations and
   braces missing.  Restore from upstream gcc/config/csky/csky.c.  The
   visible fragments build a pop_multi PARALLEL: slot 0 is (return),
   slot 1 a POST_MODIFY load wrapped in UNSPEC_PUSHPOP_MULT, remaining
   slots are USEs; the whole thing is emitted as a jump insn.  */
5623 /* Generate and emit an insn pattern that we will recognize as a pop_multi.
5624 SAVED_REGS_MASK shows which registers need to be restored.
5626 Unfortunately, since this insn does not reflect very well the actual
5627 semantics of the operation, we need to annotate the insn for the benefit
5628 of DWARF2 frame unwind information. */
5631 emit_csky_regs_pop (unsigned long mask
)
/* Count the registers to be popped.  */
5637 for (i
= 0; i
< CSKY_NGPR_REGS
; i
++)
5638 if (mask
& (1 << i
))
5641 /* The reg range for push is:r4-r11,r15-r17,r28. */
5642 gcc_assert (num_regs
&& num_regs
<= 12);
5644 /* The first element is (return),
5645 the second element is
5646 (set (reg:SI 'first reg number')
5647 (unspec:SI [(mem)] UNSPEC_PUSHPOP_MULT),
5648 the rest elements is (use (reg:SI 'rest reg number')),
5649 so the length should be number of register to be poped
5651 par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (num_regs
+ 1));
5653 XVECEXP (par
, 0, 0) = ret_rtx
;
/* First restored register: POST_MODIFY load through SP.  */
5655 for (i
= 0; i
< CSKY_NGPR_REGS
; i
++)
5656 if (mask
& (1 << i
))
5658 rtx reg
= gen_rtx_REG (SImode
, i
);
5659 rtx addr
= plus_constant (Pmode
, stack_pointer_rtx
, 4 * num_regs
);
5660 rtx tmp
= gen_frame_mem (SImode
,
5661 gen_rtx_POST_MODIFY (Pmode
,
5662 stack_pointer_rtx
, addr
));
5665 gen_rtx_UNSPEC (SImode
,
5667 UNSPEC_PUSHPOP_MULT
));
/* Remaining restored registers become USEs.  */
5671 for (j
= 2, i
++; j
< (num_regs
+ 1); i
++)
5672 if (mask
& (1 << i
))
5674 rtx reg
= gen_rtx_REG (SImode
, i
);
5675 XVECEXP (par
, 0, j
) = gen_rtx_USE (VOIDmode
, reg
);
/* Emitted as a jump: the pop performs the function return.  */
5679 par
= emit_jump_insn (par
);
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, and declarations,
   braces, returns and some argument lists missing.  Restore from upstream
   gcc/config/csky/csky.c before editing.  Visible flow: bail out for
   naked functions; lay out the frame; adjust SP past the incoming-arg
   overflow area (storing varargs registers if needed); save callee-saved
   registers by push, stm, and/or individual stores; set up the hard frame
   pointer; allocate locals/outgoing args; materialize the GOT pointer for
   PIC; finally record stack usage and emit a scheduling blockage.  */
5683 /* Generate the function prologue. */
5686 csky_expand_prologue (void)
5689 unsigned long func_type
= get_csky_current_func_type ();
5690 unsigned int reg_mask
;
/* Naked functions get no compiler-generated prologue.  */
5693 if (CSKY_FUNCTION_IS_NAKED (func_type
))
5695 if (flag_stack_usage_info
)
5696 current_function_static_stack_size
= 0;
5700 csky_layout_stack_frame ();
5701 reg_mask
= cfun
->machine
->reg_mask
;
5702 reg_size
= cfun
->machine
->reg_size
;
5704 /* Adjust stack pointer past argument overflow area. */
5705 if (cfun
->machine
->arg_size
!= 0)
5707 int offset
= cfun
->machine
->arg_size
;
5708 expand_csky_stack_adjust (- offset
);
5710 /* If we have a parameter passed partially in regs and partially
5711 in memory, the registers will have been stored to memory already
5712 in function.c. So we only need to copy varargs from registers
5714 if (cfun
->machine
->uses_anonymous_args
)
5716 int rn
= CSKY_FIRST_PARM_REGNUM
+ CSKY_NPARM_REGS
- 1;
5717 for (offset
-= 4; offset
>= 0; offset
-= 4, rn
--)
5719 rtx dst
= gen_frame_mem (SImode
,
5720 plus_constant (Pmode
,
5723 insn
= emit_move_insn (dst
, gen_rtx_REG (SImode
, rn
));
5724 RTX_FRAME_RELATED_P (insn
) = 1;
5729 /* Push caller-saved registers to stack. */
5730 if (csky_can_use_pushpop (reg_mask
))
5731 emit_csky_regs_push (reg_mask
);
/* Otherwise (presumably an else branch -- missing line): use stm
   and/or individual stores.  */
5734 int sreg
= -1, ereg
= -1;
5735 bool stm_p
= csky_can_use_ldstm (reg_mask
, &sreg
, &ereg
);
5736 int stm_regs
= stm_p
? ereg
- sreg
+ 1 : 0;
5737 int stm_size
= stm_regs
* 4;
5739 /* First adjust the SP to the low end of the register save area. */
5740 expand_csky_stack_adjust (- reg_size
);
5742 /* Emit individual register saves. Even if we are going to emit an
5743 stm, we may need to save individual registers above that too. */
5744 if (reg_size
> stm_size
)
5746 int offset
= reg_size
- 4;
5748 for ( ; regno
> ereg
; regno
--)
5749 if (reg_mask
& (1 << regno
))
5751 rtx dst
= gen_rtx_MEM (SImode
,
5752 plus_constant (Pmode
,
5755 rtx insn
= emit_insn (gen_movsi (dst
,
5756 gen_rtx_REG (SImode
, regno
)));
5757 RTX_FRAME_RELATED_P (insn
) = 1;
5758 if (offset
== stm_size
)
5764 /* If possible, emit a stm to do a bulk store of sequential
5765 registers to the stack. Note that it is an error in the ABI
5766 documentation that it doesn't list stm as a valid prologue
5770 rtx par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (stm_regs
));
5772 for (regno
= sreg
, slot
= 0; regno
<= ereg
; regno
++, slot
++)
5774 rtx reg
= gen_rtx_REG (SImode
, regno
);
5775 rtx addr
= plus_constant (Pmode
, stack_pointer_rtx
, slot
* 4);
5776 rtx set
= gen_rtx_SET (gen_frame_mem (SImode
, addr
), reg
);
5777 RTX_FRAME_RELATED_P (set
) = 1;
5778 XVECEXP (par
, 0, slot
) = set
;
5780 insn
= emit_insn (par
);
5781 RTX_FRAME_RELATED_P (insn
) = 1;
5785 /* Initialize hard frame pointer, if necessary. It points at the base
5786 of the register save area. */
5787 if (frame_pointer_needed
)
5789 insn
= emit_insn (gen_movsi (hard_frame_pointer_rtx
, stack_pointer_rtx
));
5790 RTX_FRAME_RELATED_P (insn
) = 1;
5793 /* Reserve stack space for locals and outgoing args. */
5794 expand_csky_stack_adjust (- cfun
->machine
->reg_offset
);
5796 /* Put the GOT address in reg_gb for PIC, using R13 as a scratch.
5797 See section 4.7.1 in the ABI documentation,
5798 "Function Prologue for PIC". */
5799 if (flag_pic
&& (reg_mask
& (1 << PIC_OFFSET_TABLE_REGNUM
)))
5801 rtx l1
= gen_label_rtx ();
5802 rtx grs_label
= gen_rtx_LABEL_REF (SImode
, l1
);
5803 rtx reg_gb
= gen_rtx_REG (SImode
, PIC_OFFSET_TABLE_REGNUM
);
5804 rtx reg_temp
= gen_rtx_REG (SImode
, 13);
5806 rtx tmp0_unspec
= gen_rtx_UNSPEC (Pmode
,
5807 gen_rtvec (1, grs_label
),
5808 UNSPEC_PIC_SYMBOL_GOTPC_GRS
);
5809 rtx tmp1_unspec
= gen_rtx_UNSPEC (Pmode
,
5810 gen_rtvec (1, grs_label
),
5811 UNSPEC_PIC_SYMBOL_GOTPC
);
5813 emit_insn (gen_prologue_get_pc (tmp0_unspec
));
5814 emit_move_insn (reg_temp
, tmp1_unspec
);
5815 emit_insn (gen_addsi3 (reg_gb
, reg_gb
, reg_temp
));
5818 if (flag_stack_usage_info
)
5819 current_function_static_stack_size
= cfun
->machine
->frame_size
;
/* Keep the prologue together for debuggers unless scheduling of the
   prologue was explicitly requested.  */
5821 if (!flag_sched_prolog
)
5822 emit_insn (gen_blockage ());
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, declarations and
   braces missing.  Restore from upstream gcc/config/csky/csky.c before
   editing.  Visible flow: emit a blockage; naked functions just return;
   restore SP from the frame pointer or by adjustment; restore
   callee-saved registers via pop (which returns), or via individual
   loads and/or an ldm with REG_CFA_RESTORE notes; deallocate the frame;
   apply the EH return adjustment; emit simple_return.  */
5826 csky_expand_epilogue (void)
5828 unsigned long func_type
= get_csky_current_func_type ();
5829 unsigned int reg_mask
;
/* Keep the epilogue together for debuggers.  */
5834 if (!flag_sched_prolog
)
5835 emit_insn (gen_blockage ());
5837 if (CSKY_FUNCTION_IS_NAKED (func_type
))
5839 emit_jump_insn (gen_simple_return ());
5843 /* Get the frame information. */
5844 csky_layout_stack_frame ();
5845 reg_mask
= cfun
->machine
->reg_mask
;
5846 reg_size
= cfun
->machine
->reg_size
;
5847 adjust
= reg_size
+ cfun
->machine
->arg_size
;
5849 /* Restore the SP to the base of the register save area. */
5850 if (frame_pointer_needed
)
5852 insn
= emit_move_insn (stack_pointer_rtx
, hard_frame_pointer_rtx
);
5853 RTX_FRAME_RELATED_P (insn
) = 1;
5856 expand_csky_stack_adjust (cfun
->machine
->reg_offset
);
5858 /* Restore the callee-saved registers. */
5859 if (csky_can_use_pushpop (reg_mask
)
5860 && cfun
->machine
->arg_size
== 0
5861 && !CSKY_FUNCTION_IS_INTERRUPT (func_type
)
5862 && !crtl
->calls_eh_return
)
5864 /* Pop includes an implicit return, so we are done. */
5865 emit_csky_regs_pop (reg_mask
);
5870 int sreg
= -1, ereg
= -1;
5871 bool ldm_p
= csky_can_use_ldstm (reg_mask
, &sreg
, &ereg
);
5872 int ldm_regs
= ldm_p
? ereg
- sreg
+ 1 : 0;
5873 int ldm_size
= ldm_regs
* 4;
5875 /* Emit individual register loads. Even if we are going to emit an
5876 ldm, we may need to load individual registers above that too. */
5877 if (reg_size
> ldm_size
)
5879 int offset
= reg_size
- 4;
5881 for ( ; regno
> ereg
; regno
--)
5882 if (reg_mask
& (1 << regno
))
5884 rtx src
= gen_frame_mem (SImode
,
5885 plus_constant (Pmode
,
5888 rtx reg
= gen_rtx_REG (SImode
, regno
);
5889 insn
= emit_move_insn (reg
, src
);
5890 RTX_FRAME_RELATED_P (insn
) = 1;
5891 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
5892 if (offset
== ldm_size
)
5898 /* If possible, emit a ldm to do a bulk load of sequential
5899 registers from the stack. */
5902 rtx par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (ldm_regs
));
5904 for (regno
= sreg
, slot
= 0; regno
<= ereg
; regno
++, slot
++)
5906 rtx reg
= gen_rtx_REG (SImode
, regno
);
5907 rtx addr
= plus_constant (Pmode
, stack_pointer_rtx
, slot
* 4);
5908 rtx set
= gen_rtx_SET (reg
, gen_frame_mem (SImode
, addr
));
5909 XVECEXP (par
, 0, slot
) = set
;
5911 insn
= emit_insn (par
);
5912 RTX_FRAME_RELATED_P (insn
) = 1;
/* Note each restored register for the unwinder.  */
5913 for (regno
= sreg
; regno
<= ereg
; regno
++)
5915 rtx reg
= gen_rtx_REG (SImode
, regno
);
5916 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
5921 /* Emit the final stack pointer adjustment to deallocate the saved
5922 registers and incoming argument area. */
5923 expand_csky_stack_adjust (adjust
);
5925 /* Extra stack adjustment for exception handler return. */
5926 if (crtl
->calls_eh_return
)
5927 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
5928 EH_RETURN_STACKADJ_RTX
));
5930 /* Now we can return. */
5931 emit_jump_insn (gen_simple_return ());
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, and several case
   labels/breaks missing.  Restore from upstream gcc/config/csky/csky.c.
   Visible behavior: print an identifying assembler comment (and the
   nie/ipush sequence for interrupt routines) per function type, then
   optionally emit a .stack_size directive for callgraph tooling.  */
5936 csky_output_function_prologue (FILE *f
)
5938 unsigned long func_type
= get_csky_current_func_type ();
5940 switch ((int) CSKY_FUNCTION_TYPE (func_type
))
5943 case CSKY_FT_NORMAL
:
5945 case CSKY_FT_INTERRUPT
:
5947 asm_fprintf (f
, "\t# Interrupt Service Routine.\n");
5948 asm_fprintf (f
, "\tnie\n\tipush\n");
5952 asm_fprintf (f
, "\t# Fast Interrupt Service Routine.\n");
5954 case CSKY_FT_EXCEPTION
:
5955 asm_fprintf (f
, "\t# CSKY Exception Handler.\n");
5958 asm_fprintf (f
, "\t# Naked Function: prologue and epilogue \
5959 provided by programmer.\n");
5963 csky_layout_stack_frame ();
5965 /* Generate .stack_size function-name, size for callgraph;
5966 the default stack size is 0. */
5967 if (TARGET_STACK_SIZE
&& cfun
->machine
->frame_size
> 0)
5969 gcc_assert (current_function_decl
!= NULL
);
5970 const char *func_name
=
5971 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl
));
/* A leading '*' in an assembler name means "already mangled"; strip it.  */
5972 if (func_name
[0] == '*')
5973 asm_fprintf (f
, "\t.stack_size %s, %d\n",
5974 &func_name
[1], cfun
->machine
->frame_size
);
5976 asm_fprintf (f
, "\t.stack_size %s, %d\n",
5977 func_name
, cfun
->machine
->frame_size
);
5983 csky_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED
)
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, declarations/braces
   missing.  Restore from upstream gcc/config/csky/csky.c.  Visible flow:
   if LR was saved, compute its stack slot (base register + delta,
   walking saved registers below LR), falling back to a scratch-register
   address computation when delta exceeds the load-offset range, and
   store SOURCE there; otherwise write SOURCE directly into LR.  */
5989 /* Helper for csky_eh_return splitter: store the call frame exception
5990 handler address in lr. */
5992 csky_set_eh_return_address (rtx source
, rtx scratch
)
5994 HOST_WIDE_INT delta
= 0;
5996 unsigned int reg_mask
;
5998 csky_layout_stack_frame ();
5999 reg_mask
= cfun
->machine
->reg_mask
;
6001 if (reg_mask
& (1 << CSKY_LR_REGNUM
))
6003 /* Find LR in the stack frame. */
6006 if (frame_pointer_needed
)
6008 basereg
= hard_frame_pointer_rtx
;
6013 basereg
= stack_pointer_rtx
;
6014 delta
= cfun
->machine
->reg_offset
;
6017 /* At this point, (basereg + delta) points at the low end of
6018 the reg save area. Regs are saved sequentially from low
6019 to high from this address. */
6020 for (i
= 0; i
< CSKY_LR_REGNUM
; i
++)
6021 if (reg_mask
& (1 << i
))
/* Offsets too large for a direct load use SCRATCH to form the
   address (the store into the computed slot is among the missing
   lines).  */
6024 if ((CSKY_TARGET_ARCH (CK801
) && delta
>= CSKY_LD16_MAX_OFFSET (Pmode
))
6025 || delta
>= CSKY_LD32_MAX_OFFSET (Pmode
))
6027 emit_insn (gen_movsi (scratch
, GEN_INT (delta
)));
6028 emit_insn (gen_addsi3 (scratch
, scratch
, basereg
));
6032 addr
= plus_constant (Pmode
, basereg
, delta
);
6033 emit_move_insn (gen_frame_mem (Pmode
, addr
), source
);
/* LR was not saved: set the register directly.  */
6036 emit_move_insn (gen_rtx_REG (Pmode
, CSKY_LR_REGNUM
), source
);
6039 /* Return TRUE if X references a SYMBOL_REF. */
6042 csky_symbol_mentioned_p (rtx x
)
6047 if (GET_CODE (x
) == SYMBOL_REF
)
6050 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
6051 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
6057 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
6058 if (csky_symbol_mentioned_p (XVECEXP (x
, i
, j
)))
6061 else if (fmt
[i
] == 'e' && csky_symbol_mentioned_p (XEXP (x
, i
)))
6068 /* Return TRUE if X references a LABEL_REF. */
6071 csky_label_mentioned_p (rtx x
)
6076 if (GET_CODE (x
) == LABEL_REF
)
6079 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
6080 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
6086 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
6087 if (csky_label_mentioned_p (XVECEXP (x
, i
, j
)))
6090 else if (fmt
[i
] == 'e' && csky_label_mentioned_p (XEXP (x
, i
)))
6099 tls_unspec_mentioned_p (rtx x
)
6101 switch (GET_CODE (x
))
6104 return tls_unspec_mentioned_p (XEXP (x
, 0));
6107 if (XINT (x
, 1) == UNSPEC_TLS
)
6117 /* Implement LEGITIMATE_PIC_OPERAND_P. */
6120 csky_legitimate_pic_operand_p (rtx x
)
6122 if (tls_unspec_mentioned_p (x
))
6124 if (csky_symbol_mentioned_p (x
) || csky_label_mentioned_p (x
))
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, and some branch
   headers/returns missing.  Restore from upstream gcc/config/csky/csky.c
   before editing.  Visible behavior: convert a symbolic address ORIG into
   a PIC-legitimate form, loading through the GOT for non-local symbols,
   using BSR/GRS/GOTOFF unspec forms otherwise, attaching a REG_EQUAL
   note, and recursing into CONST (plus ...) expressions.  */
6130 csky_legitimize_pic_address (rtx orig
, rtx reg
, bool gotrel_p
)
6132 rtx pic_reg
= gen_rtx_REG (SImode
, PIC_OFFSET_TABLE_REGNUM
);
6133 bool optimize_p
= false;
6135 if (GET_CODE (orig
) == SYMBOL_REF
|| GET_CODE (orig
) == LABEL_REF
)
6137 rtx pic_ref
, address
, rtx_tmp
;
/* NOTE(review): this inner pic_reg shadows the outer one.  */
6139 rtx pic_reg
= gen_rtx_REG (SImode
, PIC_OFFSET_TABLE_REGNUM
)
6144 gcc_assert (can_create_pseudo_p ());
6145 reg
= gen_reg_rtx (Pmode
);
6150 address
= gen_reg_rtx (Pmode
);
/* Non-local symbol: load its address from the GOT.  */
6154 if (GET_CODE (orig
) == SYMBOL_REF
&& !SYMBOL_REF_LOCAL_P (orig
))
6156 /* When gotrel_p generate sym@GOT, otherwise generate sym@PLT. */
6157 rtx_tmp
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, orig
),
6159 ? UNSPEC_PIC_SYMBOL_GOT
6160 : UNSPEC_PIC_SYMBOL_PLT
));
6161 optimize_p
= gotrel_p
;
6164 emit_move_insn (address
, rtx_tmp
);
/* (mult address 1) keeps the md pattern shape for the GOT load.  */
6165 rtx_tmp
= gen_rtx_MULT (Pmode
, address
, GEN_INT (1));
6167 pic_ref
= gen_const_mem (Pmode
,
6168 gen_rtx_PLUS (Pmode
, pic_reg
, rtx_tmp
));
6173 if (flag_pic
== 1 && !gotrel_p
)
6175 pic_ref
= gen_rtx_UNSPEC (Pmode
,
6176 gen_rtvec (1, orig
),
6177 UNSPEC_PIC_SYMBOL_BSR
);
6180 /* grs rx, symbol */
6181 else if (flag_pic
== 1 && (GET_CODE (orig
) == SYMBOL_REF
)
6182 && SYMBOL_REF_FUNCTION_P (orig
))
6184 pic_ref
= gen_rtx_UNSPEC (Pmode
,
6185 gen_rtvec (1, orig
),
6186 UNSPEC_PIC_SYMBOL_GRS
);
6189 /* lrw rx, symbol@GOTOFF; add rx, rx, gb */
6192 rtx_tmp
= gen_rtx_UNSPEC (Pmode
,
6193 gen_rtvec (1, orig
),
6194 UNSPEC_PIC_SYMBOL_GOTOFF
);
6195 emit_move_insn (address
, rtx_tmp
);
6196 pic_ref
= gen_rtx_PLUS (Pmode
, address
, pic_reg
);
6201 insn
= emit_move_insn (reg
, pic_ref
);
6202 /* Put a REG_EQUAL note on this insn,
6203 so that it can be optimized by loop. */
6205 set_unique_reg_note (insn
, REG_EQUAL
, orig
);
6209 else if (GET_CODE (orig
) == CONST
)
/* Already-legitimized (plus ... pic_reg) forms pass through.  */
6213 if (GET_CODE (XEXP (orig
, 0)) == PLUS
6214 && XEXP (XEXP (orig
, 0), 1) == pic_reg
)
6219 gcc_assert (can_create_pseudo_p ());
6220 reg
= gen_reg_rtx (Pmode
);
6223 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
/* Legitimize base and offset separately, then recombine.  */
6225 base
= csky_legitimize_pic_address (XEXP (XEXP (orig
, 0), 0),
6227 offset
= csky_legitimize_pic_address (XEXP (XEXP (orig
, 0), 1),
6228 base
== reg
? 0 : reg
, gotrel_p
);
6230 if (GET_CODE (offset
) == CONST_INT
)
6231 return plus_constant (Pmode
, base
, INTVAL (offset
));
6233 return gen_rtx_PLUS (Pmode
, base
, offset
);
6240 /* Functions to output assembly code for a function call. */
6243 csky_output_call (rtx
*operands
, int index
)
6245 static char buffer
[20];
6246 rtx addr
= operands
[index
];
6249 sprintf (buffer
, "jsr\t%%%d", index
);
6250 else if (flag_pic
&& (GET_CODE (addr
) == UNSPEC
))
6251 sprintf (buffer
, "bsr\t%%%d", index
);
6253 sprintf (buffer
, "jbsr\t%%%d", index
);
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, braces and some
   closing fprintf lines missing.  Restore from upstream
   gcc/config/csky/csky.c.  Visible behavior: emit the constant part of
   the trampoline (lrw/jmpi for 2E3, lrw/lrw/jmp for other cores, sorry()
   for ck801) followed by a two-word constant pool holding the static
   chain and target function address.  */
6259 /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
6260 Output assembler code for a block containing the constant parts
6261 of a trampoline, leaving space for the variable parts.
6262 Note that STATIC_CHAIN_REGNUM is t1 (aka r12) on ck801 and
6263 t1 (r13) otherwise. */
6266 csky_asm_trampoline_template (FILE *f
)
6268 if (CSKY_ISA_FEATURE (2E3
))
6270 fprintf (f
, "\tlrw\t%s, [.Lstatic_chain]\n",
6271 reg_names
[STATIC_CHAIN_REGNUM
]);
6272 fprintf (f
, "\tjmpi\t[.Lfunc_address]\n");
6273 /* 2 32-bit insns = 8 bytes. */
6275 else if (CSKY_TARGET_ARCH (CK801
))
6277 /* It's hard to provide general support for trampolines on this
6278 core. We need a register other than the one holding the
6279 static chain (r13) to hold the function pointer for the
6280 indirect jump to it. But ck801 has such a limited register set
6281 there is no other call-clobbered scratch register available -- in
6282 particular, this core does not have r12, which we use for the
6283 ck802 case below. If we use a callee-saved register like r4,
6284 saving the old value on the stack screws up the stack frame
6285 if there are overflow arguments pushed on the stack
6286 by the caller. In theory we could test for that and handle
6287 limited cases with parameters that all fit in r0-r3 with no
6288 stack overflow, but punt for now. */
6289 sorry ("Nested function trampolines not supported on CK801.");
6293 fprintf (f
, "\tlrw\t%s, [.Lfunc_address]\n",
6294 reg_names
[CSKY_T1_REGNUM
]);
6295 fprintf (f
, "\tlrw\t%s, [.Lstatic_chain]\n",
6296 reg_names
[STATIC_CHAIN_REGNUM
]);
6297 fprintf (f
, "\tjmp\t%s\n",
6298 reg_names
[CSKY_T1_REGNUM
]);
6299 /* To align constant pool on a word boundary. */
6300 fprintf (f
, "\t.align 2\n");
6301 /* 2 32-bit lrw insns + 16-bit jump + 16-bit pad = 12 bytes. */
6304 fprintf (f
, ".Lstatic_chain:\n");
6305 fprintf (f
, "\t.long 0\n");
6306 fprintf (f
, ".Lfunc_address:\n");
6307 fprintf (f
, "\t.long 0\n");
6308 /* 2 words of constant pool = 8 bytes. */
6311 /* Worker function for TARGET_TRAMPOLINE_INIT. */
6314 csky_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
6316 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
6318 int pool
= TRAMPOLINE_SIZE
- 8;
6320 emit_block_move (m_tramp
, assemble_trampoline_template (),
6321 GEN_INT (TRAMPOLINE_SIZE
), BLOCK_OP_NORMAL
);
6323 mem
= adjust_address (m_tramp
, SImode
, pool
);
6324 emit_move_insn (mem
, chain_value
);
6325 mem
= adjust_address (m_tramp
, SImode
, pool
+ 4);
6326 emit_move_insn (mem
, fnaddr
);
6328 a_tramp
= XEXP (m_tramp
, 0);
6329 maybe_emit_call_builtin___clear_cache (a_tramp
,
6330 plus_constant (Pmode
,
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, and the switch that
   inverts unsupported comparison codes (and the function's returns) is
   missing.  Restore from upstream gcc/config/csky/csky.c.  The visible
   fragments force OP1 into a register except where a compare against
   zero is directly supported, then emit the SET of the CC register.  */
6336 /* Emit a comparison insn for float values.
6337 Return true if the comparison is inverted. */
6340 csky_emit_compare_float (enum rtx_code code
, rtx op0
, rtx op1
)
6342 rtx cc_reg
= gen_rtx_REG (CCmode
, CSKY_CC_REGNUM
);
6344 machine_mode mode
= GET_MODE (op1
);
6346 if (op1
!= CONST0_RTX (mode
))
6347 op1
= force_reg (mode
, op1
);
/* NOTE(review): for some codes even a zero OP1 must be in a register
   when only the fpv2 feature set is available -- the surrounding case
   labels are among the missing lines.  */
6360 if (op1
== CONST0_RTX (mode
) && (CSKY_ISA_FEATURE_GET(fpv2_sf
)
6361 || CSKY_ISA_FEATURE_GET(fpv2_df
)
6362 || CSKY_ISA_FEATURE_GET(fpv2_divd
)))
6363 op1
= force_reg (mode
, op1
);
6374 emit_insn (gen_rtx_SET (cc_reg
, gen_rtx_fmt_ee (code
, CCmode
, op0
, op1
)));
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, returns and braces
   missing.  Restore from upstream gcc/config/csky/csky.c.  Visible
   behavior: decompose the MEM address, validate the base register, then
   validate either a scaled index register (scale 1/2/4) or, for 'Q', a
   4-aligned displacement in [0, 1020]; a lone base register satisfies
   'Q'.  */
6379 /* Support for the Q or W memory constraint. Returns true if OP is a MEM
6380 RTX with an address consisting of base + index or base + displacement. */
6383 csky_valid_mem_constraint_operand (rtx op
, const char *constraint
)
6385 struct csky_address addr
;
6387 if (GET_CODE (op
) != MEM
)
6390 if (!decompose_csky_address (XEXP (op
, 0), &addr
))
6393 /* Verify base register. */
6394 if (!is_csky_address_register_rtx_p (addr
.base
, 0))
6397 /* Verify index operand. */
6398 if (addr
.index
&& (constraint
[0] == 'Q' || constraint
[0] == 'W'))
6400 if (!is_csky_address_register_rtx_p (addr
.index
, 0))
6403 if (addr
.scale
== 1 || addr
.scale
== 2 || addr
.scale
== 4
6409 /* Verify disp operand. */
6410 else if (addr
.disp
&& constraint
[0] == 'Q')
6412 rtx disp
= addr
.disp
;
6414 if (!CONST_INT_P (disp
))
6417 if (((unsigned) INTVAL (disp
) % 4) == 0
6418 && (unsigned) INTVAL (disp
) <= (unsigned) 1020)
6423 else if (constraint
[0] == 'Q')
6424 /* Single reg is valid for 'Q'. */
6431 /* Returns the (interrupt) function type of the current
6432 function, or CSKY_FT_UNKNOWN if the type cannot be determined. */
6434 static unsigned long
6435 csky_isr_value (tree argument
)
6437 const isr_attribute_entry
*ptr
;
6440 /* No argument - default to IRQ. */
6441 if (argument
== NULL_TREE
)
6444 /* Get the value of the argument. */
6445 if (TREE_VALUE (argument
) == NULL_TREE
6446 || TREE_CODE (TREE_VALUE (argument
)) != STRING_CST
)
6447 return CSKY_FT_UNKNOWN
;
6449 arg
= TREE_STRING_POINTER (TREE_VALUE (argument
));
6451 /* Check it against the list of known arguments. */
6452 for (ptr
= isr_attribute_map
; ptr
->arg
!= NULL
; ptr
++)
6453 if (strcmp (arg
, ptr
->arg
) == 0)
6454 return ptr
->return_value
;
6456 /* An unrecognized interrupt type. */
6457 return CSKY_FT_UNKNOWN
;
6460 /* Handle an attribute requiring a FUNCTION_DECL;
6461 arguments as in struct attribute_spec.handler. */
6464 csky_handle_fndecl_attribute (tree
*node
, tree name
, tree args ATTRIBUTE_UNUSED
,
6465 int flags ATTRIBUTE_UNUSED
, bool *no_add_attrs
)
6467 if (TREE_CODE (*node
) != FUNCTION_DECL
)
6469 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
6471 *no_add_attrs
= true;
6477 /* Handle an "interrupt" or "isr" attribute;
6478 arguments as in struct attribute_spec.handler. */
6481 csky_handle_isr_attribute (tree
*node
, tree name
, tree args
, int flags
,
6487 warning (OPT_Wattributes
, "%qE attribute ignored without %<-mistack%>",
6489 *no_add_attrs
= true;
6495 if (TREE_CODE (*node
) != FUNCTION_DECL
)
6497 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
6499 *no_add_attrs
= true;
6504 if (TREE_CODE (*node
) == FUNCTION_TYPE
6505 || TREE_CODE (*node
) == METHOD_TYPE
)
6507 if (csky_isr_value (args
) == CSKY_FT_UNKNOWN
)
6509 warning (OPT_Wattributes
, "%qE attribute ignored", name
);
6510 *no_add_attrs
= true;
6513 else if (TREE_CODE (*node
) == POINTER_TYPE
6514 && (TREE_CODE (TREE_TYPE (*node
)) == FUNCTION_TYPE
6515 || TREE_CODE (TREE_TYPE (*node
)) == METHOD_TYPE
)
6516 && csky_isr_value (args
) != CSKY_FT_UNKNOWN
)
6518 *node
= build_variant_type_copy (*node
);
6519 TREE_TYPE (*node
) = build_type_attribute_variant (TREE_TYPE (*node
),
6520 tree_cons (name
, args
, TYPE_ATTRIBUTES (TREE_TYPE (*node
))));
6521 *no_add_attrs
= true;
6523 else if (flags
& ((int)ATTR_FLAG_DECL_NEXT
6524 | (int)ATTR_FLAG_FUNCTION_NEXT
6525 | (int)ATTR_FLAG_ARRAY_NEXT
))
6527 *no_add_attrs
= true;
6528 return tree_cons (name
, args
, NULL_TREE
);
6531 warning (OPT_Wattributes
, "%qE attribute ignored", name
);
/* NOTE(review): region damaged in extraction -- statements split across
   lines, original line numbers fused into the text, and the returned
   cost constants (and the V_REG_CLASS_P macro body) are missing.
   Restore from upstream gcc/config/csky/csky.c.  The visible structure
   distinguishes moves within/between general, HILO, and vector register
   classes.  */
6537 /* Implement TARGET_REGISTER_MOVE_COST: compute extra cost of moving data
6538 between one register class and another. */
6541 csky_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
6542 reg_class_t from
, reg_class_t to
)
6544 #define GR_REG_CLASS_P(CLASS) \
6545 ((CLASS) == GENERAL_REGS || (CLASS) == MINI_REGS || (CLASS) == SP_REGS \
6546 || (CLASS) == LOW_REGS)
6548 #define HILO_REG_CLASS_P(CLASS) \
6549 ((CLASS) == HILO_REGS)
6551 #define V_REG_CLASS_P(CLASS) \
6554 if (V_REG_CLASS_P (from
) && V_REG_CLASS_P (to
))
6557 if ((V_REG_CLASS_P (from
) && GR_REG_CLASS_P (to
))
6558 || (GR_REG_CLASS_P (from
) && V_REG_CLASS_P (to
)))
6561 if ((HILO_REG_CLASS_P (from
) && GR_REG_CLASS_P (to
))
6562 || (GR_REG_CLASS_P (from
) && HILO_REG_CLASS_P (to
)))
6565 if (HILO_REG_CLASS_P (from
) && HILO_REG_CLASS_P (to
))
6568 if ((HILO_REG_CLASS_P (from
) && V_REG_CLASS_P (to
))
6569 || (V_REG_CLASS_P (from
) && HILO_REG_CLASS_P (to
)))
6576 /* Implement TARGET_MEMORY_MOVE_COST: compute the cost of moving data
6577 between registers and memory. */
6580 csky_memory_move_cost (machine_mode mode
, reg_class_t rclass
,
6583 return (4 + memory_move_secondary_cost (mode
, rclass
, in
));
6587 /* TARGET_RTX_COSTS helper for ck801/ck802. */
6590 ck802_ck801_rtx_costs (rtx x
, int code
, int outer_code
, int *total
,
6593 machine_mode mode
= GET_MODE (x
);
6596 /* Accessing memory costs quite a lot for first word; */
6598 *total
= COSTS_N_INSNS (1 + CSKY_NUM_REGS (mode
));
6615 *total
= COSTS_N_INSNS (1);
6620 *total
= COSTS_N_INSNS (CSKY_NUM_REGS (mode
));
6625 enum rtx_code subcode
= GET_CODE (XEXP (x
, 1));
6627 /* If subcode is "not", we'll try to combine it into e.g. "andn"
6628 instruction, so give AND itself zero cost. */
6638 *total
= COSTS_N_INSNS (CSKY_NUM_REGS (mode
));
6642 /* FIXME: is ixw supported on ck801/ck802? */
6643 /* We can use "ix.h/w" insn to replace multiply by 2 or 4.
6644 "ix.h/w" is a 32-bit insn, so let its cost be a little less than
6646 if (REG_P (XEXP (x
, 0)) && CONST_INT_P (XEXP (x
, 1)))
6648 unsigned HOST_WIDE_INT m
6649 = (unsigned HOST_WIDE_INT
) (INTVAL (XEXP (x
, 1)));
6650 if ((m
== 2 || m
== 4) && outer_code
== PLUS
)
6657 /* Because mult is relatively slower than other operations,
6658 we try to use other insns when optimizing for speed.
6659 When optimizing for size, give it lower cost. */
6662 *total
= COSTS_N_INSNS (10 * CSKY_NUM_REGS (mode
));
6671 *total
= COSTS_N_INSNS (1) + cycle
;
6676 *total
= COSTS_N_INSNS (1);
6680 /* Usually, we use subtract from 0 to substitute for neg, and
6681 it costs 1 extra insn to move 0 to a register. */
6682 *total
= COSTS_N_INSNS (2 * CSKY_NUM_REGS (mode
));
6686 *total
= COSTS_N_INSNS (CSKY_NUM_REGS (mode
));
6690 *total
= COSTS_N_INSNS (1);
6695 *total
= COSTS_N_INSNS (CSKY_NUM_REGS (mode
));
6700 if (REG_P (XEXP (x
, 0))
6701 && CONST_INT_P (XEXP (x
, 1))
6702 && CONST_INT_P (XEXP (x
, 2))
6703 && INTVAL (XEXP (x
, 1)) == 8
6704 && INTVAL (XEXP (x
, 2)) % 8 == 0)
6706 *total
= COSTS_N_INSNS (1);
6709 *total
= COSTS_N_INSNS (CSKY_NUM_REGS (mode
));
6714 unsigned HOST_WIDE_INT t
= (unsigned HOST_WIDE_INT
) (INTVAL (x
));
6716 if (outer_code
== COMPARE
)
6721 *total
= COSTS_N_INSNS (2);
6723 else if (outer_code
== AND
|| outer_code
== IOR
|| outer_code
== XOR
)
6725 /* "andi,xori,ori" are 32-bit insns, so let it cost a
6729 /* Try replacing "andi" by "sextb/h", so let it cost more. */
6730 if (outer_code
== AND
&& (t
== 0xff || t
== 0xffff))
6737 else if (t
< 0x10000)
6738 *total
= COSTS_N_INSNS (1);
6740 *total
= COSTS_N_INSNS (2);
6742 else if (outer_code
== PLUS
|| outer_code
== MINUS
)
6744 /* "addi/subi rx,ry,imm", if imm<9, it is more often a
6745 16-bit insn. If imm>=9, use "movi" insn; it's probably
6746 less than "addi/subi". */
6749 else if (t
< 0x1000)
6751 else if (t
< 0x10000)
6752 *total
= COSTS_N_INSNS (1);
6754 *total
= COSTS_N_INSNS (2);
6756 else if (outer_code
== ROTATE
|| outer_code
== ROTATERT
6757 || outer_code
== LSHIFTRT
|| outer_code
== ASHIFTRT
6758 || outer_code
== ASHIFT
)
6763 *total
= COSTS_N_INSNS (2);
6768 if (outer_code
== SET
&& t
< 256)
6771 *total
= COSTS_N_INSNS (1);
6773 *total
= COSTS_N_INSNS (2);
6781 *total
= COSTS_N_INSNS (3);
6789 /* TARGET_RTX_COSTS helper for ck803. */
6792 ck803_rtx_costs (rtx x
, int code
, int outer_code ATTRIBUTE_UNUSED
,
6793 int *total
, bool speed ATTRIBUTE_UNUSED
)
6798 if (MEM_P (XEXP (x
, 1)))
6800 struct csky_address op1
;
6802 = decompose_csky_address (XEXP (XEXP (x
, 1), 0), &op1
);
6805 *total
= COSTS_N_INSNS (3);
6808 else if (address_valid
)
6810 *total
= COSTS_N_INSNS (1);
6814 if (REG_P (XEXP (x
, 0)) && (GET_CODE (XEXP (x
, 1)) == PLUS
))
6816 rtx sub_exp
= XEXP (x
, 1);
6817 if (REG_P (XEXP (sub_exp
, 0)) && REG_P (XEXP (sub_exp
, 1)))
6819 *total
= COSTS_N_INSNS (1);
6825 if (REG_P (XEXP (x
, 0)) && CONST_INT_P (XEXP (x
, 1)))
6827 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
6828 if (val
% 2 == 0 && val
< 0xffffffff && val
> 0)
6830 *total
= COSTS_N_INSNS (1);
6839 *total
= COSTS_N_INSNS (3);
6846 /* TARGET_RTX_COSTS helper for ck807/ck810 arches. */
6849 ck807_ck810_rtx_costs (rtx x
, int code
,
6850 int outer_code ATTRIBUTE_UNUSED
,
6851 int *total
, bool speed ATTRIBUTE_UNUSED
)
6856 if (REG_P (XEXP (x
, 0)) && CONST_INT_P (XEXP (x
, 1)))
6858 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
6859 if (val
% 2 == 0 && val
< 0xffffffff && val
> 0)
6861 *total
= COSTS_N_INSNS (1);
6870 *total
= COSTS_N_INSNS (3);
6877 /* TARGET_RTX_COSTS helper for ck860 arches. */
6880 ck860_rtx_costs (rtx x
, int code
, machine_mode mode
,
6881 int outer_code ATTRIBUTE_UNUSED
,
6882 int *total
, bool speed ATTRIBUTE_UNUSED
)
6887 /* The costs of mula is 1 more than mult. */
6888 if (GET_CODE (XEXP (x
, 0)) == MULT
&& REG_P (XEXP (x
, 1)) && speed
)
6890 rtx mul_op0
= XEXP (XEXP (x
, 0), 0);
6891 rtx mul_op1
= XEXP (XEXP (x
, 0), 1);
6892 if (REG_P (mul_op0
) && REG_P (mul_op1
))
6894 *total
= COSTS_N_INSNS (1);
6895 *total
+= rtx_cost (XEXP (x
, 0), mode
,
6896 (enum rtx_code
) code
, 0, speed
);
6902 if (REG_P (XEXP (x
, 0)) && CONST_INT_P (XEXP (x
, 1)))
6904 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
6905 if (val
% 2 == 0 && val
< 0xffffffff && val
> 0)
6907 *total
= COSTS_N_INSNS (1);
6916 *total
= COSTS_N_INSNS (3);
6924 /* Implement TARGET_RTX_COSTS, to compute a (partial) cost for rtx X.
6925 Return true if the complete cost has been computed, and false if
6926 subexpressions should be scanned. In either case, *TOTAL contains
6930 csky_rtx_costs (rtx x
, machine_mode mode ATTRIBUTE_UNUSED
, int outer_code
,
6931 int opno ATTRIBUTE_UNUSED
, int *total
, bool speed
)
6933 int code
= GET_CODE (x
);
6935 if (CSKY_TARGET_ARCH (CK802
) || CSKY_TARGET_ARCH (CK801
))
6936 return ck802_ck801_rtx_costs (x
, code
, outer_code
, total
, speed
);
6937 else if (CSKY_TARGET_ARCH (CK803
))
6938 return ck803_rtx_costs (x
, code
, outer_code
, total
, speed
);
6939 else if (CSKY_TARGET_ARCH (CK807
) || CSKY_TARGET_ARCH (CK810
))
6940 return ck807_ck810_rtx_costs (x
, code
, outer_code
, total
, speed
);
6941 else if (CSKY_TARGET_ARCH (CK860
))
6942 return ck860_rtx_costs (x
, code
, mode
, outer_code
, total
, speed
);
6947 /* Emit assembly code for CASESI. This is only used on CK801 and CK802
6948 when optimizing for size, and uses helper functions in libgcc instead
6949 of doing the control transfer inline. */
6952 csky_output_casesi (rtx
*operands
)
6954 rtx diff_vec
= PATTERN (NEXT_INSN (as_a
<rtx_insn
*> (operands
[0])));
6956 gcc_assert (GET_CODE (diff_vec
) == ADDR_DIFF_VEC
);
6958 switch (GET_MODE (diff_vec
))
6961 return (ADDR_DIFF_VEC_FLAGS (diff_vec
).offset_unsigned
6962 ? "jbsr\t___gnu_csky_case_uqi"
6963 : "jbsr\t___gnu_csky_case_sqi");
6965 return (ADDR_DIFF_VEC_FLAGS (diff_vec
).offset_unsigned
6966 ? "jbsr\t___gnu_csky_case_uhi"
6967 : "jbsr\t___gnu_csky_case_shi");
6969 return "jbsr\t___gnu_csky_case_si";
6975 /* Implement TARGET_SCHED_ISSUE_RATE. Lookup the issue rate in the
6976 per-core tuning structs. */
6978 csky_sched_issue_rate (void)
6980 if (CSKY_TARGET_ARCH (CK810
))
6987 /* This function implements the target macro TARGET_SCHED_ADJUST_COST.
6988 It corrects the value of COST based on the relationship between
6989 INSN and DEP through the dependence DEP_TYPE. It returns the new
6993 csky_sched_adjust_cost (rtx_insn
*insn
,
6997 unsigned int dw ATTRIBUTE_UNUSED
)
6999 if (dep_type
== REG_DEP_ANTI
|| dep_type
== REG_DEP_OUTPUT
)
7001 /* The REG_DEP_TRUE situation. */
7002 else if (recog_memoized (insn
) >= 0 && recog_memoized (dep
) >= 0)
7004 enum attr_type insn_type
= get_attr_type (insn
);
7005 if (CSKY_TARGET_ARCH (CK803
))
7007 /* The ld or st's base reg depends on the pre insn,
7008 it will delay 1 cycle. */
7009 if (insn_type
== TYPE_LOAD
|| insn_type
== TYPE_STORE
)
7011 rtx pattern
= PATTERN (insn
);
7013 gcc_assert (GET_CODE (pattern
) == SET
);
7014 rtx addr
= (insn_type
== TYPE_LOAD
7015 ? SET_SRC (pattern
) : SET_DEST (pattern
));
7017 enum rtx_code code
= GET_CODE (addr
);
7018 if (code
== ZERO_EXTEND
|| code
== SIGN_EXTEND
)
7019 addr
= XEXP (addr
, 0);
7020 gcc_assert (GET_CODE (addr
) == MEM
);
7022 rtx base
= XEXP (addr
, 0);
7026 if (GET_CODE (base
) == PLUS
7027 && GET_CODE (XEXP (base
, 0)) == REG
)
7028 reg
= XEXP (base
, 0);
7029 if ((reg
!= NULL_RTX
) && reg_set_p (reg
, PATTERN (dep
)))
7033 else if (CSKY_TARGET_ARCH (CK802
))
7035 if ((insn_type
== TYPE_CALL_JSR
|| insn_type
== TYPE_BRANCH_JMP
)
7036 && get_attr_type (dep
) != TYPE_LOAD
)
7039 if (insn_type
== TYPE_LOAD
|| insn_type
== TYPE_STORE
)
7041 rtx pattern
= PATTERN (insn
);
7043 gcc_assert (GET_CODE (pattern
) == SET
);
7045 rtx addr
= (insn_type
== TYPE_LOAD
7046 ? SET_SRC (pattern
) : SET_DEST (pattern
));
7048 enum rtx_code code
= GET_CODE (addr
);
7049 if (code
== ZERO_EXTEND
|| code
== SIGN_EXTEND
)
7050 addr
= XEXP (addr
, 0);
7051 gcc_assert (GET_CODE (addr
) == MEM
);
7053 rtx base
= XEXP (addr
, 0);
7057 if (GET_CODE (base
) == PLUS
7058 && GET_CODE (XEXP (base
, 0)) == REG
)
7059 reg
= XEXP (base
, 0);
7060 if ((reg
!= NULL_RTX
) && reg_set_p (reg
, PATTERN (dep
))
7061 && get_attr_type (dep
) != TYPE_LOAD
)
7064 if (insn_type
== TYPE_STORE
7065 && reg_referenced_p (SET_SRC (pattern
), PATTERN (dep
)))
7074 csky_warn_func_return (tree decl
)
7076 /* Naked functions are implemented entirely in assembly, including the
7077 return sequence, so suppress warnings about this. */
7078 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl
)) == NULL_TREE
;
7082 /* Implement TARGET_RETURN_IN_MEMORY to decide whether TYPE should be
7083 returned in memory (true) or in a register (false).
7084 FNTYPE is the type of the function making the call. */
7087 csky_return_in_memory (const_tree type
,
7088 const_tree fntype ATTRIBUTE_UNUSED
)
7090 const HOST_WIDE_INT size
= int_size_in_bytes (type
);
7091 return (size
== -1 || size
> 2 * UNITS_PER_WORD
);
7095 /* Implement TARGET_DWARF_REGISTER_SPAN.
7096 Dwarf models VFP registers as 64-bit or 128-bit registers default.
7097 GCC models tham as 32-bit registers, so we need to describe this to
7098 the DWARF generation code. Other registers can use the default. */
7101 csky_dwarf_register_span (rtx rtl
)
7109 regno
= REGNO (rtl
);
7110 if (!CSKY_VREG_P (regno
))
7113 if (CSKY_VREG_HI_P (regno
))
7116 mode
= GET_MODE (rtl
);
7117 if (GET_MODE_SIZE (mode
) < 8)
7121 if (TARGET_SINGLE_FPU
)
7123 nregs
= GET_MODE_SIZE (mode
) / 4;
7124 for (i
= 0; i
< nregs
; i
+= 2)
7125 if (TARGET_BIG_ENDIAN
)
7127 parts
[i
] = gen_rtx_REG (SImode
, regno
+ i
+ 1);
7128 parts
[i
+ 1] = gen_rtx_REG (SImode
, regno
+ i
);
7132 parts
[i
] = gen_rtx_REG (SImode
, regno
+ i
);
7133 parts
[i
+ 1] = gen_rtx_REG (SImode
, regno
+ i
+ 1);
7138 /* FIXME: dwarf2 considers all general registers to be the same
7139 as the CPU bit width. Transform the 64-bit FPU registers to
7140 32 bits here, and we will modify the unwind processing to
7141 fit CSKY architecture later. */
7142 nregs
= GET_MODE_SIZE (mode
) / 4;
7143 for (i
= 0; i
< nregs
; i
+= 2)
7144 if (TARGET_BIG_ENDIAN
)
7146 parts
[i
] = gen_rtx_REG (SImode
, regno
+ i
- 16);
7147 parts
[i
+ 1] = gen_rtx_REG (SImode
, regno
+ i
);
7151 parts
[i
] = gen_rtx_REG (SImode
, regno
+ i
);
7152 parts
[i
+ 1] = gen_rtx_REG (SImode
, regno
+ i
- 16);
7156 return gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (nregs
, parts
));
7159 /* Implement TARGET_INIT_LIBFUNCS. */
7162 csky_init_libfuncs (void)
7164 if (TARGET_CSKY_LINUX
)
7165 init_sync_libfuncs (UNITS_PER_WORD
);
7166 if (!TARGET_LIBCCRT
)
7169 #define CSKY_GCC_SYM(sym) "__csky_ccrt_" # sym
7173 /* Arithmetic functions */
7174 set_optab_libfunc (ashl_optab
, DImode
, CSKY_GCC_SYM (ashldi3
));
7175 set_optab_libfunc (ashr_optab
, DImode
, CSKY_GCC_SYM (ashrdi3
));
7176 set_optab_libfunc (sdiv_optab
, SImode
, CSKY_GCC_SYM (divsi3
));
7177 set_optab_libfunc (sdiv_optab
, DImode
, CSKY_GCC_SYM (divdi3
));
7178 set_optab_libfunc (lshr_optab
, DImode
, CSKY_GCC_SYM (lshrdi3
));
7179 set_optab_libfunc (smod_optab
, SImode
, CSKY_GCC_SYM (modsi3
));
7180 set_optab_libfunc (smod_optab
, DImode
, CSKY_GCC_SYM (moddi3
));
7181 set_optab_libfunc (smul_optab
, DImode
, CSKY_GCC_SYM (muldi3
));
7182 set_optab_libfunc (neg_optab
, DImode
, CSKY_GCC_SYM (negdi2
));
7183 set_optab_libfunc (udiv_optab
, SImode
, CSKY_GCC_SYM (udivsi3
));
7184 set_optab_libfunc (udiv_optab
, DImode
, CSKY_GCC_SYM (udivdi3
));
7185 set_optab_libfunc (udivmod_optab
, DImode
, CSKY_GCC_SYM (udivmoddi4
));
7186 set_optab_libfunc (umod_optab
, SImode
, CSKY_GCC_SYM (umodsi3
));
7187 set_optab_libfunc (umod_optab
, DImode
, CSKY_GCC_SYM (umoddi3
));
7189 /* Comparison functions */
7190 set_optab_libfunc (cmp_optab
, DImode
, CSKY_GCC_SYM (cmpdi2
));
7191 set_optab_libfunc (ucmp_optab
, DImode
, CSKY_GCC_SYM (ucmpdi2
));
7193 /* Trapping arithmetic functions */
7194 set_optab_libfunc (absv_optab
, SImode
, CSKY_GCC_SYM (absvsi2
));
7195 set_optab_libfunc (absv_optab
, DImode
, CSKY_GCC_SYM (absvdi2
));
7196 set_optab_libfunc (addv_optab
, SImode
, CSKY_GCC_SYM (addvsi3
));
7197 set_optab_libfunc (addv_optab
, DImode
, CSKY_GCC_SYM (addvdi3
));
7198 set_optab_libfunc (smulv_optab
, SImode
, CSKY_GCC_SYM (mulvsi3
));
7199 set_optab_libfunc (smulv_optab
, DImode
, CSKY_GCC_SYM (mulvdi3
));
7200 set_optab_libfunc (negv_optab
, SImode
, CSKY_GCC_SYM (negvsi2
));
7201 set_optab_libfunc (negv_optab
, DImode
, CSKY_GCC_SYM (negvdi2
));
7202 set_optab_libfunc (subv_optab
, SImode
, CSKY_GCC_SYM (subvsi3
));
7203 set_optab_libfunc (subv_optab
, DImode
, CSKY_GCC_SYM (subvdi3
));
7205 /* Bit operations */
7206 set_optab_libfunc (clz_optab
, SImode
, CSKY_GCC_SYM (clzsi2
));
7207 set_optab_libfunc (clz_optab
, DImode
, CSKY_GCC_SYM (clzdi2
));
7208 set_optab_libfunc (ctz_optab
, SImode
, CSKY_GCC_SYM (ctzsi2
));
7209 set_optab_libfunc (ctz_optab
, DImode
, CSKY_GCC_SYM (ctzdi2
));
7210 set_optab_libfunc (ffs_optab
, DImode
, CSKY_GCC_SYM (ffsdi2
));
7211 set_optab_libfunc (parity_optab
, SImode
, CSKY_GCC_SYM (paritysi2
));
7212 set_optab_libfunc (parity_optab
, DImode
, CSKY_GCC_SYM (paritydi2
));
7213 set_optab_libfunc (popcount_optab
,SImode
, CSKY_GCC_SYM (popcountsi2
));
7214 set_optab_libfunc (popcount_optab
,DImode
, CSKY_GCC_SYM (popcountdi2
));
7215 set_optab_libfunc (bswap_optab
, SImode
, CSKY_GCC_SYM (bswapsi2
));
7216 set_optab_libfunc (bswap_optab
, DImode
, CSKY_GCC_SYM (bswapdi2
));
7220 /* Arithmetic functions */
7221 set_optab_libfunc (add_optab
, SFmode
, CSKY_GCC_SYM (addsf3
));
7222 set_optab_libfunc (add_optab
, DFmode
, CSKY_GCC_SYM (adddf3
));
7223 set_optab_libfunc (sub_optab
, SFmode
, CSKY_GCC_SYM (subsf3
));
7224 set_optab_libfunc (sub_optab
, DFmode
, CSKY_GCC_SYM (subdf3
));
7225 set_optab_libfunc (smul_optab
, SFmode
, CSKY_GCC_SYM (mulsf3
));
7226 set_optab_libfunc (smul_optab
, DFmode
, CSKY_GCC_SYM (muldf3
));
7227 set_optab_libfunc (sdiv_optab
, SFmode
, CSKY_GCC_SYM (divsf3
));
7228 set_optab_libfunc (sdiv_optab
, DFmode
, CSKY_GCC_SYM (divdf3
));
7229 set_optab_libfunc (neg_optab
, SFmode
, CSKY_GCC_SYM (negsf2
));
7230 set_optab_libfunc (neg_optab
, DFmode
, CSKY_GCC_SYM (negdf2
));
7232 /* Conversion functions */
7233 set_conv_libfunc (sext_optab
, DFmode
, SFmode
, CSKY_GCC_SYM (extendsfdf2
));
7234 set_conv_libfunc (trunc_optab
, SFmode
, DFmode
, CSKY_GCC_SYM (truncdfsf2
));
7235 set_conv_libfunc (sfix_optab
, SImode
, SFmode
, CSKY_GCC_SYM (fixsfsi
));
7236 set_conv_libfunc (sfix_optab
, SImode
, DFmode
, CSKY_GCC_SYM (fixdfsi
));
7237 set_conv_libfunc (sfix_optab
, DImode
, SFmode
, CSKY_GCC_SYM (fixsfdi
));
7238 set_conv_libfunc (sfix_optab
, DImode
, DFmode
, CSKY_GCC_SYM (fixdfdi
));
7239 set_conv_libfunc (ufix_optab
, SImode
, SFmode
, CSKY_GCC_SYM (fixunssfsi
));
7240 set_conv_libfunc (ufix_optab
, SImode
, DFmode
, CSKY_GCC_SYM (fixunsdfsi
));
7241 set_conv_libfunc (ufix_optab
, DImode
, SFmode
, CSKY_GCC_SYM (fixunssfdi
));
7242 set_conv_libfunc (ufix_optab
, DImode
, DFmode
, CSKY_GCC_SYM (fixunsdfdi
));
7243 set_conv_libfunc (sfloat_optab
, SFmode
, SImode
, CSKY_GCC_SYM (floatsisf
));
7244 set_conv_libfunc (sfloat_optab
, DFmode
, SImode
, CSKY_GCC_SYM (floatsidf
));
7245 set_conv_libfunc (sfloat_optab
, SFmode
, DImode
, CSKY_GCC_SYM (floatdisf
));
7246 set_conv_libfunc (sfloat_optab
, DFmode
, DImode
, CSKY_GCC_SYM (floatdidf
));
7247 set_conv_libfunc (ufloat_optab
, SFmode
, SImode
, CSKY_GCC_SYM (floatunsisf
));
7248 set_conv_libfunc (ufloat_optab
, DFmode
, SImode
, CSKY_GCC_SYM (floatunsidf
));
7249 set_conv_libfunc (ufloat_optab
, SFmode
, DImode
, CSKY_GCC_SYM (floatundisf
));
7250 set_conv_libfunc (ufloat_optab
, DFmode
, DImode
, CSKY_GCC_SYM (floatundidf
));
7252 /* Comparison functions */
7253 set_optab_libfunc (cmp_optab
, SFmode
, CSKY_GCC_SYM (cmpsf2
));
7254 set_optab_libfunc (cmp_optab
, DFmode
, CSKY_GCC_SYM (cmpdf2
));
7255 set_optab_libfunc (unord_optab
, SFmode
, CSKY_GCC_SYM (unordsf2
));
7256 set_optab_libfunc (unord_optab
, DFmode
, CSKY_GCC_SYM (unorddf2
));
7257 set_optab_libfunc (eq_optab
, SFmode
, CSKY_GCC_SYM (eqsf2
));
7258 set_optab_libfunc (eq_optab
, DFmode
, CSKY_GCC_SYM (eqdf2
));
7259 set_optab_libfunc (ne_optab
, SFmode
, CSKY_GCC_SYM (nesf2
));
7260 set_optab_libfunc (ne_optab
, DFmode
, CSKY_GCC_SYM (nedf2
));
7261 set_optab_libfunc (ge_optab
, SFmode
, CSKY_GCC_SYM (gesf2
));
7262 set_optab_libfunc (ge_optab
, DFmode
, CSKY_GCC_SYM (gedf2
));
7263 set_optab_libfunc (lt_optab
, SFmode
, CSKY_GCC_SYM (ltsf2
));
7264 set_optab_libfunc (lt_optab
, DFmode
, CSKY_GCC_SYM (ltdf2
));
7265 set_optab_libfunc (le_optab
, SFmode
, CSKY_GCC_SYM (lesf2
));
7266 set_optab_libfunc (le_optab
, DFmode
, CSKY_GCC_SYM (ledf2
));
7267 set_optab_libfunc (gt_optab
, SFmode
, CSKY_GCC_SYM (gtsf2
));
7268 set_optab_libfunc (gt_optab
, DFmode
, CSKY_GCC_SYM (gtdf2
));
7272 /* Implement TARGET_ADDRESS_COST to estimate cost of the memory address X.
7273 For C-SKY, (register) and (register + offset) have the same cost.
7274 Other situations cost more. */
7277 csky_address_cost (rtx x
, machine_mode mode ATTRIBUTE_UNUSED
,
7278 addr_space_t as ATTRIBUTE_UNUSED
,
7279 bool speed ATTRIBUTE_UNUSED
)
7281 enum rtx_code code
= GET_CODE (x
);
7284 return COSTS_N_INSNS (1);
7286 && REG_P (XEXP (x
, 0))
7287 && CONST_INT_P (XEXP (x
, 1)))
7288 return COSTS_N_INSNS (1);
7290 return COSTS_N_INSNS (3);
7294 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
7297 csky_fixed_condition_code_regs (unsigned int *p1
, unsigned int *p2
)
7299 *p1
= CSKY_CC_REGNUM
;
7300 *p2
= INVALID_REGNUM
;
7305 csky_init_cumulative_args (CUMULATIVE_ARGS
*pcum
, tree fntype
,
7306 rtx libname ATTRIBUTE_UNUSED
,
7307 tree fndecl ATTRIBUTE_UNUSED
)
7309 memset(pcum
, 0, sizeof(*pcum
));
7310 if (stdarg_p (fntype
))
7311 pcum
->is_stdarg
= true;
7315 /* Implement the TARGET_INIT_BUILTINS target macro. */
7318 csky_init_builtins (void)
7321 static tree csky_floatHF_type_node
= make_node (REAL_TYPE
);
7322 TYPE_PRECISION (csky_floatHF_type_node
) = GET_MODE_PRECISION (HFmode
);
7323 layout_type (csky_floatHF_type_node
);
7324 (*lang_hooks
.types
.register_builtin_type
) (csky_floatHF_type_node
, "__fp16");
7328 /* Implement TARGET_MANGLE_TYPE. */
7331 csky_mangle_type (const_tree type
)
7333 if (TYPE_NAME (type
) && TREE_CODE (TYPE_NAME (type
)) == TYPE_DECL
7334 && DECL_NAME (TYPE_NAME (type
))
7335 && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type
))), "__fp16"))
7338 /* Use the default mangling. */
/* The C-SKY target hook vector, assembled from the TARGET_* macro
   definitions earlier in this file.  */
struct gcc_target targetm = TARGET_INITIALIZER;

/* Generated type/GC support code for this target file.  */
#include "gt-csky.h"