/* gcc/config/csky/csky.c — from the gcc.gnu.org git mirror.
   Commit subject: "C-SKY: Support fldrd/fstrd for fpuv2 and
   fldr.64/fstr.64 for fpuv3."  */
1 /* GCC backend functions for C-SKY targets.
2 Copyright (C) 2018-2021 Free Software Foundation, Inc.
3 Contributed by C-SKY Microsystems and Mentor Graphics.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define IN_TARGET_CODE 1
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "memmodel.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "cfghooks.h"
32 #include "df.h"
33 #include "tm_p.h"
34 #include "stringpool.h"
35 #include "attribs.h"
36 #include "optabs.h"
37 #include "regs.h"
38 #include "emit-rtl.h"
39 #include "recog.h"
40 #include "cgraph.h"
41 #include "c-family/c-common.h"
42 #include "cpplib.h"
43 #include "diagnostic-core.h"
44 #include "alias.h"
45 #include "fold-const.h"
46 #include "stor-layout.h"
47 #include "calls.h"
48 #include "varasm.h"
49 #include "output.h"
50 #include "insn-attr.h"
51 #include "flags.h"
52 #include "reload.h"
53 #include "explow.h"
54 #include "expr.h"
55 #include "cfgrtl.h"
56 #include "sched-int.h"
57 #include "common/common-target.h"
58 #include "langhooks.h"
59 #include "intl.h"
60 #include "libfuncs.h"
61 #include "opts.h"
62 #include "dumpfile.h"
63 #include "target-globals.h"
64 #include "builtins.h"
65 #include "tm-constrs.h"
66 #include "rtl-iter.h"
67 #include "pass_manager.h"
68 #include "tree-pass.h"
69 #include "context.h"
70
71 /* This file should be included last. */
72 #include "target-def.h"
73
/* Stack and register size macros.  */

/* Number of machine words needed to hold SIZE bytes, rounded up.  */
#define CSKY_NUM_WORDS(SIZE) \
  (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
/* Number of hard registers needed to hold a value of mode MODE.  */
#define CSKY_NUM_REGS(MODE) \
  CSKY_NUM_WORDS (GET_MODE_SIZE (MODE))
/* SIZE rounded up to a whole number of words, in bytes.  */
#define CSKY_STACK_ALIGN(SIZE) \
  (CSKY_NUM_WORDS (SIZE) * UNITS_PER_WORD)

/* Offsets and range macros.  */

/* Maximum scaled displacement reachable by a 16-bit load/store of MODE
   (5-bit immediate scaled by the access size — TODO confirm against
   the ISA encoding).  */
#define CSKY_LD16_MAX_OFFSET(MODE) \
  (31 * GET_MODE_SIZE (MODE))
/* Maximum scaled displacement reachable by a 32-bit load/store of MODE
   (12-bit immediate scaled by the access size — TODO confirm).  */
#define CSKY_LD32_MAX_OFFSET(MODE) \
  (4095 * GET_MODE_SIZE (MODE))
/* Mask covering the offset range of a 16-bit load/store of MODE.  */
#define CSKY_LD16_OFFSET_MASK(MODE) \
  (CSKY_LD16_MAX_OFFSET (MODE) + GET_MODE_SIZE (MODE) - 1)

/* Largest immediates accepted by the 16-bit addi/subi forms.  */
#define CSKY_ADDI16_MAX_IMM	  256
#define CSKY_SUBI16_MAX_IMM	  256

/* Prefix used when emitting constant pool (minipool) labels.  */
#define CSKY_CONSTPOOL_LABEL_PREFIX   "LCP"
96
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS.  The layout must stay in sync with
   the hard register numbering in csky.h (NOTE(review): confirm against
   FIRST_PSEUDO_REGISTER there).  */
enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  /* Registers r0-r7.  */
  MINI_REGS, MINI_REGS, MINI_REGS, MINI_REGS,
  MINI_REGS, MINI_REGS, MINI_REGS, MINI_REGS,
  /* Registers r8-r15.  Note r14 is the stack pointer.  */
  LOW_REGS, LOW_REGS, LOW_REGS, LOW_REGS,
  LOW_REGS, LOW_REGS, SP_REGS, LOW_REGS,
  /* Registers r16-r31.  */
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  /* Reserved.  */
  RESERVE_REGS,
  /* CC,HI,LO registers.  */
  C_REGS, HILO_REGS, HILO_REGS,
  /* Reserved.  */
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  /* Vec registers.  */
  V_REGS, V_REGS, V_REGS, V_REGS,
  V_REGS, V_REGS, V_REGS, V_REGS,
  V_REGS, V_REGS, V_REGS, V_REGS,
  V_REGS, V_REGS, V_REGS, V_REGS,
  /* Reserved.  */
  RESERVE_REGS, RESERVE_REGS,
  /* Register epc.  */
  OTHER_REGS,
  /* Vec registers.  */
  V_REGS, V_REGS, V_REGS, V_REGS,
  V_REGS, V_REGS, V_REGS, V_REGS,
  V_REGS, V_REGS, V_REGS, V_REGS,
  V_REGS, V_REGS, V_REGS, V_REGS,
  /* Reserved.  */
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  /* Reserved.  */
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,

  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,

  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,

  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS
};
170
/* Arrays that map GCC register numbers to debugger register numbers,
   '-1' means that is INVALID_REGNUM.
   NOTE(review): the specific numbers presumably follow the C-SKY ABI's
   DWARF register numbering — confirm against the platform ABI document.  */
const int csky_dbx_regno[FIRST_PSEUDO_REGISTER] =
{
  /* r0-r31 map directly.  */
  0, 1, 2, 3, 4, 5, 6, 7,
  8, 9, 10, 11, 12, 13, 14, 15,
  16, 17, 18, 19, 20, 21, 22, 23,
  24, 25, 26, 27, 28, 29, 30, 31,
  /* Reserved, CC, HI, LO.  */
  -1, -1, 36, 37,
  /* FP/vector registers (first bank).  */
  75, 79, 83, 87, 91, 95, 99, 103,
  107, 111, 115, 119, 123, 127, 131, 135,
  74, 78, 82, 86, 90, 94, 98, 102,
  106, 110, 114, 118, 122, 126, 130, 134,
  /* Reserved, reserved, epc.  */
  -1, -1, 72,
  /* vr: 71 - 86 */
  139, 143, 147, 151, 155, 159, 163, 167,
  171, 175, 179, 183, 187, 191, 195, 199,
  138, 142, 146, 150, 154, 158, 162, 166,
  170, 174, 178, 182, 186, 190, 194, 198,
  /* Reserved.  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,

  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,

  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,

  -1, -1, -1
};
209
/* Table of machine attributes.  */
static tree csky_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree csky_handle_isr_attribute (tree *, tree, tree, int, bool *);
static const struct attribute_spec csky_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  /* "naked": no arguments, must be attached to a declaration.  Marks a
     function that gets no prologue/epilogue (see CSKY_FT_NAKED).  */
  { "naked", 0, 0, true, false, false, false, csky_handle_fndecl_attribute, NULL },
  /* Interrupt Service Routines have special prologue and epilogue
     requirements.  The optional string argument selects the ISR flavor
     (see isr_attribute_map).  */
  { "interrupt", 0, 1, false, false, false, false, csky_handle_isr_attribute, NULL },
  { "isr", 0, 1, false, false, false, false, csky_handle_isr_attribute, NULL },
  /* Sentinel terminating the table.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
223
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Records if LR has to be saved for far jumps.  */
  int far_jump_used;
  /* Records the type of the current function; a combination of the
     CSKY_FT_* values defined below.  */
  unsigned long func_type;
  /* Record if the function has a variable argument list.  */
  int uses_anonymous_args;

  /* Stack frame layout information.  If frame_init_p is true,
     these fields have been initialized and don't need to be
     recomputed.  */
  unsigned int reg_mask;	/* non-volatile reg saves */
  int arg_size;			/* stdarg spills (bytes) */
  int reg_size;			/* non-volatile reg saves (bytes) */
  int local_size;		/* locals */
  int outbound_size;		/* arg overflow on calls out */
  int frame_size;		/* total static size of stack frame */
  /* Offsets of the locals, register-save, and argument areas within
     the frame — NOTE(review): confirm the base they are relative to
     against the frame-layout code.  */
  int local_offset;
  int reg_offset;
  int arg_offset;
  /* Nonzero once the layout fields above have been computed.  */
  int frame_init_p;

} machine_function;

/* These macros are for the func_type values above.  */
/* Low 3 bits of func_type hold the function type code.  */
#define CSKY_FT_TYPE_MASK	((1 << 3) - 1)
#define CSKY_FT_UNKNOWN		0 /* Type not been determined */
#define CSKY_FT_NORMAL		1 /* Normal function */
#define CSKY_FT_ISR		4 /* Interrupt service routine */
#define CSKY_FT_FIQ		5 /* Fast interrupt service routine */
#define CSKY_FT_EXCEPTION	6 /* Exception handler */
#define CSKY_FT_INTERRUPT	(1 << 2) /* overlap CSKY_FT_ISR: bit 2 is
					    set in all ISR-flavor codes.  */
#define CSKY_FT_NAKED		(1 << 3) /* No prologue and epilogue */
#define CSKY_FUNCTION_TYPE(t)		((t) & CSKY_FT_TYPE_MASK)
#define CSKY_FUNCTION_IS_INTERRUPT(t)	((t) & CSKY_FT_INTERRUPT)
#define CSKY_FUNCTION_IS_NAKED(t)	((t) & CSKY_FT_NAKED)
263
/* Static description of a CPU or architecture: its name, core
   identifier, architecture name and id, and ISA feature bits.  */
struct csky_processors
{
  const char *const name;
  enum csky_processor_type core;
  const char *arch;
  enum csky_base_architecture base_arch;
  enum csky_isa_feature isa_bits[CSKY_ISA_FEATURE_GET (max)];
};

/* Table of all supported cores, expanded from the CSKY_CORE entries in
   csky_cores.def and terminated by a NULL-named sentinel.  */
static struct csky_processors all_cores[] =
{
#undef CSKY_CORE
#define CSKY_CORE(NAME, CORE, X, ARCH, ISA) \
  {NAME, TARGET_CPU_##CORE, #ARCH, CSKY_BASE_ARCH_##ARCH, \
  {ISA CSKY_ISA_FEATURE_GET (none)}},
#include "csky_cores.def"
#undef CSKY_CORE
  {NULL, TARGET_CPU_csky_none, NULL, CSKY_BASE_ARCH_NONE, \
  {CSKY_ISA_FEATURE_GET (none)}}
};

/* Table of all supported architectures, expanded from the CSKY_ARCH
   entries in csky_cores.def and terminated by a NULL-named sentinel.  */
static struct csky_processors all_architectures[] =
{
#undef CSKY_ARCH
#define CSKY_ARCH(NAME, CORE, ARCH, ISA) \
  {NAME, TARGET_CPU_##CORE, #ARCH, CSKY_BASE_ARCH_##ARCH, \
  {ISA CSKY_ISA_FEATURE_GET (none)}},
#include "csky_cores.def"
#undef CSKY_ARCH
  {NULL, TARGET_CPU_csky_none, NULL, CSKY_BASE_ARCH_NONE, \
  {CSKY_ISA_FEATURE_GET (none)}}
};
296
/* Description of an FPU variant: its name plus the ISA feature bits
   it enables.  */
struct csky_fpu_desc
{
  const char *name;
  enum csky_isa_feature isa_bits[CSKY_ISA_FEATURE_GET (max)];
};

/* Table of all supported FPUs, expanded from the CSKY_FPU entries in
   csky_cores.def.  (No sentinel; iterate by array size.)  */
static const struct csky_fpu_desc all_fpus[] =
{
#undef CSKY_FPU
#define CSKY_FPU(NAME, CNAME, ISA) \
  {NAME, {ISA CSKY_ISA_FEATURE_GET (none)}},
#include "csky_cores.def"
#undef CSKY_FPU
};
311
/* Active target architecture.  */
struct csky_build_target
{
  /* Name of the target CPU, if known, or NULL if the target CPU was not
     specified by the user (and inferred from the -march option).  */
  const char *core_name;
  /* Name of the target ARCH.  NULL if there is a selected CPU.  */
  const char *arch_name;
  /* Preprocessor substring (never NULL).  */
  const char *arch_pp_name;
  /* CPU identifier for the core we're compiling for (architecturally).  */
  enum csky_processor_type arch_core;
  /* The base architecture value.  */
  enum csky_base_architecture base_arch;
  /* Bitmap encapsulating the isa_bits for the target environment.  */
  sbitmap isa;
};

/* The single instance describing the target currently compiled for.  */
struct csky_build_target csky_active_target;

/* The following are used in the .md file as equivalents to bits.  */
int csky_arch_isa_features[CSKY_ISA_FEATURE_GET (max)] = {0};

/* The highest CSKY architecture version supported by the target.  */
enum csky_base_architecture csky_base_arch = CSKY_TARGET_ARCH_GET (NONE);

/* Forward definitions of types.  */
typedef struct minipool_node Mnode;
typedef struct minipool_fixup Mfix;

/* Counter for generating unique labels — presumably for TLS access
   sequences (name-based; confirm at the point of use).  */
static GTY(()) int tls_labelno;


/* Maximum constant offset that can be added/subtracted from SP in a
   single instruction.  For ck801, this is for addsp/subsp, otherwise
   it is the range of addi/subi.  */
#define CSKY_MAX_SP_ADJUST \
  (CSKY_TARGET_ARCH (CK801) ? 508 : 4096)
350
351
352 /* Implement TARGET_CPU_CPP_BUILTINS. */
353
354 #define builtin_define(MACRO) cpp_define (pfile, MACRO)
355
356 void
357 csky_cpu_cpp_builtins (cpp_reader *pfile)
358 {
359 const char *arch_name = csky_active_target.arch_pp_name;
360 char *pp_name = (char *) alloca (1 + strlen (arch_name) + 4);
361 sprintf (pp_name, "__%s__", arch_name);
362 builtin_define (pp_name);
363
364 builtin_define ("__csky__=2");
365 builtin_define ("__CSKY__=2");
366 builtin_define ("__ckcore__=2");
367 builtin_define ("__CKCORE__=2");
368
369 builtin_define ("__CSKYABIV2__");
370 builtin_define ("__cskyabiv2__");
371 builtin_define ("__CSKYABI__=2");
372 builtin_define ("__cskyabi__=2");
373
374 if (TARGET_BIG_ENDIAN)
375 {
376 builtin_define ("__ckcoreBE__");
377 builtin_define ("__cskyBE__");
378 builtin_define ("__cskybe__");
379 builtin_define ("__CSKYBE__");
380 }
381 else
382 {
383 builtin_define ("__ckcoreLE__");
384 builtin_define ("__cskyLE__");
385 builtin_define ("__cskyle__");
386 builtin_define ("__CSKYLE__");
387 }
388
389 if (TARGET_HARD_FLOAT)
390 {
391 builtin_define ("__csky_hard_float__");
392 builtin_define ("__CSKY_HARD_FLOAT__");
393 if (TARGET_HARD_FLOAT_ABI)
394 {
395 builtin_define ("__csky_hard_float_abi__");
396 builtin_define ("__CSKY_HARD_FLOAT_ABI__");
397 }
398 if (TARGET_SINGLE_FPU)
399 {
400 builtin_define ("__csky_hard_float_fpu_sf__");
401 builtin_define ("__CSKY_HARD_FLOAT_FPU_SF__");
402 }
403 }
404 else
405 {
406 builtin_define ("__csky_soft_float__");
407 builtin_define ("__CSKY_SOFT_FLOAT__");
408 }
409
410 if (CSKY_ISA_FEATURE (fpv2_sf))
411 {
412 builtin_define ("__csky_fpuv2__");
413 builtin_define ("__CSKY_FPUV2__");
414 }
415
416 if (TARGET_SUPPORT_FPV3)
417 {
418 builtin_define ("__csky_fpuv3__");
419 builtin_define ("__CSKY_FPUV3__");
420 }
421
422 if (TARGET_ELRW)
423 {
424 builtin_define ("__csky_elrw__");
425 builtin_define ("__CSKY_ELRW__");
426 }
427 if (TARGET_ISTACK)
428 {
429 builtin_define ("__csky_istack__");
430 builtin_define ("__CSKY_ISTACK__");
431 }
432 if (TARGET_MP)
433 {
434 builtin_define ("__csky_mp__");
435 builtin_define ("__CSKY_MP__");
436 }
437 if (TARGET_CP)
438 {
439 builtin_define ("__csky_cp__");
440 builtin_define ("__CSKY_CP__");
441 }
442 if (TARGET_CACHE)
443 {
444 builtin_define ("__csky_cache__");
445 builtin_define ("__CSKY_CACHE__");
446 }
447 if (TARGET_SECURITY)
448 {
449 builtin_define ("__csky_security__");
450 builtin_define ("__CSKY_SECURITY__");
451 }
452 if (TARGET_TRUST)
453 {
454 builtin_define ("__csky_trust__");
455 builtin_define ("__CSKY_TRUST__");
456 }
457 if (TARGET_DSP)
458 {
459 builtin_define ("__csky_dsp__");
460 builtin_define ("__CSKY_DSP__");
461 }
462 if (TARGET_EDSP)
463 {
464 builtin_define ("__csky_edsp__");
465 builtin_define ("__CSKY_EDSP__");
466 }
467 if (TARGET_VDSP)
468 {
469 builtin_define ("__csky_vdsp__");
470 builtin_define ("__CSKY_VDSP__");
471 }
472 }
473
474
/******************************************************************
 *			   Storage Layout			  *
 ******************************************************************/

/* Always promote sub-word function arguments and return values to
   full words.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE \
  default_promote_function_mode_always_promote

/* Word-align constants where profitable; see csky_constant_alignment.  */
#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT csky_constant_alignment

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE csky_mangle_type


/******************************************************************
 *	    Stack Layout and Calling Conventions		  *
 ******************************************************************/

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE csky_can_eliminate

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG csky_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE csky_function_arg_advance

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE csky_function_value

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE csky_libcall_value

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P csky_function_value_regno_p

/* Pass complex arguments split into real and imaginary parts.  */
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size

#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES csky_arg_partial_bytes

/* Arguments that must live on the stack are passed by reference.  */
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK csky_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
  hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE csky_output_function_prologue

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE csky_output_function_epilogue

#undef TARGET_WARN_FUNC_RETURN
#define TARGET_WARN_FUNC_RETURN csky_warn_func_return

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY csky_return_in_memory


/******************************************************************
 *	       Implementing the Varargs Macros			  *
 ******************************************************************/

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS csky_setup_incoming_varargs


/******************************************************************
 *	    Implicit Calls to Library Routines			  *
 ******************************************************************/

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS csky_init_libfuncs


/******************************************************************
 * Dividing the Output into Sections (Texts, Data, . . . )	  *
 ******************************************************************/

/* TLS support is only available on the Linux configuration.  */
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS TARGET_CSKY_LINUX


/******************************************************************
 *	 Defining target-specific uses of __attribute__		  *
 ******************************************************************/

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE csky_attribute_table

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE csky_option_override
582
/* Implement the BRANCH_COST target macro.  Returns the port's single
   branch-cost setting; neither the hot/cold context nor branch
   predictability affects it.  */

int
csky_default_branch_cost (bool speed_p ATTRIBUTE_UNUSED,
			  bool predictable_p ATTRIBUTE_UNUSED)
{
  return csky_branch_cost;
}
591
592 bool
593 csky_default_logical_op_non_short_circuit (void)
594 {
595 return BRANCH_COST (optimize_function_for_speed_p (cfun), false) >= 2;
596 }
597
/******************************************************************
 *			  Register Usage			  *
 ******************************************************************/

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS csky_hard_regno_nregs

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK csky_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P csky_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS csky_can_change_mode_class

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE csky_conditional_register_usage

#undef TARGET_CLASS_LIKELY_SPILLED_P
#define TARGET_CLASS_LIKELY_SPILLED_P csky_class_likely_spilled_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS csky_preferred_reload_class

#undef TARGET_CLASS_MAX_NREGS
#define TARGET_CLASS_MAX_NREGS csky_class_max_nregs

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD csky_secondary_reload

#undef TARGET_SPILL_CLASS
#define TARGET_SPILL_CLASS csky_spill_class


/******************************************************************
 *			 Addressing Modes			  *
 ******************************************************************/

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM csky_cannot_force_const_mem

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P csky_legitimate_constant_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS csky_legitimize_address

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P csky_legitimate_address_p


/******************************************************************
 *			      Others				  *
 ******************************************************************/

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P csky_cannot_copy_insn_p


/******************************************************************
 *			 Assembler Format			  *
 ******************************************************************/

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND csky_print_operand

#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS csky_print_operand_address

/* Assembler directives for emitting unaligned 16- and 32-bit data.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"

#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN csky_dwarf_register_span


/******************************************************************
 *		    Miscellaneous Parameters			  *
 ******************************************************************/

/* Machine-dependent reorg pass — presumably drives the minipool
   (constant pool) machinery defined later in this file; confirm in
   csky_reorg.  */
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG csky_reorg

#undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS csky_allocate_stack_slots_for_args

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed


/******************************************************************
 *	      Trampolines for Nested Functions			  *
 ******************************************************************/

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE csky_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT csky_trampoline_init

/* The low bit is ignored by jsr and jmp instructions so is safe to use.  */
#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

/******************************************************************
 *	  Describing Relative Costs of Operations		  *
 ******************************************************************/

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST csky_register_move_cost

#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST csky_memory_move_cost

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS csky_rtx_costs

#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST csky_address_cost


/******************************************************************
 *			  Anchor address			  *
 ******************************************************************/

/* FIXME: the max offset is related to mode size, the following is
   defined according to SImode.  How to deal with HImode and
   QImode, and should the min offset be defined?  */
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET \
  ((TARGET_MINI_REGISTERS && optimize_size) ? 127 : 4095)


/******************************************************************
 *		      Condition Code Status			  *
 ******************************************************************/

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS csky_fixed_condition_code_regs


/******************************************************************
 *	     Adjusting the Instruction Scheduler		  *
 ******************************************************************/

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE csky_sched_issue_rate

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST csky_sched_adjust_cost


/******************************************************************
 *			     Builtin				  *
 ******************************************************************/

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS csky_init_builtins
770
/* Forward declarations of local functions defined later in this file.  */
static void push_csky_minipool_fix (rtx_insn *, HOST_WIDE_INT, rtx *,
				    machine_mode, rtx);
static void csky_print_operand (FILE *stream, rtx x, int code);


/* Define a table to map ISR attribute arguments onto function type
   modifiers.  */

typedef struct
{
  /* Attribute argument string, e.g. "irq".  */
  const char *const arg;
  /* Corresponding CSKY_FT_* function type code.  */
  const unsigned long return_value;
} isr_attribute_entry;

static const isr_attribute_entry isr_attribute_map[] =
{
  {"irq", CSKY_FT_ISR },
  {"IRQ", CSKY_FT_ISR },
  {"fiq", CSKY_FT_FIQ },
  {"FIQ", CSKY_FT_FIQ },
  /* Sentinel terminating the table.  */
  {NULL, CSKY_FT_NORMAL }
};
794
795
796 /* Return the function type of the current function, if it has not been
797 determined, return CSKY_FT_UNKNOWN. */
798
799 static unsigned long
800 get_csky_isr_type (tree argument)
801 {
802 const isr_attribute_entry *ptr;
803 const char *arg;
804
805 /* if argument is NULL, set default value ISR. */
806 if (argument == NULL_TREE)
807 return CSKY_FT_ISR;
808
809 if (TREE_VALUE (argument) == NULL_TREE
810 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
811 return CSKY_FT_UNKNOWN;
812
813 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
814
815 for (ptr = isr_attribute_map; ptr->arg != NULL; ptr++)
816 if (strcmp (arg, ptr->arg) == 0)
817 return ptr->return_value;
818
819 return CSKY_FT_UNKNOWN;
820 }
821
822 /* Classify cfun as a normal function or some sort of interrupt
823 handler, and set the corresponding bits in cfun->machine->func_type. */
824
825 static unsigned long
826 get_csky_current_func_type (void)
827 {
828 if (CSKY_FUNCTION_TYPE (cfun->machine->func_type) == CSKY_FT_UNKNOWN)
829 {
830 unsigned long type = CSKY_FT_UNKNOWN;
831 tree a;
832 tree attr;
833
834 gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
835
836 attr = DECL_ATTRIBUTES (current_function_decl);
837 a = lookup_attribute ("naked", attr);
838 if (a != NULL_TREE)
839 type |= CSKY_FT_NAKED;
840 a = lookup_attribute ("isr", attr);
841 if (a == NULL_TREE)
842 a = lookup_attribute ("interrupt", attr);
843 if (a == NULL_TREE)
844 type |= CSKY_FT_NORMAL;
845 else
846 type |= get_csky_isr_type (TREE_VALUE (a));
847
848 cfun->machine->func_type = type;
849 }
850
851 return cfun->machine->func_type;
852 }
853
/* These typedefs are located at the start of this file, so that
   they can be used in the prototypes there.  This comment is to
   remind readers of that fact so that the following structures
   can be understood more easily.

     typedef struct minipool_node    Mnode;
     typedef struct minipool_fixup   Mfix;  */

/* One constant-pool (minipool) entry.  */
struct minipool_node
{
  /* Doubly linked chain of entries.  */
  Mnode *next;
  Mnode *prev;
  /* The maximum offset into the code that this entry can be placed.  While
     pushing fixes for forward references, all entries are sorted in order
     of increasing max_address.  */
  HOST_WIDE_INT max_address;
  /* Similarly for an entry inserted for a backwards ref.  */
  HOST_WIDE_INT min_address;
  /* The number of fixes referencing this entry.  This can become zero
     if we "unpush" an entry.  In this case we ignore the entry when we
     come to emit the code.  */
  int refcount;
  /* The offset from the start of the minipool.  */
  HOST_WIDE_INT offset;
  /* The value in table.  */
  rtx value;
  /* The mode of value.  */
  machine_mode mode;
  /* The size of the value.  */
  int fix_size;
};
886
/* Record of one instruction operand that must be rewritten to load its
   constant from the minipool, or (with only INSN/ADDRESS filled in) a
   natural barrier in the insn stream — see push_csky_minipool_barrier.  */
struct minipool_fixup
{
  /* Next fix in the per-function list.  */
  Mfix *next;
  /* The instruction containing the constant operand.  */
  rtx_insn *insn;
  /* Address of INSN within the function.  */
  HOST_WIDE_INT address;
  /* Location of the operand inside INSN's pattern.  */
  rtx *loc;
  /* Mode of the operand.  */
  machine_mode mode;
  /* Size in bytes of the pool entry required.  */
  int fix_size;
  /* The constant value itself.  */
  rtx value;
  /* The pool entry assigned to this fix, once placed.  */
  Mnode *minipool;
  /* Placement range relative to ADDRESS — NOTE(review): confirm exact
     semantics against push_csky_minipool_fix.  */
  HOST_WIDE_INT forwards;
  HOST_WIDE_INT backwards;
};
900
/* Head and tail of the doubly-linked list of entries in the minipool
   currently being built.  */
static Mnode *minipool_vector_head;
static Mnode *minipool_vector_tail;
/* Label associated with the emitted minipool — presumably placed at
   its start; confirm at the emission site.  */
static rtx minipool_vector_label;
/* Counter for generating unique constant pool label numbers.  */
static HOST_WIDE_INT constpool_label_no = 0;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;
/* The linked list of all minipool fixes required for this function.  */
Mfix *minipool_fix_head;
Mfix *minipool_fix_tail;
/* The fix entry for the current minipool, once it has been placed.  */
Mfix *minipool_barrier;
914
/* Allow GC scanning of the minipool obstack.  Initializes the obstack
   used for minipool bookkeeping; note that no GC roots are registered
   in this body itself — NOTE(review): confirm whether the name reflects
   additional registration done elsewhere.  */

static void
csky_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  /* Remember the base of the obstack so it can be released later.  */
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
923
924 /* Implement TARGET_CONSTANT_ALIGNMENT.
925 Make strings word-aligned so strcpy from constants will be faster. */
926
927 static HOST_WIDE_INT
928 csky_constant_alignment (const_tree exp, HOST_WIDE_INT align)
929 {
930 if (TREE_CODE (exp) == STRING_CST
931 && !optimize_size
932 && align < BITS_PER_WORD)
933 return BITS_PER_WORD;
934 return align;
935 }
936
937 /* Record that there is a natural barrier in the insn stream at
938 ADDRESS. */
939
940 static void
941 push_csky_minipool_barrier (rtx_insn *insn, HOST_WIDE_INT address)
942 {
943 Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
944
945 fix->insn = insn;
946 fix->address = address;
947
948 fix->next = NULL;
949 if (minipool_fix_head != NULL)
950 minipool_fix_tail->next = fix;
951 else
952 minipool_fix_head = fix;
953
954 minipool_fix_tail = fix;
955 }
956
957 /* Compute the size of a vector jump table. */
958
959 static HOST_WIDE_INT
960 get_csky_jump_table_size (rtx insn)
961 {
962 /* ADDR_VECs only take room if read-only data does into the text
963 section. */
964 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
965 {
966 rtx body = PATTERN (insn);
967 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
968 HOST_WIDE_INT size;
969 HOST_WIDE_INT modesize;
970
971 modesize = GET_MODE_SIZE (GET_MODE (body));
972 size = modesize * XVECLEN (body, elt);
973 switch (modesize)
974 {
975 case 1:
976 /* Round up size of TBB table to a halfword boundary. */
977 size = (size + 1) & ~(HOST_WIDE_INT)1;
978 break;
979 case 2:
980 /* No padding necessary for TBH. */
981 break;
982 case 4:
983 break;
984 default:
985 gcc_unreachable ();
986 }
987 return size;
988 }
989
990 return 0;
991 }
992
993
/* Scan INSN and note any of its operands that need fixing.
   If DO_PUSHES is false we do not actually push any of the fixups
   needed.  The function returns TRUE if any fixups were needed/pushed.  */

static bool
note_csky_invalid_constants (rtx_insn *insn, HOST_WIDE_INT address,
			     int do_pushes)
{
  bool result = false;
  int opno;

  extract_constrain_insn (insn);

  /* Nothing to scan when the insn has no constraint alternatives.  */
  if (recog_data.n_alternatives == 0)
    return false;

  /* Fill in recog_op_alt with information about the constraints of
     this insn.  */
  preprocess_constraints (insn);

  const operand_alternative *op_alt = which_op_alt ();
  for (opno = 0; opno < recog_data.n_operands; opno++)
    {
      /* Things we need to fix can only occur in inputs.  */
      if (recog_data.operand_type[opno] != OP_IN)
	continue;

      /* If this alternative is a memory reference, then any mention
	 of constants in this alternative is really to fool reload
	 into allowing us to accept one there.  We need to fix them up
	 now so that we output the right code.  */
      if (op_alt[opno].memory_ok)
	{
	  rtx op = recog_data.operand[opno];

	  if (CONSTANT_P (op))
	    {
	      /* Queue a minipool fix: the constant will be loaded from
		 the pool instead of being an immediate operand.  */
	      if (do_pushes)
		push_csky_minipool_fix (insn, address,
					recog_data.operand_loc[opno],
					recog_data.operand_mode[opno], op);
	      result = true;
	    }
	}
    }

  return result;
}
1042
1043
/* Add a constant to the minipool for a forward reference.  Returns the
   node added or NULL if the constant will not fit in this pool.
   FIX describes the insn that references the constant, its address,
   and the maximum forward range of the referencing instruction.  */

static Mnode *
add_csky_minipool_forward_ref (Mfix *fix)
{
  /* If set, max_mp is the first pool_entry that has a lower
     constraint than the one we are trying to add.  */
  Mnode *max_mp = NULL;
  /* The furthest address at which this fix's pool entry may be placed
     while remaining reachable from the referencing insn.  */
  HOST_WIDE_INT max_address = fix->address + fix->forwards;
  Mnode *mp;

  /* If the minipool starts before the end of FIX->INSN then this FIX
     cannot be placed into the current pool.  Furthermore, adding the
     new constant pool entry may cause the pool to start FIX_SIZE bytes
     earlier.  */
  if (minipool_vector_head
      && (fix->address + get_attr_length (fix->insn)
	  >= minipool_vector_head->max_address - fix->fix_size))
    return NULL;

  /* Scan the pool to see if a constant with the same value has
     already been added.  While we are doing this, also note the
     location where we must insert the constant if it doesn't already
     exist.  */
  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
    {
      /* Labels must be compared by number because distinct label rtxes
	 can denote the same label.  */
      if (GET_CODE (fix->value) == GET_CODE (mp->value)
	  && fix->mode == mp->mode
	  && (GET_CODE (fix->value) != CODE_LABEL
	      || (CODE_LABEL_NUMBER (fix->value)
		  == CODE_LABEL_NUMBER (mp->value)))
	  && rtx_equal_p (fix->value, mp->value))
	{
	  /* More than one fix references this entry.  */
	  mp->refcount++;
	  return mp;
	}

      /* Note the insertion point if necessary.  */
      if (max_mp == NULL && mp->max_address > max_address)
	max_mp = mp;
    }

  /* The value is not currently in the minipool, so we need to create
     a new entry for it.  If MAX_MP is NULL, the entry will be put on
     the end of the list since the placement is less constrained than
     any existing entry.  Otherwise, we insert the new fix before
     MAX_MP and, if necessary, adjust the constraints on the other
     entries.  */
  mp = XNEW (Mnode);
  mp->fix_size = fix->fix_size;
  mp->mode = fix->mode;
  mp->value = fix->value;
  mp->refcount = 1;
  /* Not yet required for a backwards ref.  */
  mp->min_address = -65536;

  if (max_mp == NULL)
    {
      /* Append at the tail; create the pool label lazily when the
	 first entry is added.  */
      mp->max_address = max_address;
      mp->next = NULL;
      mp->prev = minipool_vector_tail;

      if (mp->prev == NULL)
	{
	  minipool_vector_head = mp;
	  minipool_vector_label
	    = gen_csky_constpool_label (gen_rtx_CONST_INT (VOIDmode,
							   constpool_label_no++));
	}
      else
	mp->prev->next = mp;

      minipool_vector_tail = mp;
    }
  else
    {
      /* Insert before MAX_MP; our max_address must also respect the
	 entry that now follows us, shifted by our own size.  */
      if (max_address > max_mp->max_address - mp->fix_size)
	mp->max_address = max_mp->max_address - mp->fix_size;
      else
	mp->max_address = max_address;

      mp->next = max_mp;
      mp->prev = max_mp->prev;
      max_mp->prev = mp;
      if (mp->prev != NULL)
	mp->prev->next = mp;
      else
	minipool_vector_head = mp;
    }

  /* Save the new entry.  */
  max_mp = mp;

  /* Scan over the preceding entries and adjust their addresses as
     required.  Each predecessor's max_address must leave room for the
     entries that follow it.  */
  while (mp->prev != NULL
	 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
    {
      mp->prev->max_address = mp->max_address - mp->prev->fix_size;
      mp = mp->prev;
    }

  return max_mp;
}
1150
1151
1152 /* Return the cost of forcibly inserting a barrier after INSN. */
1153
1154 static int
1155 get_csky_barrier_cost (rtx_insn *insn)
1156 {
1157 /* Basing the location of the pool on the loop depth is preferable,
1158 but at the moment, the basic block information seems to be
1159 corrupt by this stage of the compilation. */
1160 int base_cost = 50;
1161 rtx next = next_nonnote_insn (insn);
1162
1163 if (next != NULL && GET_CODE (next) == CODE_LABEL)
1164 base_cost -= 20;
1165
1166 switch (GET_CODE (insn))
1167 {
1168 case CODE_LABEL:
1169 /* It will always be better to place the table before the label, rather
1170 than after it. */
1171 return 50;
1172
1173 case INSN:
1174 case CALL_INSN:
1175 return base_cost;
1176
1177 case JUMP_INSN:
1178 return base_cost - 10;
1179
1180 default:
1181 return base_cost + 10;
1182 }
1183 }
1184
1185
/* Find the best place in the insn stream in the range
   (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
   Create the barrier by inserting a jump and add a new fix entry for
   it.  FIX_NEXT is the fix that should follow the new barrier when
   FIX is NULL (i.e. when scanning starts at the first insn).  */

static Mfix *
create_csky_fix_barrier (Mfix *fix, Mfix *fix_next,
			 HOST_WIDE_INT max_address)
{
  rtx_barrier *barrier;
  rtx_insn *from = (fix ? fix->insn : get_insns ());
  /* The instruction after which we will insert the jump.  */
  rtx_insn *selected = NULL;
  int selected_cost;
  /* The address at which the jump instruction will be placed.  */
  HOST_WIDE_INT selected_address = 0;
  Mfix *new_fix;
  HOST_WIDE_INT count = (fix ? fix->address : 0);
  HOST_WIDE_INT max_count = max_address;
  rtx_code_label *label = gen_label_rtx ();

  /* Seed the search with the cost of the starting insn.  */
  selected_cost = get_csky_barrier_cost (from);

  /* Walk forward, tracking the running address in COUNT, and remember
     the cheapest insertion point seen so far (ties go to the later
     position because of <=).  */
  while (from && count < max_count)
    {
      int new_cost;
      rtx_jump_table_data *table;

      /* Count the length of this insn.  */
      count += get_attr_length (from);

      /* If there is a jump table, add its length.  */
      if (tablejump_p (from, NULL, &table))
	{
	  count += get_csky_jump_table_size (table);

	  /* Jump tables aren't in a basic block, so base the cost on
	     the dispatch insn.  If we select this location, we will
	     still put the pool after the table.  */
	  new_cost = get_csky_barrier_cost (from);

	  if (count < max_count
	      && (!selected || new_cost <= selected_cost))
	    {
	      selected = table;
	      selected_cost = new_cost;
	      selected_address = count;
	    }

	  /* Continue after the dispatch table.  */
	  from = NEXT_INSN (table);
	  continue;
	}

      new_cost = get_csky_barrier_cost (from);

      if (count < max_count
	  && (!selected || new_cost <= selected_cost))
	{
	  selected = from;
	  selected_cost = new_cost;
	  selected_address = count;
	}

      from = NEXT_INSN (from);
    }

  /* Make sure that we found a place to insert the jump.  */
  gcc_assert (selected);

  /* Create a new JUMP_INSN that branches around a barrier.  */
  from = emit_jump_insn_after (gen_jump (label), selected);
  JUMP_LABEL (from) = label;
  barrier = emit_barrier_after (from);
  emit_label_after (label, barrier);

  /* Create a minipool barrier entry for the new barrier.  */
  new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
  new_fix->insn = barrier;
  new_fix->address = selected_address;
  /* Splice the new barrier into the fix chain after FIX (or at the
     head position when FIX is NULL).  */
  if (fix)
    {
      new_fix->next = fix->next;
      fix->next = new_fix;
    }
  else
    new_fix->next = fix_next;

  return new_fix;
}
1276
1277
/* Print a symbolic form of the constant X to the dump file F.
   This is used for dump output for -mconstpool in the target-dependent
   reorg pass.  Recurses for CONST and PLUS sub-expressions; anything
   unrecognized prints as "????".  */

static void
print_csky_value (FILE *f, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
      fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      return;

    case CONST_DOUBLE:
      /* NOTE(review): dumps XWINT words 2 and 3 -- presumably where the
	 value bits live in this rtx layout; confirm against the
	 CONST_DOUBLE format before relying on this output.  */
      fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
      return;

    case CONST_VECTOR:
      {
	int i;

	/* Print elements as a comma-separated list in angle brackets.  */
	fprintf (f, "<");
	for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
	  {
	    fprintf (f, HOST_WIDE_INT_PRINT_HEX,
		     INTVAL (CONST_VECTOR_ELT (x, i)));
	    if (i < (CONST_VECTOR_NUNITS (x) - 1))
	      fputc (',', f);
	  }
	fprintf (f, ">");
      }
      return;

    case CONST_STRING:
      fprintf (f, "\"%s\"", XSTR (x, 0));
      return;

    case SYMBOL_REF:
      fprintf (f, "`%s'", XSTR (x, 0));
      return;

    case LABEL_REF:
      fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
      return;

    case CONST:
      /* Unwrap and print the wrapped expression.  */
      print_csky_value (f, XEXP (x, 0));
      return;

    case PLUS:
      print_csky_value (f, XEXP (x, 0));
      fprintf (f, "+");
      print_csky_value (f, XEXP (x, 1));
      return;

    case PC:
      fprintf (f, "pc");
      return;

    default:
      fprintf (f, "????");
      return;
    }
}
1342
1343
/* Record INSN, which will need fixing up to load a value from the
   minipool.  ADDRESS is the offset of the insn since the start of the
   function; LOC is a pointer to the part of the insn which requires
   fixing; VALUE is the constant that must be loaded, which is of type
   MODE.  The fix is appended to the global minipool_fix chain.  */

static void
push_csky_minipool_fix (rtx_insn *insn, HOST_WIDE_INT address, rtx *loc,
			machine_mode mode, rtx value)
{
  /* Forward reach (in bytes) of the lrw-style pool-load instructions;
     extended lrw doubles the range.  */
#define CSKY_ELRW16_RANGE  1400
#define CSKY_LRW16_RANGE   700
#define CSKY_CONSTANT_POOL_RANGE (TARGET_ELRW ? CSKY_ELRW16_RANGE \
					      : CSKY_LRW16_RANGE)

  /* Fixes less than a word need padding out to a word boundary.  */
#define CSKY_MINIPOOL_FIX_SIZE(mode) \
  (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)

  Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));

  fix->insn = insn;
  fix->address = address;
  fix->loc = loc;
  fix->mode = mode;
  fix->fix_size = CSKY_MINIPOOL_FIX_SIZE (mode);
  fix->value = value;
  /* Only forward references are supported here; backwards is 0.  */
  fix->forwards = CSKY_CONSTANT_POOL_RANGE;
  fix->backwards = 0;
  fix->minipool = NULL;

  /* If an insn doesn't have a range defined for it, then it isn't
     expecting to be reworked by this code.  Better to stop now than
     to generate duff assembly code.  */
  gcc_assert (fix->forwards || fix->backwards);

  if (dump_file)
    {
      fprintf (dump_file,
	       ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
	       GET_MODE_NAME (mode),
	       INSN_UID (insn), (unsigned long) address,
	       -1 * (long)fix->backwards, (long)fix->forwards);
      print_csky_value (dump_file, fix->value);
      fprintf (dump_file, "\n");
    }

  /* Add it to the chain of fixes.  */
  fix->next = NULL;

  if (minipool_fix_head != NULL)
    minipool_fix_tail->next = fix;
  else
    minipool_fix_head = fix;

  minipool_fix_tail = fix;
}
1401
1402
1403 /* Fill in the offsets for minipool entries. */
1404
1405 static void
1406 assign_csky_minipool_offsets (Mfix *barrier)
1407 {
1408 HOST_WIDE_INT offset = 0;
1409 Mnode *mp;
1410
1411 minipool_barrier = barrier;
1412
1413 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
1414 {
1415 mp->offset = offset;
1416
1417 if (mp->refcount > 0)
1418 offset += mp->fix_size;
1419 }
1420 }
1421
1422
1423 /* Output the literal table. */
1424
1425 static HOST_WIDE_INT
1426 dump_csky_minipool (rtx_insn *scan)
1427 {
1428 Mnode *mp;
1429 Mnode *nmp;
1430 HOST_WIDE_INT pool_length = 0;
1431
1432 if (dump_file)
1433 fprintf (dump_file,
1434 ";; Emitting minipool after insn %u;\
1435 address %ld; align %d (bytes)\n",
1436 INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);
1437
1438 scan = emit_insn_after (gen_align_4 (), scan);
1439 scan = emit_insn_after (minipool_vector_label, scan);
1440
1441 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
1442 {
1443 if (mp->refcount > 0)
1444 {
1445 if (dump_file)
1446 {
1447 fprintf (dump_file, ";; Offset %u, min %ld, max %ld ",
1448 (unsigned) mp->offset, (unsigned long) mp->min_address,
1449 (unsigned long) mp->max_address);
1450 print_csky_value (dump_file, mp->value);
1451 fputc ('\n', dump_file);
1452 }
1453
1454 switch (mp->fix_size)
1455 {
1456 case 4:
1457 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
1458 pool_length += 4;
1459 break;
1460 case 8:
1461 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
1462 pool_length += 8;
1463 break;
1464 default:
1465 gcc_unreachable ();
1466 }
1467 }
1468
1469 nmp = mp->next;
1470 free (mp);
1471 }
1472
1473 minipool_vector_head = minipool_vector_tail = NULL;
1474 scan = emit_barrier_after (scan);
1475
1476 return pool_length;
1477 }
1478
1479 /* Return true if INSN is a minipool load or instruction that will be
1480 converted to one. It is assumed that INSN has type attribute "load". */
1481
1482 bool
1483 csky_minipool_load_p (rtx_insn *insn)
1484 {
1485 rtx op1, addr;
1486
1487 extract_insn_cached (insn);
1488
1489 op1 = recog_data.operand[1];
1490
1491 /* This is a constant that has not yet been turned into
1492 a minipool load. */
1493 if (CONSTANT_P (op1))
1494 return true;
1495
1496 /* Constant pool loads are label_refs. */
1497 if (GET_CODE (op1) == ZERO_EXTEND || GET_CODE (op1) == SIGN_EXTEND)
1498 op1 = XEXP (op1, 0);
1499 if (GET_CODE (op1) != MEM)
1500 return false;
1501 addr = XEXP (op1, 0);
1502 if (GET_CODE (addr) == PLUS && CONST_INT_P (XEXP (addr, 1)))
1503 addr = XEXP (addr, 0);
1504 return GET_CODE (addr) == LABEL_REF;
1505 }
1506
1507
1508 /* Compute the attribute "length" of push or pop insn, according to
1509 the registers it uses. */
1510
1511 int
1512 csky_compute_pushpop_length (rtx *operands)
1513 {
1514 rtx parallel_op = operands[2];
1515 /* Initialize to elements number of PARALLEL. */
1516 unsigned indx = XVECLEN (parallel_op, 0) - 1;
1517 unsigned first_indx = 0;
1518 unsigned regno = REGNO (operands[1]);
1519
1520 if (regno > CSKY_LR_REGNUM)
1521 return 4;
1522
1523 /* Check each register in the list. */
1524 for (; indx > first_indx; indx--)
1525 {
1526 regno = REGNO (XEXP (XVECEXP (parallel_op, 0, indx), 0));
1527 /* If a register number higher than 15 is included, a 32-bit insn
1528 is used. */
1529 if (regno > CSKY_LR_REGNUM)
1530 return 4;
1531 }
1532
1533 return 2;
1534 }
1535
/* Emit constant pools for -mconstpool.  Top-level driver: scans the
   whole function recording constants that must be moved to minipools,
   then repeatedly places pools and rewrites the referencing insns as
   label-relative loads.  */

static void
csky_emit_constant_pools (void)
{
  rtx_insn *insn;
  HOST_WIDE_INT address = 0;
  Mfix *fix;

  minipool_fix_head = minipool_fix_tail = NULL;

  /* The first insn must always be a note, or the code below won't
     scan it properly.  */
  insn = get_insns ();
  gcc_assert (NOTE_P (insn));

  /* Scan the insns and record the operands that need fixing.
     ADDRESS tracks the running byte offset from the function start.  */
  for (insn = next_nonnote_insn (insn); insn;
       insn = next_nonnote_insn (insn))
    {
      if (BARRIER_P (insn))
	push_csky_minipool_barrier (insn, address);
      else if (INSN_P (insn))
	{
	  rtx_jump_table_data *table;

	  note_csky_invalid_constants (insn, address, true);
	  address += get_attr_length (insn);

	  /* If the insn is a vector jump, add the size of the table
	     and skip the table.  */
	  if (tablejump_p (insn, NULL, &table))
	    {
	      address += get_csky_jump_table_size (table);
	      insn = table;
	    }
	}
    }

  fix = minipool_fix_head;

  /* Now scan the fixups and perform the required changes.  Each
     iteration of this loop emits one minipool.  */
  while (fix)
    {
      Mfix *ftmp;
      Mfix *last_added_fix;
      Mfix *last_barrier = NULL;
      Mfix *this_fix;
      Mnode *mp;
      bool has_pending_const = false;

      /* Check if there is any pending constant not processed.  */
      for (mp = minipool_vector_head; mp; mp = mp->next)
	if (mp->refcount > 0)
	  {
	    has_pending_const = true;
	    break;
	  }

      /* If no pending constant, skip over barrier insns.  */
      if (has_pending_const == false)
	{
	  while (fix && BARRIER_P (fix->insn))
	    fix = fix->next;
	  if (fix == NULL)
	    break;
	}

      last_added_fix = NULL;

      /* Greedily add fixes to the current pool until one no longer
	 fits (add_csky_minipool_forward_ref returns NULL) or a
	 barrier is found beyond the pool's reachable range.  */
      for (ftmp = fix; ftmp; ftmp = ftmp->next)
	{
	  if (BARRIER_P (ftmp->insn))
	    {
	      if (minipool_vector_head
		  && ftmp->address >= minipool_vector_head->max_address)
		break;

	      last_barrier = ftmp;
	    }
	  else
	    {
	      ftmp->minipool = add_csky_minipool_forward_ref (ftmp);
	      if (ftmp->minipool == NULL)
		break;
	    }
	  last_added_fix = ftmp;  /* Keep track of the last fix added.  */
	}

      /* If the last added fix is a barrier, dump minipool after it.  */
      if (last_added_fix && BARRIER_P (last_added_fix->insn))
	ftmp = last_barrier;
      else
	{
	  /* ftmp is first fix that we can't fit into this pool.
	     Insert a new barrier in the code somewhere between the previous
	     fix and this one, and arrange to jump around it.  */
	  HOST_WIDE_INT max_address;

	  /* The last item on the list of fixes must be a barrier, so
	     we can never run off the end of the list of fixes without
	     last_barrier being set.  */
	  gcc_assert (ftmp);

	  /* Check that there isn't another fix that is in range that
	     we couldn't fit into this pool because the pool was
	     already too large: we need to put the pool before such an
	     instruction.  The pool itself may come just after the
	     fix because create_csky_fix_barrier also allows space for a
	     jump instruction.  */
	  max_address = minipool_vector_head->max_address;
	  if (ftmp->address < max_address)
	    max_address = ftmp->address + 1;
	  last_barrier = create_csky_fix_barrier (last_added_fix, ftmp,
						  max_address);
	}

      assign_csky_minipool_offsets (last_barrier);

      /* Scan over the fixes we have identified for this pool, fixing them
	 up and adding the constants to the pool itself.  */
      for (this_fix = fix; this_fix && ftmp != this_fix;
	   this_fix = this_fix->next)
	{
	  if (GET_CODE (this_fix->insn) != BARRIER)
	    {
	      /* Rewrite the operand as a MEM of label+offset into the
		 pool being emitted.  */
	      rtx addr
		= plus_constant (Pmode,
				 gen_rtx_LABEL_REF (VOIDmode,
						    minipool_vector_label),
				 this_fix->minipool->offset);
	      rtx insn_body = PATTERN (this_fix->insn);
	      rtx src = XEXP (insn_body, 1);
	      *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
	      /* For symbols, also emit a marker insn recording the
		 original SYMBOL_REF (used later for assembly output).  */
	      if (GET_CODE (this_fix->value) == SYMBOL_REF)
		emit_insn_after (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
							  gen_rtvec (1, src),
							  VUNSPEC_SYMBOL_REF),
				 this_fix->insn);
	    }
	}
      dump_csky_minipool (last_barrier->insn);
      fix = ftmp;
      if (fix->next == NULL)
	break;
    }

  /* Free the minipool memory.  */
  obstack_free (&minipool_obstack, minipool_startobj);
}
1686
1687
1688 /* Implement TARGET_MACHINE_DEPENDENT_REORG. This handles
1689 -mconstpool output. */
1690
1691 static void
1692 csky_reorg (void)
1693 {
1694 if (TARGET_CONSTANT_POOL)
1695 csky_emit_constant_pools ();
1696 }
1697
1698
1699 /* Check to see if the current function contains a branch insn with the
1700 far jump attribute set. Such a function uses the LR register. */
1701
1702 static bool
1703 csky_far_jump_used_p (void)
1704 {
1705 rtx_insn *insn;
1706 if (cfun->machine->far_jump_used)
1707 return true;
1708
1709 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1710 if (GET_CODE (insn) == JUMP_INSN
1711 /* Ignore tablejump patterns. */
1712 && GET_CODE (PATTERN (insn)) != ADDR_VEC
1713 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
1714 && get_attr_far_jump (insn) == FAR_JUMP_YES)
1715 {
1716 cfun->machine->far_jump_used = 1;
1717 return true;
1718 }
1719 return false;
1720 }
1721
1722
/* Return the mask of registers used by the current function.  Set
   COUNT to the number of registers used.  Only general-purpose
   registers are considered; the per-architecture restrictions on
   which GPRs exist are applied here.  */

static unsigned int
get_csky_live_regs (int *count)
{
  int reg;
  unsigned int live_regs_mask = 0;

  *count = 0;
  for (reg = 0; reg < CSKY_NGPR_REGS; reg++)
    {
      bool save = false;

      /* Ignore unsupported registers.  CK801 has no r9-r12; CK801/802/803
	 have no high registers (r16+).  */
      if (CSKY_TARGET_ARCH (CK801) && reg > 8 && reg < 13)
	continue;
      if ((CSKY_TARGET_ARCH (CK801)
	   || CSKY_TARGET_ARCH (CK802)
	   || CSKY_TARGET_ARCH (CK803))
	  && reg > 15)
	break;

      /* Caller-saved registers marked as used.  */
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
	save = true;

      /* Frame pointer marked used.  */
      else if (frame_pointer_needed && reg == HARD_FRAME_POINTER_REGNUM)
	save = true;

      /* This is required for CK801/802 where FP is a fixed reg, otherwise
	 we end up with no FP value available to the DWARF-2 unwinder.  */
      else if (crtl->calls_eh_return && reg == HARD_FRAME_POINTER_REGNUM)
	save = true;

      /* CK801/802 also need special handling for LR because it's clobbered
	 by far jumps.  */
      else if ((CSKY_TARGET_ARCH (CK801) || CSKY_TARGET_ARCH (CK802))
	       && reg == CSKY_LR_REGNUM
	       && (!crtl->is_leaf || csky_far_jump_used_p ()))
	save = true;

      /* Register is used for EH data return.  */
      else if (crtl->calls_eh_return
	       && reg >= CSKY_FIRST_EH_RETDATA_REGNUM
	       && reg <= CSKY_LAST_EH_RETDATA_REGNUM)
	save = true;

      /* We need a temporary reg to hold the offset for adjusting the SP
	 for a large stack frame.  (Deliberately a plain `if', not
	 `else if': this condition applies independently of the chain
	 above.)  */
      if (reg == CSKY_STACKADJUST_REGNUM
	  && cfun->machine->reg_offset > CSKY_MAX_SP_ADJUST * 2)
	save = true;

      /* Add reg to the mask.  */
      if (save)
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }
  return live_regs_mask;
}
1787
/* Compute the stack frame layout, storing sizes of the various pieces
   in cfun->machine.

   Stack frames constructed in the prologue look like:
			    ... caller's frame ...
	incoming SP ->	caller's outbound argument overflow
			argument spill
	optional FP ->	register save
			local variables
			alloca() space
	adjusted SP ->	outbound argument overflow

   with SP/FP pointing at the base (low address) of the respective area,
   and each area aligned to a word boundary.  */

static void
csky_layout_stack_frame (void)
{
  machine_function *infp = cfun->machine;
  int reg_count;

  /* Layout is computed once and cached until reload completes.  */
  if (infp->frame_init_p)
    return;

  /* Get sizes of local variables & outbound arguments.  Offsets are
     measured upward from the adjusted (final) SP.  */
  infp->outbound_size = CSKY_STACK_ALIGN (crtl->outgoing_args_size);
  infp->local_offset = infp->outbound_size;
  infp->local_size = CSKY_STACK_ALIGN (get_frame_size ());
  infp->reg_offset = infp->local_offset + infp->local_size;

  /* Now compute size of argument spill + saved regs.  These do not
     need explicit alignment since they are already word-sized.  */
  infp->reg_mask = get_csky_live_regs (&reg_count);
  infp->reg_size = reg_count * UNITS_PER_WORD;
  infp->arg_offset = infp->reg_offset + infp->reg_size;
  infp->arg_size = crtl->args.pretend_args_size;
  infp->frame_size = infp->arg_offset + infp->arg_size;
  /* Only treat the layout as final once reload has run; before that
     the live-register set may still change.  */
  infp->frame_init_p = reload_completed;
}
1827
1828 /* Implement TARGET_CAN_ELIMINATE. */
1829 static bool
1830 csky_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
1831 {
1832 if (to == FRAME_POINTER_REGNUM)
1833 return from != ARG_POINTER_REGNUM;
1834 if (to == STACK_POINTER_REGNUM)
1835 return !frame_pointer_needed;
1836 return true;
1837 }
1838
1839 /* Worker function for INITIAL_ELIMINATION_OFFSET macro.
1840 Define the offset between two registers, one to be eliminated, and
1841 the other its replacement, at the start of a routine. */
1842
1843 HOST_WIDE_INT
1844 csky_initial_elimination_offset (int from, int to)
1845 {
1846 int offset;
1847
1848 csky_layout_stack_frame ();
1849
1850 /* Set OFFSET to the offset to the initial stack pointer. */
1851 switch (from)
1852 {
1853 case FRAME_POINTER_REGNUM:
1854 case HARD_FRAME_POINTER_REGNUM:
1855 offset = cfun->machine->reg_offset;
1856 break;
1857
1858 case ARG_POINTER_REGNUM:
1859 offset = cfun->machine->arg_offset;
1860 break;
1861
1862 default:
1863 gcc_unreachable ();
1864 }
1865
1866 /* If we are asked for the offset to the frame pointer instead,
1867 then subtract the difference between the frame pointer and stack
1868 pointer. */
1869 if (to == FRAME_POINTER_REGNUM || to == HARD_FRAME_POINTER_REGNUM)
1870 offset -= cfun->machine->reg_offset;
1871 return offset;
1872 }
1873
1874
1875 /* Determine where to put an argument to a function.
1876 Value is zero to push the argument on the stack,
1877 or a hard register in which to store the argument.
1878
1879 CUM is a variable of type CUMULATIVE_ARGS which gives info about
1880 the preceding args and about the function being called.
1881 ARG is a description of the argument. */
1882
1883 static rtx
1884 csky_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
1885 {
1886 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1887 int reg = pcum->reg;
1888 machine_mode mode = arg.mode;
1889
1890 if (FUNCTION_VARG_MODE_P(mode)
1891 && !pcum->is_stdarg)
1892 {
1893 reg = pcum->freg;
1894
1895 if (reg < CSKY_NPARM_FREGS)
1896 return gen_rtx_REG (mode, CSKY_FIRST_VFP_REGNUM + reg);
1897 else
1898 return NULL_RTX;
1899 }
1900
1901 if (reg < CSKY_NPARM_REGS)
1902 return gen_rtx_REG (mode, CSKY_FIRST_PARM_REGNUM + reg);
1903
1904 return NULL_RTX;
1905 }
1906
1907
1908 /* Return the number of registers (words) needed to pass an argument of
1909 MODE and TYPE. */
1910
1911 static int
1912 csky_num_arg_regs (machine_mode mode, const_tree type, bool is_stdarg)
1913 {
1914 int size;
1915
1916 if (type && mode == BLKmode)
1917 size = int_size_in_bytes (type);
1918 else
1919 size = GET_MODE_SIZE (mode);
1920
1921 if (TARGET_HARD_FLOAT_ABI
1922 && !is_stdarg)
1923 {
1924 if (CSKY_VREG_MODE_P(mode)
1925 && !TARGET_SINGLE_FPU)
1926 return ((CSKY_NUM_WORDS (size) + 1) / 2);
1927 }
1928
1929 return CSKY_NUM_WORDS (size);
1930 }
1931
1932
1933 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
1934
1935 static void
1936 csky_function_arg_advance (cumulative_args_t pcum_v,
1937 const function_arg_info &arg)
1938 {
1939 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1940 int *reg = &pcum->reg;
1941 machine_mode mode = arg.mode;
1942
1943 int param_size = csky_num_arg_regs (mode, arg.type, pcum->is_stdarg);
1944 int param_regs_nums = CSKY_NPARM_REGS;
1945
1946 if (FUNCTION_VARG_MODE_P(mode)
1947 && !pcum->is_stdarg)
1948 {
1949 reg = &pcum->freg;
1950 param_regs_nums = CSKY_NPARM_FREGS;
1951 }
1952
1953 if (*reg + param_size > param_regs_nums)
1954 *reg = param_regs_nums;
1955 else
1956 *reg += param_size;
1957 }
1958
1959
/* Implement TARGET_FUNCTION_VALUE.  Returns the rtx describing where
   FUNC returns a value of TYPE: the first VFP register for hard-float
   candidates, r0 (promoted mode) for integral types, an r0/r1
   PARALLEL for two-word BLKmode aggregates, and r0 otherwise.  */
static rtx
csky_function_value (const_tree type, const_tree func,
		     bool outgoing ATTRIBUTE_UNUSED)
{
  machine_mode mode;
  int unsignedp ATTRIBUTE_UNUSED;
  int size;

  mode = TYPE_MODE (type);
  size = int_size_in_bytes (type);

  /* Hard-float candidate values come back in the first VFP register.  */
  if (FUNCTION_VARG_MODE_P(mode))
    {
      mode = promote_function_mode (type, mode, &unsignedp, func, 1);
      return gen_rtx_REG (mode, CSKY_FIRST_VFP_REGNUM);
    }

  /* Since we promote return types, we must promote the mode here too.  */
  if (INTEGRAL_TYPE_P (type))
    {
      mode = promote_function_mode (type, mode, &unsignedp, func, 1);
      return gen_rtx_REG (mode, CSKY_FIRST_RET_REGNUM);
    }

  /* A BLKmode aggregate wider than one word but at most two is
     returned in the register pair r0/r1, described by a PARALLEL.  */
  if (mode == BLKmode && size > UNITS_PER_WORD
      && size <= UNITS_PER_WORD * 2)
    {
      rtx ret_regs[2];
      ret_regs[0] = gen_rtx_EXPR_LIST (SImode,
				       gen_rtx_REG (SImode,
						    CSKY_FIRST_RET_REGNUM),
				       GEN_INT (0 * UNITS_PER_WORD));
      ret_regs[1] = gen_rtx_EXPR_LIST (SImode,
				       gen_rtx_REG (SImode,
						    CSKY_FIRST_RET_REGNUM + 1),
				       GEN_INT (1 * UNITS_PER_WORD));

      rtvec vec = gen_rtvec (2, ret_regs[0], ret_regs[1]);

      return gen_rtx_PARALLEL (mode, vec);
    }

  /* Everything else (e.g. small aggregates, pointers) in r0.  */
  return gen_rtx_REG (mode, CSKY_FIRST_RET_REGNUM);
}
2005
2006
2007 /* Implement TARGET_LIBCALL_VALUE. */
2008
2009 static rtx
2010 csky_libcall_value (machine_mode mode,
2011 const_rtx libcall ATTRIBUTE_UNUSED)
2012 {
2013 if (FUNCTION_VARG_MODE_P(mode))
2014 {
2015 return gen_rtx_REG (mode, CSKY_FIRST_VFP_REGNUM);
2016 }
2017 return gen_rtx_REG (mode, CSKY_FIRST_RET_REGNUM);
2018 }
2019
2020
2021 /* Implement TARGET_FUNCTION_VALUE_REGNO_P.
2022 On C-SKY, only r0 can return results. */
2023
2024 static bool
2025 csky_function_value_regno_p (const unsigned int regno)
2026 {
2027 if (regno == CSKY_FIRST_RET_REGNUM
2028 || (TARGET_HARD_FLOAT_ABI
2029 && regno == CSKY_FIRST_VFP_REGNUM))
2030 return true;
2031 return false;
2032 }
2033
2034
2035 /* Return an RTX indicating where the return address to the
2036 calling function can be found. */
2037
2038 rtx
2039 csky_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
2040 {
2041 if (count != 0)
2042 return NULL_RTX;
2043
2044 return get_hard_reg_initial_val (Pmode, CSKY_LR_REGNUM);
2045 }
2046
2047
2048 /* Implement TARGET_ARG_PARTIAL_BYTES.
2049 Return the number of bytes at the beginning of an argument
2050 that must be put in registers. The value must be zero for arguments
2051 that are passed entirely in registers or
2052 that are entirely pushed on the stack. */
2053
2054 static int
2055 csky_arg_partial_bytes (cumulative_args_t pcum_v, const function_arg_info &arg)
2056 {
2057 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
2058 int param_size = csky_num_arg_regs (arg.mode, arg.type, pcum->is_stdarg);
2059 int reg = pcum->reg;
2060
2061 if (FUNCTION_VARG_MODE_P(arg.mode)
2062 && !pcum->is_stdarg)
2063 return 0;
2064
2065 if (reg < CSKY_NPARM_REGS
2066 && reg + param_size > CSKY_NPARM_REGS)
2067 return (CSKY_NPARM_REGS - reg) * UNITS_PER_WORD;
2068
2069 return 0;
2070 }
2071
2072
/* Implement TARGET_SETUP_INCOMING_VARARGS.
   On C-Sky the copy from the argument registers to the stack is emitted
   by the prologue hooks, so here we just have to note how much stack space
   to save.  ARG describes the last named argument; PRETEND_SIZE
   receives the number of bytes of argument registers that follow it
   and must be spilled for va_arg.  */

static void
csky_setup_incoming_varargs (cumulative_args_t pcum_v,
			     const function_arg_info &arg,
			     int *pretend_size,
			     int second_time ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
  CUMULATIVE_ARGS local_cum;
  cumulative_args_t local_cum_v = pack_cumulative_args (&local_cum);
  int regs_to_push;

  cfun->machine->uses_anonymous_args = 1;
  /* Advance a copy of the cumulative state past the last named
     argument; the remaining argument registers hold anonymous args.  */
  local_cum = *pcum;
  csky_function_arg_advance (local_cum_v, arg);
  regs_to_push = CSKY_NPARM_REGS - local_cum.reg;
  if (regs_to_push)
    *pretend_size = regs_to_push * UNITS_PER_WORD;
}
2096
2097
2098 /* Implement TARGET_ASM_OUTPUT_MI_THUNK.
2099 Output code to add DELTA to the first argument, and then jump
2100 to FUNCTION. Used for C++ multiple inheritance. */
2101
2102 static void
2103 csky_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
2104 HOST_WIDE_INT delta,
2105 HOST_WIDE_INT vcall_offset,
2106 tree function)
2107 {
2108 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk));
2109 const char *thiz = "a0";
2110 const char *reg0 = "t0";
2111 const char *reg1 = "t1";
2112 int maxoff = 4096; /* Constant range for addi/subi. */
2113
2114 assemble_start_function (thunk, fnname);
2115 final_start_function (emit_barrier (), file, 1);
2116
2117 rtx fnaddr = XEXP (DECL_RTL (function), 0);
2118
2119 if (CSKY_TARGET_ARCH (CK801))
2120 {
2121 /* CK801 can't use t registers and has only 16-bit addi/subi. */
2122 reg0 = "l0";
2123 reg1 = "l1";
2124 maxoff = 256;
2125 if (vcall_offset > maxoff || vcall_offset < -maxoff)
2126 fprintf (file, "\tpush\tl0, l1\n");
2127 else if (delta > maxoff || delta < -maxoff)
2128 fprintf (file, "\tpush\tl0\n");
2129 }
2130
2131 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2132 thiz = "a1";
2133
2134 /* Add delta to this_rtx. */
2135 if (delta != 0)
2136 {
2137 if (delta > maxoff || delta < -maxoff)
2138 {
2139 fprintf (file, "\tlrw\t%s, %ld\n", reg0, (long)delta);
2140 fprintf (file, "\taddu\t%s, %s, %s\n", thiz, thiz, reg0);
2141 }
2142 else
2143 fprintf (file, "\t%s\t%s, %s, %ld\n",
2144 (delta > 0 ? "addi" : "subi"), thiz, thiz,
2145 (long)(delta > 0 ? delta : -delta));
2146 }
2147
2148 /* If needed, add *(*this_rtx + vcall_offset) to this_rtx. */
2149 if (vcall_offset != 0)
2150 {
2151 fprintf (file, "\tld.w\t%s, (%s, 0)\n", reg0, thiz);
2152
2153 if (vcall_offset > maxoff || vcall_offset < -maxoff)
2154 {
2155 fprintf (file, "\tlrw\t%s, %ld\n", reg1, (long)vcall_offset);
2156 fprintf (file, "\taddu\t%s, %s, %s\n", reg0, reg0, reg1);
2157 }
2158 else
2159 fprintf (file, "\t%s\t%s, %s, %ld\n",
2160 (vcall_offset > 0 ? "addi" : "subi"), reg0, reg0,
2161 (long)(vcall_offset > 0 ? vcall_offset : -vcall_offset));
2162
2163 /* Load the offset and add it to this_rtx */
2164 fprintf (file, "\tld.w\t%s, (%s, 0)\n", reg0, reg0);
2165 fprintf (file, "\taddu\t%s, %s, %s\n", thiz, thiz, reg0);
2166 }
2167
2168 /* We must pop the scratch regs individually instead of using the
2169 "pop" insn, which also does a return. */
2170 if (CSKY_TARGET_ARCH (CK801))
2171 {
2172 if (vcall_offset > maxoff || vcall_offset < -maxoff)
2173 {
2174 fprintf (file, "\tld.w\tl0, (sp, 0)\n");
2175 fprintf (file, "\tld.w\tl1, (sp, 4)\n");
2176 fprintf (file, "\taddi\t sp, sp, 8\n");
2177 }
2178 else if (delta > maxoff || delta < -maxoff)
2179 {
2180 fprintf (file, "\tld.w\tl0, (sp, 0)\n");
2181 fprintf (file, "\taddi\tsp, sp, 4\n");
2182 }
2183 }
2184
2185 fprintf (file, "\tjbr\t");
2186 output_addr_const (file, fnaddr);
2187 fprintf (file, "\n");
2188
2189 final_end_function ();
2190 assemble_end_function (thunk, fnname);
2191 }
2192
2193
/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.
   Conditionally modify five variables fixed_regs, call_used_regs, global_regs,
   reg_names, and reg_class_contents, to take into account any dependence of
   these register sets on target flags.

   CK801 has registers r0-r8 and r13-r15.  CK802 and CK803 have registers
   r0-r15 (the "low" registers).  Other cpus use registers r0-r31 with
   -mhigh-registers, otherwise also only r0-r15.

   CK801 only has 16-bit instructions, most of which can only reference
   r0-r7 (the "mini" registers).  So we mark regs outside that range as
   fixed.  -msmart can be used on other arch variants to force the same
   behavior because it results in smaller code size.

   TODO: investigate whether it's beneficial to use r8-r13 as a spill
   class when TARGET_MINI_REGISTERS instead of making them unusable by
   the register allocator.  */

static void
csky_conditional_register_usage (void)
{
  /* Only use mini registers in smart mode or 801.  */
  if (TARGET_MINI_REGISTERS)
    {
      int i;

      /* Everything above the mini-register range is off limits.  */
      for (i = (CSKY_LAST_MINI_REGNUM + 1); i < 32; i++)
	{
	  fixed_regs[i] = 1;
	  call_used_regs[i] = 1;
	}
    }
  /* For some targets, the high registers are not supported.
     CPUs other than ck801/ck802/ck803 use high registers
     depending on -mhigh-registers option.  */
  else if (CSKY_TARGET_ARCH (CK802)
	   || CSKY_TARGET_ARCH (CK803)
	   || !TARGET_HIGH_REGISTERS)
    {
      int i;

      for (i = CSKY_FIRST_HIGH_REGNUM; i <= CSKY_LAST_HIGH_REGNUM; i++)
	{
	  fixed_regs[i] = 1;
	  call_used_regs[i] = 1;
	}
    }

  /* On CK801/CK802 we must mark lr as a fixed register because it is
     used to implement far jumps.
     FIXME: perhaps there should be a command-line option controlling
     use of lr for far jumps on ck802 when !TARGET_MINI_REGS, when
     you really want lr to be available to the register allocator and
     you know there are no far jumps in the code.  */
  if (CSKY_TARGET_ARCH (CK801) || CSKY_TARGET_ARCH (CK802))
    {
      fixed_regs[CSKY_LR_REGNUM] = 1;
      /* lr is fixed but still callee-preserved for far-jump purposes.  */
      call_used_regs[CSKY_LR_REGNUM] = 0;
    }

  /* The hi/lo registers are only supported in dsp mode.  */
  if (!TARGET_DSP)
    {
      fixed_regs[CSKY_HI_REGNUM] = 1;
      call_used_regs[CSKY_HI_REGNUM] = 1;

      fixed_regs[CSKY_LO_REGNUM] = 1;
      call_used_regs[CSKY_LO_REGNUM] = 1;
    }

  /* The V_REGS are only supported in hard float mode.
     Note this disables the whole range up to the last FPv3 register.  */
  if (!TARGET_HARD_FLOAT)
    {
      int regno;

      for (regno = CSKY_FIRST_VFP_REGNUM;
	   regno <= CSKY_LAST_VFP3_REGNUM; regno++)
	{
	  fixed_regs[regno] = 1;
	  call_used_regs[regno] = 1;
	}
    }

  /* Even with hard float, the FPv3-only registers need FPv3 support.  */
  if (!TARGET_SUPPORT_FPV3)
    {
      int regno;

      for (regno = CSKY_FIRST_VFP3_REGNUM;
	   regno <= CSKY_LAST_VFP3_REGNUM; regno++)
	{
	  fixed_regs[regno] = 1;
	  call_used_regs[regno] = 1;
	}
    }

  /* In pic mode, the gb register is not available for register
     allocation.  Since gb is not clobbered by function
     calls, set its call_used_regs to 0.  */
  if (flag_pic)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0;
    }
}
2298
2299 /* Implement TARGET_HARD_REGNO_NREGS. */
2300
2301 static unsigned int
2302 csky_hard_regno_nregs (unsigned int regno, machine_mode mode)
2303 {
2304 if (regno >= CSKY_FIRST_VFP_REGNUM && !CSKY_TARGET_ARCH (CK803))
2305 return 1;
2306 else
2307 return CSKY_NUM_REGS (mode);
2308 }
2309
/* Implement TARGET_HARD_REGNO_MODE_OK.  Return true if REGNO is a
   valid register for holding a quantity of type MODE.  */

static bool
csky_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  int nregs = CSKY_NUM_REGS (mode);

  /* We can't handle more than doubleword sizes for any register.  */
  if (nregs > 2)
    return false;

  /* For general registers, return true if mode is one word size.
     When the size is larger than one word size, there should
     be two successive hard registers to put the data.  */
  if (regno < CSKY_NGPR_REGS)
    {
      if (nregs < 2)
	return true;
      else if (TARGET_MINI_REGISTERS)
	/* Strict '<': regno+1 must also be a mini register.  */
	return (regno < CSKY_LAST_MINI_REGNUM);
      else if (CSKY_TARGET_ARCH (CK802)
	       || CSKY_TARGET_ARCH (CK803)
	       || !TARGET_HIGH_REGISTERS)
	/* Without high register, r15 cannot hold doubleword data.  */
	return (regno < (CSKY_SP_REGNUM - 1));
      else
	/* With high registers, a doubleword may also start in the
	   lr..high-unfixed range.  */
	return (regno < (CSKY_SP_REGNUM - 1)
		|| (regno >= CSKY_LR_REGNUM
		    && regno < CSKY_LAST_HIGH_UNFIXED_REGNUM));
    }
  else if (regno == CSKY_CC_REGNUM)
    /* The condition-code register holds only CCmode.  */
    return (mode == CCmode);
  else if (regno == CSKY_HI_REGNUM || regno == CSKY_LO_REGNUM)
    {
      /* Don't allocate hi,lo register for float data even
	 if in dsp mode, because it will cause high cost
	 to reload data from hi,lo register.  */
      if (!TARGET_DSP || mode == SFmode || mode == DFmode)
	return false;
      else if (nregs == 2)
	/* A doubleword must start at HI so it spans the HI/LO pair.  */
	return (regno == CSKY_HI_REGNUM);
      else
	return true;
    }
  else if (CSKY_VREG_P (regno) && TARGET_HARD_FLOAT)
    return true;

  return false;
}
2360
2361 /* Implement TARGET_MODES_TIEABLE_P. We can't tie DFmode with other modes
2362 when V_REGs might be in use because those registers mess with the stored
2363 bits. */
2364
2365 static bool
2366 csky_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2367 {
2368 return !(TARGET_HARD_FLOAT
2369 && mode1 != mode2
2370 && (mode1 == DFmode || mode2 == DFmode));
2371 }
2372
2373 /* Implement TARGET_CAN_CHANGE_MODE_CLASS.
2374 V_REG registers can't do subreg as all values are reformatted to
2375 internal precision. */
2376
2377 static bool
2378 csky_can_change_mode_class (machine_mode from,
2379 machine_mode to,
2380 reg_class_t rclass)
2381 {
2382 return (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)
2383 || !reg_classes_intersect_p (V_REGS, rclass));
2384 }
2385
2386 /* Implement TARGET_CLASS_LIKELY_SPILLED_P.
2387 We need to define this for MINI_REGS when we only use r0 - r7.
2388 Otherwise we can end up using r0-r4 for function arguments, and don't
2389 have enough left over to do doubleword arithmetic. */
2390
2391 static bool
2392 csky_class_likely_spilled_p (reg_class_t rclass)
2393 {
2394 if ((TARGET_MINI_REGISTERS && rclass == MINI_REGS)
2395 || rclass == C_REGS)
2396 return true;
2397
2398 return false;
2399 }
2400
2401
2402 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
2403 Given an rtx X being reloaded into a reg required to be
2404 in class CLASS, return the class of reg to actually use.
2405 In general this is just CLASS. */
2406
2407 static reg_class_t
2408 csky_preferred_reload_class (rtx x, reg_class_t rclass)
2409 {
2410 if (TARGET_HARD_FLOAT
2411 && CONST_DOUBLE_P (x)
2412 && (GET_MODE (x) == DFmode || GET_MODE (x) == SFmode)
2413 && rclass == NO_REGS)
2414 return GENERAL_REGS;
2415 return rclass;
2416 }
2417
2418
2419 /* Implement TARGET_CLASS_MAX_NREGS.
2420 Return the maximum number of consecutive registers of class rclass needed
2421 to hold a value of mode mode.
2422 On the csky, this is the size of MODE in words,
2423 except in the FP regs, where a single reg is always enough. */
2424
2425 static unsigned char
2426 csky_class_max_nregs (reg_class_t rclass, machine_mode mode)
2427 {
2428 if (rclass == V_REGS)
2429 return 1;
2430 else
2431 return CSKY_NUM_REGS (mode);
2432 }
2433
2434
/* Implement TARGET_SECONDARY_RELOAD.
   If copying a register of RCLASS from/to X requires an intermediate
   register, the hook should return the REGISTER_CLASS required for this
   intermediate register.
   If no intermediate register is required, it should return NO_REGS.
   If more than one intermediate register is required, describe the one
   that is closest in the copy chain to the reload register.  */

reg_class_t
csky_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
		       reg_class_t rclass,
		       machine_mode mode,
		       secondary_reload_info *sri ATTRIBUTE_UNUSED)
{
  /* -1 means "not a (hard) register"; the CSKY_GENERAL_REGNO_P tests
     below then fail, forcing an intermediate GENERAL_REG.  */
  int regno = -1;

  /* Extract the real regno from X.  */
  if (GET_CODE (x) == SIGN_EXTEND)
    {
      int off = 0;

      /* Look through the extension at the value being extended.  */
      x = XEXP (x, 0);

      /* After register allocation true_regnum resolves pseudos.  */
      if (reg_renumber)
	regno = true_regnum (x);
      else
	{
	  /* Before allocation, peel nested SUBREGs by hand and
	     accumulate their register offsets.  */
	  while (GET_CODE (x) == SUBREG)
	    {
	      off += subreg_regno_offset (REGNO (SUBREG_REG (x)),
					  GET_MODE (SUBREG_REG (x)),
					  SUBREG_BYTE (x), GET_MODE (x));
	      x = SUBREG_REG (x);
	    }

	  if (GET_CODE (x) == REG)
	    regno = REGNO (x) + off;
	}
    }
  else if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  /* We always require a general register when copying anything to
     HI/LO_REGNUM, except when copying an SImode value from HI/LO_REGNUM
     to a general register, or when copying from register 0.  */
  if (rclass == HILO_REGS && !CSKY_GENERAL_REGNO_P (regno))
    return GENERAL_REGS;

  if (rclass == V_REGS && !CSKY_GENERAL_REGNO_P (regno))
    {
      /* Reload between vector reg and memory does not need an
	 intermediate register.  */
      if (MEM_P (x) && (mode == SFmode || mode == DFmode))
	return NO_REGS;
      else
	return GENERAL_REGS;
    }

  return NO_REGS;
}
2495
2496 /* Implement TARGET_SPILL_CLASS.
2497 Try spilling to a larger register class before spilling to memory. */
2498
2499 static reg_class_t
2500 csky_spill_class (reg_class_t rclass, machine_mode mode ATTRIBUTE_UNUSED)
2501 {
2502 if ((rclass == MINI_REGS && !TARGET_MINI_REGISTERS)
2503 || (rclass == LOW_REGS && TARGET_HIGH_REGISTERS))
2504 return GENERAL_REGS;
2505 return NO_REGS;
2506 }
2507
2508 /* Convert a static initializer array of feature bits to sbitmap
2509 representation. */
2510
2511 static void
2512 csky_initialize_isa (sbitmap isa, const enum csky_isa_feature *isa_bits)
2513 {
2514 bitmap_clear (isa);
2515 while (*isa_bits != CSKY_ISA_FEATURE_GET (none))
2516 bitmap_set_bit (isa, *(isa_bits++));
2517 }
2518
2519
/* Configure a build target TARGET from the user-specified options OPTS and
   OPTS_SET.  Resolves -mcpu=/-march= into a CPU, an arch, a tune entry,
   and the union of their ISA feature bits.  */

static void
csky_configure_build_target (struct csky_build_target *target,
			     struct cl_target_option *opts,
			     struct gcc_options *opts_set)
{
  const struct csky_processors *csky_selected_tune = NULL;
  struct csky_processors *csky_selected_cpu = NULL;
  struct csky_processors *csky_selected_arch = NULL;
  /* Scratch bitmap collecting the arch's ISA bits; freed on exit.  */
  sbitmap all_sbits = sbitmap_alloc (CSKY_ISA_FEATURE_GET (max));
  bitmap_clear (all_sbits);

  bitmap_clear (target->isa);
  target->core_name = NULL;
  target->arch_name = NULL;

  /* Pick up explicit -march= and -mcpu= selections, if present.  */
  if (opts_set->x_csky_arch_option)
    csky_selected_arch = &all_architectures[opts->x_csky_arch_option];

  if (opts_set->x_csky_cpu_option)
    {
      csky_selected_cpu = &all_cores[opts->x_csky_cpu_option];
      csky_selected_tune = &all_cores[opts->x_csky_cpu_option];
    }

  /* -mcpu= wins over -march=; a mismatched -march= is diagnosed and
     replaced by the CPU's own base arch.  */
  if (csky_selected_cpu)
    {
      /* TODO: support combination of features
	 between different cpu & arch, should based on arch.  */
      if (csky_selected_arch
	  && (csky_selected_cpu->base_arch != csky_selected_arch->base_arch))
	warning (0, "cpu %s is not based on arch %s, ignoring the arch",
		 csky_selected_cpu->name, csky_selected_arch->name);
      if (!csky_selected_arch)
	csky_selected_arch = &all_architectures[csky_selected_cpu->base_arch];
      csky_initialize_isa (all_sbits, csky_selected_arch->isa_bits);
      target->core_name = csky_selected_cpu->name;
    }
  else if (csky_selected_arch)
    {
      /* Arch only: use the arch entry as the CPU as well.  */
      csky_selected_cpu = csky_selected_arch;
      target->arch_name = csky_selected_arch->name;
    }
  else /* If the user did not specify a processor, choose one for them.  */
    {
      csky_selected_cpu = &all_cores[TARGET_CPU_DEFAULT];
      csky_selected_arch = &all_architectures[csky_selected_cpu->base_arch];
      csky_initialize_isa (all_sbits, csky_selected_arch->isa_bits);
      target->core_name = csky_selected_cpu->name;
    }

  /* The selected cpu may be an architecture, so lookup tuning by core ID.  */
  if (!csky_selected_tune)
    csky_selected_tune = &all_cores[csky_selected_cpu->core];
  gcc_assert (csky_selected_tune);

  gcc_assert (csky_selected_arch);
  gcc_assert (csky_selected_cpu);
  /* The final ISA is the union of the CPU's and the arch's bits.  */
  csky_initialize_isa (target->isa, csky_selected_cpu->isa_bits);
  bitmap_ior (target->isa, target->isa, all_sbits);

  /* Finish initializing the target structure.  */
  target->arch_pp_name = csky_selected_cpu->arch;
  target->base_arch = csky_selected_cpu->base_arch;
  target->arch_core = csky_selected_cpu->core;

  sbitmap_free (all_sbits);
}
2590
2591
/* Implement TARGET_OPTION_OVERRIDE.
   Validate and reconcile all command-line options after parsing:
   resolve the build target, check FPU/PIC/register-set options for
   consistency, and fill in -1 "unset" option defaults.  */

static void
csky_option_override (void)
{
  csky_active_target.isa = sbitmap_alloc (CSKY_ISA_FEATURE_GET (max));

  /* Create the default target_options structure.  We need this early
     to configure the overall build target.  */
  target_option_default_node = target_option_current_node
    = build_target_option_node (&global_options, &global_options_set);

  csky_configure_build_target (&csky_active_target,
			       TREE_TARGET_OPTION (target_option_default_node),
			       &global_options_set);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  csky_base_arch = csky_active_target.base_arch;

  /* PIC is only supported on these cores; silently-requested PIC on
     other cores is turned off with a warning.  */
  if (flag_pic && !(CSKY_TARGET_ARCH (CK807)
		    || CSKY_TARGET_ARCH (CK810)
		    || CSKY_TARGET_ARCH (CK860)))
    {
      flag_pic = 0;
      warning (0, "%qs is not supported by arch %s",
	       "-fPIC", csky_active_target.arch_pp_name);
    }

  /* Check floating-point options for consistency.  */
  if (TARGET_HARD_FLOAT)
    {
      const struct csky_fpu_desc *csky_selected_fpu = NULL;

      if (csky_fpu_index == TARGET_FPU_auto)
	{
	  const char *target_fpu_name;
	  bool ok;
	  int fpu_index;

	  /* Pick a default -mfpu= value from the CPU/arch selection;
	     a core name without 'f' in it gets "auto".  */
	  if (csky_active_target.core_name != NULL
	      && !strchr (csky_active_target.core_name, 'f'))
	    target_fpu_name = "auto";
	  else if (CSKY_TARGET_ARCH (CK803) || !TARGET_DOUBLE_FLOAT)
	    target_fpu_name = "fpv2_sf";
	  else if (CSKY_TARGET_ARCH (CK860))
	    target_fpu_name = "fpv3";
	  else if (TARGET_DOUBLE_FLOAT && TARGET_FDIVDU)
	    target_fpu_name = "fpv2_divd";
	  else
#ifdef CSKY_FPUTYPE_DEFAULT
	    target_fpu_name = CSKY_FPUTYPE_DEFAULT;
#else
	    target_fpu_name = "fpv2";
#endif

	  ok = opt_enum_arg_to_value (OPT_mfpu_, target_fpu_name, &fpu_index,
				      CL_TARGET);
	  gcc_assert (ok);
	  csky_fpu_index = (enum csky_fpu_type) fpu_index;
	}

      if (CSKY_TARGET_ARCH (CK801) || CSKY_TARGET_ARCH (CK802))
	error ("%qs is not supported by arch %s",
	       "-mhard-float", csky_active_target.arch_pp_name);
      else if (csky_fpu_index == TARGET_FPU_auto)
	error ("%<-mhard-float%> is not supported by the selected CPU");
      else
	{
	  csky_selected_fpu = &all_fpus[csky_fpu_index];
	  sbitmap fpu_bits = sbitmap_alloc (CSKY_ISA_FEATURE_GET (max));
	  csky_initialize_isa (fpu_bits, csky_selected_fpu->isa_bits);

	  /* Fold the FPU's feature bits into the active target ISA.  */
	  bitmap_ior (csky_active_target.isa, csky_active_target.isa,
		      fpu_bits);

	  sbitmap_free (fpu_bits);
	}
    }
  else
    {
      /* Soft float: FP-only suboptions are meaningless; warn if they
	 were given explicitly, then clear them.  */
      if (TARGET_DOUBLE_FLOAT > 0)
	warning (0, "%<-mdouble-float%> ignored without %<-mhard-float%>");
      TARGET_DOUBLE_FLOAT = 0;
      if (TARGET_FDIVDU > 0)
	warning (0, "%<-mfdivdu%> ignored without %<-mhard-float%>");
      TARGET_FDIVDU = 0;
    }

  /* Initialize boolean versions of the architectural flags, for use
     in the .md file.  */

#undef CSKY_ISA
#define CSKY_ISA(IDENT, DESC)						  \
  {									  \
    csky_arch_isa_features[CSKY_ISA_FEATURE_GET (IDENT)] =		  \
      bitmap_bit_p (csky_active_target.isa, CSKY_ISA_FEATURE_GET (IDENT)); \
  }
#include "csky_isa.def"
#undef CSKY_ISA

  /* Extended LRW instructions are enabled by default on CK801, disabled
     otherwise.  */
  if (TARGET_ELRW == -1)
    TARGET_ELRW = CSKY_TARGET_ARCH (CK801);

  /* DSP is enabled either by the processor feature or -mdsp
     command-line option.  There is no -mno-dsp option as the assembler
     doesn't take one.  */
  if (!TARGET_DSP)
    TARGET_DSP = CSKY_ISA_FEATURE (dsp);

  /* There's both -mdiv and -mno-div.  Take default from processor if
     neither is specified explicitly.  */
  if (TARGET_DIV == -1)
    TARGET_DIV = CSKY_ISA_FEATURE (div);

  /* TARGET_CONSTANT_POOL is mandatory for CK801 and CK802 and optional
     for other CPUs.
     The reason why the compiler has to generate constant pools for CK801/2
     instead of deferring to the assembler is that these cores don't have a
     long branch instruction other than jbsr, which clobbers lr.  So for
     the compiler to correctly save/restore lr it has to know whether there
     are long branches, which depends on having accurate branch length
     counts, which in turn depends on having control over where constant
     pools are placed.  */
  if ((CSKY_TARGET_ARCH (CK801) || CSKY_TARGET_ARCH (CK802))
      && !TARGET_CONSTANT_POOL)
    error ("%qs is not supported by arch %s",
	   "-mno-constpool", csky_active_target.arch_pp_name);
  else if (TARGET_CONSTANT_POOL == -1)
    TARGET_CONSTANT_POOL = (CSKY_TARGET_ARCH (CK801)
			    || CSKY_TARGET_ARCH (CK802));

  /* TARGET_MINI_REGISTERS is mandatory for CK801, the default for CK802,
     and optional for other CPUs.  TARGET_HIGH_REGISTERS is incompatible
     with TARGET_MINI_REGISTERS, is not supported by CK801/802/803,
     and is the default for other processors.
     See csky_conditional_register_usage.  */
  if (TARGET_MINI_REGISTERS > 0 && TARGET_HIGH_REGISTERS > 0)
    error ("%<-msmart%> is incompatible with %<-mhigh-registers%>");
  else if (CSKY_TARGET_ARCH (CK801)
	   || CSKY_TARGET_ARCH (CK802)
	   || CSKY_TARGET_ARCH (CK803))
    {
      if (CSKY_TARGET_ARCH (CK801)
	  || (CSKY_TARGET_ARCH (CK802) && TARGET_MINI_REGISTERS == -1))
	TARGET_MINI_REGISTERS = 1;
      else if (TARGET_MINI_REGISTERS == -1)
	TARGET_MINI_REGISTERS = 0;
      if (TARGET_HIGH_REGISTERS > 0)
	warning (0, "%qs is not supported by arch %s",
		 "-mhigh-registers", csky_active_target.arch_pp_name);
      TARGET_HIGH_REGISTERS = 0;
    }
  else
    {
      if (TARGET_MINI_REGISTERS == -1)
	TARGET_MINI_REGISTERS = 0;
      if (TARGET_HIGH_REGISTERS == -1)
	TARGET_HIGH_REGISTERS = !TARGET_MINI_REGISTERS;
    }

  /* -mmultiple-stld is the default for everything but CK801, which
     doesn't support it.  */
  if (CSKY_TARGET_ARCH (CK801))
    {
      if (TARGET_MULTIPLE_STLD > 0)
	warning (0, "%qs is not supported by arch %s",
		 "-mmultiple-stld", csky_active_target.arch_pp_name);
      TARGET_MULTIPLE_STLD = 0;
    }

  /* TODO */

  /* Resynchronize the saved target options.  */
  cl_target_option_save (TREE_TARGET_OPTION (target_option_default_node),
			 &global_options, &global_options_set);

#ifdef ENABLE_TPF_DEBUG
  /* Don't emit DWARF4 unless specifically selected.  The TPF
     debuggers do not yet support DWARF 3/4.  */
  /* NOTE(review): TPF is an S/390 environment; this block looks copied
     from another backend -- confirm it is intentional for C-SKY.  */
  if (!global_options_set.x_dwarf_strict)
    dwarf_strict = 1;
  if (!global_options_set.x_dwarf_version)
    dwarf_version = 3;
#endif

  /* Don't run the scheduler before reload by default,
     since it tends to increase register pressure.  */
  if (!global_options_set.x_flag_schedule_insns)
    flag_schedule_insns = 0;

  csky_add_gc_roots ();
}
2789
2790
2791 /* Return TRUE if X contains any references to TLS symbols. */
2792
2793 bool
2794 csky_tls_referenced_p (rtx x)
2795 {
2796 if (!TARGET_TLS)
2797 return false;
2798
2799 subrtx_iterator::array_type array;
2800 FOR_EACH_SUBRTX (iter, array, x, ALL)
2801 {
2802 const_rtx x = *iter;
2803 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2804 return true;
2805
2806 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
2807 TLS offsets, not real symbol references. */
2808 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
2809 iter.skip_subrtxes ();
2810 }
2811 return false;
2812 }
2813
2814
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.
   Determine if it's legal to put X into the constant pool.  This
   is not possible for the address of thread-local symbols, which
   is checked above.  */

static bool
csky_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED,
			     rtx x)
{
  /* TLS addresses need runtime resolution, so they can never live in
     the link-time constant pool.  */
  return csky_tls_referenced_p (x);
}
2826
2827
2828 /* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns nonzero if the
2829 constant value X is a legitimate general operand.
2830 It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2831
2832 static bool
2833 csky_legitimate_constant_p (machine_mode mode, rtx x)
2834 {
2835 return (!csky_cannot_force_const_mem (mode, x)
2836 && CONSTANT_P (x));
2837 }
2838
2839
2840 /* Return true if X is valid as an CSKY addressing register. */
2841
2842 static bool
2843 is_csky_address_register_rtx_p (rtx x, int strict_p)
2844 {
2845 int regno;
2846
2847 if (!x)
2848 return false;
2849 if (!REG_P (x))
2850 return false;
2851
2852 regno = REGNO (x);
2853
2854 if (strict_p)
2855 return (CSKY_GENERAL_REGNO_P (regno)
2856 || CSKY_GENERAL_REGNO_P (reg_renumber[regno]));
2857 else
2858 return CSKY_GENERAL_REGNO_P (regno) || regno >= FIRST_PSEUDO_REGISTER;
2859 }
2860
2861
2862 /* Return TRUE if X is a thread-local symbol. */
2863
2864 static bool
2865 csky_tls_symbol_p (rtx x)
2866 {
2867 if (!TARGET_TLS)
2868 return false;
2869
2870 if (GET_CODE (x) != SYMBOL_REF)
2871 return false;
2872
2873 return SYMBOL_REF_TLS_MODEL (x) != 0;
2874 }
2875
2876
2877 /* Handle lazy initialization of __tls_get_addr libfunc. */
2878 static GTY(()) rtx tls_get_addr_libfunc;
2879
2880 static rtx
2881 get_tls_get_addr (void)
2882 {
2883 if (!tls_get_addr_libfunc)
2884 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
2885 return tls_get_addr_libfunc;
2886 }
2887
2888
/* Emit a call to __tls_get_addr.
   X is the TLS symbol, REG a scratch register, RELOC the TLS relocation
   kind (e.g. TLS_GD32/TLS_LDM32).  The call's result rtx is stored in
   *VALUEP and the whole emitted sequence is returned.  */

static rtx_insn *
csky_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
{
  rtx label, labelno, unspec, tmp;
  rtx_insn *insns;

  start_sequence ();

  /* Each TLS access gets a unique label number for its PC-relative
     anchor.  */
  labelno = GEN_INT (tls_labelno++);
  label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_TLS_LABEL);
  unspec = gen_rtx_UNSPEC (Pmode,
			   gen_rtvec (3, x, GEN_INT (reloc), label),
			   UNSPEC_TLS);
  tmp = gen_reg_rtx (SImode);
  /* reg = TLS unspec; tmp = label address; reg += tmp forms the
     argument passed to __tls_get_addr.  */
  emit_move_insn (reg, unspec);
  emit_move_insn (tmp, label);
  emit_insn (gen_addsi3 (reg, reg, tmp));
  *valuep = emit_library_call_value (get_tls_get_addr (),
				     NULL_RTX, LCT_PURE, /* LCT_CONST?  */
				     Pmode, reg, Pmode);
  insns = get_insns ();
  end_sequence ();
  return insns;
}
2915
/* Helper function for csky_legitimize_address, to handle the TLS cases.
   REG is a scratch register and may be null.  Returns an rtx computing
   the address of X according to its TLS access model.  */

rtx
csky_legitimize_tls_address (rtx x, rtx reg)
{
  rtx dest, tp, label, labelno, unspec, ret, eqv, addend, tmp;
  rtx_insn *insns;
  unsigned int model = SYMBOL_REF_TLS_MODEL (x);

  if (!reg)
    reg = gen_reg_rtx (SImode);

  switch (model)
    {
    /* General dynamic: one __tls_get_addr call per symbol.  */
    case TLS_MODEL_GLOBAL_DYNAMIC:
      insns = csky_call_tls_get_addr (x, reg, &ret, TLS_GD32);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, dest, ret, x);
      return dest;

    /* Local dynamic: one __tls_get_addr call for the module base,
       plus a per-symbol addend.  */
    case TLS_MODEL_LOCAL_DYNAMIC:
      insns = csky_call_tls_get_addr (x, reg, &ret, TLS_LDM32);

      /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	 share the LDM result with other LD model accesses.  */
      eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_TLS);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, dest, ret, eqv);

      /* Load the addend.  */
      addend = gen_rtx_UNSPEC (Pmode,
			       gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
			       UNSPEC_TLS);
      addend = force_reg (SImode, addend);
      return gen_rtx_PLUS (Pmode, dest, addend);

    /* Initial exec: load the symbol's TP offset through a GOT slot
       and add the thread pointer.  */
    case TLS_MODEL_INITIAL_EXEC:
      labelno = GEN_INT (tls_labelno++);
      label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_TLS_LABEL);
      unspec = gen_rtx_UNSPEC (Pmode,
			       gen_rtvec (3, x, GEN_INT (TLS_IE32), label),
			       UNSPEC_TLS);
      tmp = gen_reg_rtx (SImode);
      emit_move_insn (reg, unspec);
      emit_move_insn (tmp, label);
      emit_insn (gen_addsi3 (reg, reg, tmp));
      emit_move_insn (reg, gen_const_mem (Pmode, reg));
      tp = gen_rtx_REG (SImode, CSKY_TLS_REGNUM);
      return gen_rtx_PLUS (Pmode, tp, reg);

    /* Local exec: the TP offset is a link-time constant added
       directly to the thread pointer.  */
    case TLS_MODEL_LOCAL_EXEC:
      unspec = gen_rtx_UNSPEC (Pmode,
			       gen_rtvec (2, x, GEN_INT (TLS_LE32)),
			       UNSPEC_TLS);
      emit_move_insn (reg, unspec);
      tp = gen_rtx_REG (SImode, CSKY_TLS_REGNUM);
      return gen_rtx_PLUS (Pmode, tp, reg);

    default:
      abort ();
    }
}
2979
2980
/* Implement TARGET_LEGITIMIZE_ADDRESS.
   Try to turn an invalid address X for MODE into a valid one, mainly
   by splitting large constant offsets so that short ld/st encodings
   can be used.  */

static rtx
csky_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
  /* TLS symbols need their own materialization sequence.  */
  if (csky_tls_symbol_p (x))
    return csky_legitimize_tls_address (x, NULL_RTX);

  if (GET_CODE (x) == PLUS)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);

      if (is_csky_address_register_rtx_p (xop0, 0)
	  && CONST_INT_P (xop1))
	{
	  HOST_WIDE_INT offset = INTVAL (xop1);

	  /* Try to replace ld32 rx,(ry, offset), to addi16 rz, oimm8
	     and ld16 rx,(rz, new_ld_offset) to avoid emitting a
	     32-bit ld, but this addi has a range limitation.  */
	  if (optimize_size
	      && offset > CSKY_LD16_MAX_OFFSET (mode)
	      && offset <= (CSKY_ADDI16_MAX_IMM
			    + CSKY_LD16_MAX_OFFSET (mode)))
	    {
	      /* Keep the low bits as the ld16 offset; fold the rest
		 into the base via addi16.  */
	      HOST_WIDE_INT new_ld_offset
		= offset & CSKY_LD16_OFFSET_MASK (mode);

	      xop0 = force_operand (plus_constant (Pmode, xop0,
						   offset - new_ld_offset),
				    NULL_RTX);
	      x = plus_constant (Pmode, xop0, new_ld_offset);
	    }
	  else if (offset < 0 && offset >= (-CSKY_SUBI16_MAX_IMM))
	    /* Small negative offsets can be folded with subi16.  */
	    x = force_operand (x, NULL_RTX);
	  else if (offset > CSKY_LD16_MAX_OFFSET (mode)
		   || offset < 0)
	    {
	      /* For the remaining cases, force the constant into a
		 register.  */
	      xop1 = force_reg (SImode, xop1);
	      x = gen_rtx_PLUS (SImode, xop0, xop1);
	    }
	}

      /* If the index is store in register, force the
	 base to register.  */
      if (is_csky_address_register_rtx_p (xop1, 0)
	  && !is_csky_address_register_rtx_p (xop0, 0))
	{
	  xop0 = force_operand (xop0, NULL_RTX);
	  x = gen_rtx_PLUS (SImode, xop0, xop1);
	}
    }
  /* Make sure to take full advantage of the pre-indexed addressing mode
     with absolute addresses which often allows for the base register to
     be factorized for multiple adjacent memory references, and it might
     even allows for the mini pool to be avoided entirely.  */
  else if (CONST_INT_P (x) && optimize > 0)
    {
      HOST_WIDE_INT mask, base, index;
      rtx base_reg;

      /* Split the absolute address into a register base (high bits)
	 and a short in-range index (low bits).  */
      mask = CSKY_LD16_OFFSET_MASK (mode);
      base = INTVAL (x) & ~mask;
      index = INTVAL (x) & mask;
      base_reg = force_reg (SImode, GEN_INT (base));
      x = plus_constant (Pmode, base_reg, index);
    }

  return x;
}
3055
3056
/* Return nonzero if INDEX is valid for an address index operand.
   ck801 use 16 bits ld
   ck802 use 16 and 32 bits ld
   others use ld and ldr.  */

static int
ck801_legitimate_index_p (machine_mode mode, rtx index,
			  int strict_p ATTRIBUTE_UNUSED)
{
  enum rtx_code code = GET_CODE (index);

  /* When the mode size is larger than 4, we may use two ld instruction
     to get data, the index and (index+1) should be valid.  */
  if (GET_MODE_SIZE (mode) >= 8)
    /* The strict '<' (not '<=') leaves room for the second word's
       offset at index+4; assumes CSKY_LD16_MAX_OFFSET (SImode) is
       word-aligned -- TODO confirm against its definition.  */
    return (code == CONST_INT
	    && INTVAL (index) < CSKY_LD16_MAX_OFFSET (SImode)
	    && INTVAL (index) >= 0 && (INTVAL (index) & 3) == 0);

  /* Single-access case: a non-negative offset within the 16-bit ld
     range that is a multiple of the access size.  */
  if (code == CONST_INT && GET_MODE_SIZE (mode) > 0
      && INTVAL (index) <= CSKY_LD16_MAX_OFFSET (mode)
      && INTVAL (index) >= 0)
    return ((INTVAL (index) % GET_MODE_SIZE (mode)) == 0);

  return 0;
}
3082
3083
/* Like ck801_legitimate_index_p, but for CK802, which also has 32-bit
   ld/st encodings and hence a wider offset range.  */

static int
ck802_legitimate_index_p (machine_mode mode, rtx index,
			  int strict_p ATTRIBUTE_UNUSED)
{
  enum rtx_code code = GET_CODE (index);

  /* When the mode size is larger than 4, we may use two ld instruction
     to get data, the index and (index+1) should be valid.  */
  if (GET_MODE_SIZE (mode) >= 8)
    /* Strict '<' leaves room for the second word at index+4; assumes
       the max offset is word-aligned -- TODO confirm.  */
    return (code == CONST_INT
	    && INTVAL (index) < CSKY_LD32_MAX_OFFSET (SImode)
	    && INTVAL (index) >= 0 && (INTVAL (index) & 3) == 0);

  /* Single-access case: non-negative, within the 32-bit ld range, and
     a multiple of the access size.  */
  if (code == CONST_INT && GET_MODE_SIZE (mode) > 0
      && INTVAL (index) <= CSKY_LD32_MAX_OFFSET (mode)
      && INTVAL (index) >= 0)
    return ((INTVAL (index) % GET_MODE_SIZE (mode)) == 0);

  return 0;
}
3104
3105
3106 /* The instruction ldr rz, (rx, ry << i), i can be 0,1,2,3.
3107 Check that SHIFT is valid, that the code is MULT, and that
3108 the shift is a power of 2. */
3109
3110 static bool
3111 is_ldr_shift_p (HOST_WIDE_INT shift, enum rtx_code code)
3112 {
3113 if (code == ASHIFT)
3114 return (shift >= 0 && shift <= 3);
3115 else if (code == MULT)
3116 return (shift == 1
3117 || shift == 2
3118 || shift == 4
3119 || shift == 8);
3120 else
3121 return false;
3122 }
3123
3124
/* Return nonzero if INDEX is a legitimate index for a memory operand
   of mode MODE on CK810-class cores, which support constant offsets,
   GOT/PLT unspec offsets, and register (possibly scaled) indexes.  */

static int
ck810_legitimate_index_p (machine_mode mode, rtx index, int strict_p)
{
  enum rtx_code code = GET_CODE (index);

  /* FPU loads/stores accept word-aligned offsets below 1024.  */
  if (code == CONST_INT && TARGET_HARD_FLOAT && CSKY_VREG_MODE_P (mode))
    return (INTVAL (index) < 1024 && INTVAL (index) >= 0
	    && (INTVAL (index) & 3) == 0);

  if (code == CONST_INT)
    {
      /* When the mode size is larger than 4, we may use two ld instruction
	 to get data, the index and (index+1) should be valid.  */
      if (GET_MODE_SIZE (mode) >= 8)
	return (INTVAL (index) < CSKY_LD32_MAX_OFFSET (SImode)
		&& INTVAL (index) >= 0 && (INTVAL (index) & 3) == 0);

      /* Naturally-aligned offset within the ld32 range; the size
	 check guards the modulo below.  */
      if (GET_MODE_SIZE (mode) > 0
	  && INTVAL (index) <= CSKY_LD32_MAX_OFFSET (mode)
	  && INTVAL (index) >= 0)
	return ((INTVAL (index) % GET_MODE_SIZE (mode)) == 0);
    }
  /* Allow ld.w rx, (gb, sym@got) when -fpic specially.  */
  else if (code == UNSPEC)
    return (flag_pic == 1
	    && (XINT (index, 1) == UNSPEC_PIC_SYMBOL_PLT
		|| XINT (index, 1) == UNSPEC_PIC_SYMBOL_GOT));
  /* The follow index is for ldr instruction, the ldr cannot
     load dword data, so the mode size should not be larger than
     4.  */
  else if (GET_MODE_SIZE (mode) <= 4
	   || (TARGET_HARD_FLOAT && CSKY_VREG_MODE_P (mode)))
    {
      /* Plain register index: ldr rz, (rx, ry << 0).  */
      if (is_csky_address_register_rtx_p (index, strict_p))
	return 1;
      else if (code == MULT || code == ASHIFT)
	{
	  rtx xiop0 = XEXP (index, 0);
	  rtx xiop1 = XEXP (index, 1);

	  /* FIXME can the xiop1 be the reg and xiop0 be the int when mult?  */
	  return (is_csky_address_register_rtx_p (xiop0, strict_p)
		  && CONST_INT_P (xiop1)
		  && is_ldr_shift_p (INTVAL (xiop1), code));
	}
    }

  return 0;
}
3174
3175
3176 static int
3177 csky_legitimate_index_p (machine_mode mode, rtx index, int strict_p)
3178 {
3179 if (CSKY_TARGET_ARCH (CK801))
3180 return ck801_legitimate_index_p (mode, index, strict_p);
3181 else if (CSKY_TARGET_ARCH (CK802))
3182 return ck802_legitimate_index_p (mode, index, strict_p);
3183 else
3184 return ck810_legitimate_index_p (mode, index, strict_p);
3185 }
3186
3187
3188 /* Implement TARGET_LEGITIMATE_ADDRESS_P.
3189 Recognizes RTL expressions that are valid memory addresses for an
3190 instruction. The MODE argument is the machine mode for the MEM
3191 expression that wants to use this address.
3192
3193 It only recognizes address in canonical form. LEGITIMIZE_ADDRESS should
3194 convert common non-canonical forms to canonical form so that they will
3195 be recognized. */
3196
3197 static bool
3198 csky_legitimate_address_p (machine_mode mode, rtx addr, bool strict_p)
3199 {
3200 enum rtx_code code = GET_CODE (addr);
3201
3202 /* Match the RTX form emitted for constant pool references.
3203 After reload constants split into minipools will have addresses
3204 from a LABEL_REF. */
3205 if (reload_completed
3206 && ((code == LABEL_REF)
3207 || (code == CONST
3208 && GET_CODE (XEXP (addr, 0)) == PLUS
3209 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == LABEL_REF
3210 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))))
3211 return 1;
3212
3213 if (is_csky_address_register_rtx_p (addr, strict_p))
3214 return 1;
3215 /* It is a pc-relative load, may be generated for constpool. */
3216 else if (GET_CODE (addr) == LABEL_REF)
3217 return 1;
3218
3219 if (code == PLUS)
3220 {
3221 rtx xop0 = XEXP (addr, 0);
3222 rtx xop1 = XEXP (addr, 1);
3223
3224 return ((is_csky_address_register_rtx_p (xop0, strict_p)
3225 && csky_legitimate_index_p (mode, xop1, strict_p))
3226 || (is_csky_address_register_rtx_p (xop1, strict_p)
3227 && csky_legitimate_index_p (mode, xop0, strict_p)));
3228 }
3229
3230 return 0;
3231 }
3232
3233
3234 /* Functions to save and restore machine-specific function data. */
3235
3236 static struct machine_function *
3237 csky_init_machine_status (void)
3238 {
3239 struct machine_function *machine;
3240
3241 machine = ggc_cleared_alloc<machine_function> ();
3242
3243 #if CSKY_FT_UNKNOWN != 0
3244 machine->func_type = CSKY_FT_UNKNOWN;
3245 #endif
3246 return machine;
3247 }
3248
3249
/* Implement INIT_EXPANDERS.  */

void
csky_init_expanders (void)
{
  /* Arrange to initialize and mark the machine per-function status
     via csky_init_machine_status.  */
  init_machine_status = csky_init_machine_status;
}
3258
3259
3260 /* Implement TARGET_CANNOT_COPY_INSN_P.
3261 We must not copy any rtx that uses a pc-relative address. */
3262
3263 static bool
3264 csky_cannot_copy_insn_p (rtx_insn *insn)
3265 {
3266 subrtx_iterator::array_type array;
3267 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), ALL)
3268 {
3269 const_rtx x = *iter;
3270 if (GET_CODE (x) == UNSPEC
3271 && (XINT (x, 1) == UNSPEC_TLS_LABEL
3272 || XINT (x, 1) == UNSPEC_PIC_SYMBOL_GOTPC_GRS))
3273 return true;
3274 }
3275 return false;
3276 }
3277
3278
/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return FALSE if it is an invalid memory address.  */
3281
/* Decomposed form of a memory address, filled in by
   decompose_csky_address below.  Parts that are absent are NULL_RTX
   (or 0 for scale).  */

struct csky_address
{
  /* Base register, index register, symbol ref, constpool label ref,
     and constant/unspec displacement parts of the address.  */
  rtx base, index, symbol, label, disp;
  /* Multiplier applied to INDEX (1 when the index is unscaled).  */
  HOST_WIDE_INT scale;
};
3287
/* Decompose the address ADDR into base/index/label/displacement parts,
   storing them in *OUT.  Return false if ADDR is not a memory address
   this backend understands.  */

static bool
decompose_csky_address (rtx addr, struct csky_address *out)
{
  rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
  HOST_WIDE_INT scale = 0;
  rtx scale_rtx = NULL_RTX;
  int i;

  /* Start with all parts absent.  */
  out->base = out->index = out->symbol = out->label = out->disp = NULL_RTX;
  out->scale = 0;

  /* A bare register is just a base.  */
  if (REG_P (addr))
    {
      out->base = addr;
      return true;
    }

  /* A bare label, e.g. a constant pool reference.  */
  if (GET_CODE (addr) == LABEL_REF)
    {
      out->label = addr;
      return true;
    }

  /* Strip a CONST wrapper to expose a PLUS inside.  */
  if (GET_CODE (addr) == CONST)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS)
    {
      rtx addends[2], op;

      addends[0] = XEXP (addr, 0);
      addends[1] = XEXP (addr, 1);

      /* label + constant displacement.  */
      if (GET_CODE (addends[0]) == LABEL_REF && CONST_INT_P (addends[1]))
	{
	  out->label = addends[0];
	  out->disp = addends[1];
	  return true;
	}

      /* Look at a register addend first so it becomes the base.  */
      if (!REG_P (addends[0]))
	std::swap (addends[0], addends[1]);

      for (i = 0; i < 2; ++i)
	{
	  op = addends[i];
	  switch (GET_CODE (op))
	    {
	    case REG:
	      /* First register seen is the base, a second one the
		 (unscaled) index; a third is invalid.  */
	      if (!base)
		base = op;
	      else if (!index)
		{
		  index = op;
		  scale = 1;
		}
	      else
		return false;
	      break;
	    case CONST_INT:
	    case UNSPEC:
	      /* At most one displacement part is allowed.  */
	      if (disp)
		return false;
	      disp = op;
	      break;
	    case MULT:
	      /* reg * scale, with the operands in either order.  */
	      if (index)
		return false;
	      index = XEXP (op, 0);
	      scale_rtx = XEXP (op, 1);
	      if (!CONST_INT_P (index) && !CONST_INT_P (scale_rtx))
		return false;
	      else if (CONST_INT_P (index))
		std::swap (index, scale_rtx);
	      scale = INTVAL (scale_rtx);
	      break;
	    case ASHIFT:
	      /* reg << n, i.e. a scale of 1 << n.  */
	      if (index)
		return false;
	      index = XEXP (op, 0);
	      scale_rtx = XEXP (op, 1);
	      if (!CONST_INT_P (scale_rtx))
		return false;
	      scale = 1 << INTVAL (scale_rtx);
	      break;
	    default:
	      return false;
	    }
	}
    }

  /* Anything decomposed here must have had a base register.  */
  if (!base)
    return false;

  out->base = base;
  out->index = index;
  out->disp = disp;
  out->scale = scale;

  return true;
}
3389
3390 /* Helper function for the csky_simple_mem_operand predicate. Returns
3391 true if OP is an address of the form reg + displacement. */
3392
3393 bool
3394 csky_simple_addr_operand_p (rtx op)
3395 {
3396 struct csky_address addr;
3397
3398 if (!decompose_csky_address (op, &addr))
3399 return false;
3400
3401 /* FIXME The PIC related code.
3402 Check if load the symbol address from got table. */
3403 if (addr.disp && GET_CODE (addr.disp) == UNSPEC)
3404 return false;
3405 if (!addr.index && !addr.symbol)
3406 return true;
3407 return false;
3408 }
3409
3410
3411 /* Print the UNSPEC operand in X to the STREAM. */
3412
3413 static void
3414 csky_output_pic_addr_const (FILE *stream, rtx x, int code)
3415 {
3416
3417 if (GET_CODE (x) != UNSPEC)
3418 return;
3419
3420 if (UNSPEC_TLS == XINT (x, 1))
3421 {
3422 /* FIXME It is not reached */
3423 return;
3424 }
3425
3426 csky_print_operand (stream, XVECEXP (x, 0, 0), code);
3427
3428 switch (XINT (x, 1))
3429 {
3430 case UNSPEC_PIC_SYMBOL_GOTOFF:
3431 fputs ("@GOTOFF", stream);
3432 break;
3433 case UNSPEC_PIC_SYMBOL_PLT:
3434 fputs ("@PLT", stream);
3435 break;
3436 case UNSPEC_PIC_SYMBOL_GOT:
3437 fputs ("@GOT", stream);
3438 break;
3439 case UNSPEC_PIC_SYMBOL_GOTPC:
3440 fputs ("@GOTPC", stream);
3441 break;
3442 case UNSPEC_PIC_SYMBOL_BSR:
3443 break;
3444 default:
3445 break;
3446 }
3447 }
3448
3449
3450 /* Output the constpool label according to the rtx expression X. */
3451
3452 static void
3453 csky_output_constpool_label (FILE *stream, rtx x)
3454 {
3455 char buf[15];
3456
3457 gcc_assert (GET_CODE (x) == LABEL_REF);
3458 x = XEXP (x, 0);
3459
3460 if (GET_CODE (x) == UNSPEC_VOLATILE && XINT (x, 1) == VUNSPEC_POOL_LABEL)
3461 {
3462 ASM_GENERATE_INTERNAL_LABEL (buf, CSKY_CONSTPOOL_LABEL_PREFIX,
3463 INTVAL (XVECEXP (x, 0, 0)));
3464 assemble_name (stream, buf);
3465 }
3466 }
3467
3468
/* Implement TARGET_PRINT_OPERAND_ADDRESS.
   Constpool-label and symbol addresses print as "[...]" forms;
   register-based addresses print as "(base, offset)" forms.  */

static void
csky_print_operand_address (FILE *stream,
			    machine_mode mode ATTRIBUTE_UNUSED,
			    rtx x)
{

  struct csky_address addr;

  decompose_csky_address (x, &addr);

  /* Constpool label + offset: [label+disp].  */
  if (addr.label && addr.disp && GET_CODE (addr.disp) == CONST_INT)
    {
      fprintf (stream, "[");
      csky_output_constpool_label (stream, addr.label);
      fprintf (stream, "+%d]", (int) INTVAL (addr.disp));
    }
  /* Constpool label alone: [label].  */
  else if (addr.label)
    {
      fprintf (stream, "[");
      csky_output_constpool_label (stream, addr.label);
      fprintf (stream, "]");
    }
  /* Symbol + offset: [sym+disp].  */
  else if (addr.symbol && addr.disp && GET_CODE (addr.disp) == CONST_INT)
    {
      fprintf (stream, "[");
      output_addr_const (stream, addr.symbol);
      fprintf (stream, "+%d]", (int) INTVAL (addr.disp));
    }
  /* Symbol alone: [sym].  */
  else if (addr.symbol)
    {
      fprintf (stream, "[");
      output_addr_const (stream, addr.symbol);
      fprintf (stream, "]");
    }
  /* Base register + constant offset: (reg, disp).  */
  else if (addr.disp && GET_CODE (addr.disp) == CONST_INT)
    fprintf (stream, "(%s, %d)",
	     reg_names[REGNO (addr.base)], (int) INTVAL (addr.disp));
  /* PIC displacement: bracketed when based on the GOT base register
     (gb), parenthesized otherwise.  */
  else if (addr.disp && GET_CODE (addr.disp) == UNSPEC)
    {
      if (REGNO (addr.base) != CSKY_GB_REGNUM)
	fprintf (stream, "(%s, ", reg_names[REGNO (addr.base)]);
      else
	fprintf (stream, "[");
      csky_output_pic_addr_const (stream, addr.disp, 0);
      fprintf (stream, "%s", (REGNO (addr.base) != CSKY_GB_REGNUM)
	       ? ")" : "]");
    }
  /* Scaled register index: (base, index << log2(scale)).  */
  else if (addr.index)
    fprintf (stream, "(%s, %s << %d)",
	     reg_names[REGNO (addr.base)], reg_names[REGNO (addr.index)],
	     exact_log2 ((int) (addr.scale)));
  /* Bare base register: (reg, 0).  */
  else
    fprintf (stream, "(%s, 0)", reg_names[REGNO (addr.base)]);
}
3525
3526
/* Implement TARGET_PRINT_OPERAND.
   Print operand X (an rtx) in assembler syntax to file STREAM
   according to modifier CODE.

   'N' print the log2(X+1), mainly used for bmaski
   'P' print the log2(X)
   'Q' print the log2(~X)
   'O' print a decimal number
   'M' print a decimal number as its negative
   'R' print the next register or memory location along, i.e. the lsw in
       a double word value
   'H' print the high 16 bits of a constant.  */

static void
csky_print_operand (FILE *stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      /* All-ones maps to 0 (full-width bmaski); otherwise print
	 log2 (X + 1) modulo 32.  */
      if ((INTVAL (x) & 0xffffffff) == 0xffffffff)
	fprintf (stream, "0");
      else
	fprintf (stream, "%d",
		 (int) exact_log2 ((INTVAL (x) & 0xffffffff) + 1) % 32);
      break;
    case 'P':
      fprintf (stream, "%d",
	       (int) exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (stream, "%d",
	       (int) exact_log2 (~INTVAL (x) & 0xffffffff));
      break;
    case 'O':
      fprintf (stream, "%d", (int) INTVAL (x));
      break;
    case 'M':
      fprintf (stream, "%d", (int) (-INTVAL (x)));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], stream);
	  break;
	case MEM:
	  /* Print the address 4 bytes past X's.  */
	  csky_print_operand_address
	    (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'H':
      fprintf (stream, "%ld", (long)((INTVAL (x) & 0xFFFF0000) >> 16));
      break;
    default:
      /* No modifier: print X according to its rtx code.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], stream);
	  break;
	case MEM:
	  output_address (GET_MODE (x), XEXP (x, 0));
	  break;
	case UNSPEC:
	  csky_output_pic_addr_const (stream, x, code);
	  break;
	case CONST_DOUBLE:
	  {
	    char fpstr[20];
	    real_to_decimal ( fpstr, CONST_DOUBLE_REAL_VALUE (x),
			     sizeof (fpstr), 0, 1);
	    fprintf (stream, "%s", fpstr);
	  }
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
3611
3612
3613
/* Implement TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS.  */

static bool
csky_allocate_stack_slots_for_args (void)
{
  /* Naked functions should not allocate stack slots for arguments.  */
  return !CSKY_FUNCTION_IS_NAKED (get_csky_current_func_type ());
}
3622
3623
3624 /* Can we generate a constant with a single instruction, without using
3625 lrw? */
3626
3627 static int
3628 const_ok_for_cskyv2 (HOST_WIDE_INT value)
3629 {
3630 /* Try exact power of two. It can be generated by bgeni. */
3631 if (CSKY_CONST_OK_FOR_Ub (value))
3632 return 1;
3633
3634 /* Try exact power of two - 1. It can be generated by bmaski. */
3635 if (CSKY_CONST_OK_FOR_Uc (value) && value != -1)
3636 return 1;
3637
3638 /* Try if it can be generated by movi. */
3639 if (CSKY_CONST_OK_FOR_I (value))
3640 return 1;
3641
3642 /* The constant can be generated by movih.
3643 Notice that movih is a 32-bit instruction. */
3644 if (CSKY_CONST_OK_FOR_MOVIH (value))
3645 return 1;
3646
3647 return 0;
3648 }
3649
3650
/* Tricks for synthesizing constants from values that can be directly
   manipulated by machine instructions.  Returned by
   try_csky_constant_tricks and interpreted by csky_output_inline_const.  */

enum csky_inline_const_type
{
  IC_UNINLINABLE = 0, /* Not inlineable */
  IC_SINGLE,	      /* Single instruction */
  IC_APPEND_NOT,      /* Single instruction followed by a not */
  IC_APPEND_ADDI,     /* Single insn followed by an addi */
  IC_APPEND_SUBI,     /* Single insn followed by a subi */
  IC_BGENI_ADDI,      /* Single insn(bgeni) followed by an addi */
  IC_BGENI_SUBI,      /* Single insn(bgeni) followed by a subi */
  IC_APPEND_BSETI,    /* Single insn followed by bseti */
  IC_APPEND_MOVI,     /* Single insn followed by movi */
  IC_APPEND_BCLRI,    /* Single insn followed by bclri */
  IC_APPEND_ROTLI,    /* Single insn followed by rotli */
  IC_APPEND_LSLI,     /* Single insn followed by lsli */
  IC_APPEND_IXH,      /* Single insn followed by ixh */
  IC_APPEND_IXW       /* Single insn followed by ixw */
};
3671
3672
3673 /* Try tricks to load a constant inline and return the trick number if
3674 success, or IC_UNINLINABLE. */
3675
3676 static enum csky_inline_const_type
3677 try_csky_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT *x,
3678 HOST_WIDE_INT *y)
3679 {
3680 HOST_WIDE_INT i, value_invert;
3681 unsigned HOST_WIDE_INT bit, shf, rot, lobits, hibits;
3682
3683 value &= 0xffffffff;
3684 value_invert = ~value & 0xffffffff;
3685
3686 if (const_ok_for_cskyv2 (value))
3687 {
3688 *x = value;
3689 return IC_SINGLE;
3690 }
3691
3692 /* Since movih is 32 bits, do not use it here, better code may
3693 be generated later. */
3694 if (const_ok_for_cskyv2 (value_invert)
3695 && !CSKY_CONST_OK_FOR_MOVIH (value_invert))
3696 {
3697 *x = value_invert;
3698 return IC_APPEND_NOT;
3699 }
3700
3701 /* One immediate generate instruction, and one 16-bit subi or addi. */
3702 for (i = 1; i <= 32; i++)
3703 {
3704 if (const_ok_for_cskyv2 (value - i)
3705 && !CSKY_CONST_OK_FOR_MOVIH (value - i))
3706 {
3707 *x = value - i;
3708 *y = i;
3709 return IC_APPEND_ADDI;
3710 }
3711
3712 if (const_ok_for_cskyv2 (value + i)
3713 && !CSKY_CONST_OK_FOR_MOVIH (value - i))
3714 {
3715 *x = value + i;
3716 *y = i;
3717 return IC_APPEND_SUBI;
3718 }
3719 }
3720
3721 /* Generate bgeni + addi. */
3722 if (CSKY_CONST_OK_FOR_Ub (value & 0xfffff000))
3723 {
3724 *x = (value & 0xfffff000);
3725 *y = (value & 0xfff);
3726 return IC_BGENI_ADDI;
3727 }
3728
3729 /* Generate bgeni + subi. */
3730 lobits = value & 0xfff;
3731 hibits = (unsigned HOST_WIDE_INT)(value & 0xfffff000) + (1 << 12);
3732 if (exact_log2 (hibits) >= 1
3733 && exact_log2 (hibits) <= 30
3734 && lobits != 0)
3735 {
3736 *x = hibits;
3737 *y = (0x1000 - lobits);
3738 return IC_BGENI_SUBI;
3739 }
3740
3741 /* One immediate generate instruction, and one bseti or bclri. */
3742 bit = 0x80000000ULL;
3743 for (i = 0; i <= 31; i++)
3744 {
3745 if (const_ok_for_cskyv2 (value & ~bit)
3746 && !CSKY_CONST_OK_FOR_MOVIH (value & ~bit))
3747 {
3748 *y = bit;
3749 *x = (value & ~bit);
3750 return IC_APPEND_BSETI;
3751 }
3752
3753 if (const_ok_for_cskyv2 (value | bit)
3754 && !CSKY_CONST_OK_FOR_MOVIH (value | bit))
3755 {
3756 *y = ~bit & 0xffffffff;
3757 *x = value | bit;
3758 return IC_APPEND_BCLRI;
3759 }
3760
3761 bit >>= 1;
3762 }
3763
3764 /* One immediate generate instruction, and one rotli or lsli. */
3765 shf = value;
3766 rot = value;
3767 for (i = 1; i < 31; i++)
3768 {
3769 int c;
3770
3771 /* Rotate left. */
3772 c = rot << 31;
3773 rot >>= 1;
3774 rot &= 0x7FFFFFFF;
3775 rot |= c;
3776
3777 if (const_ok_for_cskyv2 (rot) && !CSKY_CONST_OK_FOR_MOVIH (rot))
3778 {
3779 *y = i;
3780 *x = rot;
3781 return IC_APPEND_ROTLI;
3782 }
3783
3784 /* Can't use logical shift when low order bit is one. */
3785 if (shf & 1)
3786 shf = 0;
3787 else
3788 shf >>= 1;
3789
3790 if (shf != 0 && const_ok_for_cskyv2 (shf)
3791 && !CSKY_CONST_OK_FOR_MOVIH (shf))
3792 {
3793 *y = i;
3794 *x = shf;
3795 return IC_APPEND_LSLI;
3796 }
3797 }
3798
3799 /* One immediate generate instruction, and one ixh. */
3800 if (CSKY_ISA_FEATURE (E2)
3801 && (value % 3) == 0
3802 && const_ok_for_cskyv2 (value / 3)
3803 && !CSKY_CONST_OK_FOR_MOVIH (value / 3))
3804 {
3805 *x = value / 3;
3806 return IC_APPEND_IXH;
3807 }
3808
3809 /* One immediate generate instruction, and one ixw. */
3810 if (CSKY_ISA_FEATURE (E2)
3811 && (value % 5) == 0
3812 && const_ok_for_cskyv2 (value / 5)
3813 && !CSKY_CONST_OK_FOR_MOVIH (value / 5))
3814 {
3815 *x = value / 5;
3816 return IC_APPEND_IXW;
3817 }
3818
3819 /* Generate movih + bseti. */
3820 if (CSKY_CONST_OK_FOR_Ub (value & 0xffff))
3821 {
3822 *x = value & 0xffff0000;
3823 *y = value & 0xffff;
3824 return IC_APPEND_BSETI;
3825 }
3826
3827 /* Generate movih + not. */
3828 if (CSKY_CONST_OK_FOR_MOVIH (value_invert))
3829 {
3830 *x = value_invert;
3831 return IC_APPEND_NOT;
3832 }
3833
3834 /* One movih, and one 16bits addi or subi. */
3835 for (i = 1; i <= 32; i++)
3836 {
3837 if (CSKY_CONST_OK_FOR_MOVIH (value - i))
3838 {
3839 *x = value - i;
3840 *y = i;
3841 return IC_APPEND_ADDI;
3842 }
3843
3844 if (CSKY_CONST_OK_FOR_MOVIH (value + i))
3845 {
3846 *x = value + i;
3847 *y = i;
3848 return IC_APPEND_SUBI;
3849 }
3850 }
3851
3852 /* One movih, and one bseti or bclri. */
3853 bit = 0x80000000ULL;
3854 for (i = 0; i <= 31; i++)
3855 {
3856 if (CSKY_CONST_OK_FOR_MOVIH (value & ~bit))
3857 {
3858 *y = bit;
3859 *x = value & ~bit;
3860 return IC_APPEND_BSETI;
3861 }
3862
3863 if (CSKY_CONST_OK_FOR_MOVIH (value | bit))
3864 {
3865 *y = ~bit & 0xffffffff;
3866 *x = value | bit;
3867 return IC_APPEND_BCLRI;
3868 }
3869
3870 bit >>= 1;
3871 }
3872
3873 /* One movih, and one rotli or lsli. */
3874 shf = value;
3875 rot = value;
3876 for (i = 1; i < 31; i++)
3877 {
3878 int c;
3879
3880 /* Rotate left. */
3881 c = rot << 31;
3882 rot >>= 1;
3883 rot &= 0x7FFFFFFF;
3884 rot |= c;
3885
3886 if (CSKY_CONST_OK_FOR_MOVIH (rot))
3887 {
3888 *y = i;
3889 *x = rot;
3890 return IC_APPEND_ROTLI;
3891 }
3892
3893 /* Can't use logical shift when low order bit is one. */
3894 if (shf & 1)
3895 shf = 0;
3896 else
3897 shf >>= 1;
3898
3899 if (shf != 0 && CSKY_CONST_OK_FOR_MOVIH (shf))
3900 {
3901 *y = i;
3902 *x = shf;
3903 return IC_APPEND_LSLI;
3904 }
3905 }
3906
3907 return IC_UNINLINABLE;
3908 }
3909
3910
/* Actually output a constant using a trick.
   OPERANDS[0] is the destination register, OPERANDS[1] the constant.
   The caller guarantees try_csky_constant_tricks succeeds on it.
   FIXME: I think this would be better handled by a splitter than at the
   asm output level.  */

static const char *
csky_output_inline_const (machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  enum csky_inline_const_type trick_type;
  rtx out_operands[3];
  char buf[256];
  char load_op[128];
  const char *dst_fmt;
  HOST_WIDE_INT value = INTVAL (operands[1]);
  int ivalue = (int) value;
  unsigned int uvalue = (unsigned int) value;

  trick_type = try_csky_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_csky_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_type != IC_UNINLINABLE);

  /* Operands: 0 = dst, 1 = load immedate., 2 = adjust immedate.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  if (trick_type != IC_SINGLE && trick_type != IC_APPEND_NOT)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  For DImode on big-endian, %R0
     names the low word.  */
  if (mode == DImode && TARGET_BIG_ENDIAN)
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  /* Build the first (load) instruction into load_op.  */
  /* Try movi16: 0~31,movi32: 0~65535.  */
  if (CSKY_CONST_OK_FOR_I (x))
    sprintf (load_op, "movi\t%s, %%1", dst_fmt);
  /* Try exact power of two - 1.  */
  else if (CSKY_CONST_OK_FOR_Uc (x))
    sprintf (load_op, "bmaski\t%s, %%N1", dst_fmt);
  /* Try movih.  */
  else if (CSKY_CONST_OK_FOR_MOVIH (x))
    sprintf (load_op, "movih\t%s, %%H1", dst_fmt);
  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  /* Append the adjusting instruction selected by the trick.  Each
     template carries the target value as a trailing comment.  */
  switch (trick_type)
    {
    case IC_SINGLE:
      strcpy (buf, load_op);
      break;
    /* Add instruction 'not'.  */
    case IC_APPEND_NOT:
      sprintf (buf, "%s\n\tnot\t%s, %s\t// %d 0x%x", load_op, dst_fmt,
	       dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'addi'.  */
    case IC_APPEND_ADDI:
      sprintf (buf, "%s\n\taddi\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'subi'.  */
    case IC_APPEND_SUBI:
      sprintf (buf, "%s\n\tsubi\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'addi', the last instruction is bgeni.  */
    case IC_BGENI_ADDI:
      sprintf (buf, "%s\n\taddi\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'subi', the last instruction is bgeni.  */
    case IC_BGENI_SUBI:
      sprintf (buf, "%s\n\tsubi\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'bseti'.  */
    case IC_APPEND_BSETI:
      sprintf (buf, "%s\n\tbseti\t%s, %s, %%P2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'movi'.  */
    case IC_APPEND_MOVI:
      sprintf (buf, "%s\n\tmovi\t%s, %%2\t// %d 0x%x", load_op, dst_fmt,
	       ivalue, uvalue);
      break;
    /* Add instruction 'bclri'.  */
    case IC_APPEND_BCLRI:
      sprintf (buf, "%s\n\tbclri\t%s, %s, %%Q2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'rotli'.  */
    case IC_APPEND_ROTLI:
      sprintf (buf, "%s\n\trotli\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'lsli'.  */
    case IC_APPEND_LSLI:
      sprintf (buf, "%s\n\tlsli\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'ixh'.  */
    case IC_APPEND_IXH:
      sprintf (buf, "%s\n\tixh\t%s, %s, %s\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'ixw'.  */
    case IC_APPEND_IXW:
      sprintf (buf, "%s\n\tixw\t%s, %s, %s\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
4034
4035 /* This is a helper function for the Uo constraint for movsi patterns. */
4036
4037 bool
4038 csky_inlinable_constant (HOST_WIDE_INT value)
4039 {
4040 HOST_WIDE_INT x, y;
4041 return (!(CSKY_TARGET_ARCH (CK802) || CSKY_TARGET_ARCH (CK801))
4042 && try_csky_constant_tricks (value, &x, &y));
4043 }
4044
4045
4046 /* Return true if the constant VAL can be expressed by an 8-bit constant
4047 with a shift value, filling in *BASE and *SHIFT. */
4048
4049 bool
4050 csky_shifted_imm8_constant (unsigned HOST_WIDE_INT val,
4051 unsigned int *base, unsigned int *shift)
4052 {
4053 unsigned HOST_WIDE_INT mask = 0xff;
4054 int i;
4055 val = val & (unsigned HOST_WIDE_INT) 0xffffffffu;
4056 if (val == 0)
4057 return 0;
4058
4059 for (i = 0; i < 25; i++)
4060 if ((val & (mask << i)) == val)
4061 {
4062 if (base)
4063 *base = (unsigned int) (val >> i);
4064 if (shift)
4065 *shift = (unsigned int) i;
4066 return true;
4067 }
4068
4069 return false;
4070 }
4071
4072
/* Output a move of a word or less value.  Returns the asm template
   (or "#" to tell the caller to split the insn, or "" after emitting
   directly).  OPERANDS[0] is the destination, OPERANDS[1] the source.  */

const char *
csky_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		  machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];
  struct csky_address op0, op1;

  if (REG_P (dst))
    {
      /* The situation mov reg to reg.  */
      if (REG_P (src))
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* hilo registers exchange their places,
	     and their order of Dimode as same as other
	     general registers in LITTLE_ENDIAN mode.  */
	  if (TARGET_BIG_ENDIAN)
	    {
	      if (dstreg == CSKY_HI_REGNUM)
		return "mthi\t%1";
	      else if (dstreg == CSKY_LO_REGNUM)
		return "mtlo\t%1";
	      else if (srcreg == CSKY_HI_REGNUM)
		return "mfhi\t%0";
	      else if (srcreg == CSKY_LO_REGNUM)
		return "mflo\t%0";
	    }
	  else
	    {
	      if (dstreg == CSKY_HI_REGNUM)
		return "mtlo\t%1";
	      else if (dstreg == CSKY_LO_REGNUM)
		return "mthi\t%1";
	      else if (srcreg == CSKY_HI_REGNUM)
		return "mflo\t%0";
	      else if (srcreg == CSKY_LO_REGNUM)
		return "mfhi\t%0";
	    }

	  /* FP reg <-> FP reg.  */
	  if (CSKY_VREG_P (dstreg) && CSKY_VREG_P (srcreg))
	    {
	      if (CSKY_ISA_FEATURE (fpv2_sf))
		return "fmovs\t%0, %1";
	      else if (CSKY_ISA_FEATURE (fpv3_sf))
		return "fmov.32\t%0, %1";
	      else
		gcc_unreachable ();
	    }
	  /* General reg -> FP reg.  */
	  if (CSKY_VREG_P (dstreg))
	    {
	      if (CSKY_ISA_FEATURE (fpv2_sf))
		return "fmtvrl\t%0, %1";
	      else if (CSKY_ISA_FEATURE (fpv3_sf))
		return "fmtvr.32.1\t%0, %1";
	      else
		gcc_unreachable ();
	    }
	  /* FP reg -> general reg.  */
	  if (CSKY_VREG_P (srcreg))
	    {
	      if (CSKY_ISA_FEATURE (fpv2_sf))
		return "fmfvrl\t%0, %1";
	      else if (CSKY_ISA_FEATURE (fpv3_sf))
		return "fmfvr.32.1\t%0, %1";
	      else
		gcc_unreachable ();
	    }
	  if (REGNO (src) == CSKY_CC_REGNUM)
	    return "mvc\t%0";
	  else
	    return "mov\t%0, %1";
	}
      /* The situation mov memory to reg.  */
      else if (GET_CODE (src) == MEM)
	{
	  decompose_csky_address (XEXP (src, 0), &op1);

	  /* Register-indexed address: use the ldr forms.  */
	  if (op1.index)
	    switch (GET_MODE (src))
	      {
	      case E_HImode:
	      case E_HFmode:
		return "ldr.h\t%0, %1";
	      case E_QImode:
		return "ldr.b\t%0, %1";
	      case E_SImode:
	      case E_SFmode:
		if (CSKY_VREG_P (REGNO (dst)))
		  {
		    if (CSKY_ISA_FEATURE(fpv2_sf))
		      return "fldrs\t%0, %1";
		    else if (CSKY_ISA_FEATURE(fpv3_sf))
		      return "fldr.32\t%0, %1";
		    else
		      gcc_unreachable ();
		  }
		else
		  return "ldr.w\t%0, %1";
	      default:
		gcc_unreachable ();
	      }
	  /* Generate lrw rx, [LABEL].  This happens when the compiler
	     generates constant pool references and uses lrw to get the
	     constant into memory.  */
	  else if (op1.label)
	    return "lrw\t%0, %1";
	  /* Generate lrs.w rx, [symbol@GOT/PLT].  */
	  else if (flag_pic == 1 && op1.disp && GET_CODE (op1.disp) == UNSPEC)
	    return "lrs.w\t%0, %1";
	  /* Base + constant offset: use the ld forms.  */
	  else
	    switch (GET_MODE (src))
	      {
	      case E_HImode:
	      case E_HFmode:
		return "ld.h\t%0, %1";
	      case E_QImode:
		return "ld.b\t%0, %1";
	      case E_SFmode:
	      case E_SImode:
		if (CSKY_VREG_P (REGNO (dst)))
		  {
		    if (CSKY_ISA_FEATURE(fpv2_sf))
		      return "flds\t%0, %1";
		    else if (CSKY_ISA_FEATURE(fpv3_sf))
		      return "fld.32\t%0, %1";
		    else
		      gcc_unreachable ();
		  }
		else
		  return "ld.w\t%0, %1";
	      default:
		gcc_unreachable ();
	      }
	}
      /* The situation mov integer to reg.  */
      else if (GET_CODE (src) == CONST_INT ||
	       (GET_CODE (src) == CONST_DOUBLE && GET_MODE (src) == SFmode))
	{
	  HOST_WIDE_INT x, y;
	  const REAL_VALUE_TYPE *d;
	  long l;

	  /* An SFmode constant is handled via its 32-bit image.  */
	  if (GET_CODE (src) == CONST_DOUBLE && GET_MODE (src) == SFmode)
	    {
	      d = CONST_DOUBLE_REAL_VALUE (src);
	      REAL_VALUE_TO_TARGET_SINGLE (*d, l);
	      operands[1] = GEN_INT (l);
	      src = operands[1];
	    }

	  if (try_csky_constant_tricks (INTVAL (src), &x, &y))
	    return csky_output_inline_const (SImode, operands);
	  /* Return '#' to split it.  */
	  else if (CSKY_CONST_OK_FOR_T (INTVAL (src)))
	    return "#";
	  else
	    return "lrw\t%0, %x1\t";
	}
      else if (TARGET_ANCHOR && GET_CODE (src) == SYMBOL_REF)
	{
	  if (SYMBOL_REF_FUNCTION_P (src))
	    return "lrw\t%0, %1@BTEXT";
	  else
	    return "lrw\t%0, %1@BDATA";
	}
      else if (GET_CODE (src) == UNSPEC
	       && XINT (src, 1) == UNSPEC_PIC_SYMBOL_GRS)
	return "grs\t%0, %1";
      else
	return "lrw\t%0, %1";
    }
  else if (GET_CODE (dst) == MEM)
    {
      decompose_csky_address (XEXP (dst, 0), &op0);

      /* Register-indexed address: use the str forms.  */
      if (op0.index)
	switch (GET_MODE (src))
	  {
	  case E_HImode:
	    return "str.h\t%1, %0";
	  case E_QImode:
	    return "str.b\t%1, %0";
	  case E_SFmode:
	  case E_SImode:
	    if (CSKY_VREG_P (REGNO (src)))
	      {
		if (CSKY_ISA_FEATURE(fpv2_sf))
		  return "fstrs\t%1, %0";
		else if (CSKY_ISA_FEATURE(fpv3_sf))
		  return "fstr.32\t%1, %0";
		else
		  gcc_unreachable ();
	      }
	    else
	      return "str.w\t%1, %0";
	  default:
	    gcc_unreachable ();
	  }
      /* Base + constant offset: use the st forms.  */
      else
	switch (GET_MODE (dst))
	  {
	  case E_HImode:
	    return "st.h\t%1, %0";
	  case E_QImode:
	    return "st.b\t%1, %0";
	  case E_SImode:
	  case E_SFmode:
	    if (CSKY_VREG_P (REGNO (src)))
	      {
		if (CSKY_ISA_FEATURE(fpv2_sf))
		  return "fsts\t%1, %0";
		else if (CSKY_ISA_FEATURE(fpv3_sf))
		  return "fst.32\t%1, %0";
		else
		  gcc_unreachable ();
	      }
	    else
	      return "st.w\t%1, %0";
	  default:
	    gcc_unreachable ();
	  }
    }

  gcc_unreachable ();
}
4302
4303
/* Output a move of a word or less value.  Specific for ck801, which
   has a much smaller instruction repertoire than the generic path in
   csky_output_move.  Returns the asm template, or "#" to request a
   split.  */

const char *
csky_output_ck801_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
			machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];
  struct csky_address op1;

  if (REG_P (dst))
    {
      if (REG_P (src))
	return "mov\t%0, %1";
      else if (GET_CODE (src) == MEM)
	{
	  decompose_csky_address (XEXP (src, 0), &op1);

	  /* Generate lrw rx, [LABEL].  This happens when the compiler
	     generates constant pool references and uses lrw to get the
	     constant in memory.  */
	  if (op1.label)
	    return "lrw\t%0, %1";
	  else
	    switch (GET_MODE (src))
	      {
	      case E_HImode:
		return "ld.h\t%0, %1";
	      case E_QImode:
		return "ld.b\t%0, %1";
	      case E_SFmode:
	      case E_SImode:
		return "ld.w\t%0, %1";
	      default:
		gcc_unreachable ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  /* movi is only available for the low registers.  */
	  if (REGNO (dst) > 7)
	    return "lrw\t%0, %x1\t";
	  else if (CSKY_CONST_OK_FOR_N (INTVAL (src) + 1))
	    return "movi\t%0, %1";
	  /* Return '#' to split it.  */
	  else if (CSKY_CONST_OK_FOR_T (INTVAL (src)))
	    return "#";
	  else if (csky_shifted_imm8_constant (INTVAL (src), NULL, NULL))
	    return "#";
	  else
	    return "lrw\t%0, %x1\t";
	}
      else if (GET_CODE (src) == CONST_DOUBLE && GET_MODE (src) == SFmode)
	{
	  /* Handle an SFmode constant via its 32-bit image.  */
	  const REAL_VALUE_TYPE *d;
	  long l;

	  d = CONST_DOUBLE_REAL_VALUE (src);
	  REAL_VALUE_TO_TARGET_SINGLE (*d, l);
	  operands[1] = GEN_INT (l);
	  src = operands[1];

	  if (CSKY_CONST_OK_FOR_N (INTVAL (src) + 1))
	    return "movi\t%0, %1";
	  else
	    return "lrw\t%0, %x1\t";
	}
      else if (TARGET_ANCHOR && GET_CODE (src) == SYMBOL_REF)
	{
	  if (SYMBOL_REF_FUNCTION_P (src))
	    return "lrw\t%0, %1@BTEXT";
	  else
	    return "lrw\t%0, %1@BDATA";
	}
      else
	return "lrw\t%0, %1";
    }
  else if (GET_CODE (dst) == MEM)
    switch (GET_MODE (dst))
      {
      case E_HImode:
	return "st.h\t%1, %0";
      case E_QImode:
	return "st.b\t%1, %0";
      case E_SImode:
      case E_SFmode:
	return "st.w\t%1, %0";
      default:
	gcc_unreachable ();
      }

  gcc_unreachable ();
}
4396
4397
/* Return a sequence of instructions to perform DI or DF move.
   Since the CSKY cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
csky_output_movedouble (rtx operands[],
			machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (REG_P (dst))
    {
      if (REG_P (src))
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Moves to/from the HI/LO accumulator pair use mfhi/mflo or
	     mthi/mtlo; which GPR gets HI depends on endianness.  */
	  if (CSKY_HILO_REG_P (srcreg))
	    {
	      if (TARGET_BIG_ENDIAN)
		return "mfhi\t%0\n\tmflo\t%R0";
	      else
		return "mfhi\t%R0\n\tmflo\t%0";
	    }
	  else if (CSKY_HILO_REG_P (dstreg))
	    {
	      if (TARGET_BIG_ENDIAN)
		return "mthi\t%1\n\tmtlo\t%R1";
	      else
		return "mthi\t%R1\n\tmtlo\t%1";
	    }
	  /* Vector-reg to vector-reg is a single FP move.  */
	  else if (CSKY_VREG_P (srcreg) && CSKY_VREG_P (dstreg))
	    {
	      if (CSKY_ISA_FEATURE(fpv2_df))
		return "fmovd\t%0, %1";
	      else if (CSKY_ISA_FEATURE(fpv3_df))
		return "fmov.64\t%0, %1";
	      else
		gcc_unreachable ();
	    }
	  else if (CSKY_VREG_P (srcreg))
	    {
	      /* Since the vector registers in fpuv2_soft processors
		 like ck803f are 32 bits wide, just one insn is needed
		 to complete the move operation.  */
	      if (TARGET_SOFT_FPU)
		return "fmfvrl\t%0, %1";
	      else if (TARGET_BIG_ENDIAN)
		{
		  if (CSKY_ISA_FEATURE(fpv2_df))
		    return "fmfvrh\t%0, %1\n\tfmfvrl\t%R0, %1";
		  else if (CSKY_ISA_FEATURE(fpv3_df))
		    return "fmfvr.64\t%R0, %0, %1";
		  else
		    gcc_unreachable ();
		}
	      else
		{
		  if (CSKY_ISA_FEATURE(fpv2_df))
		    return "fmfvrh\t%R0, %1\n\tfmfvrl\t%0, %1";
		  else if (CSKY_ISA_FEATURE(fpv3_df))
		    return "fmfvr.64\t%0, %R0, %1";
		  else
		    gcc_unreachable ();
		}
	    }
	  else if (CSKY_VREG_P (dstreg))
	    {
	      /* GPR pair to vector register; mirror of the case above.  */
	      if (TARGET_SOFT_FPU)
		return "fmtvrl\t%0, %1";
	      else if (TARGET_BIG_ENDIAN)
		{
		  if (CSKY_ISA_FEATURE(fpv2_df))
		    return "fmtvrh\t%0, %1\n\tfmtvrl\t%0, %R1";
		  else if (CSKY_ISA_FEATURE(fpv3_df))
		    return "fmtvr.64\t%0, %R1, %1";
		  else
		    gcc_unreachable ();
		}
	      else
		{
		  if (CSKY_ISA_FEATURE(fpv2_df))
		    return "fmtvrh\t%0, %R1\n\tfmtvrl\t%0, %1";
		  else if (CSKY_ISA_FEATURE(fpv3_df))
		    return "fmtvr.64\t%0, %1, %R1";
		  else
		    gcc_unreachable ();
		}
	    }

	  /* Ensure the second source not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov\t%R0, %R1\n\tmov\t%0, %1";
	  else
	    return "mov\t%0, %1\n\tmov\t%R0, %R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;
	  struct csky_address op0;

	  decompose_csky_address (XEXP (src, 0), &op0);

	  /* Constant-pool (label-relative) loads use two lrw insns.  */
	  if (GET_CODE (memexp) == LABEL_REF
	      || (GET_CODE (memexp) == CONST
		  && GET_CODE (XEXP (memexp, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (memexp, 0), 0)) == LABEL_REF))
	    return "lrw\t%0, [%1]\n\tlrw\t%R0, [%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();


	  /* When FPUV2.  */
	  if (CSKY_VREG_P (dstreg))
	    {
	      /* Indexed addresses need the register-offset form.  */
	      if (op0.index)
		{
		  if (CSKY_ISA_FEATURE(fpv2_df))
		    return "fldrd\t%0, %1";
		  else if (CSKY_ISA_FEATURE(fpv3_df))
		    return "fldr.64\t%0, %1";
		  else
		    gcc_unreachable ();
		}
	      else
		{
		  if (CSKY_ISA_FEATURE(fpv2_df))
		    return "fldd\t%0, %1";
		  else if (CSKY_ISA_FEATURE(fpv3_df))
		    return "fld.64\t%0, %1";
		  else
		    gcc_unreachable ();
		}
	    }
	  /* FIXME length attribute is wrong here.  */
	  /* If the destination low word would clobber the base register,
	     load high word first.  */
	  if (dstreg == basereg)
	    /* Just load them in reverse order.  */
	    return "ld.w\t%R0, %R1\n\tld.w\t%0, %1";
	  else
	    return "ld.w\t%0, %1\n\tld.w\t%R0, %R1";
	}
      else if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE)
	{
	  /* Materialize each 32-bit half with the cheapest available
	     immediate insn, falling back to a constant-pool lrw.  */
	  split_double (src, operands + 2, operands + 3);

	  if (CSKY_CONST_OK_FOR_I (INTVAL (operands[2])))
	    output_asm_insn ("movi\t%0, %2", operands);
	  else if (CSKY_CONST_OK_FOR_Uc (INTVAL (operands[2])))
	    output_asm_insn ("bmaski\t%0, %N2", operands);
	  else if (CSKY_CONST_OK_FOR_Ub (INTVAL (operands[2])))
	    output_asm_insn ("bgeni\t%0, %P2", operands);
	  else
	    output_asm_insn ("lrw\t%0, %2", operands);

	  if (CSKY_CONST_OK_FOR_I (INTVAL (operands[3])))
	    output_asm_insn ("movi\t%R0, %3", operands);
	  else if (CSKY_CONST_OK_FOR_Uc (INTVAL (operands[3])))
	    output_asm_insn ("bmaski\t%R0, %N3", operands);

	  else if (CSKY_CONST_OK_FOR_Ub (INTVAL (operands[3])))
	    output_asm_insn ("bgeni\t%R0, %P3", operands);
	  else
	    output_asm_insn ("lrw\t%R0, %3", operands);

	  return "";
	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    {
      rtx memexp = XEXP (dst, 0);
      int srcreg = REGNO (src);
      int basereg = -1;
      struct csky_address op0;

      decompose_csky_address (XEXP (dst, 0), &op0);

      if (GET_CODE (memexp) == REG)
	basereg = REGNO (memexp);
      else if (GET_CODE (memexp) == PLUS)
	{
	  if (GET_CODE (XEXP (memexp, 0)) == REG)
	    basereg = REGNO (XEXP (memexp, 0));
	  else if (GET_CODE (XEXP (memexp, 1)) == REG)
	    basereg = REGNO (XEXP (memexp, 1));
	  else
	    gcc_unreachable ();
	}
      else
	gcc_unreachable ();

      /* When FPUV2.  */
      if (CSKY_VREG_P (srcreg))
	{
	  if (op0.index)
	    {
	      if (CSKY_ISA_FEATURE(fpv2_df))
		return "fstrd\t%1, %0";
	      else if (CSKY_ISA_FEATURE(fpv3_df))
		return "fstr.64\t%1, %0";
	      else
		gcc_unreachable ();
	    }
	  else
	    {
	      if (CSKY_ISA_FEATURE(fpv2_df))
		return "fstd\t%1, %0";
	      else if (CSKY_ISA_FEATURE(fpv3_df))
		return "fst.64\t%1, %0";
	      else
		gcc_unreachable ();
	    }
	}
      /* FIXME length attribute is wrong here.  */
      /* NOTE(review): "load" below means "store"; the reverse order looks
	 unnecessary for stores (the base reg isn't clobbered), but it is
	 kept byte-identical to the load path — confirm upstream intent.  */
      if (srcreg == basereg)
	/* Just load them in reverse order.  */
	return "st.w\t%R1, %R0\n\tst.w\t%1, %0";
      else
	return "st.w\t%1, %0\n\tst.w\t%R1, %R0";
    }
  else
    gcc_unreachable ();
}
4636
4637
/* Output a DI or DF move for CK801.  Like csky_output_movedouble but
   without HI/LO or FP-register cases, since CK801 has neither.  */
const char *
csky_output_ck801_movedouble (rtx operands[],
			      machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (REG_P (dst))
    {
      if (REG_P (src))
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov\t%R0, %R1\n\tmov\t%0, %1";
	  else
	    return "mov\t%0, %1\n\tmov\t%R0, %R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;
	  struct csky_address op0;

	  decompose_csky_address (XEXP (src, 0), &op0);

	  /* Constant-pool (label-relative) loads use two lrw insns.  */
	  if (GET_CODE (memexp) == LABEL_REF
	      || (GET_CODE (memexp) == CONST
		  && GET_CODE (XEXP (memexp, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (memexp, 0), 0)) == LABEL_REF))
	    return "lrw\t%0, [%1]\n\tlrw\t%R0, [%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();

	  /* FIXME length attribute is wrong here.  */
	  /* If loading the low word would clobber the base register,
	     load high word first.  */
	  if (dstreg == basereg)
	    /* Just load them in reverse order.  */
	    return "ld.w\t%R0, %R1\n\tld.w\t%0, %1";
	  else
	    return "ld.w\t%0, %1\n\tld.w\t%R0, %R1";
	}
      else if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE)
	{
	  /* Materialize each half with movi when the destination is a
	     mini register and the value fits, else via the pool.  */
	  split_double (src, operands + 2, operands + 3);

	  if (REGNO (dst) <= 7
	      && CSKY_CONST_OK_FOR_N (INTVAL (operands[2]) + 1))
	    output_asm_insn ("movi\t%0, %2", operands);
	  else
	    output_asm_insn ("lrw\t%0, %2", operands);


	  /* %R0 is reg + 1, so the high half needs dst <= r6 for the
	     pair to stay within the movi-addressable registers.  */
	  if (REGNO (dst) <= 6
	      && CSKY_CONST_OK_FOR_N (INTVAL (operands[3]) + 1))
	    output_asm_insn ("movi\t%R0, %3", operands);
	  else
	    output_asm_insn ("lrw\t%R0, %3", operands);

	  return "";


	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    {
      rtx memexp = XEXP (dst, 0);
      int srcreg = REGNO (src);
      int basereg = -1;
      struct csky_address op0;

      decompose_csky_address (XEXP (dst, 0), &op0);

      if (GET_CODE (memexp) == REG)
	basereg = REGNO (memexp);
      else if (GET_CODE (memexp) == PLUS)
	{
	  if (GET_CODE (XEXP (memexp, 0)) == REG)
	    basereg = REGNO (XEXP (memexp, 0));
	  else if (GET_CODE (XEXP (memexp, 1)) == REG)
	    basereg = REGNO (XEXP (memexp, 1));
	  else
	    gcc_unreachable ();
	}
      else
	gcc_unreachable ();

      /* FIXME length attribute is wrong here.  */
      if (srcreg == basereg)
	/* Just load them in reverse order.  */
	return "st.w\t%R1, %R0\n\tst.w\t%1, %0";
      else
	return "st.w\t%1, %0\n\tst.w\t%R1, %R0";
    }
  else
    gcc_unreachable ();
}
4750
/* Calculate the instruction's length for moving double-word data.
   Returns the length in bytes of the sequence csky_output_movedouble /
   csky_output_ck801_movedouble will emit for OPERANDS.  */

int
csky_get_movedouble_length(rtx operands[])
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (REG_P (dst))
    {
      if (REG_P (src))
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* vreg-to-vreg is a single 4-byte fmov; everything else is
	     a two-insn (8-byte) pair.  */
	  if (CSKY_VREG_P (srcreg) && CSKY_VREG_P (dstreg))
	    return 4;
	  else
	    return 8;
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  struct csky_address op0;
	  decompose_csky_address (XEXP (src, 0), &op0);

	  /* Constant-pool reference: two lrw insns.  */
	  if (GET_CODE (memexp) == LABEL_REF)
	    return 8;
	  /* FP load is a single 4-byte insn.  */
	  if (CSKY_VREG_P (dstreg))
	    return 4;
	  return 8;
	}
      else if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE)
	{
	  /* Two 2-byte movi insns when both halves fit and the register
	     pair stays in the mini-register range; else assume 8.  */
	  split_double (src, operands + 2, operands + 3);
	  if (CSKY_CONST_OK_FOR_N (INTVAL (operands[2]) + 1)
	      && CSKY_CONST_OK_FOR_N (INTVAL (operands[3]) + 1)
	      && REGNO (operands[0]) < 6)
	    return 4;
	  else
	    return 8;
	}
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    {
      rtx memexp = XEXP (dst, 0);
      int srcreg = REGNO (src);
      int offset = -1;
      if (CSKY_VREG_P (srcreg))
	return 4;

      if (GET_CODE (memexp) == REG)
	offset = 0;
      else if (GET_CODE (memexp) == PLUS)
	{
	  /* NOTE(review): this takes INTVAL of the non-REG operand;
	     assumes reg+const addresses only — a reg+reg PLUS would
	     reach INTVAL on a REG.  Confirm addresses are legitimized
	     to reg+const before output.  */
	  if (GET_CODE (XEXP (memexp, 0)) == REG)
	    offset = INTVAL (XEXP (memexp, 1));
	  else if (GET_CODE (XEXP (memexp, 1)) == REG)
	    offset = INTVAL (XEXP (memexp, 0));
	  else
	    gcc_unreachable ();
	}
      else
	gcc_unreachable ();

      /* 16-bit st.w pairs need a low source reg and small offset;
	 the boundary cases cost one 16-bit plus one 32-bit insn.  */
      if (srcreg <= 6 && offset <= 1020)
	return 4;
      else if ((srcreg == 7 && offset <= 1024) || (srcreg <= 7 && offset == 1024))
	return 6;
      else
	return 8;
    }
  else
    gcc_unreachable ();

  return 0;
}
4829
4830 /* Output float point load/store instructions for fpuv3. */
4831
4832 const char *
4833 fpuv3_output_move (rtx *operands)
4834 {
4835 rtx reg, mem, addr, ops[2];
4836 bool isload = REG_P (operands[0]);
4837
4838 const char *templ = "f%s%s.%s\t%%0, %%1";
4839 char buff[50];
4840 machine_mode mode;
4841
4842 reg = operands[isload ? 0 : 1];
4843 mem = operands[isload ? 1 : 0];
4844
4845 gcc_assert (REG_P (reg));
4846 gcc_assert (CSKY_VREG_P (REGNO (reg)));
4847 gcc_assert (MEM_P (mem));
4848
4849 mode = GET_MODE (reg);
4850 const char *type = mode == DFmode ? "64" :
4851 mode == SFmode ? "32" :
4852 mode == HFmode ? "16" :
4853 NULL;
4854 gcc_assert(type != NULL);
4855
4856 addr = XEXP (mem, 0);
4857 struct csky_address caddr;
4858 decompose_csky_address (addr, &caddr);
4859
4860 ops[0] = reg;
4861 ops[1] = mem;
4862 sprintf (buff, templ,
4863 isload ? "ld" : "st",
4864 caddr.index ? "r" : "",
4865 type);
4866 output_asm_insn (buff, ops);
4867
4868 return "";
4869 }
4870
/* Check if a const_double can be used by a VFP fmovi instruction.
   Returns 1 if X's value is representable as an fpuv3 immediate:
   a normal, positive-or-negative non-zero value with a limited
   exponent range and at most 9 significant mantissa bits.  */

int
fpuv3_const_double_rtx (rtx x)
{
  REAL_VALUE_TYPE r, m;
  r = *CONST_DOUBLE_REAL_VALUE (x);

  /* Fpuv3 doesn't support the following values.  */
  if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r) || REAL_VALUE_MINUS_ZERO (r)
      || r.cl == rvc_zero)
    return 0;

  /* Extract sign, exponent and mantissa.  */
  int exponent;
  r = real_value_abs (&r);
  exponent = REAL_EXP (&r);

  /* Scale the value so the full mantissa lands in the top of a
     2*HOST_WIDE_INT integer, then read it back as two words.  */
  bool fail;
  unsigned HOST_WIDE_INT mantissa, mant_hi;
  unsigned HOST_WIDE_INT mask;
  int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
  real_ldexp (&m, &r, point_pos - exponent);
  wide_int w = real_to_integer (&m, &fail, HOST_BITS_PER_WIDE_INT * 2);
  mantissa = w.elt (0);
  mant_hi = w.elt (1);

  /* Adjust from REAL_EXP's 0.5..1.0 normalization to 1.0..2.0.  */
  exponent -= 1;

  if (!IN_RANGE (exponent, -4, 11))
    return 0;

  /* If there are bits set in the low part of the mantissa, these values are
     not supported.  */
  if (mantissa != 0)
    return 0;

  /* Now, make the mantissa contain the most-significant bits, and the
     point_pos indicates the number of these bits.  */
  point_pos -= HOST_BITS_PER_WIDE_INT;
  mantissa = mant_hi;

  /* We can only allow a mantissa of 9 significant digits, top of which is always 1.  */
  mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 9)) - 1;
  if ((mantissa & mask) != 0)
    return 0;

  return 1;
}
4920
4921
4922 /* Split operands for an AND expression when OPERANDS[2] is a constant.
4923 Note operands[0] is marked earlyclobber in this case and can be
4924 overwritten. Return true if "DONE", false otherwise. */
4925
4926 bool
4927 csky_split_and (rtx *operands)
4928 {
4929 HOST_WIDE_INT mask = INTVAL (operands[2]);
4930 rtx not_value = GEN_INT (~mask);
4931 int i;
4932
4933 /* All zeros or all ones can be handled by a move instruction. */
4934 if (mask == 0)
4935 {
4936 emit_move_insn (operands[0], const0_rtx);
4937 return true;
4938 }
4939 if (mask == -1)
4940 {
4941 emit_move_insn (operands[0], operands[1]);
4942 return true;
4943 }
4944
4945 /* Check for constants that can be handled directly by the 32-bit andi
4946 instruction. */
4947 if (CSKY_ISA_FEATURE (E2) && csky_arith_O_operand (operands[2], SImode))
4948 return false;
4949
4950 /* Try to transform to andni instruction. */
4951 if (CSKY_ISA_FEATURE (E2) && csky_arith_O_operand (not_value, SImode))
4952 {
4953 emit_insn (gen_cskyv2_andnsi3 (operands[0], not_value, operands[1]));
4954 return true;
4955 }
4956
4957 /* If there are only one or two 0 bits in the constant, we can
4958 replace the operation with bclri instructions on those bits.
4959 Note CK801 has only the 16-bit bclri that operates on a single
4960 register, so we must count a move if we are post-reload. */
4961 if (popcount_hwi (~mask & 0xffffffff)
4962 <= (reload_completed && !CSKY_ISA_FEATURE (E2) ? 1 : 2))
4963 {
4964 rtx input = operands[1];
4965
4966 if (!CSKY_ISA_FEATURE (E2))
4967 {
4968 emit_move_insn (operands[0], input);
4969 input = operands[0];
4970 }
4971
4972 for (i = 0; i < 32; i++)
4973 if ((mask & (1 << i)) == 0x0)
4974 {
4975 emit_insn (gen_bclri (operands[0], input, GEN_INT (i)));
4976 input = operands[0];
4977 }
4978 return true;
4979 }
4980
4981 /* If the constant mask is outside the [0, 4095] range for
4982 constraint O, or if constraint O is not allowed (ck801),
4983 maybe the constant is a contiguous bit range that we can
4984 handle by bit extract (low bits) or shifts (high bits). */
4985 for (i = (CSKY_ISA_FEATURE (E2) ? 13 : 1); i < 32; i++)
4986 {
4987 if ((((HOST_WIDE_INT) 1) << i) - 1 == mask)
4988 {
4989 if (CSKY_ISA_FEATURE (2E3))
4990 emit_insn (gen_cskyv2_extzv (operands[0], operands[1],
4991 GEN_INT (i), const0_rtx));
4992 else
4993 {
4994 rtx shift = GEN_INT (32 - i);
4995 rtx reg = (reload_completed
4996 ? operands[0] : gen_reg_rtx (SImode));
4997
4998 emit_insn (gen_ashlsi3 (reg, operands[1], shift));
4999 emit_insn (gen_lshrsi3 (operands[0], reg, shift));
5000 }
5001 return true;
5002 }
5003 else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~mask)
5004 {
5005 rtx shift = GEN_INT (i);
5006 rtx reg = (reload_completed
5007 ? operands[0] : gen_reg_rtx (SImode));
5008
5009 emit_insn (gen_lshrsi3 (reg, operands[1], shift));
5010 emit_insn (gen_ashlsi3 (operands[0], reg, shift));
5011 return true;
5012 }
5013 }
5014
5015 /* If the constant is a negative number, it seems better to use
5016 andn and copy the NOT_VALUE to a register instead of the
5017 original value, since the NOT_VALUE is always smaller and thus
5018 more likely to be representable as a small constant.
5019 This transformation can only be done before reload because
5020 it requires a temporary. Hopefully register allocation can get
5021 rid of the extra move required for CK801. */
5022 if (!reload_completed && INTVAL (operands[2]) < 0)
5023 {
5024 rtx reg = copy_to_mode_reg (SImode, not_value);
5025
5026 if (CSKY_ISA_FEATURE (E2))
5027 emit_insn (gen_cskyv2_andnsi3 (operands[0], reg, operands[1]));
5028 else
5029 {
5030 emit_move_insn (operands[0], operands[1]);
5031 emit_insn (gen_ck801_andnsi3 (operands[0], reg, operands[0]));
5032 }
5033 return true;
5034 }
5035
5036 /* If the above ways are all not working, move the constant
5037 to a register. We can clobber operands[0] as it is
5038 marked earlyclobber in the insn constraints, but then we have to
5039 swap operands 1 and 2 to match the constraints on the 2-operand
5040 16-bit and instruction. */
5041 if (reload_completed)
5042 {
5043 emit_move_insn (operands[0], operands[2]);
5044 operands[2] = operands[1];
5045 operands[1] = operands[0];
5046 }
5047 else
5048 operands[2] = copy_to_mode_reg (SImode, operands[2]);
5049 return false;
5050 }
5051
5052 /* Split operands for an IOR expression when OPERANDS[2] is a constant.
5053 Note operands[0] is marked earlyclobber in this case and can be
5054 overwritten. Return true if "DONE", false otherwise. */
5055
5056 bool
5057 csky_split_ior (rtx *operands)
5058 {
5059 HOST_WIDE_INT mask = INTVAL (operands[2]);
5060 int i;
5061
5062 /* All zeros or all ones can be handled by a move instruction. */
5063 if (mask == 0)
5064 {
5065 emit_move_insn (operands[0], operands[1]);
5066 return true;
5067 }
5068 if (mask == -1)
5069 {
5070 emit_move_insn (operands[0], gen_int_mode (-1, SImode));
5071 return true;
5072 }
5073
5074 /* Check for constants that can be handled directly by the 32-bit ori
5075 instruction. */
5076 if (CSKY_ISA_FEATURE (E2) && csky_literal_I_operand (operands[2], SImode))
5077 return false;
5078
5079 /* If there are only one or two 1 bits in the value, we can replace
5080 the operation with bseti instructions to set those bits.
5081 Note CK801 has only the 16-bit bclri that operates on a single
5082 register, so we must count a move if we are post-reload. */
5083 if (popcount_hwi (mask & 0xffffffff)
5084 <= (reload_completed && !CSKY_ISA_FEATURE (E2) ? 1 : 2))
5085 {
5086 rtx input = operands[1];
5087
5088 if (!CSKY_ISA_FEATURE (E2))
5089 {
5090 emit_move_insn (operands[0], input);
5091 input = operands[0];
5092 }
5093
5094 for (i = 0; i < 32; i++)
5095 if (mask & (1 << i))
5096 {
5097 emit_insn (gen_bseti (operands[0], input, GEN_INT (i)));
5098 input = operands[0];
5099 }
5100 return true;
5101 }
5102
5103 /* If the above ways are all not working, move the constant
5104 to a register. We can clobber operands[0] as it is
5105 marked earlyclobber in the insn constraints, but then we have to
5106 swap operands 1 and 2 to match the constraints on the 2-operand
5107 16-bit ior instruction. */
5108 if (reload_completed)
5109 {
5110 emit_move_insn (operands[0], operands[2]);
5111 operands[2] = operands[1];
5112 operands[1] = operands[0];
5113 }
5114 else
5115 operands[2] = copy_to_mode_reg (SImode, operands[2]);
5116 return false;
5117 }
5118
5119
5120 /* Split operands for an XOR expression when OPERANDS[2] is a constant.
5121 Note operands[0] is marked earlyclobber in this case and can be
5122 overwritten. Return true if "DONE", false otherwise. */
5123
5124 bool
5125 csky_split_xor (rtx *operands)
5126 {
5127 HOST_WIDE_INT mask = INTVAL (operands[2]);
5128
5129 /* All zeros can be turned into move instruction. */
5130 if (mask == 0)
5131 {
5132 emit_move_insn (operands[0], operands[1]);
5133 return true;
5134 }
5135
5136 /* All ones can be turned into a bitwise not. */
5137 if (mask == -1)
5138 {
5139 if (CSKY_ISA_FEATURE (E2))
5140 emit_insn (gen_cskyv2_one_cmplsi2 (operands[0], operands[1]));
5141 else
5142 {
5143 emit_move_insn (operands[0], operands[1]);
5144 emit_insn (gen_ck801_one_cmplsi2 (operands[0], operands[0]));
5145 }
5146 return true;
5147 }
5148
5149 /* Check for constants that can be handled directly by the 32-bit xori
5150 instruction. */
5151 if (CSKY_ISA_FEATURE (E2) && csky_arith_O_operand (operands[2], SImode))
5152 return false;
5153
5154 /* If the above ways are all not working, move the constant
5155 to a register. We can clobber operands[0] as it is
5156 marked earlyclobber in the insn constraints, but then we have to
5157 swap operands 1 and 2 to match the constraints on the 2-operand
5158 16-bit ior instruction. */
5159 if (reload_completed)
5160 {
5161 emit_move_insn (operands[0], operands[2]);
5162 operands[2] = operands[1];
5163 operands[1] = operands[0];
5164 }
5165 else
5166 operands[2] = copy_to_mode_reg (SImode, operands[2]);
5167 return false;
5168 }
5169
5170
5171 /* Return true if X is an address form involving a symbol or label ref. */
5172
5173 bool
5174 csky_symbolic_address_p (rtx x)
5175 {
5176 switch (GET_CODE (x))
5177 {
5178 case SYMBOL_REF:
5179 case LABEL_REF:
5180 return 1;
5181 case CONST:
5182 x = XEXP (x, 0);
5183 return ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
5184 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
5185 && GET_CODE (XEXP (x, 1)) == CONST_INT);
5186 default:
5187 return 0;
5188 }
5189 }
5190
5191
5192 /* Emit a comparison instruction.
5193 Return true if an inverted comparison is generated. */
5194
5195 bool
5196 csky_emit_compare (enum rtx_code code, rtx op0, rtx op1)
5197 {
5198 bool invert;
5199 rtx cc_reg = gen_rtx_REG (CCmode, CSKY_CC_REGNUM);
5200
5201 if (GET_MODE_CLASS(GET_MODE (op0)) == MODE_FLOAT)
5202 return csky_emit_compare_float(code, op0, op1);
5203
5204 if (GET_CODE (op1) == CONST_INT)
5205 {
5206 HOST_WIDE_INT val = INTVAL (op1);
5207
5208 switch (code)
5209 {
5210 case GTU:
5211 /* Unsigned (GTU 0) is the same as (NE 0); everything else is
5212 converted below to LEU (reversed cmphs). */
5213 if (val == 0)
5214 code = NE;
5215 /* Check whether (GTU A imm) can become (GEU A imm + 1). */
5216 else if (TARGET_MINI_REGISTERS
5217 ? CSKY_CONST_OK_FOR_J (val + 1)
5218 : CSKY_CONST_OK_FOR_Uk (val + 1))
5219 {
5220 op1 = GEN_INT (val + 1);
5221 code = GEU;
5222 }
5223 break;
5224 /* Check whether (LE A imm) can become (LT A imm + 1),
5225 or (GT A imm) can become (GE A imm + 1). */
5226 case GT:
5227 case LE:
5228 if (TARGET_MINI_REGISTERS
5229 ? CSKY_CONST_OK_FOR_J (val + 1)
5230 : CSKY_CONST_OK_FOR_Uk (val + 1))
5231 {
5232 op1 = GEN_INT (val + 1);
5233 code = code == LE ? LT : GE;
5234 }
5235 break;
5236
5237 default:
5238 break;
5239 }
5240 }
5241
5242 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
5243 op1 = force_reg (GET_MODE (op1), op1);
5244
5245 /* cmpnei: 0-31 (K immediate)
5246 ti: 1-32 (J immediate, 0 using btsti x,31). */
5247 invert = false;
5248 switch (code)
5249 {
5250 /* Use inverted condition, cmpne. */
5251 case EQ:
5252 code = NE;
5253 invert = true;
5254 /* Fall through. */
5255 /* Use normal condition, cmpne. */
5256 case NE:
5257 if (GET_CODE (op1) == CONST_INT
5258 && (TARGET_MINI_REGISTERS
5259 ? !csky_literal_K_operand (op1, SImode)
5260 : !csky_literal_I_operand (op1, SImode)))
5261 op1 = force_reg (SImode, op1);
5262 break;
5263
5264 /* Use inverted condition, reversed cmplt. */
5265 case LE:
5266 code = GT;
5267 invert = true;
5268 /* Fall through. */
5269 /* Use normal condition, reversed cmplt. */
5270 case GT:
5271 if (GET_CODE (op1) == CONST_INT)
5272 op1 = force_reg (SImode, op1);
5273 break;
5274
5275 /* Use inverted condition, cmplt. */
5276 case GE:
5277 code = LT;
5278 invert = true;
5279 /* Fall through. */
5280 /* Use normal condition, cmplt. */
5281 case LT:
5282 /* covered by btsti x,31. */
5283 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0
5284 && (TARGET_MINI_REGISTERS
5285 ? !csky_literal_J_operand (op1, SImode)
5286 : !csky_literal_Uk_operand (op1, SImode)))
5287 op1 = force_reg (SImode, op1);
5288 break;
5289
5290 /* Use inverted condition, cmple. */
5291 case GTU:
5292 /* We coped with unsigned > 0 above. */
5293 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
5294 code = LEU;
5295 invert = true;
5296 /* Fall through. */
5297 /* Use normal condition, reversed cmphs. */
5298 case LEU:
5299 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
5300 op1 = force_reg (SImode, op1);
5301 break;
5302
5303 /* Use inverted condition, cmphs. */
5304 case LTU:
5305 code = GEU;
5306 invert = true;
5307 /* Fall through. */
5308 /* Use normal condition, cmphs. */
5309 case GEU:
5310 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0
5311 && (TARGET_MINI_REGISTERS
5312 ? !csky_literal_J_operand (op1, SImode)
5313 : !csky_literal_Uk_operand (op1, SImode)))
5314 op1 = force_reg (SImode, op1);
5315 break;
5316
5317 default:
5318 break;
5319 }
5320
5321 emit_insn (gen_rtx_SET (cc_reg,
5322 gen_rtx_fmt_ee (code, CCmode, op0, op1)));
5323 return invert;
5324 }
5325
5326 /* Return true if push/pop can be used to save/restore all the registers
5327 indicated by MASK. We currently don't attempt to handle situations where
5328 some of the registers could be handled by push/pop and others saved and
5329 restored individually. */
5330
5331 static bool
5332 csky_can_use_pushpop (unsigned int mask)
5333 {
5334 int i;
5335 int end_reg;
5336
5337 if (!TARGET_PUSHPOP)
5338 return false;
5339
5340 if (mask == 0)
5341 return false;
5342
5343 /* Regs 0-3, 12-14, 18-27, 29-31 cannot be in the mask. */
5344 if (mask & 0xeffc700f)
5345 return false;
5346
5347 /* Regs in the range r4-r11 must be contiguous. */
5348 for (end_reg = 0, i = 11; i >= 4; i--)
5349 {
5350 if (!end_reg && (mask & (1 << i)))
5351 end_reg = i;
5352 if (end_reg && !(mask & (1 << i)))
5353 return false;
5354 }
5355
5356 /* Likewise for regs in the range r16-r17. */
5357 for (end_reg = 0, i = 17; i >= 16; i--)
5358 {
5359 if (!end_reg && (mask & (1 << i)))
5360 end_reg = i;
5361 if (end_reg && !(mask & (1 << i)))
5362 return false;
5363 }
5364
5365 return true;
5366 }
5367
5368
5369 /* Return true if store/load multiple instructions can be used to
5370 save/restore at least some of the registers indicated by MASK.
5371 Unlike the push/pop case, this does handle partial ranges.
5372 Set *BR and *ER to the beginning and end (respectively) of the
5373 register range that can be handled. */
5374
5375 static bool
5376 csky_can_use_ldstm (int mask, int *br, int *er)
5377 {
5378 int regno;
5379 int begin_reg = 0, end_reg = 0;
5380 int count = 0;
5381
5382 if (!TARGET_MULTIPLE_STLD)
5383 return false;
5384
5385 /* We'll only handle registers in the range 4-11, the contiguous range
5386 of caller-saved registers. Higher-numbered registers are handled
5387 individually in addition to this, but we'll give up on doing ldstm
5388 entirely if we need to save/restore the low-numbered EH registers. */
5389 if (mask & 0xf)
5390 return false;
5391
5392 for (regno = 4; regno <= 11; regno++)
5393 {
5394 if (mask & 1 << regno)
5395 {
5396 if (!begin_reg)
5397 begin_reg = regno;
5398 end_reg = regno;
5399 count++;
5400 }
5401 else if (begin_reg)
5402 break;
5403 }
5404
5405 if (count >= CSKY_MIN_MULTIPLE_STLD && count <= CSKY_MAX_MULTIPLE_STLD)
5406 {
5407 if (br)
5408 *br = begin_reg;
5409 if (er)
5410 *er = end_reg;
5411 return true;
5412 }
5413 return false;
5414 }
5415
5416
/* Return the assembler template for a function return: nothing for a
   naked function, ipop/nir for an interrupt handler, rts otherwise.  */
const char *
csky_output_return_instruction (void)
{
  unsigned long func_type = get_csky_current_func_type ();

  if (CSKY_FUNCTION_IS_NAKED (func_type))
    return "";
  return (CSKY_FUNCTION_IS_INTERRUPT (func_type)
	  ? "ipop\n\tnir\n"
	  : "rts\n");
}
5429
5430
5431 /* Adjust the stack pointer by OFFSET bytes. OFFSET is negative if this
5432 is in the prologue, positive if in the epilogue. This may require
5433 multiple instructions and/or use of CSKY_STACKADJUST_REGNUM as
5434 a scratch register. Emit CFA notes as appropriate. */
5435 static void
5436 expand_csky_stack_adjust (int offset)
5437 {
5438 rtx set;
5439 rtx_insn *insn;
5440 int size = (offset > 0 ? offset : -offset);
5441
5442 if (offset == 0)
5443 return;
5444
5445 /* If OFFSET is too large for addi/subi, load it into
5446 CSKY_STACKADJUST_REGNUM and use a register add/sub instead.
5447 This case is not mentioned in the ABI documentation, but it is
5448 supported by GDB prologue analysis provided that the instruction(s)
5449 to initialize CSKY_STACKADJUST_REGNUM appear directly before
5450 the sub. Depending on the value of OFFSET, this might be a
5451 lrw instruction or the "tricks" used by csky_output_inline_const to
5452 encode special-case integer constants. */
5453 if (size > CSKY_MAX_SP_ADJUST * 2)
5454 {
5455 rtx tmp, dwarf;
5456
5457 /* We should have reserved the scratch register already in
5458 csky_layout_stack_frame. */
5459 gcc_assert (cfun->machine->reg_size != 0
5460 && (cfun->machine->reg_mask
5461 & (1 << CSKY_STACKADJUST_REGNUM)));
5462
5463 /* Prevent the optimizer from reordering these instructions to
5464 keep GDB happy. */
5465 if (!flag_sched_prolog)
5466 emit_insn (gen_blockage ());
5467
5468 tmp = gen_rtx_REG (SImode, CSKY_STACKADJUST_REGNUM);
5469 emit_move_insn (tmp, GEN_INT (size));
5470
5471 if (offset > 0)
5472 set = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp);
5473 else
5474 set = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp);
5475 insn = emit_insn (set);
5476 RTX_FRAME_RELATED_P (insn) = 1;
5477 dwarf = gen_rtx_SET (stack_pointer_rtx,
5478 plus_constant (Pmode, stack_pointer_rtx, offset));
5479 add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
5480
5481 /* More make GDB happy. */
5482 if (!flag_sched_prolog)
5483 emit_insn (gen_blockage ());
5484 }
5485
5486 /* Use one or two addi or subi insns to adjust stack. */
5487 else
5488 while (size)
5489 {
5490 int delta = (size > CSKY_MAX_SP_ADJUST
5491 ? CSKY_MAX_SP_ADJUST : size);
5492
5493 if (offset > 0)
5494 set = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
5495 GEN_INT (delta));
5496 else
5497 set = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx,
5498 GEN_INT (delta));
5499 insn = emit_insn (set);
5500 RTX_FRAME_RELATED_P (insn) = 1;
5501 size -= delta;
5502 }
5503 }
5504
5505
/* Generate and emit an insn that we will recognize as a push_multi.
   Unfortunately, since this insn does not reflect very well the actual
   semantics of the operation, we need to annotate the insn for the benefit
   of DWARF2 frame unwind information.  Every register in MASK is
   annotated for unwind.  Returns the emitted insn.  */

static rtx
emit_csky_regs_push (unsigned long mask)
{
  int num_regs = 0;
  int i, j;
  rtx par;	/* PARALLEL forming the push_multi insn body.  */
  rtx dwarf;	/* SEQUENCE describing the push for the unwinder.  */
  rtx tmp;
  int dwarf_par_index;

  /* Count the registers selected by MASK.  */
  for (i = 0; i < CSKY_NGPR_REGS; i++)
    {
      if (mask & (1 << i))
	num_regs++;
    }

  /* The reg range for push is:r4-r11,r15-r17,r28.  */
  gcc_assert (num_regs && num_regs <= 12);

  /* For the body of the insn we are going to generate an UNSPEC in
     parallel with several USEs.  This allows the insn to be recognized
     by the push_multi pattern in the csky.md file.

     The body of the insn looks something like this:

       (parallel [
	   (set (mem:BLK (pre_modify:SI (reg:SI sp)
				        (const_int:SI <num>)))
		(unspec:BLK [(reg:SI r4)] UNSPEC_PUSHPOP_MULT))
	   (use (reg:SI XX))
	   (use (reg:SI YY))
	   ...
	])

     For the frame note however, we try to be more explicit and actually
     show each register being stored into the stack frame, plus a (single)
     decrement of the stack pointer.  We do it this way in order to be
     friendly to the stack unwinding code, which only wants to see a single
     stack decrement per instruction.  The RTL we generate for the note looks
     something like this:

      (sequence [
	   (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
	   (set (mem:SI (reg:SI sp)) (reg:SI r4))
	   (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI XX))
	   (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI YY))
	   ...
	])

     FIXME:: In an ideal world the PRE_MODIFY would not exist and
     instead we'd have a parallel expression detailing all
     the stores to the various memory addresses so that debug
     information is more up-to-date.  Remember however while writing
     this to take care of the constraints with the push instruction.

     Note also that this has to be taken care of for the VFP registers.

     For more see PR43399.  */

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_regs + 1));
  /* Slot 0 of DWARF is reserved for the single SP decrement, filled in
     at the end.  */
  dwarf_par_index = 1;

  /* Handle the first (lowest-numbered) register in MASK: it carries the
     PRE_MODIFY/UNSPEC body of the PARALLEL.  The loop breaks after
     processing that one register.  */
  for (i = 0; i < CSKY_NGPR_REGS; i++)
    if (mask & (1 << i))
      {
	rtx reg = gen_rtx_REG (SImode, i);
	rtx addr = plus_constant (Pmode, stack_pointer_rtx, -4 * num_regs);
	tmp = gen_frame_mem (BLKmode,
			     gen_rtx_PRE_MODIFY (Pmode,
						 stack_pointer_rtx, addr));
	XVECEXP (par, 0, 0)
	  = gen_rtx_SET (tmp,
			 gen_rtx_UNSPEC (BLKmode,
					 gen_rtvec (1, reg),
					 UNSPEC_PUSHPOP_MULT));
	tmp = gen_rtx_SET (gen_frame_mem (SImode, stack_pointer_rtx),
			   reg);
	RTX_FRAME_RELATED_P (tmp) = 1;
	XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;

	break;
      }

  /* The remaining registers become USEs in the PARALLEL and explicit
     stores at increasing offsets in the DWARF sequence.  I continues
     from the register found by the loop above.  */
  for (j = 1, i++; j < num_regs; i++)
    if (mask & (1 << i))
      {
	rtx reg = gen_rtx_REG (SImode, i);
	rtx addr = plus_constant (Pmode, stack_pointer_rtx, 4 * j);
	tmp = gen_rtx_SET (gen_frame_mem (SImode, addr), reg);
	RTX_FRAME_RELATED_P (tmp) = 1;
	XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
	XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
	j++;
      }

  par = emit_insn (par);

  /* The single SP decrement covering the whole push.  */
  tmp = gen_rtx_SET (stack_pointer_rtx,
		     plus_constant (Pmode, stack_pointer_rtx, -4 * num_regs));
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  add_reg_note (par, REG_FRAME_RELATED_EXPR, dwarf);
  RTX_FRAME_RELATED_P (par) = 1;

  return par;
}
5621
5622
/* Generate and emit an insn pattern that we will recognize as a pop_multi.
   MASK shows which registers need to be restored.

   Unfortunately, since this insn does not reflect very well the actual
   semantics of the operation, we need to annotate the insn for the benefit
   of DWARF2 frame unwind information.  */

static void
emit_csky_regs_pop (unsigned long mask)
{
  int num_regs = 0;
  int i, j;
  rtx par;

  /* Count the registers selected by MASK.  */
  for (i = 0; i < CSKY_NGPR_REGS; i++)
    if (mask & (1 << i))
      num_regs++;

  /* The reg range for pop is:r4-r11,r15-r17,r28.  */
  gcc_assert (num_regs && num_regs <= 12);

  /* The first element is (return),
     the second element is
      (set (reg:SI 'first reg number')
	   (unspec:SI [(mem)] UNSPEC_PUSHPOP_MULT)),
     the remaining elements are (use (reg:SI 'rest reg number')),
     so the length should be the number of registers to be popped
     plus one.  */
  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs + 1));

  XVECEXP (par, 0, 0) = ret_rtx;

  /* The first (lowest-numbered) register in MASK carries the
     POST_MODIFY/UNSPEC load; the loop breaks after processing it.  */
  for (i = 0; i < CSKY_NGPR_REGS; i++)
    if (mask & (1 << i))
      {
	rtx reg = gen_rtx_REG (SImode, i);
	rtx addr = plus_constant (Pmode, stack_pointer_rtx, 4 * num_regs);
	rtx tmp = gen_frame_mem (SImode,
				 gen_rtx_POST_MODIFY (Pmode,
						      stack_pointer_rtx, addr));
	XVECEXP (par, 0, 1)
	  = gen_rtx_SET (reg,
			 gen_rtx_UNSPEC (SImode,
					 gen_rtvec (1, tmp),
					 UNSPEC_PUSHPOP_MULT));
	break;
      }

  /* The remaining registers appear as USEs; I continues from the
     register found by the loop above.  */
  for (j = 2, i++; j < (num_regs + 1); i++)
    if (mask & (1 << i))
      {
	rtx reg = gen_rtx_REG (SImode, i);
	XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
	j++;
      }

  /* The pop pattern includes the function return, hence the jump insn.  */
  par = emit_jump_insn (par);
}
5681
5682
/* Generate the function prologue: save registers, set up the frame
   pointer, allocate the local frame, and load the GOT pointer for PIC.
   Emitted as RTL from the prologue expander.  */

void
csky_expand_prologue (void)
{
  rtx_insn *insn;
  unsigned long func_type = get_csky_current_func_type ();
  unsigned int reg_mask;
  int reg_size;

  /* Naked functions get no prologue at all.  */
  if (CSKY_FUNCTION_IS_NAKED (func_type))
    {
      if (flag_stack_usage_info)
	current_function_static_stack_size = 0;
      return;
    }

  csky_layout_stack_frame ();
  reg_mask = cfun->machine->reg_mask;
  reg_size = cfun->machine->reg_size;

  /* Adjust stack pointer past argument overflow area.  */
  if (cfun->machine->arg_size != 0)
    {
      int offset = cfun->machine->arg_size;
      expand_csky_stack_adjust (- offset);

      /* If we have a parameter passed partially in regs and partially
	 in memory, the registers will have been stored to memory already
	 in function.c.  So we only need to copy varargs from registers
	 to stack.  */
      if (cfun->machine->uses_anonymous_args)
	{
	  /* Spill the anonymous argument registers from the highest
	     downward, at decreasing stack offsets.  */
	  int rn = CSKY_FIRST_PARM_REGNUM + CSKY_NPARM_REGS - 1;
	  for (offset -= 4; offset >= 0; offset -= 4, rn--)
	    {
	      rtx dst = gen_frame_mem (SImode,
				       plus_constant (Pmode,
						      stack_pointer_rtx,
						      offset));
	      insn = emit_move_insn (dst, gen_rtx_REG (SImode, rn));
	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}
    }

  /* Push caller-saved registers to stack.  */
  if (csky_can_use_pushpop (reg_mask))
    emit_csky_regs_push (reg_mask);
  else if (reg_size)
    {
      int sreg = -1, ereg = -1;
      bool stm_p = csky_can_use_ldstm (reg_mask, &sreg, &ereg);
      int stm_regs = stm_p ? ereg - sreg + 1 : 0;
      int stm_size = stm_regs * 4;

      /* First adjust the SP to the low end of the register save area.  */
      expand_csky_stack_adjust (- reg_size);

      /* Emit individual register saves.  Even if we are going to emit an
	 stm, we may need to save individual registers above that too.  */
      if (reg_size > stm_size)
	{
	  int offset = reg_size - 4;
	  int regno = 31;
	  /* Walk downward from the highest saved register to the top of
	     any stm range.  */
	  for ( ; regno > ereg; regno--)
	    if (reg_mask & (1 << regno))
	      {
		rtx dst = gen_rtx_MEM (SImode,
				       plus_constant (Pmode,
						      stack_pointer_rtx,
						      offset));
		/* NOTE(review): this inner `rtx insn' shadows the outer
		   `rtx_insn *insn'; harmless but worth cleaning up.  */
		rtx insn = emit_insn (gen_movsi (dst,
						 gen_rtx_REG (SImode, regno)));
		RTX_FRAME_RELATED_P (insn) = 1;
		if (offset == stm_size)
		  break;
		offset -= 4;
	      }
	}

      /* If possible, emit a stm to do a bulk store of sequential
	 registers to the stack.  Note that it is an error in the ABI
	 documentation that it doesn't list stm as a valid prologue
	 instruction.  */
      if (stm_p)
	{
	  rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (stm_regs));
	  int regno, slot;
	  for (regno = sreg, slot = 0; regno <= ereg; regno++, slot++)
	    {
	      rtx reg = gen_rtx_REG (SImode, regno);
	      rtx addr = plus_constant (Pmode, stack_pointer_rtx, slot * 4);
	      rtx set = gen_rtx_SET (gen_frame_mem (SImode, addr), reg);
	      RTX_FRAME_RELATED_P (set) = 1;
	      XVECEXP (par, 0, slot) = set;
	    }
	  insn = emit_insn (par);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }

  /* Initialize hard frame pointer, if necessary.  It points at the base
     of the register save area.  */
  if (frame_pointer_needed)
    {
      insn = emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Reserve stack space for locals and outgoing args.  */
  expand_csky_stack_adjust (- cfun->machine->reg_offset);

  /* Put the GOT address in reg_gb for PIC, using R13 as a scratch.
     See section 4.7.1 in the ABI documentation,
     "Function Prologue for PIC".  */
  if (flag_pic && (reg_mask & (1 << PIC_OFFSET_TABLE_REGNUM)))
    {
      rtx l1 = gen_label_rtx ();
      rtx grs_label = gen_rtx_LABEL_REF (SImode, l1);
      rtx reg_gb = gen_rtx_REG (SImode, PIC_OFFSET_TABLE_REGNUM);
      rtx reg_temp = gen_rtx_REG (SImode, 13);

      rtx tmp0_unspec = gen_rtx_UNSPEC (Pmode,
					gen_rtvec (1, grs_label),
					UNSPEC_PIC_SYMBOL_GOTPC_GRS);
      rtx tmp1_unspec = gen_rtx_UNSPEC (Pmode,
					gen_rtvec (1, grs_label),
					UNSPEC_PIC_SYMBOL_GOTPC);

      /* gb = pc-of-label + (GOT - pc-of-label) == address of the GOT.  */
      emit_insn (gen_prologue_get_pc (tmp0_unspec));
      emit_move_insn (reg_temp, tmp1_unspec);
      emit_insn (gen_addsi3 (reg_gb, reg_gb, reg_temp));
    }

  if (flag_stack_usage_info)
    current_function_static_stack_size = cfun->machine->frame_size;

  /* Keep the scheduler from moving prologue insns into the body.  */
  if (!flag_sched_prolog)
    emit_insn (gen_blockage ());
}
5824
/* Generate the function epilogue: restore callee-saved registers,
   deallocate the frame, and return.  Emitted as RTL from the epilogue
   expander.  */
void
csky_expand_epilogue (void)
{
  unsigned long func_type = get_csky_current_func_type ();
  unsigned int reg_mask;
  int reg_size;
  int adjust;
  rtx_insn *insn;

  /* Keep the scheduler from moving body insns into the epilogue.  */
  if (!flag_sched_prolog)
    emit_insn (gen_blockage ());

  /* Naked functions get a bare return; the user supplies the rest.  */
  if (CSKY_FUNCTION_IS_NAKED (func_type))
    {
      emit_jump_insn (gen_simple_return ());
      return;
    }

  /* Get the frame information.  */
  csky_layout_stack_frame ();
  reg_mask = cfun->machine->reg_mask;
  reg_size = cfun->machine->reg_size;
  /* Final SP adjustment: register save area plus incoming-arg area.  */
  adjust = reg_size + cfun->machine->arg_size;

  /* Restore the SP to the base of the register save area.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    expand_csky_stack_adjust (cfun->machine->reg_offset);

  /* Restore the callee-saved registers.  */
  if (csky_can_use_pushpop (reg_mask)
      && cfun->machine->arg_size == 0
      && !CSKY_FUNCTION_IS_INTERRUPT (func_type)
      && !crtl->calls_eh_return)
    {
      /* Pop includes an implicit return, so we are done.  */
      emit_csky_regs_pop (reg_mask);
      return;
    }
  else if (reg_size)
    {
      int sreg = -1, ereg = -1;
      bool ldm_p = csky_can_use_ldstm (reg_mask, &sreg, &ereg);
      int ldm_regs = ldm_p ? ereg - sreg + 1 : 0;
      int ldm_size = ldm_regs * 4;

      /* Emit individual register loads.  Even if we are going to emit an
	 ldm, we may need to load individual registers above that too.  */
      if (reg_size > ldm_size)
	{
	  int offset = reg_size - 4;
	  int regno = 31;
	  /* Walk downward from the highest saved register to the top of
	     any ldm range.  */
	  for ( ; regno > ereg; regno--)
	    if (reg_mask & (1 << regno))
	      {
		rtx src = gen_frame_mem (SImode,
					 plus_constant (Pmode,
							stack_pointer_rtx,
							offset));
		rtx reg = gen_rtx_REG (SImode, regno);
		insn = emit_move_insn (reg, src);
		RTX_FRAME_RELATED_P (insn) = 1;
		add_reg_note (insn, REG_CFA_RESTORE, reg);
		if (offset == ldm_size)
		  break;
		offset -= 4;
	      }
	}

      /* If possible, emit a ldm to do a bulk load of sequential
	 registers from the stack.  */
      if (ldm_p)
	{
	  rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (ldm_regs));
	  int regno, slot;
	  for (regno = sreg, slot = 0; regno <= ereg; regno++, slot++)
	    {
	      rtx reg = gen_rtx_REG (SImode, regno);
	      rtx addr = plus_constant (Pmode, stack_pointer_rtx, slot * 4);
	      rtx set = gen_rtx_SET (reg, gen_frame_mem (SImode, addr));
	      XVECEXP (par, 0, slot) = set;
	    }
	  insn = emit_insn (par);
	  RTX_FRAME_RELATED_P (insn) = 1;
	  /* Mark every register in the ldm as restored for unwind info.  */
	  for (regno = sreg; regno <= ereg; regno++)
	    {
	      rtx reg = gen_rtx_REG (SImode, regno);
	      add_reg_note (insn, REG_CFA_RESTORE, reg);
	    }
	}
    }

  /* Emit the final stack pointer adjustment to deallocate the saved
     registers and incoming argument area.  */
  expand_csky_stack_adjust (adjust);

  /* Extra stack adjustment for exception handler return.  */
  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  /* Now we can return.  */
  emit_jump_insn (gen_simple_return ());
}
5933
5934
5935 static void
5936 csky_output_function_prologue (FILE *f)
5937 {
5938 unsigned long func_type = get_csky_current_func_type ();
5939
5940 switch ((int) CSKY_FUNCTION_TYPE (func_type))
5941 {
5942 default:
5943 case CSKY_FT_NORMAL:
5944 break;
5945 case CSKY_FT_INTERRUPT:
5946 {
5947 asm_fprintf (f, "\t# Interrupt Service Routine.\n");
5948 asm_fprintf (f, "\tnie\n\tipush\n");
5949 break;
5950 }
5951 case CSKY_FT_FIQ:
5952 asm_fprintf (f, "\t# Fast Interrupt Service Routine.\n");
5953 break;
5954 case CSKY_FT_EXCEPTION:
5955 asm_fprintf (f, "\t# CSKY Exception Handler.\n");
5956 break;
5957 case CSKY_FT_NAKED:
5958 asm_fprintf (f, "\t# Naked Function: prologue and epilogue \
5959 provided by programmer.\n");
5960 return;
5961 }
5962
5963 csky_layout_stack_frame ();
5964
5965 /* Generate .stack_size function-name, size for callgraph;
5966 the default stack size is 0. */
5967 if (TARGET_STACK_SIZE && cfun->machine->frame_size > 0)
5968 {
5969 gcc_assert (current_function_decl != NULL);
5970 const char *func_name =
5971 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl));
5972 if (func_name[0] == '*')
5973 asm_fprintf (f, "\t.stack_size %s, %d\n",
5974 &func_name[1], cfun->machine->frame_size);
5975 else
5976 asm_fprintf (f, "\t.stack_size %s, %d\n",
5977 func_name, cfun->machine->frame_size);
5978 }
5979 }
5980
5981
/* Counterpart of csky_output_function_prologue for the end of a
   function.  Intentionally empty: this target emits no literal
   assembly after the function body (NOTE(review): presumably all
   epilogue code comes from the RTL emitted by csky_expand_epilogue --
   confirm where this hook is registered).  */
static void
csky_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED)
{

}
5987
5988
/* Helper for csky_eh_return splitter: store the call frame exception
   handler address SOURCE in lr, using SCRATCH for address arithmetic
   if the offset is out of range.  */
void
csky_set_eh_return_address (rtx source, rtx scratch)
{
  HOST_WIDE_INT delta = 0;
  rtx basereg, addr;
  unsigned int reg_mask;

  csky_layout_stack_frame ();
  reg_mask = cfun->machine->reg_mask;

  if (reg_mask & (1 << CSKY_LR_REGNUM))
    {
      /* Find LR in the stack frame.  */
      int i = 0;

      if (frame_pointer_needed)
	{
	  /* The FP points directly at the register save area.  */
	  basereg = hard_frame_pointer_rtx;
	  delta = 0;
	}
      else
	{
	  /* Otherwise the save area sits above the locals/outgoing
	     args reserved by the prologue.  */
	  basereg = stack_pointer_rtx;
	  delta = cfun->machine->reg_offset;
	}

      /* At this point, (basereg + delta) points at the low end of
	 the reg save area.  Regs are saved sequentially from low
	 to high from this address.  */
      for (i = 0; i < CSKY_LR_REGNUM; i++)
	if (reg_mask & (1 << i))
	  delta += 4;

      /* If the offset is too large for a load/store displacement,
	 compute the slot address into SCRATCH instead.  */
      if ((CSKY_TARGET_ARCH (CK801) && delta >= CSKY_LD16_MAX_OFFSET (Pmode))
	  || delta >= CSKY_LD32_MAX_OFFSET (Pmode))
	{
	  emit_insn (gen_movsi (scratch, GEN_INT (delta)));
	  emit_insn (gen_addsi3 (scratch, scratch, basereg));
	  addr = scratch;
	}
      else
	addr = plus_constant (Pmode, basereg, delta);
      /* Overwrite LR's stack slot so the epilogue restores the handler
	 address into LR.  */
      emit_move_insn (gen_frame_mem (Pmode, addr), source);
    }
  else
    /* LR was not saved, so set it directly.  */
    emit_move_insn (gen_rtx_REG (Pmode, CSKY_LR_REGNUM), source);
}
6038
6039 /* Return TRUE if X references a SYMBOL_REF. */
6040
6041 bool
6042 csky_symbol_mentioned_p (rtx x)
6043 {
6044 const char *fmt;
6045 int i;
6046
6047 if (GET_CODE (x) == SYMBOL_REF)
6048 return true;
6049
6050 fmt = GET_RTX_FORMAT (GET_CODE (x));
6051 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6052 {
6053 if (fmt[i] == 'E')
6054 {
6055 int j;
6056
6057 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6058 if (csky_symbol_mentioned_p (XVECEXP (x, i, j)))
6059 return true;
6060 }
6061 else if (fmt[i] == 'e' && csky_symbol_mentioned_p (XEXP (x, i)))
6062 return true;
6063 }
6064 return false;
6065 }
6066
6067
6068 /* Return TRUE if X references a LABEL_REF. */
6069
6070 bool
6071 csky_label_mentioned_p (rtx x)
6072 {
6073 const char *fmt;
6074 int i;
6075
6076 if (GET_CODE (x) == LABEL_REF)
6077 return true;
6078
6079 fmt = GET_RTX_FORMAT (GET_CODE (x));
6080 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6081 {
6082 if (fmt[i] == 'E')
6083 {
6084 int j;
6085
6086 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6087 if (csky_label_mentioned_p (XVECEXP (x, i, j)))
6088 return true;
6089 }
6090 else if (fmt[i] == 'e' && csky_label_mentioned_p (XEXP (x, i)))
6091 return true;
6092 }
6093
6094 return false;
6095 }
6096
6097
6098 static bool
6099 tls_unspec_mentioned_p (rtx x)
6100 {
6101 switch (GET_CODE (x))
6102 {
6103 case CONST:
6104 return tls_unspec_mentioned_p (XEXP (x, 0));
6105
6106 case UNSPEC:
6107 if (XINT (x, 1) == UNSPEC_TLS)
6108 return true;
6109
6110 /* Fall through. */
6111 default:
6112 return false;
6113 }
6114 }
6115
6116
6117 /* Implement LEGITIMATE_PIC_OPERAND_P. */
6118
6119 bool
6120 csky_legitimate_pic_operand_p (rtx x)
6121 {
6122 if (tls_unspec_mentioned_p (x))
6123 return true;
6124 if (csky_symbol_mentioned_p (x) || csky_label_mentioned_p (x))
6125 return false;
6126 return true;
6127 }
6128
6129 rtx
6130 csky_legitimize_pic_address (rtx orig, rtx reg, bool gotrel_p)
6131 {
6132 rtx pic_reg = gen_rtx_REG (SImode, PIC_OFFSET_TABLE_REGNUM);
6133 bool optimize_p = false;
6134
6135 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
6136 {
6137 rtx pic_ref, address, rtx_tmp;
6138 rtx insn;
6139 rtx pic_reg = gen_rtx_REG (SImode, PIC_OFFSET_TABLE_REGNUM);
6140 int subregs = 0;
6141
6142 if (reg == 0)
6143 {
6144 gcc_assert (can_create_pseudo_p ());
6145 reg = gen_reg_rtx (Pmode);
6146 subregs = 1;
6147 }
6148
6149 if (subregs)
6150 address = gen_reg_rtx (Pmode);
6151 else
6152 address = reg;
6153
6154 if (GET_CODE (orig) == SYMBOL_REF && !SYMBOL_REF_LOCAL_P (orig))
6155 {
6156 /* When gotrel_p generate sym@GOT, otherwise generate sym@PLT. */
6157 rtx_tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, orig),
6158 (gotrel_p
6159 ? UNSPEC_PIC_SYMBOL_GOT
6160 : UNSPEC_PIC_SYMBOL_PLT));
6161 optimize_p = gotrel_p;
6162 if (flag_pic != 1)
6163 {
6164 emit_move_insn (address, rtx_tmp);
6165 rtx_tmp = gen_rtx_MULT (Pmode, address, GEN_INT (1));
6166 }
6167 pic_ref = gen_const_mem (Pmode,
6168 gen_rtx_PLUS (Pmode, pic_reg, rtx_tmp));
6169 }
6170 else
6171 {
6172 /* bsr symbol */
6173 if (flag_pic == 1 && !gotrel_p)
6174 {
6175 pic_ref = gen_rtx_UNSPEC (Pmode,
6176 gen_rtvec (1, orig),
6177 UNSPEC_PIC_SYMBOL_BSR);
6178 return pic_ref;
6179 }
6180 /* grs rx, symbol */
6181 else if (flag_pic == 1 && (GET_CODE (orig) == SYMBOL_REF)
6182 && SYMBOL_REF_FUNCTION_P (orig))
6183 {
6184 pic_ref = gen_rtx_UNSPEC (Pmode,
6185 gen_rtvec (1, orig),
6186 UNSPEC_PIC_SYMBOL_GRS);
6187 return pic_ref;
6188 }
6189 /* lrw rx, symbol@GOTOFF; add rx, rx, gb */
6190 else
6191 {
6192 rtx_tmp = gen_rtx_UNSPEC (Pmode,
6193 gen_rtvec (1, orig),
6194 UNSPEC_PIC_SYMBOL_GOTOFF);
6195 emit_move_insn (address, rtx_tmp);
6196 pic_ref = gen_rtx_PLUS (Pmode, address, pic_reg);
6197 optimize_p = true;
6198 }
6199 }
6200
6201 insn = emit_move_insn (reg, pic_ref);
6202 /* Put a REG_EQUAL note on this insn,
6203 so that it can be optimized by loop. */
6204 if (optimize_p)
6205 set_unique_reg_note (insn, REG_EQUAL, orig);
6206
6207 return reg;
6208 }
6209 else if (GET_CODE (orig) == CONST)
6210 {
6211 rtx base, offset;
6212
6213 if (GET_CODE (XEXP (orig, 0)) == PLUS
6214 && XEXP (XEXP (orig, 0), 1) == pic_reg)
6215 return orig;
6216
6217 if (reg == 0)
6218 {
6219 gcc_assert (can_create_pseudo_p ());
6220 reg = gen_reg_rtx (Pmode);
6221 }
6222
6223 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
6224
6225 base = csky_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
6226 reg, gotrel_p);
6227 offset = csky_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
6228 base == reg ? 0 : reg, gotrel_p);
6229
6230 if (GET_CODE (offset) == CONST_INT)
6231 return plus_constant (Pmode, base, INTVAL (offset));
6232
6233 return gen_rtx_PLUS (Pmode, base, offset);
6234 }
6235
6236 return orig;
6237 }
6238
6239
6240 /* Functions to output assembly code for a function call. */
6241
6242 char *
6243 csky_output_call (rtx *operands, int index)
6244 {
6245 static char buffer[20];
6246 rtx addr = operands[index];
6247
6248 if (REG_P (addr))
6249 sprintf (buffer, "jsr\t%%%d", index);
6250 else if (flag_pic && (GET_CODE (addr) == UNSPEC))
6251 sprintf (buffer, "bsr\t%%%d", index);
6252 else
6253 sprintf (buffer, "jbsr\t%%%d", index);
6254
6255 return buffer;
6256 }
6257
6258
6259 /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
6260 Output assembler code for a block containing the constant parts
6261 of a trampoline, leaving space for the variable parts.
6262 Note that STATIC_CHAIN_REGNUM is t1 (aka r12) on ck801 and
6263 t1 (r13) otherwise. */
6264
6265 static void
6266 csky_asm_trampoline_template (FILE *f)
6267 {
6268 if (CSKY_ISA_FEATURE (2E3))
6269 {
6270 fprintf (f, "\tlrw\t%s, [.Lstatic_chain]\n",
6271 reg_names[STATIC_CHAIN_REGNUM]);
6272 fprintf (f, "\tjmpi\t[.Lfunc_address]\n");
6273 /* 2 32-bit insns = 8 bytes. */
6274 }
6275 else if (CSKY_TARGET_ARCH (CK801))
6276 {
6277 /* It's hard to provide general support for trampolines on this
6278 core. We need a register other than the one holding the
6279 static chain (r13) to hold the function pointer for the
6280 indirect jump to it. But ck801 has such a limited register set
6281 there is no other call-clobbered scratch register available -- in
6282 particular, this core does not have r12, which we use for the
6283 ck802 case below. If we use a callee-saved register like r4,
6284 saving the old value on the stack screws up the stack frame
6285 if there are overflow arguments pushed on the stack
6286 by the caller. In theory we could test for that and handle
6287 limited cases with parameters that all fit in r0-r3 with no
6288 stack overflow, but punt for now. */
6289 sorry ("Nested function trampolines not supported on CK801.");
6290 }
6291 else
6292 {
6293 fprintf (f, "\tlrw\t%s, [.Lfunc_address]\n",
6294 reg_names[CSKY_T1_REGNUM]);
6295 fprintf (f, "\tlrw\t%s, [.Lstatic_chain]\n",
6296 reg_names[STATIC_CHAIN_REGNUM]);
6297 fprintf (f, "\tjmp\t%s\n",
6298 reg_names[CSKY_T1_REGNUM]);
6299 /* To align constant pool on a word boundary. */
6300 fprintf (f, "\t.align 2\n");
6301 /* 2 32-bit lrw insns + 16-bit jump + 16-bit pad = 12 bytes. */
6302 }
6303
6304 fprintf (f, ".Lstatic_chain:\n");
6305 fprintf (f, "\t.long 0\n");
6306 fprintf (f, ".Lfunc_address:\n");
6307 fprintf (f, "\t.long 0\n");
6308 /* 2 words of constant pool = 8 bytes. */
6309 }
6310
6311 /* Worker function for TARGET_TRAMPOLINE_INIT. */
6312
6313 static void
6314 csky_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6315 {
6316 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6317 rtx mem, a_tramp;
6318 int pool = TRAMPOLINE_SIZE - 8;
6319
6320 emit_block_move (m_tramp, assemble_trampoline_template (),
6321 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
6322
6323 mem = adjust_address (m_tramp, SImode, pool);
6324 emit_move_insn (mem, chain_value);
6325 mem = adjust_address (m_tramp, SImode, pool + 4);
6326 emit_move_insn (mem, fnaddr);
6327
6328 a_tramp = XEXP (m_tramp, 0);
6329 maybe_emit_call_builtin___clear_cache (a_tramp,
6330 plus_constant (Pmode,
6331 a_tramp,
6332 TRAMPOLINE_SIZE));
6333 }
6334
6335
/* Emit a comparison insn for float values.
   Return true if the comparison is inverted, i.e. the caller must
   branch on the opposite of CODE's sense.  */

bool
csky_emit_compare_float (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CSKY_CC_REGNUM);
  bool invert;
  machine_mode mode = GET_MODE (op1);

  /* Any non-zero constant must live in a register.  */
  if (op1 != CONST0_RTX (mode))
    op1 = force_reg (mode, op1);

  invert = false;

  switch (code)
    {
    case EQ:
      /* EQ is implemented as an inverted NE test.  */
      code = NE;
      invert = true;
      break;
    case GT:
    case LT:
    case LE:
      /* Force a literal zero into a register for the ordering tests on
	 fpv2 (NOTE(review): presumably the fpv2 compare patterns cannot
	 take an immediate zero here -- confirm against csky.md).  */
      if (op1 == CONST0_RTX (mode) && (CSKY_ISA_FEATURE_GET(fpv2_sf)
				       || CSKY_ISA_FEATURE_GET(fpv2_df)
				       || CSKY_ISA_FEATURE_GET(fpv2_divd)))
	op1 = force_reg (mode, op1);
      break;
    case ORDERED:
      /* ORDERED is implemented as an inverted UNORDERED test.  */
      code = UNORDERED;
      invert = true;
      break;

    default:
      break;
    }

  /* Emit the (possibly rewritten) comparison into the CC register.  */
  emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));

  return invert;
}
6378
6379 /* Support for the Q or W memory constraint. Returns true if OP is a MEM
6380 RTX with an address consisting of base + index or base + displacement. */
6381
6382 bool
6383 csky_valid_mem_constraint_operand (rtx op, const char *constraint)
6384 {
6385 struct csky_address addr;
6386
6387 if (GET_CODE (op) != MEM)
6388 return false;
6389
6390 if (!decompose_csky_address (XEXP (op, 0), &addr))
6391 return false;
6392
6393 /* Verify base register. */
6394 if (!is_csky_address_register_rtx_p (addr.base, 0))
6395 return false;
6396
6397 /* Verify index operand. */
6398 if (addr.index && (constraint[0] == 'Q' || constraint[0] == 'W'))
6399 {
6400 if (!is_csky_address_register_rtx_p (addr.index, 0))
6401 return false;
6402
6403 if (addr.scale == 1 || addr.scale == 2 || addr.scale == 4
6404 || addr.scale == 8)
6405 return true;
6406
6407 return false;
6408 }
6409 /* Verify disp operand. */
6410 else if (addr.disp && constraint[0] == 'Q')
6411 {
6412 rtx disp = addr.disp;
6413
6414 if (!CONST_INT_P (disp))
6415 return false;
6416
6417 if (((unsigned) INTVAL (disp) % 4) == 0
6418 && (unsigned) INTVAL (disp) <= (unsigned) 1020)
6419 return true;
6420
6421 return false;
6422 }
6423 else if (constraint[0] == 'Q')
6424 /* Single reg is valid for 'Q'. */
6425 return true;
6426
6427 return false;
6428 }
6429
6430
6431 /* Returns the (interrupt) function type of the current
6432 function, or CSKY_FT_UNKNOWN if the type cannot be determined. */
6433
6434 static unsigned long
6435 csky_isr_value (tree argument)
6436 {
6437 const isr_attribute_entry *ptr;
6438 const char *arg;
6439
6440 /* No argument - default to IRQ. */
6441 if (argument == NULL_TREE)
6442 return CSKY_FT_ISR;
6443
6444 /* Get the value of the argument. */
6445 if (TREE_VALUE (argument) == NULL_TREE
6446 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
6447 return CSKY_FT_UNKNOWN;
6448
6449 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
6450
6451 /* Check it against the list of known arguments. */
6452 for (ptr = isr_attribute_map; ptr->arg != NULL; ptr++)
6453 if (strcmp (arg, ptr->arg) == 0)
6454 return ptr->return_value;
6455
6456 /* An unrecognized interrupt type. */
6457 return CSKY_FT_UNKNOWN;
6458 }
6459
6460 /* Handle an attribute requiring a FUNCTION_DECL;
6461 arguments as in struct attribute_spec.handler. */
6462
6463 static tree
6464 csky_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
6465 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
6466 {
6467 if (TREE_CODE (*node) != FUNCTION_DECL)
6468 {
6469 warning (OPT_Wattributes, "%qE attribute only applies to functions",
6470 name);
6471 *no_add_attrs = true;
6472 }
6473
6474 return NULL_TREE;
6475 }
6476
/* Handle an "interrupt" or "isr" attribute;
   arguments as in struct attribute_spec.handler.  */

static tree
csky_handle_isr_attribute (tree *node, tree name, tree args, int flags,
			   bool *no_add_attrs)
{

  /* The attribute is only meaningful with -mistack support.  */
  if (!TARGET_ISTACK)
    {
      warning (OPT_Wattributes, "%qE attribute ignored without %<-mistack%>",
	       name);
      *no_add_attrs = true;
      return NULL_TREE;
    }

  if (DECL_P (*node))
    {
      /* On a declaration, the attribute is valid only for functions.  */
      if (TREE_CODE (*node) != FUNCTION_DECL)
	{
	  warning (OPT_Wattributes, "%qE attribute only applies to functions",
		   name);
	  *no_add_attrs = true;
	}
    }
  else
    {
      if (TREE_CODE (*node) == FUNCTION_TYPE
	  || TREE_CODE (*node) == METHOD_TYPE)
	{
	  /* Function/method type: just validate the argument string.  */
	  if (csky_isr_value (args) == CSKY_FT_UNKNOWN)
	    {
	      warning (OPT_Wattributes, "%qE attribute ignored", name);
	      *no_add_attrs = true;
	    }
	}
      else if (TREE_CODE (*node) == POINTER_TYPE
	       && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
		   || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
	       && csky_isr_value (args) != CSKY_FT_UNKNOWN)
	{
	  /* Pointer to function: push the attribute down onto the
	     pointed-to function type via a variant copy.  */
	  *node = build_variant_type_copy (*node);
	  TREE_TYPE (*node) = build_type_attribute_variant (TREE_TYPE (*node),
		  tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
	  *no_add_attrs = true;
	}
      else if (flags & ((int)ATTR_FLAG_DECL_NEXT
			| (int)ATTR_FLAG_FUNCTION_NEXT
			| (int)ATTR_FLAG_ARRAY_NEXT))
	{
	  /* Defer: hand the attribute back so it can be applied to the
	     next declarator component instead.  */
	  *no_add_attrs = true;
	  return tree_cons (name, args, NULL_TREE);
	}
      else
	warning (OPT_Wattributes, "%qE attribute ignored", name);
    }
  return NULL_TREE;
}
6535
6536
/* Implement TARGET_REGISTER_MOVE_COST: compute extra cost of moving data
   between one register class and another.  */

int
csky_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  /* NOTE(review): these function-local helper macros are never #undef'd,
     so they remain visible to the rest of the file -- confirm nothing
     below relies on them before scoping them with #undef.  */
#define GR_REG_CLASS_P(CLASS) \
  ((CLASS) == GENERAL_REGS || (CLASS) == MINI_REGS || (CLASS) == SP_REGS \
   || (CLASS) == LOW_REGS)

#define HILO_REG_CLASS_P(CLASS) \
  ((CLASS) == HILO_REGS)

#define V_REG_CLASS_P(CLASS) \
  ((CLASS) == V_REGS)

  /* Moves within V_REGS are cheap.  */
  if (V_REG_CLASS_P (from) && V_REG_CLASS_P (to))
    return 2;

  /* Crossing between general registers and V_REGS costs more.  */
  if ((V_REG_CLASS_P (from) && GR_REG_CLASS_P (to))
      || (GR_REG_CLASS_P (from) && V_REG_CLASS_P (to)))
    return 6;

  /* HILO transfers are progressively more expensive ...  */
  if ((HILO_REG_CLASS_P (from) && GR_REG_CLASS_P (to))
      || (GR_REG_CLASS_P (from) && HILO_REG_CLASS_P (to)))
    return 16;

  if (HILO_REG_CLASS_P (from) && HILO_REG_CLASS_P (to))
    return 32;

  /* ... with HILO <-> V_REGS as the worst case.  */
  if ((HILO_REG_CLASS_P (from) && V_REG_CLASS_P (to))
      || (V_REG_CLASS_P (from) && HILO_REG_CLASS_P (to)))
    return 64;

  /* Everything else (general <-> general) is cheap.  */
  return 2;
}
6574
6575
6576 /* Implement TARGET_MEMORY_MOVE_COST: compute the cost of moving data
6577 between registers and memory. */
6578
6579 int
6580 csky_memory_move_cost (machine_mode mode, reg_class_t rclass,
6581 bool in)
6582 {
6583 return (4 + memory_move_secondary_cost (mode, rclass, in));
6584 }
6585
6586
6587 /* TARGET_RTX_COSTS helper for ck801/ck802. */
6588
6589 static bool
6590 ck802_ck801_rtx_costs (rtx x, int code, int outer_code, int *total,
6591 bool speed)
6592 {
6593 machine_mode mode = GET_MODE (x);
6594 switch (code)
6595 {
6596 /* Accessing memory costs quite a lot for first word; */
6597 case MEM:
6598 *total = COSTS_N_INSNS (1 + CSKY_NUM_REGS (mode));
6599 return false;
6600 case DIV:
6601 case UDIV:
6602 case MOD:
6603 case UMOD:
6604 *total = 100;
6605 return true;
6606
6607 case ROTATE:
6608 case ROTATERT:
6609 case ASHIFT:
6610 case LSHIFTRT:
6611 case ASHIFTRT:
6612 if (speed)
6613 *total = 2;
6614 else
6615 *total = COSTS_N_INSNS (1);
6616 return false;
6617
6618 case MINUS:
6619 case PLUS:
6620 *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
6621 return false;
6622
6623 case AND:
6624 {
6625 enum rtx_code subcode = GET_CODE (XEXP (x, 1));
6626
6627 /* If subcode is "not", we'll try to combine it into e.g. "andn"
6628 instruction, so give AND itself zero cost. */
6629 if (subcode == NOT)
6630 {
6631 *total = 0;
6632 return false;
6633 }
6634 }
6635 /* Fall through. */
6636 case XOR:
6637 case IOR:
6638 *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
6639 return false;
6640
6641 case MULT:
6642 /* FIXME: is ixw supported on ck801/ck802? */
6643 /* We can use "ix.h/w" insn to replace multiply by 2 or 4.
6644 "ix.h/w" is a 32-bit insn, so let its cost be a little less than
6645 "mult" insn. */
6646 if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
6647 {
6648 unsigned HOST_WIDE_INT m
6649 = (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)));
6650 if ((m == 2 || m == 4) && outer_code == PLUS)
6651 {
6652 *total = 2;
6653 return true;
6654 }
6655 else
6656 {
6657 /* Because mult is relatively slower than other operations,
6658 we try to use other insns when optimizing for speed.
6659 When optimizing for size, give it lower cost. */
6660 if (speed)
6661 {
6662 *total = COSTS_N_INSNS (10 * CSKY_NUM_REGS (mode));
6663 return true;
6664 }
6665 int cycle = 0;
6666 while (m)
6667 {
6668 m >>= 2;
6669 cycle++;
6670 }
6671 *total = COSTS_N_INSNS (1) + cycle;
6672 return false;
6673 }
6674 }
6675 if (!speed)
6676 *total = COSTS_N_INSNS (1);
6677 return false;
6678
6679 case NEG:
6680 /* Usually, we use subtract from 0 to substitute for neg, and
6681 it costs 1 extra insn to move 0 to a register. */
6682 *total = COSTS_N_INSNS (2 * CSKY_NUM_REGS (mode));
6683 return false;
6684
6685 case NOT:
6686 *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
6687 return false;
6688
6689 case COMPARE:
6690 *total = COSTS_N_INSNS (1);
6691 return false;
6692
6693 case SIGN_EXTEND:
6694 case ZERO_EXTEND:
6695 *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
6696 return false;
6697
6698 case SIGN_EXTRACT:
6699 case ZERO_EXTRACT:
6700 if (REG_P (XEXP (x, 0))
6701 && CONST_INT_P (XEXP (x, 1))
6702 && CONST_INT_P (XEXP (x, 2))
6703 && INTVAL (XEXP (x, 1)) == 8
6704 && INTVAL (XEXP (x, 2)) % 8 == 0)
6705 {
6706 *total = COSTS_N_INSNS (1);
6707 return true;
6708 }
6709 *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
6710 return false;
6711
6712 case CONST_INT:
6713 {
6714 unsigned HOST_WIDE_INT t = (unsigned HOST_WIDE_INT) (INTVAL (x));
6715
6716 if (outer_code == COMPARE)
6717 {
6718 if (t < 0x10000)
6719 *total = 0;
6720 else
6721 *total = COSTS_N_INSNS (2);
6722 }
6723 else if (outer_code == AND || outer_code == IOR || outer_code == XOR)
6724 {
6725 /* "andi,xori,ori" are 32-bit insns, so let it cost a
6726 little more. */
6727 if (t < 0x1000)
6728 {
6729 /* Try replacing "andi" by "sextb/h", so let it cost more. */
6730 if (outer_code == AND && (t == 0xff || t == 0xffff))
6731 {
6732 *total = 8;
6733 return true;
6734 }
6735 *total = 2;
6736 }
6737 else if (t < 0x10000)
6738 *total = COSTS_N_INSNS (1);
6739 else
6740 *total = COSTS_N_INSNS (2);
6741 }
6742 else if (outer_code == PLUS || outer_code == MINUS)
6743 {
6744 /* "addi/subi rx,ry,imm", if imm<9, it is more often a
6745 16-bit insn. If imm>=9, use "movi" insn; it's probably
6746 less than "addi/subi". */
6747 if (t < 9)
6748 *total = 0;
6749 else if (t < 0x1000)
6750 *total = 2;
6751 else if (t < 0x10000)
6752 *total = COSTS_N_INSNS (1);
6753 else
6754 *total = COSTS_N_INSNS (2);
6755 }
6756 else if (outer_code == ROTATE || outer_code == ROTATERT
6757 || outer_code == LSHIFTRT || outer_code == ASHIFTRT
6758 || outer_code == ASHIFT)
6759 {
6760 if (t < 32)
6761 *total = 0;
6762 else
6763 *total = COSTS_N_INSNS (2);
6764 }
6765 else
6766 {
6767 if (t < 0x10000)
6768 if (outer_code == SET && t < 256)
6769 *total = 0;
6770 else
6771 *total = COSTS_N_INSNS (1);
6772 else
6773 *total = COSTS_N_INSNS (2);
6774 }
6775 }
6776 return true;
6777
6778 case CONST:
6779 case LABEL_REF:
6780 case SYMBOL_REF:
6781 *total = COSTS_N_INSNS (3);
6782 return true;
6783 default:
6784 return false;
6785 }
6786 }
6787
6788
6789 /* TARGET_RTX_COSTS helper for ck803. */
6790
6791 static bool
6792 ck803_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED,
6793 int *total, bool speed ATTRIBUTE_UNUSED)
6794 {
6795 switch (code)
6796 {
6797 case SET:
6798 if (MEM_P (XEXP (x, 1)))
6799 {
6800 struct csky_address op1;
6801 bool address_valid
6802 = decompose_csky_address (XEXP (XEXP (x, 1), 0), &op1);
6803 if (op1.index)
6804 {
6805 *total = COSTS_N_INSNS (3);
6806 return true;
6807 }
6808 else if (address_valid)
6809 {
6810 *total = COSTS_N_INSNS (1);
6811 return true;
6812 }
6813 }
6814 if (REG_P (XEXP (x, 0)) && (GET_CODE (XEXP (x, 1)) == PLUS))
6815 {
6816 rtx sub_exp = XEXP (x, 1);
6817 if (REG_P (XEXP (sub_exp, 0)) && REG_P (XEXP (sub_exp, 1)))
6818 {
6819 *total = COSTS_N_INSNS (1);
6820 return true;
6821 }
6822 }
6823 return false;
6824 case MULT:
6825 if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
6826 {
6827 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6828 if (val % 2 == 0 && val < 0xffffffff && val > 0)
6829 {
6830 *total = COSTS_N_INSNS (1);
6831 return true;
6832 }
6833 }
6834 return false;
6835
6836 case CONST:
6837 case LABEL_REF:
6838 case SYMBOL_REF:
6839 *total = COSTS_N_INSNS (3);
6840 return true;
6841 default:
6842 return false;
6843 }
6844 }
6845
6846 /* TARGET_RTX_COSTS helper for ck807/ck810 arches. */
6847
6848 static bool
6849 ck807_ck810_rtx_costs (rtx x, int code,
6850 int outer_code ATTRIBUTE_UNUSED,
6851 int *total, bool speed ATTRIBUTE_UNUSED)
6852 {
6853 switch (code)
6854 {
6855 case MULT:
6856 if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
6857 {
6858 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6859 if (val % 2 == 0 && val < 0xffffffff && val > 0)
6860 {
6861 *total = COSTS_N_INSNS (1);
6862 return true;
6863 }
6864 }
6865 return false;
6866
6867 case CONST:
6868 case LABEL_REF:
6869 case SYMBOL_REF:
6870 *total = COSTS_N_INSNS (3);
6871 return true;
6872 default:
6873 return false;
6874 }
6875 }
6876
6877 /* TARGET_RTX_COSTS helper for ck860 arches. */
6878
6879 static bool
6880 ck860_rtx_costs (rtx x, int code, machine_mode mode,
6881 int outer_code ATTRIBUTE_UNUSED,
6882 int *total, bool speed ATTRIBUTE_UNUSED)
6883 {
6884 switch (code)
6885 {
6886 case PLUS:
6887 /* The costs of mula is 1 more than mult. */
6888 if (GET_CODE (XEXP (x, 0)) == MULT && REG_P (XEXP (x, 1)) && speed)
6889 {
6890 rtx mul_op0 = XEXP (XEXP (x, 0), 0);
6891 rtx mul_op1 = XEXP (XEXP (x, 0), 1);
6892 if (REG_P (mul_op0) && REG_P (mul_op1))
6893 {
6894 *total = COSTS_N_INSNS (1);
6895 *total += rtx_cost (XEXP (x, 0), mode,
6896 (enum rtx_code) code, 0, speed);
6897 return true;
6898 }
6899 }
6900 return false;
6901 case MULT:
6902 if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
6903 {
6904 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6905 if (val % 2 == 0 && val < 0xffffffff && val > 0)
6906 {
6907 *total = COSTS_N_INSNS (1);
6908 return true;
6909 }
6910 }
6911 return false;
6912
6913 case CONST:
6914 case LABEL_REF:
6915 case SYMBOL_REF:
6916 *total = COSTS_N_INSNS (3);
6917 return true;
6918 default:
6919 return false;
6920 }
6921 }
6922
6923
6924 /* Implement TARGET_RTX_COSTS, to compute a (partial) cost for rtx X.
6925 Return true if the complete cost has been computed, and false if
6926 subexpressions should be scanned. In either case, *TOTAL contains
6927 the cost result. */
6928
6929 static bool
6930 csky_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
6931 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
6932 {
6933 int code = GET_CODE (x);
6934
6935 if (CSKY_TARGET_ARCH (CK802) || CSKY_TARGET_ARCH (CK801))
6936 return ck802_ck801_rtx_costs (x, code, outer_code, total, speed);
6937 else if (CSKY_TARGET_ARCH (CK803))
6938 return ck803_rtx_costs (x, code, outer_code, total, speed);
6939 else if (CSKY_TARGET_ARCH (CK807) || CSKY_TARGET_ARCH (CK810))
6940 return ck807_ck810_rtx_costs (x, code, outer_code, total, speed);
6941 else if (CSKY_TARGET_ARCH (CK860))
6942 return ck860_rtx_costs (x, code, mode, outer_code, total, speed);
6943 else
6944 gcc_unreachable ();
6945 }
6946
6947 /* Emit assembly code for CASESI. This is only used on CK801 and CK802
6948 when optimizing for size, and uses helper functions in libgcc instead
6949 of doing the control transfer inline. */
6950
6951 const char *
6952 csky_output_casesi (rtx *operands)
6953 {
6954 rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[0])));
6955
6956 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
6957
6958 switch (GET_MODE (diff_vec))
6959 {
6960 case E_QImode:
6961 return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned
6962 ? "jbsr\t___gnu_csky_case_uqi"
6963 : "jbsr\t___gnu_csky_case_sqi");
6964 case E_HImode:
6965 return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned
6966 ? "jbsr\t___gnu_csky_case_uhi"
6967 : "jbsr\t___gnu_csky_case_shi");
6968 case E_SImode:
6969 return "jbsr\t___gnu_csky_case_si";
6970 default:
6971 gcc_unreachable ();
6972 }
6973 }
6974
6975 /* Implement TARGET_SCHED_ISSUE_RATE. Lookup the issue rate in the
6976 per-core tuning structs. */
6977 static int
6978 csky_sched_issue_rate (void)
6979 {
6980 if (CSKY_TARGET_ARCH (CK810))
6981 return 2;
6982 else
6983 return 1;
6984 }
6985
6986
6987 /* This function implements the target macro TARGET_SCHED_ADJUST_COST.
6988 It corrects the value of COST based on the relationship between
6989 INSN and DEP through the dependence DEP_TYPE. It returns the new
6990 value. */
6991
6992 static int
6993 csky_sched_adjust_cost (rtx_insn *insn,
6994 int dep_type,
6995 rtx_insn *dep,
6996 int cost,
6997 unsigned int dw ATTRIBUTE_UNUSED)
6998 {
6999 if (dep_type == REG_DEP_ANTI || dep_type == REG_DEP_OUTPUT)
7000 return 0;
7001 /* The REG_DEP_TRUE situation. */
7002 else if (recog_memoized (insn) >= 0 && recog_memoized (dep) >= 0)
7003 {
7004 enum attr_type insn_type = get_attr_type (insn);
7005 if (CSKY_TARGET_ARCH (CK803))
7006 {
7007 /* The ld or st's base reg depends on the pre insn,
7008 it will delay 1 cycle. */
7009 if (insn_type == TYPE_LOAD || insn_type == TYPE_STORE)
7010 {
7011 rtx pattern = PATTERN (insn);
7012
7013 gcc_assert (GET_CODE (pattern) == SET);
7014 rtx addr = (insn_type == TYPE_LOAD
7015 ? SET_SRC (pattern) : SET_DEST (pattern));
7016
7017 enum rtx_code code = GET_CODE (addr);
7018 if (code == ZERO_EXTEND || code == SIGN_EXTEND)
7019 addr = XEXP (addr, 0);
7020 gcc_assert (GET_CODE (addr) == MEM);
7021
7022 rtx base = XEXP (addr, 0);
7023 rtx reg = NULL_RTX;
7024 if (REG_P (base))
7025 reg = base;
7026 if (GET_CODE (base) == PLUS
7027 && GET_CODE (XEXP (base, 0)) == REG)
7028 reg = XEXP (base, 0);
7029 if ((reg != NULL_RTX) && reg_set_p (reg, PATTERN (dep)))
7030 return 2;
7031 }
7032 }
7033 else if (CSKY_TARGET_ARCH (CK802))
7034 {
7035 if ((insn_type == TYPE_CALL_JSR || insn_type == TYPE_BRANCH_JMP)
7036 && get_attr_type (dep) != TYPE_LOAD)
7037 return 1;
7038
7039 if (insn_type == TYPE_LOAD || insn_type == TYPE_STORE)
7040 {
7041 rtx pattern = PATTERN (insn);
7042
7043 gcc_assert (GET_CODE (pattern) == SET);
7044
7045 rtx addr = (insn_type == TYPE_LOAD
7046 ? SET_SRC (pattern) : SET_DEST (pattern));
7047
7048 enum rtx_code code = GET_CODE (addr);
7049 if (code == ZERO_EXTEND || code == SIGN_EXTEND)
7050 addr = XEXP (addr, 0);
7051 gcc_assert (GET_CODE (addr) == MEM);
7052
7053 rtx base = XEXP (addr, 0);
7054 rtx reg = NULL_RTX;
7055 if (REG_P (base))
7056 reg = base;
7057 if (GET_CODE (base) == PLUS
7058 && GET_CODE (XEXP (base, 0)) == REG)
7059 reg = XEXP (base, 0);
7060 if ((reg != NULL_RTX) && reg_set_p (reg, PATTERN (dep))
7061 && get_attr_type (dep) != TYPE_LOAD)
7062 return 1;
7063
7064 if (insn_type == TYPE_STORE
7065 && reg_referenced_p (SET_SRC (pattern), PATTERN (dep)))
7066 return 1;
7067 }
7068 }
7069 }
7070 return cost;
7071 }
7072
7073 static bool
7074 csky_warn_func_return (tree decl)
7075 {
7076 /* Naked functions are implemented entirely in assembly, including the
7077 return sequence, so suppress warnings about this. */
7078 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
7079 }
7080
7081
7082 /* Implement TARGET_RETURN_IN_MEMORY to decide whether TYPE should be
7083 returned in memory (true) or in a register (false).
7084 FNTYPE is the type of the function making the call. */
7085
7086 static bool
7087 csky_return_in_memory (const_tree type,
7088 const_tree fntype ATTRIBUTE_UNUSED)
7089 {
7090 const HOST_WIDE_INT size = int_size_in_bytes (type);
7091 return (size == -1 || size > 2 * UNITS_PER_WORD);
7092 }
7093
7094
7095 /* Implement TARGET_DWARF_REGISTER_SPAN.
7096 Dwarf models VFP registers as 64-bit or 128-bit registers default.
7097 GCC models tham as 32-bit registers, so we need to describe this to
7098 the DWARF generation code. Other registers can use the default. */
7099
static rtx
csky_dwarf_register_span (rtx rtl)
{
  machine_mode mode;
  unsigned regno;
  rtx parts[16];
  int nregs;
  int i;

  /* Only FP (vector) registers need a span; everything else uses the
     default DWARF description.  */
  regno = REGNO (rtl);
  if (!CSKY_VREG_P (regno))
    return NULL_RTX;

  /* NOTE(review): remaps the "hi" bank of vector registers by +16 —
     presumably onto their DWARF numbering; confirm against the
     register-number mapping.  */
  if (CSKY_VREG_HI_P (regno))
    regno += 16;

  /* Values narrower than 8 bytes fit a single register; no span.  */
  mode = GET_MODE (rtl);
  if (GET_MODE_SIZE (mode) < 8)
    return NULL_RTX;


  if (TARGET_SINGLE_FPU)
    {
      /* Split the value into consecutive 32-bit registers, swapping
	 each pair's order on big-endian targets.  */
      nregs = GET_MODE_SIZE (mode) / 4;
      for (i = 0; i < nregs; i += 2)
	if (TARGET_BIG_ENDIAN)
	  {
	    parts[i] = gen_rtx_REG (SImode, regno + i + 1);
	    parts[i + 1] = gen_rtx_REG (SImode, regno + i);
	  }
	else
	  {
	    parts[i] = gen_rtx_REG (SImode, regno + i);
	    parts[i + 1] = gen_rtx_REG (SImode, regno + i + 1);
	  }
    }
  else
    {
      /* FIXME: dwarf2 considers all general registers to be the same
	 as the CPU bit width.  Transform the 64-bit FPU registers to
	 32 bits here, and we will modify the unwind processing to
	 fit CSKY architecture later.  */
      /* NOTE(review): here each pair's second half uses regno + i - 16;
	 looks like the two 32-bit halves of a 64-bit FP register live
	 16 register numbers apart — verify against the FPU register
	 layout.  */
      nregs = GET_MODE_SIZE (mode) / 4;
      for (i = 0; i < nregs; i += 2)
	if (TARGET_BIG_ENDIAN)
	  {
	    parts[i] = gen_rtx_REG (SImode, regno + i - 16);
	    parts[i + 1] = gen_rtx_REG (SImode, regno + i);
	  }
	else
	  {
	    parts[i] = gen_rtx_REG (SImode, regno + i);
	    parts[i + 1] = gen_rtx_REG (SImode, regno + i - 16);
	  }
    }

  /* Wrap the pieces in a PARALLEL for the DWARF back end.  */
  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nregs , parts));
}
7158
7159 /* Implement TARGET_INIT_LIBFUNCS. */
7160
static void
csky_init_libfuncs (void)
{
  /* Linux targets get the standard __sync_* helpers.  */
  if (TARGET_CSKY_LINUX)
    init_sync_libfuncs (UNITS_PER_WORD);
  /* The remaining renames only apply when using the C-SKY compiler
     runtime library.  */
  if (!TARGET_LIBCCRT)
    return;

  /* Prefix for every ccrt library function name.  */
  #define CSKY_GCC_SYM(sym) "__csky_ccrt_" # sym

  /* int */

  /* Arithmetic functions */
  set_optab_libfunc (ashl_optab, DImode, CSKY_GCC_SYM (ashldi3));
  set_optab_libfunc (ashr_optab, DImode, CSKY_GCC_SYM (ashrdi3));
  set_optab_libfunc (sdiv_optab, SImode, CSKY_GCC_SYM (divsi3));
  set_optab_libfunc (sdiv_optab, DImode, CSKY_GCC_SYM (divdi3));
  set_optab_libfunc (lshr_optab, DImode, CSKY_GCC_SYM (lshrdi3));
  set_optab_libfunc (smod_optab, SImode, CSKY_GCC_SYM (modsi3));
  set_optab_libfunc (smod_optab, DImode, CSKY_GCC_SYM (moddi3));
  set_optab_libfunc (smul_optab, DImode, CSKY_GCC_SYM (muldi3));
  set_optab_libfunc (neg_optab, DImode, CSKY_GCC_SYM (negdi2));
  set_optab_libfunc (udiv_optab, SImode, CSKY_GCC_SYM (udivsi3));
  set_optab_libfunc (udiv_optab, DImode, CSKY_GCC_SYM (udivdi3));
  set_optab_libfunc (udivmod_optab, DImode, CSKY_GCC_SYM (udivmoddi4));
  set_optab_libfunc (umod_optab, SImode, CSKY_GCC_SYM (umodsi3));
  set_optab_libfunc (umod_optab, DImode, CSKY_GCC_SYM (umoddi3));

  /* Comparison functions */
  set_optab_libfunc (cmp_optab, DImode, CSKY_GCC_SYM (cmpdi2));
  set_optab_libfunc (ucmp_optab, DImode, CSKY_GCC_SYM (ucmpdi2));

  /* Trapping arithmetic functions */
  set_optab_libfunc (absv_optab, SImode, CSKY_GCC_SYM (absvsi2));
  set_optab_libfunc (absv_optab, DImode, CSKY_GCC_SYM (absvdi2));
  set_optab_libfunc (addv_optab, SImode, CSKY_GCC_SYM (addvsi3));
  set_optab_libfunc (addv_optab, DImode, CSKY_GCC_SYM (addvdi3));
  set_optab_libfunc (smulv_optab, SImode, CSKY_GCC_SYM (mulvsi3));
  set_optab_libfunc (smulv_optab, DImode, CSKY_GCC_SYM (mulvdi3));
  set_optab_libfunc (negv_optab, SImode, CSKY_GCC_SYM (negvsi2));
  set_optab_libfunc (negv_optab, DImode, CSKY_GCC_SYM (negvdi2));
  set_optab_libfunc (subv_optab, SImode, CSKY_GCC_SYM (subvsi3));
  set_optab_libfunc (subv_optab, DImode, CSKY_GCC_SYM (subvdi3));

  /* Bit operations */
  set_optab_libfunc (clz_optab, SImode, CSKY_GCC_SYM (clzsi2));
  set_optab_libfunc (clz_optab, DImode, CSKY_GCC_SYM (clzdi2));
  set_optab_libfunc (ctz_optab, SImode, CSKY_GCC_SYM (ctzsi2));
  set_optab_libfunc (ctz_optab, DImode, CSKY_GCC_SYM (ctzdi2));
  set_optab_libfunc (ffs_optab, DImode, CSKY_GCC_SYM (ffsdi2));
  set_optab_libfunc (parity_optab, SImode, CSKY_GCC_SYM (paritysi2));
  set_optab_libfunc (parity_optab, DImode, CSKY_GCC_SYM (paritydi2));
  set_optab_libfunc (popcount_optab,SImode, CSKY_GCC_SYM (popcountsi2));
  set_optab_libfunc (popcount_optab,DImode, CSKY_GCC_SYM (popcountdi2));
  set_optab_libfunc (bswap_optab, SImode, CSKY_GCC_SYM (bswapsi2));
  set_optab_libfunc (bswap_optab, DImode, CSKY_GCC_SYM (bswapdi2));

  /* float */

  /* Arithmetic functions */
  set_optab_libfunc (add_optab, SFmode, CSKY_GCC_SYM (addsf3));
  set_optab_libfunc (add_optab, DFmode, CSKY_GCC_SYM (adddf3));
  set_optab_libfunc (sub_optab, SFmode, CSKY_GCC_SYM (subsf3));
  set_optab_libfunc (sub_optab, DFmode, CSKY_GCC_SYM (subdf3));
  set_optab_libfunc (smul_optab, SFmode, CSKY_GCC_SYM (mulsf3));
  set_optab_libfunc (smul_optab, DFmode, CSKY_GCC_SYM (muldf3));
  set_optab_libfunc (sdiv_optab, SFmode, CSKY_GCC_SYM (divsf3));
  set_optab_libfunc (sdiv_optab, DFmode, CSKY_GCC_SYM (divdf3));
  set_optab_libfunc (neg_optab, SFmode, CSKY_GCC_SYM (negsf2));
  set_optab_libfunc (neg_optab, DFmode, CSKY_GCC_SYM (negdf2));

  /* Conversion functions */
  set_conv_libfunc (sext_optab, DFmode, SFmode, CSKY_GCC_SYM (extendsfdf2));
  set_conv_libfunc (trunc_optab, SFmode, DFmode, CSKY_GCC_SYM (truncdfsf2));
  set_conv_libfunc (sfix_optab, SImode, SFmode, CSKY_GCC_SYM (fixsfsi));
  set_conv_libfunc (sfix_optab, SImode, DFmode, CSKY_GCC_SYM (fixdfsi));
  set_conv_libfunc (sfix_optab, DImode, SFmode, CSKY_GCC_SYM (fixsfdi));
  set_conv_libfunc (sfix_optab, DImode, DFmode, CSKY_GCC_SYM (fixdfdi));
  set_conv_libfunc (ufix_optab, SImode, SFmode, CSKY_GCC_SYM (fixunssfsi));
  set_conv_libfunc (ufix_optab, SImode, DFmode, CSKY_GCC_SYM (fixunsdfsi));
  set_conv_libfunc (ufix_optab, DImode, SFmode, CSKY_GCC_SYM (fixunssfdi));
  set_conv_libfunc (ufix_optab, DImode, DFmode, CSKY_GCC_SYM (fixunsdfdi));
  set_conv_libfunc (sfloat_optab, SFmode, SImode, CSKY_GCC_SYM (floatsisf));
  set_conv_libfunc (sfloat_optab, DFmode, SImode, CSKY_GCC_SYM (floatsidf));
  set_conv_libfunc (sfloat_optab, SFmode, DImode, CSKY_GCC_SYM (floatdisf));
  set_conv_libfunc (sfloat_optab, DFmode, DImode, CSKY_GCC_SYM (floatdidf));
  set_conv_libfunc (ufloat_optab, SFmode, SImode, CSKY_GCC_SYM (floatunsisf));
  set_conv_libfunc (ufloat_optab, DFmode, SImode, CSKY_GCC_SYM (floatunsidf));
  set_conv_libfunc (ufloat_optab, SFmode, DImode, CSKY_GCC_SYM (floatundisf));
  set_conv_libfunc (ufloat_optab, DFmode, DImode, CSKY_GCC_SYM (floatundidf));

  /* Comparison functions */
  set_optab_libfunc (cmp_optab, SFmode, CSKY_GCC_SYM (cmpsf2));
  set_optab_libfunc (cmp_optab, DFmode, CSKY_GCC_SYM (cmpdf2));
  set_optab_libfunc (unord_optab, SFmode, CSKY_GCC_SYM (unordsf2));
  set_optab_libfunc (unord_optab, DFmode, CSKY_GCC_SYM (unorddf2));
  set_optab_libfunc (eq_optab, SFmode, CSKY_GCC_SYM (eqsf2));
  set_optab_libfunc (eq_optab, DFmode, CSKY_GCC_SYM (eqdf2));
  set_optab_libfunc (ne_optab, SFmode, CSKY_GCC_SYM (nesf2));
  set_optab_libfunc (ne_optab, DFmode, CSKY_GCC_SYM (nedf2));
  set_optab_libfunc (ge_optab, SFmode, CSKY_GCC_SYM (gesf2));
  set_optab_libfunc (ge_optab, DFmode, CSKY_GCC_SYM (gedf2));
  set_optab_libfunc (lt_optab, SFmode, CSKY_GCC_SYM (ltsf2));
  set_optab_libfunc (lt_optab, DFmode, CSKY_GCC_SYM (ltdf2));
  set_optab_libfunc (le_optab, SFmode, CSKY_GCC_SYM (lesf2));
  set_optab_libfunc (le_optab, DFmode, CSKY_GCC_SYM (ledf2));
  set_optab_libfunc (gt_optab, SFmode, CSKY_GCC_SYM (gtsf2));
  set_optab_libfunc (gt_optab, DFmode, CSKY_GCC_SYM (gtdf2));
}
7270
7271
7272 /* Implement TARGET_ADDRESS_COST to estimate cost of the memory address X.
7273 For C-SKY, (register) and (register + offset) have the same cost.
7274 Other situations cost more. */
7275
7276 static int
7277 csky_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
7278 addr_space_t as ATTRIBUTE_UNUSED,
7279 bool speed ATTRIBUTE_UNUSED)
7280 {
7281 enum rtx_code code = GET_CODE (x);
7282
7283 if (code == REG)
7284 return COSTS_N_INSNS (1);
7285 if (code == PLUS
7286 && REG_P (XEXP (x, 0))
7287 && CONST_INT_P (XEXP (x, 1)))
7288 return COSTS_N_INSNS (1);
7289
7290 return COSTS_N_INSNS (3);
7291 }
7292
7293
7294 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
7295
7296 static bool
7297 csky_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
7298 {
7299 *p1 = CSKY_CC_REGNUM;
7300 *p2 = INVALID_REGNUM;
7301 return true;
7302 }
7303
7304 void
7305 csky_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
7306 rtx libname ATTRIBUTE_UNUSED,
7307 tree fndecl ATTRIBUTE_UNUSED)
7308 {
7309 memset(pcum, 0, sizeof(*pcum));
7310 if (stdarg_p (fntype))
7311 pcum->is_stdarg = true;
7312 }
7313
7314
7315 /* Implement the TARGET_INIT_BUILTINS target macro. */
7316
void
csky_init_builtins (void)
{
  /* Init fp16: create and lay out a half-precision REAL_TYPE node and
     register it with the front end under the name "__fp16".  */
  static tree csky_floatHF_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (csky_floatHF_type_node) = GET_MODE_PRECISION (HFmode);
  layout_type (csky_floatHF_type_node);
  (*lang_hooks.types.register_builtin_type) (csky_floatHF_type_node, "__fp16");
}
7326
7327
7328 /* Implement TARGET_MANGLE_TYPE. */
7329
7330 static const char *
7331 csky_mangle_type (const_tree type)
7332 {
7333 if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
7334 && DECL_NAME (TYPE_NAME (type))
7335 && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))), "__fp16"))
7336 return "__fp16";
7337
7338 /* Use the default mangling. */
7339 return NULL;
7340 }
7341
7342 struct gcc_target targetm = TARGET_INITIALIZER;
7343
7344 #include "gt-csky.h"
This page took 0.356044 seconds and 5 git commands to generate.