/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000
   Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include <setjmp.h>
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-flags.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"

#ifdef EXTRA_CONSTRAINT
/* If EXTRA_CONSTRAINT is defined, then the 'S'
   constraint in REG_CLASS_FROM_LETTER will no longer work, and various
   asm statements that need 'S' for class SIREG will break.  */
error EXTRA_CONSTRAINT conflicts with S constraint letter
/* The previous line used to be #error, but some compilers barf
   even if the conditional was untrue.  */
#endif

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT -1
#endif

/* Processor costs (relative to an add) */
struct processor_costs i386_cost = {	/* 386 specific costs */
  1,					/* cost of an add instruction */
  1,					/* cost of a lea instruction */
  3,					/* variable shift costs */
  2,					/* constant shift costs */
  6,					/* cost of starting a multiply */
  1,					/* cost of multiply per each bit set */
  23,					/* cost of a divide/mod */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8}				/* cost of storing fp registers */
};

struct processor_costs i486_cost = {	/* 486 specific costs */
  1,					/* cost of an add instruction */
  1,					/* cost of a lea instruction */
  3,					/* variable shift costs */
  2,					/* constant shift costs */
  12,					/* cost of starting a multiply */
  1,					/* cost of multiply per each bit set */
  40,					/* cost of a divide/mod */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8}				/* cost of storing fp registers */
};

struct processor_costs pentium_cost = {
  1,					/* cost of an add instruction */
  1,					/* cost of a lea instruction */
  4,					/* variable shift costs */
  1,					/* constant shift costs */
  11,					/* cost of starting a multiply */
  0,					/* cost of multiply per each bit set */
  25,					/* cost of a divide/mod */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6}				/* cost of storing fp registers */
};

struct processor_costs pentiumpro_cost = {
  1,					/* cost of an add instruction */
  1,					/* cost of a lea instruction */
  1,					/* variable shift costs */
  1,					/* constant shift costs */
  4,					/* cost of starting a multiply */
  0,					/* cost of multiply per each bit set */
  17,					/* cost of a divide/mod */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6}				/* cost of storing fp registers */
};

struct processor_costs k6_cost = {
  1,					/* cost of an add instruction */
  2,					/* cost of a lea instruction */
  1,					/* variable shift costs */
  1,					/* constant shift costs */
  3,					/* cost of starting a multiply */
  0,					/* cost of multiply per each bit set */
  18,					/* cost of a divide/mod */
  8,					/* "large" insn */
  4,					/* MOVE_RATIO */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4}				/* cost of storing fp registers */
};

struct processor_costs athlon_cost = {
  1,					/* cost of an add instruction */
  2,					/* cost of a lea instruction */
  1,					/* variable shift costs */
  1,					/* constant shift costs */
  5,					/* cost of starting a multiply */
  0,					/* cost of multiply per each bit set */
  42,					/* cost of a divide/mod */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 20},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 16}				/* cost of storing fp registers */
};

struct processor_costs *ix86_cost = &pentium_cost;

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_K6 (1<<PROCESSOR_K6)
#define m_ATHLON (1<<PROCESSOR_ATHLON)

const int x86_use_leave = m_386 | m_K6 | m_ATHLON;
const int x86_push_memory = m_386 | m_K6 | m_ATHLON;
const int x86_zero_extend_with_and = m_486 | m_PENT;
const int x86_movx = m_ATHLON | m_PPRO /* m_386 | m_K6 */;
const int x86_double_with_add = ~m_386;
const int x86_use_bit_test = m_386;
const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON | m_K6;
const int x86_use_q_reg = m_PENT | m_PPRO | m_K6;
const int x86_use_any_reg = m_486;
const int x86_cmove = m_PPRO | m_ATHLON;
const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON;
const int x86_use_sahf = m_PPRO | m_K6 | m_ATHLON;
const int x86_partial_reg_stall = m_PPRO;
const int x86_use_loop = m_K6;
const int x86_use_fiop = ~(m_PPRO | m_ATHLON | m_PENT);
const int x86_use_mov0 = m_K6;
const int x86_use_cltd = ~(m_PENT | m_K6);
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486;
const int x86_single_stringop = m_386;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
const int x86_himode_math = ~(m_PPRO);
const int x86_promote_hi_regs = m_PPRO;
const int x86_sub_esp_4 = m_ATHLON | m_PPRO;
const int x86_sub_esp_8 = m_ATHLON | m_PPRO | m_386 | m_486;
const int x86_add_esp_4 = m_ATHLON | m_K6;
const int x86_add_esp_8 = m_ATHLON | m_PPRO | m_K6 | m_386 | m_486;
const int x86_integer_DFmode_moves = ~m_ATHLON;
const int x86_partial_reg_dependency = m_ATHLON;
const int x86_memory_mismatch_stall = m_ATHLON;

#define AT_BP(mode) (gen_rtx_MEM ((mode), hard_frame_pointer_rtx))

const char * const hi_reg_name[] = HI_REGISTER_NAMES;
const char * const qi_reg_name[] = QI_REGISTER_NAMES;
const char * const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;

/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  /* FP registers */
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer */
  NON_Q_REGS,
  /* flags, fpsr, dirflag, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS
};

/* The "default" register map.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,		/* general regs */
  12, 13, 14, 15, 16, 17, 18, 19,	/* fp regs */
  -1, -1, -1, -1,			/* arg, flags, fpsr, dir */
};

/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8  for %eip    (no gcc equivalent)
	9  for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)

   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.

   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to under-
   stand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still prints garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)

   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.

   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers.
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,		/* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,	/* fp regs */
  -1, 9, -1, -1,			/* arg, flags, fpsr, dir */
};
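
/* Worked example (editorial note, not in the original sources): %ebp is
   gcc regno 6, so the default map above gives it debug register number 4
   while the SVR4 map gives it 5; %st(0), gcc regno 8, becomes 12 in the
   default map and 11 in the SVR4 map, matching the table in the comment
   above.  */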

/* Test and compare insns in i386.md store the information needed to
   generate branch and scc insns here.  */

struct rtx_def *ix86_compare_op0 = NULL_RTX;
struct rtx_def *ix86_compare_op1 = NULL_RTX;

#define MAX_386_STACK_LOCALS 2

/* Define the structure for the machine field in struct function.  */
struct machine_function
{
  rtx stack_locals[(int) MAX_MACHINE_MODE][MAX_386_STACK_LOCALS];
};

#define ix86_stack_locals (cfun->machine->stack_locals)

/* Which cpu are we scheduling for.  */
enum processor_type ix86_cpu;

/* Which instruction set architecture to use.  */
int ix86_arch;

/* Strings to hold which cpu and instruction set architecture to use.  */
const char *ix86_cpu_string;		/* for -mcpu=<xxx> */
const char *ix86_arch_string;		/* for -march=<xxx> */

/* Register allocation order.  */
const char *ix86_reg_alloc_order;
static char regs_allocated[FIRST_PSEUDO_REGISTER];

/* # of registers to use to pass arguments.  */
const char *ix86_regparm_string;

/* ix86_regparm_string as a number.  */
int ix86_regparm;

/* Alignment to use for loops and jumps:  */

/* Power of two alignment for loops.  */
const char *ix86_align_loops_string;

/* Power of two alignment for non-loop jumps.  */
const char *ix86_align_jumps_string;

/* Power of two alignment for stack boundary in bytes.  */
const char *ix86_preferred_stack_boundary_string;

/* Preferred alignment for stack boundary in bits.  */
int ix86_preferred_stack_boundary;

/* Values 1-5: see jump.c.  */
int ix86_branch_cost;
const char *ix86_branch_cost_string;

/* Power of two alignment for functions.  */
int ix86_align_funcs;
const char *ix86_align_funcs_string;

/* Power of two alignment for loops.  */
int ix86_align_loops;

/* Power of two alignment for non-loop jumps.  */
int ix86_align_jumps;
\f
static void output_pic_addr_const PARAMS ((FILE *, rtx, int));
static void put_condition_code PARAMS ((enum rtx_code, enum machine_mode,
					int, int, FILE *));
static enum rtx_code unsigned_comparison PARAMS ((enum rtx_code code));
static rtx ix86_expand_int_compare PARAMS ((enum rtx_code, rtx, rtx));
static enum machine_mode ix86_fp_compare_mode PARAMS ((enum rtx_code));
static int ix86_use_fcomi_compare PARAMS ((enum rtx_code));
static enum rtx_code ix86_prepare_fp_compare_args PARAMS ((enum rtx_code,
							   rtx *, rtx *));
static rtx ix86_expand_compare PARAMS ((enum rtx_code));
static rtx gen_push PARAMS ((rtx));
static int memory_address_length PARAMS ((rtx addr));
static int ix86_flags_dependant PARAMS ((rtx, rtx, enum attr_type));
static int ix86_agi_dependant PARAMS ((rtx, rtx, enum attr_type));
static int ix86_safe_length PARAMS ((rtx));
static enum attr_memory ix86_safe_memory PARAMS ((rtx));
static enum attr_pent_pair ix86_safe_pent_pair PARAMS ((rtx));
static enum attr_ppro_uops ix86_safe_ppro_uops PARAMS ((rtx));
static void ix86_dump_ppro_packet PARAMS ((FILE *));
static void ix86_reorder_insn PARAMS ((rtx *, rtx *));
static rtx * ix86_pent_find_pair PARAMS ((rtx *, rtx *, enum attr_pent_pair,
					  rtx));
static void ix86_init_machine_status PARAMS ((struct function *));
static void ix86_mark_machine_status PARAMS ((struct function *));
static void ix86_split_to_parts PARAMS ((rtx, rtx *, enum machine_mode));
static int ix86_safe_length_prefix PARAMS ((rtx));
static HOST_WIDE_INT ix86_compute_frame_size PARAMS ((HOST_WIDE_INT,
						      int *, int *, int *));
static int ix86_nsaved_regs PARAMS ((void));
static void ix86_emit_save_regs PARAMS ((void));
static void ix86_emit_restore_regs_using_mov PARAMS ((rtx, int));
static void ix86_emit_epilogue_esp_adjustment PARAMS ((int));
static void ix86_sched_reorder_pentium PARAMS ((rtx *, rtx *));
static void ix86_sched_reorder_ppro PARAMS ((rtx *, rtx *));

struct ix86_address
{
  rtx base, index, disp;
  HOST_WIDE_INT scale;
};

static int ix86_decompose_address PARAMS ((rtx, struct ix86_address *));
\f
/* Sometimes certain combinations of command options do not make
   sense on a particular target machine.  You can define a macro
   `OVERRIDE_OPTIONS' to take account of this.  This macro, if
   defined, is executed once just after all the command options have
   been parsed.

   Don't use this macro to turn on various extra optimizations for
   `-O'.  That is what `OPTIMIZATION_OPTIONS' is for.  */

void
override_options ()
{
  /* Comes from final.c -- no real reason to change it.  */
#define MAX_CODE_ALIGN 16

  static struct ptt
    {
      struct processor_costs *cost;	/* Processor costs */
      int target_enable;		/* Target flags to enable.  */
      int target_disable;		/* Target flags to disable.  */
      int align_loop;			/* Default alignments.  */
      int align_jump;
      int align_func;
      int branch_cost;
    }
  const processor_target_table[PROCESSOR_max] =
    {
      {&i386_cost, 0, 0, 2, 2, 2, 1},
      {&i486_cost, 0, 0, 4, 4, 4, 1},
      {&pentium_cost, 0, 0, -4, -4, -4, 1},
      {&pentiumpro_cost, 0, 0, 4, -4, 4, 1},
      {&k6_cost, 0, 0, -5, -5, 4, 1},
      {&athlon_cost, 0, 0, 4, -4, 4, 1}
    };

  static struct pta
    {
      const char *name;		/* processor name or nickname.  */
      enum processor_type processor;
    }
  const processor_alias_table[] =
    {
      {"i386", PROCESSOR_I386},
      {"i486", PROCESSOR_I486},
      {"i586", PROCESSOR_PENTIUM},
      {"pentium", PROCESSOR_PENTIUM},
      {"i686", PROCESSOR_PENTIUMPRO},
      {"pentiumpro", PROCESSOR_PENTIUMPRO},
      {"k6", PROCESSOR_K6},
      {"athlon", PROCESSOR_ATHLON},
    };

  int const pta_size = sizeof (processor_alias_table) / sizeof (struct pta);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  ix86_arch = PROCESSOR_I386;
  ix86_cpu = (enum processor_type) TARGET_CPU_DEFAULT;

  if (ix86_arch_string != 0)
    {
      int i;
      for (i = 0; i < pta_size; i++)
	if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
	  {
	    ix86_arch = processor_alias_table[i].processor;
	    /* Default cpu tuning to the architecture.  */
	    ix86_cpu = ix86_arch;
	    break;
	  }
      if (i == pta_size)
	error ("bad value (%s) for -march= switch", ix86_arch_string);
    }

  if (ix86_cpu_string != 0)
    {
      int i;
      for (i = 0; i < pta_size; i++)
	if (! strcmp (ix86_cpu_string, processor_alias_table[i].name))
	  {
	    ix86_cpu = processor_alias_table[i].processor;
	    break;
	  }
      if (i == pta_size)
	error ("bad value (%s) for -mcpu= switch", ix86_cpu_string);
    }

  ix86_cost = processor_target_table[ix86_cpu].cost;
  target_flags |= processor_target_table[ix86_cpu].target_enable;
  target_flags &= ~processor_target_table[ix86_cpu].target_disable;

  /* Arrange to set up i386_stack_locals for all functions.  */
  init_machine_status = ix86_init_machine_status;
  mark_machine_status = ix86_mark_machine_status;

  /* Validate registers in register allocation order.  */
  if (ix86_reg_alloc_order)
    {
      int i, ch;
      for (i = 0; (ch = ix86_reg_alloc_order[i]) != '\0'; i++)
	{
	  int regno = 0;

	  switch (ch)
	    {
	    case 'a':	regno = 0;	break;
	    case 'd':	regno = 1;	break;
	    case 'c':	regno = 2;	break;
	    case 'b':	regno = 3;	break;
	    case 'S':	regno = 4;	break;
	    case 'D':	regno = 5;	break;
	    case 'B':	regno = 6;	break;

	    default:	fatal ("Register '%c' is unknown", ch);
	    }

	  if (regs_allocated[regno])
	    fatal ("Register '%c' already specified in allocation order", ch);

	  regs_allocated[regno] = 1;
	}
    }

  /* Validate -mregparm= value.  */
  if (ix86_regparm_string)
    {
      ix86_regparm = atoi (ix86_regparm_string);
      if (ix86_regparm < 0 || ix86_regparm > REGPARM_MAX)
	fatal ("-mregparm=%d is not between 0 and %d",
	       ix86_regparm, REGPARM_MAX);
    }

  /* Validate -malign-loops= value, or provide default.  */
  ix86_align_loops = processor_target_table[ix86_cpu].align_loop;
  if (ix86_align_loops_string)
    {
      ix86_align_loops = atoi (ix86_align_loops_string);
      if (ix86_align_loops < 0 || ix86_align_loops > MAX_CODE_ALIGN)
	fatal ("-malign-loops=%d is not between 0 and %d",
	       ix86_align_loops, MAX_CODE_ALIGN);
    }

  /* Validate -malign-jumps= value, or provide default.  */
  ix86_align_jumps = processor_target_table[ix86_cpu].align_jump;
  if (ix86_align_jumps_string)
    {
      ix86_align_jumps = atoi (ix86_align_jumps_string);
      if (ix86_align_jumps < 0 || ix86_align_jumps > MAX_CODE_ALIGN)
	fatal ("-malign-jumps=%d is not between 0 and %d",
	       ix86_align_jumps, MAX_CODE_ALIGN);
    }

  /* Validate -malign-functions= value, or provide default.  */
  ix86_align_funcs = processor_target_table[ix86_cpu].align_func;
  if (ix86_align_funcs_string)
    {
      ix86_align_funcs = atoi (ix86_align_funcs_string);
      if (ix86_align_funcs < 0 || ix86_align_funcs > MAX_CODE_ALIGN)
	fatal ("-malign-functions=%d is not between 0 and %d",
	       ix86_align_funcs, MAX_CODE_ALIGN);
    }

  /* Validate -mpreferred-stack-boundary= value, or provide default.
     The default of 128 bits is for Pentium III's SSE __m128.  */
  ix86_preferred_stack_boundary = 128;
  if (ix86_preferred_stack_boundary_string)
    {
      int i = atoi (ix86_preferred_stack_boundary_string);
      if (i < 2 || i > 31)
	fatal ("-mpreferred-stack-boundary=%d is not between 2 and 31", i);
      ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
    }
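
  /* Worked example (editorial note): -mpreferred-stack-boundary=4 yields
     (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits, i.e. the 16 byte
     alignment that the SSE __m128 type wants; the built-in default of
     128 above encodes the same value directly.  */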

  /* Validate -mbranch-cost= value, or provide default.  */
  ix86_branch_cost = processor_target_table[ix86_cpu].branch_cost;
  if (ix86_branch_cost_string)
    {
      ix86_branch_cost = atoi (ix86_branch_cost_string);
      if (ix86_branch_cost < 0 || ix86_branch_cost > 5)
	fatal ("-mbranch-cost=%d is not between 0 and 5",
	       ix86_branch_cost);
    }

  /* Keep nonleaf frame pointers.  */
  if (TARGET_OMIT_LEAF_FRAME_POINTER)
    flag_omit_frame_pointer = 1;

  /* If we're doing fast math, we don't care about comparison order
     wrt NaNs.  This lets us use a shorter comparison sequence.  */
  if (flag_fast_math)
    target_flags &= ~MASK_IEEE_FP;

  /* If we're planning on using `loop', use it.  */
  if (TARGET_USE_LOOP && optimize)
    flag_branch_on_count_reg = 1;
}
\f
/* A C statement (sans semicolon) to choose the order in which to
   allocate hard registers for pseudo-registers local to a basic
   block.

   Store the desired register order in the array `reg_alloc_order'.
   Element 0 should be the register to allocate first; element 1, the
   next register; and so on.

   The macro body should not assume anything about the contents of
   `reg_alloc_order' before execution of the macro.

   On most machines, it is not necessary to define this macro.  */

void
order_regs_for_local_alloc ()
{
  int i, ch, order;

  /* User specified the register allocation order.  */

  if (ix86_reg_alloc_order)
    {
      for (i = order = 0; (ch = ix86_reg_alloc_order[i]) != '\0'; i++)
	{
	  int regno = 0;

	  switch (ch)
	    {
	    case 'a':	regno = 0;	break;
	    case 'd':	regno = 1;	break;
	    case 'c':	regno = 2;	break;
	    case 'b':	regno = 3;	break;
	    case 'S':	regno = 4;	break;
	    case 'D':	regno = 5;	break;
	    case 'B':	regno = 6;	break;
	    }

	  reg_alloc_order[order++] = regno;
	}

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	{
	  if (! regs_allocated[i])
	    reg_alloc_order[order++] = i;
	}
    }

  /* If user did not specify a register allocation order, use natural order.  */
  else
    {
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	reg_alloc_order[i] = i;
    }
}
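
/* Illustrative example (editorial note; the -mreg-alloc= spelling is an
   assumption based on how ix86_reg_alloc_order is set): given the string
   "dS", the loop above produces reg_alloc_order = {1, 4, 0, 2, 3, 5, ...},
   i.e. %edx and %esi are tried first and the remaining registers follow
   in their natural order.  */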
\f
void
optimization_options (level, size)
     int level;
     int size ATTRIBUTE_UNUSED;
{
  /* For -O2 and beyond, turn off -fschedule-insns by default.  It tends
     to make the shortage of registers even worse.  */
#ifdef INSN_SCHEDULING
  if (level > 1)
    flag_schedule_insns = 0;
#endif
}
\f
/* Return nonzero if IDENTIFIER with arguments ARGS is a valid machine specific
   attribute for DECL.  The attributes in ATTRIBUTES have previously been
   assigned to DECL.  */

int
ix86_valid_decl_attribute_p (decl, attributes, identifier, args)
     tree decl ATTRIBUTE_UNUSED;
     tree attributes ATTRIBUTE_UNUSED;
     tree identifier ATTRIBUTE_UNUSED;
     tree args ATTRIBUTE_UNUSED;
{
  return 0;
}

/* Return nonzero if IDENTIFIER with arguments ARGS is a valid machine specific
   attribute for TYPE.  The attributes in ATTRIBUTES have previously been
   assigned to TYPE.  */

int
ix86_valid_type_attribute_p (type, attributes, identifier, args)
     tree type;
     tree attributes ATTRIBUTE_UNUSED;
     tree identifier;
     tree args;
{
  if (TREE_CODE (type) != FUNCTION_TYPE
      && TREE_CODE (type) != METHOD_TYPE
      && TREE_CODE (type) != FIELD_DECL
      && TREE_CODE (type) != TYPE_DECL)
    return 0;

  /* Stdcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  if (is_attribute_p ("stdcall", identifier))
    return (args == NULL_TREE);

  /* Cdecl attribute says the callee is a normal C declaration.  */
  if (is_attribute_p ("cdecl", identifier))
    return (args == NULL_TREE);

  /* Regparm attribute specifies how many integer arguments are to be
     passed in registers.  */
  if (is_attribute_p ("regparm", identifier))
    {
      tree cst;

      if (! args || TREE_CODE (args) != TREE_LIST
	  || TREE_CHAIN (args) != NULL_TREE
	  || TREE_VALUE (args) == NULL_TREE)
	return 0;

      cst = TREE_VALUE (args);
      if (TREE_CODE (cst) != INTEGER_CST)
	return 0;

      if (compare_tree_int (cst, REGPARM_MAX) > 0)
	return 0;

      return 1;
    }

  return 0;
}
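
/* Editorial example of a declaration the checks above accept:

     void f (int, int) __attribute__ ((regparm (2)));

   "regparm (2)" passes because its argument list is a single INTEGER_CST
   not exceeding REGPARM_MAX, while "stdcall" and "cdecl" are only valid
   with no arguments at all.  */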

/* Return 0 if the attributes for two types are incompatible, 1 if they
   are compatible, and 2 if they are nearly compatible (which causes a
   warning to be generated).  */

int
ix86_comp_type_attributes (type1, type2)
     tree type1;
     tree type2;
{
  /* Check for mismatch of non-default calling convention.  */
  const char *rtdstr = TARGET_RTD ? "cdecl" : "stdcall";

  if (TREE_CODE (type1) != FUNCTION_TYPE)
    return 1;

  /* Check for mismatched calling conventions (cdecl vs stdcall).  */
  if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
      != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
    return 0;
  return 1;
}
\f
/* Value is the number of bytes of arguments automatically
   popped when returning from a subroutine call.
   FUNDECL is the declaration node of the function (as a tree),
   FUNTYPE is the data type of the function (as a tree),
   or for a library call it is an identifier node for the subroutine name.
   SIZE is the number of bytes of arguments passed on the stack.

   On the 80386, the RTD insn may be used to pop them if the number
   of args is fixed, but if the number is variable then the caller
   must pop them all.  RTD can't be used for library calls now
   because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.

   The attribute stdcall is equivalent to RTD on a per module basis.  */

int
ix86_return_pops_args (fundecl, funtype, size)
     tree fundecl;
     tree funtype;
     int size;
{
  int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);

  /* Cdecl functions override -mrtd, and never pop the stack.  */
  if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
    {
      /* Stdcall functions will pop the stack if not variable args.  */
      if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype)))
	rtd = 1;

      if (rtd
	  && (TYPE_ARG_TYPES (funtype) == NULL_TREE
	      || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
		  == void_type_node)))
	return size;
    }

  /* Lose any fake structure return argument.  */
  if (aggregate_value_p (TREE_TYPE (funtype)))
    return GET_MODE_SIZE (Pmode);

  return 0;
}
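
/* Worked example (editorial note): for

     int __attribute__ ((stdcall)) f (int a, int b);

   the argument list ends in void_type_node, so f pops its own SIZE = 8
   bytes of arguments (a `ret $8'), whereas a cdecl or varargs function
   returns 0 here and leaves the popping to the caller.  */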
\f
/* Argument support functions.  */

/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (cum, fntype, libname)
     CUMULATIVE_ARGS *cum;	/* Argument info to initialize */
     tree fntype;		/* tree ptr for function decl */
     rtx libname;		/* SYMBOL_REF of library name or 0 */
{
  static CUMULATIVE_ARGS zero_cum;
  tree param, next_param;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args (");
      if (fntype)
	fprintf (stderr, "fntype code = %s, ret code = %s",
		 tree_code_name[(int) TREE_CODE (fntype)],
		 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
      else
	fprintf (stderr, "no fntype");

      if (libname)
	fprintf (stderr, ", libname = %s", XSTR (libname, 0));
    }

  *cum = zero_cum;

  /* Set up the number of registers to use for passing arguments.  */
  cum->nregs = ix86_regparm;
  if (fntype)
    {
      tree attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (fntype));

      if (attr)
	cum->nregs = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
    }

  /* Determine if this function has variable arguments.  A function
     with no variable arguments is indicated by a trailing
     void_type_node in its argument list.  If there are variable
     arguments, then we won't pass anything in registers.  */

  if (cum->nregs)
    {
      for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
	   param != 0; param = next_param)
	{
	  next_param = TREE_CHAIN (param);
	  if (next_param == 0 && TREE_VALUE (param) != void_type_node)
	    cum->nregs = 0;
	}
    }

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, ", nregs=%d )\n", cum->nregs);

  return;
}

/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

void
function_arg_advance (cum, mode, type, named)
     CUMULATIVE_ARGS *cum;	/* current arg information */
     enum machine_mode mode;	/* current arg mode */
     tree type;			/* type of the argument or 0 if lib support */
     int named;			/* whether or not the argument was named */
{
  int bytes
    = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
  int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (TARGET_DEBUG_ARG)
    fprintf (stderr,
	     "function_adv (sz=%d, wds=%2d, nregs=%d, mode=%s, named=%d)\n\n",
	     words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);

  cum->words += words;
  cum->nregs -= words;
  cum->regno += words;

  if (cum->nregs <= 0)
    {
      cum->nregs = 0;
      cum->regno = 0;
    }

  return;
}

/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).  */

struct rtx_def *
function_arg (cum, mode, type, named)
     CUMULATIVE_ARGS *cum;	/* current arg information */
     enum machine_mode mode;	/* current arg mode */
     tree type;			/* type of the argument or 0 if lib support */
     int named;			/* != 0 for normal args, == 0 for ... args */
{
  rtx ret = NULL_RTX;
  int bytes
    = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
  int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  switch (mode)
    {
      /* For now, pass fp/complex values on the stack.  */
    default:
      break;

    case BLKmode:
    case DImode:
    case SImode:
    case HImode:
    case QImode:
      if (words <= cum->nregs)
	ret = gen_rtx_REG (mode, cum->regno);
      break;
    }

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr,
	       "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d",
	       words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);

      if (ret)
	fprintf (stderr, ", reg=%%e%s", reg_names[REGNO (ret)]);
      else
	fprintf (stderr, ", stack");

      fprintf (stderr, " )\n");
    }

  return ret;
}
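
/* Illustrative example (editorial note): with regparm (3) in effect,
   cum->regno starts at 0 and advances by one word per argument, so three
   successive SImode arguments land in gcc regnos 0, 1 and 2 -- that is,
   %eax, %edx and %ecx -- and anything beyond cum->nregs goes to the
   stack.  */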
\f
/* Returns 1 if OP is either a symbol reference or a sum of a symbol
   reference and a constant.  */

int
symbolic_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;

    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF
	  || (GET_CODE (op) == UNSPEC
	      && XINT (op, 1) >= 6
	      && XINT (op, 1) <= 7))
	return 1;
      if (GET_CODE (op) != PLUS
	  || GET_CODE (XEXP (op, 1)) != CONST_INT)
	return 0;

      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return 1;
      /* Only @GOTOFF gets offsets.  */
      if (GET_CODE (op) != UNSPEC
	  || XINT (op, 1) != 7)
	return 0;

      op = XVECEXP (op, 0, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return 1;
      return 0;

    default:
      return 0;
    }
}

/* Return true if the operand contains a @GOT or @GOTOFF reference.  */

int
pic_symbolic_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST)
    {
      op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC)
	return 1;
      if (GET_CODE (op) != PLUS
	  || GET_CODE (XEXP (op, 1)) != CONST_INT)
	return 0;
      op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC)
	return 1;
    }
  return 0;
}

/* Test for a valid operand for a call instruction.  Don't allow the
   arg pointer register or virtual regs since they may decay into
   reg + const, which the patterns can't handle.  */

int
call_insn_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) != MEM)
    return 0;
  op = XEXP (op, 0);

  /* Disallow indirect through a virtual register.  This leads to
     compiler aborts when trying to eliminate them.  */
  if (GET_CODE (op) == REG
      && (op == arg_pointer_rtx
	  || op == frame_pointer_rtx
	  || (REGNO (op) >= FIRST_PSEUDO_REGISTER
	      && REGNO (op) <= LAST_VIRTUAL_REGISTER)))
    return 0;

  /* Disallow `call 1234'.  Due to varying assembler lameness this
     gets either rejected or translated to `call .+1234'.  */
  if (GET_CODE (op) == CONST_INT)
    return 0;

  /* Explicitly allow SYMBOL_REF even if pic.  */
  if (GET_CODE (op) == SYMBOL_REF)
    return 1;

  /* Half-pic doesn't allow anything but registers and constants.
     We've just taken care of the latter.  */
  if (HALF_PIC_P ())
    return register_operand (op, Pmode);

  /* Otherwise we can allow any general_operand in the address.  */
  return general_operand (op, Pmode);
}

int
constant_call_address_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == MEM
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && GET_CODE (XEXP (op, 0)) != CONST_INT);
}

/* Match exactly zero and one.  */

int
const0_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return op == CONST0_RTX (mode);
}

int
const1_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return op == const1_rtx;
}

/* Match 2, 4, or 8.  Used for leal multiplicands.  */

int
const248_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) == 2 || INTVAL (op) == 4 || INTVAL (op) == 8));
}
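
/* Editorial note: 2, 4 and 8 are exactly the index scales the i386
   addressing mode byte can encode, as in

     leal (%eax,%ebx,4), %ecx	# ecx = eax + ebx*4

   (scale 1 needs no multiplicand at all, so it is not matched here).  */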

/* True if this is a constant appropriate for an increment or decrement.  */

int
incdec_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (op == const1_rtx || op == constm1_rtx)
    return 1;
  if (GET_CODE (op) != CONST_INT)
    return 0;
  if (mode == SImode && INTVAL (op) == (HOST_WIDE_INT) 0xffffffff)
    return 1;
  if (mode == HImode && INTVAL (op) == (HOST_WIDE_INT) 0xffff)
    return 1;
  if (mode == QImode && INTVAL (op) == (HOST_WIDE_INT) 0xff)
    return 1;
  return 0;
}
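
/* Worked example (editorial note): in HImode the constant 0xffff equals
   -1 modulo 2^16, so adding it is really a decrement and `dec' applies;
   the 0xffffffff and 0xff cases above are the same wraparound argument
   for SImode and QImode.  */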

/* Return false if this is the stack pointer, or any other fake
   register eliminable to the stack pointer.  Otherwise, this is
   a register operand.

   This is used to prevent esp from being used as an index reg.
   Which would only happen in pathological cases.  */

int
reg_no_sp_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  rtx t = op;
  if (GET_CODE (t) == SUBREG)
    t = SUBREG_REG (t);
  if (t == stack_pointer_rtx || t == arg_pointer_rtx || t == frame_pointer_rtx)
    return 0;

  return register_operand (op, mode);
}

/* Return false if this is any eliminable register.  Otherwise
   general_operand.  */

int
general_no_elim_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  rtx t = op;
  if (GET_CODE (t) == SUBREG)
    t = SUBREG_REG (t);
  if (t == arg_pointer_rtx || t == frame_pointer_rtx
      || t == virtual_incoming_args_rtx || t == virtual_stack_vars_rtx
      || t == virtual_stack_dynamic_rtx)
    return 0;

  return general_operand (op, mode);
}

/* Return false if this is any eliminable register.  Otherwise
   register_operand or const_int.  */

int
nonmemory_no_elim_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  rtx t = op;
  if (GET_CODE (t) == SUBREG)
    t = SUBREG_REG (t);
  if (t == arg_pointer_rtx || t == frame_pointer_rtx
      || t == virtual_incoming_args_rtx || t == virtual_stack_vars_rtx
      || t == virtual_stack_dynamic_rtx)
    return 0;

  return GET_CODE (op) == CONST_INT || register_operand (op, mode);
}

/* Return true if op is a Q_REGS class register.  */

int
q_regs_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return QI_REG_P (op);
}

/* Return true if op is a NON_Q_REGS class register.  */

int
non_q_regs_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return NON_QI_REG_P (op);
}

/* Return 1 if OP is a comparison operator that can use the condition code
   generated by a logical operation, which characteristically does not set
   overflow or carry.  To be used with CCNOmode.  */

int
no_comparison_operator (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;

  switch (GET_CODE (op))
    {
    case EQ: case NE:
    case LT: case GE:
    case LEU: case LTU: case GEU: case GTU:
      return 1;

    default:
      return 0;
    }
}

/* Return 1 if OP is a comparison operator that can be issued by fcmov.  */

int
fcmov_comparison_operator (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;

  switch (GET_CODE (op))
    {
    case EQ: case NE:
    case LEU: case LTU: case GEU: case GTU:
    case UNORDERED: case ORDERED:
      return 1;

    default:
      return 0;
    }
}

/* Return 1 if OP is any normal comparison operator plus {UN}ORDERED.  */

int
uno_comparison_operator (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;

  switch (GET_CODE (op))
    {
    case EQ: case NE:
    case LE: case LT: case GE: case GT:
    case LEU: case LTU: case GEU: case GTU:
    case UNORDERED: case ORDERED:
      return 1;

    default:
      return 0;
    }
}

/* Return 1 if OP is a binary operator that can be promoted to wider mode.  */

int
promotable_binary_operator (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  switch (GET_CODE (op))
    {
    case MULT:
      /* Modern CPUs have same latency for HImode and SImode multiply,
	 but 386 and 486 do HImode multiply faster.  */
      return ix86_cpu > PROCESSOR_I486;
    case PLUS:
    case AND:
    case IOR:
    case XOR:
    case ASHIFT:
      return 1;
    default:
      return 0;
    }
}

/* Nearly general operand, but accept any const_double, since we wish
   to be able to drop them into memory rather than have them get pulled
   into registers.  */

int
cmp_fp_expander_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;
  if (GET_CODE (op) == CONST_DOUBLE)
    return 1;
  return general_operand (op, mode);
}

/* Match an SImode or HImode register for a zero_extract.  */

int
ext_register_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_MODE (op) != SImode && GET_MODE (op) != HImode)
    return 0;
  return register_operand (op, VOIDmode);
}

/* Return 1 if this is a valid binary floating-point operation.
   OP is the expression matched, and MODE is its mode.  */

int
binary_fp_operator (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;

  switch (GET_CODE (op))
    {
    case PLUS:
    case MINUS:
    case MULT:
    case DIV:
      return GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT;

    default:
      return 0;
    }
}

int
mult_operator (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return GET_CODE (op) == MULT;
}

int
div_operator (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return GET_CODE (op) == DIV;
}

int
arith_or_logical_operator (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (GET_RTX_CLASS (GET_CODE (op)) == 'c'
	      || GET_RTX_CLASS (GET_CODE (op)) == '2'));
}

/* Returns 1 if OP is a memory operand with a displacement.  */

int
memory_displacement_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  struct ix86_address parts;

  if (! memory_operand (op, mode))
    return 0;

  if (! ix86_decompose_address (XEXP (op, 0), &parts))
    abort ();

  return parts.disp != NULL_RTX;
}

/* To avoid problems when jump re-emits comparisons like testqi_ext_ccno_0,
   re-recognize the operand to avoid a copy_to_mode_reg that will fail.

   ??? It seems likely that this will only work because cmpsi is an
   expander, and no actual insns use this.  */

int
cmpsi_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (general_operand (op, mode))
    return 1;

  if (GET_CODE (op) == AND
      && GET_MODE (op) == SImode
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTRACT
      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (XEXP (op, 0), 2)) == CONST_INT
      && INTVAL (XEXP (XEXP (op, 0), 1)) == 8
      && INTVAL (XEXP (XEXP (op, 0), 2)) == 8
      && GET_CODE (XEXP (op, 1)) == CONST_INT)
    return 1;

  return 0;
}

/* Returns 1 if OP is a memory operand that cannot be represented by the
   modRM array.  */

int
long_memory_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (! memory_operand (op, mode))
    return 0;

  return memory_address_length (op) != 0;
}

/* Return nonzero if the rtx is known aligned.  */

int
aligned_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  struct ix86_address parts;

  if (!general_operand (op, mode))
    return 0;

  /* Registers and immediate operands are always "aligned".  */
  if (GET_CODE (op) != MEM)
    return 1;

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return 0;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return 1;

  /* Decode the address.  */
  if (! ix86_decompose_address (op, &parts))
    abort ();

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (parts.scale < 4
	  && REGNO_POINTER_ALIGN (REGNO (parts.index)) < 32)
	return 0;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
	return 0;
    }
  if (parts.disp)
    {
      if (GET_CODE (parts.disp) != CONST_INT
	  || (INTVAL (parts.disp) & 3) != 0)
	return 0;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return 1;
}
\f
/* Return true if the constant is something that can be loaded with
   a special instruction.  Only handle 0.0 and 1.0; others are less
   worthwhile.  */

int
standard_80387_constant_p (x)
     rtx x;
{
  if (GET_CODE (x) != CONST_DOUBLE)
    return -1;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  {
    REAL_VALUE_TYPE d;
    jmp_buf handler;
    int is0, is1;

    if (setjmp (handler))
      return 0;

    set_float_handler (handler);
    REAL_VALUE_FROM_CONST_DOUBLE (d, x);
    is0 = REAL_VALUES_EQUAL (d, dconst0) && !REAL_VALUE_MINUS_ZERO (d);
    is1 = REAL_VALUES_EQUAL (d, dconst1);
    set_float_handler (NULL_PTR);

    if (is0)
      return 1;

    if (is1)
      return 2;

    /* Note that on the 80387, other constants, such as pi,
       are much slower to load as standard constants
       than to load from doubles in memory!  */
    /* ??? Not true on K6: all constants are equal cost.  */
  }
#endif

  return 0;
}
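
/* Editorial note: the two nonzero results distinguish which special
   load to emit -- as I read the i386.md move patterns, 1 selects `fldz'
   (push +0.0) and 2 selects `fld1' (push +1.0); fldpi and friends exist
   but, per the comment above, are not worth using.  */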

/* Returns 1 if OP contains a symbol reference.  */

int
symbolic_reference_mentioned_p (op)
     rtx op;
{
  register const char *fmt;
  register int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  register int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return 1;
	}

      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return 1;
    }

  return 0;
}

/* Return 1 if it is appropriate to emit `ret' instructions in the
   body of a function.  Do this only if the epilogue is simple, needing a
   couple of insns.  Prior to reloading, we can't tell how many registers
   must be saved, so return 0 then.  Return 0 if there is no frame
   marker to de-allocate.

   If NON_SAVING_SETJMP is defined and true, then it is not possible
   for the epilogue to be simple, so return 0.  This is a special case
   since NON_SAVING_SETJMP will not cause regs_ever_live to change
   until final, but jump_optimize may need to know sooner if a
   `return' is OK.  */

int
ix86_can_use_return_insn_p ()
{
  HOST_WIDE_INT tsize;
  int nregs;

#ifdef NON_SAVING_SETJMP
  if (NON_SAVING_SETJMP && current_function_calls_setjmp)
    return 0;
#endif
#ifdef FUNCTION_BLOCK_PROFILER_EXIT
  if (profile_block_flag == 2)
    return 0;
#endif

  if (! reload_completed || frame_pointer_needed)
    return 0;

  /* Don't allow functions that pop more than 32768 bytes of arguments,
     since that's all we can do with one `ret' instruction.  */
  if (current_function_pops_args
      && current_function_args_size >= 32768)
    return 0;

  tsize = ix86_compute_frame_size (get_frame_size (), &nregs, NULL, NULL);
  return tsize == 0 && nregs == 0;
}
\f
static char *pic_label_name;
static int pic_label_output;
static char *global_offset_table_name;

/* This function generates code for -fpic that loads %ebx with
   the return address of the caller and then returns.  */

void
asm_output_function_prefix (file, name)
     FILE *file;
     const char *name ATTRIBUTE_UNUSED;
{
  rtx xops[2];
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
				  || current_function_uses_const_pool);
  xops[0] = pic_offset_table_rtx;
  xops[1] = stack_pointer_rtx;

  /* Deep branch prediction favors having a return for every call.  */
  if (pic_reg_used && TARGET_DEEP_BRANCH_PREDICTION)
    {
      if (!pic_label_output)
	{
	  /* This used to call ASM_DECLARE_FUNCTION_NAME() but since it's an
	     internal (non-global) label that's being emitted, it didn't make
	     sense to have .type information for local labels.  This caused
	     the SCO OpenServer 5.0.4 ELF assembler grief (why are you giving
	     me debug info for a label that you're declaring non-global?), so
	     it was changed to call ASM_OUTPUT_LABEL() instead.  */

	  ASM_OUTPUT_LABEL (file, pic_label_name);

	  xops[1] = gen_rtx_MEM (SImode, xops[1]);
	  output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
	  output_asm_insn ("ret", xops);

	  pic_label_output = 1;
	}
    }
}

void
load_pic_register ()
{
  rtx gotsym, pclab;

  if (global_offset_table_name == NULL)
    {
      global_offset_table_name =
	ggc_alloc_string ("_GLOBAL_OFFSET_TABLE_", 21);
      ggc_add_string_root (&global_offset_table_name, 1);
    }
  gotsym = gen_rtx_SYMBOL_REF (Pmode, global_offset_table_name);

  if (TARGET_DEEP_BRANCH_PREDICTION)
    {
      if (pic_label_name == NULL)
	{
	  pic_label_name = ggc_alloc_string (NULL, 32);
	  ggc_add_string_root (&pic_label_name, 1);
	  ASM_GENERATE_INTERNAL_LABEL (pic_label_name, "LPR", 0);
	}
      pclab = gen_rtx_MEM (QImode, gen_rtx_SYMBOL_REF (Pmode, pic_label_name));
    }
  else
    {
      pclab = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
    }

  emit_insn (gen_prologue_get_pc (pic_offset_table_rtx, pclab));

  if (! TARGET_DEEP_BRANCH_PREDICTION)
    emit_insn (gen_popsi1 (pic_offset_table_rtx));

  emit_insn (gen_prologue_set_got (pic_offset_table_rtx, gotsym, pclab));
}
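
/* Sketch of the assembly the two strategies produce (editorial note;
   AT&T syntax, label names illustrative).  With deep branch prediction
   the prologue calls the tiny routine emitted by
   asm_output_function_prefix above:

	call .LPR0		# .LPR0: movl (%esp), %ebx; ret
	addl $_GLOBAL_OFFSET_TABLE_, %ebx

   keeping the processor's call/return stack balanced, while the classic
   sequence pops the return address directly:

	call .L1
   .L1:	popl %ebx
	addl $_GLOBAL_OFFSET_TABLE_+[.-.L1], %ebx  */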

/* Generate an SImode "push" pattern for input ARG.  */

static rtx
gen_push (arg)
     rtx arg;
{
  return gen_rtx_SET (VOIDmode,
		      gen_rtx_MEM (SImode,
				   gen_rtx_PRE_DEC (SImode,
						    stack_pointer_rtx)),
		      arg);
}
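
/* Editorial note: the SET built above prints as

     (set (mem:SI (pre_dec:SI (reg:SI 7)))
	  ARG)

   (regno 7 being %esp), which the move patterns in i386.md render as a
   `pushl' instruction.  */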
1725
1726 /* Return number of registers to be saved on the stack. */
1727
1728 static int
1729 ix86_nsaved_regs ()
1730 {
1731 int nregs = 0;
1732 int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
1733 || current_function_uses_const_pool);
1734 int limit = (frame_pointer_needed
1735 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
1736 int regno;
1737
1738 for (regno = limit - 1; regno >= 0; regno--)
1739 if ((regs_ever_live[regno] && ! call_used_regs[regno])
1740 || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
1741 {
1742 nregs ++;
1743 }
1744 return nregs;
1745 }
1746
1747 /* Return the offset between two registers, one to be eliminated, and the other
1748 its replacement, at the start of a routine. */
1749
1750 HOST_WIDE_INT
1751 ix86_initial_elimination_offset (from, to)
1752 int from;
1753 int to;
1754 {
1755 int padding1;
1756 int nregs;
1757
1758 /* Stack grows downward:
1759
1760 [arguments]
1761 <- ARG_POINTER
1762 saved pc
1763
1764 saved frame pointer if frame_pointer_needed
1765 <- HARD_FRAME_POINTER
1766 [saved regs]
1767
1768 [padding1] \
1769 | <- FRAME_POINTER
1770 [frame] > tsize
1771 |
1772 [padding2] /
1773 */
1774
1775 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
1776 /* Skip saved PC and previous frame pointer.
1777 Executed only when frame_pointer_needed. */
1778 return 8;
1779 else if (from == FRAME_POINTER_REGNUM
1780 && to == HARD_FRAME_POINTER_REGNUM)
1781 {
1782 ix86_compute_frame_size (get_frame_size (), &nregs, &padding1, (int *)0);
1783 padding1 += nregs * UNITS_PER_WORD;
1784 return -padding1;
1785 }
1786 else
1787 {
1788 /* ARG_POINTER or FRAME_POINTER to STACK_POINTER elimination. */
1789 int frame_size = frame_pointer_needed ? 8 : 4;
1790 HOST_WIDE_INT tsize = ix86_compute_frame_size (get_frame_size (),
1791 &nregs, &padding1, (int *)0);
1792
1793
1794 if (to != STACK_POINTER_REGNUM)
1795 abort ();
1796 else if (from == ARG_POINTER_REGNUM)
1797 return tsize + nregs * UNITS_PER_WORD + frame_size;
1798 else if (from != FRAME_POINTER_REGNUM)
1799 abort ();
1800 else
1801 return tsize - padding1;
1802 }
1803 }
1804
1805 /* Compute the size of local storage taking into consideration the
1806 desired stack alignment which is to be maintained. Also determine
1807 the number of registers saved below the local storage.
1808
1809 PADDING1 returns padding before stack frame and PADDING2 returns
1810 padding after stack frame;
1811 */
1812
1813 static HOST_WIDE_INT
1814 ix86_compute_frame_size (size, nregs_on_stack, rpadding1, rpadding2)
1815 HOST_WIDE_INT size;
1816 int *nregs_on_stack;
1817 int *rpadding1;
1818 int *rpadding2;
1819 {
1820 int nregs;
1821 int padding1 = 0;
1822 int padding2 = 0;
1823 HOST_WIDE_INT total_size;
1824 int stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
1825 int offset;
1826 int preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
1827
1828 nregs = ix86_nsaved_regs ();
1829 total_size = size;
1830
1831 offset = frame_pointer_needed ? 8 : 4;
1832
1833 /* Do some sanity checking of stack_alignment_needed and preferred_alignment,
1834 since i386 port is the only using those features that may break easilly. */
1835
1836 if (size && !stack_alignment_needed)
1837 abort ();
1838 if (!size && stack_alignment_needed != STACK_BOUNDARY / BITS_PER_UNIT)
1839 abort ();
1840 if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
1841 abort ();
1842 if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
1843 abort ();
1844 if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
1845 abort ();
1846
1847 if (stack_alignment_needed < 4)
1848 stack_alignment_needed = 4;
1849
1850 offset += nregs * UNITS_PER_WORD;
1851
1852 if (ACCUMULATE_OUTGOING_ARGS)
1853 total_size += current_function_outgoing_args_size;
1854
1855 total_size += offset;
1856
1857 /* Align start of frame for local function. */
1858 padding1 = ((offset + stack_alignment_needed - 1)
1859 & -stack_alignment_needed) - offset;
1860 total_size += padding1;
1861
1862 /* Align stack boundary. */
1863 padding2 = ((total_size + preferred_alignment - 1)
1864 & -preferred_alignment) - total_size;
1865
1866 if (ACCUMULATE_OUTGOING_ARGS)
1867 padding2 += current_function_outgoing_args_size;
1868
1869 if (nregs_on_stack)
1870 *nregs_on_stack = nregs;
1871 if (rpadding1)
1872 *rpadding1 = padding1;
1873 if (rpadding2)
1874 *rpadding2 = padding2;
1875
1876 return size + padding1 + padding2;
1877 }
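/* Both padding computations above use the standard power-of-two round-up
   idiom (n + align - 1) & -align; the padding is then the rounded value
   minus the original.  A minimal standalone sketch (the function name is
   ours, purely for illustration):  */

#if 0
static int
round_up (n, align)
     int n, align;
{
  /* E.g. round_up (13, 8) == 16 and round_up (16, 8) == 16.
     ALIGN must be a power of two.  */
  return (n + align - 1) & -align;
}
#endif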
1878
1879 /* Emit code to save registers in the prologue. */
1880
1881 static void
1882 ix86_emit_save_regs ()
1883 {
1884 register int regno;
1885 int limit;
1886 rtx insn;
1887 int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
1888 || current_function_uses_const_pool);
1889 limit = (frame_pointer_needed
1890 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
1891
1892 for (regno = limit - 1; regno >= 0; regno--)
1893 if ((regs_ever_live[regno] && !call_used_regs[regno])
1894 || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
1895 {
1896 insn = emit_insn (gen_push (gen_rtx_REG (SImode, regno)));
1897 RTX_FRAME_RELATED_P (insn) = 1;
1898 }
1899 }
1900
1901 /* Expand the prologue into a bunch of separate insns. */
1902
1903 void
1904 ix86_expand_prologue ()
1905 {
1906 HOST_WIDE_INT tsize = ix86_compute_frame_size (get_frame_size (), (int *)0, (int *)0,
1907 (int *)0);
1908 rtx insn;
1909 int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
1910 || current_function_uses_const_pool);
1911
1912 /* Note: AT&T enter does NOT have reversed args. Enter is probably
1913 slower on all targets. Also sdb doesn't like it. */
1914
1915 if (frame_pointer_needed)
1916 {
1917 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
1918 RTX_FRAME_RELATED_P (insn) = 1;
1919
1920 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
1921 RTX_FRAME_RELATED_P (insn) = 1;
1922 }
1923
1924 ix86_emit_save_regs ();
1925
1926 if (tsize == 0)
1927 ;
1928 else if (! TARGET_STACK_PROBE || tsize < CHECK_STACK_LIMIT)
1929 {
1930 if (frame_pointer_needed)
1931 insn = emit_insn (gen_pro_epilogue_adjust_stack
1932 (stack_pointer_rtx, stack_pointer_rtx,
1933 GEN_INT (-tsize), hard_frame_pointer_rtx));
1934 else
1935 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
1936 GEN_INT (-tsize)));
1937 RTX_FRAME_RELATED_P (insn) = 1;
1938 }
1939 else
1940 {
1941 /* ??? Is this only valid for Win32? */
1942
1943 rtx arg0, sym;
1944
1945 arg0 = gen_rtx_REG (SImode, 0);
1946 emit_move_insn (arg0, GEN_INT (tsize));
1947
1948 sym = gen_rtx_MEM (FUNCTION_MODE,
1949 gen_rtx_SYMBOL_REF (Pmode, "_alloca"));
1950 insn = emit_call_insn (gen_call (sym, const0_rtx));
1951
1952 CALL_INSN_FUNCTION_USAGE (insn)
1953 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, arg0),
1954 CALL_INSN_FUNCTION_USAGE (insn));
1955 }
1956
1957 #ifdef SUBTARGET_PROLOGUE
1958 SUBTARGET_PROLOGUE;
1959 #endif
1960
1961 if (pic_reg_used)
1962 load_pic_register ();
1963
1964 /* If we are profiling, make sure no instructions are scheduled before
1965 the call to mcount. However, if -fpic, the above call will have
1966 done that. */
1967 if ((profile_flag || profile_block_flag) && ! pic_reg_used)
1968 emit_insn (gen_blockage ());
1969 }
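/* For a function needing a frame pointer, with two call-saved registers
   and a 24 byte frame, the insns expanded above correspond roughly to
   (AT&T syntax; a sketch, not literal compiler output):

     pushl  %ebp
     movl   %esp, %ebp
     pushl  %esi
     pushl  %ebx
     subl   $24, %esp  */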
1970
1971 /* Emit code to add TSIZE to esp value. Use POP instruction when
1972 profitable. */
1973
1974 static void
1975 ix86_emit_epilogue_esp_adjustment (tsize)
1976 int tsize;
1977 {
1978 /* If a frame pointer is present, we must be sure to tie the sp
1979 to the fp so that we don't mis-schedule. */
1980 if (frame_pointer_needed)
1981 emit_insn (gen_pro_epilogue_adjust_stack (stack_pointer_rtx,
1982 stack_pointer_rtx,
1983 GEN_INT (tsize),
1984 hard_frame_pointer_rtx));
1985 else
1986 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
1987 GEN_INT (tsize)));
1988 }
1989
1990 /* Emit code to restore saved registers using MOV insns. First register
1991 is restored from POINTER + OFFSET. */
1992 static void
1993 ix86_emit_restore_regs_using_mov (pointer, offset)
1994 rtx pointer;
1995 int offset;
1996 {
1997 int regno;
1998 int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
1999 || current_function_uses_const_pool);
2000 int limit = (frame_pointer_needed
2001 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
2002
2003 for (regno = 0; regno < limit; regno++)
2004 if ((regs_ever_live[regno] && !call_used_regs[regno])
2005 || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
2006 {
2007 emit_move_insn (gen_rtx_REG (SImode, regno),
2008 adj_offsettable_operand (gen_rtx_MEM (SImode,
2009 pointer),
2010 offset));
2011 offset += 4;
2012 }
2013 }
2014
2015 /* Restore function stack, frame, and registers. */
2016
2017 void
2018 ix86_expand_epilogue (emit_return)
2019 int emit_return;
2020 {
2021 int nregs;
2022 int regno;
2023
2024 int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
2025 || current_function_uses_const_pool);
2026 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
2027 HOST_WIDE_INT offset;
2028 HOST_WIDE_INT tsize = ix86_compute_frame_size (get_frame_size (), &nregs,
2029 (int *)0, (int *)0);
2030
2031
2032 /* Calculate start of saved registers relative to ebp. */
2033 offset = -nregs * UNITS_PER_WORD;
2034
2035 #ifdef FUNCTION_BLOCK_PROFILER_EXIT
2036 if (profile_block_flag == 2)
2037 {
2038 FUNCTION_BLOCK_PROFILER_EXIT;
2039 }
2040 #endif
2041
2042   /* If we're only restoring one register and sp is not valid, then
2043      use a move instruction to restore the register, since it's
2044      less work than reloading sp and popping the register.
2045 
2046      The default code results in a stack adjustment using an add/lea
2047      instruction, while this code results in a LEAVE instruction (or
2048      discrete equivalent), so it is profitable in some other cases as
2049      well, especially when there are no registers to restore.  We also use
2050      this code when TARGET_USE_LEAVE and there is exactly one register
2051      to pop.  This heuristic may need some tuning in the future.  */
2052 if ((!sp_valid && nregs <= 1)
2053 || (frame_pointer_needed && !nregs && tsize)
2054 || (frame_pointer_needed && TARGET_USE_LEAVE && !optimize_size
2055 && nregs == 1))
2056 {
2057 /* Restore registers. We can use ebp or esp to address the memory
2058 locations. If both are available, default to ebp, since offsets
2059 are known to be small. Only exception is esp pointing directly to the
2060 end of block of saved registers, where we may simplify addressing
2061 mode. */
2062
2063 if (!frame_pointer_needed || (sp_valid && !tsize))
2064 ix86_emit_restore_regs_using_mov (stack_pointer_rtx, tsize);
2065 else
2066 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx, offset);
2067
2068 if (!frame_pointer_needed)
2069 ix86_emit_epilogue_esp_adjustment (tsize + nregs * UNITS_PER_WORD);
2070 /* If not an i386, mov & pop is faster than "leave". */
2071 else if (TARGET_USE_LEAVE || optimize_size)
2072 emit_insn (gen_leave ());
2073 else
2074 {
2075 emit_insn (gen_pro_epilogue_adjust_stack (stack_pointer_rtx,
2076 hard_frame_pointer_rtx,
2077 const0_rtx,
2078 hard_frame_pointer_rtx));
2079 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
2080 }
2081 }
2082 else
2083 {
2084 /* First step is to deallocate the stack frame so that we can
2085 pop the registers. */
2086 if (!sp_valid)
2087 {
2088 if (!frame_pointer_needed)
2089 abort ();
2090 emit_insn (gen_pro_epilogue_adjust_stack (stack_pointer_rtx,
2091 hard_frame_pointer_rtx,
2092 GEN_INT (offset),
2093 hard_frame_pointer_rtx));
2094 }
2095 else if (tsize)
2096 ix86_emit_epilogue_esp_adjustment (tsize);
2097
2098 for (regno = 0; regno < STACK_POINTER_REGNUM; regno++)
2099 if ((regs_ever_live[regno] && !call_used_regs[regno])
2100 || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
2101 emit_insn (gen_popsi1 (gen_rtx_REG (SImode, regno)));
2102 }
2103
2104 /* Sibcall epilogues don't want a return instruction. */
2105 if (! emit_return)
2106 return;
2107
2108 if (current_function_pops_args && current_function_args_size)
2109 {
2110 rtx popc = GEN_INT (current_function_pops_args);
2111
2112 /* i386 can only pop 64K bytes. If asked to pop more, pop
2113 return address, do explicit add, and jump indirectly to the
2114 caller. */
2115
2116 if (current_function_pops_args >= 65536)
2117 {
2118 rtx ecx = gen_rtx_REG (SImode, 2);
2119
2120 emit_insn (gen_popsi1 (ecx));
2121 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
2122 emit_indirect_jump (ecx);
2123 }
2124 else
2125 emit_jump_insn (gen_return_pop_internal (popc));
2126 }
2127 else
2128 emit_jump_insn (gen_return_internal ());
2129 }
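/* Continuing the prologue example above, the two main epilogue shapes
   chosen here correspond roughly to (AT&T syntax; a sketch, not literal
   compiler output):

     mov/leave style:            pop style:
       movl  -8(%ebp), %ebx        addl  $24, %esp
       movl  -4(%ebp), %esi        popl  %ebx
       leave                       popl  %esi
       ret                         popl  %ebp
                                   ret                  */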
2130 \f
2131 /* Extract the parts of an RTL expression that is a valid memory address
2132 for an instruction. Return false if the structure of the address is
2133 grossly off. */
2134
2135 static int
2136 ix86_decompose_address (addr, out)
2137 register rtx addr;
2138 struct ix86_address *out;
2139 {
2140 rtx base = NULL_RTX;
2141 rtx index = NULL_RTX;
2142 rtx disp = NULL_RTX;
2143 HOST_WIDE_INT scale = 1;
2144 rtx scale_rtx = NULL_RTX;
2145
2146 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
2147 base = addr;
2148 else if (GET_CODE (addr) == PLUS)
2149 {
2150 rtx op0 = XEXP (addr, 0);
2151 rtx op1 = XEXP (addr, 1);
2152 enum rtx_code code0 = GET_CODE (op0);
2153 enum rtx_code code1 = GET_CODE (op1);
2154
2155 if (code0 == REG || code0 == SUBREG)
2156 {
2157 if (code1 == REG || code1 == SUBREG)
2158 index = op0, base = op1; /* index + base */
2159 else
2160 base = op0, disp = op1; /* base + displacement */
2161 }
2162 else if (code0 == MULT)
2163 {
2164 index = XEXP (op0, 0);
2165 scale_rtx = XEXP (op0, 1);
2166 if (code1 == REG || code1 == SUBREG)
2167 base = op1; /* index*scale + base */
2168 else
2169 disp = op1; /* index*scale + disp */
2170 }
2171 else if (code0 == PLUS && GET_CODE (XEXP (op0, 0)) == MULT)
2172 {
2173 index = XEXP (XEXP (op0, 0), 0); /* index*scale + base + disp */
2174 scale_rtx = XEXP (XEXP (op0, 0), 1);
2175 base = XEXP (op0, 1);
2176 disp = op1;
2177 }
2178 else if (code0 == PLUS)
2179 {
2180 index = XEXP (op0, 0); /* index + base + disp */
2181 base = XEXP (op0, 1);
2182 disp = op1;
2183 }
2184 else
2185 return FALSE;
2186 }
2187 else if (GET_CODE (addr) == MULT)
2188 {
2189 index = XEXP (addr, 0); /* index*scale */
2190 scale_rtx = XEXP (addr, 1);
2191 }
2192 else if (GET_CODE (addr) == ASHIFT)
2193 {
2194 rtx tmp;
2195
2196 /* We're called for lea too, which implements ashift on occasion. */
2197 index = XEXP (addr, 0);
2198 tmp = XEXP (addr, 1);
2199 if (GET_CODE (tmp) != CONST_INT)
2200 return FALSE;
2201 scale = INTVAL (tmp);
2202 if ((unsigned HOST_WIDE_INT) scale > 3)
2203 return FALSE;
2204 scale = 1 << scale;
2205 }
2206 else
2207 disp = addr; /* displacement */
2208
2209 /* Extract the integral value of scale. */
2210 if (scale_rtx)
2211 {
2212 if (GET_CODE (scale_rtx) != CONST_INT)
2213 return FALSE;
2214 scale = INTVAL (scale_rtx);
2215 }
2216
2217   /* Allow the arg pointer and stack pointer as the index if there is no scaling.  */
2218 if (base && index && scale == 1
2219 && (index == arg_pointer_rtx || index == frame_pointer_rtx
2220 || index == stack_pointer_rtx))
2221 {
2222 rtx tmp = base;
2223 base = index;
2224 index = tmp;
2225 }
2226
2227 /* Special case: %ebp cannot be encoded as a base without a displacement. */
2228 if ((base == hard_frame_pointer_rtx
2229 || base == frame_pointer_rtx
2230 || base == arg_pointer_rtx) && !disp)
2231 disp = const0_rtx;
2232
2233   /* Special case: on the K6, [%esi] causes the instruction to be vector decoded.
2234      Avoid this by transforming it to [%esi+0].  */
2235 if (ix86_cpu == PROCESSOR_K6 && !optimize_size
2236 && base && !index && !disp
2237 && REG_P (base)
2238 && REGNO_REG_CLASS (REGNO (base)) == SIREG)
2239 disp = const0_rtx;
2240
2241 /* Special case: encode reg+reg instead of reg*2. */
2242 if (!base && index && scale && scale == 2)
2243 base = index, scale = 1;
2244
2245 /* Special case: scaling cannot be encoded without base or displacement. */
2246 if (!base && !disp && index && scale != 1)
2247 disp = const0_rtx;
2248
2249 out->base = base;
2250 out->index = index;
2251 out->disp = disp;
2252 out->scale = scale;
2253
2254 return TRUE;
2255 }
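/* Two sketches of how the decomposition above behaves: the lea-style
   address (plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 12))
   yields base == B, index == A, scale == 4 and disp == 12; while the lone
   (mult (reg A) (const_int 2)) hits the reg+reg special case, yielding
   base == index == A with scale == 1.  */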
2256 \f
2257 /* Return the cost of the memory address X.
2258    For the i386, it is better to use a complex address than to let gcc
2259    copy the address into a reg and make a new pseudo.  But not if the
2260    address requires two regs - that would mean more pseudos with longer
2261    lifetimes.  */
2262 int
2263 ix86_address_cost (x)
2264 rtx x;
2265 {
2266 struct ix86_address parts;
2267 int cost = 1;
2268
2269 if (!ix86_decompose_address (x, &parts))
2270 abort ();
2271
2272 /* More complex memory references are better. */
2273 if (parts.disp && parts.disp != const0_rtx)
2274 cost--;
2275
2276 /* Attempt to minimize number of registers in the address. */
2277 if ((parts.base
2278 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
2279 || (parts.index
2280 && (!REG_P (parts.index)
2281 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
2282 cost++;
2283
2284 if (parts.base
2285 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
2286 && parts.index
2287 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
2288 && parts.base != parts.index)
2289 cost++;
2290
2291   /* The AMD-K6 doesn't like addresses with the ModR/M byte set to 00_xxx_100b,
2292      since its predecode logic can't detect the length of such instructions,
2293      and they degenerate to vector decoding.  Increase the cost of such
2294      addresses here.  The penalty is at least 2 cycles.  It may be worthwhile
2295      to split such addresses, or even to refuse them entirely.
2296 
2297      The following addressing modes are affected:
2298        [base+scale*index]
2299        [scale*index+disp]
2300        [base+index]
2301 
2302      The first and last cases may be avoidable by explicitly coding the zero
2303      displacement in the memory address, but I don't have an AMD-K6 machine
2304      handy to check this theory.  */
2305
2306 if (TARGET_K6
2307 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
2308 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
2309 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
2310 cost += 10;
2311
2312 return cost;
2313 }
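/* Worked examples of the cost above: a bare hard register base, e.g.
   (reg %ebx), costs 1; (plus (reg %ebx) (const_int 4)) costs 0, since a
   nonzero displacement makes the reference "more complex"; and an address
   built from two distinct pseudo registers plus a displacement costs
   1 - 1 + 1 + 1 == 2, reflecting the extra register pressure.  */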
2314 \f
2315 /* Determine if a given CONST RTX is a valid memory displacement
2316 in PIC mode. */
2317
2318 int
2319 legitimate_pic_address_disp_p (disp)
2320 register rtx disp;
2321 {
2322 if (GET_CODE (disp) != CONST)
2323 return 0;
2324 disp = XEXP (disp, 0);
2325
2326 if (GET_CODE (disp) == PLUS)
2327 {
2328 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
2329 return 0;
2330 disp = XEXP (disp, 0);
2331 }
2332
2333 if (GET_CODE (disp) != UNSPEC
2334 || XVECLEN (disp, 0) != 1)
2335 return 0;
2336
2337 /* Must be @GOT or @GOTOFF. */
2338 if (XINT (disp, 1) != 6
2339 && XINT (disp, 1) != 7)
2340 return 0;
2341
2342 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2343 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
2344 return 0;
2345
2346 return 1;
2347 }
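/* The shapes accepted above are thus (const (unspec [SYM] 6)) for @GOT
   references and (const (unspec [SYM] 7)) for @GOTOFF references,
   optionally wrapped as (const (plus (unspec ...) (const_int N))) for a
   constant offset.  Anything else, e.g. a bare (symbol_ref "x"), is
   rejected.  */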
2348
2349 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
2350 memory address for an instruction. The MODE argument is the machine mode
2351 for the MEM expression that wants to use this address.
2352
2353    It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
2354    convert common non-canonical forms to canonical form so that they will
2355    be recognized.  */
2356
2357 int
2358 legitimate_address_p (mode, addr, strict)
2359 enum machine_mode mode;
2360 register rtx addr;
2361 int strict;
2362 {
2363 struct ix86_address parts;
2364 rtx base, index, disp;
2365 HOST_WIDE_INT scale;
2366 const char *reason = NULL;
2367 rtx reason_rtx = NULL_RTX;
2368
2369 if (TARGET_DEBUG_ADDR)
2370 {
2371 fprintf (stderr,
2372 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
2373 GET_MODE_NAME (mode), strict);
2374 debug_rtx (addr);
2375 }
2376
2377 if (! ix86_decompose_address (addr, &parts))
2378 {
2379 reason = "decomposition failed";
2380 goto error;
2381 }
2382
2383 base = parts.base;
2384 index = parts.index;
2385 disp = parts.disp;
2386 scale = parts.scale;
2387
2388 /* Validate base register.
2389
2390      Don't allow SUBREGs here; they can lead to spill failures when the base
2391      is one word out of a two word structure, which is represented internally
2392      as a DImode int.  */
2393
2394 if (base)
2395 {
2396 reason_rtx = base;
2397
2398 if (GET_CODE (base) != REG)
2399 {
2400 reason = "base is not a register";
2401 goto error;
2402 }
2403
2404 if (GET_MODE (base) != Pmode)
2405 {
2406 reason = "base is not in Pmode";
2407 goto error;
2408 }
2409
2410 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
2411 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
2412 {
2413 reason = "base is not valid";
2414 goto error;
2415 }
2416 }
2417
2418 /* Validate index register.
2419
2420      Don't allow SUBREGs here; they can lead to spill failures when the index
2421      is one word out of a two word structure, which is represented internally
2422      as a DImode int.  */
2423
2424 if (index)
2425 {
2426 reason_rtx = index;
2427
2428 if (GET_CODE (index) != REG)
2429 {
2430 reason = "index is not a register";
2431 goto error;
2432 }
2433
2434 if (GET_MODE (index) != Pmode)
2435 {
2436 reason = "index is not in Pmode";
2437 goto error;
2438 }
2439
2440 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index))
2441 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index)))
2442 {
2443 reason = "index is not valid";
2444 goto error;
2445 }
2446 }
2447
2448 /* Validate scale factor. */
2449 if (scale != 1)
2450 {
2451 reason_rtx = GEN_INT (scale);
2452 if (!index)
2453 {
2454 reason = "scale without index";
2455 goto error;
2456 }
2457
2458 if (scale != 2 && scale != 4 && scale != 8)
2459 {
2460 reason = "scale is not a valid multiplier";
2461 goto error;
2462 }
2463 }
2464
2465 /* Validate displacement. */
2466 if (disp)
2467 {
2468 reason_rtx = disp;
2469
2470 if (!CONSTANT_ADDRESS_P (disp))
2471 {
2472 reason = "displacement is not constant";
2473 goto error;
2474 }
2475
2476 if (GET_CODE (disp) == CONST_DOUBLE)
2477 {
2478 reason = "displacement is a const_double";
2479 goto error;
2480 }
2481
2482 if (flag_pic && SYMBOLIC_CONST (disp))
2483 {
2484 if (! legitimate_pic_address_disp_p (disp))
2485 {
2486 reason = "displacement is an invalid pic construct";
2487 goto error;
2488 }
2489
2490 /* Verify that a symbolic pic displacement includes
2491 the pic_offset_table_rtx register. */
2492 if (base != pic_offset_table_rtx
2493 && (index != pic_offset_table_rtx || scale != 1))
2494 {
2495 reason = "pic displacement against invalid base";
2496 goto error;
2497 }
2498 }
2499 else if (HALF_PIC_P ())
2500 {
2501 if (! HALF_PIC_ADDRESS_P (disp)
2502 || (base != NULL_RTX || index != NULL_RTX))
2503 {
2504 reason = "displacement is an invalid half-pic reference";
2505 goto error;
2506 }
2507 }
2508 }
2509
2510 /* Everything looks valid. */
2511 if (TARGET_DEBUG_ADDR)
2512 fprintf (stderr, "Success.\n");
2513 return TRUE;
2514
2515 error:
2516 if (TARGET_DEBUG_ADDR)
2517 {
2518 fprintf (stderr, "Error: %s\n", reason);
2519 debug_rtx (reason_rtx);
2520 }
2521 return FALSE;
2522 }
2523 \f
2524 /* Return a legitimate reference for ORIG (an address) using the
2525 register REG. If REG is 0, a new pseudo is generated.
2526
2527 There are two types of references that must be handled:
2528
2529 1. Global data references must load the address from the GOT, via
2530 the PIC reg. An insn is emitted to do this load, and the reg is
2531 returned.
2532
2533 2. Static data references, constant pool addresses, and code labels
2534 compute the address as an offset from the GOT, whose base is in
2535 the PIC reg. Static data objects have SYMBOL_REF_FLAG set to
2536 differentiate them from global data objects. The returned
2537 address is the PIC reg + an unspec constant.
2538
2539 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
2540 reg also appears in the address. */
2541
2542 rtx
2543 legitimize_pic_address (orig, reg)
2544 rtx orig;
2545 rtx reg;
2546 {
2547 rtx addr = orig;
2548 rtx new = orig;
2549 rtx base;
2550
2551 if (GET_CODE (addr) == LABEL_REF
2552 || (GET_CODE (addr) == SYMBOL_REF
2553 && (CONSTANT_POOL_ADDRESS_P (addr)
2554 || SYMBOL_REF_FLAG (addr))))
2555 {
2556 /* This symbol may be referenced via a displacement from the PIC
2557 base address (@GOTOFF). */
2558
2559 current_function_uses_pic_offset_table = 1;
2560 new = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, addr), 7);
2561 new = gen_rtx_CONST (VOIDmode, new);
2562 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
2563
2564 if (reg != 0)
2565 {
2566 emit_move_insn (reg, new);
2567 new = reg;
2568 }
2569 }
2570 else if (GET_CODE (addr) == SYMBOL_REF)
2571 {
2572 /* This symbol must be referenced via a load from the
2573 Global Offset Table (@GOT). */
2574
2575 current_function_uses_pic_offset_table = 1;
2576 new = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, addr), 6);
2577 new = gen_rtx_CONST (VOIDmode, new);
2578 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
2579 new = gen_rtx_MEM (Pmode, new);
2580 RTX_UNCHANGING_P (new) = 1;
2581
2582 if (reg == 0)
2583 reg = gen_reg_rtx (Pmode);
2584 emit_move_insn (reg, new);
2585 new = reg;
2586 }
2587 else
2588 {
2589 if (GET_CODE (addr) == CONST)
2590 {
2591 addr = XEXP (addr, 0);
2592 if (GET_CODE (addr) == UNSPEC)
2593 {
2594 /* Check that the unspec is one of the ones we generate? */
2595 }
2596 else if (GET_CODE (addr) != PLUS)
2597 abort ();
2598 }
2599 if (GET_CODE (addr) == PLUS)
2600 {
2601 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
2602
2603 /* Check first to see if this is a constant offset from a @GOTOFF
2604 symbol reference. */
2605 if ((GET_CODE (op0) == LABEL_REF
2606 || (GET_CODE (op0) == SYMBOL_REF
2607 && (CONSTANT_POOL_ADDRESS_P (op0)
2608 || SYMBOL_REF_FLAG (op0))))
2609 && GET_CODE (op1) == CONST_INT)
2610 {
2611 current_function_uses_pic_offset_table = 1;
2612 new = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, op0), 7);
2613 new = gen_rtx_PLUS (VOIDmode, new, op1);
2614 new = gen_rtx_CONST (VOIDmode, new);
2615 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
2616
2617 if (reg != 0)
2618 {
2619 emit_move_insn (reg, new);
2620 new = reg;
2621 }
2622 }
2623 else
2624 {
2625 base = legitimize_pic_address (XEXP (addr, 0), reg);
2626 new = legitimize_pic_address (XEXP (addr, 1),
2627 base == reg ? NULL_RTX : reg);
2628
2629 if (GET_CODE (new) == CONST_INT)
2630 new = plus_constant (base, INTVAL (new));
2631 else
2632 {
2633 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
2634 {
2635 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
2636 new = XEXP (new, 1);
2637 }
2638 new = gen_rtx_PLUS (Pmode, base, new);
2639 }
2640 }
2641 }
2642 }
2643 return new;
2644 }
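/* Two sketches of the transformations above: a local symbol "x"
   (SYMBOL_REF_FLAG set) becomes the @GOTOFF form

     (plus pic_offset_table_rtx (const (unspec [(symbol_ref "x")] 7)))

   i.e. x@GOTOFF(%ebx), while a global symbol "y" becomes a load from
   the GOT,

     (mem (plus pic_offset_table_rtx (const (unspec [(symbol_ref "y")] 6))))

   i.e. y@GOT(%ebx), emitted as a move into REG (or a fresh pseudo).  */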
2645 \f
2646 /* Try machine-dependent ways of modifying an illegitimate address
2647 to be legitimate. If we find one, return the new, valid address.
2648 This macro is used in only one place: `memory_address' in explow.c.
2649
2650 OLDX is the address as it was before break_out_memory_refs was called.
2651 In some cases it is useful to look at this to decide what needs to be done.
2652
2653 MODE and WIN are passed so that this macro can use
2654 GO_IF_LEGITIMATE_ADDRESS.
2655
2656 It is always safe for this macro to do nothing. It exists to recognize
2657 opportunities to optimize the output.
2658
2659 For the 80386, we handle X+REG by loading X into a register R and
2660 using R+REG. R will go in a general reg and indexing will be used.
2661 However, if REG is a broken-out memory address or multiplication,
2662 nothing needs to be done because REG can certainly go in a general reg.
2663
2664 When -fpic is used, special handling is needed for symbolic references.
2665 See comments by legitimize_pic_address in i386.c for details. */
2666
2667 rtx
2668 legitimize_address (x, oldx, mode)
2669 register rtx x;
2670 register rtx oldx ATTRIBUTE_UNUSED;
2671 enum machine_mode mode;
2672 {
2673 int changed = 0;
2674 unsigned log;
2675
2676 if (TARGET_DEBUG_ADDR)
2677 {
2678 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
2679 GET_MODE_NAME (mode));
2680 debug_rtx (x);
2681 }
2682
2683 if (flag_pic && SYMBOLIC_CONST (x))
2684 return legitimize_pic_address (x, 0);
2685
2686 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
2687 if (GET_CODE (x) == ASHIFT
2688 && GET_CODE (XEXP (x, 1)) == CONST_INT
2689 && (log = (unsigned)exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
2690 {
2691 changed = 1;
2692 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
2693 GEN_INT (1 << log));
2694 }
2695
2696 if (GET_CODE (x) == PLUS)
2697 {
2698 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
2699
2700 if (GET_CODE (XEXP (x, 0)) == ASHIFT
2701 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2702 && (log = (unsigned)exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
2703 {
2704 changed = 1;
2705 XEXP (x, 0) = gen_rtx_MULT (Pmode,
2706 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
2707 GEN_INT (1 << log));
2708 }
2709
2710 if (GET_CODE (XEXP (x, 1)) == ASHIFT
2711 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
2712 && (log = (unsigned)exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)
2713 {
2714 changed = 1;
2715 XEXP (x, 1) = gen_rtx_MULT (Pmode,
2716 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
2717 GEN_INT (1 << log));
2718 }
2719
2720 /* Put multiply first if it isn't already. */
2721 if (GET_CODE (XEXP (x, 1)) == MULT)
2722 {
2723 rtx tmp = XEXP (x, 0);
2724 XEXP (x, 0) = XEXP (x, 1);
2725 XEXP (x, 1) = tmp;
2726 changed = 1;
2727 }
2728
2729 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
2730 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
2731 created by virtual register instantiation, register elimination, and
2732 similar optimizations. */
2733 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
2734 {
2735 changed = 1;
2736 x = gen_rtx_PLUS (Pmode,
2737 gen_rtx_PLUS (Pmode, XEXP (x, 0),
2738 XEXP (XEXP (x, 1), 0)),
2739 XEXP (XEXP (x, 1), 1));
2740 }
2741
2742 /* Canonicalize
2743 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
2744 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
2745 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
2746 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
2747 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
2748 && CONSTANT_P (XEXP (x, 1)))
2749 {
2750 rtx constant;
2751 rtx other = NULL_RTX;
2752
2753 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2754 {
2755 constant = XEXP (x, 1);
2756 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
2757 }
2758 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
2759 {
2760 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
2761 other = XEXP (x, 1);
2762 }
2763 else
2764 constant = 0;
2765
2766 if (constant)
2767 {
2768 changed = 1;
2769 x = gen_rtx_PLUS (Pmode,
2770 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
2771 XEXP (XEXP (XEXP (x, 0), 1), 0)),
2772 plus_constant (other, INTVAL (constant)));
2773 }
2774 }
2775
2776 if (changed && legitimate_address_p (mode, x, FALSE))
2777 return x;
2778
2779 if (GET_CODE (XEXP (x, 0)) == MULT)
2780 {
2781 changed = 1;
2782 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
2783 }
2784
2785 if (GET_CODE (XEXP (x, 1)) == MULT)
2786 {
2787 changed = 1;
2788 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
2789 }
2790
2791 if (changed
2792 && GET_CODE (XEXP (x, 1)) == REG
2793 && GET_CODE (XEXP (x, 0)) == REG)
2794 return x;
2795
2796 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
2797 {
2798 changed = 1;
2799 x = legitimize_pic_address (x, 0);
2800 }
2801
2802 if (changed && legitimate_address_p (mode, x, FALSE))
2803 return x;
2804
2805 if (GET_CODE (XEXP (x, 0)) == REG)
2806 {
2807 register rtx temp = gen_reg_rtx (Pmode);
2808 register rtx val = force_operand (XEXP (x, 1), temp);
2809 if (val != temp)
2810 emit_move_insn (temp, val);
2811
2812 XEXP (x, 1) = temp;
2813 return x;
2814 }
2815
2816 else if (GET_CODE (XEXP (x, 1)) == REG)
2817 {
2818 register rtx temp = gen_reg_rtx (Pmode);
2819 register rtx val = force_operand (XEXP (x, 0), temp);
2820 if (val != temp)
2821 emit_move_insn (temp, val);
2822
2823 XEXP (x, 0) = temp;
2824 return x;
2825 }
2826 }
2827
2828 return x;
2829 }
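/* An example of the canonicalizations above: (plus (reg A) (ashift (reg B)
   (const_int 2))) first has the shift rewritten as a multiply and the
   multiply term moved to the front, giving (plus (mult (reg B)
   (const_int 4)) (reg A)), which is the index*scale + base form that
   ix86_decompose_address recognizes.  */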
2830 \f
2831 /* Print an integer constant expression in assembler syntax. Addition
2832 and subtraction are the only arithmetic that may appear in these
2833 expressions. FILE is the stdio stream to write to, X is the rtx, and
2834 CODE is the operand print code from the output string. */
2835
2836 static void
2837 output_pic_addr_const (file, x, code)
2838 FILE *file;
2839 rtx x;
2840 int code;
2841 {
2842 char buf[256];
2843
2844 switch (GET_CODE (x))
2845 {
2846 case PC:
2847 if (flag_pic)
2848 putc ('.', file);
2849 else
2850 abort ();
2851 break;
2852
2853 case SYMBOL_REF:
2854 assemble_name (file, XSTR (x, 0));
2855 if (code == 'P' && ! SYMBOL_REF_FLAG (x))
2856 fputs ("@PLT", file);
2857 break;
2858
2859 case LABEL_REF:
2860 x = XEXP (x, 0);
2861 /* FALLTHRU */
2862 case CODE_LABEL:
2863 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
2864 assemble_name (asm_out_file, buf);
2865 break;
2866
2867 case CONST_INT:
2868 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
2869 break;
2870
2871 case CONST:
2872 /* This used to output parentheses around the expression,
2873 but that does not work on the 386 (either ATT or BSD assembler). */
2874 output_pic_addr_const (file, XEXP (x, 0), code);
2875 break;
2876
2877 case CONST_DOUBLE:
2878 if (GET_MODE (x) == VOIDmode)
2879 {
2880 /* We can use %d if the number is <32 bits and positive. */
2881 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
2882 fprintf (file, "0x%lx%08lx",
2883 (unsigned long) CONST_DOUBLE_HIGH (x),
2884 (unsigned long) CONST_DOUBLE_LOW (x));
2885 else
2886 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
2887 }
2888 else
2889 /* We can't handle floating point constants;
2890 PRINT_OPERAND must handle them. */
2891 output_operand_lossage ("floating constant misused");
2892 break;
2893
2894 case PLUS:
2895 /* Some assemblers need integer constants to appear first. */
2896 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
2897 {
2898 output_pic_addr_const (file, XEXP (x, 0), code);
2899 putc ('+', file);
2900 output_pic_addr_const (file, XEXP (x, 1), code);
2901 }
2902 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2903 {
2904 output_pic_addr_const (file, XEXP (x, 1), code);
2905 putc ('+', file);
2906 output_pic_addr_const (file, XEXP (x, 0), code);
2907 }
2908 else
2909 abort ();
2910 break;
2911
2912 case MINUS:
2913 putc (ASSEMBLER_DIALECT ? '(' : '[', file);
2914 output_pic_addr_const (file, XEXP (x, 0), code);
2915 putc ('-', file);
2916 output_pic_addr_const (file, XEXP (x, 1), code);
2917 putc (ASSEMBLER_DIALECT ? ')' : ']', file);
2918 break;
2919
2920 case UNSPEC:
2921 if (XVECLEN (x, 0) != 1)
2922 abort ();
2923 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
2924 switch (XINT (x, 1))
2925 {
2926 case 6:
2927 fputs ("@GOT", file);
2928 break;
2929 case 7:
2930 fputs ("@GOTOFF", file);
2931 break;
2932 case 8:
2933 fputs ("@PLT", file);
2934 break;
2935 default:
2936 output_operand_lossage ("invalid UNSPEC as operand");
2937 break;
2938 }
2939 break;
2940
2941 default:
2942 output_operand_lossage ("invalid expression as operand");
2943 }
2944 }
2945
2946 /* This is called from dwarfout.c via ASM_OUTPUT_DWARF_ADDR_CONST.
2947 We need to handle our special PIC relocations. */
2948
2949 void
2950 i386_dwarf_output_addr_const (file, x)
2951 FILE *file;
2952 rtx x;
2953 {
2954 fprintf (file, "\t%s\t", INT_ASM_OP);
2955 if (flag_pic)
2956 output_pic_addr_const (file, x, '\0');
2957 else
2958 output_addr_const (file, x);
2959 fputc ('\n', file);
2960 }
2961
2962 /* In the name of slightly smaller debug output, and to cater to
2963    general assembler lossage, recognize PIC+GOTOFF and turn it back
2964 into a direct symbol reference. */
2965
2966 rtx
2967 i386_simplify_dwarf_addr (orig_x)
2968 rtx orig_x;
2969 {
2970 rtx x = orig_x;
2971
2972 if (GET_CODE (x) != PLUS
2973 || GET_CODE (XEXP (x, 0)) != REG
2974 || GET_CODE (XEXP (x, 1)) != CONST)
2975 return orig_x;
2976
2977 x = XEXP (XEXP (x, 1), 0);
2978 if (GET_CODE (x) == UNSPEC
2979 && XINT (x, 1) == 7)
2980 return XVECEXP (x, 0, 0);
2981
2982 if (GET_CODE (x) == PLUS
2983 && GET_CODE (XEXP (x, 0)) == UNSPEC
2984 && GET_CODE (XEXP (x, 1)) == CONST_INT
2985 && XINT (XEXP (x, 0), 1) == 7)
2986 return gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
2987
2988 return orig_x;
2989 }
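/* For example, (plus (reg %ebx) (const (unspec [(symbol_ref "x")] 7))),
   i.e. x@GOTOFF(%ebx), simplifies back to (symbol_ref "x") for the debug
   output.  */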
2990 \f
2991 static void
2992 put_condition_code (code, mode, reverse, fp, file)
2993 enum rtx_code code;
2994 enum machine_mode mode;
2995 int reverse, fp;
2996 FILE *file;
2997 {
2998 const char *suffix;
2999
3000 if (reverse)
3001 code = reverse_condition (code);
3002
3003 switch (code)
3004 {
3005 case EQ:
3006 suffix = "e";
3007 break;
3008 case NE:
3009 suffix = "ne";
3010 break;
3011 case GT:
3012 if (mode == CCNOmode)
3013 abort ();
3014 suffix = "g";
3015 break;
3016 case GTU:
3017       /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
3018          Those same assemblers have the same but opposite lossage on cmov.  */
3019 suffix = fp ? "nbe" : "a";
3020 break;
3021 case LT:
3022 if (mode == CCNOmode)
3023 suffix = "s";
3024 else
3025 suffix = "l";
3026 break;
3027 case LTU:
3028 suffix = "b";
3029 break;
3030 case GE:
3031 if (mode == CCNOmode)
3032 suffix = "ns";
3033 else
3034 suffix = "ge";
3035 break;
3036 case GEU:
3037 /* ??? As above. */
3038 suffix = fp ? "nb" : "ae";
3039 break;
3040 case LE:
3041 if (mode == CCNOmode)
3042 abort ();
3043 suffix = "le";
3044 break;
3045 case LEU:
3046 suffix = "be";
3047 break;
3048 case UNORDERED:
3049 suffix = "p";
3050 break;
3051 case ORDERED:
3052 suffix = "np";
3053 break;
3054 default:
3055 abort ();
3056 }
3057 fputs (suffix, file);
3058 }
3059
3060 void
3061 print_reg (x, code, file)
3062 rtx x;
3063 int code;
3064 FILE *file;
3065 {
3066 if (REGNO (x) == ARG_POINTER_REGNUM
3067 || REGNO (x) == FRAME_POINTER_REGNUM
3068 || REGNO (x) == FLAGS_REG
3069 || REGNO (x) == FPSR_REG)
3070 abort ();
3071
3072 if (ASSEMBLER_DIALECT == 0 || USER_LABEL_PREFIX[0] == 0)
3073 putc ('%', file);
3074
3075 if (code == 'w')
3076 code = 2;
3077 else if (code == 'b')
3078 code = 1;
3079 else if (code == 'k')
3080 code = 4;
3081 else if (code == 'y')
3082 code = 3;
3083 else if (code == 'h')
3084 code = 0;
3085 else
3086 code = GET_MODE_SIZE (GET_MODE (x));
3087
3088 switch (code)
3089 {
3090 case 3:
3091 if (STACK_TOP_P (x))
3092 {
3093 fputs ("st(0)", file);
3094 break;
3095 }
3096 /* FALLTHRU */
3097 case 4:
3098 case 8:
3099 case 12:
3100 if (! FP_REG_P (x))
3101 putc ('e', file);
3102 /* FALLTHRU */
3103 case 2:
3104 fputs (hi_reg_name[REGNO (x)], file);
3105 break;
3106 case 1:
3107 fputs (qi_reg_name[REGNO (x)], file);
3108 break;
3109 case 0:
3110 fputs (qi_high_reg_name[REGNO (x)], file);
3111 break;
3112 default:
3113 abort ();
3114 }
3115 }
3116
3117 /* Meaning of CODE:
3118 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
3119 C -- print opcode suffix for set/cmov insn.
3120 c -- like C, but print reversed condition
3121 R -- print the prefix for register names.
3122 z -- print the opcode suffix for the size of the current operand.
3123 * -- print a star (in certain assembler syntax)
3124 w -- print the operand as if it's a "word" (HImode) even if it isn't.
3125    s -- print a shift double count, followed by the assembler's argument
3126 delimiter.
3127 b -- print the QImode name of the register for the indicated operand.
3128 %b0 would print %al if operands[0] is reg 0.
3129 w -- likewise, print the HImode name of the register.
3130 k -- likewise, print the SImode name of the register.
3131 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
3132 y -- print "st(0)" instead of "st" as a register. */
3133
3134 void
3135 print_operand (file, x, code)
3136 FILE *file;
3137 rtx x;
3138 int code;
3139 {
3140 if (code)
3141 {
3142 switch (code)
3143 {
3144 case '*':
3145 if (ASSEMBLER_DIALECT == 0)
3146 putc ('*', file);
3147 return;
3148
3149 case 'L':
3150 if (ASSEMBLER_DIALECT == 0)
3151 putc ('l', file);
3152 return;
3153
3154 case 'W':
3155 if (ASSEMBLER_DIALECT == 0)
3156 putc ('w', file);
3157 return;
3158
3159 case 'B':
3160 if (ASSEMBLER_DIALECT == 0)
3161 putc ('b', file);
3162 return;
3163
3164 case 'Q':
3165 if (ASSEMBLER_DIALECT == 0)
3166 putc ('l', file);
3167 return;
3168
3169 case 'S':
3170 if (ASSEMBLER_DIALECT == 0)
3171 putc ('s', file);
3172 return;
3173
3174 case 'T':
3175 if (ASSEMBLER_DIALECT == 0)
3176 putc ('t', file);
3177 return;
3178
3179 case 'z':
3180 /* 387 opcodes don't get size suffixes if the operands are
3181 registers. */
3182
3183 if (STACK_REG_P (x))
3184 return;
3185
3186 /* Intel syntax has no truck with instruction suffixes. */
3187 if (ASSEMBLER_DIALECT != 0)
3188 return;
3189
3190 	  /* Derive the size suffix of the op from the size of the operand.  */
3191 switch (GET_MODE_SIZE (GET_MODE (x)))
3192 {
3193 case 2:
3194 #ifdef HAVE_GAS_FILDS_FISTS
3195 putc ('s', file);
3196 #endif
3197 return;
3198
3199 case 4:
3200 if (GET_MODE (x) == SFmode)
3201 {
3202 putc ('s', file);
3203 return;
3204 }
3205 else
3206 putc ('l', file);
3207 return;
3208
3209 case 12:
3210 putc ('t', file);
3211 return;
3212
3213 case 8:
3214 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
3215 {
3216 #ifdef GAS_MNEMONICS
3217 putc ('q', file);
3218 #else
3219 putc ('l', file);
3220 putc ('l', file);
3221 #endif
3222 }
3223 else
3224 putc ('l', file);
3225 return;
3226
3227 default:
3228 abort ();
3229 }
3230
3231 case 'b':
3232 case 'w':
3233 case 'k':
3234 case 'h':
3235 case 'y':
3236 case 'X':
3237 case 'P':
3238 break;
3239
3240 case 's':
3241 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
3242 {
3243 PRINT_OPERAND (file, x, 0);
3244 putc (',', file);
3245 }
3246 return;
3247
3248 case 'C':
3249 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
3250 return;
3251 case 'F':
3252 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
3253 return;
3254
3255 /* Like above, but reverse condition */
3256 case 'c':
3257 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
3258 return;
3259 case 'f':
3260 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
3261 return;
3262
3263 default:
3264 {
3265 char str[50];
3266 sprintf (str, "invalid operand code `%c'", code);
3267 output_operand_lossage (str);
3268 }
3269 }
3270 }
3271
3272 if (GET_CODE (x) == REG)
3273 {
3274 PRINT_REG (x, code, file);
3275 }
3276
3277 else if (GET_CODE (x) == MEM)
3278 {
3279 /* No `byte ptr' prefix for call instructions. */
3280 if (ASSEMBLER_DIALECT != 0 && code != 'X' && code != 'P')
3281 {
3282 const char * size;
3283 switch (GET_MODE_SIZE (GET_MODE (x)))
3284 {
3285 case 1: size = "BYTE"; break;
3286 case 2: size = "WORD"; break;
3287 case 4: size = "DWORD"; break;
3288 case 8: size = "QWORD"; break;
3289 case 12: size = "XWORD"; break;
3290 default:
3291 abort ();
3292 }
3293 fputs (size, file);
3294 fputs (" PTR ", file);
3295 }
3296
3297 x = XEXP (x, 0);
3298 if (flag_pic && CONSTANT_ADDRESS_P (x))
3299 output_pic_addr_const (file, x, code);
3300 else
3301 output_address (x);
3302 }
3303
3304 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
3305 {
3306 REAL_VALUE_TYPE r;
3307 long l;
3308
3309 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3310 REAL_VALUE_TO_TARGET_SINGLE (r, l);
3311
3312 if (ASSEMBLER_DIALECT == 0)
3313 putc ('$', file);
3314 fprintf (file, "0x%lx", l);
3315 }
3316
3317 /* These float cases don't actually occur as immediate operands. */
3318 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
3319 {
3320 REAL_VALUE_TYPE r;
3321 char dstr[30];
3322
3323 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3324 REAL_VALUE_TO_DECIMAL (r, "%.22e", dstr);
3325 fprintf (file, "%s", dstr);
3326 }
3327
3328 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
3329 {
3330 REAL_VALUE_TYPE r;
3331 char dstr[30];
3332
3333 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3334 REAL_VALUE_TO_DECIMAL (r, "%.22e", dstr);
3335 fprintf (file, "%s", dstr);
3336 }
3337 else
3338 {
3339 if (code != 'P')
3340 {
3341 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
3342 {
3343 if (ASSEMBLER_DIALECT == 0)
3344 putc ('$', file);
3345 }
3346 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
3347 || GET_CODE (x) == LABEL_REF)
3348 {
3349 if (ASSEMBLER_DIALECT == 0)
3350 putc ('$', file);
3351 else
3352 fputs ("OFFSET FLAT:", file);
3353 }
3354 }
3355 if (GET_CODE (x) == CONST_INT)
3356 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
3357 else if (flag_pic)
3358 output_pic_addr_const (file, x, code);
3359 else
3360 output_addr_const (file, x);
3361 }
3362 }
3363 \f
3364 /* Print a memory operand whose address is ADDR. */
3365
3366 void
3367 print_operand_address (file, addr)
3368 FILE *file;
3369 register rtx addr;
3370 {
3371 struct ix86_address parts;
3372 rtx base, index, disp;
3373 int scale;
3374
3375 if (! ix86_decompose_address (addr, &parts))
3376 abort ();
3377
3378 base = parts.base;
3379 index = parts.index;
3380 disp = parts.disp;
3381 scale = parts.scale;
3382
3383 if (!base && !index)
3384 {
3385 /* Displacement only requires special attention. */
3386
3387 if (GET_CODE (disp) == CONST_INT)
3388 {
3389 if (ASSEMBLER_DIALECT != 0)
3390 fputs ("ds:", file);
3391 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
3392 }
3393 else if (flag_pic)
3394 output_pic_addr_const (file, addr, 0);
3395 else
3396 output_addr_const (file, addr);
3397 }
3398 else
3399 {
3400 if (ASSEMBLER_DIALECT == 0)
3401 {
3402 if (disp)
3403 {
3404 if (flag_pic)
3405 output_pic_addr_const (file, disp, 0);
3406 else if (GET_CODE (disp) == LABEL_REF)
3407 output_asm_label (disp);
3408 else
3409 output_addr_const (file, disp);
3410 }
3411
3412 putc ('(', file);
3413 if (base)
3414 PRINT_REG (base, 0, file);
3415 if (index)
3416 {
3417 putc (',', file);
3418 PRINT_REG (index, 0, file);
3419 if (scale != 1)
3420 fprintf (file, ",%d", scale);
3421 }
3422 putc (')', file);
3423 }
3424 else
3425 {
3426 rtx offset = NULL_RTX;
3427
3428 if (disp)
3429 {
3430 /* Pull out the offset of a symbol; print any symbol itself. */
3431 if (GET_CODE (disp) == CONST
3432 && GET_CODE (XEXP (disp, 0)) == PLUS
3433 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
3434 {
3435 offset = XEXP (XEXP (disp, 0), 1);
3436 disp = gen_rtx_CONST (VOIDmode,
3437 XEXP (XEXP (disp, 0), 0));
3438 }
3439
3440 if (flag_pic)
3441 output_pic_addr_const (file, disp, 0);
3442 else if (GET_CODE (disp) == LABEL_REF)
3443 output_asm_label (disp);
3444 else if (GET_CODE (disp) == CONST_INT)
3445 offset = disp;
3446 else
3447 output_addr_const (file, disp);
3448 }
3449
3450 putc ('[', file);
3451 if (base)
3452 {
3453 PRINT_REG (base, 0, file);
3454 if (offset)
3455 {
3456 if (INTVAL (offset) >= 0)
3457 putc ('+', file);
3458 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
3459 }
3460 }
3461 else if (offset)
3462 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
3463 else
3464 putc ('0', file);
3465
3466 if (index)
3467 {
3468 putc ('+', file);
3469 PRINT_REG (index, 0, file);
3470 if (scale != 1)
3471 fprintf (file, "*%d", scale);
3472 }
3473 putc (']', file);
3474 }
3475 }
3476 }
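/* For instance, an address with base %ebx, index %eax, scale 4 and
   displacement 12 is printed as "12(%ebx,%eax,4)" in AT&T syntax and as
   "[ebx+12+eax*4]" in Intel syntax (register prefixes depending on
   print_reg's rules).  */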
3477 \f
3478 /* Split one or more DImode RTL references into pairs of SImode
3479 references. The RTL can be REG, offsettable MEM, integer constant, or
3480 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3481 split and "num" is its length. lo_half and hi_half are output arrays
3482 that parallel "operands". */
3483
3484 void
3485 split_di (operands, num, lo_half, hi_half)
3486 rtx operands[];
3487 int num;
3488 rtx lo_half[], hi_half[];
3489 {
3490 while (num--)
3491 {
3492 rtx op = operands[num];
3493 if (CONSTANT_P (op))
3494 split_double (op, &lo_half[num], &hi_half[num]);
3495 else if (! reload_completed)
3496 {
3497 lo_half[num] = gen_lowpart (SImode, op);
3498 hi_half[num] = gen_highpart (SImode, op);
3499 }
3500 else if (GET_CODE (op) == REG)
3501 {
3502 lo_half[num] = gen_rtx_REG (SImode, REGNO (op));
3503 hi_half[num] = gen_rtx_REG (SImode, REGNO (op) + 1);
3504 }
3505 else if (offsettable_memref_p (op))
3506 {
3507 rtx lo_addr = XEXP (op, 0);
3508 rtx hi_addr = XEXP (adj_offsettable_operand (op, 4), 0);
3509 lo_half[num] = change_address (op, SImode, lo_addr);
3510 hi_half[num] = change_address (op, SImode, hi_addr);
3511 }
3512 else
3513 abort ();
3514 }
3515 }
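/* A minimal usage sketch (illustration only; the arrays are ours):  */

#if 0
  rtx ops[1], lo[1], hi[1];
  ops[0] = gen_rtx_REG (DImode, 0);
  split_di (ops, 1, lo, hi);
  /* After reload, a hard DImode register splits into consecutive SImode
     registers: lo[0] == (reg:SI 0) and hi[0] == (reg:SI 1); before
     reload, gen_lowpart/gen_highpart produce the two halves instead.  */
#endif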
3516 \f
3517 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
3518 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
3519 is the expression of the binary operation. The output may either be
3520 emitted here, or returned to the caller, like all output_* functions.
3521
3522 There is no guarantee that the operands are the same mode, as they
3523 might be within FLOAT or FLOAT_EXTEND expressions. */
3524
3525 #ifndef SYSV386_COMPAT
3526 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
3527 wants to fix the assemblers because that causes incompatibility
3528 with gcc. No-one wants to fix gcc because that causes
3529 incompatibility with assemblers... You can use the option of
3530 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
3531 #define SYSV386_COMPAT 1
3532 #endif
3533
3534 const char *
3535 output_387_binary_op (insn, operands)
3536 rtx insn;
3537 rtx *operands;
3538 {
3539 static char buf[30];
3540 const char *p;
3541
3542 #ifdef ENABLE_CHECKING
3543   /* Even if we do not want to check the inputs, this documents the input
3544      constraints, which helps in understanding the following code.  */
3545 if (STACK_REG_P (operands[0])
3546 && ((REG_P (operands[1])
3547 && REGNO (operands[0]) == REGNO (operands[1])
3548 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
3549 || (REG_P (operands[2])
3550 && REGNO (operands[0]) == REGNO (operands[2])
3551 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
3552 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
3553 ; /* ok */
3554 else
3555 abort ();
3556 #endif
3557
3558 switch (GET_CODE (operands[3]))
3559 {
3560 case PLUS:
3561 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
3562 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
3563 p = "fiadd";
3564 else
3565 p = "fadd";
3566 break;
3567
3568 case MINUS:
3569 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
3570 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
3571 p = "fisub";
3572 else
3573 p = "fsub";
3574 break;
3575
3576 case MULT:
3577 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
3578 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
3579 p = "fimul";
3580 else
3581 p = "fmul";
3582 break;
3583
3584 case DIV:
3585 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
3586 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
3587 p = "fidiv";
3588 else
3589 p = "fdiv";
3590 break;
3591
3592 default:
3593 abort ();
3594 }
3595
3596 strcpy (buf, p);
3597
3598 switch (GET_CODE (operands[3]))
3599 {
3600 case MULT:
3601 case PLUS:
3602 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
3603 {
3604 rtx temp = operands[2];
3605 operands[2] = operands[1];
3606 operands[1] = temp;
3607 }
3608
3609       /* We know operands[0] == operands[1].  */
3610
3611 if (GET_CODE (operands[2]) == MEM)
3612 {
3613 p = "%z2\t%2";
3614 break;
3615 }
3616
3617 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
3618 {
3619 if (STACK_TOP_P (operands[0]))
3620 /* How is it that we are storing to a dead operand[2]?
3621 Well, presumably operands[1] is dead too. We can't
3622 store the result to st(0) as st(0) gets popped on this
3623 instruction. Instead store to operands[2] (which I
3624 think has to be st(1)). st(1) will be popped later.
3625 gcc <= 2.8.1 didn't have this check and generated
3626 assembly code that the Unixware assembler rejected. */
3627 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
3628 else
3629 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
3630 break;
3631 }
3632
3633 if (STACK_TOP_P (operands[0]))
3634 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
3635 else
3636 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
3637 break;
3638
3639 case MINUS:
3640 case DIV:
3641 if (GET_CODE (operands[1]) == MEM)
3642 {
3643 p = "r%z1\t%1";
3644 break;
3645 }
3646
3647 if (GET_CODE (operands[2]) == MEM)
3648 {
3649 p = "%z2\t%2";
3650 break;
3651 }
3652
3653 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
3654 {
3655 #if SYSV386_COMPAT
3656 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
3657 derived assemblers, confusingly reverse the direction of
3658 the operation for fsub{r} and fdiv{r} when the
3659 destination register is not st(0). The Intel assembler
3660 doesn't have this brain damage. Read !SYSV386_COMPAT to
3661 figure out what the hardware really does. */
3662 if (STACK_TOP_P (operands[0]))
3663 p = "{p\t%0, %2|rp\t%2, %0}";
3664 else
3665 p = "{rp\t%2, %0|p\t%0, %2}";
3666 #else
3667 if (STACK_TOP_P (operands[0]))
3668 /* As above for fmul/fadd, we can't store to st(0). */
3669 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
3670 else
3671 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
3672 #endif
3673 break;
3674 }
3675
3676 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
3677 {
3678 #if SYSV386_COMPAT
3679 if (STACK_TOP_P (operands[0]))
3680 p = "{rp\t%0, %1|p\t%1, %0}";
3681 else
3682 p = "{p\t%1, %0|rp\t%0, %1}";
3683 #else
3684 if (STACK_TOP_P (operands[0]))
3685 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
3686 else
3687 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
3688 #endif
3689 break;
3690 }
3691
3692 if (STACK_TOP_P (operands[0]))
3693 {
3694 if (STACK_TOP_P (operands[1]))
3695 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
3696 else
3697 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
3698 break;
3699 }
3700 else if (STACK_TOP_P (operands[1]))
3701 {
3702 #if SYSV386_COMPAT
3703 p = "{\t%1, %0|r\t%0, %1}";
3704 #else
3705 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
3706 #endif
3707 }
3708 else
3709 {
3710 #if SYSV386_COMPAT
3711 p = "{r\t%2, %0|\t%0, %2}";
3712 #else
3713 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
3714 #endif
3715 }
3716 break;
3717
3718 default:
3719 abort ();
3720 }
3721
3722 strcat (buf, p);
3723 return buf;
3724 }
3725
3726 /* Output code for INSN to convert a float to a signed int. OPERANDS
3727 are the insn operands. The output may be [HSD]Imode and the input
3728 operand may be [SDX]Fmode. */
3729
3730 const char *
3731 output_fix_trunc (insn, operands)
3732 rtx insn;
3733 rtx *operands;
3734 {
3735 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
3736 int dimode_p = GET_MODE (operands[0]) == DImode;
3737 rtx xops[4];
3738
3739 /* Jump through a hoop or two for DImode, since the hardware has no
3740 non-popping instruction. We used to do this a different way, but
3741 that was somewhat fragile and broke with post-reload splitters. */
3742 if (dimode_p && !stack_top_dies)
3743 output_asm_insn ("fld\t%y1", operands);
3744
3745 if (! STACK_TOP_P (operands[1]))
3746 abort ();
3747
3748 xops[0] = GEN_INT (12);
3749 xops[1] = adj_offsettable_operand (operands[2], 1);
3750 xops[1] = change_address (xops[1], QImode, NULL_RTX);
3751
3752 xops[2] = operands[0];
3753 if (GET_CODE (operands[0]) != MEM)
3754 xops[2] = operands[3];
3755
3756 output_asm_insn ("fnstcw\t%2", operands);
3757 output_asm_insn ("mov{l}\t{%2, %4|%4, %2}", operands);
3758 output_asm_insn ("mov{b}\t{%0, %1|%1, %0}", xops);
3759 output_asm_insn ("fldcw\t%2", operands);
3760 output_asm_insn ("mov{l}\t{%4, %2|%2, %4}", operands);
3761
3762 if (stack_top_dies || dimode_p)
3763 output_asm_insn ("fistp%z2\t%2", xops);
3764 else
3765 output_asm_insn ("fist%z2\t%2", xops);
3766
3767 output_asm_insn ("fldcw\t%2", operands);
3768
3769 if (GET_CODE (operands[0]) != MEM)
3770 {
3771 if (dimode_p)
3772 {
3773 split_di (operands+0, 1, xops+0, xops+1);
3774 split_di (operands+3, 1, xops+2, xops+3);
3775 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
3776 output_asm_insn ("mov{l}\t{%3, %1|%1, %3}", xops);
3777 }
3778 else if (GET_MODE (operands[0]) == SImode)
3779 output_asm_insn ("mov{l}\t{%3, %0|%0, %3}", operands);
3780 else
3781 output_asm_insn ("mov{w}\t{%3, %0|%0, %3}", operands);
3782 }
3783
3784 return "";
3785 }
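/* For an SImode destination in memory, the sequence emitted above
   corresponds roughly to (AT&T syntax; a sketch, with CW the control
   word slot and SCRATCH an integer register):

     fnstcw  CW            # save the current control word
     movl    CW, SCRATCH   # keep a copy of the original image
     movb    $12, CW+1     # set the rounding control bits to "truncate"
     fldcw   CW            # activate truncating mode
     movl    SCRATCH, CW   # put the original image back in memory
     fistpl  DST           # store the integer (fist if st(0) survives)
     fldcw   CW            # restore the original rounding mode  */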
3786
3787 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
3788 should be used and 2 when fnstsw should be used. UNORDERED_P is true
3789 when fucom should be used. */
3790
3791 const char *
3792 output_fp_compare (insn, operands, eflags_p, unordered_p)
3793 rtx insn;
3794 rtx *operands;
3795 int eflags_p, unordered_p;
3796 {
3797 int stack_top_dies;
3798 rtx cmp_op0 = operands[0];
3799 rtx cmp_op1 = operands[1];
3800
3801 if (eflags_p == 2)
3802 {
3803 cmp_op0 = cmp_op1;
3804 cmp_op1 = operands[2];
3805 }
3806
3807 if (! STACK_TOP_P (cmp_op0))
3808 abort ();
3809
3810 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
3811
3812 if (STACK_REG_P (cmp_op1)
3813 && stack_top_dies
3814 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
3815 && REGNO (cmp_op1) != FIRST_STACK_REG)
3816 {
3817       /* If the top of the 387 stack dies, and the other operand
3818          is also a stack register that dies, then this must be a
3819          `fcompp' float compare.  */
3820
3821 if (eflags_p == 1)
3822 {
3823 /* There is no double popping fcomi variant. Fortunately,
3824 eflags is immune from the fstp's cc clobbering. */
3825 if (unordered_p)
3826 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
3827 else
3828 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
3829 return "fstp\t%y0";
3830 }
3831 else
3832 {
3833 if (eflags_p == 2)
3834 {
3835 if (unordered_p)
3836 return "fucompp\n\tfnstsw\t%0";
3837 else
3838 return "fcompp\n\tfnstsw\t%0";
3839 }
3840 else
3841 {
3842 if (unordered_p)
3843 return "fucompp";
3844 else
3845 return "fcompp";
3846 }
3847 }
3848 }
3849 else
3850 {
3851 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
3852
3853 static const char * const alt[24] =
3854 {
3855 "fcom%z1\t%y1",
3856 "fcomp%z1\t%y1",
3857 "fucom%z1\t%y1",
3858 "fucomp%z1\t%y1",
3859
3860 "ficom%z1\t%y1",
3861 "ficomp%z1\t%y1",
3862 NULL,
3863 NULL,
3864
3865 "fcomi\t{%y1, %0|%0, %y1}",
3866 "fcomip\t{%y1, %0|%0, %y1}",
3867 "fucomi\t{%y1, %0|%0, %y1}",
3868 "fucomip\t{%y1, %0|%0, %y1}",
3869
3870 NULL,
3871 NULL,
3872 NULL,
3873 NULL,
3874
3875 "fcom%z2\t%y2\n\tfnstsw\t%0",
3876 "fcomp%z2\t%y2\n\tfnstsw\t%0",
3877 "fucom%z2\t%y2\n\tfnstsw\t%0",
3878 "fucomp%z2\t%y2\n\tfnstsw\t%0",
3879
3880 "ficom%z2\t%y2\n\tfnstsw\t%0",
3881 "ficomp%z2\t%y2\n\tfnstsw\t%0",
3882 NULL,
3883 NULL
3884 };
3885
3886 int mask;
3887 const char *ret;
3888
3889 mask = eflags_p << 3;
3890 mask |= (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT) << 2;
3891 mask |= unordered_p << 1;
3892 mask |= stack_top_dies;
3893
3894 if (mask >= 24)
3895 abort ();
3896 ret = alt[mask];
3897 if (ret == NULL)
3898 abort ();
3899
3900 return ret;
3901 }
3902 }
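/* A worked example of the mask encoding above: for fcomi (eflags_p == 1)
   with a float operand (intmode 0), an ordered compare (unordered_p 0)
   and a dying stack top, mask == (1 << 3) | 0 | 0 | 1 == 9, which selects
   "fcomip\t{%y1, %0|%0, %y1}" from the table.  */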
3903
3904 /* Output assembler code to FILE to initialize basic-block profiling.
3905
3906 If profile_block_flag == 2
3907
3908 Output code to call the subroutine `__bb_init_trace_func'
3909 and pass two parameters to it. The first parameter is
3910 the address of a block allocated in the object module.
3911 The second parameter is the number of the first basic block
3912 of the function.
3913
3914 The name of the block is a local symbol made with this statement:
3915
3916 ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
3917
3918 Of course, since you are writing the definition of
3919 `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
3920 can take a short cut in the definition of this macro and use the
3921 name that you know will result.
3922
3923 The number of the first basic block of the function is
3924 passed to the macro in BLOCK_OR_LABEL.
3925
3926 If described in a virtual assembler language the code to be
3927 output looks like:
3928
3929 parameter1 <- LPBX0
3930 parameter2 <- BLOCK_OR_LABEL
3931 call __bb_init_trace_func
3932
3933 else if profile_block_flag != 0
3934
3935 Output code to call the subroutine `__bb_init_func'
3936 and pass one single parameter to it, which is the same
3937 as the first parameter to `__bb_init_trace_func'.
3938
3939 The first word of this parameter is a flag which will be nonzero if
3940 the object module has already been initialized. So test this word
3941 first, and do not call `__bb_init_func' if the flag is nonzero.
3942 Note: When profile_block_flag == 2 the test need not be done
3943 but `__bb_init_trace_func' *must* be called.
3944
3945 BLOCK_OR_LABEL may be used to generate a label number as a
3946 branch destination in case `__bb_init_func' will not be called.
3947
3948 If described in a virtual assembler language the code to be
3949 output looks like:
3950
3951 cmp (LPBX0),0
3952 jne local_label
3953 parameter1 <- LPBX0
3954 call __bb_init_func
3955 local_label:
3956 */
3957
3958 void
3959 ix86_output_function_block_profiler (file, block_or_label)
3960 FILE *file;
3961 int block_or_label;
3962 {
3963 static int num_func = 0;
3964 rtx xops[8];
3965 char block_table[80], false_label[80];
3966
3967 ASM_GENERATE_INTERNAL_LABEL (block_table, "LPBX", 0);
3968
3969 xops[1] = gen_rtx_SYMBOL_REF (VOIDmode, block_table);
3970 xops[5] = stack_pointer_rtx;
3971 xops[7] = gen_rtx_REG (Pmode, 0); /* eax */
3972
3973 CONSTANT_POOL_ADDRESS_P (xops[1]) = TRUE;
3974
3975 switch (profile_block_flag)
3976 {
3977 case 2:
3978 xops[2] = GEN_INT (block_or_label);
3979 xops[3] = gen_rtx_MEM (Pmode,
3980 gen_rtx_SYMBOL_REF (VOIDmode, "__bb_init_trace_func"));
3981 xops[6] = GEN_INT (8);
3982
3983 output_asm_insn ("push{l}\t%2", xops);
3984 if (!flag_pic)
3985 output_asm_insn ("push{l}\t%1", xops);
3986 else
3987 {
3988 output_asm_insn ("lea{l}\t{%a1, %7|%7, %a1}", xops);
3989 output_asm_insn ("push{l}\t%7", xops);
3990 }
3991 output_asm_insn ("call\t%P3", xops);
3992 output_asm_insn ("add{l}\t{%6, %5|%5, %6}", xops);
3993 break;
3994
3995 default:
3996 ASM_GENERATE_INTERNAL_LABEL (false_label, "LPBZ", num_func);
3997
3998 xops[0] = const0_rtx;
3999 xops[2] = gen_rtx_MEM (Pmode,
4000 gen_rtx_SYMBOL_REF (VOIDmode, false_label));
4001 xops[3] = gen_rtx_MEM (Pmode,
4002 gen_rtx_SYMBOL_REF (VOIDmode, "__bb_init_func"));
4003 xops[4] = gen_rtx_MEM (Pmode, xops[1]);
4004 xops[6] = GEN_INT (4);
4005
4006 CONSTANT_POOL_ADDRESS_P (xops[2]) = TRUE;
4007
4008 output_asm_insn ("cmp{l}\t{%0, %4|%4, %0}", xops);
4009 output_asm_insn ("jne\t%2", xops);
4010
4011 if (!flag_pic)
4012 output_asm_insn ("push{l}\t%1", xops);
4013 else
4014 {
4015 output_asm_insn ("lea{l}\t{%a1, %7|%7, %a1}", xops);
4016 output_asm_insn ("push{l}\t%7", xops);
4017 }
4018 output_asm_insn ("call\t%P3", xops);
4019 output_asm_insn ("add{l}\t{%6, %5|%5, %6}", xops);
4020 ASM_OUTPUT_INTERNAL_LABEL (file, "LPBZ", num_func);
4021 num_func++;
4022 break;
4023 }
4024 }
4025
4026 /* Output assembler code to FILE to increment a counter associated
4027 with basic block number BLOCKNO.
4028
4029 If profile_block_flag == 2
4030
4031 Output code to initialize the global structure `__bb' and
4032 call the function `__bb_trace_func' which will increment the
4033 counter.
4034
4035 `__bb' consists of two words. In the first word the number
4036 of the basic block has to be stored. In the second word
4037 the address of a block allocated in the object module
4038 has to be stored.
4039
4040 The basic block number is given by BLOCKNO.
4041
4042 The address of the block is given by the label created with
4043
4044 ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
4045
4046 by FUNCTION_BLOCK_PROFILER.
4047
4048 Of course, since you are writing the definition of
4049 `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
4050 can take a short cut in the definition of this macro and use the
4051 name that you know will result.
4052
4053 If described in a virtual assembler language the code to be
4054 output looks like:
4055
4056 move BLOCKNO -> (__bb)
4057 move LPBX0 -> (__bb+4)
4058 call __bb_trace_func
4059
4060 Note that function `__bb_trace_func' must not change the
4061 machine state, especially the flag register. To guarantee
4062 this, you must output code to save and restore registers
4063 either in this macro or in the macros MACHINE_STATE_SAVE
4064 and MACHINE_STATE_RESTORE. The last two macros will be
4065 used in the function `__bb_trace_func', so you must make
4066 sure that the function prologue does not change any
4067 register prior to saving it with MACHINE_STATE_SAVE.
4068
4069 else if profile_block_flag != 0
4070
4071 Output code to increment the counter directly.
4072 Basic blocks are numbered separately from zero within each
4073 compiled object module. The count associated with block number
4074 BLOCKNO is at index BLOCKNO in an array of words; the name of
4075 this array is a local symbol made with this statement:
4076
4077 ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 2);
4078
4079 Of course, since you are writing the definition of
4080 `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
4081 can take a short cut in the definition of this macro and use the
4082 name that you know will result.
4083
4084 If described in a virtual assembler language the code to be
4085 output looks like:
4086
4087 inc (LPBX2+4*BLOCKNO)
4088 */
4089
4090 void
4091 ix86_output_block_profiler (file, blockno)
4092 FILE *file ATTRIBUTE_UNUSED;
4093 int blockno;
4094 {
4095 rtx xops[8], cnt_rtx;
4096 char counts[80];
4097 char *block_table = counts;
4098
4099 switch (profile_block_flag)
4100 {
4101 case 2:
4102 ASM_GENERATE_INTERNAL_LABEL (block_table, "LPBX", 0);
4103
4104 xops[1] = gen_rtx_SYMBOL_REF (VOIDmode, block_table);
4105 xops[2] = GEN_INT (blockno);
4106 xops[3] = gen_rtx_MEM (Pmode,
4107 gen_rtx_SYMBOL_REF (VOIDmode, "__bb_trace_func"));
4108 xops[4] = gen_rtx_SYMBOL_REF (VOIDmode, "__bb");
4109 xops[5] = plus_constant (xops[4], 4);
4110 xops[0] = gen_rtx_MEM (SImode, xops[4]);
4111 xops[6] = gen_rtx_MEM (SImode, xops[5]);
4112
4113 CONSTANT_POOL_ADDRESS_P (xops[1]) = TRUE;
4114
4115 output_asm_insn ("pushf", xops);
4116 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4117 if (flag_pic)
4118 {
4119 xops[7] = gen_rtx_REG (Pmode, 0); /* eax */
4120 output_asm_insn ("push{l}\t%7", xops);
4121 output_asm_insn ("lea{l}\t{%a1, %7|%7, %a1}", xops);
4122 output_asm_insn ("mov{l}\t{%7, %6|%6, %7}", xops);
4123 output_asm_insn ("pop{l}\t%7", xops);
4124 }
4125 else
4126 output_asm_insn ("mov{l}\t{%1, %6|%6, %1}", xops);
4127 output_asm_insn ("call\t%P3", xops);
4128 output_asm_insn ("popf", xops);
4129
4130 break;
4131
4132 default:
4133 ASM_GENERATE_INTERNAL_LABEL (counts, "LPBX", 2);
4134 cnt_rtx = gen_rtx_SYMBOL_REF (VOIDmode, counts);
4135 SYMBOL_REF_FLAG (cnt_rtx) = TRUE;
4136
4137 if (blockno)
4138 cnt_rtx = plus_constant (cnt_rtx, blockno*4);
4139
4140 if (flag_pic)
4141 cnt_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, cnt_rtx);
4142
4143 xops[0] = gen_rtx_MEM (SImode, cnt_rtx);
4144 output_asm_insn ("inc{l}\t%0", xops);
4145
4146 break;
4147 }
4148 }
4149 \f
4150 void
4151 ix86_expand_move (mode, operands)
4152 enum machine_mode mode;
4153 rtx operands[];
4154 {
4155 int strict = (reload_in_progress || reload_completed);
4156 rtx insn;
4157
4158 if (flag_pic && mode == Pmode && symbolic_operand (operands[1], Pmode))
4159 {
4160 /* Emit insns to move operands[1] into operands[0]. */
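/* Under -fPIC a symbolic address cannot be used as an immediate;
legitimize_pic_address typically rewrites it as a load through the
global offset table, e.g. "movl foo@GOT(%ebx), %eax" for a global
`foo' (a sketch; the exact form depends on the symbol). */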
4161
4162 if (GET_CODE (operands[0]) == MEM)
4163 operands[1] = force_reg (Pmode, operands[1]);
4164 else
4165 {
4166 rtx temp = operands[0];
4167 if (GET_CODE (temp) != REG)
4168 temp = gen_reg_rtx (Pmode);
4169 temp = legitimize_pic_address (operands[1], temp);
4170 if (temp == operands[0])
4171 return;
4172 operands[1] = temp;
4173 }
4174 }
4175 else
4176 {
4177 if (GET_CODE (operands[0]) == MEM
4178 && (GET_MODE (operands[0]) == QImode
4179 || !push_operand (operands[0], mode))
4180 && GET_CODE (operands[1]) == MEM)
4181 operands[1] = force_reg (mode, operands[1]);
4182
4183 if (push_operand (operands[0], mode)
4184 && ! general_no_elim_operand (operands[1], mode))
4185 operands[1] = copy_to_mode_reg (mode, operands[1]);
4186
4187 if (FLOAT_MODE_P (mode))
4188 {
4189 /* If we are loading a floating point constant to a register,
4190 force the value to memory now, since we'll get better code
4191 out of the back end. */
4192
4193 if (strict)
4194 ;
4195 else if (GET_CODE (operands[1]) == CONST_DOUBLE
4196 && register_operand (operands[0], mode))
4197 operands[1] = validize_mem (force_const_mem (mode, operands[1]));
4198 }
4199 }
4200
4201 insn = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
4202
4203 emit_insn (insn);
4204 }
4205
4206 /* Attempt to expand a binary operator. Make the expansion closer to the
4207 actual machine than just general_operand, which would allow 3 separate
4208 memory references (one output, two inputs) in a single insn. */
4209
4210 void
4211 ix86_expand_binary_operator (code, mode, operands)
4212 enum rtx_code code;
4213 enum machine_mode mode;
4214 rtx operands[];
4215 {
4216 int matching_memory;
4217 rtx src1, src2, dst, op, clob;
4218
4219 dst = operands[0];
4220 src1 = operands[1];
4221 src2 = operands[2];
4222
4223 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
4224 if (GET_RTX_CLASS (code) == 'c'
4225 && (rtx_equal_p (dst, src2)
4226 || immediate_operand (src1, mode)))
4227 {
4228 rtx temp = src1;
4229 src1 = src2;
4230 src2 = temp;
4231 }
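/* E.g. dst = 7 + dst is swapped to src1 = dst, src2 = 7, so the
matching-operand tests below can see dst == src1. */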
4232
4233 /* If the destination is memory, and we do not have matching source
4234 operands, do things in registers. */
4235 matching_memory = 0;
4236 if (GET_CODE (dst) == MEM)
4237 {
4238 if (rtx_equal_p (dst, src1))
4239 matching_memory = 1;
4240 else if (GET_RTX_CLASS (code) == 'c'
4241 && rtx_equal_p (dst, src2))
4242 matching_memory = 2;
4243 else
4244 dst = gen_reg_rtx (mode);
4245 }
4246
4247 /* Both source operands cannot be in memory. */
4248 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
4249 {
4250 if (matching_memory != 2)
4251 src2 = force_reg (mode, src2);
4252 else
4253 src1 = force_reg (mode, src1);
4254 }
4255
4256 /* If the operation is not commutative, source 1 cannot be a constant
4257 or non-matching memory. */
4258 if ((CONSTANT_P (src1)
4259 || (!matching_memory && GET_CODE (src1) == MEM))
4260 && GET_RTX_CLASS (code) != 'c')
4261 src1 = force_reg (mode, src1);
4262
4263 /* If optimizing, copy to regs to improve CSE */
4264 if (optimize && ! no_new_pseudos)
4265 {
4266 if (GET_CODE (dst) == MEM)
4267 dst = gen_reg_rtx (mode);
4268 if (GET_CODE (src1) == MEM)
4269 src1 = force_reg (mode, src1);
4270 if (GET_CODE (src2) == MEM)
4271 src2 = force_reg (mode, src2);
4272 }
4273
4274 /* Emit the instruction. */
4275
4276 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
4277 if (reload_in_progress)
4278 {
4279 /* Reload doesn't know about the flags register, and doesn't know that
4280 it doesn't want to clobber it. We can only do this with PLUS. */
4281 if (code != PLUS)
4282 abort ();
4283 emit_insn (op);
4284 }
4285 else
4286 {
4287 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
4288 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
4289 }
4290
4291 /* Fix up the destination if needed. */
4292 if (dst != operands[0])
4293 emit_move_insn (operands[0], dst);
4294 }
4295
4296 /* Return TRUE or FALSE depending on whether the binary operator meets the
4297 appropriate constraints. */
4298
4299 int
4300 ix86_binary_operator_ok (code, mode, operands)
4301 enum rtx_code code;
4302 enum machine_mode mode ATTRIBUTE_UNUSED;
4303 rtx operands[3];
4304 {
4305 /* Both source operands cannot be in memory. */
4306 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
4307 return 0;
4308 /* If the operation is not commutative, source 1 cannot be a constant. */
4309 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != 'c')
4310 return 0;
4311 /* If the destination is memory, we must have a matching source operand. */
4312 if (GET_CODE (operands[0]) == MEM
4313 && ! (rtx_equal_p (operands[0], operands[1])
4314 || (GET_RTX_CLASS (code) == 'c'
4315 && rtx_equal_p (operands[0], operands[2]))))
4316 return 0;
4317 /* If the operation is not commutative and source 1 is memory, we must
4318 have a matching destination. */
4319 if (GET_CODE (operands[1]) == MEM
4320 && GET_RTX_CLASS (code) != 'c'
4321 && ! rtx_equal_p (operands[0], operands[1]))
4322 return 0;
4323 return 1;
4324 }
4325
4326 /* Attempt to expand a unary operator. Make the expansion closer to the
4327 actual machine than just general_operand, which would allow 2 separate
4328 memory references (one output, one input) in a single insn. */
4329
4330 void
4331 ix86_expand_unary_operator (code, mode, operands)
4332 enum rtx_code code;
4333 enum machine_mode mode;
4334 rtx operands[];
4335 {
4336 int matching_memory;
4337 rtx src, dst, op, clob;
4338
4339 dst = operands[0];
4340 src = operands[1];
4341
4342 /* If the destination is memory, and we do not have matching source
4343 operands, do things in registers. */
4344 matching_memory = 0;
4345 if (GET_CODE (dst) == MEM)
4346 {
4347 if (rtx_equal_p (dst, src))
4348 matching_memory = 1;
4349 else
4350 dst = gen_reg_rtx (mode);
4351 }
4352
4353 /* When source operand is memory, destination must match. */
4354 if (!matching_memory && GET_CODE (src) == MEM)
4355 src = force_reg (mode, src);
4356
4357 /* If optimizing, copy to regs to improve CSE */
4358 if (optimize && ! no_new_pseudos)
4359 {
4360 if (GET_CODE (dst) == MEM)
4361 dst = gen_reg_rtx (mode);
4362 if (GET_CODE (src) == MEM)
4363 src = force_reg (mode, src);
4364 }
4365
4366 /* Emit the instruction. */
4367
4368 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
4369 if (reload_in_progress || code == NOT)
4370 {
4371 /* Reload doesn't know about the flags register, and doesn't know that
4372 it doesn't want to clobber it. */
4373 if (code != NOT)
4374 abort ();
4375 emit_insn (op);
4376 }
4377 else
4378 {
4379 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
4380 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
4381 }
4382
4383 /* Fix up the destination if needed. */
4384 if (dst != operands[0])
4385 emit_move_insn (operands[0], dst);
4386 }
4387
4388 /* Return TRUE or FALSE depending on whether the unary operator meets the
4389 appropriate constraints. */
4390
4391 int
4392 ix86_unary_operator_ok (code, mode, operands)
4393 enum rtx_code code ATTRIBUTE_UNUSED;
4394 enum machine_mode mode ATTRIBUTE_UNUSED;
4395 rtx operands[2] ATTRIBUTE_UNUSED;
4396 {
4397 /* If one of operands is memory, source and destination must match. */
4398 if ((GET_CODE (operands[0]) == MEM
4399 || GET_CODE (operands[1]) == MEM)
4400 && ! rtx_equal_p (operands[0], operands[1]))
4401 return FALSE;
4402 return TRUE;
4403 }
4404
4405 /* Return TRUE or FALSE depending on whether the first SET in INSN
4406 has source and destination with matching CC modes, and that the
4407 CC mode is at least as constrained as REQ_MODE. */
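/* For example, a SET in CCZmode (the most constrained mode here)
satisfies any REQ_MODE, while a SET in plain CCmode satisfies only
a CCmode request. */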
4408
4409 int
4410 ix86_match_ccmode (insn, req_mode)
4411 rtx insn;
4412 enum machine_mode req_mode;
4413 {
4414 rtx set;
4415 enum machine_mode set_mode;
4416
4417 set = PATTERN (insn);
4418 if (GET_CODE (set) == PARALLEL)
4419 set = XVECEXP (set, 0, 0);
4420 if (GET_CODE (set) != SET)
4421 abort ();
4422
4423 set_mode = GET_MODE (SET_DEST (set));
4424 switch (set_mode)
4425 {
4426 case CCmode:
4427 if (req_mode == CCNOmode)
4428 return 0;
4429 /* FALLTHRU */
4430 case CCNOmode:
4431 if (req_mode == CCZmode)
4432 return 0;
4433 /* FALLTHRU */
4434 case CCZmode:
4435 break;
4436
4437 default:
4438 abort ();
4439 }
4440
4441 return (GET_MODE (SET_SRC (set)) == set_mode);
4442 }
4443
4444 /* Produce an unsigned comparison for a given signed comparison. */
4445
4446 static enum rtx_code
4447 unsigned_comparison (code)
4448 enum rtx_code code;
4449 {
4450 switch (code)
4451 {
4452 case GT:
4453 code = GTU;
4454 break;
4455 case LT:
4456 code = LTU;
4457 break;
4458 case GE:
4459 code = GEU;
4460 break;
4461 case LE:
4462 code = LEU;
4463 break;
4464 case EQ:
4465 case NE:
4466 case LEU:
4467 case LTU:
4468 case GEU:
4469 case GTU:
4470 case UNORDERED:
4471 case ORDERED:
4472 break;
4473 default:
4474 abort ();
4475 }
4476 return code;
4477 }
4478
4479 /* Generate insn patterns to do an integer compare of OPERANDS. */
4480
4481 static rtx
4482 ix86_expand_int_compare (code, op0, op1)
4483 enum rtx_code code;
4484 rtx op0, op1;
4485 {
4486 enum machine_mode cmpmode;
4487 rtx tmp, flags;
4488
4489 cmpmode = SELECT_CC_MODE (code, op0, op1);
4490 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
4491
4492 /* This is very simple, but making the interface the same as in the
4493 FP case makes the rest of the code easier. */
4494 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
4495 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
4496
4497 /* Return the test that should be put into the flags user, i.e.
4498 the bcc, scc, or cmov instruction. */
4499 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
4500 }
4501
4502 /* Figure out whether to use ordered or unordered fp comparisons.
4503 Return the appropriate mode to use. */
4504
4505 static enum machine_mode
4506 ix86_fp_compare_mode (code)
4507 enum rtx_code code;
4508 {
4509 int unordered;
4510
4511 switch (code)
4512 {
4513 case NE: case EQ:
4514 /* When not doing IEEE compliant compares, fault on NaNs. */
4515 unordered = (TARGET_IEEE_FP != 0);
4516 break;
4517
4518 case LT: case LE: case GT: case GE:
4519 unordered = 0;
4520 break;
4521
4522 case UNORDERED: case ORDERED:
4523 case UNEQ: case UNGE: case UNGT: case UNLE: case UNLT: case LTGT:
4524 unordered = 1;
4525 break;
4526
4527 default:
4528 abort ();
4529 }
4530
4531 /* ??? If we knew whether invalid-operand exceptions were masked,
4532 we could rely on fcom to raise an exception and take care of
4533 NaNs. But we don't. We could know this from C99 math pragmas. */
4534 if (TARGET_IEEE_FP)
4535 unordered = 1;
4536
4537 return unordered ? CCFPUmode : CCFPmode;
4538 }
4539
4540 /* Return true if we should use an FCOMI instruction for this fp comparison. */
4541
4542 static int
4543 ix86_use_fcomi_compare (code)
4544 enum rtx_code code;
4545 {
4546 return (TARGET_CMOVE
4547 && (code == ORDERED || code == UNORDERED
4548 /* All other unordered compares require checking
4549 multiple sets of bits. */
4550 || ix86_fp_compare_mode (code) == CCFPmode));
4551 }
4552
4553 /* Swap, force into registers, or otherwise massage the two operands
4554 to a fp comparison. The operands are updated in place; the new
4555 comparison code is returned. */
4556
4557 static enum rtx_code
4558 ix86_prepare_fp_compare_args (code, pop0, pop1)
4559 enum rtx_code code;
4560 rtx *pop0, *pop1;
4561 {
4562 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
4563 rtx op0 = *pop0, op1 = *pop1;
4564 enum machine_mode op_mode = GET_MODE (op0);
4565
4566 /* The unordered compare instructions, the XFmode compare
4567 instructions, and the fcomi compare instructions all work only
4568 on registers. */
4569
4570 if (fpcmp_mode == CCFPUmode
4571 || op_mode == XFmode
4572 || ix86_use_fcomi_compare (code))
4573 {
4574 op0 = force_reg (op_mode, op0);
4575 op1 = force_reg (op_mode, op1);
4576 }
4577 else
4578 {
4579 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
4580 things around if they appear profitable, otherwise force op0
4581 into a register. */
4582
4583 if (standard_80387_constant_p (op0) == 0
4584 || (GET_CODE (op0) == MEM
4585 && ! (standard_80387_constant_p (op1) == 0
4586 || GET_CODE (op1) == MEM)))
4587 {
4588 rtx tmp;
4589 tmp = op0, op0 = op1, op1 = tmp;
4590 code = swap_condition (code);
4591 }
4592
4593 if (GET_CODE (op0) != REG)
4594 op0 = force_reg (op_mode, op0);
4595
4596 if (CONSTANT_P (op1))
4597 {
4598 if (standard_80387_constant_p (op1))
4599 op1 = force_reg (op_mode, op1);
4600 else
4601 op1 = validize_mem (force_const_mem (op_mode, op1));
4602 }
4603 }
4604
4605 *pop0 = op0;
4606 *pop1 = op1;
4607 return code;
4608 }
4609
4610 /* Generate insn patterns to do a floating point compare of OPERANDS. */
4611
4612 rtx
4613 ix86_expand_fp_compare (code, op0, op1, scratch)
4614 enum rtx_code code;
4615 rtx op0, op1, scratch;
4616 {
4617 enum machine_mode fpcmp_mode, intcmp_mode;
4618 rtx tmp;
4619
4620 fpcmp_mode = ix86_fp_compare_mode (code);
4621 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
4622
4623 /* %%% fcomi is probably always faster, even when dealing with memory,
4624 since compare-and-branch would be three insns instead of four. */
4625 if (ix86_use_fcomi_compare (code))
4626 {
4627 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
4628 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG), tmp);
4629 emit_insn (tmp);
4630
4631 /* The FP codes work out to act like unsigned. */
4632 code = unsigned_comparison (code);
4633 intcmp_mode = CCmode;
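/* fcomi copies C3/C2/C0 into ZF/PF/CF, so "op0 < op1" shows up as
CF set -- exactly the unsigned "below" test. That is why LT maps
to LTU here, and similarly for the other codes. */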
4634 }
4635 else
4636 {
4637 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
4638
4639 rtx tmp2;
4640 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
4641 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), 9);
4642 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
4643
4644 if (fpcmp_mode == CCFPmode
4645 || code == ORDERED
4646 || code == UNORDERED)
4647 {
4648 /* We have two options here -- use sahf, or test bits of ah
4649 directly. On PPRO, they are equivalent, sahf being one byte
4650 smaller. On Pentium, sahf is non-pairable while test is UV
4651 pairable. */
4652
4653 if (TARGET_USE_SAHF || optimize_size)
4654 {
4655 do_sahf:
4656 emit_insn (gen_x86_sahf_1 (scratch));
4657
4658 /* The FP codes work out to act like unsigned. */
4659 code = unsigned_comparison (code);
4660 intcmp_mode = CCmode;
4661 }
4662 else
4663 {
4664 /*
4665 * The numbers below correspond to the bits of the FPSW in AH.
4666 * C3, C2, and C0 are in bits 0x40, 0x4, and 0x01 respectively.
4667 *
4668 * cmp C3 C2 C0
4669 * > 0 0 0
4670 * < 0 0 1
4671 * = 1 0 0
4672 * un 1 1 1
4673 */
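/* For example, the GT case below tests ah against 0x41 (C3|C0);
only the ">" row of the table has both bits clear, so ZF set
(tested with EQ) means "greater". */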
4674
4675 int mask;
4676
4677 switch (code)
4678 {
4679 case GT:
4680 mask = 0x41;
4681 code = EQ;
4682 break;
4683 case LT:
4684 mask = 0x01;
4685 code = NE;
4686 break;
4687 case GE:
4688 /* We'd have to use `xorb 1,ah; andb 0x41,ah', so it's
4689 faster in all cases to just fall back on sahf. */
4690 goto do_sahf;
4691 case LE:
4692 mask = 0x41;
4693 code = NE;
4694 break;
4695 case EQ:
4696 mask = 0x40;
4697 code = NE;
4698 break;
4699 case NE:
4700 mask = 0x40;
4701 code = EQ;
4702 break;
4703 case UNORDERED:
4704 mask = 0x04;
4705 code = NE;
4706 break;
4707 case ORDERED:
4708 mask = 0x04;
4709 code = EQ;
4710 break;
4711
4712 default:
4713 abort ();
4714 }
4715
4716 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (mask)));
4717 intcmp_mode = CCNOmode;
4718 }
4719 }
4720 else
4721 {
4722 /* In the unordered case, we have to check C2 for NaN's, which
4723 doesn't happen to work out to anything nice combination-wise.
4724 So do some bit twiddling on the value we've got in AH to come
4725 up with an appropriate set of condition codes. */
4726
4727 intcmp_mode = CCNOmode;
4728 switch (code)
4729 {
4730 case GT:
4731 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
4732 code = EQ;
4733 break;
4734 case LT:
4735 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
4736 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
4737 intcmp_mode = CCmode;
4738 code = EQ;
4739 break;
4740 case GE:
4741 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
4742 code = EQ;
4743 break;
4744 case LE:
4745 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
4746 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
4747 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
4748 intcmp_mode = CCmode;
4749 code = LTU;
4750 break;
4751 case EQ:
4752 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
4753 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
4754 intcmp_mode = CCmode;
4755 code = EQ;
4756 break;
4757 case NE:
4758 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
4759 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, GEN_INT (0x40)));
4760 code = NE;
4761 break;
4762
4763 case UNORDERED:
4764 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
4765 code = NE;
4766 break;
4767 case ORDERED:
4768 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
4769 code = EQ;
4770 break;
4771 case UNEQ:
4772 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
4773 code = NE;
4774 break;
4775 case UNGE:
4776 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
4777 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, GEN_INT (0x01)));
4778 code = NE;
4779 break;
4780 case UNGT:
4781 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
4782 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
4783 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
4784 code = GEU;
4785 break;
4786 case UNLE:
4787 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
4788 code = NE;
4789 break;
4790 case UNLT:
4791 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
4792 code = NE;
4793 break;
4794 case LTGT:
4795 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
4796 code = EQ;
4797 break;
4798
4799 default:
4800 abort ();
4801 }
4802 }
4803 }
4804
4805 /* Return the test that should be put into the flags user, i.e.
4806 the bcc, scc, or cmov instruction. */
4807 return gen_rtx_fmt_ee (code, VOIDmode,
4808 gen_rtx_REG (intcmp_mode, FLAGS_REG),
4809 const0_rtx);
4810 }
4811
4812 static rtx
4813 ix86_expand_compare (code)
4814 enum rtx_code code;
4815 {
4816 rtx op0, op1, ret;
4817 op0 = ix86_compare_op0;
4818 op1 = ix86_compare_op1;
4819
4820 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
4821 ret = ix86_expand_fp_compare (code, op0, op1, gen_reg_rtx (HImode));
4822 else
4823 ret = ix86_expand_int_compare (code, op0, op1);
4824
4825 return ret;
4826 }
4827
4828 void
4829 ix86_expand_branch (code, label)
4830 enum rtx_code code;
4831 rtx label;
4832 {
4833 rtx tmp;
4834
4835 switch (GET_MODE (ix86_compare_op0))
4836 {
4837 case QImode:
4838 case HImode:
4839 case SImode:
4840 tmp = ix86_expand_compare (code);
4841 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
4842 gen_rtx_LABEL_REF (VOIDmode, label),
4843 pc_rtx);
4844 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
4845 return;
4846
4847 case SFmode:
4848 case DFmode:
4849 case XFmode:
4850 /* Don't expand the comparison early, so that we get better code
4851 when jump or whoever decides to reverse the comparison. */
4852 {
4853 rtvec vec;
4854 int use_fcomi;
4855
4856 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
4857 &ix86_compare_op1);
4858
4859 tmp = gen_rtx_fmt_ee (code, ix86_fp_compare_mode (code),
4860 ix86_compare_op0, ix86_compare_op1);
4861 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
4862 gen_rtx_LABEL_REF (VOIDmode, label),
4863 pc_rtx);
4864 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
4865
4866 use_fcomi = ix86_use_fcomi_compare (code);
4867 vec = rtvec_alloc (3 + !use_fcomi);
4868 RTVEC_ELT (vec, 0) = tmp;
4869 RTVEC_ELT (vec, 1)
4870 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
4871 RTVEC_ELT (vec, 2)
4872 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
4873 if (! use_fcomi)
4874 RTVEC_ELT (vec, 3)
4875 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
4876
4877 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
4878 return;
4879 }
4880
4881 case DImode:
4882 /* Expand DImode branch into multiple compare+branch. */
4883 {
4884 rtx lo[2], hi[2], label2;
4885 enum rtx_code code1, code2, code3;
4886
4887 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
4888 {
4889 tmp = ix86_compare_op0;
4890 ix86_compare_op0 = ix86_compare_op1;
4891 ix86_compare_op1 = tmp;
4892 code = swap_condition (code);
4893 }
4894 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
4895 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
4896
4897 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
4898 avoid two branches. This costs one extra insn, so disable when
4899 optimizing for size. */
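/* The 64-bit equality thus folds into a single 32-bit compare of
((hi0 ^ hi1) | (lo0 ^ lo1)) against zero. When one half of the
constant is zero, the corresponding xor disappears, which is why
those cases are still allowed when optimizing for size. */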
4900
4901 if ((code == EQ || code == NE)
4902 && (!optimize_size
4903 || hi[1] == const0_rtx || lo[1] == const0_rtx))
4904 {
4905 rtx xor0, xor1;
4906
4907 xor1 = hi[0];
4908 if (hi[1] != const0_rtx)
4909 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
4910 NULL_RTX, 0, OPTAB_WIDEN);
4911
4912 xor0 = lo[0];
4913 if (lo[1] != const0_rtx)
4914 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
4915 NULL_RTX, 0, OPTAB_WIDEN);
4916
4917 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
4918 NULL_RTX, 0, OPTAB_WIDEN);
4919
4920 ix86_compare_op0 = tmp;
4921 ix86_compare_op1 = const0_rtx;
4922 ix86_expand_branch (code, label);
4923 return;
4924 }
4925
4926 /* Otherwise, if we are doing a less-than and op1 is a constant whose
4927 low word is zero, then we can just examine the high word. */
4928
4929 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx
4930 && (code == LT || code == LTU))
4931 {
4932 ix86_compare_op0 = hi[0];
4933 ix86_compare_op1 = hi[1];
4934 ix86_expand_branch (code, label);
4935 return;
4936 }
4937
4938 /* Otherwise, we need two or three jumps. */
4939
4940 label2 = gen_label_rtx ();
4941
4942 code1 = code;
4943 code2 = swap_condition (code);
4944 code3 = unsigned_condition (code);
4945
4946 switch (code)
4947 {
4948 case LT: case GT: case LTU: case GTU:
4949 break;
4950
4951 case LE: code1 = LT; code2 = GT; break;
4952 case GE: code1 = GT; code2 = LT; break;
4953 case LEU: code1 = LTU; code2 = GTU; break;
4954 case GEU: code1 = GTU; code2 = LTU; break;
4955
4956 case EQ: code1 = NIL; code2 = NE; break;
4957 case NE: code2 = NIL; break;
4958
4959 default:
4960 abort ();
4961 }
4962
4963 /*
4964 * a < b =>
4965 * if (hi(a) < hi(b)) goto true;
4966 * if (hi(a) > hi(b)) goto false;
4967 * if (lo(a) < lo(b)) goto true;
4968 * false:
4969 */
4970
4971 ix86_compare_op0 = hi[0];
4972 ix86_compare_op1 = hi[1];
4973
4974 if (code1 != NIL)
4975 ix86_expand_branch (code1, label);
4976 if (code2 != NIL)
4977 ix86_expand_branch (code2, label2);
4978
4979 ix86_compare_op0 = lo[0];
4980 ix86_compare_op1 = lo[1];
4981 ix86_expand_branch (code3, label);
4982
4983 if (code2 != NIL)
4984 emit_label (label2);
4985 return;
4986 }
4987
4988 default:
4989 abort ();
4990 }
4991 }
4992
4993 int
4994 ix86_expand_setcc (code, dest)
4995 enum rtx_code code;
4996 rtx dest;
4997 {
4998 rtx ret, tmp;
4999 int type;
5000
5001 if (GET_MODE (ix86_compare_op0) == DImode)
5002 return 0; /* FAIL */
5003
5004 /* Three modes of generation:
5005 0 -- destination does not overlap compare sources:
5006 clear dest first, emit strict_low_part setcc.
5007 1 -- destination does overlap compare sources:
5008 emit subreg setcc, zero extend.
5009 2 -- destination is in QImode:
5010 emit setcc only.
5011 */
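/* Roughly (a sketch, not the literal output): type 0 emits
"xorl %eax,%eax ; cmpl ... ; sete %al", type 1 emits a setcc into
a QImode subreg followed by a movzbl, and type 2 is a bare setcc. */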
5012
5013 type = 0;
5014
5015 if (GET_MODE (dest) == QImode)
5016 type = 2;
5017 else if (reg_overlap_mentioned_p (dest, ix86_compare_op0)
5018 || reg_overlap_mentioned_p (dest, ix86_compare_op1))
5019 type = 1;
5020
5021 if (type == 0)
5022 emit_move_insn (dest, const0_rtx);
5023
5024 ret = ix86_expand_compare (code);
5025 PUT_MODE (ret, QImode);
5026
5027 tmp = dest;
5028 if (type == 0)
5029 {
5030 tmp = gen_lowpart (QImode, dest);
5031 tmp = gen_rtx_STRICT_LOW_PART (VOIDmode, tmp);
5032 }
5033 else if (type == 1)
5034 {
5035 if (!cse_not_expected)
5036 tmp = gen_reg_rtx (QImode);
5037 else
5038 tmp = gen_lowpart (QImode, dest);
5039 }
5040
5041 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
5042
5043 if (type == 1)
5044 {
5045 rtx clob;
5046
5047 tmp = gen_rtx_ZERO_EXTEND (GET_MODE (dest), tmp);
5048 tmp = gen_rtx_SET (VOIDmode, dest, tmp);
5049 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
5050 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
5051 emit_insn (tmp);
5052 }
5053
5054 return 1; /* DONE */
5055 }
5056
5057 int
5058 ix86_expand_int_movcc (operands)
5059 rtx operands[];
5060 {
5061 enum rtx_code code = GET_CODE (operands[1]), compare_code;
5062 rtx compare_seq, compare_op;
5063
5064 /* When the compare code is not LTU or GEU, we cannot use the sbbl
5065 idiom. When the comparison is against an immediate, we can convert
5066 it to LTU or GEU by adjusting the constant. */
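/* For example, (x <=u 4) becomes (x <u 5), and (x >u 4) becomes
(x >=u 5). */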
5067
5068 if ((code == LEU || code == GTU)
5069 && GET_CODE (ix86_compare_op1) == CONST_INT
5070 && GET_MODE (operands[0]) != HImode
5071 && (unsigned int)INTVAL (ix86_compare_op1) != 0xffffffff
5072 && GET_CODE (operands[2]) == CONST_INT
5073 && GET_CODE (operands[3]) == CONST_INT)
5074 {
5075 if (code == LEU)
5076 code = LTU;
5077 else
5078 code = GEU;
5079 ix86_compare_op1 = GEN_INT (INTVAL (ix86_compare_op1) + 1);
5080 }
5081
5082 start_sequence ();
5083 compare_op = ix86_expand_compare (code);
5084 compare_seq = gen_sequence ();
5085 end_sequence ();
5086
5087 compare_code = GET_CODE (compare_op);
5088
5089 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
5090 HImode insns, we'd be swallowed in word prefix ops. */
5091
5092 if (GET_MODE (operands[0]) != HImode
5093 && GET_CODE (operands[2]) == CONST_INT
5094 && GET_CODE (operands[3]) == CONST_INT)
5095 {
5096 rtx out = operands[0];
5097 HOST_WIDE_INT ct = INTVAL (operands[2]);
5098 HOST_WIDE_INT cf = INTVAL (operands[3]);
5099 HOST_WIDE_INT diff;
5100
5101 if (compare_code == LTU || compare_code == GEU)
5102 {
5103
5104 /* Detect overlap between destination and compare sources. */
5105 rtx tmp = out;
5106
5107 /* To simplify the rest of the code, restrict to the GEU case. */
5108 if (compare_code == LTU)
5109 {
5110 HOST_WIDE_INT tmp2 = ct;
5111 ct = cf;
5112 cf = tmp2;
5113 compare_code = reverse_condition (compare_code);
5114 code = reverse_condition (code);
5115 }
5116 diff = ct - cf;
5117
5118 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
5119 || reg_overlap_mentioned_p (out, ix86_compare_op1))
5120 tmp = gen_reg_rtx (SImode);
5121
5122 emit_insn (compare_seq);
5123 emit_insn (gen_x86_movsicc_0_m1 (tmp));
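/* The pattern above emits "sbbl tmp,tmp": tmp becomes -1 when the
compare set the carry flag (op0 <u op1, the !GEU case) and 0
otherwise. */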
5124
5125 if (diff == 1)
5126 {
5127 /*
5128 * cmpl op0,op1
5129 * sbbl dest,dest
5130 * [addl dest, ct]
5131 *
5132 * Size 5 - 8.
5133 */
5134 if (ct)
5135 emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (ct)));
5136 }
5137 else if (cf == -1)
5138 {
5139 /*
5140 * cmpl op0,op1
5141 * sbbl dest,dest
5142 * orl $ct, dest
5143 *
5144 * Size 8.
5145 */
5146 emit_insn (gen_iorsi3 (tmp, tmp, GEN_INT (ct)));
5147 }
5148 else if (diff == -1 && ct)
5149 {
5150 /*
5151 * cmpl op0,op1
5152 * sbbl dest,dest
5153 * xorl $-1, dest
5154 * [addl dest, cf]
5155 *
5156 * Size 8 - 11.
5157 */
5158 emit_insn (gen_one_cmplsi2 (tmp, tmp));
5159 if (cf)
5160 emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (cf)));
5161 }
5162 else
5163 {
5164 /*
5165 * cmpl op0,op1
5166 * sbbl dest,dest
5167 * andl cf - ct, dest
5168 * [addl dest, ct]
5169 *
5170 * Size 8 - 11.
5171 */
5172 emit_insn (gen_andsi3 (tmp, tmp, GEN_INT (cf - ct)));
5173 if (ct)
5174 emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (ct)));
5175 }
5176
5177 if (tmp != out)
5178 emit_move_insn (out, tmp);
5179
5180 return 1; /* DONE */
5181 }
5182
5183 diff = ct - cf;
5184 if (diff < 0)
5185 {
5186 HOST_WIDE_INT tmp;
5187 tmp = ct, ct = cf, cf = tmp;
5188 diff = -diff;
5189 compare_code = reverse_condition (compare_code);
5190 code = reverse_condition (code);
5191 }
5192 if (diff == 1 || diff == 2 || diff == 4 || diff == 8
5193 || diff == 3 || diff == 5 || diff == 9)
5194 {
5195 /*
5196 * xorl dest,dest
5197 * cmpl op1,op2
5198 * setcc dest
5199 * lea cf(dest*(ct-cf)),dest
5200 *
5201 * Size 14.
5202 *
5203 * This also catches the degenerate setcc-only case.
5204 */
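/* Worked example: ct == 5, cf == 2 gives diff == 3; setcc leaves
0 or 1 in dest, and "lea 2(dest,dest,2), dest" maps that to 2 or 5. */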
5205
5206 rtx tmp;
5207 int nops;
5208
5209 out = emit_store_flag (out, code, ix86_compare_op0,
5210 ix86_compare_op1, VOIDmode, 0, 1);
5211
5212 nops = 0;
5213 if (diff == 1)
5214 tmp = out;
5215 else
5216 {
5217 tmp = gen_rtx_MULT (SImode, out, GEN_INT (diff & ~1));
5218 nops++;
5219 if (diff & 1)
5220 {
5221 tmp = gen_rtx_PLUS (SImode, tmp, out);
5222 nops++;
5223 }
5224 }
5225 if (cf != 0)
5226 {
5227 tmp = gen_rtx_PLUS (SImode, tmp, GEN_INT (cf));
5228 nops++;
5229 }
5230 if (tmp != out)
5231 {
5232 if (nops == 0)
5233 emit_move_insn (out, tmp);
5234 else if (nops == 1)
5235 {
5236 rtx clob;
5237
5238 clob = gen_rtx_REG (CCmode, FLAGS_REG);
5239 clob = gen_rtx_CLOBBER (VOIDmode, clob);
5240
5241 tmp = gen_rtx_SET (VOIDmode, out, tmp);
5242 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
5243 emit_insn (tmp);
5244 }
5245 else
5246 emit_insn (gen_rtx_SET (VOIDmode, out, tmp));
5247 }
5248 if (out != operands[0])
5249 emit_move_insn (operands[0], out);
5250
5251 return 1; /* DONE */
5252 }
5253
5254 /*
5255 * General case: Jumpful:
5256 * xorl dest,dest cmpl op1, op2
5257 * cmpl op1, op2 movl ct, dest
5258 * setcc dest jcc 1f
5259 * decl dest movl cf, dest
5260 * andl (cf-ct),dest 1:
5261 * addl ct,dest
5262 *
5263 * Size 20. Size 14.
5264 *
5265 * This is reasonably steep, but branch mispredict costs are
5266 * high on modern cpus, so consider failing only if optimizing
5267 * for space.
5268 *
5269 * %%% Parameterize branch_cost on the tuning architecture, then
5270 * use that. The 80386 couldn't care less about mispredicts.
5271 */
5272
5273 if (!optimize_size && !TARGET_CMOVE)
5274 {
5275 if (ct == 0)
5276 {
5277 ct = cf;
5278 cf = 0;
5279 compare_code = reverse_condition (compare_code);
5280 code = reverse_condition (code);
5281 }
5282
5283 out = emit_store_flag (out, code, ix86_compare_op0,
5284 ix86_compare_op1, VOIDmode, 0, 1);
5285
5286 emit_insn (gen_addsi3 (out, out, constm1_rtx));
5287 emit_insn (gen_andsi3 (out, out, GEN_INT (cf-ct)));
5288 if (ct != 0)
5289 emit_insn (gen_addsi3 (out, out, GEN_INT (ct)));
5290 if (out != operands[0])
5291 emit_move_insn (operands[0], out);
5292
5293 return 1; /* DONE */
5294 }
5295 }
5296
5297 if (!TARGET_CMOVE)
5298 {
5299 /* Try a few things more with specific constants and a variable. */
5300
5301 optab op;
5302 rtx var, orig_out, out, tmp;
5303
5304 if (optimize_size)
5305 return 0; /* FAIL */
5306
5307 /* If one of the two operands is an interesting constant, load a
5308 0/-1 mask by recursing on this expander and combine the variable
5309 in with a logical operation. */
5309
5310 if (GET_CODE (operands[2]) == CONST_INT)
5311 {
5312 var = operands[3];
5313 if (INTVAL (operands[2]) == 0)
5314 operands[3] = constm1_rtx, op = and_optab;
5315 else if (INTVAL (operands[2]) == -1)
5316 operands[3] = const0_rtx, op = ior_optab;
5317 else
5318 return 0; /* FAIL */
5319 }
5320 else if (GET_CODE (operands[3]) == CONST_INT)
5321 {
5322 var = operands[2];
5323 if (INTVAL (operands[3]) == 0)
5324 operands[2] = constm1_rtx, op = and_optab;
5325 else if (INTVAL (operands[3]) == -1)
5326 operands[2] = const0_rtx, op = ior_optab;
5327 else
5328 return 0; /* FAIL */
5329 }
5330 else
5331 return 0; /* FAIL */
5332
5333 orig_out = operands[0];
5334 tmp = gen_reg_rtx (GET_MODE (orig_out));
5335 operands[0] = tmp;
5336
5337 /* Recurse to get the constant loaded. */
5338 if (ix86_expand_int_movcc (operands) == 0)
5339 return 0; /* FAIL */
5340
5341 /* Mask in the interesting variable. */
5342 out = expand_binop (GET_MODE (orig_out), op, var, tmp, orig_out, 0,
5343 OPTAB_WIDEN);
5344 if (out != orig_out)
5345 emit_move_insn (orig_out, out);
5346
5347 return 1; /* DONE */
5348 }
5349
5350 /*
5351 * For comparison with above,
5352 *
5353 * movl cf,dest
5354 * movl ct,tmp
5355 * cmpl op1,op2
5356 * cmovcc tmp,dest
5357 *
5358 * Size 15.
5359 */
5360
5361 if (! nonimmediate_operand (operands[2], GET_MODE (operands[0])))
5362 operands[2] = force_reg (GET_MODE (operands[0]), operands[2]);
5363 if (! nonimmediate_operand (operands[3], GET_MODE (operands[0])))
5364 operands[3] = force_reg (GET_MODE (operands[0]), operands[3]);
5365
5366 emit_insn (compare_seq);
5367 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
5368 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
5369 compare_op, operands[2],
5370 operands[3])));
5371
5372 return 1; /* DONE */
5373 }
5374
5375 int
5376 ix86_expand_fp_movcc (operands)
5377 rtx operands[];
5378 {
5379 enum rtx_code code;
5380 enum machine_mode mode;
5381 rtx tmp;
5382
5383 /* The floating point conditional move instructions don't directly
5384 support conditions resulting from a signed integer comparison. */
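/* So for a signed integer comparison we first materialize the
result into a QImode temporary with setcc, then test that
temporary against zero with NE, which fcmov can handle. */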
5385
5386 code = GET_CODE (operands[1]);
5387 switch (code)
5388 {
5389 case LT:
5390 case LE:
5391 case GE:
5392 case GT:
5393 tmp = gen_reg_rtx (QImode);
5394 ix86_expand_setcc (code, tmp);
5395 code = NE;
5396 ix86_compare_op0 = tmp;
5397 ix86_compare_op1 = const0_rtx;
5398 break;
5399
5400 default:
5401 break;
5402 }
5403
5404 mode = SELECT_CC_MODE (code, ix86_compare_op0, ix86_compare_op1);
5405 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (mode, FLAGS_REG),
5406 gen_rtx_COMPARE (mode,
5407 ix86_compare_op0,
5408 ix86_compare_op1)));
5409 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
5410 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
5411 gen_rtx_fmt_ee (code, VOIDmode,
5412 gen_rtx_REG (mode, FLAGS_REG),
5413 const0_rtx),
5414 operands[2],
5415 operands[3])));
5416
5417 return 1;
5418 }
5419
5420 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
5421 works for floating point operands and non-offsettable memories.
5422 For pushes, it returns just stack offsets; the values will be saved
5423 in the right order. At most three parts are generated. */
5424
5425 static void
5426 ix86_split_to_parts (operand, parts, mode)
5427 rtx operand;
5428 rtx *parts;
5429 enum machine_mode mode;
5430 {
5431 int size = GET_MODE_SIZE (mode) / 4;
5432
5433 if (size < 2 || size > 3)
5434 abort ();
5435
5436 /* Optimize constant-pool references into immediates. This is used by
5437 fp moves, which force all constants to memory to allow combining. */
5438
5439 if (GET_CODE (operand) == MEM
5440 && GET_CODE (XEXP (operand, 0)) == SYMBOL_REF
5441 && CONSTANT_POOL_ADDRESS_P (XEXP (operand, 0)))
5442 operand = get_pool_constant (XEXP (operand, 0));
5443
5444 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
5445 {
5446 /* The only non-offsettable memories we handle are pushes. */
5447 if (! push_operand (operand, VOIDmode))
5448 abort ();
5449
5450 PUT_MODE (operand, SImode);
5451 parts[0] = parts[1] = parts[2] = operand;
5452 }
5453 else
5454 {
5455 if (mode == DImode)
5456 split_di (&operand, 1, &parts[0], &parts[1]);
5457 else
5458 {
5459 if (REG_P (operand))
5460 {
5461 if (!reload_completed)
5462 abort ();
5463 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
5464 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
5465 if (size == 3)
5466 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
5467 }
5468 else if (offsettable_memref_p (operand))
5469 {
5470 PUT_MODE (operand, SImode);
5471 parts[0] = operand;
5472 parts[1] = adj_offsettable_operand (operand, 4);
5473 if (size == 3)
5474 parts[2] = adj_offsettable_operand (operand, 8);
5475 }
5476 else if (GET_CODE (operand) == CONST_DOUBLE)
5477 {
5478 REAL_VALUE_TYPE r;
5479 long l[3];
5480
5481 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
5482 switch (mode)
5483 {
5484 case XFmode:
5485 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
5486 parts[2] = GEN_INT (l[2]);
5487 break;
5488 case DFmode:
5489 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
5490 break;
5491 default:
5492 abort ();
5493 }
5494 parts[1] = GEN_INT (l[1]);
5495 parts[0] = GEN_INT (l[0]);
5496 }
5497 else
5498 abort ();
5499 }
5500 }
5501
5502 return;
5503 }
5504
5505 /* Emit insns to perform a move or push of DI, DF, and XF values.
5506 Return false when normal moves are needed; true when all required
5507 insns have been emitted. Operands 2-4 contain the input values
5508 in the correct order; operands 5-7 contain the output values. */
5509
5510 int
5511 ix86_split_long_move (operands1)
5512 rtx operands1[];
5513 {
5514 rtx part[2][3];
5515 rtx operands[2];
5516 int size = GET_MODE_SIZE (GET_MODE (operands1[0])) / 4;
5517 int push = 0;
5518 int collisions = 0;
5519
5520 /* Make our own copy to avoid clobbering the operands. */
5521 operands[0] = copy_rtx (operands1[0]);
5522 operands[1] = copy_rtx (operands1[1]);
5523
5524 if (size < 2 || size > 3)
5525 abort ();
5526
5527 /* The only non-offsettable memory we handle is a push. */
5528 if (push_operand (operands[0], VOIDmode))
5529 push = 1;
5530 else if (GET_CODE (operands[0]) == MEM
5531 && ! offsettable_memref_p (operands[0]))
5532 abort ();
5533
5534 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands1[0]));
5535 ix86_split_to_parts (operands[1], part[1], GET_MODE (operands1[0]));
5536
5537 /* When emitting a push, take care with source operands on the stack. */
5538 if (push && GET_CODE (operands[1]) == MEM
5539 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
5540 {
5541 if (size == 3)
5542 part[1][1] = part[1][2];
5543 part[1][0] = part[1][1];
5544 }
5545
5546 /* We need to do the copy in the right order in case an address register
5547 of the source overlaps the destination. */
5548 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
5549 {
5550 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
5551 collisions++;
5552 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
5553 collisions++;
5554 if (size == 3
5555 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
5556 collisions++;
5557
5558 /* Collision in the middle part can be handled by reordering. */
5559 if (collisions == 1 && size == 3
5560 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
5561 {
5562 rtx tmp;
5563 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
5564 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
5565 }
5566
5567 /* If there are more collisions, we can't handle them by reordering.
5568 Do an lea into the last part and use only one colliding move. */
5569 else if (collisions > 1)
5570 {
5571 collisions = 1;
5572 emit_insn (gen_rtx_SET (VOIDmode, part[0][size - 1],
5573 XEXP (part[1][0], 0)));
5574 part[1][0] = change_address (part[1][0], SImode, part[0][size - 1]);
5575 part[1][1] = adj_offsettable_operand (part[1][0], 4);
5576 if (size == 3)
5577 part[1][2] = adj_offsettable_operand (part[1][0], 8);
5578 }
5579 }
5580
5581 if (push)
5582 {
5583 if (size == 3)
5584 emit_insn (gen_push (part[1][2]));
5585 emit_insn (gen_push (part[1][1]));
5586 emit_insn (gen_push (part[1][0]));
5587 return 1;
5588 }
5589
5590 /* Choose the correct order so we do not overwrite the source before it is copied. */
5591 if ((REG_P (part[0][0])
5592 && REG_P (part[1][1])
5593 && (REGNO (part[0][0]) == REGNO (part[1][1])
5594 || (size == 3
5595 && REGNO (part[0][0]) == REGNO (part[1][2]))))
5596 || (collisions > 0
5597 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
5598 {
5599 if (size == 3)
5600 {
5601 operands1[2] = part[0][2];
5602 operands1[3] = part[0][1];
5603 operands1[4] = part[0][0];
5604 operands1[5] = part[1][2];
5605 operands1[6] = part[1][1];
5606 operands1[7] = part[1][0];
5607 }
5608 else
5609 {
5610 operands1[2] = part[0][1];
5611 operands1[3] = part[0][0];
5612 operands1[5] = part[1][1];
5613 operands1[6] = part[1][0];
5614 }
5615 }
5616 else
5617 {
5618 if (size == 3)
5619 {
5620 operands1[2] = part[0][0];
5621 operands1[3] = part[0][1];
5622 operands1[4] = part[0][2];
5623 operands1[5] = part[1][0];
5624 operands1[6] = part[1][1];
5625 operands1[7] = part[1][2];
5626 }
5627 else
5628 {
5629 operands1[2] = part[0][0];
5630 operands1[3] = part[0][1];
5631 operands1[5] = part[1][0];
5632 operands1[6] = part[1][1];
5633 }
5634 }
5635
5636 return 0;
5637 }
5638
5639 void
5640 ix86_split_ashldi (operands, scratch)
5641 rtx *operands, scratch;
5642 {
5643 rtx low[2], high[2];
5644 int count;
5645
5646 if (GET_CODE (operands[2]) == CONST_INT)
5647 {
5648 split_di (operands, 2, low, high);
5649 count = INTVAL (operands[2]) & 63;
5650
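/* A constant shift by 32 or more moves the low word wholly into the
high word; e.g. for count == 40, high = low << 8 and low = 0. */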
5651 if (count >= 32)
5652 {
5653 emit_move_insn (high[0], low[1]);
5654 emit_move_insn (low[0], const0_rtx);
5655
5656 if (count > 32)
5657 emit_insn (gen_ashlsi3 (high[0], high[0], GEN_INT (count - 32)));
5658 }
5659 else
5660 {
5661 if (!rtx_equal_p (operands[0], operands[1]))
5662 emit_move_insn (operands[0], operands[1]);
5663 emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
5664 emit_insn (gen_ashlsi3 (low[0], low[0], GEN_INT (count)));
5665 }
5666 }
5667 else
5668 {
5669 if (!rtx_equal_p (operands[0], operands[1]))
5670 emit_move_insn (operands[0], operands[1]);
5671
5672 split_di (operands, 1, low, high);
5673
5674 emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
5675 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
5676
5677 if (TARGET_CMOVE && (! no_new_pseudos || scratch))
5678 {
5679 if (! no_new_pseudos)
5680 scratch = force_reg (SImode, const0_rtx);
5681 else
5682 emit_move_insn (scratch, const0_rtx);
5683
5684 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2],
5685 scratch));
5686 }
5687 else
5688 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
5689 }
5690 }
5691
5692 void
5693 ix86_split_ashrdi (operands, scratch)
5694 rtx *operands, scratch;
5695 {
5696 rtx low[2], high[2];
5697 int count;
5698
5699 if (GET_CODE (operands[2]) == CONST_INT)
5700 {
5701 split_di (operands, 2, low, high);
5702 count = INTVAL (operands[2]) & 63;
5703
5704 if (count >= 32)
5705 {
5706 emit_move_insn (low[0], high[1]);
5707
5708 if (! reload_completed)
5709 emit_insn (gen_ashrsi3 (high[0], low[0], GEN_INT (31)));
5710 else
5711 {
5712 emit_move_insn (high[0], low[0]);
5713 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
5714 }
5715
5716 if (count > 32)
5717 emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
5718 }
5719 else
5720 {
5721 if (!rtx_equal_p (operands[0], operands[1]))
5722 emit_move_insn (operands[0], operands[1]);
5723 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
5724 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
5725 }
5726 }
5727 else
5728 {
5729 if (!rtx_equal_p (operands[0], operands[1]))
5730 emit_move_insn (operands[0], operands[1]);
5731
5732 split_di (operands, 1, low, high);
5733
5734 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
5735 emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
5736
5737 if (TARGET_CMOVE && (! no_new_pseudos || scratch))
5738 {
5739 if (! no_new_pseudos)
5740 scratch = gen_reg_rtx (SImode);
5741 emit_move_insn (scratch, high[0]);
5742 emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
5743 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
5744 scratch));
5745 }
5746 else
5747 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
5748 }
5749 }
5750
5751 void
5752 ix86_split_lshrdi (operands, scratch)
5753 rtx *operands, scratch;
5754 {
5755 rtx low[2], high[2];
5756 int count;
5757
5758 if (GET_CODE (operands[2]) == CONST_INT)
5759 {
5760 split_di (operands, 2, low, high);
5761 count = INTVAL (operands[2]) & 63;
5762
5763 if (count >= 32)
5764 {
5765 emit_move_insn (low[0], high[1]);
5766 emit_move_insn (high[0], const0_rtx);
5767
5768 if (count > 32)
5769 emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
5770 }
5771 else
5772 {
5773 if (!rtx_equal_p (operands[0], operands[1]))
5774 emit_move_insn (operands[0], operands[1]);
5775 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
5776 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
5777 }
5778 }
5779 else
5780 {
5781 if (!rtx_equal_p (operands[0], operands[1]))
5782 emit_move_insn (operands[0], operands[1]);
5783
5784 split_di (operands, 1, low, high);
5785
5786 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
5787 emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
5788
5789 /* Heh. By reversing the arguments, we can reuse this pattern. */
5790 if (TARGET_CMOVE && (! no_new_pseudos || scratch))
5791 {
5792 if (! no_new_pseudos)
5793 scratch = force_reg (SImode, const0_rtx);
5794 else
5795 emit_move_insn (scratch, const0_rtx);
5796
5797 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
5798 scratch));
5799 }
5800 else
5801 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
5802 }
5803 }
5804
5805 /* Expand the appropriate insns for doing strlen if not just doing
5806 repnz; scasb
5807
5808 out = result, initialized with the start address
5809 align_rtx = alignment of the address.
5810 scratch = scratch register, initialized with the start address when
5811 not aligned, otherwise undefined
5812
5813 This is just the body. It needs the initializations mentioned above and
5814 some address computation at the end. These things are done in i386.md. */
5815
5816 void
5817 ix86_expand_strlensi_unroll_1 (out, align_rtx, scratch)
5818 rtx out, align_rtx, scratch;
5819 {
5820 int align;
5821 rtx tmp;
5822 rtx align_2_label = NULL_RTX;
5823 rtx align_3_label = NULL_RTX;
5824 rtx align_4_label = gen_label_rtx ();
5825 rtx end_0_label = gen_label_rtx ();
5826 rtx mem;
5827 rtx no_flags = gen_rtx_REG (CCNOmode, FLAGS_REG);
5828 rtx z_flags = gen_rtx_REG (CCZmode, FLAGS_REG);
5829 rtx tmpreg = gen_reg_rtx (SImode);
5830
5831 align = 0;
5832 if (GET_CODE (align_rtx) == CONST_INT)
5833 align = INTVAL (align_rtx);
5834
5835 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
5836
5837 /* Is there a known alignment and is it less than 4? */
5838 if (align < 4)
5839 {
5840 /* Is there a known alignment and is it not 2? */
5841 if (align != 2)
5842 {
5843 align_3_label = gen_label_rtx (); /* Label when address is 3 mod 4 */
5844 align_2_label = gen_label_rtx (); /* Label when address is 2 mod 4 */
5845
5846 /* Leave just the two lower bits. */
5847 align_rtx = expand_binop (SImode, and_optab, scratch, GEN_INT (3),
5848 NULL_RTX, 0, OPTAB_WIDEN);
5849
5850 emit_insn (gen_cmpsi_ccz_1 (align_rtx, const0_rtx));
5851
5852 tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
5853 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
5854 gen_rtx_LABEL_REF (VOIDmode,
5855 align_4_label),
5856 pc_rtx);
5857 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
5858
5859 emit_insn (gen_cmpsi_ccno_1 (align_rtx, GEN_INT (2)));
5860
5861 tmp = gen_rtx_EQ (VOIDmode, no_flags, const0_rtx);
5862 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
5863 gen_rtx_LABEL_REF (VOIDmode,
5864 align_2_label),
5865 pc_rtx);
5866 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
5867
5868 tmp = gen_rtx_GTU (VOIDmode, no_flags, const0_rtx);
5869 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
5870 gen_rtx_LABEL_REF (VOIDmode,
5871 align_3_label),
5872 pc_rtx);
5873 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
5874 }
5875 else
5876 {
5877 /* Since the alignment is 2, we have to check either 2 or 0 bytes;
5878 check whether the address is already 4-byte aligned. */
5879
5880 align_rtx = expand_binop (SImode, and_optab, scratch, GEN_INT (2),
5881 NULL_RTX, 0, OPTAB_WIDEN);
5882
5883 emit_insn (gen_cmpsi_ccz_1 (align_rtx, const0_rtx));
5884
5885 tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
5886 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
5887 gen_rtx_LABEL_REF (VOIDmode,
5888 align_4_label),
5889 pc_rtx);
5890 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
5891 }
5892
5893 mem = gen_rtx_MEM (QImode, out);
5894
5895 /* Now compare the bytes. */
5896
5897 /* Compare the first n unaligned bytes one byte at a time. */
5898 emit_insn (gen_cmpqi_ccz_1 (mem, const0_rtx));
5899
5900 tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
5901 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
5902 gen_rtx_LABEL_REF (VOIDmode, end_0_label),
5903 pc_rtx);
5904 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
5905
5906 /* Increment the address. */
5907 emit_insn (gen_addsi3 (out, out, const1_rtx));
5908
5909 /* Not needed with an alignment of 2. */
5910 if (align != 2)
5911 {
5912 emit_label (align_2_label);
5913
5914 emit_insn (gen_cmpqi_ccz_1 (mem, const0_rtx));
5915
5916 tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
5917 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
5918 gen_rtx_LABEL_REF (VOIDmode,
5919 end_0_label),
5920 pc_rtx);
5921 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
5922
5923 emit_insn (gen_addsi3 (out, out, const1_rtx));
5924
5925 emit_label (align_3_label);
5926 }
5927
5928 emit_insn (gen_cmpqi_ccz_1 (mem, const0_rtx));
5929
5930 tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
5931 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
5932 gen_rtx_LABEL_REF (VOIDmode, end_0_label),
5933 pc_rtx);
5934 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
5935
5936 emit_insn (gen_addsi3 (out, out, const1_rtx));
5937 }
5938
5939 /* Generate a loop to check 4 bytes at a time. It is not a good idea
5940 to align this loop; doing so only enlarges the code and does not
5941 make it faster. */
5942 emit_label (align_4_label);
5943
5944 mem = gen_rtx_MEM (SImode, out);
5945 emit_move_insn (scratch, mem);
5946 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
5947
5948 /* This formula yields a nonzero result iff one of the bytes is zero.
5949 This saves three branches inside the loop and many cycles. */
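/* E.g. for x == 0x61006263, x - 0x01010101 == 0x5fff6162 and
~x == 0x9eff9d9c; their AND masked with 0x80808080 leaves
0x00800000, flagging the zero byte. */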
5950
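	  /* In essence, adding 0xfefefeff (== -0x01010101) subtracts one from
	     each byte, so a zero byte wraps to 0xff and gains a high bit;
	     ANDing with ~scratch then discards bytes whose high bit was
	     already set.  A worked example (a sketch, little-endian):
	       scratch              = 0x41424300   low byte is zero
	       scratch + 0xfefefeff = 0x404141ff
	       & ~scratch           = 0x000100ff
	       & 0x80808080         = 0x00000080   nonzero <=> a zero byte.  */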
5951 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
5952 emit_insn (gen_one_cmplsi2 (scratch, scratch));
5953 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
5954 emit_insn (gen_andsi3 (tmpreg, tmpreg, GEN_INT (0x80808080)));
5955 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1, 0, align_4_label);
5956
5957 if (TARGET_CMOVE)
5958 {
5959 rtx reg = gen_reg_rtx (SImode);
5960 emit_move_insn (reg, tmpreg);
5961 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
5962
5963 /* If zero is not in the first two bytes, move two bytes forward. */
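	      /* The 0x8080 mask covers the markers of the two low bytes of
		 the word just loaded; if neither is set, the zero byte is in
		 the upper half, so both the marker word and OUT advance by
		 two.  The cmov/lea/cmov sequence keeps this branch-free.  */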
5964 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
5965 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
5966 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
5967 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
5968 gen_rtx_IF_THEN_ELSE (SImode, tmp,
5969 reg,
5970 tmpreg)));
5971 	      /* Emit the lea manually to avoid clobbering the flags.  */
5972 emit_insn (gen_rtx_SET (SImode, reg,
5973 gen_rtx_PLUS (SImode, out, GEN_INT (2))));
5974
5975 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
5976 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
5977 emit_insn (gen_rtx_SET (VOIDmode, out,
5978 gen_rtx_IF_THEN_ELSE (SImode, tmp,
5979 reg,
5980 out)));
5981
5982 }
5983 else
5984 {
5985 rtx end_2_label = gen_label_rtx ();
5986 /* Is zero in the first two bytes? */
5987
5988 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
5989 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
5990 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
5991 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
5992 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
5993 pc_rtx);
5994 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
5995 JUMP_LABEL (tmp) = end_2_label;
5996
5997 /* Not in the first two. Move two bytes forward. */
5998 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
5999 emit_insn (gen_addsi3 (out, out, GEN_INT (2)));
6000
6001 emit_label (end_2_label);
6002
6003 }
6004
6005 /* Avoid branch in fixing the byte. */
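	  /* Here the zero byte is at OUT - 4 if the 0x80 marker sits in bit 7
	     of TMPREG, and at OUT - 3 otherwise.  Adding the low byte of
	     TMPREG to itself shifts that marker into the carry flag, so the
	     subtract-with-borrow backs OUT up by 4 or 3 as appropriate,
	     leaving it pointing at the terminating zero.  */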
6006 tmpreg = gen_lowpart (QImode, tmpreg);
6007 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
6008 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3)));
6009
6010 emit_label (end_0_label);
6011 }
6012 \f
6013 /* Clear stack slot assignments remembered from previous functions.
6014 This is called from INIT_EXPANDERS once before RTL is emitted for each
6015 function. */
6016
6017 static void
6018 ix86_init_machine_status (p)
6019 struct function *p;
6020 {
6021 enum machine_mode mode;
6022 int n;
6023 p->machine
6024 = (struct machine_function *) xmalloc (sizeof (struct machine_function));
6025
6026 for (mode = VOIDmode; (int) mode < (int) MAX_MACHINE_MODE;
6027 mode = (enum machine_mode) ((int) mode + 1))
6028 for (n = 0; n < MAX_386_STACK_LOCALS; n++)
6029 ix86_stack_locals[(int) mode][n] = NULL_RTX;
6030 }
6031
6032 /* Mark machine-specific bits of P for GC.  */
6033 static void
6034 ix86_mark_machine_status (p)
6035 struct function *p;
6036 {
6037 enum machine_mode mode;
6038 int n;
6039
6040 for (mode = VOIDmode; (int) mode < (int) MAX_MACHINE_MODE;
6041 mode = (enum machine_mode) ((int) mode + 1))
6042 for (n = 0; n < MAX_386_STACK_LOCALS; n++)
6043 ggc_mark_rtx (p->machine->stack_locals[(int) mode][n]);
6044 }
6045
6046 /* Return a MEM corresponding to a stack slot with mode MODE.
6047 Allocate a new slot if necessary.
6048
6049 The RTL for a function can have several slots available: N is
6050 which slot to use. */
6051
6052 rtx
6053 assign_386_stack_local (mode, n)
6054 enum machine_mode mode;
6055 int n;
6056 {
6057 if (n < 0 || n >= MAX_386_STACK_LOCALS)
6058 abort ();
6059
6060 if (ix86_stack_locals[(int) mode][n] == NULL_RTX)
6061 ix86_stack_locals[(int) mode][n]
6062 = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
6063
6064 return ix86_stack_locals[(int) mode][n];
6065 }
6066 \f
6067 /* Calculate the length of the memory address in the instruction
6068 encoding. Does not include the one-byte modrm, opcode, or prefix. */
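/* For illustration, the extra bytes this function accounts for
   (the modrm, opcode and prefixes are counted elsewhere):
     (%eax)           -> 0   plain register indirect
     (%esp)           -> 1   forces a SIB byte
     8(%eax)          -> 1   one-byte displacement
     8(%eax,%ebx,4)   -> 2   disp8 plus a SIB byte
     constant address -> 4   32-bit displacement  */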
6069
6070 static int
6071 memory_address_length (addr)
6072 rtx addr;
6073 {
6074 struct ix86_address parts;
6075 rtx base, index, disp;
6076 int len;
6077
6078 if (GET_CODE (addr) == PRE_DEC
6079 || GET_CODE (addr) == POST_INC)
6080 return 0;
6081
6082 if (! ix86_decompose_address (addr, &parts))
6083 abort ();
6084
6085 base = parts.base;
6086 index = parts.index;
6087 disp = parts.disp;
6088 len = 0;
6089
6090 /* Register Indirect. */
6091 if (base && !index && !disp)
6092 {
6093 /* Special cases: ebp and esp need the two-byte modrm form. */
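	      /* In the modrm byte, r/m = 4 means a SIB byte follows (so plain
		 (%esp) needs one), and mod = 0 with r/m = 5 means an absolute
		 disp32, so (%ebp) must be encoded with an explicit zero
		 disp8 instead.  */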
6094 if (addr == stack_pointer_rtx
6095 || addr == arg_pointer_rtx
6096 || addr == frame_pointer_rtx
6097 || addr == hard_frame_pointer_rtx)
6098 len = 1;
6099 }
6100
6101 /* Direct Addressing. */
6102 else if (disp && !base && !index)
6103 len = 4;
6104
6105 else
6106 {
6107 /* Find the length of the displacement constant. */
6108 if (disp)
6109 {
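	  /* The i386 'K' constraint letter matches signed 8-bit integers,
	     i.e. displacements that fit in a single byte.  */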
6110 if (GET_CODE (disp) == CONST_INT
6111 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K'))
6112 len = 1;
6113 else
6114 len = 4;
6115 }
6116
6117 /* An index requires the two-byte modrm form. */
6118 if (index)
6119 len += 1;
6120 }
6121
6122 return len;
6123 }
6124
6125 int
6126 ix86_attr_length_default (insn)
6127 rtx insn;
6128 {
6129 enum attr_type type;
6130 int len = 0, i;
6131
6132 type = get_attr_type (insn);
6133 extract_insn (insn);
6134 switch (type)
6135 {
6136 case TYPE_INCDEC:
6137 case TYPE_SETCC:
6138 case TYPE_ICMOV:
6139 case TYPE_FMOV:
6140 case TYPE_FOP:
6141 case TYPE_FCMP:
6142 case TYPE_FOP1:
6143 case TYPE_FMUL:
6144 case TYPE_FDIV:
6145 case TYPE_FSGN:
6146 case TYPE_FPSPC:
6147 case TYPE_FCMOV:
6148 case TYPE_IBR:
6149 break;
6150 case TYPE_STR:
6151 case TYPE_CLD:
6152 	      len = 0;
6153 	      /* FALLTHRU */
6154 case TYPE_ALU1:
6155 case TYPE_NEGNOT:
6156 case TYPE_ALU:
6157 case TYPE_ICMP:
6158 case TYPE_IMOVX:
6159 case TYPE_ISHIFT:
6160 case TYPE_IMUL:
6161 case TYPE_IDIV:
6162 case TYPE_PUSH:
6163 case TYPE_POP:
6164 for (i = recog_data.n_operands - 1; i >= 0; --i)
6165 if (CONSTANT_P (recog_data.operand[i]))
6166 {
6167 if (GET_CODE (recog_data.operand[i]) == CONST_INT
6168 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
6169 len += 1;
6170 else
6171 len += GET_MODE_SIZE (GET_MODE (recog_data.operand[0]));
6172 }
6173 break;
6174
6175 case TYPE_IMOV:
6176 if (CONSTANT_P (recog_data.operand[1]))
6177 len += GET_MODE_SIZE (GET_MODE (recog_data.operand[0]));
6178 break;
6179
6180 case TYPE_CALL:
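      /* A direct call is opcode 0xe8 plus a 32-bit pc-relative
	 displacement: 5 bytes in total.  */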
6181 if (constant_call_address_operand (recog_data.operand[0],
6182 GET_MODE (recog_data.operand[0])))
6183 return 5;
6184 break;
6185
6186 case TYPE_CALLV:
6187 if (constant_call_address_operand (recog_data.operand[1],
6188 GET_MODE (recog_data.operand[1])))
6189 return 5;
6190 break;
6191
6192 case TYPE_LEA:
6193 {
6194 /* Irritatingly, single_set doesn't work with REG_UNUSED present,
6195 as we'll get from running life_analysis during reg-stack when
6196 not optimizing. Not that it matters anyway, now that
6197 pro_epilogue_adjust_stack uses lea, and is by design not
6198 single_set. */
6199 rtx set = PATTERN (insn);
6200 if (GET_CODE (set) == SET)
6201 ;
6202 else if (GET_CODE (set) == PARALLEL
6203 && GET_CODE (XVECEXP (set, 0, 0)) == SET)
6204 set = XVECEXP (set, 0, 0);
6205 else
6206 abort ();
6207
6208 len += memory_address_length (SET_SRC (set));
6209 goto just_opcode;
6210 }
6211
6212 case TYPE_OTHER:
6213 case TYPE_MULTI:
6214 return 15;
6215
6216 case TYPE_FXCH:
6217 if (STACK_TOP_P (recog_data.operand[0]))
6218 return 2 + (REGNO (recog_data.operand[1]) != FIRST_STACK_REG + 1);
6219 else
6220 return 2 + (REGNO (recog_data.operand[0]) != FIRST_STACK_REG + 1);
6221
6222 default:
6223 abort ();
6224 }
6225
6226 for (i = recog_data.n_operands - 1; i >= 0; --i)
6227 if (GET_CODE (recog_data.operand[i]) == MEM)
6228 {
6229 len += memory_address_length (XEXP (recog_data.operand[i], 0));
6230 break;
6231 }
6232
6233 just_opcode:
6234 len += get_attr_length_opcode (insn);
6235 len += get_attr_length_prefix (insn);
6236
6237 return len;
6238 }
6239 \f
6240 /* Return the maximum number of instructions a cpu can issue. */
6241
6242 int
6243 ix86_issue_rate ()
6244 {
6245 switch (ix86_cpu)
6246 {
6247 case PROCESSOR_PENTIUM:
6248 case PROCESSOR_K6:
6249 return 2;
6250
6251 case PROCESSOR_PENTIUMPRO:
6252 return 3;
6253
6254 default:
6255 return 1;
6256 }
6257 }
6258
6259 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
6260    by DEP_INSN and nothing else that DEP_INSN sets.  */
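/* The classic instance is a compare (DEP_INSN, writing only the flags)
   feeding the conditional jump, setcc or cmov (INSN) that consumes them.  */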
6261
6262 static int
6263 ix86_flags_dependant (insn, dep_insn, insn_type)
6264 rtx insn, dep_insn;
6265 enum attr_type insn_type;
6266 {
6267 rtx set, set2;
6268
6269 /* Simplify the test for uninteresting insns. */
6270 if (insn_type != TYPE_SETCC
6271 && insn_type != TYPE_ICMOV
6272 && insn_type != TYPE_FCMOV
6273 && insn_type != TYPE_IBR)
6274 return 0;
6275
6276 if ((set = single_set (dep_insn)) != 0)
6277 {
6278 set = SET_DEST (set);
6279 set2 = NULL_RTX;
6280 }
6281 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
6282 && XVECLEN (PATTERN (dep_insn), 0) == 2
6283 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
6284 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
6285 {
6286 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
6287 	      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
6288 }
6289 else
6290 return 0;
6291
6292 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
6293 return 0;
6294
6295 	  /* This test is true if the dependent insn reads the flags but
6296 	     not any other potentially set register.  */
6297 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
6298 return 0;
6299
6300 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
6301 return 0;
6302
6303 return 1;
6304 }
6305
6306 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
6307 address with operands set by DEP_INSN. */
6308
6309 static int
6310 ix86_agi_dependant (insn, dep_insn, insn_type)
6311 rtx insn, dep_insn;
6312 enum attr_type insn_type;
6313 {
6314 rtx addr;
6315
6316 if (insn_type == TYPE_LEA)
6317 {
6318 addr = PATTERN (insn);
6319 if (GET_CODE (addr) == SET)
6320 ;
6321 else if (GET_CODE (addr) == PARALLEL
6322 && GET_CODE (XVECEXP (addr, 0, 0)) == SET)
6323 addr = XVECEXP (addr, 0, 0);
6324 else
6325 abort ();
6326 addr = SET_SRC (addr);
6327 }
6328 else
6329 {
6330 int i;
6331 extract_insn (insn);
6332 for (i = recog_data.n_operands - 1; i >= 0; --i)
6333 if (GET_CODE (recog_data.operand[i]) == MEM)
6334 {
6335 addr = XEXP (recog_data.operand[i], 0);
6336 goto found;
6337 }
6338 return 0;
6339 found:;
6340 }
6341
6342 return modified_in_p (addr, dep_insn);
6343 }
6344
6345 int
6346 ix86_adjust_cost (insn, link, dep_insn, cost)
6347 rtx insn, link, dep_insn;
6348 int cost;
6349 {
6350 enum attr_type insn_type, dep_insn_type;
6351 enum attr_memory memory;
6352 rtx set, set2;
6353 int dep_insn_code_number;
6354
6355 	  /* Anti and output dependencies have zero cost on all CPUs.  */
6356 if (REG_NOTE_KIND (link) != 0)
6357 return 0;
6358
6359 dep_insn_code_number = recog_memoized (dep_insn);
6360
6361 /* If we can't recognize the insns, we can't really do anything. */
6362 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
6363 return cost;
6364
6365 insn_type = get_attr_type (insn);
6366 dep_insn_type = get_attr_type (dep_insn);
6367
6368 	  /* Prologue and epilogue allocators can have a false dependency on ebp.
6369 	     This results in an extra one-cycle stall when scheduling the Pentium
6370 	     prologue, so handle this important case manually.  */
6371 if (dep_insn_code_number == CODE_FOR_pro_epilogue_adjust_stack
6372 && dep_insn_type == TYPE_ALU
6373 && !reg_mentioned_p (stack_pointer_rtx, insn))
6374 return 0;
6375
6376 switch (ix86_cpu)
6377 {
6378 case PROCESSOR_PENTIUM:
6379 /* Address Generation Interlock adds a cycle of latency. */
6380 if (ix86_agi_dependant (insn, dep_insn, insn_type))
6381 cost += 1;
6382
6383 /* ??? Compares pair with jump/setcc. */
6384 if (ix86_flags_dependant (insn, dep_insn, insn_type))
6385 cost = 0;
6386
6387 	      /* Floating point stores require the value to be ready one cycle earlier.  */
6388 if (insn_type == TYPE_FMOV
6389 && get_attr_memory (insn) == MEMORY_STORE
6390 && !ix86_agi_dependant (insn, dep_insn, insn_type))
6391 cost += 1;
6392 break;
6393
6394 case PROCESSOR_PENTIUMPRO:
6395 /* Since we can't represent delayed latencies of load+operation,
6396 increase the cost here for non-imov insns. */
6397 if (dep_insn_type != TYPE_IMOV
6398 && dep_insn_type != TYPE_FMOV
6399 	      && ((memory = get_attr_memory (dep_insn)) == MEMORY_LOAD
6400 || memory == MEMORY_BOTH))
6401 cost += 1;
6402
6403 /* INT->FP conversion is expensive. */
6404 if (get_attr_fp_int_src (dep_insn))
6405 cost += 5;
6406
6407 	      /* There is one extra cycle of latency between an FP op and a store.  */
6408 if (insn_type == TYPE_FMOV
6409 && (set = single_set (dep_insn)) != NULL_RTX
6410 && (set2 = single_set (insn)) != NULL_RTX
6411 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
6412 && GET_CODE (SET_DEST (set2)) == MEM)
6413 cost += 1;
6414 break;
6415
6416 case PROCESSOR_K6:
6417 /* The esp dependency is resolved before the instruction is really
6418 finished. */
6419 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
6420 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
6421 return 1;
6422
6423 /* Since we can't represent delayed latencies of load+operation,
6424 increase the cost here for non-imov insns. */
6425 	      if ((memory = get_attr_memory (dep_insn)) == MEMORY_LOAD
6426 || memory == MEMORY_BOTH)
6427 cost += (dep_insn_type != TYPE_IMOV) ? 2 : 1;
6428
6429 /* INT->FP conversion is expensive. */
6430 if (get_attr_fp_int_src (dep_insn))
6431 cost += 5;
6432 break;
6433
6434 case PROCESSOR_ATHLON:
6435 if ((memory = get_attr_memory (dep_insn)) == MEMORY_LOAD
6436 || memory == MEMORY_BOTH)
6437 {
6438 if (dep_insn_type == TYPE_IMOV || dep_insn_type == TYPE_FMOV)
6439 cost += 2;
6440 else
6441 cost += 3;
6442 }
6443
6444 default:
6445 break;
6446 }
6447
6448 return cost;
6449 }
6450
6451 static union
6452 {
6453 struct ppro_sched_data
6454 {
6455 rtx decode[3];
6456 int issued_this_cycle;
6457 } ppro;
6458 } ix86_sched_data;
6459
6460 static int
6461 ix86_safe_length (insn)
6462 rtx insn;
6463 {
6464 if (recog_memoized (insn) >= 0)
6465 return get_attr_length(insn);
6466 else
6467 return 128;
6468 }
6469
6470 static int
6471 ix86_safe_length_prefix (insn)
6472 rtx insn;
6473 {
6474 if (recog_memoized (insn) >= 0)
6475 	    return get_attr_length_prefix (insn);
6476 else
6477 return 0;
6478 }
6479
6480 static enum attr_memory
6481 ix86_safe_memory (insn)
6482 rtx insn;
6483 {
6484 if (recog_memoized (insn) >= 0)
6485 return get_attr_memory(insn);
6486 else
6487 return MEMORY_UNKNOWN;
6488 }
6489
6490 static enum attr_pent_pair
6491 ix86_safe_pent_pair (insn)
6492 rtx insn;
6493 {
6494 if (recog_memoized (insn) >= 0)
6495 return get_attr_pent_pair(insn);
6496 else
6497 return PENT_PAIR_NP;
6498 }
6499
6500 static enum attr_ppro_uops
6501 ix86_safe_ppro_uops (insn)
6502 rtx insn;
6503 {
6504 if (recog_memoized (insn) >= 0)
6505 return get_attr_ppro_uops (insn);
6506 else
6507 return PPRO_UOPS_MANY;
6508 }
6509
6510 static void
6511 ix86_dump_ppro_packet (dump)
6512 FILE *dump;
6513 {
6514 if (ix86_sched_data.ppro.decode[0])
6515 {
6516 fprintf (dump, "PPRO packet: %d",
6517 INSN_UID (ix86_sched_data.ppro.decode[0]));
6518 if (ix86_sched_data.ppro.decode[1])
6519 fprintf (dump, " %d", INSN_UID (ix86_sched_data.ppro.decode[1]));
6520 if (ix86_sched_data.ppro.decode[2])
6521 fprintf (dump, " %d", INSN_UID (ix86_sched_data.ppro.decode[2]));
6522 fputc ('\n', dump);
6523 }
6524 }
6525
6526 /* We're beginning a new block. Initialize data structures as necessary. */
6527
6528 void
6529 ix86_sched_init (dump, sched_verbose)
6530 FILE *dump ATTRIBUTE_UNUSED;
6531 int sched_verbose ATTRIBUTE_UNUSED;
6532 {
6533 memset (&ix86_sched_data, 0, sizeof (ix86_sched_data));
6534 }
6535
6536 /* Shift INSN to SLOT, and shift everything else down. */
6537
6538 static void
6539 ix86_reorder_insn (insnp, slot)
6540 rtx *insnp, *slot;
6541 {
6542 if (insnp != slot)
6543 {
6544 rtx insn = *insnp;
6545 do
6546 insnp[0] = insnp[1];
6547 while (++insnp != slot);
6548 *insnp = insn;
6549 }
6550 }
6551
6552 /* Find an instruction with given pairability and minimal amount of cycles
6553 lost by the fact that the CPU waits for both pipelines to finish before
6554    reading the next instructions.  Also take care that neither instruction
6555    exceeds 7 bytes, not counting prefixes.  */
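/* The 7-byte test presumably reflects the Pentium pairing rule that an
   instruction longer than 7 bytes cannot execute in parallel with its
   neighbour.  */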
6556
6557 static rtx *
6558 ix86_pent_find_pair (e_ready, ready, type, first)
6559 rtx *e_ready;
6560 rtx *ready;
6561 enum attr_pent_pair type;
6562 rtx first;
6563 {
6564 int mincycles, cycles;
6565 enum attr_pent_pair tmp;
6566 enum attr_memory memory;
6567 rtx *insnp, *bestinsnp = NULL;
6568
6569 if (ix86_safe_length (first) > 7 + ix86_safe_length_prefix (first))
6570 return NULL;
6571
6572 memory = ix86_safe_memory (first);
6573 cycles = result_ready_cost (first);
6574 mincycles = INT_MAX;
6575
6576 for (insnp = e_ready; insnp >= ready && mincycles; --insnp)
6577 if ((tmp = ix86_safe_pent_pair (*insnp)) == type
6578 && ix86_safe_length (*insnp) <= 7 + ix86_safe_length_prefix (*insnp))
6579 {
6580 enum attr_memory second_memory;
6581 int secondcycles, currentcycles;
6582
6583 second_memory = ix86_safe_memory (*insnp);
6584 secondcycles = result_ready_cost (*insnp);
6585 currentcycles = abs (cycles - secondcycles);
6586
6587 if (secondcycles >= 1 && cycles >= 1)
6588 {
6589 	      /* Two read/modify/write instructions together take two
6590 		 cycles longer.  */
6591 if (memory == MEMORY_BOTH && second_memory == MEMORY_BOTH)
6592 currentcycles += 2;
6593
6594 	      /* A read/modify/write instruction followed by a read/modify
6595 		 instruction takes one cycle longer.  */
6596 if (memory == MEMORY_BOTH && second_memory == MEMORY_LOAD
6597 && tmp != PENT_PAIR_UV
6598 && ix86_safe_pent_pair (first) != PENT_PAIR_UV)
6599 currentcycles += 1;
6600 }
6601 if (currentcycles < mincycles)
6602 bestinsnp = insnp, mincycles = currentcycles;
6603 }
6604
6605 return bestinsnp;
6606 }
6607
6608 /* Subroutines of ix86_sched_reorder. */
6609
6610 static void
6611 ix86_sched_reorder_pentium (ready, e_ready)
6612 rtx *ready;
6613 rtx *e_ready;
6614 {
6615 enum attr_pent_pair pair1, pair2;
6616 rtx *insnp;
6617
6618 	  /* This wouldn't be necessary if Haifa knew that static insn ordering
6619 	     determines which pipe an insn is issued to.  So we have to make
6620 	     some minor rearrangements.  */
6621
6622 pair1 = ix86_safe_pent_pair (*e_ready);
6623
6624 /* If the first insn is non-pairable, let it be. */
6625 if (pair1 == PENT_PAIR_NP)
6626 return;
6627
6628 pair2 = PENT_PAIR_NP;
6629 insnp = 0;
6630
6631 /* If the first insn is UV or PV pairable, search for a PU
6632 insn to go with. */
6633 if (pair1 == PENT_PAIR_UV || pair1 == PENT_PAIR_PV)
6634 {
6635 insnp = ix86_pent_find_pair (e_ready-1, ready,
6636 PENT_PAIR_PU, *e_ready);
6637 if (insnp)
6638 pair2 = PENT_PAIR_PU;
6639 }
6640
6641 /* If the first insn is PU or UV pairable, search for a PV
6642 insn to go with. */
6643 if (pair2 == PENT_PAIR_NP
6644 && (pair1 == PENT_PAIR_PU || pair1 == PENT_PAIR_UV))
6645 {
6646 insnp = ix86_pent_find_pair (e_ready-1, ready,
6647 PENT_PAIR_PV, *e_ready);
6648 if (insnp)
6649 pair2 = PENT_PAIR_PV;
6650 }
6651
6652 /* If the first insn is pairable, search for a UV
6653 insn to go with. */
6654 if (pair2 == PENT_PAIR_NP)
6655 {
6656 insnp = ix86_pent_find_pair (e_ready-1, ready,
6657 PENT_PAIR_UV, *e_ready);
6658 if (insnp)
6659 pair2 = PENT_PAIR_UV;
6660 }
6661
6662 if (pair2 == PENT_PAIR_NP)
6663 return;
6664
6665 /* Found something! Decide if we need to swap the order. */
6666 if (pair1 == PENT_PAIR_PV || pair2 == PENT_PAIR_PU
6667 || (pair1 == PENT_PAIR_UV && pair2 == PENT_PAIR_UV
6668 && ix86_safe_memory (*e_ready) == MEMORY_BOTH
6669 && ix86_safe_memory (*insnp) == MEMORY_LOAD))
6670 ix86_reorder_insn (insnp, e_ready);
6671 else
6672 ix86_reorder_insn (insnp, e_ready - 1);
6673 }
6674
6675 static void
6676 ix86_sched_reorder_ppro (ready, e_ready)
6677 rtx *ready;
6678 rtx *e_ready;
6679 {
6680 rtx decode[3];
6681 enum attr_ppro_uops cur_uops;
6682 int issued_this_cycle;
6683 rtx *insnp;
6684 int i;
6685
6686 /* At this point .ppro.decode contains the state of the three
6687 decoders from last "cycle". That is, those insns that were
6688 actually independent. But here we're scheduling for the
6689 decoder, and we may find things that are decodable in the
6690 same cycle. */
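     /* Recall the PPro's 4-1-1 decode template: decoder 0 handles an insn
	of up to four uops, decoders 1 and 2 take one single-uop insn each,
	and anything longer must decode alone.  The slot juggling below
	approximates that.  */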
6691
6692 memcpy (decode, ix86_sched_data.ppro.decode, sizeof(decode));
6693 issued_this_cycle = 0;
6694
6695 insnp = e_ready;
6696 cur_uops = ix86_safe_ppro_uops (*insnp);
6697
6698 	  /* If the decoders are empty, and we have a complex insn at the
6699 	     head of the priority queue, let it issue without complaint.  */
6700 if (decode[0] == NULL)
6701 {
6702 if (cur_uops == PPRO_UOPS_MANY)
6703 {
6704 decode[0] = *insnp;
6705 goto ppro_done;
6706 }
6707
6708 	      /* Otherwise, search for a 2-4 uop insn to issue.  */
6709 while (cur_uops != PPRO_UOPS_FEW)
6710 {
6711 if (insnp == ready)
6712 break;
6713 cur_uops = ix86_safe_ppro_uops (*--insnp);
6714 }
6715
6716 /* If so, move it to the head of the line. */
6717 if (cur_uops == PPRO_UOPS_FEW)
6718 ix86_reorder_insn (insnp, e_ready);
6719
6720 /* Issue the head of the queue. */
6721 issued_this_cycle = 1;
6722 decode[0] = *e_ready--;
6723 }
6724
6725 /* Look for simple insns to fill in the other two slots. */
6726 for (i = 1; i < 3; ++i)
6727 if (decode[i] == NULL)
6728 {
6729 if (ready >= e_ready)
6730 goto ppro_done;
6731
6732 insnp = e_ready;
6733 cur_uops = ix86_safe_ppro_uops (*insnp);
6734 while (cur_uops != PPRO_UOPS_ONE)
6735 {
6736 if (insnp == ready)
6737 break;
6738 cur_uops = ix86_safe_ppro_uops (*--insnp);
6739 }
6740
6741 /* Found one. Move it to the head of the queue and issue it. */
6742 if (cur_uops == PPRO_UOPS_ONE)
6743 {
6744 ix86_reorder_insn (insnp, e_ready);
6745 decode[i] = *e_ready--;
6746 issued_this_cycle++;
6747 continue;
6748 }
6749
6750 /* ??? Didn't find one. Ideally, here we would do a lazy split
6751 of 2-uop insns, issue one and queue the other. */
6752 }
6753
6754 ppro_done:
6755 if (issued_this_cycle == 0)
6756 issued_this_cycle = 1;
6757 ix86_sched_data.ppro.issued_this_cycle = issued_this_cycle;
6758 }
6759
6760
6761 /* We are about to begin issuing insns for this clock cycle.
6762 Override the default sort algorithm to better slot instructions. */
6763 int
6764 ix86_sched_reorder (dump, sched_verbose, ready, n_ready, clock_var)
6765 FILE *dump ATTRIBUTE_UNUSED;
6766 int sched_verbose ATTRIBUTE_UNUSED;
6767 rtx *ready;
6768 int n_ready;
6769 int clock_var ATTRIBUTE_UNUSED;
6770 {
6771 rtx *e_ready = ready + n_ready - 1;
6772
6773 if (n_ready < 2)
6774 goto out;
6775
6776 switch (ix86_cpu)
6777 {
6778 default:
6779 break;
6780
6781 case PROCESSOR_PENTIUM:
6782 ix86_sched_reorder_pentium (ready, e_ready);
6783 break;
6784
6785 case PROCESSOR_PENTIUMPRO:
6786 ix86_sched_reorder_ppro (ready, e_ready);
6787 break;
6788 }
6789
6790 out:
6791 return ix86_issue_rate ();
6792 }
6793
6794 /* We are about to issue INSN. Return the number of insns left on the
6795 ready queue that can be issued this cycle. */
6796
6797 int
6798 ix86_variable_issue (dump, sched_verbose, insn, can_issue_more)
6799 FILE *dump;
6800 int sched_verbose;
6801 rtx insn;
6802 int can_issue_more;
6803 {
6804 int i;
6805 switch (ix86_cpu)
6806 {
6807 default:
6808 return can_issue_more - 1;
6809
6810 case PROCESSOR_PENTIUMPRO:
6811 {
6812 enum attr_ppro_uops uops = ix86_safe_ppro_uops (insn);
6813
6814 if (uops == PPRO_UOPS_MANY)
6815 {
6816 if (sched_verbose)
6817 ix86_dump_ppro_packet (dump);
6818 ix86_sched_data.ppro.decode[0] = insn;
6819 ix86_sched_data.ppro.decode[1] = NULL;
6820 ix86_sched_data.ppro.decode[2] = NULL;
6821 if (sched_verbose)
6822 ix86_dump_ppro_packet (dump);
6823 ix86_sched_data.ppro.decode[0] = NULL;
6824 }
6825 else if (uops == PPRO_UOPS_FEW)
6826 {
6827 if (sched_verbose)
6828 ix86_dump_ppro_packet (dump);
6829 ix86_sched_data.ppro.decode[0] = insn;
6830 ix86_sched_data.ppro.decode[1] = NULL;
6831 ix86_sched_data.ppro.decode[2] = NULL;
6832 }
6833 else
6834 {
6835 for (i = 0; i < 3; ++i)
6836 if (ix86_sched_data.ppro.decode[i] == NULL)
6837 {
6838 ix86_sched_data.ppro.decode[i] = insn;
6839 break;
6840 }
6841 if (i == 3)
6842 abort ();
6843 if (i == 2)
6844 {
6845 if (sched_verbose)
6846 ix86_dump_ppro_packet (dump);
6847 ix86_sched_data.ppro.decode[0] = NULL;
6848 ix86_sched_data.ppro.decode[1] = NULL;
6849 ix86_sched_data.ppro.decode[2] = NULL;
6850 }
6851 }
6852 }
6853 return --ix86_sched_data.ppro.issued_this_cycle;
6854 }
6855 }