/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000
   Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-flags.h"
#include "insn-attr.h"
#include "basic-block.h"
#ifdef EXTRA_CONSTRAINT
/* If EXTRA_CONSTRAINT is defined, then the 'S'
   constraint in REG_CLASS_FROM_LETTER will no longer work, and various
   asm statements that need 'S' for class SIREG will break.  */
 error EXTRA_CONSTRAINT conflicts with S constraint letter
/* The previous line used to be #error, but some compilers barf
   even if the conditional was untrue.  */
#endif
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT -1
#endif
/* Processor costs (relative to an add) */
struct processor_costs i386_cost = {	/* 386 specific costs */
  1,		/* cost of an add instruction */
  1,		/* cost of a lea instruction */
  3,		/* variable shift costs */
  2,		/* constant shift costs */
  6,		/* cost of starting a multiply */
  1,		/* cost of multiply per each bit set */
  23,		/* cost of a divide/mod */
  15,		/* "large" insn */
  4,		/* cost for loading QImode using movzbl */
  {2, 4, 2},	/* cost of loading integer registers
		   in QImode, HImode and SImode.
		   Relative to reg-reg move (2).  */
  {2, 4, 2},	/* cost of storing integer registers */
  2,		/* cost of reg,reg fld/fst */
  {8, 8, 8},	/* cost of loading fp registers
		   in SFmode, DFmode and XFmode */
  {8, 8, 8}	/* cost of storing fp registers
		   in SFmode, DFmode and XFmode */
};
struct processor_costs i486_cost = {	/* 486 specific costs */
  1,		/* cost of an add instruction */
  1,		/* cost of a lea instruction */
  3,		/* variable shift costs */
  2,		/* constant shift costs */
  12,		/* cost of starting a multiply */
  1,		/* cost of multiply per each bit set */
  40,		/* cost of a divide/mod */
  15,		/* "large" insn */
  4,		/* cost for loading QImode using movzbl */
  {2, 4, 2},	/* cost of loading integer registers
		   in QImode, HImode and SImode.
		   Relative to reg-reg move (2).  */
  {2, 4, 2},	/* cost of storing integer registers */
  2,		/* cost of reg,reg fld/fst */
  {8, 8, 8},	/* cost of loading fp registers
		   in SFmode, DFmode and XFmode */
  {8, 8, 8}	/* cost of storing fp registers
		   in SFmode, DFmode and XFmode */
};
struct processor_costs pentium_cost = {
  1,		/* cost of an add instruction */
  1,		/* cost of a lea instruction */
  4,		/* variable shift costs */
  1,		/* constant shift costs */
  11,		/* cost of starting a multiply */
  0,		/* cost of multiply per each bit set */
  25,		/* cost of a divide/mod */
  8,		/* "large" insn */
  6,		/* cost for loading QImode using movzbl */
  {2, 4, 2},	/* cost of loading integer registers
		   in QImode, HImode and SImode.
		   Relative to reg-reg move (2).  */
  {2, 4, 2},	/* cost of storing integer registers */
  2,		/* cost of reg,reg fld/fst */
  {2, 2, 6},	/* cost of loading fp registers
		   in SFmode, DFmode and XFmode */
  {4, 4, 6}	/* cost of storing fp registers
		   in SFmode, DFmode and XFmode */
};
struct processor_costs pentiumpro_cost = {
  1,		/* cost of an add instruction */
  1,		/* cost of a lea instruction */
  1,		/* variable shift costs */
  1,		/* constant shift costs */
  4,		/* cost of starting a multiply */
  0,		/* cost of multiply per each bit set */
  17,		/* cost of a divide/mod */
  8,		/* "large" insn */
  2,		/* cost for loading QImode using movzbl */
  {4, 4, 4},	/* cost of loading integer registers
		   in QImode, HImode and SImode.
		   Relative to reg-reg move (2).  */
  {2, 2, 2},	/* cost of storing integer registers */
  2,		/* cost of reg,reg fld/fst */
  {2, 2, 6},	/* cost of loading fp registers
		   in SFmode, DFmode and XFmode */
  {4, 4, 6}	/* cost of storing fp registers
		   in SFmode, DFmode and XFmode */
};
struct processor_costs k6_cost = {
  1,		/* cost of an add instruction */
  2,		/* cost of a lea instruction */
  1,		/* variable shift costs */
  1,		/* constant shift costs */
  3,		/* cost of starting a multiply */
  0,		/* cost of multiply per each bit set */
  18,		/* cost of a divide/mod */
  8,		/* "large" insn */
  3,		/* cost for loading QImode using movzbl */
  {4, 5, 4},	/* cost of loading integer registers
		   in QImode, HImode and SImode.
		   Relative to reg-reg move (2).  */
  {2, 3, 2},	/* cost of storing integer registers */
  4,		/* cost of reg,reg fld/fst */
  {6, 6, 6},	/* cost of loading fp registers
		   in SFmode, DFmode and XFmode */
  {4, 4, 4}	/* cost of storing fp registers
		   in SFmode, DFmode and XFmode */
};
struct processor_costs athlon_cost = {
  1,		/* cost of an add instruction */
  2,		/* cost of a lea instruction */
  1,		/* variable shift costs */
  1,		/* constant shift costs */
  5,		/* cost of starting a multiply */
  0,		/* cost of multiply per each bit set */
  42,		/* cost of a divide/mod */
  8,		/* "large" insn */
  4,		/* cost for loading QImode using movzbl */
  {4, 5, 4},	/* cost of loading integer registers
		   in QImode, HImode and SImode.
		   Relative to reg-reg move (2).  */
  {2, 3, 2},	/* cost of storing integer registers */
  4,		/* cost of reg,reg fld/fst */
  {6, 6, 20},	/* cost of loading fp registers
		   in SFmode, DFmode and XFmode */
  {4, 4, 16}	/* cost of storing fp registers
		   in SFmode, DFmode and XFmode */
};
struct processor_costs *ix86_cost = &pentium_cost;
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_K6  (1<<PROCESSOR_K6)
#define m_ATHLON  (1<<PROCESSOR_ATHLON)
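/* Illustration (hypothetical sketch, not part of this file): the target
   macros in i386.h test these bitmasks against the active CPU, roughly as

     #define TARGET_USE_LEAVE (x86_use_leave & (1 << (int) ix86_cpu))

   so a tuning flag below is enabled exactly when the bit for ix86_cpu
   is set in it.  */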
const int x86_use_leave = m_386 | m_K6 | m_ATHLON;
const int x86_push_memory = m_386 | m_K6 | m_ATHLON;
const int x86_zero_extend_with_and = m_486 | m_PENT;
const int x86_movx = m_ATHLON | m_PPRO /* m_386 | m_K6 */;
const int x86_double_with_add = ~m_386;
const int x86_use_bit_test = m_386;
const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON | m_K6;
const int x86_use_q_reg = m_PENT | m_PPRO | m_K6;
const int x86_use_any_reg = m_486;
const int x86_cmove = m_PPRO | m_ATHLON;
const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON;
const int x86_use_sahf = m_PPRO | m_K6 | m_ATHLON;
const int x86_partial_reg_stall = m_PPRO;
const int x86_use_loop = m_K6;
const int x86_use_fiop = ~(m_PPRO | m_ATHLON | m_PENT);
const int x86_use_mov0 = m_K6;
const int x86_use_cltd = ~(m_PENT | m_K6);
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486;
const int x86_single_stringop = m_386;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
const int x86_himode_math = ~(m_PPRO);
const int x86_promote_hi_regs = m_PPRO;
const int x86_sub_esp_4 = m_ATHLON | m_PPRO;
const int x86_sub_esp_8 = m_ATHLON | m_PPRO | m_386 | m_486;
const int x86_add_esp_4 = m_ATHLON | m_K6;
const int x86_add_esp_8 = m_ATHLON | m_PPRO | m_K6 | m_386 | m_486;
const int x86_integer_DFmode_moves = ~m_ATHLON;
const int x86_partial_reg_dependency = m_ATHLON;
const int x86_memory_mismatch_stall = m_ATHLON;
#define AT_BP(mode) (gen_rtx_MEM ((mode), hard_frame_pointer_rtx))
const char * const hi_reg_name[] = HI_REGISTER_NAMES;
const char * const qi_reg_name[] = QI_REGISTER_NAMES;
const char * const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  /* FP registers */
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer */
  NON_Q_REGS,
  /* flags, fpsr, dirflag, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS
};
253 /* The "default" register map. */
255 int const dbx_register_map
[FIRST_PSEUDO_REGISTER
] =
257 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
258 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
259 -1, -1, -1, -1, /* arg, flags, fpsr, dir */
/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8 for %eip (no gcc equivalent)
	9 for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to under-
   stand that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still prints garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers:
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
   */

int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,		/* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,	/* fp regs */
  -1, 9, -1, -1,			/* arg, flags, fpsr, dir */
};
/* Test and compare insns in i386.md store the information needed to
   generate branch and scc insns here.  */

struct rtx_def *ix86_compare_op0 = NULL_RTX;
struct rtx_def *ix86_compare_op1 = NULL_RTX;
#define MAX_386_STACK_LOCALS 2

/* Define the structure for the machine field in struct function.  */
struct machine_function
{
  rtx stack_locals[(int) MAX_MACHINE_MODE][MAX_386_STACK_LOCALS];
};

#define ix86_stack_locals (cfun->machine->stack_locals)
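/* Illustrative use (hypothetical): a caller needing a per-function scratch
   stack slot would consult this cache as

     rtx slot = ix86_stack_locals[(int) SImode][0];

   so at most MAX_386_STACK_LOCALS slots per machine mode exist in any
   one function.  */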
/* which cpu are we scheduling for */
enum processor_type ix86_cpu;

/* which instruction set architecture to use.  */
int ix86_arch;

/* Strings to hold which cpu and instruction set architecture to use.  */
const char *ix86_cpu_string;		/* for -mcpu=<xxx> */
const char *ix86_arch_string;		/* for -march=<xxx> */

/* Register allocation order */
const char *ix86_reg_alloc_order;
static char regs_allocated[FIRST_PSEUDO_REGISTER];

/* # of registers to use to pass arguments.  */
const char *ix86_regparm_string;

/* ix86_regparm_string as a number */
int ix86_regparm;

/* Alignment to use for loops and jumps:  */

/* Power of two alignment for loops.  */
const char *ix86_align_loops_string;

/* Power of two alignment for non-loop jumps.  */
const char *ix86_align_jumps_string;

/* Power of two alignment for stack boundary in bytes.  */
const char *ix86_preferred_stack_boundary_string;

/* Preferred alignment for stack boundary in bits.  */
int ix86_preferred_stack_boundary;

/* Values 1-5: see jump.c */
int ix86_branch_cost;
const char *ix86_branch_cost_string;

/* Power of two alignment for functions.  */
int ix86_align_funcs;
const char *ix86_align_funcs_string;

/* Power of two alignment for loops.  */
int ix86_align_loops;

/* Power of two alignment for non-loop jumps.  */
int ix86_align_jumps;
static void output_pic_addr_const PARAMS ((FILE *, rtx, int));
static void put_condition_code PARAMS ((enum rtx_code, enum machine_mode,
					int, FILE *));
static enum rtx_code unsigned_comparison PARAMS ((enum rtx_code code));
static rtx ix86_expand_int_compare PARAMS ((enum rtx_code, rtx, rtx));
static enum machine_mode ix86_fp_compare_mode PARAMS ((enum rtx_code));
static int ix86_use_fcomi_compare PARAMS ((enum rtx_code));
static enum rtx_code ix86_prepare_fp_compare_args PARAMS ((enum rtx_code,
							   rtx *, rtx *));
static rtx ix86_expand_compare PARAMS ((enum rtx_code));
static rtx gen_push PARAMS ((rtx));
static int memory_address_length PARAMS ((rtx addr));
static int ix86_flags_dependant PARAMS ((rtx, rtx, enum attr_type));
static int ix86_agi_dependant PARAMS ((rtx, rtx, enum attr_type));
static int ix86_safe_length PARAMS ((rtx));
static enum attr_memory ix86_safe_memory PARAMS ((rtx));
static enum attr_pent_pair ix86_safe_pent_pair PARAMS ((rtx));
static enum attr_ppro_uops ix86_safe_ppro_uops PARAMS ((rtx));
static void ix86_dump_ppro_packet PARAMS ((FILE *));
static void ix86_reorder_insn PARAMS ((rtx *, rtx *));
static rtx * ix86_pent_find_pair PARAMS ((rtx *, rtx *, enum attr_pent_pair,
					  rtx));
static void ix86_init_machine_status PARAMS ((struct function *));
static void ix86_mark_machine_status PARAMS ((struct function *));
static void ix86_split_to_parts PARAMS ((rtx, rtx *, enum machine_mode));
static int ix86_safe_length_prefix PARAMS ((rtx));
static HOST_WIDE_INT ix86_compute_frame_size PARAMS((HOST_WIDE_INT,
						     int *, int *, int *));
static int ix86_nsaved_regs PARAMS((void));
static void ix86_emit_save_regs PARAMS((void));
static void ix86_emit_restore_regs_using_mov PARAMS ((rtx, int));
static void ix86_emit_epilogue_esp_adjustment PARAMS((int));
static void ix86_sched_reorder_pentium PARAMS((rtx *, rtx *));
static void ix86_sched_reorder_ppro PARAMS((rtx *, rtx *));

struct ix86_address
{
  rtx base, index, disp;
  HOST_WIDE_INT scale;
};

static int ix86_decompose_address PARAMS ((rtx, struct ix86_address *));
/* Sometimes certain combinations of command options do not make
   sense on a particular target machine.  You can define a macro
   `OVERRIDE_OPTIONS' to take account of this.  This macro, if
   defined, is executed once just after all the command options have
   been parsed.

   Don't use this macro to turn on various extra optimizations for
   `-O'.  That is what `OPTIMIZATION_OPTIONS' is for.  */

/* Comes from final.c -- no real reason to change it.  */
#define MAX_CODE_ALIGN	16
void
override_options ()
{
  static struct ptt
    {
      struct processor_costs *cost;	/* Processor costs */
      int target_enable;		/* Target flags to enable.  */
      int target_disable;		/* Target flags to disable.  */
      int align_loop;			/* Default alignments.  */
      int align_jump;
      int align_func;
      int branch_cost;
    }
  const processor_target_table[PROCESSOR_max] =
    {
      {&i386_cost, 0, 0, 2, 2, 2, 1},
      {&i486_cost, 0, 0, 4, 4, 4, 1},
      {&pentium_cost, 0, 0, -4, -4, -4, 1},
      {&pentiumpro_cost, 0, 0, 4, -4, 4, 1},
      {&k6_cost, 0, 0, -5, -5, 4, 1},
      {&athlon_cost, 0, 0, 4, -4, 4, 1}
    };

  static struct pta
    {
      const char *name;		/* processor name or nickname.  */
      enum processor_type processor;
    }
  const processor_alias_table[] =
    {
      {"i386", PROCESSOR_I386},
      {"i486", PROCESSOR_I486},
      {"i586", PROCESSOR_PENTIUM},
      {"pentium", PROCESSOR_PENTIUM},
      {"i686", PROCESSOR_PENTIUMPRO},
      {"pentiumpro", PROCESSOR_PENTIUMPRO},
      {"k6", PROCESSOR_K6},
      {"athlon", PROCESSOR_ATHLON},
    };

  int const pta_size = sizeof (processor_alias_table) / sizeof (struct pta);
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  ix86_arch = PROCESSOR_I386;
  ix86_cpu = (enum processor_type) TARGET_CPU_DEFAULT;

  if (ix86_arch_string != 0)
    {
      int i;

      for (i = 0; i < pta_size; i++)
	if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
	  {
	    ix86_arch = processor_alias_table[i].processor;
	    /* Default cpu tuning to the architecture.  */
	    ix86_cpu = ix86_arch;
	    break;
	  }

      if (i == pta_size)
	error ("bad value (%s) for -march= switch", ix86_arch_string);
    }

  if (ix86_cpu_string != 0)
    {
      int i;

      for (i = 0; i < pta_size; i++)
	if (! strcmp (ix86_cpu_string, processor_alias_table[i].name))
	  {
	    ix86_cpu = processor_alias_table[i].processor;
	    break;
	  }

      if (i == pta_size)
	error ("bad value (%s) for -mcpu= switch", ix86_cpu_string);
    }

  ix86_cost = processor_target_table[ix86_cpu].cost;
  target_flags |= processor_target_table[ix86_cpu].target_enable;
  target_flags &= ~processor_target_table[ix86_cpu].target_disable;

  /* Arrange to set up i386_stack_locals for all functions.  */
  init_machine_status = ix86_init_machine_status;
  mark_machine_status = ix86_mark_machine_status;

  /* Validate registers in register allocation order.  */
  if (ix86_reg_alloc_order)
    {
      int i, ch;

      for (i = 0; (ch = ix86_reg_alloc_order[i]) != '\0'; i++)
	{
	  int regno = 0;

	  switch (ch)
	    {
	    case 'a':	regno = 0;	break;
	    case 'd':	regno = 1;	break;
	    case 'c':	regno = 2;	break;
	    case 'b':	regno = 3;	break;
	    case 'S':	regno = 4;	break;
	    case 'D':	regno = 5;	break;
	    case 'B':	regno = 6;	break;

	    default:	fatal ("Register '%c' is unknown", ch);
	    }

	  if (regs_allocated[regno])
	    fatal ("Register '%c' already specified in allocation order", ch);

	  regs_allocated[regno] = 1;
	}
    }
  /* Validate -mregparm= value.  */
  if (ix86_regparm_string)
    {
      ix86_regparm = atoi (ix86_regparm_string);
      if (ix86_regparm < 0 || ix86_regparm > REGPARM_MAX)
	fatal ("-mregparm=%d is not between 0 and %d",
	       ix86_regparm, REGPARM_MAX);
    }

  /* Validate -malign-loops= value, or provide default.  */
  ix86_align_loops = processor_target_table[ix86_cpu].align_loop;
  if (ix86_align_loops_string)
    {
      ix86_align_loops = atoi (ix86_align_loops_string);
      if (ix86_align_loops < 0 || ix86_align_loops > MAX_CODE_ALIGN)
	fatal ("-malign-loops=%d is not between 0 and %d",
	       ix86_align_loops, MAX_CODE_ALIGN);
    }

  /* Validate -malign-jumps= value, or provide default.  */
  ix86_align_jumps = processor_target_table[ix86_cpu].align_jump;
  if (ix86_align_jumps_string)
    {
      ix86_align_jumps = atoi (ix86_align_jumps_string);
      if (ix86_align_jumps < 0 || ix86_align_jumps > MAX_CODE_ALIGN)
	fatal ("-malign-jumps=%d is not between 0 and %d",
	       ix86_align_jumps, MAX_CODE_ALIGN);
    }

  /* Validate -malign-functions= value, or provide default.  */
  ix86_align_funcs = processor_target_table[ix86_cpu].align_func;
  if (ix86_align_funcs_string)
    {
      ix86_align_funcs = atoi (ix86_align_funcs_string);
      if (ix86_align_funcs < 0 || ix86_align_funcs > MAX_CODE_ALIGN)
	fatal ("-malign-functions=%d is not between 0 and %d",
	       ix86_align_funcs, MAX_CODE_ALIGN);
    }

  /* Validate -mpreferred-stack-boundary= value, or provide default.
     The default of 128 bits is for Pentium III's SSE __m128.  */
  ix86_preferred_stack_boundary = 128;
  if (ix86_preferred_stack_boundary_string)
    {
      int i = atoi (ix86_preferred_stack_boundary_string);
      if (i < 2 || i > 31)
	fatal ("-mpreferred-stack-boundary=%d is not between 2 and 31", i);
      ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
    }
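  /* Example: -mpreferred-stack-boundary=4 yields
     (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits, i.e. the 16-byte stack
     alignment that the SSE __m128 default above requires.  */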
  /* Validate -mbranch-cost= value, or provide default.  */
  ix86_branch_cost = processor_target_table[ix86_cpu].branch_cost;
  if (ix86_branch_cost_string)
    {
      ix86_branch_cost = atoi (ix86_branch_cost_string);
      if (ix86_branch_cost < 0 || ix86_branch_cost > 5)
	fatal ("-mbranch-cost=%d is not between 0 and 5",
	       ix86_branch_cost);
    }

  /* Keep nonleaf frame pointers.  */
  if (TARGET_OMIT_LEAF_FRAME_POINTER)
    flag_omit_frame_pointer = 1;

  /* If we're doing fast math, we don't care about comparison order
     wrt NaNs.  This lets us use a shorter comparison sequence.  */
  if (flag_fast_math)
    target_flags &= ~MASK_IEEE_FP;

  /* If we're planning on using `loop', use it.  */
  if (TARGET_USE_LOOP && optimize)
    flag_branch_on_count_reg = 1;
}
/* A C statement (sans semicolon) to choose the order in which to
   allocate hard registers for pseudo-registers local to a basic
   block.

   Store the desired register order in the array `reg_alloc_order'.
   Element 0 should be the register to allocate first; element 1, the
   next register; and so on.

   The macro body should not assume anything about the contents of
   `reg_alloc_order' before execution of the macro.

   On most machines, it is not necessary to define this macro.  */

void
order_regs_for_local_alloc ()
{
  int i, ch, order;

  /* User specified the register allocation order.  */

  if (ix86_reg_alloc_order)
    {
      for (i = order = 0; (ch = ix86_reg_alloc_order[i]) != '\0'; i++)
	{
	  int regno = 0;

	  switch (ch)
	    {
	    case 'a':	regno = 0;	break;
	    case 'd':	regno = 1;	break;
	    case 'c':	regno = 2;	break;
	    case 'b':	regno = 3;	break;
	    case 'S':	regno = 4;	break;
	    case 'D':	regno = 5;	break;
	    case 'B':	regno = 6;	break;
	    }

	  reg_alloc_order[order++] = regno;
	}

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	{
	  if (! regs_allocated[i])
	    reg_alloc_order[order++] = i;
	}
    }

  /* If user did not specify a register allocation order, use natural order.  */
  else
    {
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	reg_alloc_order[i] = i;
    }
}
void
optimization_options (level, size)
     int level;
     int size ATTRIBUTE_UNUSED;
{
  /* For -O2 and beyond, turn off -fschedule-insns by default.  It tends to
     make the problem with not enough registers even worse.  */
#ifdef INSN_SCHEDULING
  if (level > 1)
    flag_schedule_insns = 0;
#endif
}
/* Return nonzero if IDENTIFIER with arguments ARGS is a valid machine specific
   attribute for DECL.  The attributes in ATTRIBUTES have previously been
   assigned to DECL.  */

int
ix86_valid_decl_attribute_p (decl, attributes, identifier, args)
     tree decl ATTRIBUTE_UNUSED;
     tree attributes ATTRIBUTE_UNUSED;
     tree identifier ATTRIBUTE_UNUSED;
     tree args ATTRIBUTE_UNUSED;
{
  return 0;
}
/* Return nonzero if IDENTIFIER with arguments ARGS is a valid machine specific
   attribute for TYPE.  The attributes in ATTRIBUTES have previously been
   assigned to TYPE.  */

int
ix86_valid_type_attribute_p (type, attributes, identifier, args)
     tree type;
     tree attributes ATTRIBUTE_UNUSED;
     tree identifier;
     tree args;
{
  if (TREE_CODE (type) != FUNCTION_TYPE
      && TREE_CODE (type) != METHOD_TYPE
      && TREE_CODE (type) != FIELD_DECL
      && TREE_CODE (type) != TYPE_DECL)
    return 0;

  /* Stdcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  if (is_attribute_p ("stdcall", identifier))
    return (args == NULL_TREE);

  /* Cdecl attribute says the callee is a normal C declaration.  */
  if (is_attribute_p ("cdecl", identifier))
    return (args == NULL_TREE);

  /* Regparm attribute specifies how many integer arguments are to be
     passed in registers.  */
  if (is_attribute_p ("regparm", identifier))
    {
      tree cst;

      if (! args || TREE_CODE (args) != TREE_LIST
	  || TREE_CHAIN (args) != NULL_TREE
	  || TREE_VALUE (args) == NULL_TREE)
	return 0;

      cst = TREE_VALUE (args);
      if (TREE_CODE (cst) != INTEGER_CST)
	return 0;

      if (compare_tree_int (cst, REGPARM_MAX) > 0)
	return 0;

      return 1;
    }

  return 0;
}
/* Return 0 if the attributes for two types are incompatible, 1 if they
   are compatible, and 2 if they are nearly compatible (which causes a
   warning to be generated).  */

int
ix86_comp_type_attributes (type1, type2)
     tree type1;
     tree type2;
{
  /* Check for mismatch of non-default calling convention.  */
  const char *rtdstr = TARGET_RTD ? "cdecl" : "stdcall";

  if (TREE_CODE (type1) != FUNCTION_TYPE)
    return 1;

  /* Check for mismatched return types (cdecl vs stdcall).  */
  if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
      != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
    return 0;
  return 1;
}
/* Value is the number of bytes of arguments automatically
   popped when returning from a subroutine call.
   FUNDECL is the declaration node of the function (as a tree),
   FUNTYPE is the data type of the function (as a tree),
   or for a library call it is an identifier node for the subroutine name.
   SIZE is the number of bytes of arguments passed on the stack.

   On the 80386, the RTD insn may be used to pop them if the number
     of args is fixed, but if the number is variable then the caller
     must pop them all.  RTD can't be used for library calls now
     because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.

   The attribute stdcall is equivalent to RTD on a per module basis.  */
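/* Illustration (hypothetical declaration): given

     int __attribute__((stdcall)) f (int a, int b);

   a fixed-arg stdcall call site passes 8 bytes on the stack, so the
   function below returns 8 and the callee pops its own arguments
   with `ret $8'.  */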
int
ix86_return_pops_args (fundecl, funtype, size)
     tree fundecl;
     tree funtype;
     int size;
{
  int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);

  /* Cdecl functions override -mrtd, and never pop the stack.  */
  if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {

    /* Stdcall functions will pop the stack if not variable args.  */
    if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype)))
      rtd = 1;

    if (rtd
	&& (TYPE_ARG_TYPES (funtype) == NULL_TREE
	    || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
		== void_type_node)))
      return size;
  }

  /* Lose any fake structure return argument.  */
  if (aggregate_value_p (TREE_TYPE (funtype)))
    return GET_MODE_SIZE (Pmode);

  return 0;
}
/* Argument support functions.  */

/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (cum, fntype, libname)
     CUMULATIVE_ARGS *cum;	/* Argument info to initialize */
     tree fntype;		/* tree ptr for function decl */
     rtx libname;		/* SYMBOL_REF of library name or 0 */
{
  static CUMULATIVE_ARGS zero_cum;
  tree param, next_param;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args (");
      if (fntype)
	fprintf (stderr, "fntype code = %s, ret code = %s",
		 tree_code_name[(int) TREE_CODE (fntype)],
		 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
      else
	fprintf (stderr, "no fntype");

      if (libname)
	fprintf (stderr, ", libname = %s", XSTR (libname, 0));
    }

  *cum = zero_cum;

  /* Set up the number of registers to use for passing arguments.  */
  cum->nregs = ix86_regparm;
  if (fntype)
    {
      tree attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (fntype));

      if (attr)
	cum->nregs = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
    }

  /* Determine if this function has variable arguments.  This is
     indicated by the last argument being 'void_type_node' if there
     are no variable arguments.  If there are variable arguments, then
     we won't pass anything in registers */

  if (cum->nregs)
    {
      for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
	   param != 0; param = next_param)
	{
	  next_param = TREE_CHAIN (param);
	  if (next_param == 0 && TREE_VALUE (param) != void_type_node)
	    cum->nregs = 0;
	}
    }

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, ", nregs=%d )\n", cum->nregs);

  return;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

void
function_arg_advance (cum, mode, type, named)
     CUMULATIVE_ARGS *cum;	/* current arg information */
     enum machine_mode mode;	/* current arg mode */
     tree type;			/* type of the argument or 0 if lib support */
     int named;			/* whether or not the argument was named */
{
  int bytes
    = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
  int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  if (TARGET_DEBUG_ARG)
    fprintf (stderr,
	     "function_adv (sz=%d, wds=%2d, nregs=%d, mode=%s, named=%d)\n\n",
	     words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);

  cum->words += words;
  cum->nregs -= words;
  cum->regno += words;

  if (cum->nregs <= 0)
    {
      cum->nregs = 0;
      cum->regno = 0;
    }

  return;
}
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
   the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
   (otherwise it is an extra parameter matching an ellipsis).  */

rtx
function_arg (cum, mode, type, named)
     CUMULATIVE_ARGS *cum;	/* current arg information */
     enum machine_mode mode;	/* current arg mode */
     tree type;			/* type of the argument or 0 if lib support */
     int named;			/* != 0 for normal args, == 0 for ... args */
{
  rtx ret = NULL_RTX;
  int bytes
    = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
  int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* For now, pass fp/complex values on the stack.  */

  if (words <= cum->nregs)
    ret = gen_rtx_REG (mode, cum->regno);

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr,
	       "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d",
	       words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);

      if (ret)
	fprintf (stderr, ", reg=%%e%s", reg_names[ REGNO(ret) ]);
      else
	fprintf (stderr, ", stack");

      fprintf (stderr, " )\n");
    }

  return ret;
}
/* Returns 1 if OP is either a symbol reference or a sum of a symbol
   reference and a constant.  */

int
symbolic_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;

    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF
	  || (GET_CODE (op) == UNSPEC
	      && XINT (op, 1) >= 6
	      && XINT (op, 1) <= 7))
	return 1;
      if (GET_CODE (op) != PLUS
	  || GET_CODE (XEXP (op, 1)) != CONST_INT)
	return 0;

      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return 1;
      /* Only @GOTOFF gets offsets.  */
      if (GET_CODE (op) != UNSPEC
	  || XINT (op, 1) != 7)
	return 0;

      op = XVECEXP (op, 0, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return 1;
      return 0;

    default:
      return 0;
    }
}
/* Return true if the operand contains a @GOT or @GOTOFF reference.  */

int
pic_symbolic_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST)
    {
      op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC)
	return 1;
      if (GET_CODE (op) != PLUS
	  || GET_CODE (XEXP (op, 1)) != CONST_INT)
	return 0;
      op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC)
	return 1;
    }
  return 0;
}
/* Test for a valid operand for a call instruction.  Don't allow the
   arg pointer register or virtual regs since they may decay into
   reg + const, which the patterns can't handle.  */

int
call_insn_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) != MEM)
    return 0;
  op = XEXP (op, 0);

  /* Disallow indirect through a virtual register.  This leads to
     compiler aborts when trying to eliminate them.  */
  if (GET_CODE (op) == REG
      && (op == arg_pointer_rtx
	  || op == frame_pointer_rtx
	  || (REGNO (op) >= FIRST_PSEUDO_REGISTER
	      && REGNO (op) <= LAST_VIRTUAL_REGISTER)))
    return 0;

  /* Disallow `call 1234'.  Due to varying assembler lameness this
     gets either rejected or translated to `call .+1234'.  */
  if (GET_CODE (op) == CONST_INT)
    return 0;

  /* Explicitly allow SYMBOL_REF even if pic.  */
  if (GET_CODE (op) == SYMBOL_REF)
    return 1;

  /* Half-pic doesn't allow anything but registers and constants.
     We've just taken care of the latter.  */
  if (HALF_PIC_P ())
    return register_operand (op, Pmode);

  /* Otherwise we can allow any general_operand in the address.  */
  return general_operand (op, Pmode);
}
int
constant_call_address_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == MEM
	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
	  && GET_CODE (XEXP (op, 0)) != CONST_INT);
}
/* Match exactly zero and one.  */

int
const0_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return op == CONST0_RTX (mode);
}

int
const1_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return op == const1_rtx;
}
/* Match 2, 4, or 8.  Used for leal multiplicands.  */

int
const248_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) == 2 || INTVAL (op) == 4 || INTVAL (op) == 8));
}
/* True if this is a constant appropriate for an increment or decrement.  */

int
incdec_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (op == const1_rtx || op == constm1_rtx)
    return 1;
  if (GET_CODE (op) != CONST_INT)
    return 0;
  if (mode == SImode && INTVAL (op) == (HOST_WIDE_INT) 0xffffffff)
    return 1;
  if (mode == HImode && INTVAL (op) == (HOST_WIDE_INT) 0xffff)
    return 1;
  if (mode == QImode && INTVAL (op) == (HOST_WIDE_INT) 0xff)
    return 1;
  return 0;
}
/* Return false if this is the stack pointer, or any other fake
   register eliminable to the stack pointer.  Otherwise, this is
   a register operand.

   This is used to prevent esp from being used as an index reg.
   Which would only happen in pathological cases.  */

int
reg_no_sp_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  rtx t = op;
  if (GET_CODE (t) == SUBREG)
    t = SUBREG_REG (t);
  if (t == stack_pointer_rtx || t == arg_pointer_rtx || t == frame_pointer_rtx)
    return 0;

  return register_operand (op, mode);
}
/* Return false if this is any eliminable register.  Otherwise
   general_operand.  */

int
general_no_elim_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  rtx t = op;
  if (GET_CODE (t) == SUBREG)
    t = SUBREG_REG (t);
  if (t == arg_pointer_rtx || t == frame_pointer_rtx
      || t == virtual_incoming_args_rtx || t == virtual_stack_vars_rtx
      || t == virtual_stack_dynamic_rtx)
    return 0;

  return general_operand (op, mode);
}
/* Return false if this is any eliminable register.  Otherwise
   register_operand or const_int.  */

int
nonmemory_no_elim_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  rtx t = op;
  if (GET_CODE (t) == SUBREG)
    t = SUBREG_REG (t);
  if (t == arg_pointer_rtx || t == frame_pointer_rtx
      || t == virtual_incoming_args_rtx || t == virtual_stack_vars_rtx
      || t == virtual_stack_dynamic_rtx)
    return 0;

  return GET_CODE (op) == CONST_INT || register_operand (op, mode);
}
/* Return true if op is a Q_REGS class register.  */

int
q_regs_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return QI_REG_P (op);
}
/* Return true if op is a NON_Q_REGS class register.  */

int
non_q_regs_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return NON_QI_REG_P (op);
}
/* Return 1 if OP is a comparison operator that can use the condition code
   generated by a logical operation, which characteristically does not set
   overflow or carry.  To be used with CCNOmode.  */

int
no_comparison_operator (op, mode)
    register rtx op;
    enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;

  switch (GET_CODE (op))
    {
    case EQ: case NE:
    case LT: case GE:
    case LEU: case LTU: case GEU: case GTU:
      return 1;

    default:
      return 0;
    }
}
/* Return 1 if OP is a comparison operator that can be issued by fcmov.  */

int
fcmov_comparison_operator (op, mode)
    register rtx op;
    enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;

  switch (GET_CODE (op))
    {
    case EQ: case NE:
    case LEU: case LTU: case GEU: case GTU:
    case UNORDERED: case ORDERED:
      return 1;

    default:
      return 0;
    }
}
/* Return 1 if OP is any normal comparison operator plus {UN}ORDERED.  */

int
uno_comparison_operator (op, mode)
    register rtx op;
    enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != mode)
    return 0;

  switch (GET_CODE (op))
    {
    case EQ: case NE:
    case LE: case LT: case GE: case GT:
    case LEU: case LTU: case GEU: case GTU:
    case UNORDERED: case ORDERED:
      return 1;

    default:
      return 0;
    }
}
/* Return 1 if OP is a binary operator that can be promoted to wider mode.  */

int
promotable_binary_operator (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  switch (GET_CODE (op))
    {
    case MULT:
      /* Modern CPUs have the same latency for HImode and SImode multiply,
	 but 386 and 486 do HImode multiply faster.  */
      return ix86_cpu > PROCESSOR_I486;
    case PLUS:
    case AND:
    case IOR:
    case XOR:
    case ASHIFT:
      return 1;
    default:
      return 0;
    }
}
/* Nearly general operand, but accept any const_double, since we wish
   to be able to drop them into memory rather than have them get pulled
   into registers.  */

int
cmp_fp_expander_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;
  if (GET_CODE (op) == CONST_DOUBLE)
    return 1;
  return general_operand (op, mode);
}
/* Match an SI or HImode register for a zero_extract.  */

int
ext_register_operand (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_MODE (op) != SImode && GET_MODE (op) != HImode)
    return 0;
  return register_operand (op, VOIDmode);
}
/* Return 1 if this is a valid binary floating-point operation.
   OP is the expression matched, and MODE is its mode.  */

int
binary_fp_operator (op, mode)
    register rtx op;
    enum machine_mode mode;
{
  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;

  switch (GET_CODE (op))
    {
    case PLUS:
    case MINUS:
    case MULT:
    case DIV:
      return GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT;

    default:
      return 0;
    }
}
int
mult_operator (op, mode)
    register rtx op;
    enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return GET_CODE (op) == MULT;
}

int
div_operator (op, mode)
    register rtx op;
    enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return GET_CODE (op) == DIV;
}
int
arith_or_logical_operator (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (GET_RTX_CLASS (GET_CODE (op)) == 'c'
	      || GET_RTX_CLASS (GET_CODE (op)) == '2'));
}
/* Returns 1 if OP is a memory operand with a displacement.  */

int
memory_displacement_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  struct ix86_address parts;

  if (! memory_operand (op, mode))
    return 0;

  if (! ix86_decompose_address (XEXP (op, 0), &parts))
    return 0;

  return parts.disp != NULL_RTX;
}
/* To avoid problems when jump re-emits comparisons like testqi_ext_ccno_0,
   re-recognize the operand to avoid a copy_to_mode_reg that will fail.

   ??? It seems likely that this will only work because cmpsi is an
   expander, and no actual insns use this.  */

int
cmpsi_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (general_operand (op, mode))
    return 1;

  if (GET_CODE (op) == AND
      && GET_MODE (op) == SImode
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTRACT
      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (XEXP (op, 0), 2)) == CONST_INT
      && INTVAL (XEXP (XEXP (op, 0), 1)) == 8
      && INTVAL (XEXP (XEXP (op, 0), 2)) == 8
      && GET_CODE (XEXP (op, 1)) == CONST_INT)
    return 1;

  return 0;
}
/* Returns 1 if OP is a memory operand that can not be represented by a
   short displacement.  */

int
long_memory_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (! memory_operand (op, mode))
    return 0;

  return memory_address_length (op) != 0;
}
1472 aligned_operand (op
, mode
)
1474 enum machine_mode mode
;
1476 struct ix86_address parts
;
1478 if (!general_operand (op
, mode
))
1481 /* Registers and immediate operands are always "aligned". */
1482 if (GET_CODE (op
) != MEM
)
1485 /* Don't even try to do any aligned optimizations with volatiles. */
1486 if (MEM_VOLATILE_P (op
))
1491 /* Pushes and pops are only valid on the stack pointer. */
1492 if (GET_CODE (op
) == PRE_DEC
1493 || GET_CODE (op
) == POST_INC
)
1496 /* Decode the address. */
1497 if (! ix86_decompose_address (op
, &parts
))
1500 /* Look for some component that isn't known to be aligned. */
1504 && REGNO_POINTER_ALIGN (REGNO (parts
.index
)) < 32)
1509 if (REGNO_POINTER_ALIGN (REGNO (parts
.base
)) < 32)
1514 if (GET_CODE (parts
.disp
) != CONST_INT
1515 || (INTVAL (parts
.disp
) & 3) != 0)
1519 /* Didn't find one -- this must be an aligned address. */
/* Return true if the constant is something that can be loaded with
   a special instruction.  Only handle 0.0 and 1.0; others are less
   worthwhile.  */

int
standard_80387_constant_p (x)
     rtx x;
{
  if (GET_CODE (x) != CONST_DOUBLE)
    return -1;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  {
    REAL_VALUE_TYPE d;
    jmp_buf handler;
    int is0, is1;

    if (setjmp (handler))
      return 0;

    set_float_handler (handler);
    REAL_VALUE_FROM_CONST_DOUBLE (d, x);
    is0 = REAL_VALUES_EQUAL (d, dconst0) && !REAL_VALUE_MINUS_ZERO (d);
    is1 = REAL_VALUES_EQUAL (d, dconst1);
    set_float_handler (NULL_PTR);

    if (is0)
      return 1;

    if (is1)
      return 2;

    /* Note that on the 80387, other constants, such as pi,
       are much slower to load as standard constants
       than to load from doubles in memory!  */
    /* ??? Not true on K6: all constants are equal cost.  */
  }
#endif

  return 0;
}
/* Returns 1 if OP contains a symbol reference */

int
symbolic_reference_mentioned_p (op)
     rtx op;
{
  register const char *fmt;
  register int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  register int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return 1;
	}

      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return 1;
    }

  return 0;
}
/* Return 1 if it is appropriate to emit `ret' instructions in the
   body of a function.  Do this only if the epilogue is simple, needing a
   couple of insns.  Prior to reloading, we can't tell how many registers
   must be saved, so return 0 then.  Return 0 if there is no frame
   marker to de-allocate.

   If NON_SAVING_SETJMP is defined and true, then it is not possible
   for the epilogue to be simple, so return 0.  This is a special case
   since NON_SAVING_SETJMP will not cause regs_ever_live to change
   until final, but jump_optimize may need to know sooner if a
   `return' is OK.  */

int
ix86_can_use_return_insn_p ()
{
  HOST_WIDE_INT tsize;
  int nregs;

#ifdef NON_SAVING_SETJMP
  if (NON_SAVING_SETJMP && current_function_calls_setjmp)
    return 0;
#endif
#ifdef FUNCTION_BLOCK_PROFILER_EXIT
  if (profile_block_flag == 2)
    return 0;
#endif

  if (! reload_completed || frame_pointer_needed)
    return 0;

  /* Don't allow more than 32 pop, since that's all we can do
     with one instruction.  */
  if (current_function_pops_args
      && current_function_args_size >= 32768)
    return 0;

  tsize = ix86_compute_frame_size (get_frame_size (), &nregs, NULL, NULL);
  return tsize == 0 && nregs == 0;
}
static char *pic_label_name;
static int pic_label_output;
static char *global_offset_table_name;

/* This function generates code for -fpic that loads %ebx with
   the return address of the caller and then returns.  */

void
asm_output_function_prefix (file, name)
     FILE *file;
     const char *name ATTRIBUTE_UNUSED;
{
  rtx xops[2];
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
				  || current_function_uses_const_pool);
  xops[0] = pic_offset_table_rtx;
  xops[1] = stack_pointer_rtx;

  /* Deep branch prediction favors having a return for every call.  */
  if (pic_reg_used && TARGET_DEEP_BRANCH_PREDICTION)
    {
      if (!pic_label_output)
	{
	  /* This used to call ASM_DECLARE_FUNCTION_NAME() but since it's an
	     internal (non-global) label that's being emitted, it didn't make
	     sense to have .type information for local labels.  This caused
	     the SCO OpenServer 5.0.4 ELF assembler grief (why are you giving
	     me debug info for a label that you're declaring non-global?) this
	     was changed to call ASM_OUTPUT_LABEL() instead.  */

	  ASM_OUTPUT_LABEL (file, pic_label_name);

	  xops[1] = gen_rtx_MEM (SImode, xops[1]);
	  output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
	  output_asm_insn ("ret", xops);

	  pic_label_output = 1;
	}
    }
}
void
load_pic_register ()
{
  rtx gotsym, pclab;

  if (global_offset_table_name == NULL)
    {
      global_offset_table_name =
	ggc_alloc_string ("_GLOBAL_OFFSET_TABLE_", 21);
      ggc_add_string_root (&global_offset_table_name, 1);
    }
  gotsym = gen_rtx_SYMBOL_REF (Pmode, global_offset_table_name);

  if (TARGET_DEEP_BRANCH_PREDICTION)
    {
      if (pic_label_name == NULL)
	{
	  pic_label_name = ggc_alloc_string (NULL, 32);
	  ggc_add_string_root (&pic_label_name, 1);
	  ASM_GENERATE_INTERNAL_LABEL (pic_label_name, "LPR", 0);
	}
      pclab = gen_rtx_MEM (QImode, gen_rtx_SYMBOL_REF (Pmode, pic_label_name));
    }
  else
    {
      pclab = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
    }

  emit_insn (gen_prologue_get_pc (pic_offset_table_rtx, pclab));

  if (! TARGET_DEEP_BRANCH_PREDICTION)
    emit_insn (gen_popsi1 (pic_offset_table_rtx));

  emit_insn (gen_prologue_set_got (pic_offset_table_rtx, gotsym, pclab));
}
/* Generate an SImode "push" pattern for input ARG.  */

static rtx
gen_push (arg)
     rtx arg;
{
  return gen_rtx_SET (VOIDmode,
		      gen_rtx_MEM (SImode,
				   gen_rtx_PRE_DEC (SImode,
						    stack_pointer_rtx)),
		      arg);
}
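/* For reference, the pattern built above is

     (set (mem:SI (pre_dec:SI (reg:SI 7))) arg)	; reg 7 is %esp

   i.e. a single `pushl' of ARG onto the stack.  */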
/* Return number of registers to be saved on the stack.  */

static int
ix86_nsaved_regs ()
{
  int nregs = 0;
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
				  || current_function_uses_const_pool);
  int limit = (frame_pointer_needed
	       ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
  int regno;

  for (regno = limit - 1; regno >= 0; regno--)
    if ((regs_ever_live[regno] && ! call_used_regs[regno])
	|| (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
      nregs++;
  return nregs;
}
/* Return the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

HOST_WIDE_INT
ix86_initial_elimination_offset (from, to)
     int from;
     int to;
{
  int padding1;
  int nregs;

  /* Stack grows downward:
     ...
     saved frame pointer if frame_pointer_needed
					<- HARD_FRAME_POINTER
     ...  */

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    /* Skip saved PC and previous frame pointer.
       Executed only when frame_pointer_needed.  */
    return 8;
  else if (from == FRAME_POINTER_REGNUM
	   && to == HARD_FRAME_POINTER_REGNUM)
    {
      ix86_compute_frame_size (get_frame_size (), &nregs, &padding1, (int *)0);
      padding1 += nregs * UNITS_PER_WORD;
      return -padding1;
    }
  else
    {
      /* ARG_POINTER or FRAME_POINTER to STACK_POINTER elimination.  */
      int frame_size = frame_pointer_needed ? 8 : 4;
      HOST_WIDE_INT tsize = ix86_compute_frame_size (get_frame_size (),
						     &nregs, &padding1, (int *)0);

      if (to != STACK_POINTER_REGNUM)
	abort ();
      else if (from == ARG_POINTER_REGNUM)
	return tsize + nregs * UNITS_PER_WORD + frame_size;
      else if (from != FRAME_POINTER_REGNUM)
	abort ();
      else
	return tsize - padding1;
    }
}
/* Compute the size of local storage taking into consideration the
   desired stack alignment which is to be maintained.  Also determine
   the number of registers saved below the local storage.

   PADDING1 returns padding before stack frame and PADDING2 returns
   padding after stack frame;  */

static HOST_WIDE_INT
ix86_compute_frame_size (size, nregs_on_stack, rpadding1, rpadding2)
     HOST_WIDE_INT size;
     int *nregs_on_stack;
     int *rpadding1;
     int *rpadding2;
{
  int nregs;
  int padding1;
  int padding2;
  int offset;
  HOST_WIDE_INT total_size;
  int stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
  int preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;

  nregs = ix86_nsaved_regs ();
  total_size = size;

  offset = frame_pointer_needed ? 8 : 4;

  /* Do some sanity checking of stack_alignment_needed and preferred_alignment,
     since the i386 port is the only one using those features that may break
     easily.  */

  if (size && !stack_alignment_needed)
    abort ();
  if (!size && stack_alignment_needed != STACK_BOUNDARY / BITS_PER_UNIT)
    abort ();
  if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
    abort ();
  if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
    abort ();
  if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
    abort ();

  if (stack_alignment_needed < 4)
    stack_alignment_needed = 4;

  offset += nregs * UNITS_PER_WORD;

  if (ACCUMULATE_OUTGOING_ARGS)
    total_size += current_function_outgoing_args_size;

  total_size += offset;

  /* Align start of frame for local function.  */
  padding1 = ((offset + stack_alignment_needed - 1)
	      & -stack_alignment_needed) - offset;
  total_size += padding1;

  /* Align stack boundary.  */
  padding2 = ((total_size + preferred_alignment - 1)
	      & -preferred_alignment) - total_size;

  if (ACCUMULATE_OUTGOING_ARGS)
    padding2 += current_function_outgoing_args_size;

  if (nregs_on_stack)
    *nregs_on_stack = nregs;
  if (rpadding1)
    *rpadding1 = padding1;
  if (rpadding2)
    *rpadding2 = padding2;

  return size + padding1 + padding2;
}
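/* Worked example (illustrative only; 32-bit words, no outgoing args):
   with frame_pointer_needed, nregs = 3, size = 10,
   stack_alignment_needed = 4, preferred_alignment = 16:

     offset   = 8 + 3 * 4              = 20
     padding1 = ((20 + 3) & -4) - 20   = 0
     total    = 10 + 20 + 0            = 30
     padding2 = ((30 + 15) & -16) - 30 = 2

   so the function returns 10 + 0 + 2 = 12 bytes of local storage.  */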
/* Emit code to save registers in the prologue.  */

static void
ix86_emit_save_regs ()
{
  int regno;
  int limit;
  rtx insn;
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
				  || current_function_uses_const_pool);
  limit = (frame_pointer_needed
	   ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);

  for (regno = limit - 1; regno >= 0; regno--)
    if ((regs_ever_live[regno] && !call_used_regs[regno])
	|| (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
      {
	insn = emit_insn (gen_push (gen_rtx_REG (SImode, regno)));
	RTX_FRAME_RELATED_P (insn) = 1;
      }
}
1904 ix86_expand_prologue ()
1906 HOST_WIDE_INT tsize
= ix86_compute_frame_size (get_frame_size (), (int *)0, (int *)0,
1909 int pic_reg_used
= flag_pic
&& (current_function_uses_pic_offset_table
1910 || current_function_uses_const_pool
);
1912 /* Note: AT&T enter does NOT have reversed args. Enter is probably
1913 slower on all targets. Also sdb doesn't like it. */
1915 if (frame_pointer_needed
)
1917 insn
= emit_insn (gen_push (hard_frame_pointer_rtx
));
1918 RTX_FRAME_RELATED_P (insn
) = 1;
1920 insn
= emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
);
1921 RTX_FRAME_RELATED_P (insn
) = 1;
1924 ix86_emit_save_regs ();
1928 else if (! TARGET_STACK_PROBE
|| tsize
< CHECK_STACK_LIMIT
)
1930 if (frame_pointer_needed
)
1931 insn
= emit_insn (gen_pro_epilogue_adjust_stack
1932 (stack_pointer_rtx
, stack_pointer_rtx
,
1933 GEN_INT (-tsize
), hard_frame_pointer_rtx
));
1935 insn
= emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
1937 RTX_FRAME_RELATED_P (insn
) = 1;
1941 /* ??? Is this only valid for Win32? */
1945 arg0
= gen_rtx_REG (SImode
, 0);
1946 emit_move_insn (arg0
, GEN_INT (tsize
));
1948 sym
= gen_rtx_MEM (FUNCTION_MODE
,
1949 gen_rtx_SYMBOL_REF (Pmode
, "_alloca"));
1950 insn
= emit_call_insn (gen_call (sym
, const0_rtx
));
1952 CALL_INSN_FUNCTION_USAGE (insn
)
1953 = gen_rtx_EXPR_LIST (VOIDmode
, gen_rtx_USE (VOIDmode
, arg0
),
1954 CALL_INSN_FUNCTION_USAGE (insn
));
1957 #ifdef SUBTARGET_PROLOGUE
1962 load_pic_register ();
1964 /* If we are profiling, make sure no instructions are scheduled before
1965 the call to mcount. However, if -fpic, the above call will have
1967 if ((profile_flag
|| profile_block_flag
) && ! pic_reg_used
)
1968 emit_insn (gen_blockage ());
/* Emit code to add TSIZE to esp value.  Use POP instruction when
   profitable.  */

static void
ix86_emit_epilogue_esp_adjustment (tsize)
     int tsize;
{
  /* If a frame pointer is present, we must be sure to tie the sp
     to the fp so that we don't mis-schedule.  */
  if (frame_pointer_needed)
    emit_insn (gen_pro_epilogue_adjust_stack (stack_pointer_rtx,
					      stack_pointer_rtx,
					      GEN_INT (tsize),
					      hard_frame_pointer_rtx));
  else
    emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			   GEN_INT (tsize)));
}
/* Emit code to restore saved registers using MOV insns.  First register
   is restored from POINTER + OFFSET.  */

static void
ix86_emit_restore_regs_using_mov (pointer, offset)
     rtx pointer;
     int offset;
{
  int regno;
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
				  || current_function_uses_const_pool);
  int limit = (frame_pointer_needed
	       ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);

  for (regno = 0; regno < limit; regno++)
    if ((regs_ever_live[regno] && !call_used_regs[regno])
	|| (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
      {
	emit_move_insn (gen_rtx_REG (SImode, regno),
			adj_offsettable_operand (gen_rtx_MEM (SImode,
							      pointer),
						 offset));
	offset += 4;
      }
}
/* Restore function stack, frame, and registers.  */

void
ix86_expand_epilogue (emit_return)
     int emit_return;
{
  int nregs;
  int regno;

  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
				  || current_function_uses_const_pool);
  int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT tsize = ix86_compute_frame_size (get_frame_size (), &nregs,
						 (int *)0, (int *)0);

  /* Calculate start of saved registers relative to ebp.  */
  offset = -nregs * UNITS_PER_WORD;

#ifdef FUNCTION_BLOCK_PROFILER_EXIT
  if (profile_block_flag == 2)
    {
      FUNCTION_BLOCK_PROFILER_EXIT;
    }
#endif

  /* If we're only restoring one register and sp is not valid then
     use a move instruction to restore the register, since it's
     less work than reloading sp and popping the register.

     The default code results in a stack adjustment using add/lea
     instructions, while this code results in a LEAVE instruction (or
     discrete equivalent), so it is profitable in some other cases as
     well.  Especially when there are no registers to restore.  We also
     use this code when TARGET_USE_LEAVE and there is exactly one
     register to pop.  This heuristic may need some tuning in future.  */

  if ((!sp_valid && nregs <= 1)
      || (frame_pointer_needed && !nregs && tsize)
      || (frame_pointer_needed && TARGET_USE_LEAVE && !optimize_size
	  && nregs == 1))
    {
      /* Restore registers.  We can use ebp or esp to address the memory
	 locations.  If both are available, default to ebp, since offsets
	 are known to be small.  Only exception is esp pointing directly to the
	 end of block of saved registers, where we may simplify addressing
	 mode.  */

      if (!frame_pointer_needed || (sp_valid && !tsize))
	ix86_emit_restore_regs_using_mov (stack_pointer_rtx, tsize);
      else
	ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx, offset);

      if (!frame_pointer_needed)
	ix86_emit_epilogue_esp_adjustment (tsize + nregs * UNITS_PER_WORD);
      /* If not an i386, mov & pop is faster than "leave".  */
      else if (TARGET_USE_LEAVE || optimize_size)
	emit_insn (gen_leave ());
      else
	{
	  emit_insn (gen_pro_epilogue_adjust_stack (stack_pointer_rtx,
						    hard_frame_pointer_rtx,
						    const0_rtx,
						    hard_frame_pointer_rtx));
	  emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
	}
    }
  else
    {
      /* First step is to deallocate the stack frame so that we can
	 pop the registers.  */
      if (!sp_valid)
	{
	  if (!frame_pointer_needed)
	    abort ();
	  emit_insn (gen_pro_epilogue_adjust_stack (stack_pointer_rtx,
						    hard_frame_pointer_rtx,
						    GEN_INT (offset),
						    hard_frame_pointer_rtx));
	}
      else if (tsize)
	ix86_emit_epilogue_esp_adjustment (tsize);

      for (regno = 0; regno < STACK_POINTER_REGNUM; regno++)
	if ((regs_ever_live[regno] && !call_used_regs[regno])
	    || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
	  emit_insn (gen_popsi1 (gen_rtx_REG (SImode, regno)));
    }

  /* Sibcall epilogues don't want a return instruction.  */
  if (! emit_return)
    return;

  if (current_function_pops_args && current_function_args_size)
    {
      rtx popc = GEN_INT (current_function_pops_args);

      /* i386 can only pop 64K bytes.  If asked to pop more, pop
	 return address, do explicit add, and jump indirectly to the
	 caller.  */

      if (current_function_pops_args >= 65536)
	{
	  rtx ecx = gen_rtx_REG (SImode, 2);

	  emit_insn (gen_popsi1 (ecx));
	  emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
	  emit_indirect_jump (ecx);
	}
      else
	emit_jump_insn (gen_return_pop_internal (popc));
    }
  else
    emit_jump_insn (gen_return_internal ());
}
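/* For reference, the >64K pop case above emits the equivalent of:

     popl  %ecx		; return address into a scratch reg
     addl  $N, %esp	; pop the N argument bytes
     jmp   *%ecx	; return to the caller

   because `ret $N' can only encode a 16-bit immediate.  */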
/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return false if the structure of the address is
   grossly off.  */

int
ix86_decompose_address (addr, out)
     rtx addr;
     struct ix86_address *out;
{
  rtx base = NULL_RTX;
  rtx index = NULL_RTX;
  rtx disp = NULL_RTX;
  HOST_WIDE_INT scale = 1;
  rtx scale_rtx = NULL_RTX;

  if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
    base = addr;
  else if (GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      enum rtx_code code0 = GET_CODE (op0);
      enum rtx_code code1 = GET_CODE (op1);

      if (code0 == REG || code0 == SUBREG)
	{
	  if (code1 == REG || code1 == SUBREG)
	    index = op0, base = op1;		/* index + base */
	  else
	    base = op0, disp = op1;		/* base + displacement */
	}
      else if (code0 == MULT)
	{
	  index = XEXP (op0, 0);
	  scale_rtx = XEXP (op0, 1);
	  if (code1 == REG || code1 == SUBREG)
	    base = op1;				/* index*scale + base */
	  else
	    disp = op1;				/* index*scale + disp */
	}
      else if (code0 == PLUS && GET_CODE (XEXP (op0, 0)) == MULT)
	{
	  index = XEXP (XEXP (op0, 0), 0);	/* index*scale + base + disp */
	  scale_rtx = XEXP (XEXP (op0, 0), 1);
	  base = XEXP (op0, 1);
	  disp = op1;
	}
      else if (code0 == PLUS)
	{
	  index = XEXP (op0, 0);		/* index + base + disp */
	  base = XEXP (op0, 1);
	  disp = op1;
	}
      else
	return FALSE;
    }
  else if (GET_CODE (addr) == MULT)
    {
      index = XEXP (addr, 0);			/* index*scale */
      scale_rtx = XEXP (addr, 1);
    }
  else if (GET_CODE (addr) == ASHIFT)
    {
      rtx tmp;

      /* We're called for lea too, which implements ashift on occasion.  */
      index = XEXP (addr, 0);
      tmp = XEXP (addr, 1);
      if (GET_CODE (tmp) != CONST_INT)
	return FALSE;
      scale = INTVAL (tmp);
      if ((unsigned HOST_WIDE_INT) scale > 3)
	return FALSE;
      scale = 1 << scale;
    }
  else
    disp = addr;				/* displacement */

  /* Extract the integral value of scale.  */
  if (scale_rtx)
    {
      if (GET_CODE (scale_rtx) != CONST_INT)
	return FALSE;
      scale = INTVAL (scale_rtx);
    }

  /* Allow arg pointer and stack pointer as index if there is no scaling.  */
  if (base && index && scale == 1
      && (index == arg_pointer_rtx || index == frame_pointer_rtx
	  || index == stack_pointer_rtx))
    {
      rtx tmp = base;
      base = index;
      index = tmp;
    }

  /* Special case: %ebp cannot be encoded as a base without a displacement.  */
  if ((base == hard_frame_pointer_rtx
       || base == frame_pointer_rtx
       || base == arg_pointer_rtx) && !disp)
    disp = const0_rtx;

  /* Special case: on K6, [%esi] makes the instruction vector decoded.
     Avoid this by transforming to [%esi+0].  */
  if (ix86_cpu == PROCESSOR_K6 && !optimize_size
      && base && !index && !disp
      && REG_P (base)
      && REGNO_REG_CLASS (REGNO (base)) == SIREG)
    disp = const0_rtx;

  /* Special case: encode reg+reg instead of reg*2.  */
  if (!base && index && scale && scale == 2)
    base = index, scale = 1;

  /* Special case: scaling cannot be encoded without base or displacement.  */
  if (!base && !disp && index && scale != 1)
    disp = const0_rtx;

  out->base = base;
  out->index = index;
  out->disp = disp;
  out->scale = scale;

  return TRUE;
}
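
/* A minimal usage sketch of the decomposition above.  The function
   example_addr_parts and its caller are hypothetical and compiled out;
   they only illustrate how the ix86_address parts line up with the
   hardware's disp(base,index,scale) form.  */
#if 0
static void
example_addr_parts (addr)
     rtx addr;			/* e.g. the RTL for 8(%ebx,%eax,4):
				   (plus (mult (reg eax) (const_int 4))
					 (plus (reg ebx) (const_int 8)))  */
{
  struct ix86_address parts;

  if (ix86_decompose_address (addr, &parts))
    {
      /* Here parts.index is %eax, parts.scale is 4, parts.base is %ebx
	 and parts.disp is (const_int 8) -- exactly the fields a SIB
	 byte plus displacement can encode.  */
      debug_rtx (parts.base);
      debug_rtx (parts.index);
    }
}
#endif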
/* Return the cost of the memory address x.
   For i386, it is better to use a complex address than let gcc copy
   the address into a reg and make a new pseudo.  But not if the address
   requires two regs - that would mean more pseudos with longer
   lifetimes.  */

int
ix86_address_cost (x)
     rtx x;
{
  struct ix86_address parts;
  int cost = 1;

  if (!ix86_decompose_address (x, &parts))
    abort ();

  /* More complex memory references are better.  */
  if (parts.disp && parts.disp != const0_rtx)
    cost--;

  /* Attempt to minimize number of registers in the address.  */
  if ((parts.base
       && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
      || (parts.index
	  && (!REG_P (parts.index)
	      || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
    cost++;

  if (parts.base
      && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
      && parts.index
      && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
      && parts.base != parts.index)
    cost++;

  /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
     since its predecode logic can't detect the length of instructions
     and it degenerates to vector decoding.  Increase the cost of such
     addresses here.  The penalty is minimally 2 cycles.  It may be worthwhile
     to split such addresses or even refuse such addresses at all.

     The following addressing modes are affected:
	[base+scale*index]
	[scale*index+disp]
	[base+index]

     The first and last case may be avoidable by explicitly coding the zero in
     the memory address, but I don't have an AMD-K6 machine handy to check
     this theory.  */

  if (ix86_cpu == PROCESSOR_K6
      && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
	  || (parts.disp && !parts.base && parts.index && parts.scale != 1)
	  || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
    cost += 2;

  return cost;
}
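
/* A worked example of the cost rules above (a sketch; the numbers follow
   directly from the rules): for 4(%ebx) the cost starts at 1 and the
   nonzero displacement subtracts 1, giving 0.  For (plus (reg pseudo1)
   (reg pseudo2)) both operands are still pseudos, so the first rule adds
   1 and the two-distinct-pseudos rule adds 1 more, giving 3.  gcc is thus
   steered away from addresses that would keep two pseudos live, exactly
   as the head comment explains.  */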
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */

int
legitimate_pic_address_disp_p (disp)
     register rtx disp;
{
  if (GET_CODE (disp) != CONST)
    return 0;
  disp = XEXP (disp, 0);

  if (GET_CODE (disp) == PLUS)
    {
      if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
	return 0;
      disp = XEXP (disp, 0);
    }

  if (GET_CODE (disp) != UNSPEC
      || XVECLEN (disp, 0) != 1)
    return 0;

  /* Must be @GOT or @GOTOFF.  */
  if (XINT (disp, 1) != 6
      && XINT (disp, 1) != 7)
    return 0;

  if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
      && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
    return 0;

  return 1;
}
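
/* The shape accepted above, sketched concretely: a @GOTOFF reference to a
   static symbol arrives as

	(const (unspec [(symbol_ref "sym")] 7))

   and a byte offset within that object as

	(const (plus (unspec [(symbol_ref "sym")] 7) (const_int 4)))

   Unspec 6 is the corresponding @GOT form; any other CONST is rejected.  */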
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the machine mode
   for the MEM expression that wants to use this address.

   It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
   convert common non-canonical forms to canonical form so that they will
   be recognized.  */

int
legitimate_address_p (mode, addr, strict)
     enum machine_mode mode;
     rtx addr;
     int strict;
{
  struct ix86_address parts;
  rtx base, index, disp;
  HOST_WIDE_INT scale;
  const char *reason = NULL;
  rtx reason_rtx = NULL_RTX;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
	       GET_MODE_NAME (mode), strict);
      debug_rtx (addr);
    }

  if (! ix86_decompose_address (addr, &parts))
    {
      reason = "decomposition failed";
      goto report_error;
    }

  base = parts.base;
  index = parts.index;
  disp = parts.disp;
  scale = parts.scale;

  /* Validate base register.

     Don't allow SUBREG's here, it can lead to spill failures when the base
     is one word out of a two word structure, which is represented internally
     as a DImode int.  */

  if (base)
    {
      reason_rtx = base;

      if (GET_CODE (base) != REG)
	{
	  reason = "base is not a register";
	  goto report_error;
	}

      if (GET_MODE (base) != Pmode)
	{
	  reason = "base is not in Pmode";
	  goto report_error;
	}

      if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
	  || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
	{
	  reason = "base is not valid";
	  goto report_error;
	}
    }

  /* Validate index register.

     Don't allow SUBREG's here, it can lead to spill failures when the index
     is one word out of a two word structure, which is represented internally
     as a DImode int.  */

  if (index)
    {
      reason_rtx = index;

      if (GET_CODE (index) != REG)
	{
	  reason = "index is not a register";
	  goto report_error;
	}

      if (GET_MODE (index) != Pmode)
	{
	  reason = "index is not in Pmode";
	  goto report_error;
	}

      if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index))
	  || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index)))
	{
	  reason = "index is not valid";
	  goto report_error;
	}
    }

  /* Validate scale factor.  */
  if (scale != 1)
    {
      reason_rtx = GEN_INT (scale);
      if (!index)
	{
	  reason = "scale without index";
	  goto report_error;
	}

      if (scale != 2 && scale != 4 && scale != 8)
	{
	  reason = "scale is not a valid multiplier";
	  goto report_error;
	}
    }

  /* Validate displacement.  */
  if (disp)
    {
      reason_rtx = disp;

      if (!CONSTANT_ADDRESS_P (disp))
	{
	  reason = "displacement is not constant";
	  goto report_error;
	}

      if (GET_CODE (disp) == CONST_DOUBLE)
	{
	  reason = "displacement is a const_double";
	  goto report_error;
	}

      if (flag_pic && SYMBOLIC_CONST (disp))
	{
	  if (! legitimate_pic_address_disp_p (disp))
	    {
	      reason = "displacement is an invalid pic construct";
	      goto report_error;
	    }

	  /* Verify that a symbolic pic displacement includes
	     the pic_offset_table_rtx register.  */
	  if (base != pic_offset_table_rtx
	      && (index != pic_offset_table_rtx || scale != 1))
	    {
	      reason = "pic displacement against invalid base";
	      goto report_error;
	    }
	}
      else if (HALF_PIC_P ())
	{
	  if (! HALF_PIC_ADDRESS_P (disp)
	      || (base != NULL_RTX || index != NULL_RTX))
	    {
	      reason = "displacement is an invalid half-pic reference";
	      goto report_error;
	    }
	}
    }

  /* Everything looks valid.  */
  if (TARGET_DEBUG_ADDR)
    fprintf (stderr, "Success.\n");
  return TRUE;

 report_error:
  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "Error: %s\n", reason);
      debug_rtx (reason_rtx);
    }
  return FALSE;
}
/* Return a legitimate reference for ORIG (an address) using the
   register REG.  If REG is 0, a new pseudo is generated.

   There are two types of references that must be handled:

   1. Global data references must load the address from the GOT, via
      the PIC reg.  An insn is emitted to do this load, and the reg is
      returned.

   2. Static data references, constant pool addresses, and code labels
      compute the address as an offset from the GOT, whose base is in
      the PIC reg.  Static data objects have SYMBOL_REF_FLAG set to
      differentiate them from global data objects.  The returned
      address is the PIC reg + an unspec constant.

   GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
   reg also appears in the address.  */

rtx
legitimize_pic_address (orig, reg)
     rtx orig;
     rtx reg;
{
  rtx addr = orig;
  rtx new = orig;
  rtx base;

  if (GET_CODE (addr) == LABEL_REF
      || (GET_CODE (addr) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (addr)
	      || SYMBOL_REF_FLAG (addr))))
    {
      /* This symbol may be referenced via a displacement from the PIC
	 base address (@GOTOFF).  */

      current_function_uses_pic_offset_table = 1;
      new = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, addr), 7);
      new = gen_rtx_CONST (VOIDmode, new);
      new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);

      if (reg != 0)
	{
	  emit_move_insn (reg, new);
	  new = reg;
	}
    }
  else if (GET_CODE (addr) == SYMBOL_REF)
    {
      /* This symbol must be referenced via a load from the
	 Global Offset Table (@GOT).  */

      current_function_uses_pic_offset_table = 1;
      new = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, addr), 6);
      new = gen_rtx_CONST (VOIDmode, new);
      new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
      new = gen_rtx_MEM (Pmode, new);
      RTX_UNCHANGING_P (new) = 1;

      if (reg == 0)
	reg = gen_reg_rtx (Pmode);
      emit_move_insn (reg, new);
      new = reg;
    }
  else
    {
      if (GET_CODE (addr) == CONST)
	{
	  addr = XEXP (addr, 0);
	  if (GET_CODE (addr) == UNSPEC)
	    {
	      /* Check that the unspec is one of the ones we generate?  */
	    }
	  else if (GET_CODE (addr) != PLUS)
	    abort ();
	}

      if (GET_CODE (addr) == PLUS)
	{
	  rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);

	  /* Check first to see if this is a constant offset from a @GOTOFF
	     symbol reference.  */
	  if ((GET_CODE (op0) == LABEL_REF
	       || (GET_CODE (op0) == SYMBOL_REF
		   && (CONSTANT_POOL_ADDRESS_P (op0)
		       || SYMBOL_REF_FLAG (op0))))
	      && GET_CODE (op1) == CONST_INT)
	    {
	      current_function_uses_pic_offset_table = 1;
	      new = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, op0), 7);
	      new = gen_rtx_PLUS (VOIDmode, new, op1);
	      new = gen_rtx_CONST (VOIDmode, new);
	      new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);

	      if (reg != 0)
		{
		  emit_move_insn (reg, new);
		  new = reg;
		}
	    }
	  else
	    {
	      base = legitimize_pic_address (XEXP (addr, 0), reg);
	      new  = legitimize_pic_address (XEXP (addr, 1),
					     base == reg ? NULL_RTX : reg);

	      if (GET_CODE (new) == CONST_INT)
		new = plus_constant (base, INTVAL (new));
	      else
		{
		  if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
		    {
		      base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
		      new = XEXP (new, 1);
		    }
		  new = gen_rtx_PLUS (Pmode, base, new);
		}
	    }
	}
    }

  return new;
}
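
/* A sketch of the two transformations above.  A global `sym' becomes a
   GOT load:

	sym  ->  (mem (plus (reg pic) (const (unspec [(symbol_ref "sym")] 6))))

   while a static or constant-pool `sym' becomes a GOT-relative sum that
   needs no memory reference:

	sym  ->  (plus (reg pic) (const (unspec [(symbol_ref "sym")] 7)))

   Both forms keep pic_offset_table_rtx in the address, which is what
   GO_IF_LEGITIMATE_ADDRESS insists on for symbolic references.  */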
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the 80386, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in a general reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in a general reg.

   When -fpic is used, special handling is needed for symbolic references.
   See comments by legitimize_pic_address in i386.c for details.  */

rtx
legitimize_address (x, oldx, mode)
     register rtx x;
     register rtx oldx ATTRIBUTE_UNUSED;
     enum machine_mode mode;
{
  int changed = 0;
  unsigned log;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
	       GET_MODE_NAME (mode));
      debug_rtx (x);
    }

  if (flag_pic && SYMBOLIC_CONST (x))
    return legitimize_pic_address (x, 0);

  /* Canonicalize shifts by 0, 1, 2, 3 into multiply.  */
  if (GET_CODE (x) == ASHIFT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
    {
      changed = 1;
      x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
			GEN_INT (1 << log));
    }

  if (GET_CODE (x) == PLUS)
    {
      /* Canonicalize shifts by 0, 1, 2, 3 into multiply.  */

      if (GET_CODE (XEXP (x, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
	{
	  changed = 1;
	  XEXP (x, 0) = gen_rtx_MULT (Pmode,
				      force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
				      GEN_INT (1 << log));
	}

      if (GET_CODE (XEXP (x, 1)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)
	{
	  changed = 1;
	  XEXP (x, 1) = gen_rtx_MULT (Pmode,
				      force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
				      GEN_INT (1 << log));
	}

      /* Put multiply first if it isn't already.  */
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx tmp = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tmp;
	  changed = 1;
	}

      /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
	 into (plus (plus (mult (reg) (const)) (reg)) (const)).  This can be
	 created by virtual register instantiation, register elimination, and
	 similar optimizations.  */
      if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
	{
	  changed = 1;
	  x = gen_rtx_PLUS (Pmode,
			    gen_rtx_PLUS (Pmode, XEXP (x, 0),
					  XEXP (XEXP (x, 1), 0)),
			    XEXP (XEXP (x, 1), 1));
	}

      /* Canonicalize
	 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
	 into (plus (plus (mult (reg) (const)) (reg)) (const)).  */
      else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
	       && CONSTANT_P (XEXP (x, 1)))
	{
	  rtx constant;
	  rtx other = NULL_RTX;

	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      constant = XEXP (x, 1);
	      other = XEXP (XEXP (XEXP (x, 0), 1), 1);
	    }
	  else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
	    {
	      constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
	      other = XEXP (x, 1);
	    }
	  else
	    constant = 0;

	  if (constant)
	    {
	      changed = 1;
	      x = gen_rtx_PLUS (Pmode,
				gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (XEXP (x, 0), 1), 0)),
				plus_constant (other, INTVAL (constant)));
	    }
	}

      if (changed && legitimate_address_p (mode, x, FALSE))
	return x;

      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  changed = 1;
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}

      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  changed = 1;
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}

      if (changed
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == REG)
	return x;

      if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
	{
	  changed = 1;
	  x = legitimize_pic_address (x, 0);
	}

      if (changed && legitimate_address_p (mode, x, FALSE))
	return x;

      if (GET_CODE (XEXP (x, 0)) == REG)
	{
	  register rtx temp = gen_reg_rtx (Pmode);
	  register rtx val = force_operand (XEXP (x, 1), temp);
	  if (val != temp)
	    emit_move_insn (temp, val);

	  XEXP (x, 1) = temp;
	  return x;
	}

      else if (GET_CODE (XEXP (x, 1)) == REG)
	{
	  register rtx temp = gen_reg_rtx (Pmode);
	  register rtx val = force_operand (XEXP (x, 0), temp);
	  if (val != temp)
	    emit_move_insn (temp, val);

	  XEXP (x, 0) = temp;
	  return x;
	}
    }

  return x;
}
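
/* A before/after sketch of the canonicalizations above: starting from
   (plus (ashift (reg A) (const_int 2)) (plus (reg B) (const_int 8))),
   the shift first becomes (mult (reg A) (const_int 4)), and the nested
   plus is then reassociated to

	(plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 8))

   which ix86_decompose_address recognizes directly as 8(%B,%A,4).  */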
/* Print an integer constant expression in assembler syntax.  Addition
   and subtraction are the only arithmetic that may appear in these
   expressions.  FILE is the stdio stream to write to, X is the rtx, and
   CODE is the operand print code from the output string.  */

static void
output_pic_addr_const (file, x, code)
     FILE *file;
     rtx x;
     int code;
{
  char buf[256];

  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
      assemble_name (file, XSTR (x, 0));
      if (code == 'P' && ! SYMBOL_REF_FLAG (x))
	fputs ("@PLT", file);
      break;

    case LABEL_REF:
      x = XEXP (x, 0);
      /* FALLTHRU */
    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
      assemble_name (asm_out_file, buf);
      break;

    case CONST_INT:
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;

    case CONST:
      /* This used to output parentheses around the expression,
	 but that does not work on the 386 (either ATT or BSD assembler).  */
      output_pic_addr_const (file, XEXP (x, 0), code);
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
	{
	  /* We can use %d if the number is <32 bits and positive.  */
	  if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
	    fprintf (file, "0x%lx%08lx",
		     (unsigned long) CONST_DOUBLE_HIGH (x),
		     (unsigned long) CONST_DOUBLE_LOW (x));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
	}
      else
	/* We can't handle floating point constants;
	   PRINT_OPERAND must handle them.  */
	output_operand_lossage ("floating constant misused");
      break;

    case PLUS:
      /* Some assemblers need integer constants to appear first.  */
      if (GET_CODE (XEXP (x, 0)) == CONST_INT)
	{
	  output_pic_addr_const (file, XEXP (x, 0), code);
	  putc ('+', file);
	  output_pic_addr_const (file, XEXP (x, 1), code);
	}
      else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	{
	  output_pic_addr_const (file, XEXP (x, 1), code);
	  putc ('+', file);
	  output_pic_addr_const (file, XEXP (x, 0), code);
	}
      else
	abort ();
      break;

    case MINUS:
      putc (ASSEMBLER_DIALECT ? '(' : '[', file);
      output_pic_addr_const (file, XEXP (x, 0), code);
      putc ('-', file);
      output_pic_addr_const (file, XEXP (x, 1), code);
      putc (ASSEMBLER_DIALECT ? ')' : ']', file);
      break;

    case UNSPEC:
      if (XVECLEN (x, 0) != 1)
	abort ();
      output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
      switch (XINT (x, 1))
	{
	case 6:
	  fputs ("@GOT", file);
	  break;
	case 7:
	  fputs ("@GOTOFF", file);
	  break;
	case 8:
	  fputs ("@PLT", file);
	  break;
	default:
	  output_operand_lossage ("invalid UNSPEC as operand");
	  break;
	}
      break;

    default:
      output_operand_lossage ("invalid expression as operand");
    }
}
/* This is called from dwarfout.c via ASM_OUTPUT_DWARF_ADDR_CONST.
   We need to handle our special PIC relocations.  */

void
i386_dwarf_output_addr_const (file, x)
     FILE *file;
     rtx x;
{
  fprintf (file, "\t%s\t", INT_ASM_OP);
  if (flag_pic)
    output_pic_addr_const (file, x, '\0');
  else
    output_addr_const (file, x);
  fputc ('\n', file);
}

/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize PIC+GOTOFF and turn it back
   into a direct symbol reference.  */

rtx
i386_simplify_dwarf_addr (orig_x)
     rtx orig_x;
{
  rtx x = orig_x;

  if (GET_CODE (x) != PLUS
      || GET_CODE (XEXP (x, 0)) != REG
      || GET_CODE (XEXP (x, 1)) != CONST)
    return orig_x;

  x = XEXP (XEXP (x, 1), 0);
  if (GET_CODE (x) == UNSPEC
      && XINT (x, 1) == 7)
    return XVECEXP (x, 0, 0);

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == UNSPEC
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && XINT (XEXP (x, 0), 1) == 7)
    return gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));

  return orig_x;
}
void
put_condition_code (code, mode, reverse, fp, file)
     enum rtx_code code;
     enum machine_mode mode;
     int reverse, fp;
     FILE *file;
{
  const char *suffix;

  if (reverse)
    code = reverse_condition (code);

  switch (code)
    {
    case EQ: suffix = "e"; break;
    case NE: suffix = "ne"; break;

    case GT:
      if (mode == CCNOmode)
	abort ();
      suffix = "g";
      break;

    case GTU:
      /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
	 Those same assemblers have the same but opposite lossage on cmov.  */
      suffix = fp ? "nbe" : "a";
      break;

    case LT:
      if (mode == CCNOmode)
	suffix = "s";
      else
	suffix = "l";
      break;

    case LTU: suffix = "b"; break;

    case GE:
      if (mode == CCNOmode)
	suffix = "ns";
      else
	suffix = "ge";
      break;

    case GEU:
      /* ??? As above for GTU.  */
      suffix = fp ? "nb" : "ae";
      break;

    case LE:
      if (mode == CCNOmode)
	abort ();
      suffix = "le";
      break;

    case LEU: suffix = "be"; break;

    default:
      abort ();
    }
  fputs (suffix, file);
}
void
print_reg (x, code, file)
     rtx x;
     int code;
     FILE *file;
{
  if (REGNO (x) == ARG_POINTER_REGNUM
      || REGNO (x) == FRAME_POINTER_REGNUM
      || REGNO (x) == FLAGS_REG
      || REGNO (x) == FPSR_REG)
    abort ();

  if (ASSEMBLER_DIALECT == 0 || USER_LABEL_PREFIX[0] == 0)
    putc ('%', file);

  if (code == 'w')
    code = 2;
  else if (code == 'b')
    code = 1;
  else if (code == 'k')
    code = 4;
  else if (code == 'y')
    code = 3;
  else if (code == 'h')
    code = 0;
  else
    code = GET_MODE_SIZE (GET_MODE (x));

  switch (code)
    {
    case 3:
      if (STACK_TOP_P (x))
	{
	  fputs ("st(0)", file);
	  break;
	}
      /* FALLTHRU */
    case 8:
    case 12:
    case 4:
      if (! FP_REG_P (x))
	putc ('e', file);
      /* FALLTHRU */
    case 2:
      fputs (hi_reg_name[REGNO (x)], file);
      break;
    case 1:
      fputs (qi_reg_name[REGNO (x)], file);
      break;
    case 0:
      fputs (qi_high_reg_name[REGNO (x)], file);
      break;
    default:
      abort ();
    }
}
/* Meaning of CODE:
   L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
   C -- print opcode suffix for set/cmov insn.
   c -- like C, but print reversed condition
   R -- print the prefix for register names.
   z -- print the opcode suffix for the size of the current operand.
   * -- print a star (in certain assembler syntax)
   w -- print the operand as if it's a "word" (HImode) even if it isn't.
   s -- print a shift double count, followed by the assembler's argument
	delimiter.
   b -- print the QImode name of the register for the indicated operand.
	%b0 would print %al if operands[0] is reg 0.
   w -- likewise, print the HImode name of the register.
   k -- likewise, print the SImode name of the register.
   h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
   y -- print "st(0)" instead of "st" as a register.  */
void
print_operand (file, x, code)
     FILE *file;
     rtx x;
     int code;
{
  if (code)
    {
      switch (code)
	{
	case '*':
	  if (ASSEMBLER_DIALECT == 0)
	    putc ('*', file);
	  return;

	case 'L':
	  if (ASSEMBLER_DIALECT == 0)
	    putc ('l', file);
	  return;

	case 'W':
	  if (ASSEMBLER_DIALECT == 0)
	    putc ('w', file);
	  return;

	case 'B':
	  if (ASSEMBLER_DIALECT == 0)
	    putc ('b', file);
	  return;

	case 'Q':
	  if (ASSEMBLER_DIALECT == 0)
	    putc ('l', file);
	  return;

	case 'S':
	  if (ASSEMBLER_DIALECT == 0)
	    putc ('s', file);
	  return;

	case 'T':
	  if (ASSEMBLER_DIALECT == 0)
	    putc ('t', file);
	  return;

	case 'z':
	  /* 387 opcodes don't get size suffixes if the operands are
	     registers.  */
	  if (STACK_REG_P (x))
	    return;

	  /* Intel syntax has no truck with instruction suffixes.  */
	  if (ASSEMBLER_DIALECT != 0)
	    return;

	  /* this is the size of op from size of operand */
	  switch (GET_MODE_SIZE (GET_MODE (x)))
	    {
	    case 2:
#ifdef HAVE_GAS_FILDS_FISTS
	      putc ('s', file);
#endif
	      return;

	    case 4:
	      if (GET_MODE (x) == SFmode)
		putc ('s', file);
	      else
		putc ('l', file);
	      return;

	    case 12:
	      putc ('t', file);
	      return;

	    case 8:
	      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
		{
#ifdef GAS_MNEMONICS
		  putc ('q', file);
#else
		  putc ('l', file);
		  putc ('l', file);
#endif
		}
	      else
		putc ('l', file);
	      return;

	    default:
	      abort ();
	    }

	case 'b':
	case 'w':
	case 'k':
	case 'h':
	case 'y':
	case 'X':
	case 'P':
	  break;

	case 's':
	  if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
	    {
	      PRINT_OPERAND (file, x, 0);
	      putc (',', file);
	    }
	  return;

	case 'C':
	  put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
	  return;
	case 'F':
	  put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
	  return;

	  /* Like above, but reverse condition */
	case 'c':
	  put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
	  return;
	case 'f':
	  put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
	  return;

	default:
	  {
	    char str[50];

	    sprintf (str, "invalid operand code `%c'", code);
	    output_operand_lossage (str);
	  }
	}
    }

  if (GET_CODE (x) == REG)
    {
      PRINT_REG (x, code, file);
    }

  else if (GET_CODE (x) == MEM)
    {
      /* No `byte ptr' prefix for call instructions.  */
      if (ASSEMBLER_DIALECT != 0 && code != 'X' && code != 'P')
	{
	  const char *size;

	  switch (GET_MODE_SIZE (GET_MODE (x)))
	    {
	    case 1: size = "BYTE"; break;
	    case 2: size = "WORD"; break;
	    case 4: size = "DWORD"; break;
	    case 8: size = "QWORD"; break;
	    case 12: size = "XWORD"; break;
	    default:
	      abort ();
	    }
	  fputs (size, file);
	  fputs (" PTR ", file);
	}

      x = XEXP (x, 0);
      if (flag_pic && CONSTANT_ADDRESS_P (x))
	output_pic_addr_const (file, x, code);
      else
	output_address (x);
    }

  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
    {
      REAL_VALUE_TYPE r;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);

      if (ASSEMBLER_DIALECT == 0)
	putc ('$', file);
      fprintf (file, "0x%lx", l);
    }

  /* These float cases don't actually occur as immediate operands.  */
  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
    {
      REAL_VALUE_TYPE r;
      char dstr[30];

      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      REAL_VALUE_TO_DECIMAL (r, "%.22e", dstr);
      fprintf (file, "%s", dstr);
    }

  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
    {
      REAL_VALUE_TYPE r;
      char dstr[30];

      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      REAL_VALUE_TO_DECIMAL (r, "%.22e", dstr);
      fprintf (file, "%s", dstr);
    }
  else
    {
      if (code != 'P')
	{
	  if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		putc ('$', file);
	    }
	  else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
		   || GET_CODE (x) == LABEL_REF)
	    {
	      if (ASSEMBLER_DIALECT == 0)
		putc ('$', file);
	      else
		fputs ("OFFSET FLAT:", file);
	    }
	}
      if (GET_CODE (x) == CONST_INT)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      else if (flag_pic)
	output_pic_addr_const (file, x, code);
      else
	output_addr_const (file, x);
    }
}
/* Print a memory operand whose address is ADDR.  */

void
print_operand_address (file, addr)
     FILE *file;
     register rtx addr;
{
  struct ix86_address parts;
  rtx base, index, disp;
  int scale;

  if (! ix86_decompose_address (addr, &parts))
    abort ();

  base = parts.base;
  index = parts.index;
  disp = parts.disp;
  scale = parts.scale;

  if (!base && !index)
    {
      /* Displacement only requires special attention.  */

      if (GET_CODE (disp) == CONST_INT)
	{
	  if (ASSEMBLER_DIALECT != 0)
	    fputs ("ds:", file);
	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
	}
      else if (flag_pic)
	output_pic_addr_const (file, addr, 0);
      else
	output_addr_const (file, addr);
    }
  else
    {
      if (ASSEMBLER_DIALECT == 0)
	{
	  if (disp)
	    {
	      if (flag_pic)
		output_pic_addr_const (file, disp, 0);
	      else if (GET_CODE (disp) == LABEL_REF)
		output_asm_label (disp);
	      else
		output_addr_const (file, disp);
	    }

	  putc ('(', file);
	  if (base)
	    PRINT_REG (base, 0, file);
	  if (index)
	    {
	      putc (',', file);
	      PRINT_REG (index, 0, file);
	      if (scale != 1)
		fprintf (file, ",%d", scale);
	    }
	  putc (')', file);
	}
      else
	{
	  rtx offset = NULL_RTX;

	  if (disp)
	    {
	      /* Pull out the offset of a symbol; print any symbol itself.  */
	      if (GET_CODE (disp) == CONST
		  && GET_CODE (XEXP (disp, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
		{
		  offset = XEXP (XEXP (disp, 0), 1);
		  disp = gen_rtx_CONST (VOIDmode,
					XEXP (XEXP (disp, 0), 0));
		}

	      if (flag_pic)
		output_pic_addr_const (file, disp, 0);
	      else if (GET_CODE (disp) == LABEL_REF)
		output_asm_label (disp);
	      else if (GET_CODE (disp) == CONST_INT)
		offset = disp;
	      else
		output_addr_const (file, disp);
	    }

	  putc ('[', file);
	  if (base)
	    {
	      PRINT_REG (base, 0, file);
	      if (offset)
		{
		  if (INTVAL (offset) >= 0)
		    putc ('+', file);
		  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
		}
	    }
	  else if (offset)
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));

	  if (index)
	    {
	      putc ('+', file);
	      PRINT_REG (index, 0, file);
	      if (scale != 1)
		fprintf (file, "*%d", scale);
	    }
	  putc (']', file);
	}
    }
}
/* Split one or more DImode RTL references into pairs of SImode
   references.  The RTL can be REG, offsettable MEM, integer constant, or
   CONST_DOUBLE.  "operands" is a pointer to an array of DImode RTL to
   split and "num" is its length.  lo_half and hi_half are output arrays
   that parallel "operands".  */

void
split_di (operands, num, lo_half, hi_half)
     rtx operands[];
     int num;
     rtx lo_half[], hi_half[];
{
  while (num--)
    {
      rtx op = operands[num];
      if (CONSTANT_P (op))
	split_double (op, &lo_half[num], &hi_half[num]);
      else if (! reload_completed)
	{
	  lo_half[num] = gen_lowpart (SImode, op);
	  hi_half[num] = gen_highpart (SImode, op);
	}
      else if (GET_CODE (op) == REG)
	{
	  lo_half[num] = gen_rtx_REG (SImode, REGNO (op));
	  hi_half[num] = gen_rtx_REG (SImode, REGNO (op) + 1);
	}
      else if (offsettable_memref_p (op))
	{
	  rtx lo_addr = XEXP (op, 0);
	  rtx hi_addr = XEXP (adj_offsettable_operand (op, 4), 0);
	  lo_half[num] = change_address (op, SImode, lo_addr);
	  hi_half[num] = change_address (op, SImode, hi_addr);
	}
      else
	abort ();
    }
}
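
/* A minimal usage sketch for split_di (hypothetical caller, compiled out).
   Splitting one DImode operand yields its SImode low and high halves,
   which is how the DImode move and compare expanders below consume it.  */
#if 0
static void
example_split (op)
     rtx op;			/* a DImode REG, MEM or constant */
{
  rtx lo[1], hi[1];

  split_di (&op, 1, lo, hi);
  /* lo[0] is bits 0-31, hi[0] is bits 32-63; for a REG after reload
     the two halves are consecutive hard registers.  */
}
#endif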
/* Output code to perform a 387 binary operation in INSN, one of PLUS,
   MINUS, MULT or DIV.  OPERANDS are the insn operands, where operands[3]
   is the expression of the binary operation.  The output may either be
   emitted here, or returned to the caller, like all output_* functions.

   There is no guarantee that the operands are the same mode, as they
   might be within FLOAT or FLOAT_EXTEND expressions.  */

#ifndef SYSV386_COMPAT
/* Set to 1 for compatibility with brain-damaged assemblers.  No-one
   wants to fix the assemblers because that causes incompatibility
   with gcc.  No-one wants to fix gcc because that causes
   incompatibility with assemblers...  You can use the option of
   -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way.  */
#define SYSV386_COMPAT 1
#endif

const char *
output_387_binary_op (insn, operands)
     rtx insn;
     rtx *operands;
{
  static char buf[30];
  const char *p;

#ifdef ENABLE_CHECKING
  /* Even if we do not want to check the inputs, this documents input
     constraints.  Which helps in understanding the following code.  */
  if (STACK_REG_P (operands[0])
      && ((REG_P (operands[1])
	   && REGNO (operands[0]) == REGNO (operands[1])
	   && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
	  || (REG_P (operands[2])
	      && REGNO (operands[0]) == REGNO (operands[2])
	      && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
      && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
    ; /* ok */
  else
    abort ();
#endif

  switch (GET_CODE (operands[3]))
    {
    case PLUS:
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
	p = "fiadd";
      else
	p = "fadd";
      break;

    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
	p = "fisub";
      else
	p = "fsub";
      break;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
	p = "fimul";
      else
	p = "fmul";
      break;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
	  || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
	p = "fidiv";
      else
	p = "fdiv";
      break;

    default:
      abort ();
    }

  strcpy (buf, p);

  switch (GET_CODE (operands[3]))
    {
    case MULT:
    case PLUS:
      if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
	{
	  rtx temp = operands[2];
	  operands[2] = operands[1];
	  operands[1] = temp;
	}

      /* know operands[0] == operands[1].  */

      if (GET_CODE (operands[2]) == MEM)
	{
	  p = "%z2\t%2";
	  break;
	}

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
	{
	  if (STACK_TOP_P (operands[0]))
	    /* How is it that we are storing to a dead operand[2]?
	       Well, presumably operands[1] is dead too.  We can't
	       store the result to st(0) as st(0) gets popped on this
	       instruction.  Instead store to operands[2] (which I
	       think has to be st(1)).  st(1) will be popped later.
	       gcc <= 2.8.1 didn't have this check and generated
	       assembly code that the Unixware assembler rejected.  */
	    p = "p\t{%0, %2|%2, %0}";	/* st(1) = st(0) op st(1); pop */
	  else
	    p = "p\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0); pop */
	  break;
	}

      if (STACK_TOP_P (operands[0]))
	p = "\t{%y2, %0|%0, %y2}";	/* st(0) = st(0) op st(r2) */
      else
	p = "\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0) */
      break;

    case MINUS:
    case DIV:
      if (GET_CODE (operands[1]) == MEM)
	{
	  p = "r%z1\t%1";
	  break;
	}

      if (GET_CODE (operands[2]) == MEM)
	{
	  p = "%z2\t%2";
	  break;
	}

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
	{
#if SYSV386_COMPAT
	  /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
	     derived assemblers, confusingly reverse the direction of
	     the operation for fsub{r} and fdiv{r} when the
	     destination register is not st(0).  The Intel assembler
	     doesn't have this brain damage.  Read !SYSV386_COMPAT to
	     figure out what the hardware really does.  */
	  if (STACK_TOP_P (operands[0]))
	    p = "{p\t%0, %2|rp\t%2, %0}";
	  else
	    p = "{rp\t%2, %0|p\t%0, %2}";
#else
	  if (STACK_TOP_P (operands[0]))
	    /* As above for fmul/fadd, we can't store to st(0).  */
	    p = "rp\t{%0, %2|%2, %0}";	/* st(1) = st(0) op st(1); pop */
	  else
	    p = "p\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0); pop */
#endif
	  break;
	}

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
	{
#if SYSV386_COMPAT
	  if (STACK_TOP_P (operands[0]))
	    p = "{rp\t%0, %1|p\t%1, %0}";
	  else
	    p = "{p\t%1, %0|rp\t%0, %1}";
#else
	  if (STACK_TOP_P (operands[0]))
	    p = "p\t{%0, %1|%1, %0}";	/* st(1) = st(1) op st(0); pop */
	  else
	    p = "rp\t{%1, %0|%0, %1}";	/* st(r2) = st(0) op st(r2); pop */
#endif
	  break;
	}

      if (STACK_TOP_P (operands[0]))
	{
	  if (STACK_TOP_P (operands[1]))
	    p = "\t{%y2, %0|%0, %y2}";	/* st(0) = st(0) op st(r2) */
	  else
	    p = "r\t{%y1, %0|%0, %y1}";	/* st(0) = st(r1) op st(0) */
	  break;
	}
      else if (STACK_TOP_P (operands[1]))
	{
#if SYSV386_COMPAT
	  p = "{\t%1, %0|r\t%0, %1}";
#else
	  p = "r\t{%1, %0|%0, %1}";	/* st(r2) = st(0) op st(r2) */
#endif
	  break;
	}
      else
	{
#if SYSV386_COMPAT
	  p = "{r\t%2, %0|\t%0, %2}";
#else
	  p = "\t{%2, %0|%0, %2}";	/* st(r1) = st(r1) op st(0) */
#endif
	  break;
	}

    default:
      abort ();
    }

  strcat (buf, p);
  return buf;
}
/* Output code for INSN to convert a float to a signed int.  OPERANDS
   are the insn operands.  The output may be [HSD]Imode and the input
   operand may be [SDX]Fmode.  */

const char *
output_fix_trunc (insn, operands)
     rtx insn;
     rtx *operands;
{
  int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
  int dimode_p = GET_MODE (operands[0]) == DImode;
  rtx xops[4];

  /* Jump through a hoop or two for DImode, since the hardware has no
     non-popping instruction.  We used to do this a different way, but
     that was somewhat fragile and broke with post-reload splitters.  */
  if (dimode_p && !stack_top_dies)
    output_asm_insn ("fld\t%y1", operands);

  if (! STACK_TOP_P (operands[1]))
    abort ();

  xops[0] = GEN_INT (12);
  xops[1] = adj_offsettable_operand (operands[2], 1);
  xops[1] = change_address (xops[1], QImode, NULL_RTX);

  xops[2] = operands[0];
  if (GET_CODE (operands[0]) != MEM)
    xops[2] = operands[3];

  output_asm_insn ("fnstcw\t%2", operands);
  output_asm_insn ("mov{l}\t{%2, %4|%4, %2}", operands);
  output_asm_insn ("mov{b}\t{%0, %1|%1, %0}", xops);
  output_asm_insn ("fldcw\t%2", operands);
  output_asm_insn ("mov{l}\t{%4, %2|%2, %4}", operands);

  if (stack_top_dies || dimode_p)
    output_asm_insn ("fistp%z2\t%2", xops);
  else
    output_asm_insn ("fist%z2\t%2", xops);

  output_asm_insn ("fldcw\t%2", operands);

  if (GET_CODE (operands[0]) != MEM)
    {
      if (dimode_p)
	{
	  split_di (operands+0, 1, xops+0, xops+1);
	  split_di (operands+3, 1, xops+2, xops+3);
	  output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
	  output_asm_insn ("mov{l}\t{%3, %1|%1, %3}", xops);
	}
      else if (GET_MODE (operands[0]) == SImode)
	output_asm_insn ("mov{l}\t{%3, %0|%0, %3}", operands);
      else
	output_asm_insn ("mov{w}\t{%3, %0|%0, %3}", operands);
    }

  return "";
}
/* Output code for INSN to compare OPERANDS.  EFLAGS_P is 1 when fcomi
   should be used and 2 when fnstsw should be used.  UNORDERED_P is true
   when fucom should be used.  */

const char *
output_fp_compare (insn, operands, eflags_p, unordered_p)
     rtx insn;
     rtx *operands;
     int eflags_p, unordered_p;
{
  int stack_top_dies;
  rtx cmp_op0 = operands[0];
  rtx cmp_op1 = operands[1];

  if (eflags_p == 2)
    {
      cmp_op0 = cmp_op1;
      cmp_op1 = operands[2];
    }

  if (! STACK_TOP_P (cmp_op0))
    abort ();

  stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;

  if (STACK_REG_P (cmp_op1)
      && stack_top_dies
      && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
      && REGNO (cmp_op1) != FIRST_STACK_REG)
    {
      /* If both the top of the 387 stack dies, and the other operand
	 is also a stack register that dies, then this must be a
	 `fcompp' float compare.  */

      if (eflags_p == 1)
	{
	  /* There is no double popping fcomi variant.  Fortunately,
	     eflags is immune from the fstp's cc clobbering.  */
	  if (unordered_p)
	    output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
	  else
	    output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
	  return "fstp\t%y0";
	}
      else if (eflags_p == 2)
	{
	  if (unordered_p)
	    return "fucompp\n\tfnstsw\t%0";
	  else
	    return "fcompp\n\tfnstsw\t%0";
	}
      else
	{
	  if (unordered_p)
	    return "fucompp";
	  else
	    return "fcompp";
	}
    }
  else
    {
      /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies.  */

      static const char * const alt[24] =
      {
	"fcom%z2\t%y2",
	"fcomp%z2\t%y2",
	"fucom%z2\t%y2",
	"fucomp%z2\t%y2",

	"ficom%z2\t%y2",
	"ficomp%z2\t%y2",
	NULL,
	NULL,

	"fcomi\t{%y1, %0|%0, %y1}",
	"fcomip\t{%y1, %0|%0, %y1}",
	"fucomi\t{%y1, %0|%0, %y1}",
	"fucomip\t{%y1, %0|%0, %y1}",

	NULL,
	NULL,
	NULL,
	NULL,

	"fcom%z2\t%y2\n\tfnstsw\t%0",
	"fcomp%z2\t%y2\n\tfnstsw\t%0",
	"fucom%z2\t%y2\n\tfnstsw\t%0",
	"fucomp%z2\t%y2\n\tfnstsw\t%0",

	"ficom%z2\t%y2\n\tfnstsw\t%0",
	"ficomp%z2\t%y2\n\tfnstsw\t%0",
	NULL,
	NULL
      };

      int mask;
      const char *ret;

      mask  = eflags_p << 3;
      mask |= (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT) << 2;
      mask |= unordered_p << 1;
      mask |= stack_top_dies;

      if (mask >= 24)
	abort ();
      ret = alt[mask];
      if (ret == NULL)
	abort ();

      return ret;
    }
}
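
/* A worked example of the encoding above: for `fcomip' we have
   eflags_p == 1, an FP-mode operand (intmode == 0), unordered_p == 0 and
   a dying stack top (stack_top_dies == 1), so

	mask = (1 << 3) | (0 << 2) | (0 << 1) | 1 = 9

   which indexes the "fcomip\t{%y1, %0|%0, %y1}" entry of the table.  */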
/* Output assembler code to FILE to initialize basic-block profiling.

   If profile_block_flag == 2

	Output code to call the subroutine `__bb_init_trace_func'
	and pass two parameters to it.  The first parameter is
	the address of a block allocated in the object module.
	The second parameter is the number of the first basic block
	of the function.

	The name of the block is a local symbol made with this statement:

	    ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);

	Of course, since you are writing the definition of
	`ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
	can take a short cut in the definition of this macro and use the
	name that you know will result.

	The number of the first basic block of the function is
	passed to the macro in BLOCK_OR_LABEL.

	If described in a virtual assembler language the code to be
	output looks like:

		parameter1 <- LPBX0
		parameter2 <- BLOCK_OR_LABEL
		call __bb_init_trace_func

   else if profile_block_flag != 0

	Output code to call the subroutine `__bb_init_func'
	and pass one single parameter to it, which is the same
	as the first parameter to `__bb_init_trace_func'.

	The first word of this parameter is a flag which will be nonzero if
	the object module has already been initialized.  So test this word
	first, and do not call `__bb_init_func' if the flag is nonzero.
	Note: When profile_block_flag == 2 the test need not be done
	but `__bb_init_trace_func' *must* be called.

	BLOCK_OR_LABEL may be used to generate a label number as a
	branch destination in case `__bb_init_func' will not be called.

	If described in a virtual assembler language the code to be
	output looks like:

		cmp (LPBX0),0
		jne local_label
		parameter1 <- LPBX0
		call __bb_init_func
	    local_label:
*/

void
ix86_output_function_block_profiler (file, block_or_label)
     FILE *file;
     int block_or_label;
{
  static int num_func = 0;
  rtx xops[8];
  char block_table[80], false_label[80];

  ASM_GENERATE_INTERNAL_LABEL (block_table, "LPBX", 0);

  xops[1] = gen_rtx_SYMBOL_REF (VOIDmode, block_table);
  xops[5] = stack_pointer_rtx;
  xops[7] = gen_rtx_REG (Pmode, 0);	/* eax */

  CONSTANT_POOL_ADDRESS_P (xops[1]) = TRUE;

  switch (profile_block_flag)
    {
    case 2:
      xops[2] = GEN_INT (block_or_label);
      xops[3] = gen_rtx_MEM (Pmode,
			     gen_rtx_SYMBOL_REF (VOIDmode,
						 "__bb_init_trace_func"));
      xops[6] = GEN_INT (8);

      output_asm_insn ("push{l}\t%2", xops);
      if (!flag_pic)
	output_asm_insn ("push{l}\t%1", xops);
      else
	{
	  output_asm_insn ("lea{l}\t{%a1, %7|%7, %a1}", xops);
	  output_asm_insn ("push{l}\t%7", xops);
	}
      output_asm_insn ("call\t%P3", xops);
      output_asm_insn ("add{l}\t{%6, %5|%5, %6}", xops);
      break;

    default:
      ASM_GENERATE_INTERNAL_LABEL (false_label, "LPBZ", num_func);

      xops[0] = const0_rtx;
      xops[2] = gen_rtx_MEM (Pmode,
			     gen_rtx_SYMBOL_REF (VOIDmode, false_label));
      xops[3] = gen_rtx_MEM (Pmode,
			     gen_rtx_SYMBOL_REF (VOIDmode, "__bb_init_func"));
      xops[4] = gen_rtx_MEM (Pmode, xops[1]);
      xops[6] = GEN_INT (4);

      CONSTANT_POOL_ADDRESS_P (xops[2]) = TRUE;

      output_asm_insn ("cmp{l}\t{%0, %4|%4, %0}", xops);
      output_asm_insn ("jne\t%2", xops);

      if (!flag_pic)
	output_asm_insn ("push{l}\t%1", xops);
      else
	{
	  output_asm_insn ("lea{l}\t{%a1, %7|%7, %a1}", xops);
	  output_asm_insn ("push{l}\t%7", xops);
	}
      output_asm_insn ("call\t%P3", xops);
      output_asm_insn ("add{l}\t{%6, %5|%5, %6}", xops);
      ASM_OUTPUT_INTERNAL_LABEL (file, "LPBZ", num_func);
      num_func++;
      break;
    }
}
/* Output assembler code to FILE to increment a counter associated
   with basic block number BLOCKNO.

   If profile_block_flag == 2

	Output code to initialize the global structure `__bb' and
	call the function `__bb_trace_func' which will increment the
	counter.

	`__bb' consists of two words.  In the first word the number
	of the basic block has to be stored.  In the second word
	the address of a block allocated in the object module
	has to be stored.

	The basic block number is given by BLOCKNO.

	The address of the block is given by the label created with

	    ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);

	by FUNCTION_BLOCK_PROFILER.

	Of course, since you are writing the definition of
	`ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
	can take a short cut in the definition of this macro and use the
	name that you know will result.

	If described in a virtual assembler language the code to be
	output looks like:

		move BLOCKNO -> (__bb)
		move LPBX0 -> (__bb+4)
		call __bb_trace_func

	Note that function `__bb_trace_func' must not change the
	machine state, especially the flag register.  To grant
	this, you must output code to save and restore registers
	either in this macro or in the macros MACHINE_STATE_SAVE
	and MACHINE_STATE_RESTORE.  The last two macros will be
	used in the function `__bb_trace_func', so you must make
	sure that the function prologue does not change any
	register prior to saving it with MACHINE_STATE_SAVE.

   else if profile_block_flag != 0

	Output code to increment the counter directly.
	Basic blocks are numbered separately from zero within each
	compiled object module.  The count associated with block number
	BLOCKNO is at index BLOCKNO in an array of words; the name of
	this array is a local symbol made with this statement:

	    ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 2);

	Of course, since you are writing the definition of
	`ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
	can take a short cut in the definition of this macro and use the
	name that you know will result.

	If described in a virtual assembler language the code to be
	output looks like:

		inc (LPBX2+4*BLOCKNO)
*/

void
ix86_output_block_profiler (file, blockno)
     FILE *file ATTRIBUTE_UNUSED;
     int blockno;
{
  rtx xops[8], cnt_rtx;
  char counts[80];
  char *block_table = counts;

  switch (profile_block_flag)
    {
    case 2:
      ASM_GENERATE_INTERNAL_LABEL (block_table, "LPBX", 0);

      xops[1] = gen_rtx_SYMBOL_REF (VOIDmode, block_table);
      xops[2] = GEN_INT (blockno);
      xops[3] = gen_rtx_MEM (Pmode,
			     gen_rtx_SYMBOL_REF (VOIDmode, "__bb_trace_func"));
      xops[4] = gen_rtx_SYMBOL_REF (VOIDmode, "__bb");
      xops[5] = plus_constant (xops[4], 4);
      xops[0] = gen_rtx_MEM (SImode, xops[4]);
      xops[6] = gen_rtx_MEM (SImode, xops[5]);

      CONSTANT_POOL_ADDRESS_P (xops[1]) = TRUE;

      output_asm_insn ("pushf", xops);
      output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
      if (flag_pic)
	{
	  xops[7] = gen_rtx_REG (Pmode, 0);	/* eax */
	  output_asm_insn ("push{l}\t%7", xops);
	  output_asm_insn ("lea{l}\t{%a1, %7|%7, %a1}", xops);
	  output_asm_insn ("mov{l}\t{%7, %6|%6, %7}", xops);
	  output_asm_insn ("pop{l}\t%7", xops);
	}
      else
	output_asm_insn ("mov{l}\t{%1, %6|%6, %1}", xops);
      output_asm_insn ("call\t%P3", xops);
      output_asm_insn ("popf", xops);
      break;

    default:
      ASM_GENERATE_INTERNAL_LABEL (counts, "LPBX", 2);
      cnt_rtx = gen_rtx_SYMBOL_REF (VOIDmode, counts);
      SYMBOL_REF_FLAG (cnt_rtx) = TRUE;

      if (blockno)
	cnt_rtx = plus_constant (cnt_rtx, blockno * 4);

      if (flag_pic)
	cnt_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, cnt_rtx);

      xops[0] = gen_rtx_MEM (SImode, cnt_rtx);
      output_asm_insn ("inc{l}\t%0", xops);
      break;
    }
}
void
ix86_expand_move (mode, operands)
     enum machine_mode mode;
     rtx operands[];
{
  int strict = (reload_in_progress || reload_completed);
  rtx insn;

  if (flag_pic && mode == Pmode && symbolic_operand (operands[1], Pmode))
    {
      /* Emit insns to move operands[1] into operands[0].  */

      if (GET_CODE (operands[0]) == MEM)
	operands[1] = force_reg (Pmode, operands[1]);
      else
	{
	  rtx temp = operands[0];
	  if (GET_CODE (temp) != REG)
	    temp = gen_reg_rtx (Pmode);
	  temp = legitimize_pic_address (operands[1], temp);
	  if (temp == operands[0])
	    return;
	  operands[1] = temp;
	}
    }
  else
    {
      if (GET_CODE (operands[0]) == MEM
	  && (GET_MODE (operands[0]) == QImode
	      || !push_operand (operands[0], mode))
	  && GET_CODE (operands[1]) == MEM)
	operands[1] = force_reg (mode, operands[1]);

      if (push_operand (operands[0], mode)
	  && ! general_no_elim_operand (operands[1], mode))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (FLOAT_MODE_P (mode))
	{
	  /* If we are loading a floating point constant to a register,
	     force the value to memory now, since we'll get better code
	     out the back end.  */

	  if (strict)
	    ;
	  else if (GET_CODE (operands[1]) == CONST_DOUBLE
		   && register_operand (operands[0], mode))
	    operands[1] = validize_mem (force_const_mem (mode, operands[1]));
	}
    }

  insn = gen_rtx_SET (VOIDmode, operands[0], operands[1]);

  emit_insn (insn);
}
/* Attempt to expand a binary operator.  Make the expansion closer to the
   actual machine, then just general_operand, which will allow 3 separate
   memory references (one output, two input) in a single insn.  */

void
ix86_expand_binary_operator (code, mode, operands)
     enum rtx_code code;
     enum machine_mode mode;
     rtx operands[];
{
  int matching_memory;
  rtx src1, src2, dst, op, clob;

  dst = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  /* Recognize <var1> = <value> <op> <var1> for commutative operators */
  if (GET_RTX_CLASS (code) == 'c'
      && (rtx_equal_p (dst, src2)
	  || immediate_operand (src1, mode)))
    {
      rtx temp = src1;
      src1 = src2;
      src2 = temp;
    }

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  matching_memory = 0;
  if (GET_CODE (dst) == MEM)
    {
      if (rtx_equal_p (dst, src1))
	matching_memory = 1;
      else if (GET_RTX_CLASS (code) == 'c'
	       && rtx_equal_p (dst, src2))
	matching_memory = 2;
      else
	dst = gen_reg_rtx (mode);
    }

  /* Both source operands cannot be in memory.  */
  if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
    {
      if (matching_memory != 2)
	src2 = force_reg (mode, src2);
      else
	src1 = force_reg (mode, src1);
    }

  /* If the operation is not commutable, source 1 cannot be a constant
     or non-matching memory.  */
  if ((CONSTANT_P (src1)
       || (!matching_memory && GET_CODE (src1) == MEM))
      && GET_RTX_CLASS (code) != 'c')
    src1 = force_reg (mode, src1);

  /* If optimizing, copy to regs to improve CSE */
  if (optimize && ! no_new_pseudos)
    {
      if (GET_CODE (dst) == MEM)
	dst = gen_reg_rtx (mode);
      if (GET_CODE (src1) == MEM)
	src1 = force_reg (mode, src1);
      if (GET_CODE (src2) == MEM)
	src2 = force_reg (mode, src2);
    }

  /* Emit the instruction.  */

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
  if (reload_in_progress)
    {
      /* Reload doesn't know about the flags register, and doesn't know that
	 it doesn't want to clobber it.  We can only do this with PLUS.  */
      if (code != PLUS)
	abort ();
      emit_insn (op);
    }
  else
    {
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
    }

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
}
/* Return TRUE or FALSE depending on whether the binary operator meets the
   appropriate constraints.  */

int
ix86_binary_operator_ok (code, mode, operands)
     enum rtx_code code;
     enum machine_mode mode ATTRIBUTE_UNUSED;
     rtx operands[3];
{
  /* Both source operands cannot be in memory.  */
  if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
    return 0;
  /* If the operation is not commutable, source 1 cannot be a constant.  */
  if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != 'c')
    return 0;
  /* If the destination is memory, we must have a matching source operand.  */
  if (GET_CODE (operands[0]) == MEM
      && ! (rtx_equal_p (operands[0], operands[1])
	    || (GET_RTX_CLASS (code) == 'c'
		&& rtx_equal_p (operands[0], operands[2]))))
    return 0;
  /* If the operation is not commutable and source 1 is memory, we must
     have a matching destination.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_RTX_CLASS (code) != 'c'
      && ! rtx_equal_p (operands[0], operands[1]))
    return 0;

  return TRUE;
}
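
/* Concrete instances of the constraints above (a sketch): `a += b' with
   `a' in memory is fine, since the destination matches source 1; but for
   the non-commutative `a = b - c' with `a' in memory there is no matching
   source operand, and the expander above must first compute the result
   into a register and move it out afterwards.  */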
/* Attempt to expand a unary operator.  Make the expansion closer to the
   actual machine, then just general_operand, which will allow 2 separate
   memory references (one output, one input) in a single insn.  */

void
ix86_expand_unary_operator (code, mode, operands)
     enum rtx_code code;
     enum machine_mode mode;
     rtx operands[];
{
  int matching_memory;
  rtx src, dst, op, clob;

  dst = operands[0];
  src = operands[1];

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  matching_memory = 0;
  if (GET_CODE (dst) == MEM)
    {
      if (rtx_equal_p (dst, src))
	matching_memory = 1;
      else
	dst = gen_reg_rtx (mode);
    }

  /* When source operand is memory, destination must match.  */
  if (!matching_memory && GET_CODE (src) == MEM)
    src = force_reg (mode, src);

  /* If optimizing, copy to regs to improve CSE */
  if (optimize && ! no_new_pseudos)
    {
      if (GET_CODE (dst) == MEM)
	dst = gen_reg_rtx (mode);
      if (GET_CODE (src) == MEM)
	src = force_reg (mode, src);
    }

  /* Emit the instruction.  */

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
  if (reload_in_progress || code == NOT)
    {
      /* Reload doesn't know about the flags register, and doesn't know that
	 it doesn't want to clobber it.  */
      if (code != NOT)
	abort ();
      emit_insn (op);
    }
  else
    {
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
    }

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
}
/* Return TRUE or FALSE depending on whether the unary operator meets the
   appropriate constraints.  */

int
ix86_unary_operator_ok (code, mode, operands)
     enum rtx_code code ATTRIBUTE_UNUSED;
     enum machine_mode mode ATTRIBUTE_UNUSED;
     rtx operands[2] ATTRIBUTE_UNUSED;
{
  /* If one of operands is memory, source and destination must match.  */
  if ((GET_CODE (operands[0]) == MEM
       || GET_CODE (operands[1]) == MEM)
      && ! rtx_equal_p (operands[0], operands[1]))
    return FALSE;
  return TRUE;
}
/* Return TRUE or FALSE depending on whether the first SET in INSN
   has source and destination with matching CC modes, and that the
   CC mode is at least as constrained as REQ_MODE.  */

int
ix86_match_ccmode (insn, req_mode)
     rtx insn;
     enum machine_mode req_mode;
{
  rtx set;
  enum machine_mode set_mode;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  if (GET_CODE (set) != SET)
    abort ();

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCmode:
      if (req_mode == CCNOmode)
	return 0;
      /* FALLTHRU */
    case CCNOmode:
      if (req_mode == CCZmode)
	return 0;
      /* FALLTHRU */
    case CCZmode:
      break;

    default:
      abort ();
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}
/* Produce an unsigned comparison for a given signed comparison.  */

static enum rtx_code
unsigned_comparison (code)
     enum rtx_code code;
{
  switch (code)
    {
    case GT: code = GTU; break;
    case LT: code = LTU; break;
    case GE: code = GEU; break;
    case LE: code = LEU; break;
    case EQ:
    case NE:
    case GTU:
    case LTU:
    case GEU:
    case LEU:
      break;
    default:
      abort ();
    }
  return code;
}

/* Generate insn patterns to do an integer compare of OPERANDS.  */

static rtx
ix86_expand_int_compare (code, op0, op1)
     enum rtx_code code;
     rtx op0, op1;
{
  enum machine_mode cmpmode;
  rtx tmp, flags;

  cmpmode = SELECT_CC_MODE (code, op0, op1);
  flags = gen_rtx_REG (cmpmode, FLAGS_REG);

  /* This is very simple, but making the interface the same as in the
     FP case makes the rest of the code easier.  */
  tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
}
/* Figure out whether to use ordered or unordered fp comparisons.
   Return the appropriate mode to use.  */

static enum machine_mode
ix86_fp_compare_mode (code)
     enum rtx_code code;
{
  int unordered;

  switch (code)
    {
    case NE: case EQ:
      /* When not doing IEEE compliant compares, fault on NaNs.  */
      unordered = (TARGET_IEEE_FP != 0);
      break;

    case LT: case LE: case GT: case GE:
      unordered = 0;
      break;

    case UNORDERED: case ORDERED:
    case UNEQ: case UNGE: case UNGT: case UNLE: case UNLT: case LTGT:
      unordered = 1;
      break;

    default:
      abort ();
    }

  /* ??? If we knew whether invalid-operand exceptions were masked,
     we could rely on fcom to raise an exception and take care of
     NaNs.  But we don't.  We could know this from c99 math pragmas.  */

  return unordered ? CCFPUmode : CCFPmode;
}

/* Return true if we should use an FCOMI instruction for this fp comparison.  */

int
ix86_use_fcomi_compare (code)
     enum rtx_code code;
{
  return (TARGET_CMOVE
	  && (code == ORDERED || code == UNORDERED
	      /* All other unordered compares require checking
		 multiple sets of bits.  */
	      || ix86_fp_compare_mode (code) == CCFPmode));
}
/* Swap, force into registers, or otherwise massage the two operands
   to a fp comparison.  The operands are updated in place; the new
   comparison code is returned.  */

static enum rtx_code
ix86_prepare_fp_compare_args (code, pop0, pop1)
     enum rtx_code code;
     rtx *pop0, *pop1;
{
  enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
  rtx op0 = *pop0, op1 = *pop1;
  enum machine_mode op_mode = GET_MODE (op0);

  /* All of the unordered compare instructions only work on registers.
     The same is true of the XFmode compare instructions.  The same is
     true of the fcomi compare instructions.  */

  if (fpcmp_mode == CCFPUmode
      || op_mode == XFmode
      || ix86_use_fcomi_compare (code))
    {
      op0 = force_reg (op_mode, op0);
      op1 = force_reg (op_mode, op1);
    }
  else
    {
      /* %%% We only allow op1 in memory; op0 must be st(0).  So swap
	 things around if they appear profitable, otherwise force op0
	 into a register.  */

      if (standard_80387_constant_p (op0) == 0
	  || (GET_CODE (op0) == MEM
	      && ! (standard_80387_constant_p (op1) == 0
		    || GET_CODE (op1) == MEM)))
	{
	  rtx tmp;
	  tmp = op0, op0 = op1, op1 = tmp;
	  code = swap_condition (code);
	}

      if (GET_CODE (op0) != REG)
	op0 = force_reg (op_mode, op0);

      if (CONSTANT_P (op1))
	{
	  if (standard_80387_constant_p (op1))
	    op1 = force_reg (op_mode, op1);
	  else
	    op1 = validize_mem (force_const_mem (op_mode, op1));
	}
    }

  *pop0 = op0;
  *pop1 = op1;
  return code;
}
/* Generate insn patterns to do a floating point compare of OPERANDS.  */

static rtx
ix86_expand_fp_compare (code, op0, op1, scratch)
     enum rtx_code code;
     rtx op0, op1, scratch;
{
  enum machine_mode fpcmp_mode, intcmp_mode;
  rtx tmp;

  fpcmp_mode = ix86_fp_compare_mode (code);
  code = ix86_prepare_fp_compare_args (code, &op0, &op1);

  /* %%% fcomi is probably always faster, even when dealing with memory,
     since compare-and-branch would be three insns instead of four.  */
  if (ix86_use_fcomi_compare (code))
    {
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG), tmp);
      emit_insn (tmp);

      /* The FP codes work out to act like unsigned.  */
      code = unsigned_comparison (code);
      intcmp_mode = CCmode;
    }
  else
    {
      /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first.  */

      rtx tmp2;
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), 9);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));

      if (fpcmp_mode == CCFPmode
	  || code == ORDERED
	  || code == UNORDERED)
	{
	  /* We have two options here -- use sahf, or testing bits of ah
	     directly.  On PPRO, they are equivalent, sahf being one byte
	     smaller.  On Pentium, sahf is non-pairable while test is UV
	     pairable.  */

	  if (TARGET_USE_SAHF || optimize_size)
	    {
	    do_sahf:
	      emit_insn (gen_x86_sahf_1 (scratch));

	      /* The FP codes work out to act like unsigned.  */
	      code = unsigned_comparison (code);
	      intcmp_mode = CCmode;
	    }
	  else
	    {
	      /*
	       * The numbers below correspond to the bits of the FPSW in AH.
	       * C3, C2, and C0 are in bits 0x40, 0x4, and 0x01 respectively.
	       *
	       *    cmp    C3 C2 C0
	       *    >      0  0  0
	       *    <      0  0  1
	       *    =      1  0  0
	       *    un     1  1  1
	       */

	      int mask;

	      switch (code)
		{
		case GT:
		  mask = 0x41;
		  code = EQ;
		  break;
		case LT:
		  mask = 0x01;
		  code = NE;
		  break;
		case GE:
		  /* We'd have to use `xorb 1,ah; andb 0x41,ah', so it's
		     faster in all cases to just fall back on sahf.  */
		  goto do_sahf;
		case LE:
		  mask = 0x41;
		  code = NE;
		  break;
		case EQ:
		  mask = 0x40;
		  code = NE;
		  break;
		case NE:
		  mask = 0x40;
		  code = EQ;
		  break;
		case UNORDERED:
		  mask = 0x04;
		  code = NE;
		  break;
		case ORDERED:
		  mask = 0x04;
		  code = EQ;
		  break;
		default:
		  abort ();
		}

	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (mask)));
	      intcmp_mode = CCNOmode;
	    }
	}
      else
	{
	  /* In the unordered case, we have to check C2 for NaN's, which
	     doesn't happen to work out to anything nice combination-wise.
	     So do some bit twiddling on the value we've got in AH to come
	     up with an appropriate set of condition codes.  */

	  intcmp_mode = CCNOmode;
	  switch (code)
	    {
	    case GT:
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
	      code = EQ;
	      break;
	    case LT:
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
	      intcmp_mode = CCmode;
	      code = EQ;
	      break;
	    case GE:
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
	      code = EQ;
	      break;
	    case LE:
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
	      emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
	      intcmp_mode = CCmode;
	      code = LTU;
	      break;
	    case EQ:
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
	      intcmp_mode = CCmode;
	      code = EQ;
	      break;
	    case NE:
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, GEN_INT (0x40)));
	      code = NE;
	      break;
	    case UNORDERED:
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
	      code = NE;
	      break;
	    case ORDERED:
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
	      code = EQ;
	      break;
	    case UNEQ:
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
	      code = NE;
	      break;
	    case UNGE:
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, GEN_INT (0x01)));
	      code = NE;
	      break;
	    case UNGT:
	      emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
	      emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
	      emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
	      intcmp_mode = CCmode;
	      code = GEU;
	      break;
	    case UNLE:
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
	      code = NE;
	      break;
	    case UNLT:
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
	      code = NE;
	      break;
	    case LTGT:
	      emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
	      code = EQ;
	      break;
	    default:
	      abort ();
	    }
	}
    }

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode,
			 gen_rtx_REG (intcmp_mode, FLAGS_REG),
			 const0_rtx);
}
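
/* A worked example of the AH bit tests above: after `fnstsw' the FPSW
   lands in AX, with C0 at 0x01, C2 at 0x04 and C3 at 0x40 of the high
   byte (AH).  In the unordered branch, LT becomes `and $0x45, %ah;
   cmp $0x01, %ah', true exactly when the compare produced C0=1, C2=0,
   C3=0 -- the "<" row of the table in the function body.  Testing 0x04
   alone isolates C2, which is why the UNORDERED and ORDERED cases need
   only a single `test'.  */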
rtx
ix86_expand_compare (code)
     enum rtx_code code;
{
  rtx op0, op1, ret;

  op0 = ix86_compare_op0;
  op1 = ix86_compare_op1;

  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    ret = ix86_expand_fp_compare (code, op0, op1, gen_reg_rtx (HImode));
  else
    ret = ix86_expand_int_compare (code, op0, op1);

  return ret;
}
void
ix86_expand_branch (code, label)
     enum rtx_code code;
     rtx label;
{
  rtx tmp;

  switch (GET_MODE (ix86_compare_op0))
    {
    case QImode:
    case HImode:
    case SImode:
      tmp = ix86_expand_compare (code);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                  gen_rtx_LABEL_REF (VOIDmode, label),
                                  pc_rtx);
      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
      return;

    case SFmode:
    case DFmode:
    case XFmode:
      {
        rtvec vec;
        int use_fcomi;

        /* Don't expand the comparison early, so that we get better code
           when jump or whoever decides to reverse the comparison.  */

        code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
                                             &ix86_compare_op1);

        tmp = gen_rtx_fmt_ee (code, ix86_fp_compare_mode (code),
                              ix86_compare_op0, ix86_compare_op1);
        tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                    gen_rtx_LABEL_REF (VOIDmode, label),
                                    pc_rtx);
        tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);

        use_fcomi = ix86_use_fcomi_compare (code);
        vec = rtvec_alloc (3 + !use_fcomi);
        RTVEC_ELT (vec, 0) = tmp;
        RTVEC_ELT (vec, 1)
          = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
        RTVEC_ELT (vec, 2)
          = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
        if (! use_fcomi)
          RTVEC_ELT (vec, 3)
            = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));

        emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
        return;
      }

    case DImode:
      /* Expand DImode branch into multiple compare+branch.  */
      {
        rtx lo[2], hi[2], label2;
        enum rtx_code code1, code2, code3;

        if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
          {
            tmp = ix86_compare_op0;
            ix86_compare_op0 = ix86_compare_op1;
            ix86_compare_op1 = tmp;
            code = swap_condition (code);
          }
        split_di (&ix86_compare_op0, 1, lo+0, hi+0);
        split_di (&ix86_compare_op1, 1, lo+1, hi+1);

        /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
           avoid two branches.  This costs one extra insn, so disable when
           optimizing for size.  */
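        /* For instance, a 64-bit equality test against 0x100000000 becomes
           (hi0 ^ 1) | lo0 compared against zero: one xorl and one orl
           feeding a single jcc, instead of two compare-and-branch pairs
           (an illustrative example).  */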
        if ((code == EQ || code == NE)
            && (!optimize_size
                || hi[1] == const0_rtx || lo[1] == const0_rtx))
          {
            rtx xor0, xor1;

            xor1 = hi[0];
            if (hi[1] != const0_rtx)
              xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
                                   NULL_RTX, 0, OPTAB_WIDEN);

            xor0 = lo[0];
            if (lo[1] != const0_rtx)
              xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
                                   NULL_RTX, 0, OPTAB_WIDEN);

            tmp = expand_binop (SImode, ior_optab, xor1, xor0,
                                NULL_RTX, 0, OPTAB_WIDEN);

            ix86_compare_op0 = tmp;
            ix86_compare_op1 = const0_rtx;
            ix86_expand_branch (code, label);
            return;
          }

        /* Otherwise, if we are doing less-than, op1 is a constant and the
           low word is zero, then we can just examine the high word.  */

        if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx
            && (code == LT || code == LTU))
          {
            ix86_compare_op0 = hi[0];
            ix86_compare_op1 = hi[1];
            ix86_expand_branch (code, label);
            return;
          }

        /* Otherwise, we need two or three jumps.  */

        label2 = gen_label_rtx ();

        code1 = code;
        code2 = swap_condition (code);
        code3 = unsigned_condition (code);

        switch (code)
          {
          case LT: case GT: case LTU: case GTU:
            break;

          case LE:   code1 = LT;  code2 = GT;  break;
          case GE:   code1 = GT;  code2 = LT;  break;
          case LEU:  code1 = LTU; code2 = GTU; break;
          case GEU:  code1 = GTU; code2 = LTU; break;

          case EQ:   code1 = NIL; code2 = NE;  break;
          case NE:   code2 = NIL; break;

          default:
            abort ();
          }

        /*
         * a < b =>
         *    if (hi(a) < hi(b)) goto true;
         *    if (hi(a) > hi(b)) goto false;
         *    if (lo(a) < lo(b)) goto true;
         *  false:
         */

        ix86_compare_op0 = hi[0];
        ix86_compare_op1 = hi[1];

        if (code1 != NIL)
          ix86_expand_branch (code1, label);
        if (code2 != NIL)
          ix86_expand_branch (code2, label2);

        ix86_compare_op0 = lo[0];
        ix86_compare_op1 = lo[1];
        ix86_expand_branch (code3, label);

        if (code2 != NIL)
          emit_label (label2);
        return;
      }

    default:
      abort ();
    }
}
int
ix86_expand_setcc (code, dest)
     enum rtx_code code;
     rtx dest;
{
  rtx ret, tmp;
  int type;

  if (GET_MODE (ix86_compare_op0) == DImode)
    return 0; /* FAIL */

  /* Three modes of generation:
     0 -- destination does not overlap compare sources:
          clear dest first, emit strict_low_part setcc.
     1 -- destination does overlap compare sources:
          emit subreg setcc, zero extend.
     2 -- destination is in QImode:
          emit setcc only.  */

  type = 0;
  if (GET_MODE (dest) == QImode)
    type = 2;
  else if (reg_overlap_mentioned_p (dest, ix86_compare_op0)
           || reg_overlap_mentioned_p (dest, ix86_compare_op1))
    type = 1;

  if (type == 0)
    emit_move_insn (dest, const0_rtx);

  ret = ix86_expand_compare (code);
  PUT_MODE (ret, QImode);

  tmp = dest;
  if (type == 0)
    {
      tmp = gen_lowpart (QImode, dest);
      tmp = gen_rtx_STRICT_LOW_PART (VOIDmode, tmp);
    }
  else if (type == 1)
    {
      if (!cse_not_expected)
        tmp = gen_reg_rtx (QImode);
      else
        tmp = gen_lowpart (QImode, dest);
    }

  emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));

  if (type == 1)
    {
      rtx clob;

      tmp = gen_rtx_ZERO_EXTEND (GET_MODE (dest), tmp);
      tmp = gen_rtx_SET (VOIDmode, dest, tmp);
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
      emit_insn (tmp);
    }

  return 1; /* DONE */
}
int
ix86_expand_int_movcc (operands)
     rtx operands[];
{
  enum rtx_code code = GET_CODE (operands[1]), compare_code;
  rtx compare_seq, compare_op;

  /* When the compare code is not LTU or GEU, we cannot use the sbbl case.
     In case the comparison is done with an immediate, we can convert it
     to LTU or GEU by altering the integer.  */
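  /* For instance, an unsigned "x <= 5" (LEU with immediate 5) is
     rewritten below as "x < 6" (LTU with 6), a form the sbb sequence
     can consume directly.  */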
  if ((code == LEU || code == GTU)
      && GET_CODE (ix86_compare_op1) == CONST_INT
      && GET_MODE (operands[0]) != HImode
      && (unsigned int) INTVAL (ix86_compare_op1) != 0xffffffff
      && GET_CODE (operands[2]) == CONST_INT
      && GET_CODE (operands[3]) == CONST_INT)
    {
      if (code == LEU)
        code = LTU;
      else
        code = GEU;
      ix86_compare_op1 = GEN_INT (INTVAL (ix86_compare_op1) + 1);
    }

  start_sequence ();
  compare_op = ix86_expand_compare (code);
  compare_seq = gen_sequence ();
  end_sequence ();

  compare_code = GET_CODE (compare_op);

  /* Don't attempt mode expansion here -- if we had to expand 5 or 6
     HImode insns, we'd be swallowed in word prefix ops.  */

  if (GET_MODE (operands[0]) != HImode
      && GET_CODE (operands[2]) == CONST_INT
      && GET_CODE (operands[3]) == CONST_INT)
    {
      rtx out = operands[0];
      HOST_WIDE_INT ct = INTVAL (operands[2]);
      HOST_WIDE_INT cf = INTVAL (operands[3]);
      HOST_WIDE_INT diff;

      if (compare_code == LTU || compare_code == GEU)
        {
          /* Detect overlap between destination and compare sources.  */
          rtx tmp = out;

          /* To simplify rest of code, restrict to the GEU case.  */
          if (compare_code == LTU)
            {
              HOST_WIDE_INT tmp = ct;
              ct = cf;
              cf = tmp;
              compare_code = reverse_condition (compare_code);
              code = reverse_condition (code);
            }
          diff = ct - cf;

          if (reg_overlap_mentioned_p (out, ix86_compare_op0)
              || reg_overlap_mentioned_p (out, ix86_compare_op1))
            tmp = gen_reg_rtx (SImode);

          emit_insn (compare_seq);
          emit_insn (gen_x86_movsicc_0_m1 (tmp));
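          /* x86_movsicc_0_m1 expands to "sbbl %reg, %reg", which leaves
             the register -1 when the carry flag is set (op0 <u op1) and
             0 otherwise; the arithmetic below maps that 0/-1 mask onto
             the requested ct/cf pair.  */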
          if (diff == 1)
            {
              /*
               * cmpl op0,op1
               * sbbl dest,dest
               * [addl dest, ct]
               *
               * Size 5 - 8.
               */
              if (ct)
                emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (ct)));
            }
          else if (cf == -1)
            {
              /*
               * cmpl op0,op1
               * sbbl dest,dest
               * orl $ct, dest
               *
               * Size 8.
               */
              emit_insn (gen_iorsi3 (tmp, tmp, GEN_INT (ct)));
            }
          else if (diff == -1 && ct)
            {
              /*
               * cmpl op0,op1
               * sbbl dest,dest
               * notl dest
               * [addl dest, cf]
               *
               * Size 8 - 11.
               */
              emit_insn (gen_one_cmplsi2 (tmp, tmp));
              if (cf)
                emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (cf)));
            }
          else
            {
              /*
               * cmpl op0,op1
               * sbbl dest,dest
               * andl cf - ct, dest
               * [addl dest, ct]
               *
               * Size 8 - 11.
               */
              emit_insn (gen_andsi3 (tmp, tmp, GEN_INT (cf - ct)));
              if (ct)
                emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (ct)));
            }

          if (tmp != out)
            emit_move_insn (out, tmp);

          return 1; /* DONE */
        }

      diff = ct - cf;
      if (diff < 0)
        {
          HOST_WIDE_INT tmp;
          tmp = ct, ct = cf, cf = tmp;
          diff = -diff;
          compare_code = reverse_condition (compare_code);
          code = reverse_condition (code);
        }
      if (diff == 1 || diff == 2 || diff == 4 || diff == 8
          || diff == 3 || diff == 5 || diff == 9)
        {
          /*
           * xorl dest,dest
           * cmpl op1,op2
           * setcc dest
           * lea cf(dest*(ct-cf)),dest
           *
           * Size 14.
           *
           * This also catches the degenerate setcc-only case.
           */
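          /* For instance, (x ? 7 : 3) has diff == 4: the 0/1 setcc result
             is combined by a single lea, 3(,%reg,4), yielding 3 or 7
             without a branch (illustrative example).  */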
          rtx tmp;
          int nops;

          out = emit_store_flag (out, code, ix86_compare_op0,
                                 ix86_compare_op1, VOIDmode, 0, 1);

          nops = 0;
          tmp = out;
          if (diff != 1)
            {
              tmp = gen_rtx_MULT (SImode, out, GEN_INT (diff & ~1));
              nops++;
              if (diff & 1)
                {
                  tmp = gen_rtx_PLUS (SImode, tmp, out);
                  nops++;
                }
            }
          if (cf != 0)
            {
              tmp = gen_rtx_PLUS (SImode, tmp, GEN_INT (cf));
              nops++;
            }

          if (nops == 0)
            emit_move_insn (out, tmp);
          else if (nops == 1)
            {
              rtx clob;

              clob = gen_rtx_REG (CCmode, FLAGS_REG);
              clob = gen_rtx_CLOBBER (VOIDmode, clob);

              tmp = gen_rtx_SET (VOIDmode, out, tmp);
              tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
              emit_insn (tmp);
            }
          else
            emit_insn (gen_rtx_SET (VOIDmode, out, tmp));

          if (out != operands[0])
            emit_move_insn (operands[0], out);

          return 1; /* DONE */
        }

      /*
       * General case:			Jumpful:
       *   xorl dest,dest		cmpl op1, op2
       *   cmpl op1, op2		movl ct, dest
       *   setcc dest			jcc 1f
       *   decl dest			movl cf, dest
       *   andl (cf-ct),dest		1:
       *   addl ct,dest
       *
       * Size 20.			Size 14.
       *
       * This is reasonably steep, but branch mispredict costs are
       * high on modern cpus, so consider failing only if optimizing
       * for space.
       *
       * %%% Parameterize branch_cost on the tuning architecture, then
       * use that.  The 80386 couldn't care less about mispredicts.
       */

      if (!optimize_size && !TARGET_CMOVE)
        {
          if (ct == 0)
            {
              ct = cf;
              cf = 0;
              compare_code = reverse_condition (compare_code);
              code = reverse_condition (code);
            }

          out = emit_store_flag (out, code, ix86_compare_op0,
                                 ix86_compare_op1, VOIDmode, 0, 1);

          emit_insn (gen_addsi3 (out, out, constm1_rtx));
          emit_insn (gen_andsi3 (out, out, GEN_INT (cf-ct)));
          if (ct)
            emit_insn (gen_addsi3 (out, out, GEN_INT (ct)));
          if (out != operands[0])
            emit_move_insn (operands[0], out);

          return 1; /* DONE */
        }
    }

  if (!TARGET_CMOVE)
    {
      /* Try a few things more with specific constants and a variable.  */

      optab op;
      rtx var, orig_out, out, tmp;

      if (optimize_size)
        return 0; /* FAIL */

      /* If one of the two operands is an interesting constant, load a
         constant with the above and mask it in with a logical operation.  */

      if (GET_CODE (operands[2]) == CONST_INT)
        {
          var = operands[3];
          if (INTVAL (operands[2]) == 0)
            operands[3] = constm1_rtx, op = and_optab;
          else if (INTVAL (operands[2]) == -1)
            operands[3] = const0_rtx, op = ior_optab;
          else
            return 0; /* FAIL */
        }
      else if (GET_CODE (operands[3]) == CONST_INT)
        {
          var = operands[2];
          if (INTVAL (operands[3]) == 0)
            operands[2] = constm1_rtx, op = and_optab;
          else if (INTVAL (operands[3]) == -1)
            operands[2] = const0_rtx, op = ior_optab;
          else
            return 0; /* FAIL */
        }
      else
        return 0; /* FAIL */

      orig_out = operands[0];
      tmp = gen_reg_rtx (GET_MODE (orig_out));
      operands[0] = tmp;

      /* Recurse to get the constant loaded.  */
      if (ix86_expand_int_movcc (operands) == 0)
        return 0; /* FAIL */

      /* Mask in the interesting variable.  */
      out = expand_binop (GET_MODE (orig_out), op, var, tmp, orig_out, 0,
                          OPTAB_WIDEN);
      if (out != orig_out)
        emit_move_insn (orig_out, out);

      return 1; /* DONE */
    }

  /*
   * For comparison with above,
   *
   * movl cf,dest
   * movl ct,tmp
   * cmpl op1,op2
   * cmovcc tmp,dest
   *
   * Size 15.
   */

  if (! nonimmediate_operand (operands[2], GET_MODE (operands[0])))
    operands[2] = force_reg (GET_MODE (operands[0]), operands[2]);
  if (! nonimmediate_operand (operands[3], GET_MODE (operands[0])))
    operands[3] = force_reg (GET_MODE (operands[0]), operands[3]);

  emit_insn (compare_seq);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
                                                compare_op, operands[2],
                                                operands[3])));

  return 1; /* DONE */
}
int
ix86_expand_fp_movcc (operands)
     rtx operands[];
{
  enum rtx_code code;
  rtx tmp;
  enum machine_mode mode;

  /* The floating point conditional move instructions don't directly
     support conditions resulting from a signed integer comparison.  */

  code = GET_CODE (operands[1]);
  if (code == LT || code == LE || code == GE || code == GT)
    {
      tmp = gen_reg_rtx (QImode);
      ix86_expand_setcc (code, tmp);
      code = NE;
      ix86_compare_op0 = tmp;
      ix86_compare_op1 = const0_rtx;
    }

  mode = SELECT_CC_MODE (code, ix86_compare_op0, ix86_compare_op1);
  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (mode, FLAGS_REG),
                          gen_rtx_COMPARE (mode,
                                           ix86_compare_op0,
                                           ix86_compare_op1)));
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
                                                gen_rtx_fmt_ee (code, VOIDmode,
                                                                gen_rtx_REG (mode, FLAGS_REG),
                                                                const0_rtx),
                                                operands[2],
                                                operands[3])));

  return 1; /* DONE */
}

/* Split operands 0 and 1 into SImode parts.  Similar to split_di, but
   works for floating point parameters and non-offsettable memories.
   For pushes, it returns just stack offsets; the values will be saved
   in the right order.  Maximally three parts are generated.  */

static int
ix86_split_to_parts (operand, parts, mode)
     rtx operand;
     rtx *parts;
     enum machine_mode mode;
{
  int size = GET_MODE_SIZE (mode) / 4;

  if (size < 2 || size > 3)
    abort ();

  /* Optimize constant pool reference to immediates.  This is used by fp moves,
     that force all constants to memory to allow combining.  */

  if (GET_CODE (operand) == MEM
      && GET_CODE (XEXP (operand, 0)) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (XEXP (operand, 0)))
    operand = get_pool_constant (XEXP (operand, 0));

  if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
    {
      /* The only non-offsettable memories we handle are pushes.  */
      if (! push_operand (operand, VOIDmode))
        abort ();

      PUT_MODE (operand, SImode);
      parts[0] = parts[1] = parts[2] = operand;
    }
  else if (mode == DImode)
    split_di (&operand, 1, &parts[0], &parts[1]);
  else
    {
      if (REG_P (operand))
        {
          if (!reload_completed)
            abort ();
          parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
          parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
          if (size == 3)
            parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
        }
      else if (offsettable_memref_p (operand))
        {
          PUT_MODE (operand, SImode);
          parts[0] = operand;
          parts[1] = adj_offsettable_operand (operand, 4);
          if (size == 3)
            parts[2] = adj_offsettable_operand (operand, 8);
        }
      else if (GET_CODE (operand) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE r;
          long l[3];

          REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
          if (mode == XFmode)
            {
              REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
              parts[2] = GEN_INT (l[2]);
            }
          else
            REAL_VALUE_TO_TARGET_DOUBLE (r, l);

          parts[1] = GEN_INT (l[1]);
          parts[0] = GEN_INT (l[0]);
        }
      else
        abort ();
    }

  return size;
}
/* Emit insns to perform a move or push of DI, DF, and XF values.
   Return false when normal moves are needed; true when all required
   insns have been emitted.  Operands 2-4 contain the input values
   in the correct order; operands 5-7 contain the output values.  */

int
ix86_split_long_move (operands1)
     rtx operands1[];
{
  rtx part[2][3];
  rtx operands[2];
  int size = GET_MODE_SIZE (GET_MODE (operands1[0])) / 4;
  int push = 0;
  int collisions = 0;

  /* Make our own copy to avoid clobbering the operands.  */
  operands[0] = copy_rtx (operands1[0]);
  operands[1] = copy_rtx (operands1[1]);

  if (size < 2 || size > 3)
    abort ();

  /* The only non-offsettable memory we handle is push.  */
  if (push_operand (operands[0], VOIDmode))
    push = 1;
  else if (GET_CODE (operands[0]) == MEM
           && ! offsettable_memref_p (operands[0]))
    abort ();

  ix86_split_to_parts (operands[0], part[0], GET_MODE (operands1[0]));
  ix86_split_to_parts (operands[1], part[1], GET_MODE (operands1[0]));

  /* When emitting push, take care for source operands on the stack.  */
  if (push && GET_CODE (operands[1]) == MEM
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
    {
      if (size == 3)
        part[1][1] = part[1][2];
      part[1][0] = part[1][1];
    }

  /* We need to do copy in the right order in case an address register
     of the source overlaps the destination.  */
  if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
    {
      if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
        collisions++;
      if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
        collisions++;
      if (size == 3
          && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
        collisions++;

      /* Collision in the middle part can be handled by reordering.  */
      if (collisions == 1 && size == 3
          && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
        {
          rtx tmp;
          tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
          tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
        }

      /* If there are more collisions, we can't handle it by reordering.
         Do an lea to the last part and use only one colliding move.  */
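      /* For instance, loading three words from (%eax,%edx) into
         %eax/%edx/%ecx collides in two of them; the lea below moves the
         address into the last part (%ecx here), so only the final move
         still collides (illustrative example).  */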
      else if (collisions > 1)
        {
          collisions = 1;
          emit_insn (gen_rtx_SET (VOIDmode, part[0][size - 1],
                                  XEXP (part[1][0], 0)));
          part[1][0] = change_address (part[1][0], SImode, part[0][size - 1]);
          part[1][1] = adj_offsettable_operand (part[1][0], 4);
          if (size == 3)
            part[1][2] = adj_offsettable_operand (part[1][0], 8);
        }
    }

  if (push)
    {
      if (size == 3)
        emit_insn (gen_push (part[1][2]));
      emit_insn (gen_push (part[1][1]));
      emit_insn (gen_push (part[1][0]));
      return 1;
    }

  /* Choose correct order to not overwrite the source before it is copied.  */
  if ((REG_P (part[0][0])
       && REG_P (part[1][1])
       && (REGNO (part[0][0]) == REGNO (part[1][1])
           || (size == 3
               && REGNO (part[0][0]) == REGNO (part[1][2]))))
      || (collisions > 0
          && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
    {
      if (size == 3)
        {
          operands1[2] = part[0][2];
          operands1[3] = part[0][1];
          operands1[4] = part[0][0];
          operands1[5] = part[1][2];
          operands1[6] = part[1][1];
          operands1[7] = part[1][0];
        }
      else
        {
          operands1[2] = part[0][1];
          operands1[3] = part[0][0];
          operands1[5] = part[1][1];
          operands1[6] = part[1][0];
        }
    }
  else
    {
      if (size == 3)
        {
          operands1[2] = part[0][0];
          operands1[3] = part[0][1];
          operands1[4] = part[0][2];
          operands1[5] = part[1][0];
          operands1[6] = part[1][1];
          operands1[7] = part[1][2];
        }
      else
        {
          operands1[2] = part[0][0];
          operands1[3] = part[0][1];
          operands1[5] = part[1][0];
          operands1[6] = part[1][1];
        }
    }

  return 0;
}
void
ix86_split_ashldi (operands, scratch)
     rtx *operands, scratch;
{
  rtx low[2], high[2];
  int count;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      split_di (operands, 2, low, high);
      count = INTVAL (operands[2]) & 63;
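      /* E.g. a constant shift of 40 only has to move the low word: it
         becomes "movl low, high; xorl low, low; sall $8, high" via the
         count >= 32 path below (illustrative example).  */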
      if (count >= 32)
        {
          emit_move_insn (high[0], low[1]);
          emit_move_insn (low[0], const0_rtx);

          if (count > 32)
            emit_insn (gen_ashlsi3 (high[0], high[0], GEN_INT (count - 32)));
        }
      else
        {
          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
          emit_insn (gen_ashlsi3 (low[0], low[0], GEN_INT (count)));
        }
    }
  else
    {
      if (!rtx_equal_p (operands[0], operands[1]))
        emit_move_insn (operands[0], operands[1]);

      split_di (operands, 1, low, high);

      emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
      emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));

      if (TARGET_CMOVE && (! no_new_pseudos || scratch))
        {
          if (! no_new_pseudos)
            scratch = force_reg (SImode, const0_rtx);
          else
            emit_move_insn (scratch, const0_rtx);

          emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2],
                                          scratch));
        }
      else
        emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
    }
}
void
ix86_split_ashrdi (operands, scratch)
     rtx *operands, scratch;
{
  rtx low[2], high[2];
  int count;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      split_di (operands, 2, low, high);
      count = INTVAL (operands[2]) & 63;

      if (count >= 32)
        {
          emit_move_insn (low[0], high[1]);

          if (! reload_completed)
            emit_insn (gen_ashrsi3 (high[0], low[0], GEN_INT (31)));
          else
            {
              emit_move_insn (high[0], low[0]);
              emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
            }

          if (count > 32)
            emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
        }
      else
        {
          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
          emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
        }
    }
  else
    {
      if (!rtx_equal_p (operands[0], operands[1]))
        emit_move_insn (operands[0], operands[1]);

      split_di (operands, 1, low, high);

      emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
      emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));

      if (TARGET_CMOVE && (! no_new_pseudos || scratch))
        {
          if (! no_new_pseudos)
            scratch = gen_reg_rtx (SImode);
          emit_move_insn (scratch, high[0]);
          emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
          emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
                                          scratch));
        }
      else
        emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
    }
}
void
ix86_split_lshrdi (operands, scratch)
     rtx *operands, scratch;
{
  rtx low[2], high[2];
  int count;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      split_di (operands, 2, low, high);
      count = INTVAL (operands[2]) & 63;

      if (count >= 32)
        {
          emit_move_insn (low[0], high[1]);
          emit_move_insn (high[0], const0_rtx);

          if (count > 32)
            emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
        }
      else
        {
          if (!rtx_equal_p (operands[0], operands[1]))
            emit_move_insn (operands[0], operands[1]);
          emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
          emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
        }
    }
  else
    {
      if (!rtx_equal_p (operands[0], operands[1]))
        emit_move_insn (operands[0], operands[1]);

      split_di (operands, 1, low, high);

      emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
      emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));

      /* Heh.  By reversing the arguments, we can reuse this pattern.  */
      if (TARGET_CMOVE && (! no_new_pseudos || scratch))
        {
          if (! no_new_pseudos)
            scratch = force_reg (SImode, const0_rtx);
          else
            emit_move_insn (scratch, const0_rtx);

          emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
                                          scratch));
        }
      else
        emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
    }
}
/* Expand the appropriate insns for doing strlen if not just doing
   repnz; scasb

   out = result, initialized with the start address
   align_rtx = alignment of the address.
   scratch = scratch register, initialized with the start address when
        not aligned, otherwise undefined

   This is just the body.  It needs the initializations mentioned above
   and some address computing at the end.  These things are done in
   i386.md.  */

static void
ix86_expand_strlensi_unroll_1 (out, align_rtx, scratch)
     rtx out, align_rtx, scratch;
{
  int align;
  rtx tmp;
  rtx align_2_label = NULL_RTX;
  rtx align_3_label = NULL_RTX;
  rtx align_4_label = gen_label_rtx ();
  rtx end_0_label = gen_label_rtx ();
  rtx mem;
  rtx no_flags = gen_rtx_REG (CCNOmode, FLAGS_REG);
  rtx z_flags = gen_rtx_REG (CCNOmode, FLAGS_REG);
  rtx tmpreg = gen_reg_rtx (SImode);

  align = 0;
  if (GET_CODE (align_rtx) == CONST_INT)
    align = INTVAL (align_rtx);

  /* Loop to check 1..3 bytes for null to get an aligned pointer.  */

  /* Is there a known alignment and is it less than 4?  */
  if (align < 4)
    {
      /* Is there a known alignment and is it not 2? */
      if (align != 2)
        {
          align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
          align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */

          /* Leave just the 3 lower bits.  */
          align_rtx = expand_binop (SImode, and_optab, scratch, GEN_INT (3),
                                    NULL_RTX, 0, OPTAB_WIDEN);

          emit_insn (gen_cmpsi_ccz_1 (align_rtx, const0_rtx));

          tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
          tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                      gen_rtx_LABEL_REF (VOIDmode,
                                                         align_4_label),
                                      pc_rtx);
          emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));

          emit_insn (gen_cmpsi_ccno_1 (align_rtx, GEN_INT (2)));

          tmp = gen_rtx_EQ (VOIDmode, no_flags, const0_rtx);
          tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                      gen_rtx_LABEL_REF (VOIDmode,
                                                         align_2_label),
                                      pc_rtx);
          emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));

          tmp = gen_rtx_GTU (VOIDmode, no_flags, const0_rtx);
          tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                      gen_rtx_LABEL_REF (VOIDmode,
                                                         align_3_label),
                                      pc_rtx);
          emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
        }
      else
        {
          /* Since the alignment is 2, we have to check 2 or 0 bytes;
             check if it is aligned to 4 bytes.  */

          align_rtx = expand_binop (SImode, and_optab, scratch, GEN_INT (2),
                                    NULL_RTX, 0, OPTAB_WIDEN);

          emit_insn (gen_cmpsi_ccz_1 (align_rtx, const0_rtx));

          tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
          tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                      gen_rtx_LABEL_REF (VOIDmode,
                                                         align_4_label),
                                      pc_rtx);
          emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
        }

      mem = gen_rtx_MEM (QImode, out);

      /* Now compare the bytes.  */

      /* Compare the first n unaligned byte on a byte per byte basis.  */
      emit_insn (gen_cmpqi_ccz_1 (mem, const0_rtx));

      tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                  gen_rtx_LABEL_REF (VOIDmode, end_0_label),
                                  pc_rtx);
      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));

      /* Increment the address.  */
      emit_insn (gen_addsi3 (out, out, const1_rtx));

      /* Not needed with an alignment of 2 */
      if (align != 2)
        {
          emit_label (align_2_label);

          emit_insn (gen_cmpqi_ccz_1 (mem, const0_rtx));

          tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
          tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                      gen_rtx_LABEL_REF (VOIDmode,
                                                         end_0_label),
                                      pc_rtx);
          emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));

          emit_insn (gen_addsi3 (out, out, const1_rtx));

          emit_label (align_3_label);
        }

      emit_insn (gen_cmpqi_ccz_1 (mem, const0_rtx));

      tmp = gen_rtx_EQ (VOIDmode, z_flags, const0_rtx);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                  gen_rtx_LABEL_REF (VOIDmode, end_0_label),
                                  pc_rtx);
      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));

      emit_insn (gen_addsi3 (out, out, const1_rtx));
    }

  /* Generate loop to check 4 bytes at a time.  It is not a good idea to
     align this loop.  It gives only huge programs, but does not help to
     speed up.  */
  emit_label (align_4_label);

  mem = gen_rtx_MEM (SImode, out);
  emit_move_insn (scratch, mem);
  emit_insn (gen_addsi3 (out, out, GEN_INT (4)));

  /* This formula yields a nonzero result iff one of the bytes is zero.
     This saves three branches inside loop and many cycles.  */
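  /* Illustration: (x - 0x01010101) borrows out of every byte that was
     zero; masking with ~x and then 0x80808080 keeps the borrowed 0x80
     only where the byte really was zero.  E.g. for x = 0x40003041 the
     result is 0x00800000, flagging the zero byte (worked example added
     for exposition).  */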
  emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
  emit_insn (gen_one_cmplsi2 (scratch, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg, GEN_INT (0x80808080)));
  emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1, 0,
                           align_4_label);

  if (TARGET_CMOVE)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_move_insn (reg, tmpreg);
      emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));

      /* If zero is not in the first two bytes, move two bytes forward.  */
      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
                              gen_rtx_IF_THEN_ELSE (SImode, tmp,
                                                    reg,
                                                    tmpreg)));
      /* Emit lea manually to avoid clobbering of flags.  */
      emit_insn (gen_rtx_SET (SImode, reg,
                              gen_rtx_PLUS (SImode, out, GEN_INT (2))));

      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, out,
                              gen_rtx_IF_THEN_ELSE (SImode, tmp,
                                                    reg,
                                                    out)));
    }
  else
    {
      rtx end_2_label = gen_label_rtx ();

      /* Is zero in the first two bytes? */
      emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
      tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                  gen_rtx_LABEL_REF (VOIDmode, end_2_label),
                                  pc_rtx);
      tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
      JUMP_LABEL (tmp) = end_2_label;

      /* Not in the first two.  Move two bytes forward.  */
      emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
      emit_insn (gen_addsi3 (out, out, GEN_INT (2)));

      emit_label (end_2_label);
    }

  /* Avoid branch in fixing the byte.  */
  tmpreg = gen_lowpart (QImode, tmpreg);
  emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
  emit_insn (gen_subsi3_carry (out, out, GEN_INT (3)));

  emit_label (end_0_label);
}
/* Clear stack slot assignments remembered from previous functions.
   This is called from INIT_EXPANDERS once before RTL is emitted for each
   function.  */

static void
ix86_init_machine_status (p)
     struct function *p;
{
  enum machine_mode mode;
  int n;

  p->machine
    = (struct machine_function *) xmalloc (sizeof (struct machine_function));

  for (mode = VOIDmode; (int) mode < (int) MAX_MACHINE_MODE;
       mode = (enum machine_mode) ((int) mode + 1))
    for (n = 0; n < MAX_386_STACK_LOCALS; n++)
      ix86_stack_locals[(int) mode][n] = NULL_RTX;
}
/* Mark machine specific bits of P for GC.  */

static void
ix86_mark_machine_status (p)
     struct function *p;
{
  enum machine_mode mode;
  int n;

  for (mode = VOIDmode; (int) mode < (int) MAX_MACHINE_MODE;
       mode = (enum machine_mode) ((int) mode + 1))
    for (n = 0; n < MAX_386_STACK_LOCALS; n++)
      ggc_mark_rtx (p->machine->stack_locals[(int) mode][n]);
}
/* Return a MEM corresponding to a stack slot with mode MODE.
   Allocate a new slot if necessary.

   The RTL for a function can have several slots available: N is
   which slot to use.  */

rtx
assign_386_stack_local (mode, n)
     enum machine_mode mode;
     int n;
{
  if (n < 0 || n >= MAX_386_STACK_LOCALS)
    abort ();

  if (ix86_stack_locals[(int) mode][n] == NULL_RTX)
    ix86_stack_locals[(int) mode][n]
      = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);

  return ix86_stack_locals[(int) mode][n];
}
/* Calculate the length of the memory address in the instruction
   encoding.  Does not include the one-byte modrm, opcode, or prefix.  */
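/* For example (illustrative): "(%ecx)" needs no additional bytes, a
   disp8 form such as "8(%ecx)" needs one, a bare 32-bit address needs
   four, and any form with an index pays one more byte for the SIB.  */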
int
memory_address_length (addr)
     rtx addr;
{
  struct ix86_address parts;
  rtx base, index, disp;
  int len;

  if (GET_CODE (addr) == PRE_DEC
      || GET_CODE (addr) == POST_INC)
    return 0;

  if (! ix86_decompose_address (addr, &parts))
    abort ();

  base = parts.base;
  index = parts.index;
  disp = parts.disp;
  len = 0;

  /* Register Indirect.  */
  if (base && !index && !disp)
    {
      /* Special cases: ebp and esp need the two-byte modrm form.  */
      if (addr == stack_pointer_rtx
          || addr == arg_pointer_rtx
          || addr == frame_pointer_rtx
          || addr == hard_frame_pointer_rtx)
        len = 1;
    }

  /* Direct Addressing.  */
  else if (disp && !base && !index)
    len = 4;

  else
    {
      /* Find the length of the displacement constant.  */
      if (disp)
        {
          if (GET_CODE (disp) == CONST_INT
              && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K'))
            len = 1;
          else
            len = 4;
        }

      /* An index requires the two-byte modrm form.  */
      if (index)
        len += 1;
    }

  return len;
}
/* Compute default value for "length" attribute.  */

int
ix86_attr_length_default (insn)
     rtx insn;
{
  enum attr_type type;
  int len = 0, i;

  type = get_attr_type (insn);
  extract_insn (insn);

  switch (type)
    {
    case TYPE_INCDEC:
    case TYPE_SETCC:
    case TYPE_ICMOV:
    case TYPE_FMOV:
    case TYPE_FOP:
    case TYPE_FCMP:
    case TYPE_FCMOV:
    case TYPE_IBR:
      break;

    case TYPE_ALU1:
    case TYPE_NEGNOT:
    case TYPE_ALU:
    case TYPE_ICMP:
    case TYPE_IMOVX:
    case TYPE_ISHIFT:
    case TYPE_IMUL:
    case TYPE_IDIV:
    case TYPE_PUSH:
    case TYPE_POP:
      for (i = recog_data.n_operands - 1; i >= 0; --i)
        if (CONSTANT_P (recog_data.operand[i]))
          {
            if (GET_CODE (recog_data.operand[i]) == CONST_INT
                && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
              len += 1;
            else
              len += GET_MODE_SIZE (GET_MODE (recog_data.operand[0]));
          }
      break;

    case TYPE_IMOV:
      if (CONSTANT_P (recog_data.operand[1]))
        len += GET_MODE_SIZE (GET_MODE (recog_data.operand[0]));
      break;

    case TYPE_CALL:
      if (constant_call_address_operand (recog_data.operand[0],
                                         GET_MODE (recog_data.operand[0])))
        return 5;
      break;

    case TYPE_CALLV:
      if (constant_call_address_operand (recog_data.operand[1],
                                         GET_MODE (recog_data.operand[1])))
        return 5;
      break;

    case TYPE_LEA:
      {
        /* Irritatingly, single_set doesn't work with REG_UNUSED present,
           as we'll get from running life_analysis during reg-stack when
           not optimizing.  Not that it matters anyway, now that
           pro_epilogue_adjust_stack uses lea, and is by design not
           single_set.  */
        rtx set = PATTERN (insn);
        if (GET_CODE (set) == SET)
          ;
        else if (GET_CODE (set) == PARALLEL
                 && GET_CODE (XVECEXP (set, 0, 0)) == SET)
          set = XVECEXP (set, 0, 0);
        else
          abort ();

        len += memory_address_length (SET_SRC (set));
        goto just_opcode;
      }

    case TYPE_FXCH:
      if (STACK_TOP_P (recog_data.operand[0]))
        return 2 + (REGNO (recog_data.operand[1]) != FIRST_STACK_REG + 1);
      else
        return 2 + (REGNO (recog_data.operand[0]) != FIRST_STACK_REG + 1);

    default:
      break;
    }

  for (i = recog_data.n_operands - 1; i >= 0; --i)
    if (GET_CODE (recog_data.operand[i]) == MEM)
      {
        len += memory_address_length (XEXP (recog_data.operand[i], 0));
        break;
      }

 just_opcode:
  len += get_attr_length_opcode (insn);
  len += get_attr_length_prefix (insn);

  return len;
}
/* Return the maximum number of instructions a cpu can issue.  */

int
ix86_issue_rate ()
{
  switch (ix86_cpu)
    {
    case PROCESSOR_PENTIUM:
    case PROCESSOR_K6:
      return 2;

    case PROCESSOR_PENTIUMPRO:
    case PROCESSOR_ATHLON:
      return 3;

    default:
      return 1;
    }
}
/* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
   by DEP_INSN and nothing set by DEP_INSN.  */

static int
ix86_flags_dependant (insn, dep_insn, insn_type)
     rtx insn, dep_insn;
     enum attr_type insn_type;
{
  rtx set, set2;

  /* Simplify the test for uninteresting insns.  */
  if (insn_type != TYPE_SETCC
      && insn_type != TYPE_ICMOV
      && insn_type != TYPE_FCMOV
      && insn_type != TYPE_IBR)
    return 0;

  set2 = NULL_RTX;
  if ((set = single_set (dep_insn)) != 0)
    set = SET_DEST (set);
  else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
           && XVECLEN (PATTERN (dep_insn), 0) == 2
           && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
           && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
    {
      set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
    }
  else
    return 0;

  if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
    return 0;

  /* This test is true if the dependent insn reads the flags but
     not any other potentially set register.  */
  if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
    return 0;

  if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
    return 0;

  return 1;
}
/* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
   address with operands set by DEP_INSN.  */

static int
ix86_agi_dependant (insn, dep_insn, insn_type)
     rtx insn, dep_insn;
     enum attr_type insn_type;
{
  rtx addr;

  if (insn_type == TYPE_LEA)
    {
      addr = PATTERN (insn);
      if (GET_CODE (addr) == SET)
        ;
      else if (GET_CODE (addr) == PARALLEL
               && GET_CODE (XVECEXP (addr, 0, 0)) == SET)
        addr = XVECEXP (addr, 0, 0);
      else
        abort ();
      addr = SET_SRC (addr);
    }
  else
    {
      int i;

      extract_insn (insn);
      for (i = recog_data.n_operands - 1; i >= 0; --i)
        if (GET_CODE (recog_data.operand[i]) == MEM)
          {
            addr = XEXP (recog_data.operand[i], 0);
            goto found;
          }
      return 0;
    found:;
    }

  return modified_in_p (addr, dep_insn);
}
int
ix86_adjust_cost (insn, link, dep_insn, cost)
     rtx insn, link, dep_insn;
     int cost;
{
  enum attr_type insn_type, dep_insn_type;
  enum attr_memory memory;
  rtx set, set2;
  int dep_insn_code_number;

  /* Anti and output dependencies have zero cost on all CPUs.  */
  if (REG_NOTE_KIND (link) != 0)
    return 0;

  dep_insn_code_number = recog_memoized (dep_insn);

  /* If we can't recognize the insns, we can't really do anything.  */
  if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_insn_type = get_attr_type (dep_insn);

  /* Prologue and epilogue allocators can have a false dependency on ebp.
     This results in one cycle extra stall on Pentium prologue scheduling,
     so handle this important case manually.  */
  if (dep_insn_code_number == CODE_FOR_pro_epilogue_adjust_stack
      && dep_insn_type == TYPE_ALU
      && !reg_mentioned_p (stack_pointer_rtx, insn))
    return 0;

  switch (ix86_cpu)
    {
    case PROCESSOR_PENTIUM:
      /* Address Generation Interlock adds a cycle of latency.  */
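      /* E.g. "addl $4, %ebx" immediately followed by "movl (%ebx), %eax"
         stalls for a cycle while the address of the load waits on %ebx
         (illustrative example).  */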
      if (ix86_agi_dependant (insn, dep_insn, insn_type))
        cost += 1;

      /* ??? Compares pair with jump/setcc.  */
      if (ix86_flags_dependant (insn, dep_insn, insn_type))
        cost = 0;

      /* Floating point stores require value to be ready one cycle
         earlier.  */
      if (insn_type == TYPE_FMOV
          && get_attr_memory (insn) == MEMORY_STORE
          && !ix86_agi_dependant (insn, dep_insn, insn_type))
        cost += 1;
      break;

    case PROCESSOR_PENTIUMPRO:
      /* Since we can't represent delayed latencies of load+operation,
         increase the cost here for non-imov insns.  */
      if (dep_insn_type != TYPE_IMOV
          && dep_insn_type != TYPE_FMOV
          && ((memory = get_attr_memory (dep_insn)) == MEMORY_LOAD
              || memory == MEMORY_BOTH))
        cost += 1;

      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
        cost += 5;

      /* There is one cycle extra latency between an FP op and a store.  */
      if (insn_type == TYPE_FMOV
          && (set = single_set (dep_insn)) != NULL_RTX
          && (set2 = single_set (insn)) != NULL_RTX
          && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
          && GET_CODE (SET_DEST (set2)) == MEM)
        cost += 1;
      break;

    case PROCESSOR_K6:
      /* The esp dependency is resolved before the instruction is really
         finished.  */
      if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
          && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
        return 1;

      /* Since we can't represent delayed latencies of load+operation,
         increase the cost here for non-imov insns.  */
      if ((memory = get_attr_memory (dep_insn)) == MEMORY_LOAD
          || memory == MEMORY_BOTH)
        cost += (dep_insn_type != TYPE_IMOV) ? 2 : 1;

      /* INT->FP conversion is expensive.  */
      if (get_attr_fp_int_src (dep_insn))
        cost += 5;
      break;

    case PROCESSOR_ATHLON:
      if ((memory = get_attr_memory (dep_insn)) == MEMORY_LOAD
          || memory == MEMORY_BOTH)
        {
          if (dep_insn_type == TYPE_IMOV || dep_insn_type == TYPE_FMOV)
            cost += 2;
          else
            cost += 3;
        }
      break;

    default:
      break;
    }

  return cost;
}
static union
{
  struct ppro_sched_data
  {
    rtx decode[3];
    int issued_this_cycle;
  } ppro;
} ix86_sched_data;

static int
ix86_safe_length (insn)
     rtx insn;
{
  if (recog_memoized (insn) >= 0)
    return get_attr_length (insn);
  else
    return 128;
}

static int
ix86_safe_length_prefix (insn)
     rtx insn;
{
  if (recog_memoized (insn) >= 0)
    return get_attr_length_prefix (insn);
  else
    return 0;
}

static enum attr_memory
ix86_safe_memory (insn)
     rtx insn;
{
  if (recog_memoized (insn) >= 0)
    return get_attr_memory (insn);
  else
    return MEMORY_UNKNOWN;
}

static enum attr_pent_pair
ix86_safe_pent_pair (insn)
     rtx insn;
{
  if (recog_memoized (insn) >= 0)
    return get_attr_pent_pair (insn);
  else
    return PENT_PAIR_NP;
}

static enum attr_ppro_uops
ix86_safe_ppro_uops (insn)
     rtx insn;
{
  if (recog_memoized (insn) >= 0)
    return get_attr_ppro_uops (insn);
  else
    return PPRO_UOPS_MANY;
}
static void
ix86_dump_ppro_packet (dump)
     FILE *dump;
{
  if (ix86_sched_data.ppro.decode[0])
    {
      fprintf (dump, "PPRO packet: %d",
               INSN_UID (ix86_sched_data.ppro.decode[0]));
      if (ix86_sched_data.ppro.decode[1])
        fprintf (dump, " %d", INSN_UID (ix86_sched_data.ppro.decode[1]));
      if (ix86_sched_data.ppro.decode[2])
        fprintf (dump, " %d", INSN_UID (ix86_sched_data.ppro.decode[2]));
      fputc ('\n', dump);
    }
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

void
ix86_sched_init (dump, sched_verbose)
     FILE *dump ATTRIBUTE_UNUSED;
     int sched_verbose ATTRIBUTE_UNUSED;
{
  memset (&ix86_sched_data, 0, sizeof (ix86_sched_data));
}
/* Shift INSN to SLOT, and shift everything else down.  */

static void
ix86_reorder_insn (insnp, slot)
     rtx *insnp, *slot;
{
  if (insnp != slot)
    {
      rtx insn = *insnp;
      do
        insnp[0] = insnp[1];
      while (++insnp != slot);
      *insnp = insn;
    }
}
/* Find an instruction with given pairability and minimal amount of cycles
   lost by the fact that the CPU waits for both pipelines to finish before
   reading next instructions.  Also take care that both instructions together
   can not exceed 7 bytes.  */
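/* (The Pentium issues into its U and V pipes in lockstep, so a pair runs
   at the speed of its slower member; hence the balancing of ready costs
   below.)  */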
static rtx *
ix86_pent_find_pair (e_ready, ready, type, first)
     rtx *e_ready;
     rtx *ready;
     enum attr_pent_pair type;
     rtx first;
{
  int mincycles, cycles;
  enum attr_pent_pair tmp;
  enum attr_memory memory;
  rtx *insnp, *bestinsnp = NULL;

  if (ix86_safe_length (first) > 7 + ix86_safe_length_prefix (first))
    return NULL;

  memory = ix86_safe_memory (first);
  cycles = result_ready_cost (first);
  mincycles = INT_MAX;

  for (insnp = e_ready; insnp >= ready && mincycles; --insnp)
    if ((tmp = ix86_safe_pent_pair (*insnp)) == type
        && ix86_safe_length (*insnp) <= 7 + ix86_safe_length_prefix (*insnp))
      {
        enum attr_memory second_memory;
        int secondcycles, currentcycles;

        second_memory = ix86_safe_memory (*insnp);
        secondcycles = result_ready_cost (*insnp);
        currentcycles = abs (cycles - secondcycles);

        if (secondcycles >= 1 && cycles >= 1)
          {
            /* Two read/modify/write instructions together takes two
               cycles longer.  */
            if (memory == MEMORY_BOTH && second_memory == MEMORY_BOTH)
              currentcycles += 2;

            /* Read modify/write instruction followed by read/modify
               takes one cycle longer.  */
            if (memory == MEMORY_BOTH && second_memory == MEMORY_LOAD
                && tmp != PENT_PAIR_UV
                && ix86_safe_pent_pair (first) != PENT_PAIR_UV)
              currentcycles += 1;
          }
        if (currentcycles < mincycles)
          bestinsnp = insnp, mincycles = currentcycles;
      }

  return bestinsnp;
}
/* Subroutines of ix86_sched_reorder.  */

static void
ix86_sched_reorder_pentium (ready, e_ready)
     rtx *ready;
     rtx *e_ready;
{
  enum attr_pent_pair pair1, pair2;
  rtx *insnp;

  /* This wouldn't be necessary if Haifa knew that static insn ordering
     is important to which pipe an insn is issued to.  So we have to make
     some minor rearrangements.  */

  pair1 = ix86_safe_pent_pair (*e_ready);

  /* If the first insn is non-pairable, let it be.  */
  if (pair1 == PENT_PAIR_NP)
    return;

  pair2 = PENT_PAIR_NP;
  insnp = 0;

  /* If the first insn is UV or PV pairable, search for a PU
     insn to go with.  */
  if (pair1 == PENT_PAIR_UV || pair1 == PENT_PAIR_PV)
    {
      insnp = ix86_pent_find_pair (e_ready-1, ready,
                                   PENT_PAIR_PU, *e_ready);
      if (insnp)
        pair2 = PENT_PAIR_PU;
    }

  /* If the first insn is PU or UV pairable, search for a PV
     insn to go with.  */
  if (pair2 == PENT_PAIR_NP
      && (pair1 == PENT_PAIR_PU || pair1 == PENT_PAIR_UV))
    {
      insnp = ix86_pent_find_pair (e_ready-1, ready,
                                   PENT_PAIR_PV, *e_ready);
      if (insnp)
        pair2 = PENT_PAIR_PV;
    }

  /* If the first insn is pairable, search for a UV
     insn to go with.  */
  if (pair2 == PENT_PAIR_NP)
    {
      insnp = ix86_pent_find_pair (e_ready-1, ready,
                                   PENT_PAIR_UV, *e_ready);
      if (insnp)
        pair2 = PENT_PAIR_UV;
    }

  if (pair2 == PENT_PAIR_NP)
    return;

  /* Found something!  Decide if we need to swap the order.  */
  if (pair1 == PENT_PAIR_PV || pair2 == PENT_PAIR_PU
      || (pair1 == PENT_PAIR_UV && pair2 == PENT_PAIR_UV
          && ix86_safe_memory (*e_ready) == MEMORY_BOTH
          && ix86_safe_memory (*insnp) == MEMORY_LOAD))
    ix86_reorder_insn (insnp, e_ready);
  else
    ix86_reorder_insn (insnp, e_ready - 1);
}
static void
ix86_sched_reorder_ppro (ready, e_ready)
     rtx *ready;
     rtx *e_ready;
{
  rtx decode[3];
  enum attr_ppro_uops cur_uops;
  int issued_this_cycle;
  rtx *insnp;
  int i;

  /* At this point .ppro.decode contains the state of the three
     decoders from last "cycle".  That is, those insns that were
     actually independent.  But here we're scheduling for the
     decoder, and we may find things that are decodable in the
     same cycle.  */

  memcpy (decode, ix86_sched_data.ppro.decode, sizeof (decode));
  issued_this_cycle = 0;

  insnp = e_ready;
  cur_uops = ix86_safe_ppro_uops (*insnp);

  /* If the decoders are empty, and we've a complex insn at the
     head of the priority queue, let it issue without complaint.  */
  if (decode[0] == NULL)
    {
      if (cur_uops == PPRO_UOPS_MANY)
        {
          decode[0] = *insnp;
          goto ppro_done;
        }

      /* Otherwise, search for a 2-4 uop insn to issue.  */
      while (cur_uops != PPRO_UOPS_FEW)
        {
          if (insnp == ready)
            break;
          cur_uops = ix86_safe_ppro_uops (*--insnp);
        }

      /* If so, move it to the head of the line.  */
      if (cur_uops == PPRO_UOPS_FEW)
        ix86_reorder_insn (insnp, e_ready);

      /* Issue the head of the queue.  */
      issued_this_cycle = 1;
      decode[0] = *e_ready--;
    }

  /* Look for simple insns to fill in the other two slots.  */
  for (i = 1; i < 3; ++i)
    if (decode[i] == NULL)
      {
        if (ready >= e_ready)
          goto ppro_done;

        insnp = e_ready;
        cur_uops = ix86_safe_ppro_uops (*insnp);
        while (cur_uops != PPRO_UOPS_ONE)
          {
            if (insnp == ready)
              break;
            cur_uops = ix86_safe_ppro_uops (*--insnp);
          }

        /* Found one.  Move it to the head of the queue and issue it.  */
        if (cur_uops == PPRO_UOPS_ONE)
          {
            ix86_reorder_insn (insnp, e_ready);
            decode[i] = *e_ready--;
            issued_this_cycle++;
            continue;
          }

        /* ??? Didn't find one.  Ideally, here we would do a lazy split
           of 2-uop insns, issue one and queue the other.  */
      }

 ppro_done:
  if (issued_this_cycle == 0)
    issued_this_cycle = 1;
  ix86_sched_data.ppro.issued_this_cycle = issued_this_cycle;
}
/* We are about to begin issuing insns for this clock cycle.
   Override the default sort algorithm to better slot instructions.  */

int
ix86_sched_reorder (dump, sched_verbose, ready, n_ready, clock_var)
     FILE *dump ATTRIBUTE_UNUSED;
     int sched_verbose ATTRIBUTE_UNUSED;
     rtx *ready;
     int n_ready;
     int clock_var ATTRIBUTE_UNUSED;
{
  rtx *e_ready = ready + n_ready - 1;

  if (n_ready < 2)
    goto out;

  switch (ix86_cpu)
    {
    default:
      break;

    case PROCESSOR_PENTIUM:
      ix86_sched_reorder_pentium (ready, e_ready);
      break;

    case PROCESSOR_PENTIUMPRO:
      ix86_sched_reorder_ppro (ready, e_ready);
      break;
    }

 out:
  return ix86_issue_rate ();
}
/* We are about to issue INSN.  Return the number of insns left on the
   ready queue that can be issued this cycle.  */

int
ix86_variable_issue (dump, sched_verbose, insn, can_issue_more)
     FILE *dump;
     int sched_verbose;
     rtx insn;
     int can_issue_more;
{
  int i;

  switch (ix86_cpu)
    {
    default:
      return can_issue_more - 1;

    case PROCESSOR_PENTIUMPRO:
      {
        enum attr_ppro_uops uops = ix86_safe_ppro_uops (insn);

        if (uops == PPRO_UOPS_MANY)
          {
            if (sched_verbose)
              ix86_dump_ppro_packet (dump);
            ix86_sched_data.ppro.decode[0] = insn;
            ix86_sched_data.ppro.decode[1] = NULL;
            ix86_sched_data.ppro.decode[2] = NULL;
            if (sched_verbose)
              ix86_dump_ppro_packet (dump);
            ix86_sched_data.ppro.decode[0] = NULL;
          }
        else if (uops == PPRO_UOPS_FEW)
          {
            if (sched_verbose)
              ix86_dump_ppro_packet (dump);
            ix86_sched_data.ppro.decode[0] = insn;
            ix86_sched_data.ppro.decode[1] = NULL;
            ix86_sched_data.ppro.decode[2] = NULL;
          }
        else
          {
            for (i = 0; i < 3; ++i)
              if (ix86_sched_data.ppro.decode[i] == NULL)
                {
                  ix86_sched_data.ppro.decode[i] = insn;
                  break;
                }
            if (i == 3)
              {
                if (sched_verbose)
                  ix86_dump_ppro_packet (dump);
                ix86_sched_data.ppro.decode[0] = NULL;
                ix86_sched_data.ppro.decode[1] = NULL;
                ix86_sched_data.ppro.decode[2] = NULL;
              }
          }
      }
      return --ix86_sched_data.ppro.issued_this_cycle;
    }
}