/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "tree-gimple.h"
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif
/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode) \
  ((mode) == QImode ? 0 \
   : (mode) == HImode ? 1 \
   : (mode) == SImode ? 2 \
   : (mode) == DImode ? 3 \
   : 4)
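/* (The per-mode cost arrays below, e.g. "cost of starting a multiply",
   have five entries indexed this way: QI, HI, SI, DI and, by default,
   TI or anything wider.)  */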
/* Processor costs (relative to an add) */
struct processor_costs size_cost = {	/* costs for tuning for size */
  2,			/* cost of an add instruction */
  3,			/* cost of a lea instruction */
  2,			/* variable shift costs */
  3,			/* constant shift costs */
  {3, 3, 3, 3, 5},	/* cost of starting a multiply */
  0,			/* cost of multiply per each bit set */
  {3, 3, 3, 3, 5},	/* cost of a divide/mod */
  3,			/* cost of movsx */
  3,			/* cost of movzx */
  2,			/* cost for loading QImode using movzbl */
  {2, 2, 2},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 2, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {2, 2, 2},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {2, 2, 2},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  3,			/* cost of moving MMX register */
  {3, 3},		/* cost of loading MMX registers in SImode and DImode */
  {3, 3},		/* cost of storing MMX registers in SImode and DImode */
  3,			/* cost of moving SSE register */
  {3, 3, 3},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {3, 3, 3},		/* cost of storing SSE registers in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  0,			/* size of prefetch block */
  0,			/* number of parallel prefetches */
  2,			/* cost of FADD and FSUB insns.  */
  2,			/* cost of FMUL instruction.  */
  2,			/* cost of FDIV instruction.  */
  2,			/* cost of FABS instruction.  */
  2,			/* cost of FCHS instruction.  */
  2,			/* cost of FSQRT instruction.  */
};
/* Processor costs (relative to an add) */
struct processor_costs i386_cost = {	/* 386 specific costs */
  1,			/* cost of an add instruction */
  1,			/* cost of a lea instruction */
  3,			/* variable shift costs */
  2,			/* constant shift costs */
  {6, 6, 6, 6, 6},	/* cost of starting a multiply */
  1,			/* cost of multiply per each bit set */
  {23, 23, 23, 23, 23},	/* cost of a divide/mod */
  3,			/* cost of movsx */
  2,			/* cost of movzx */
  15,			/* "large" insn */
  4,			/* cost for loading QImode using movzbl */
  {2, 4, 2},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 4, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {8, 8, 8},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {8, 8, 8},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {4, 8},		/* cost of loading MMX registers in SImode and DImode */
  {4, 8},		/* cost of storing MMX registers in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 8, 16},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 8, 16},		/* cost of storing SSE registers in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  0,			/* size of prefetch block */
  0,			/* number of parallel prefetches */
  23,			/* cost of FADD and FSUB insns.  */
  27,			/* cost of FMUL instruction.  */
  88,			/* cost of FDIV instruction.  */
  22,			/* cost of FABS instruction.  */
  24,			/* cost of FCHS instruction.  */
  122,			/* cost of FSQRT instruction.  */
};
struct processor_costs i486_cost = {	/* 486 specific costs */
  1,			/* cost of an add instruction */
  1,			/* cost of a lea instruction */
  3,			/* variable shift costs */
  2,			/* constant shift costs */
  {12, 12, 12, 12, 12},	/* cost of starting a multiply */
  1,			/* cost of multiply per each bit set */
  {40, 40, 40, 40, 40},	/* cost of a divide/mod */
  3,			/* cost of movsx */
  2,			/* cost of movzx */
  15,			/* "large" insn */
  4,			/* cost for loading QImode using movzbl */
  {2, 4, 2},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 4, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {8, 8, 8},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {8, 8, 8},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {4, 8},		/* cost of loading MMX registers in SImode and DImode */
  {4, 8},		/* cost of storing MMX registers in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 8, 16},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 8, 16},		/* cost of storing SSE registers in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  0,			/* size of prefetch block */
  0,			/* number of parallel prefetches */
  8,			/* cost of FADD and FSUB insns.  */
  16,			/* cost of FMUL instruction.  */
  73,			/* cost of FDIV instruction.  */
  3,			/* cost of FABS instruction.  */
  3,			/* cost of FCHS instruction.  */
  83,			/* cost of FSQRT instruction.  */
};
struct processor_costs pentium_cost = {
  1,			/* cost of an add instruction */
  1,			/* cost of a lea instruction */
  4,			/* variable shift costs */
  1,			/* constant shift costs */
  {11, 11, 11, 11, 11},	/* cost of starting a multiply */
  0,			/* cost of multiply per each bit set */
  {25, 25, 25, 25, 25},	/* cost of a divide/mod */
  3,			/* cost of movsx */
  2,			/* cost of movzx */
  8,			/* "large" insn */
  6,			/* cost for loading QImode using movzbl */
  {2, 4, 2},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 4, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {2, 2, 6},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 6},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  8,			/* cost of moving MMX register */
  {8, 8},		/* cost of loading MMX registers in SImode and DImode */
  {8, 8},		/* cost of storing MMX registers in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 8, 16},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 8, 16},		/* cost of storing SSE registers in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  0,			/* size of prefetch block */
  0,			/* number of parallel prefetches */
  3,			/* cost of FADD and FSUB insns.  */
  3,			/* cost of FMUL instruction.  */
  39,			/* cost of FDIV instruction.  */
  1,			/* cost of FABS instruction.  */
  1,			/* cost of FCHS instruction.  */
  70,			/* cost of FSQRT instruction.  */
};
struct processor_costs pentiumpro_cost = {
  1,			/* cost of an add instruction */
  1,			/* cost of a lea instruction */
  1,			/* variable shift costs */
  1,			/* constant shift costs */
  {4, 4, 4, 4, 4},	/* cost of starting a multiply */
  0,			/* cost of multiply per each bit set */
  {17, 17, 17, 17, 17},	/* cost of a divide/mod */
  1,			/* cost of movsx */
  1,			/* cost of movzx */
  8,			/* "large" insn */
  2,			/* cost for loading QImode using movzbl */
  {4, 4, 4},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 2, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {2, 2, 6},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 6},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {2, 2},		/* cost of loading MMX registers in SImode and DImode */
  {2, 2},		/* cost of storing MMX registers in SImode and DImode */
  2,			/* cost of moving SSE register */
  {2, 2, 8},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {2, 2, 8},		/* cost of storing SSE registers in SImode, DImode and TImode */
  3,			/* MMX or SSE register to integer */
  32,			/* size of prefetch block */
  6,			/* number of parallel prefetches */
  3,			/* cost of FADD and FSUB insns.  */
  5,			/* cost of FMUL instruction.  */
  56,			/* cost of FDIV instruction.  */
  2,			/* cost of FABS instruction.  */
  2,			/* cost of FCHS instruction.  */
  56,			/* cost of FSQRT instruction.  */
};
struct processor_costs k6_cost = {
  1,			/* cost of an add instruction */
  2,			/* cost of a lea instruction */
  1,			/* variable shift costs */
  1,			/* constant shift costs */
  {3, 3, 3, 3, 3},	/* cost of starting a multiply */
  0,			/* cost of multiply per each bit set */
  {18, 18, 18, 18, 18},	/* cost of a divide/mod */
  2,			/* cost of movsx */
  2,			/* cost of movzx */
  8,			/* "large" insn */
  3,			/* cost for loading QImode using movzbl */
  {4, 5, 4},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 3, 2},		/* cost of storing integer registers */
  4,			/* cost of reg,reg fld/fst */
  {6, 6, 6},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 4},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {2, 2},		/* cost of loading MMX registers in SImode and DImode */
  {2, 2},		/* cost of storing MMX registers in SImode and DImode */
  2,			/* cost of moving SSE register */
  {2, 2, 8},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {2, 2, 8},		/* cost of storing SSE registers in SImode, DImode and TImode */
  6,			/* MMX or SSE register to integer */
  32,			/* size of prefetch block */
  1,			/* number of parallel prefetches */
  2,			/* cost of FADD and FSUB insns.  */
  2,			/* cost of FMUL instruction.  */
  56,			/* cost of FDIV instruction.  */
  2,			/* cost of FABS instruction.  */
  2,			/* cost of FCHS instruction.  */
  56,			/* cost of FSQRT instruction.  */
};
struct processor_costs athlon_cost = {
  1,			/* cost of an add instruction */
  2,			/* cost of a lea instruction */
  1,			/* variable shift costs */
  1,			/* constant shift costs */
  {5, 5, 5, 5, 5},	/* cost of starting a multiply */
  0,			/* cost of multiply per each bit set */
  {18, 26, 42, 74, 74},	/* cost of a divide/mod */
  1,			/* cost of movsx */
  1,			/* cost of movzx */
  8,			/* "large" insn */
  4,			/* cost for loading QImode using movzbl */
  {3, 4, 3},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {3, 4, 3},		/* cost of storing integer registers */
  4,			/* cost of reg,reg fld/fst */
  {4, 4, 12},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {6, 6, 8},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {4, 4},		/* cost of loading MMX registers in SImode and DImode */
  {4, 4},		/* cost of storing MMX registers in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 4, 6},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 4, 5},		/* cost of storing SSE registers in SImode, DImode and TImode */
  5,			/* MMX or SSE register to integer */
  64,			/* size of prefetch block */
  6,			/* number of parallel prefetches */
  4,			/* cost of FADD and FSUB insns.  */
  4,			/* cost of FMUL instruction.  */
  24,			/* cost of FDIV instruction.  */
  2,			/* cost of FABS instruction.  */
  2,			/* cost of FCHS instruction.  */
  35,			/* cost of FSQRT instruction.  */
};
struct processor_costs k8_cost = {
  1,			/* cost of an add instruction */
  2,			/* cost of a lea instruction */
  1,			/* variable shift costs */
  1,			/* constant shift costs */
  {3, 4, 3, 4, 5},	/* cost of starting a multiply */
  0,			/* cost of multiply per each bit set */
  {18, 26, 42, 74, 74},	/* cost of a divide/mod */
  1,			/* cost of movsx */
  1,			/* cost of movzx */
  8,			/* "large" insn */
  4,			/* cost for loading QImode using movzbl */
  {3, 4, 3},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {3, 4, 3},		/* cost of storing integer registers */
  4,			/* cost of reg,reg fld/fst */
  {4, 4, 12},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {6, 6, 8},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {3, 3},		/* cost of loading MMX registers in SImode and DImode */
  {4, 4},		/* cost of storing MMX registers in SImode and DImode */
  2,			/* cost of moving SSE register */
  {4, 3, 6},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {4, 4, 5},		/* cost of storing SSE registers in SImode, DImode and TImode */
  5,			/* MMX or SSE register to integer */
  64,			/* size of prefetch block */
  6,			/* number of parallel prefetches */
  4,			/* cost of FADD and FSUB insns.  */
  4,			/* cost of FMUL instruction.  */
  19,			/* cost of FDIV instruction.  */
  2,			/* cost of FABS instruction.  */
  2,			/* cost of FCHS instruction.  */
  35,			/* cost of FSQRT instruction.  */
};
struct processor_costs pentium4_cost = {
  1,			/* cost of an add instruction */
  3,			/* cost of a lea instruction */
  4,			/* variable shift costs */
  4,			/* constant shift costs */
  {15, 15, 15, 15, 15},	/* cost of starting a multiply */
  0,			/* cost of multiply per each bit set */
  {56, 56, 56, 56, 56},	/* cost of a divide/mod */
  1,			/* cost of movsx */
  1,			/* cost of movzx */
  16,			/* "large" insn */
  2,			/* cost for loading QImode using movzbl */
  {4, 5, 4},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {2, 3, 2},		/* cost of storing integer registers */
  2,			/* cost of reg,reg fld/fst */
  {2, 2, 6},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 6},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  2,			/* cost of moving MMX register */
  {2, 2},		/* cost of loading MMX registers in SImode and DImode */
  {2, 2},		/* cost of storing MMX registers in SImode and DImode */
  12,			/* cost of moving SSE register */
  {12, 12, 12},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {2, 2, 8},		/* cost of storing SSE registers in SImode, DImode and TImode */
  10,			/* MMX or SSE register to integer */
  64,			/* size of prefetch block */
  6,			/* number of parallel prefetches */
  5,			/* cost of FADD and FSUB insns.  */
  7,			/* cost of FMUL instruction.  */
  43,			/* cost of FDIV instruction.  */
  2,			/* cost of FABS instruction.  */
  2,			/* cost of FCHS instruction.  */
  43,			/* cost of FSQRT instruction.  */
};
struct processor_costs nocona_cost = {
  1,			/* cost of an add instruction */
  1,			/* cost of a lea instruction */
  1,			/* variable shift costs */
  1,			/* constant shift costs */
  {10, 10, 10, 10, 10},	/* cost of starting a multiply */
  0,			/* cost of multiply per each bit set */
  {66, 66, 66, 66, 66},	/* cost of a divide/mod */
  1,			/* cost of movsx */
  1,			/* cost of movzx */
  16,			/* "large" insn */
  4,			/* cost for loading QImode using movzbl */
  {4, 4, 4},		/* cost of loading integer registers in QImode,
			   HImode and SImode.  Relative to reg-reg move (2).  */
  {4, 4, 4},		/* cost of storing integer registers */
  3,			/* cost of reg,reg fld/fst */
  {12, 12, 12},		/* cost of loading fp registers in SFmode, DFmode and XFmode */
  {4, 4, 4},		/* cost of storing fp registers in SFmode, DFmode and XFmode */
  6,			/* cost of moving MMX register */
  {12, 12},		/* cost of loading MMX registers in SImode and DImode */
  {12, 12},		/* cost of storing MMX registers in SImode and DImode */
  6,			/* cost of moving SSE register */
  {12, 12, 12},		/* cost of loading SSE registers in SImode, DImode and TImode */
  {12, 12, 12},		/* cost of storing SSE registers in SImode, DImode and TImode */
  8,			/* MMX or SSE register to integer */
  128,			/* size of prefetch block */
  8,			/* number of parallel prefetches */
  6,			/* cost of FADD and FSUB insns.  */
  8,			/* cost of FMUL instruction.  */
  40,			/* cost of FDIV instruction.  */
  3,			/* cost of FABS instruction.  */
  3,			/* cost of FCHS instruction.  */
  44,			/* cost of FSQRT instruction.  */
};
const struct processor_costs *ix86_cost = &pentium_cost;
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_K6  (1<<PROCESSOR_K6)
#define m_ATHLON  (1<<PROCESSOR_ATHLON)
#define m_PENT4  (1<<PROCESSOR_PENTIUM4)
#define m_K8  (1<<PROCESSOR_K8)
#define m_ATHLON_K8  (m_K8 | m_ATHLON)
#define m_NOCONA  (1<<PROCESSOR_NOCONA)
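/* Each x86_* tuning variable below is a bitmask over the PROCESSOR_*
   enumeration; it is tested against the mask of the processor being tuned
   for (presumably (1 << ix86_tune), used as TUNEMASK in override_options
   further down), so e.g. x86_use_leave is in effect when tuning for the
   386, K6, Athlon or K8.  */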
const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_zero_extend_with_and = m_486 | m_PENT;
const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
const int x86_double_with_add = ~m_386;
const int x86_use_bit_test = m_386;
const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_3dnow_a = m_ATHLON_K8;
const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
/* Branch hints were put in P4 based on simulation results.  But after P4 was
   made, no performance benefit was observed with branch hints.  It also
   increases code size.  As a result, icc never generates branch hints.  */
const int x86_branch_hints = 0;
const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
const int x86_partial_reg_stall = m_PPRO;
const int x86_use_loop = m_K6;
const int x86_use_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
const int x86_use_mov0 = m_K6;
const int x86_use_cltd = ~(m_PENT | m_K6);
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
const int x86_himode_math = ~(m_PPRO);
const int x86_promote_hi_regs = m_PPRO;
const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
const int x86_decompose_lea = m_PENT4 | m_NOCONA;
const int x86_shift1 = ~m_486;
const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
/* Set for machines where the type and dependencies are resolved on SSE
   register parts instead of whole registers, so we may maintain just
   lower part of scalar values in proper format leaving the upper part
   undefined.  */
const int x86_sse_split_regs = m_ATHLON_K8;
const int x86_sse_typeless_stores = m_ATHLON_K8;
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
const int x86_inter_unit_moves = ~(m_ATHLON_K8);
const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
/* Some CPU cores are not able to predict more than 4 branch instructions in
   the 16-byte window.  */
const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
const int x86_use_bt = m_ATHLON_K8;
/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue.  */
#define FAST_PROLOGUE_INSN_COUNT 20
/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  AREG, DREG, CREG, BREG,
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* flags, fpsr, dirflag, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
};
/* The "default" register map used in 32bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,		/* general regs */
  12, 13, 14, 15, 16, 17, 18, 19,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, dir, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};
static int const x86_64_int_parameter_registers[6] =
{
  5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
  FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
};

static int const x86_64_int_return_registers[4] =
{
  0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
};

/* The "default" register map used in 64bit mode.  */
int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7,		/* general regs */
  33, 34, 35, 36, 37, 38, 39, 40,	/* fp regs */
  -1, -1, -1, -1, -1,			/* arg, flags, fpsr, dir, frame */
  17, 18, 19, 20, 21, 22, 23, 24,	/* SSE */
  41, 42, 43, 44, 45, 46, 47, 48,	/* MMX */
  8, 9, 10, 11, 12, 13, 14, 15,		/* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32,	/* extended SSE registers */
};
/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8 for %eip (no gcc equivalent)
	9 for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to understand
   that it should say that a variable lives in %st(0) (when asked via an
   `=' command) if we said it was in DWARF regno 11, but SDB still
   prints garbage when asked for the value of the variable in question
   (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers:
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,		/* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,	/* fp regs */
  -1, 9, -1, -1, -1,			/* arg, flags, fpsr, dir, frame */
  21, 22, 23, 24, 25, 26, 27, 28,	/* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36,	/* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* extended SSE registers */
};
/* Test and compare insns in i386.md store the information needed to
   generate branch and scc insns here.  */

rtx ix86_compare_op0 = NULL_RTX;
rtx ix86_compare_op1 = NULL_RTX;
#define MAX_386_STACK_LOCALS 3
/* Size of the register save area.  */
#define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
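/* That is, REGPARM_MAX word-sized slots for the integer argument
   registers plus SSE_REGPARM_MAX 16-byte slots for the XMM argument
   registers: the register save area filled in by the varargs prologue.  */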
/* Define the structure for the machine field in struct function.  */

struct stack_local_entry GTY(())
{
  unsigned short mode;
  unsigned short n;
  rtx rtl;
  struct stack_local_entry *next;
};
/* Structure describing stack frame layout.
   Stack grows downward:

   saved frame pointer if frame_pointer_needed
					<- HARD_FRAME_POINTER

   > to_allocate			<- FRAME_POINTER  */
struct ix86_frame
{
  int outgoing_arguments_size;

  HOST_WIDE_INT to_allocate;
  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;
};
/* Used to enable/disable debugging features.  */
const char *ix86_debug_arg_string, *ix86_debug_addr_string;

/* Code model option as passed by user.  */
const char *ix86_cmodel_string;
enum cmodel ix86_cmodel;
const char *ix86_asm_string;
enum asm_dialect ix86_asm_dialect = ASM_ATT;
const char *ix86_tls_dialect_string;
enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;

/* Which unit we are generating floating point math for.  */
enum fpmath_unit ix86_fpmath;

/* Which cpu are we scheduling for.  */
enum processor_type ix86_tune;
/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* Strings to hold which cpu and instruction set architecture to use.  */
const char *ix86_tune_string;		/* for -mtune=<xxx> */
const char *ix86_arch_string;		/* for -march=<xxx> */
const char *ix86_fpmath_string;		/* for -mfpmath=<xxx> */

/* # of registers to use to pass arguments.  */
const char *ix86_regparm_string;

/* true if sse prefetch instruction is not NOOP.  */
int x86_prefetch_sse;

/* ix86_regparm_string as a number */
int ix86_regparm;

/* Alignment to use for loops and jumps:  */

/* Power of two alignment for loops.  */
const char *ix86_align_loops_string;

/* Power of two alignment for non-loop jumps.  */
const char *ix86_align_jumps_string;

/* Power of two alignment for stack boundary in bytes.  */
const char *ix86_preferred_stack_boundary_string;

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Values 1-5: see jump.c */
int ix86_branch_cost;
const char *ix86_branch_cost_string;

/* Power of two alignment for functions.  */
const char *ix86_align_funcs_string;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;
static void output_pic_addr_const (FILE *, rtx, int);
static void put_condition_code (enum rtx_code, enum machine_mode,
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
static rtx get_thread_pointer (int);
static rtx legitimize_tls_address (rtx, enum tls_model, int);
static void get_pc_thunk_name (char [32], unsigned int);
static rtx gen_push (rtx);
static int ix86_flags_dependant (rtx, rtx, enum attr_type);
static int ix86_agi_dependant (rtx, rtx, enum attr_type);
static struct machine_function * ix86_init_machine_status (void);
static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
static int ix86_nsaved_regs (void);
static void ix86_emit_save_regs (void);
static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
static HOST_WIDE_INT ix86_GOT_alias_set (void);
static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
static rtx ix86_expand_aligntest (rtx, int);
static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
static int ix86_issue_rate (void);
static int ix86_adjust_cost (rtx, rtx, rtx, int);
static int ia32_multipass_dfa_lookahead (void);
static void ix86_init_mmx_sse_builtins (void);
static rtx x86_this_parameter (tree);
static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				 HOST_WIDE_INT, tree);
static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void x86_file_start (void);
static void ix86_reorg (void);
static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx *);
static tree ix86_build_builtin_va_list (void);
static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ix86_vector_mode_supported_p (enum machine_mode);

static int ix86_address_cost (rtx);
static bool ix86_cannot_force_const_mem (rtx);
static rtx ix86_delegitimize_address (rtx);

struct builtin_description;
static rtx ix86_expand_sse_comi (const struct builtin_description *,
static rtx ix86_expand_sse_compare (const struct builtin_description *,
static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_store_builtin (enum insn_code, tree);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
static int ix86_fp_comparison_cost (enum rtx_code code);
static unsigned int ix86_select_alt_pic_regnum (void);
static int ix86_save_reg (unsigned int, int);
static void ix86_compute_frame_layout (struct ix86_frame *);
static int ix86_comp_type_attributes (tree, tree);
static int ix86_function_regparm (tree, tree);
const struct attribute_spec ix86_attribute_table[];
static bool ix86_function_ok_for_sibcall (tree, tree);
static tree ix86_handle_cdecl_attribute (tree *, tree, tree, int, bool *);
static tree ix86_handle_regparm_attribute (tree *, tree, tree, int, bool *);
static int ix86_value_regno (enum machine_mode);
static bool contains_128bit_aligned_vector_p (tree);
static rtx ix86_struct_value_rtx (tree, int);
static bool ix86_ms_bitfield_layout_p (tree);
static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
static int extended_reg_mentioned_1 (rtx *, void *);
static bool ix86_rtx_costs (rtx, int, int, int *);
static int min_insn_size (rtx);
static tree ix86_md_asm_clobbers (tree clobbers);
static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
static void ix86_init_builtins (void);
static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);

/* This function is only used on Solaris.  */
static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
/* Register class used for passing given 64bit part of the argument.
   These represent classes as documented by the PS ABI, with the exception
   of SSESF, SSEDF classes, that are basically SSE class, just gcc will
   use SF or DFmode move instead of DImode to avoid reformatting penalties.

   Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
   whenever possible (upper half does contain padding).  */
enum x86_64_reg_class
{
  X86_64_INTEGER_CLASS,
  X86_64_INTEGERSI_CLASS,
  X86_64_COMPLEX_X87_CLASS,
};
static const char * const x86_64_reg_class_name[] = {
  "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
  "sseup", "x87", "x87up", "cplx87", "no"
};

#define MAX_CLASSES 4
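/* Presumably four because classification of a single argument never
   needs more than four eightbyte chunks (32 bytes).  */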
/* Table of constants used by fldpi, fldln2, etc.  */
static REAL_VALUE_TYPE ext_80387_constants_table[5];
static bool ext_80387_constants_init = 0;
static void init_ext_80387_constants (void);
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#  undef TARGET_MERGE_DECL_ATTRIBUTES
#  define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
struct gcc_target targetm = TARGET_INITIALIZER;
/* The svr4 ABI for the i386 says that records and unions are returned
   in memory.  */
#ifndef DEFAULT_PCC_STRUCT_RETURN
#define DEFAULT_PCC_STRUCT_RETURN 1
#endif
/* Sometimes certain combinations of command options do not make
   sense on a particular target machine.  You can define a macro
   `OVERRIDE_OPTIONS' to take account of this.  This macro, if
   defined, is executed once just after all the command options have
   been parsed.

   Don't use this macro to turn on various extra optimizations for
   `-O'.  That is what `OPTIMIZATION_OPTIONS' is for.  */

void
override_options (void)
{
  int i;
  int ix86_tune_defaulted = 0;
  /* Comes from final.c -- no real reason to change it.  */
#define MAX_CODE_ALIGN 16
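  /* The -malign-* values handled below are log2 amounts, so this caps
     the requested alignment at 2**16 bytes.  */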
  static struct ptt
    {
      const struct processor_costs *cost;	/* Processor costs */
      const int target_enable;			/* Target flags to enable.  */
      const int target_disable;			/* Target flags to disable.  */
      const int align_loop;			/* Default alignments.  */
      const int align_loop_max_skip;
      const int align_jump;
      const int align_jump_max_skip;
      const int align_func;
    }
  const processor_target_table[PROCESSOR_max] =
    {
      {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
      {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
      {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
      {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
      {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
      {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
      {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
      {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
      {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
    };
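  /* Field order per the struct above: cost table, target flags to enable
     and disable, then the default loop alignment, loop max skip, jump
     alignment, jump max skip and function alignment used when the
     corresponding -falign-* option is zero.  */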
  static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
  static struct pta
    {
      const char *const name;		/* processor name or nickname.  */
      const enum processor_type processor;
      const enum pta_flags
	{
	  PTA_SSE = 1, PTA_SSE2 = 2, PTA_SSE3 = 4, PTA_MMX = 8,
	  PTA_PREFETCH_SSE = 16, PTA_3DNOW = 32, PTA_3DNOW_A = 64,
	  PTA_64BIT = 128
	} flags;
    }
  const processor_alias_table[] =
    {
      {"i386", PROCESSOR_I386, 0},
      {"i486", PROCESSOR_I486, 0},
      {"i586", PROCESSOR_PENTIUM, 0},
      {"pentium", PROCESSOR_PENTIUM, 0},
      {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
      {"winchip-c6", PROCESSOR_I486, PTA_MMX},
      {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
      {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
      {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
      {"i686", PROCESSOR_PENTIUMPRO, 0},
      {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
      {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
      {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
      {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
      {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
      {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
				       | PTA_MMX | PTA_PREFETCH_SSE},
      {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
					| PTA_MMX | PTA_PREFETCH_SSE},
      {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
				     | PTA_MMX | PTA_PREFETCH_SSE},
      {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
				   | PTA_MMX | PTA_PREFETCH_SSE},
      {"k6", PROCESSOR_K6, PTA_MMX},
      {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
      {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
      {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				   | PTA_3DNOW_A},
      {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
					 | PTA_3DNOW | PTA_3DNOW_A},
      {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				     | PTA_3DNOW_A | PTA_SSE},
      {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				      | PTA_3DNOW_A | PTA_SSE},
      {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
				      | PTA_3DNOW_A | PTA_SSE},
      {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
			       | PTA_SSE | PTA_SSE2},
      {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
			   | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
				| PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
				 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
      {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
				  | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
    };

  int const pta_size = ARRAY_SIZE (processor_alias_table);
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
  /* Set the default values for switches whose default depends on TARGET_64BIT
     in case they weren't overwritten by command line options.  */
  if (TARGET_64BIT)
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = 1;
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = 1;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = 0;
    }
  else
    {
      if (flag_omit_frame_pointer == 2)
	flag_omit_frame_pointer = 0;
      if (flag_asynchronous_unwind_tables == 2)
	flag_asynchronous_unwind_tables = 0;
      if (flag_pcc_struct_return == 2)
	flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
    }
  if (!ix86_tune_string && ix86_arch_string)
    ix86_tune_string = ix86_arch_string;
  if (!ix86_tune_string)
    {
      ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
      ix86_tune_defaulted = 1;
    }
  if (!ix86_arch_string)
    ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
  if (ix86_cmodel_string != 0)
    {
      if (!strcmp (ix86_cmodel_string, "small"))
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
      else if (flag_pic)
	sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
      else if (!strcmp (ix86_cmodel_string, "32"))
	ix86_cmodel = CM_32;
      else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
	ix86_cmodel = CM_KERNEL;
      else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
	ix86_cmodel = CM_MEDIUM;
      else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
	ix86_cmodel = CM_LARGE;
      else
	error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
    }
  else
    {
      ix86_cmodel = CM_32;
      if (TARGET_64BIT)
	ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
    }
  if (ix86_asm_string != 0)
    {
      if (!strcmp (ix86_asm_string, "intel"))
	ix86_asm_dialect = ASM_INTEL;
      else if (!strcmp (ix86_asm_string, "att"))
	ix86_asm_dialect = ASM_ATT;
      else
	error ("bad value (%s) for -masm= switch", ix86_asm_string);
    }
  if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
    error ("code model %qs not supported in the %s bit mode",
	   ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
  if (ix86_cmodel == CM_LARGE)
    sorry ("code model %<large%> not supported yet");
  if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
    sorry ("%i-bit mode not compiled in",
	   (target_flags & MASK_64BIT) ? 64 : 32);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
      {
	ix86_arch = processor_alias_table[i].processor;
	/* Default cpu tuning to the architecture.  */
	ix86_tune = ix86_arch;
	if (processor_alias_table[i].flags & PTA_MMX
	    && !(target_flags_explicit & MASK_MMX))
	  target_flags |= MASK_MMX;
	if (processor_alias_table[i].flags & PTA_3DNOW
	    && !(target_flags_explicit & MASK_3DNOW))
	  target_flags |= MASK_3DNOW;
	if (processor_alias_table[i].flags & PTA_3DNOW_A
	    && !(target_flags_explicit & MASK_3DNOW_A))
	  target_flags |= MASK_3DNOW_A;
	if (processor_alias_table[i].flags & PTA_SSE
	    && !(target_flags_explicit & MASK_SSE))
	  target_flags |= MASK_SSE;
	if (processor_alias_table[i].flags & PTA_SSE2
	    && !(target_flags_explicit & MASK_SSE2))
	  target_flags |= MASK_SSE2;
	if (processor_alias_table[i].flags & PTA_SSE3
	    && !(target_flags_explicit & MASK_SSE3))
	  target_flags |= MASK_SSE3;
	if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
	  x86_prefetch_sse = true;
	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  error ("CPU you selected does not support x86-64 "
		 "instruction set");
	break;
      }

  if (i == pta_size)
    error ("bad value (%s) for -march= switch", ix86_arch_string);
  for (i = 0; i < pta_size; i++)
    if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
      {
	ix86_tune = processor_alias_table[i].processor;
	if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
	  {
	    if (ix86_tune_defaulted)
	      {
		ix86_tune_string = "x86-64";
		for (i = 0; i < pta_size; i++)
		  if (! strcmp (ix86_tune_string,
				processor_alias_table[i].name))
		    break;
		ix86_tune = processor_alias_table[i].processor;
	      }
	    else
	      error ("CPU you selected does not support x86-64 "
		     "instruction set");
	  }
	/* Intel CPUs have always interpreted SSE prefetch instructions as
	   NOPs; so, we can enable SSE prefetch instructions even when
	   -mtune (rather than -march) points us to a processor that has them.
	   However, the VIA C3 gives a SIGILL, so we only do that for i686 and
	   higher processors.  */
	if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
	  x86_prefetch_sse = true;
	break;
      }
  if (i == pta_size)
    error ("bad value (%s) for -mtune= switch", ix86_tune_string);
  if (optimize_size)
    ix86_cost = &size_cost;
  else
    ix86_cost = processor_target_table[ix86_tune].cost;
  target_flags |= processor_target_table[ix86_tune].target_enable;
  target_flags &= ~processor_target_table[ix86_tune].target_disable;

  /* Arrange to set up i386_stack_locals for all functions.  */
  init_machine_status = ix86_init_machine_status;
  /* Validate -mregparm= value.  */
  if (ix86_regparm_string)
    {
      i = atoi (ix86_regparm_string);
      if (i < 0 || i > REGPARM_MAX)
	error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
      else
	ix86_regparm = i;
    }
  if (TARGET_64BIT)
    ix86_regparm = REGPARM_MAX;
  /* If the user has provided any of the -malign-* options,
     warn and use that value only if -falign-* is not set.
     Remove this code in GCC 3.2 or later.  */
  if (ix86_align_loops_string)
    {
      warning ("-malign-loops is obsolete, use -falign-loops");
      if (align_loops == 0)
	{
	  i = atoi (ix86_align_loops_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_loops = 1 << i;
	}
    }

  if (ix86_align_jumps_string)
    {
      warning ("-malign-jumps is obsolete, use -falign-jumps");
      if (align_jumps == 0)
	{
	  i = atoi (ix86_align_jumps_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_jumps = 1 << i;
	}
    }

  if (ix86_align_funcs_string)
    {
      warning ("-malign-functions is obsolete, use -falign-functions");
      if (align_functions == 0)
	{
	  i = atoi (ix86_align_funcs_string);
	  if (i < 0 || i > MAX_CODE_ALIGN)
	    error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
	  else
	    align_functions = 1 << i;
	}
    }
  /* Default align_* from the processor table.  */
  if (align_loops == 0)
    {
      align_loops = processor_target_table[ix86_tune].align_loop;
      align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
    }
  if (align_jumps == 0)
    {
      align_jumps = processor_target_table[ix86_tune].align_jump;
      align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
    }
  if (align_functions == 0)
    {
      align_functions = processor_target_table[ix86_tune].align_func;
    }
  /* Validate -mpreferred-stack-boundary= value, or provide default.
     The default of 128 bits is for Pentium III's SSE __m128, but we
     don't want additional code to keep the stack aligned when
     optimizing for code size.  */
  ix86_preferred_stack_boundary = (optimize_size
				   ? TARGET_64BIT ? 128 : 32
				   : 128);
  if (ix86_preferred_stack_boundary_string)
    {
      i = atoi (ix86_preferred_stack_boundary_string);
      if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
	error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
	       TARGET_64BIT ? 4 : 2);
      else
	ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
    }
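  /* For example, -mpreferred-stack-boundary=4 gives
     (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. 16-byte stack alignment.  */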
  /* Validate -mbranch-cost= value, or provide default.  */
  ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
  if (ix86_branch_cost_string)
    {
      i = atoi (ix86_branch_cost_string);
      if (i < 0 || i > 5)
	error ("-mbranch-cost=%d is not between 0 and 5", i);
      else
	ix86_branch_cost = i;
    }
  if (ix86_tls_dialect_string)
    {
      if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
	ix86_tls_dialect = TLS_DIALECT_GNU;
      else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
	ix86_tls_dialect = TLS_DIALECT_SUN;
      else
	error ("bad value (%s) for -mtls-dialect= switch",
	       ix86_tls_dialect_string);
    }
  /* Keep nonleaf frame pointers.  */
  if (flag_omit_frame_pointer)
    target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
  else if (TARGET_OMIT_LEAF_FRAME_POINTER)
    flag_omit_frame_pointer = 1;
  /* If we're doing fast math, we don't care about comparison order
     wrt NaNs.  This lets us use a shorter comparison sequence.  */
  if (flag_unsafe_math_optimizations)
    target_flags &= ~MASK_IEEE_FP;

  /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
     since the insns won't need emulation.  */
  if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
    target_flags &= ~MASK_NO_FANCY_MATH_387;

  /* Likewise, if the target doesn't have a 387, or we've specified
     software floating point, don't use 387 inline intrinsics.  */
  if (!TARGET_80387)
    target_flags |= MASK_NO_FANCY_MATH_387;
  /* Turn on SSE2 builtins for -msse3.  */
  if (TARGET_SSE3)
    target_flags |= MASK_SSE2;

  /* Turn on SSE builtins for -msse2.  */
  if (TARGET_SSE2)
    target_flags |= MASK_SSE;

  /* Turn on MMX builtins for -msse.  */
  if (TARGET_SSE)
    {
      target_flags |= MASK_MMX & ~target_flags_explicit;
      x86_prefetch_sse = true;
    }

  /* Turn on MMX builtins for 3Dnow.  */
  if (TARGET_3DNOW)
    target_flags |= MASK_MMX;
  if (TARGET_64BIT)
    {
      if (TARGET_ALIGN_DOUBLE)
	error ("-malign-double makes no sense in the 64bit mode");
      if (TARGET_RTD)
	error ("-mrtd calling convention not supported in the 64bit mode");

      /* Enable by default the SSE and MMX builtins.  Do allow the user to
	 explicitly disable any of these.  In particular, disabling SSE and
	 MMX for kernel code is extremely useful.  */
      target_flags
	|= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
	    & ~target_flags_explicit);

      if (TARGET_SSE)
	ix86_fpmath = FPMATH_SSE;
    }
  else
    {
      ix86_fpmath = FPMATH_387;
      /* i386 ABI does not specify red zone.  It still makes sense to use it
	 when the programmer takes care to keep the stack from being destroyed.  */
      if (!(target_flags_explicit & MASK_NO_RED_ZONE))
	target_flags |= MASK_NO_RED_ZONE;
    }
  if (ix86_fpmath_string != 0)
    {
      if (! strcmp (ix86_fpmath_string, "387"))
	ix86_fpmath = FPMATH_387;
      else if (! strcmp (ix86_fpmath_string, "sse"))
	{
	  if (!TARGET_SSE)
	    {
	      warning ("SSE instruction set disabled, using 387 arithmetics");
	      ix86_fpmath = FPMATH_387;
	    }
	  else
	    ix86_fpmath = FPMATH_SSE;
	}
      else if (! strcmp (ix86_fpmath_string, "387,sse")
	       || ! strcmp (ix86_fpmath_string, "sse,387"))
	{
	  if (!TARGET_SSE)
	    {
	      warning ("SSE instruction set disabled, using 387 arithmetics");
	      ix86_fpmath = FPMATH_387;
	    }
	  else if (!TARGET_80387)
	    {
	      warning ("387 instruction set disabled, using SSE arithmetics");
	      ix86_fpmath = FPMATH_SSE;
	    }
	  else
	    ix86_fpmath = FPMATH_SSE | FPMATH_387;
	}
      else
	error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
    }
  /* If the i387 is disabled, then do not return values in it.  */
  if (!TARGET_80387)
    target_flags &= ~MASK_FLOAT_RETURNS;
  if ((x86_accumulate_outgoing_args & TUNEMASK)
      && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
      && !optimize_size)
    target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
  /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix.  */
  {
    char *p;
    ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
    p = strchr (internal_label_prefix, 'X');
    internal_label_prefix_len = p - internal_label_prefix;
    *p = '\0';
  }
  /* When scheduling description is not available, disable scheduler pass
     so it won't slow down the compilation and make x87 code slower.  */
  if (!TARGET_SCHEDULE)
    flag_schedule_insns_after_reload = flag_schedule_insns = 0;
}
void
optimization_options (int level, int size ATTRIBUTE_UNUSED)
{
  /* For -O2 and beyond, turn off -fschedule-insns by default.  It tends to
     make the problem with not enough registers even worse.  */
#ifdef INSN_SCHEDULING
  if (level > 1)
    flag_schedule_insns = 0;
#endif

  /* The default values of these switches depend on the TARGET_64BIT
     that is not known at this moment.  Mark these values with 2 and
     let the user override these.  In case there is no command line option
     specifying them, we will set the defaults in override_options.  */
  flag_omit_frame_pointer = 2;
  flag_pcc_struct_return = 2;
  flag_asynchronous_unwind_tables = 2;
#ifdef SUBTARGET_OPTIMIZATION_OPTIONS
  SUBTARGET_OPTIMIZATION_OPTIONS;
#endif
}
/* Table of valid machine attributes.  */
const struct attribute_spec ix86_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  /* Stdcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "stdcall",  0, 0, false, true,  true,  ix86_handle_cdecl_attribute },
  /* Fastcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "fastcall", 0, 0, false, true,  true,  ix86_handle_cdecl_attribute },
  /* Cdecl attribute says the callee is a normal C declaration.  */
  { "cdecl",    0, 0, false, true,  true,  ix86_handle_cdecl_attribute },
  /* Regparm attribute specifies how many integer arguments are to be
     passed in registers.  */
  { "regparm",  1, 1, false, true,  true,  ix86_handle_regparm_attribute },
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
  { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
  { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
  { "shared",    0, 0, true,  false, false, ix86_handle_shared_attribute },
#endif
  { "ms_struct",  0, 0, false, false, false, ix86_handle_struct_attribute },
  { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, NULL }
};
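
/* Worked example (editor's note, not in the original i386.c): the table
   above is what accepts declarations such as

     int __attribute__((regparm(3))) f (int a, int b, int c);
     int __attribute__((stdcall)) g (int a);

   "regparm" takes exactly one argument (min_len == max_len == 1); the
   calling-convention attributes take none.  */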
/* Decide whether we can make a sibling call to a function.  DECL is the
   declaration of the function being targeted by the call and EXP is the
   CALL_EXPR representing the call.  */

static bool
ix86_function_ok_for_sibcall (tree decl, tree exp)
{
  /* If we are generating position-independent code, we cannot sibcall
     optimize any indirect call, or a direct call to a global function,
     as the PLT requires %ebx be live.  */
  if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
    return false;

  /* If we are returning floats on the 80387 register stack, we cannot
     make a sibcall from a function that doesn't return a float to a
     function that does or, conversely, from a function that does return
     a float to a function that doesn't; the necessary stack adjustment
     would not be executed.  */
  if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp)))
      != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)))))
    return false;

  /* If this call is indirect, we'll need to be able to use a call-clobbered
     register for the address of the target function.  Make sure that all
     such registers are not used for passing parameters.  */
  if (!decl && !TARGET_64BIT)
    {
      tree type;

      /* We're looking at the CALL_EXPR, we need the type of the function.  */
      type = TREE_OPERAND (exp, 0);	/* pointer expression */
      type = TREE_TYPE (type);		/* pointer type */
      type = TREE_TYPE (type);		/* function type */

      if (ix86_function_regparm (type, NULL) >= 3)
	{
	  /* ??? Need to count the actual number of registers to be used,
	     not the possible number of registers.  Fix later.  */
	  return false;
	}
    }

  /* Otherwise okay.  That also includes certain types of indirect calls.  */
  return true;
}
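
/* Worked example (editor's note, not in the original i386.c): under -fpic
   on a 32-bit target, a tail call such as

     extern int bar (int);
     int foo (int x) { return bar (x); }

   is rejected by the first check above, because bar is TREE_PUBLIC and a
   call through the PLT needs %ebx to hold the GOT pointer.  */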
/* Handle a "cdecl", "stdcall", or "fastcall" attribute;
   arguments as in struct attribute_spec.handler.  */

static tree
ix86_handle_cdecl_attribute (tree *node, tree name,
			     tree args ATTRIBUTE_UNUSED,
			     int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning ("%qs attribute only applies to functions",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else
    {
      if (is_attribute_p ("fastcall", name))
	{
	  if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
	    error ("fastcall and stdcall attributes are not compatible");
	  else if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
	    error ("fastcall and regparm attributes are not compatible");
	}
      else if (is_attribute_p ("stdcall", name))
	{
	  if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
	    error ("fastcall and stdcall attributes are not compatible");
	}
    }

  if (TARGET_64BIT)
    {
      warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Handle a "regparm" attribute;
   arguments as in struct attribute_spec.handler.  */

static tree
ix86_handle_regparm_attribute (tree *node, tree name, tree args,
			       int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != METHOD_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning ("%qs attribute only applies to functions",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else
    {
      tree cst;

      cst = TREE_VALUE (args);
      if (TREE_CODE (cst) != INTEGER_CST)
	{
	  warning ("%qs attribute requires an integer constant argument",
		   IDENTIFIER_POINTER (name));
	  *no_add_attrs = true;
	}
      else if (compare_tree_int (cst, REGPARM_MAX) > 0)
	{
	  warning ("argument to %qs attribute larger than %d",
		   IDENTIFIER_POINTER (name), REGPARM_MAX);
	  *no_add_attrs = true;
	}

      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
	error ("fastcall and regparm attributes are not compatible");
    }

  return NULL_TREE;
}
/* Return 0 if the attributes for two types are incompatible, 1 if they
   are compatible, and 2 if they are nearly compatible (which causes a
   warning to be generated).  */

static int
ix86_comp_type_attributes (tree type1, tree type2)
{
  /* Check for mismatch of non-default calling convention.  */
  const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";

  if (TREE_CODE (type1) != FUNCTION_TYPE)
    return 1;

  /* Check for mismatched fastcall types.  */
  if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
      != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
    return 0;

  /* Check for mismatched return types (cdecl vs stdcall).  */
  if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
      != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
    return 0;

  if (ix86_function_regparm (type1, NULL)
      != ix86_function_regparm (type2, NULL))
    return 0;

  return 1;
}
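
/* Worked example (editor's note, not in the original i386.c): with -mrtd
   off, a pointer to a "stdcall" function and a pointer to a plain (cdecl)
   function compare as incompatible here, so the front end diagnoses an
   assignment between the two pointer types.  */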
/* Return the regparm value for a function with the indicated TYPE and DECL.
   DECL may be NULL when calling function indirectly
   or considering a libcall.  */

static int
ix86_function_regparm (tree type, tree decl)
{
  tree attr;
  int regparm = ix86_regparm;
  bool user_convention = false;

  if (!TARGET_64BIT)
    {
      attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
      if (attr)
	{
	  regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
	  user_convention = true;
	}

      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
	{
	  regparm = 2;
	  user_convention = true;
	}

      /* Use register calling convention for local functions when possible.  */
      if (!TARGET_64BIT && !user_convention && decl
	  && flag_unit_at_a_time && !profile_flag)
	{
	  struct cgraph_local_info *i = cgraph_local_info (decl);
	  if (i && i->local)
	    {
	      /* We can't use regparm(3) for nested functions as these use
		 static chain pointer in third argument.  */
	      if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
		regparm = 2;
	      else
		regparm = 3;
	    }
	}
    }

  return regparm;
}
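
/* Worked example (editor's note, not in the original i386.c): with
   -funit-at-a-time and no profiling, a file-local function such as

     static int sum3 (int a, int b, int c) { return a + b + c; }

   whose address does not escape is promoted to regparm 3 above, so its
   arguments travel in %eax, %edx and %ecx instead of on the stack.  */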
/* Return true if EAX is live at the start of the function.  Used by
   ix86_expand_prologue to determine if we need special help before
   calling allocate_stack_worker.  */

static bool
ix86_eax_live_at_start_p (void)
{
  /* Cheat.  Don't bother working forward from ix86_function_regparm
     to the function type to whether an actual argument is located in
     eax.  Instead just look at cfg info, which is still close enough
     to correct at this point.  This gives false positives for broken
     functions that might use uninitialized data that happens to be
     allocated in eax, but who cares?  */
  return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0);
}
/* Value is the number of bytes of arguments automatically
   popped when returning from a subroutine call.
   FUNDECL is the declaration node of the function (as a tree),
   FUNTYPE is the data type of the function (as a tree),
   or for a library call it is an identifier node for the subroutine name.
   SIZE is the number of bytes of arguments passed on the stack.

   On the 80386, the RTD insn may be used to pop them if the number
     of args is fixed, but if the number is variable then the caller
     must pop them all.  RTD can't be used for library calls now
     because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.

   The attribute stdcall is equivalent to RTD on a per module basis.  */

int
ix86_return_pops_args (tree fundecl, tree funtype, int size)
{
  int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);

  /* Cdecl functions override -mrtd, and never pop the stack.  */
  if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
    {
      /* Stdcall and fastcall functions will pop the stack if not
	 variable args.  */
      if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
	  || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
	rtd = 1;

      if (rtd
	  && (TYPE_ARG_TYPES (funtype) == NULL_TREE
	      || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
		  == void_type_node)))
	return size;
    }

  /* Lose any fake structure return argument if it is passed on the stack.  */
  if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
      && !TARGET_64BIT
      && !KEEP_AGGREGATE_RETURN_POINTER)
    {
      int nregs = ix86_function_regparm (funtype, fundecl);

      if (!nregs)
	return GET_MODE_SIZE (Pmode);
    }

  return 0;
}
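
/* Worked example (editor's note, not in the original i386.c): for

     int __attribute__((stdcall)) f (int a, int b);

   the callee pops its own arguments, so this function returns 8; a plain
   cdecl or varargs function returns 0 and leaves the pop to the caller.  */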
/* Argument support functions.  */

/* Return true when register may be used to pass function parameters.  */

bool
ix86_function_arg_regno_p (int regno)
{
  int i;

  if (!TARGET_64BIT)
    return (regno < REGPARM_MAX
	    || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));

  if (SSE_REGNO_P (regno) && TARGET_SSE)
    return true;
  /* RAX is used as hidden argument to va_arg functions.  */
  if (!regno)
    return true;
  for (i = 0; i < REGPARM_MAX; i++)
    if (regno == x86_64_int_parameter_registers[i])
      return true;
  return false;
}
/* Return true if we do not know how to pass TYPE solely in registers.  */

static bool
ix86_must_pass_in_stack (enum machine_mode mode, tree type)
{
  if (must_pass_in_stack_var_size_or_pad (mode, type))
    return true;

  /* For 32-bit, we want TImode aggregates to go on the stack.  But watch out!
     The layout_type routine is crafty and tries to trick us into passing
     currently unsupported vector types on the stack by using TImode.  */
  return (!TARGET_64BIT && mode == TImode
	  && type && TREE_CODE (type) != VECTOR_TYPE);
}
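
/* Worked example (editor's note, not in the original i386.c): on a 32-bit
   target, a 16-byte non-vector aggregate that layout_type gave TImode is
   forced onto the stack by the check above, while a genuine vector type
   keeps its vector mode and can still be passed in registers.  */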
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum,  /* Argument info to initialize */
		      tree fntype,	/* tree ptr for function decl */
		      rtx libname,	/* SYMBOL_REF of library name or 0 */
		      tree fndecl)
{
  static CUMULATIVE_ARGS zero_cum;
  tree param, next_param;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args (");
      if (fntype)
	fprintf (stderr, "fntype code = %s, ret code = %s",
		 tree_code_name[(int) TREE_CODE (fntype)],
		 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
      else
	fprintf (stderr, "no fntype");

      if (libname)
	fprintf (stderr, ", libname = %s", XSTR (libname, 0));
    }

  *cum = zero_cum;

  /* Set up the number of registers to use for passing arguments.  */
  if (fntype)
    cum->nregs = ix86_function_regparm (fntype, fndecl);
  else
    cum->nregs = ix86_regparm;
  if (TARGET_SSE)
    cum->sse_nregs = SSE_REGPARM_MAX;
  if (TARGET_MMX)
    cum->mmx_nregs = MMX_REGPARM_MAX;
  cum->warn_sse = true;
  cum->warn_mmx = true;
  cum->maybe_vaarg = false;

  /* Use ecx and edx registers if function has fastcall attribute.  */
  if (fntype && !TARGET_64BIT)
    {
      if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
	{
	  cum->nregs = 2;
	  cum->fastcall = 1;
	}
    }

  /* Determine if this function has variable arguments.  This is
     indicated by the last argument being 'void_type_mode' if there
     are no variable arguments.  If there are variable arguments, then
     we won't pass anything in registers in 32-bit mode.  */

  if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
    {
      for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
	   param != 0; param = next_param)
	{
	  next_param = TREE_CHAIN (param);
	  if (next_param == 0 && TREE_VALUE (param) != void_type_node)
	    {
	      if (!TARGET_64BIT)
		{
		  cum->nregs = 0;
		  cum->sse_nregs = 0;
		  cum->mmx_nregs = 0;
		  cum->warn_sse = 0;
		  cum->warn_mmx = 0;
		  cum->fastcall = 0;
		}
	      cum->maybe_vaarg = true;
	    }
	}
    }
  if ((!fntype && !libname)
      || (fntype && !TYPE_ARG_TYPES (fntype)))
    cum->maybe_vaarg = 1;

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, ", nregs=%d )\n", cum->nregs);
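
  /* Worked example (editor's note, not in the original i386.c): for a
     32-bit call to

       void __attribute__((fastcall)) f (int a, int b, int c);

     the fastcall branch above sets cum->nregs to 2, so A and B travel in
     %ecx and %edx and C is pushed on the stack.  */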
/* Return the "natural" mode for TYPE.  In most cases, this is just TYPE_MODE.
   But in the case of vector types, it is some vector mode.

   When we have only some of our vector isa extensions enabled, then there
   are some modes for which vector_mode_supported_p is false.  For these
   modes, the generic vector support in gcc will choose some non-vector mode
   in order to implement the type.  By computing the natural mode, we'll
   select the proper ABI location for the operand and not depend on whatever
   the middle-end decides to do with these vector types.  */

static enum machine_mode
type_natural_mode (tree type)
{
  enum machine_mode mode = TYPE_MODE (type);

  if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if ((size == 8 || size == 16)
	  /* ??? Generic code allows us to create width 1 vectors.  Ignore.  */
	  && TYPE_VECTOR_SUBPARTS (type) > 1)
	{
	  enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));

	  if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
	    mode = MIN_MODE_VECTOR_FLOAT;
	  else
	    mode = MIN_MODE_VECTOR_INT;

	  /* Get the mode which has this inner mode and number of units.  */
	  for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
	    if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
		&& GET_MODE_INNER (mode) == innermode)
	      return mode;
	}
    }

  return mode;
}
/* We want to pass a value in REGNO whose "natural" mode is MODE.  However,
   this may not agree with the mode that the type system has chosen for the
   register, which is ORIG_MODE.  If ORIG_MODE is not BLKmode, then we can
   go ahead and use it.  Otherwise we have to build a PARALLEL instead.  */

static rtx
gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
		     unsigned int regno)
{
  rtx tmp;

  if (orig_mode != BLKmode)
    tmp = gen_rtx_REG (orig_mode, regno);
  else
    {
      tmp = gen_rtx_REG (mode, regno);
      tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
      tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
    }

  return tmp;
}
/* x86-64 register passing implementation.  See x86-64 ABI for details.  Goal
   of this code is to classify each 8bytes of incoming argument by the register
   class and assign registers accordingly.  */

/* Return the union class of CLASS1 and CLASS2.
   See the x86-64 PS ABI for details.  */

static enum x86_64_reg_class
merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
{
  /* Rule #1: If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
     the other class.  */
  if (class1 == X86_64_NO_CLASS)
    return class2;
  if (class2 == X86_64_NO_CLASS)
    return class1;

  /* Rule #3: If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #4: If one of the classes is INTEGER, the result is INTEGER.  */
  if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
      || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
    return X86_64_INTEGERSI_CLASS;
  if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
      || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
    return X86_64_INTEGER_CLASS;

  /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
     MEMORY is used.  */
  if (class1 == X86_64_X87_CLASS
      || class1 == X86_64_X87UP_CLASS
      || class1 == X86_64_COMPLEX_X87_CLASS
      || class2 == X86_64_X87_CLASS
      || class2 == X86_64_X87UP_CLASS
      || class2 == X86_64_COMPLEX_X87_CLASS)
    return X86_64_MEMORY_CLASS;

  /* Rule #6: Otherwise class SSE is used.  */
  return X86_64_SSE_CLASS;
}
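
/* Worked example (editor's note, not in the original i386.c): for
   struct { double d; int i; } the first eightbyte classifies as SSEDF and
   the second as INTEGERSI; merging either with NO_CLASS (rule #2) leaves
   it unchanged, so each eightbyte keeps its own register class instead of
   pushing the whole struct to memory.  */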
/* Classify the argument of type TYPE and mode MODE.
   CLASSES will be filled by the register class used to pass each word
   of the operand.  The number of words is returned.  In case the parameter
   should be passed in memory, 0 is returned.  As a special case for zero
   sized containers, classes[0] will be NO_CLASS and 1 is returned.

   BIT_OFFSET is used internally for handling records and specifies the
   offset in bits modulo 256 to avoid overflow cases.

   See the x86-64 PS ABI for details.  */

static int
classify_argument (enum machine_mode mode, tree type,
		   enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
{
  HOST_WIDE_INT bytes =
    (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
  int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
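
  /* Worked example (editor's note, not in the original i386.c): for
     struct { double d; int i; } BYTES is 16, so WORDS is 2 and two class
     slots are filled below; aggregates larger than 16 bytes fall through
     to the memory case and the function returns 0.  */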
2167 /* Variable sized entities are always passed/returned in memory. */
2171 if (mode
!= VOIDmode
2172 && targetm
.calls
.must_pass_in_stack (mode
, type
))
2175 if (type
&& AGGREGATE_TYPE_P (type
))
2179 enum x86_64_reg_class subclasses
[MAX_CLASSES
];
2181 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2185 for (i
= 0; i
< words
; i
++)
2186 classes
[i
] = X86_64_NO_CLASS
;
2188 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2189 signalize memory class, so handle it as special case. */
2192 classes
[0] = X86_64_NO_CLASS
;
2196 /* Classify each field of record and merge classes. */
2197 if (TREE_CODE (type
) == RECORD_TYPE
)
2199 /* For classes first merge in the field of the subclasses. */
2200 if (TYPE_BINFO (type
))
2202 tree binfo
, base_binfo
;
2205 for (binfo
= TYPE_BINFO (type
), basenum
= 0;
2206 BINFO_BASE_ITERATE (binfo
, basenum
, base_binfo
); basenum
++)
2209 int offset
= tree_low_cst (BINFO_OFFSET (base_binfo
), 0) * 8;
2210 tree type
= BINFO_TYPE (base_binfo
);
2212 num
= classify_argument (TYPE_MODE (type
),
2214 (offset
+ bit_offset
) % 256);
2217 for (i
= 0; i
< num
; i
++)
2219 int pos
= (offset
+ (bit_offset
% 64)) / 8 / 8;
2221 merge_classes (subclasses
[i
], classes
[i
+ pos
]);
2225 /* And now merge the fields of structure. */
2226 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
2228 if (TREE_CODE (field
) == FIELD_DECL
)
2232 /* Bitfields are always classified as integer. Handle them
2233 early, since later code would consider them to be
2234 misaligned integers. */
2235 if (DECL_BIT_FIELD (field
))
2237 for (i
= int_bit_position (field
) / 8 / 8;
2238 i
< (int_bit_position (field
)
2239 + tree_low_cst (DECL_SIZE (field
), 0)
2242 merge_classes (X86_64_INTEGER_CLASS
,
2247 num
= classify_argument (TYPE_MODE (TREE_TYPE (field
)),
2248 TREE_TYPE (field
), subclasses
,
2249 (int_bit_position (field
)
2250 + bit_offset
) % 256);
2253 for (i
= 0; i
< num
; i
++)
2256 (int_bit_position (field
) + (bit_offset
% 64)) / 8 / 8;
2258 merge_classes (subclasses
[i
], classes
[i
+ pos
]);
2264 /* Arrays are handled as small records. */
2265 else if (TREE_CODE (type
) == ARRAY_TYPE
)
2268 num
= classify_argument (TYPE_MODE (TREE_TYPE (type
)),
2269 TREE_TYPE (type
), subclasses
, bit_offset
);
2273 /* The partial classes are now full classes. */
2274 if (subclasses
[0] == X86_64_SSESF_CLASS
&& bytes
!= 4)
2275 subclasses
[0] = X86_64_SSE_CLASS
;
2276 if (subclasses
[0] == X86_64_INTEGERSI_CLASS
&& bytes
!= 4)
2277 subclasses
[0] = X86_64_INTEGER_CLASS
;
2279 for (i
= 0; i
< words
; i
++)
2280 classes
[i
] = subclasses
[i
% num
];
2282 /* Unions are similar to RECORD_TYPE but offset is always 0. */
2283 else if (TREE_CODE (type
) == UNION_TYPE
2284 || TREE_CODE (type
) == QUAL_UNION_TYPE
)
2286 /* For classes first merge in the field of the subclasses. */
2287 if (TYPE_BINFO (type
))
2289 tree binfo
, base_binfo
;
2292 for (binfo
= TYPE_BINFO (type
), basenum
= 0;
2293 BINFO_BASE_ITERATE (binfo
, basenum
, base_binfo
); basenum
++)
2296 int offset
= tree_low_cst (BINFO_OFFSET (base_binfo
), 0) * 8;
2297 tree type
= BINFO_TYPE (base_binfo
);
2299 num
= classify_argument (TYPE_MODE (type
),
2301 (offset
+ (bit_offset
% 64)) % 256);
2304 for (i
= 0; i
< num
; i
++)
2306 int pos
= (offset
+ (bit_offset
% 64)) / 8 / 8;
2308 merge_classes (subclasses
[i
], classes
[i
+ pos
]);
2312 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
2314 if (TREE_CODE (field
) == FIELD_DECL
)
2317 num
= classify_argument (TYPE_MODE (TREE_TYPE (field
)),
2318 TREE_TYPE (field
), subclasses
,
2322 for (i
= 0; i
< num
; i
++)
2323 classes
[i
] = merge_classes (subclasses
[i
], classes
[i
]);
2330 /* Final merger cleanup. */
2331 for (i
= 0; i
< words
; i
++)
2333 /* If one class is MEMORY, everything should be passed in
2335 if (classes
[i
] == X86_64_MEMORY_CLASS
)
2338 /* The X86_64_SSEUP_CLASS should be always preceded by
2339 X86_64_SSE_CLASS. */
2340 if (classes
[i
] == X86_64_SSEUP_CLASS
2341 && (i
== 0 || classes
[i
- 1] != X86_64_SSE_CLASS
))
2342 classes
[i
] = X86_64_SSE_CLASS
;
2344 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2345 if (classes
[i
] == X86_64_X87UP_CLASS
2346 && (i
== 0 || classes
[i
- 1] != X86_64_X87_CLASS
))
2347 classes
[i
] = X86_64_SSE_CLASS
;
2352 /* Compute alignment needed. We align all types to natural boundaries with
2353 exception of XFmode that is aligned to 64bits. */
2354 if (mode
!= VOIDmode
&& mode
!= BLKmode
)
2356 int mode_alignment
= GET_MODE_BITSIZE (mode
);
2359 mode_alignment
= 128;
2360 else if (mode
== XCmode
)
2361 mode_alignment
= 256;
2362 if (COMPLEX_MODE_P (mode
))
2363 mode_alignment
/= 2;
2364 /* Misaligned fields are always returned in memory. */
2365 if (bit_offset
% mode_alignment
)
2369 /* for V1xx modes, just use the base mode */
2370 if (VECTOR_MODE_P (mode
)
2371 && GET_MODE_SIZE (GET_MODE_INNER (mode
)) == bytes
)
2372 mode
= GET_MODE_INNER (mode
);
2374 /* Classification of atomic types. */
2384 if (bit_offset
+ GET_MODE_BITSIZE (mode
) <= 32)
2385 classes
[0] = X86_64_INTEGERSI_CLASS
;
2387 classes
[0] = X86_64_INTEGER_CLASS
;
2391 classes
[0] = classes
[1] = X86_64_INTEGER_CLASS
;
2396 if (!(bit_offset
% 64))
2397 classes
[0] = X86_64_SSESF_CLASS
;
2399 classes
[0] = X86_64_SSE_CLASS
;
2402 classes
[0] = X86_64_SSEDF_CLASS
;
2405 classes
[0] = X86_64_X87_CLASS
;
2406 classes
[1] = X86_64_X87UP_CLASS
;
2409 classes
[0] = X86_64_SSE_CLASS
;
2410 classes
[1] = X86_64_SSEUP_CLASS
;
2413 classes
[0] = X86_64_SSE_CLASS
;
2416 classes
[0] = X86_64_SSEDF_CLASS
;
2417 classes
[1] = X86_64_SSEDF_CLASS
;
2420 classes
[0] = X86_64_COMPLEX_X87_CLASS
;
2423 /* This modes is larger than 16 bytes. */
2431 classes
[0] = X86_64_SSE_CLASS
;
2432 classes
[1] = X86_64_SSEUP_CLASS
;
2438 classes
[0] = X86_64_SSE_CLASS
;
2444 if (VECTOR_MODE_P (mode
))
2448 if (GET_MODE_CLASS (GET_MODE_INNER (mode
)) == MODE_INT
)
2450 if (bit_offset
+ GET_MODE_BITSIZE (mode
) <= 32)
2451 classes
[0] = X86_64_INTEGERSI_CLASS
;
2453 classes
[0] = X86_64_INTEGER_CLASS
;
2454 classes
[1] = X86_64_INTEGER_CLASS
;
2455 return 1 + (bytes
> 8);
2462 /* Examine the argument and return set number of register required in each
2463 class. Return 0 iff parameter should be passed in memory. */
2465 examine_argument (enum machine_mode mode
, tree type
, int in_return
,
2466 int *int_nregs
, int *sse_nregs
)
2468 enum x86_64_reg_class
class[MAX_CLASSES
];
2469 int n
= classify_argument (mode
, type
, class, 0);
2475 for (n
--; n
>= 0; n
--)
2478 case X86_64_INTEGER_CLASS
:
2479 case X86_64_INTEGERSI_CLASS
:
2482 case X86_64_SSE_CLASS
:
2483 case X86_64_SSESF_CLASS
:
2484 case X86_64_SSEDF_CLASS
:
2487 case X86_64_NO_CLASS
:
2488 case X86_64_SSEUP_CLASS
:
2490 case X86_64_X87_CLASS
:
2491 case X86_64_X87UP_CLASS
:
2495 case X86_64_COMPLEX_X87_CLASS
:
2496 return in_return
? 2 : 0;
2497 case X86_64_MEMORY_CLASS
:
2503 /* Construct container for the argument used by GCC interface. See
2504 FUNCTION_ARG for the detailed description. */
2507 construct_container (enum machine_mode mode
, enum machine_mode orig_mode
,
2508 tree type
, int in_return
, int nintregs
, int nsseregs
,
2509 const int *intreg
, int sse_regno
)
2511 enum machine_mode tmpmode
;
2513 (mode
== BLKmode
) ? int_size_in_bytes (type
) : (int) GET_MODE_SIZE (mode
);
2514 enum x86_64_reg_class
class[MAX_CLASSES
];
2518 int needed_sseregs
, needed_intregs
;
2519 rtx exp
[MAX_CLASSES
];
2522 n
= classify_argument (mode
, type
, class, 0);
2523 if (TARGET_DEBUG_ARG
)
2526 fprintf (stderr
, "Memory class\n");
2529 fprintf (stderr
, "Classes:");
2530 for (i
= 0; i
< n
; i
++)
2532 fprintf (stderr
, " %s", x86_64_reg_class_name
[class[i
]]);
2534 fprintf (stderr
, "\n");
2539 if (!examine_argument (mode
, type
, in_return
, &needed_intregs
,
2542 if (needed_intregs
> nintregs
|| needed_sseregs
> nsseregs
)
2545 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2546 some less clueful developer tries to use floating-point anyway. */
2547 if (needed_sseregs
&& !TARGET_SSE
)
2549 static bool issued_error
;
2552 issued_error
= true;
2554 error ("SSE register return with SSE disabled");
2556 error ("SSE register argument with SSE disabled");
2561 /* First construct simple cases. Avoid SCmode, since we want to use
2562 single register to pass this type. */
2563 if (n
== 1 && mode
!= SCmode
)
2566 case X86_64_INTEGER_CLASS
:
2567 case X86_64_INTEGERSI_CLASS
:
2568 return gen_rtx_REG (mode
, intreg
[0]);
2569 case X86_64_SSE_CLASS
:
2570 case X86_64_SSESF_CLASS
:
2571 case X86_64_SSEDF_CLASS
:
2572 return gen_reg_or_parallel (mode
, orig_mode
, SSE_REGNO (sse_regno
));
2573 case X86_64_X87_CLASS
:
2574 case X86_64_COMPLEX_X87_CLASS
:
2575 return gen_rtx_REG (mode
, FIRST_STACK_REG
);
2576 case X86_64_NO_CLASS
:
2577 /* Zero sized array, struct or class. */
2582 if (n
== 2 && class[0] == X86_64_SSE_CLASS
&& class[1] == X86_64_SSEUP_CLASS
2584 return gen_rtx_REG (mode
, SSE_REGNO (sse_regno
));
2586 && class[0] == X86_64_X87_CLASS
&& class[1] == X86_64_X87UP_CLASS
)
2587 return gen_rtx_REG (XFmode
, FIRST_STACK_REG
);
2588 if (n
== 2 && class[0] == X86_64_INTEGER_CLASS
2589 && class[1] == X86_64_INTEGER_CLASS
2590 && (mode
== CDImode
|| mode
== TImode
|| mode
== TFmode
)
2591 && intreg
[0] + 1 == intreg
[1])
2592 return gen_rtx_REG (mode
, intreg
[0]);
2594 /* Otherwise figure out the entries of the PARALLEL. */
2595 for (i
= 0; i
< n
; i
++)
2599 case X86_64_NO_CLASS
:
2601 case X86_64_INTEGER_CLASS
:
2602 case X86_64_INTEGERSI_CLASS
:
2603 /* Merge TImodes on aligned occasions here too. */
2604 if (i
* 8 + 8 > bytes
)
2605 tmpmode
= mode_for_size ((bytes
- i
* 8) * BITS_PER_UNIT
, MODE_INT
, 0);
2606 else if (class[i
] == X86_64_INTEGERSI_CLASS
)
2610 /* We've requested 24 bytes we don't have mode for. Use DImode. */
2611 if (tmpmode
== BLKmode
)
2613 exp
[nexps
++] = gen_rtx_EXPR_LIST (VOIDmode
,
2614 gen_rtx_REG (tmpmode
, *intreg
),
2618 case X86_64_SSESF_CLASS
:
2619 exp
[nexps
++] = gen_rtx_EXPR_LIST (VOIDmode
,
2620 gen_rtx_REG (SFmode
,
2621 SSE_REGNO (sse_regno
)),
2625 case X86_64_SSEDF_CLASS
:
2626 exp
[nexps
++] = gen_rtx_EXPR_LIST (VOIDmode
,
2627 gen_rtx_REG (DFmode
,
2628 SSE_REGNO (sse_regno
)),
2632 case X86_64_SSE_CLASS
:
2633 if (i
< n
- 1 && class[i
+ 1] == X86_64_SSEUP_CLASS
)
2637 exp
[nexps
++] = gen_rtx_EXPR_LIST (VOIDmode
,
2638 gen_rtx_REG (tmpmode
,
2639 SSE_REGNO (sse_regno
)),
2641 if (tmpmode
== TImode
)
2649 ret
= gen_rtx_PARALLEL (mode
, rtvec_alloc (nexps
));
2650 for (i
= 0; i
< nexps
; i
++)
2651 XVECEXP (ret
, 0, i
) = exp
[i
];
2655 /* Update the data in CUM to advance over an argument
2656 of mode MODE and data type TYPE.
2657 (TYPE is null for libcalls where that information may not be available.) */
2660 function_arg_advance (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
2661 tree type
, int named
)
2664 (mode
== BLKmode
) ? int_size_in_bytes (type
) : (int) GET_MODE_SIZE (mode
);
2665 int words
= (bytes
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
2668 mode
= type_natural_mode (type
);
2670 if (TARGET_DEBUG_ARG
)
2671 fprintf (stderr
, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
2672 "mode=%s, named=%d)\n\n",
2673 words
, cum
->words
, cum
->nregs
, cum
->sse_nregs
,
2674 GET_MODE_NAME (mode
), named
);
2678 int int_nregs
, sse_nregs
;
2679 if (!examine_argument (mode
, type
, 0, &int_nregs
, &sse_nregs
))
2680 cum
->words
+= words
;
2681 else if (sse_nregs
<= cum
->sse_nregs
&& int_nregs
<= cum
->nregs
)
2683 cum
->nregs
-= int_nregs
;
2684 cum
->sse_nregs
-= sse_nregs
;
2685 cum
->regno
+= int_nregs
;
2686 cum
->sse_regno
+= sse_nregs
;
2689 cum
->words
+= words
;
2707 cum
->words
+= words
;
2708 cum
->nregs
-= words
;
2709 cum
->regno
+= words
;
2711 if (cum
->nregs
<= 0)
2725 if (!type
|| !AGGREGATE_TYPE_P (type
))
2727 cum
->sse_words
+= words
;
2728 cum
->sse_nregs
-= 1;
2729 cum
->sse_regno
+= 1;
2730 if (cum
->sse_nregs
<= 0)
2742 if (!type
|| !AGGREGATE_TYPE_P (type
))
2744 cum
->mmx_words
+= words
;
2745 cum
->mmx_nregs
-= 1;
2746 cum
->mmx_regno
+= 1;
2747 if (cum
->mmx_nregs
<= 0)
2758 /* Define where to put the arguments to a function.
2759 Value is zero to push the argument on the stack,
2760 or a hard register in which to store the argument.
2762 MODE is the argument's machine mode.
2763 TYPE is the data type of the argument (as a tree).
2764 This is null for libcalls where that information may
2766 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2767 the preceding args and about the function being called.
2768 NAMED is nonzero if this argument is a named parameter
2769 (otherwise it is an extra parameter matching an ellipsis). */
2772 function_arg (CUMULATIVE_ARGS
*cum
, enum machine_mode orig_mode
,
2773 tree type
, int named
)
2775 enum machine_mode mode
= orig_mode
;
2778 (mode
== BLKmode
) ? int_size_in_bytes (type
) : (int) GET_MODE_SIZE (mode
);
2779 int words
= (bytes
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
2780 static bool warnedsse
, warnedmmx
;
2782 /* To simplify the code below, represent vector types with a vector mode
2783 even if MMX/SSE are not active. */
2784 if (type
&& TREE_CODE (type
) == VECTOR_TYPE
)
2785 mode
= type_natural_mode (type
);
2787 /* Handle a hidden AL argument containing number of registers for varargs
2788 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
2790 if (mode
== VOIDmode
)
2793 return GEN_INT (cum
->maybe_vaarg
2794 ? (cum
->sse_nregs
< 0
2802 ret
= construct_container (mode
, orig_mode
, type
, 0, cum
->nregs
,
2804 &x86_64_int_parameter_registers
[cum
->regno
],
2809 /* For now, pass fp/complex values on the stack. */
2821 if (words
<= cum
->nregs
)
2823 int regno
= cum
->regno
;
2825 /* Fastcall allocates the first two DWORD (SImode) or
2826 smaller arguments to ECX and EDX. */
2829 if (mode
== BLKmode
|| mode
== DImode
)
2832 /* ECX not EAX is the first allocated register. */
2836 ret
= gen_rtx_REG (mode
, regno
);
2846 if (!type
|| !AGGREGATE_TYPE_P (type
))
2848 if (!TARGET_SSE
&& !warnedsse
&& cum
->warn_sse
)
2851 warning ("SSE vector argument without SSE enabled "
2855 ret
= gen_reg_or_parallel (mode
, orig_mode
,
2856 cum
->sse_regno
+ FIRST_SSE_REG
);
2863 if (!type
|| !AGGREGATE_TYPE_P (type
))
2865 if (!TARGET_MMX
&& !warnedmmx
&& cum
->warn_mmx
)
2868 warning ("MMX vector argument without MMX enabled "
2872 ret
= gen_reg_or_parallel (mode
, orig_mode
,
2873 cum
->mmx_regno
+ FIRST_MMX_REG
);
2878 if (TARGET_DEBUG_ARG
)
2881 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
2882 words
, cum
->words
, cum
->nregs
, GET_MODE_NAME (mode
), named
);
2885 print_simple_rtl (stderr
, ret
);
2887 fprintf (stderr
, ", stack");
2889 fprintf (stderr
, " )\n");
2895 /* A C expression that indicates when an argument must be passed by
2896 reference. If nonzero for an argument, a copy of that argument is
2897 made in memory and a pointer to the argument is passed instead of
2898 the argument itself. The pointer is passed in whatever way is
2899 appropriate for passing a pointer to that type. */
2902 ix86_pass_by_reference (CUMULATIVE_ARGS
*cum ATTRIBUTE_UNUSED
,
2903 enum machine_mode mode ATTRIBUTE_UNUSED
,
2904 tree type
, bool named ATTRIBUTE_UNUSED
)
2909 if (type
&& int_size_in_bytes (type
) == -1)
2911 if (TARGET_DEBUG_ARG
)
2912 fprintf (stderr
, "function_arg_pass_by_reference\n");
2919 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
2920 ABI. Only called if TARGET_SSE. */
2922 contains_128bit_aligned_vector_p (tree type
)
2924 enum machine_mode mode
= TYPE_MODE (type
);
2925 if (SSE_REG_MODE_P (mode
)
2926 && (!TYPE_USER_ALIGN (type
) || TYPE_ALIGN (type
) > 128))
2928 if (TYPE_ALIGN (type
) < 128)
2931 if (AGGREGATE_TYPE_P (type
))
2933 /* Walk the aggregates recursively. */
2934 if (TREE_CODE (type
) == RECORD_TYPE
2935 || TREE_CODE (type
) == UNION_TYPE
2936 || TREE_CODE (type
) == QUAL_UNION_TYPE
)
2940 if (TYPE_BINFO (type
))
2942 tree binfo
, base_binfo
;
2945 for (binfo
= TYPE_BINFO (type
), i
= 0;
2946 BINFO_BASE_ITERATE (binfo
, i
, base_binfo
); i
++)
2947 if (contains_128bit_aligned_vector_p (BINFO_TYPE (base_binfo
)))
2950 /* And now merge the fields of structure. */
2951 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
2953 if (TREE_CODE (field
) == FIELD_DECL
2954 && contains_128bit_aligned_vector_p (TREE_TYPE (field
)))
2958 /* Just for use if some languages passes arrays by value. */
2959 else if (TREE_CODE (type
) == ARRAY_TYPE
)
2961 if (contains_128bit_aligned_vector_p (TREE_TYPE (type
)))
2970 /* Gives the alignment boundary, in bits, of an argument with the
2971 specified mode and type. */
2974 ix86_function_arg_boundary (enum machine_mode mode
, tree type
)
2978 align
= TYPE_ALIGN (type
);
2980 align
= GET_MODE_ALIGNMENT (mode
);
2981 if (align
< PARM_BOUNDARY
)
2982 align
= PARM_BOUNDARY
;
2985 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
2986 make an exception for SSE modes since these require 128bit
2989 The handling here differs from field_alignment. ICC aligns MMX
2990 arguments to 4 byte boundaries, while structure fields are aligned
2991 to 8 byte boundaries. */
2993 align
= PARM_BOUNDARY
;
2996 if (!SSE_REG_MODE_P (mode
))
2997 align
= PARM_BOUNDARY
;
3001 if (!contains_128bit_aligned_vector_p (type
))
3002 align
= PARM_BOUNDARY
;
3010 /* Return true if N is a possible register number of function value. */
3012 ix86_function_value_regno_p (int regno
)
3016 return ((regno
) == 0
3017 || ((regno
) == FIRST_FLOAT_REG
&& TARGET_FLOAT_RETURNS_IN_80387
)
3018 || ((regno
) == FIRST_SSE_REG
&& TARGET_SSE
));
3020 return ((regno
) == 0 || (regno
) == FIRST_FLOAT_REG
3021 || ((regno
) == FIRST_SSE_REG
&& TARGET_SSE
)
3022 || ((regno
) == FIRST_FLOAT_REG
&& TARGET_FLOAT_RETURNS_IN_80387
));
3025 /* Define how to find the value returned by a function.
3026 VALTYPE is the data type of the value (as a tree).
3027 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3028 otherwise, FUNC is 0. */
3030 ix86_function_value (tree valtype
)
3032 enum machine_mode natmode
= type_natural_mode (valtype
);
3036 rtx ret
= construct_container (natmode
, TYPE_MODE (valtype
), valtype
,
3037 1, REGPARM_MAX
, SSE_REGPARM_MAX
,
3038 x86_64_int_return_registers
, 0);
3039 /* For zero sized structures, construct_container return NULL, but we
3040 need to keep rest of compiler happy by returning meaningful value. */
3042 ret
= gen_rtx_REG (TYPE_MODE (valtype
), 0);
3046 return gen_rtx_REG (TYPE_MODE (valtype
), ix86_value_regno (natmode
));
3049 /* Return false iff type is returned in memory. */
3051 ix86_return_in_memory (tree type
)
3053 int needed_intregs
, needed_sseregs
, size
;
3054 enum machine_mode mode
= type_natural_mode (type
);
3057 return !examine_argument (mode
, type
, 1, &needed_intregs
, &needed_sseregs
);
3059 if (mode
== BLKmode
)
3062 size
= int_size_in_bytes (type
);
3064 if (MS_AGGREGATE_RETURN
&& AGGREGATE_TYPE_P (type
) && size
<= 8)
3067 if (VECTOR_MODE_P (mode
) || mode
== TImode
)
3069 /* User-created vectors small enough to fit in EAX. */
3073 /* MMX/3dNow values are returned on the stack, since we've
3074 got to EMMS/FEMMS before returning. */
3078 /* SSE values are returned in XMM0, except when it doesn't exist. */
3080 return (TARGET_SSE
? 0 : 1);
3091 /* When returning SSE vector types, we have a choice of either
3092 (1) being abi incompatible with a -march switch, or
3093 (2) generating an error.
3094 Given no good solution, I think the safest thing is one warning.
3095 The user won't be able to use -Werror, but....
3097 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3098 called in response to actually generating a caller or callee that
3099 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3100 via aggregate_value_p for general type probing from tree-ssa. */
3103 ix86_struct_value_rtx (tree type
, int incoming ATTRIBUTE_UNUSED
)
3107 if (!TARGET_SSE
&& type
&& !warned
)
3109 /* Look at the return type of the function, not the function type. */
3110 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (type
));
3113 || (VECTOR_MODE_P (mode
) && GET_MODE_SIZE (mode
) == 16))
3116 warning ("SSE vector return without SSE enabled changes the ABI");
3123 /* Define how to find the value returned by a library function
3124 assuming the value has mode MODE. */
3126 ix86_libcall_value (enum machine_mode mode
)
3137 return gen_rtx_REG (mode
, FIRST_SSE_REG
);
3140 return gen_rtx_REG (mode
, FIRST_FLOAT_REG
);
3144 return gen_rtx_REG (mode
, 0);
3148 return gen_rtx_REG (mode
, ix86_value_regno (mode
));
/* Given a mode, return the register to use for a return value.  */

static int
ix86_value_regno (enum machine_mode mode)
{
  /* Floating point return values in %st(0).  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387)
    return FIRST_FLOAT_REG;
  /* 16-byte vector modes in %xmm0.  See ix86_return_in_memory for where
     we prevent this case when sse is not available.  */
  if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
    return FIRST_SSE_REG;
  /* Everything else in %eax.  */
  return 0;
}
/* Create the va_list data type.  */

static tree
ix86_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;

  /* For i386 we use plain pointer to argument area.  */
  if (!TARGET_64BIT)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
		      unsigned_type_node);
  f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
		      unsigned_type_node);
  f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
		      ptr_type_node);

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  TREE_CHAIN (f_gpr) = f_fpr;
  TREE_CHAIN (f_fpr) = f_ovf;
  TREE_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
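
/* Worked example (editor's note, not in the original i386.c): the record
   built above corresponds to the C declaration

     typedef struct __va_list_tag {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } va_list[1];

   which is the va_list layout the x86-64 psABI specifies.  */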
3208 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3211 ix86_setup_incoming_varargs (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
3212 tree type
, int *pretend_size ATTRIBUTE_UNUSED
,
3215 CUMULATIVE_ARGS next_cum
;
3216 rtx save_area
= NULL_RTX
, mem
;
3229 /* Indicate to allocate space on the stack for varargs save area. */
3230 ix86_save_varrargs_registers
= 1;
3232 cfun
->stack_alignment_needed
= 128;
3234 fntype
= TREE_TYPE (current_function_decl
);
3235 stdarg_p
= (TYPE_ARG_TYPES (fntype
) != 0
3236 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype
)))
3237 != void_type_node
));
3239 /* For varargs, we do not want to skip the dummy va_dcl argument.
3240 For stdargs, we do want to skip the last named argument. */
3243 function_arg_advance (&next_cum
, mode
, type
, 1);
3246 save_area
= frame_pointer_rtx
;
3248 set
= get_varargs_alias_set ();
3250 for (i
= next_cum
.regno
; i
< ix86_regparm
; i
++)
3252 mem
= gen_rtx_MEM (Pmode
,
3253 plus_constant (save_area
, i
* UNITS_PER_WORD
));
3254 set_mem_alias_set (mem
, set
);
3255 emit_move_insn (mem
, gen_rtx_REG (Pmode
,
3256 x86_64_int_parameter_registers
[i
]));
3259 if (next_cum
.sse_nregs
)
3261 /* Now emit code to save SSE registers. The AX parameter contains number
3262 of SSE parameter registers used to call this function. We use
3263 sse_prologue_save insn template that produces computed jump across
3264 SSE saves. We need some preparation work to get this working. */
3266 label
= gen_label_rtx ();
3267 label_ref
= gen_rtx_LABEL_REF (Pmode
, label
);
3269 /* Compute address to jump to :
3270 label - 5*eax + nnamed_sse_arguments*5 */
3271 tmp_reg
= gen_reg_rtx (Pmode
);
3272 nsse_reg
= gen_reg_rtx (Pmode
);
3273 emit_insn (gen_zero_extendqidi2 (nsse_reg
, gen_rtx_REG (QImode
, 0)));
3274 emit_insn (gen_rtx_SET (VOIDmode
, tmp_reg
,
3275 gen_rtx_MULT (Pmode
, nsse_reg
,
3277 if (next_cum
.sse_regno
)
3280 gen_rtx_CONST (DImode
,
3281 gen_rtx_PLUS (DImode
,
3283 GEN_INT (next_cum
.sse_regno
* 4))));
3285 emit_move_insn (nsse_reg
, label_ref
);
3286 emit_insn (gen_subdi3 (nsse_reg
, nsse_reg
, tmp_reg
));
3288 /* Compute address of memory block we save into. We always use pointer
3289 pointing 127 bytes after first byte to store - this is needed to keep
3290 instruction size limited by 4 bytes. */
3291 tmp_reg
= gen_reg_rtx (Pmode
);
3292 emit_insn (gen_rtx_SET (VOIDmode
, tmp_reg
,
3293 plus_constant (save_area
,
3294 8 * REGPARM_MAX
+ 127)));
3295 mem
= gen_rtx_MEM (BLKmode
, plus_constant (tmp_reg
, -127));
3296 set_mem_alias_set (mem
, set
);
3297 set_mem_align (mem
, BITS_PER_WORD
);
3299 /* And finally do the dirty job! */
3300 emit_insn (gen_sse_prologue_save (mem
, nsse_reg
,
3301 GEN_INT (next_cum
.sse_regno
), label
));
3306 /* Implement va_start. */
3309 ix86_va_start (tree valist
, rtx nextarg
)
3311 HOST_WIDE_INT words
, n_gpr
, n_fpr
;
3312 tree f_gpr
, f_fpr
, f_ovf
, f_sav
;
3313 tree gpr
, fpr
, ovf
, sav
, t
;
3315 /* Only 64bit target needs something special. */
3318 std_expand_builtin_va_start (valist
, nextarg
);
3322 f_gpr
= TYPE_FIELDS (TREE_TYPE (va_list_type_node
));
3323 f_fpr
= TREE_CHAIN (f_gpr
);
3324 f_ovf
= TREE_CHAIN (f_fpr
);
3325 f_sav
= TREE_CHAIN (f_ovf
);
3327 valist
= build1 (INDIRECT_REF
, TREE_TYPE (TREE_TYPE (valist
)), valist
);
3328 gpr
= build (COMPONENT_REF
, TREE_TYPE (f_gpr
), valist
, f_gpr
, NULL_TREE
);
3329 fpr
= build (COMPONENT_REF
, TREE_TYPE (f_fpr
), valist
, f_fpr
, NULL_TREE
);
3330 ovf
= build (COMPONENT_REF
, TREE_TYPE (f_ovf
), valist
, f_ovf
, NULL_TREE
);
3331 sav
= build (COMPONENT_REF
, TREE_TYPE (f_sav
), valist
, f_sav
, NULL_TREE
);
3333 /* Count number of gp and fp argument registers used. */
3334 words
= current_function_args_info
.words
;
3335 n_gpr
= current_function_args_info
.regno
;
3336 n_fpr
= current_function_args_info
.sse_regno
;
3338 if (TARGET_DEBUG_ARG
)
3339 fprintf (stderr
, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3340 (int) words
, (int) n_gpr
, (int) n_fpr
);
3342 t
= build (MODIFY_EXPR
, TREE_TYPE (gpr
), gpr
,
3343 build_int_cst (NULL_TREE
, n_gpr
* 8));
3344 TREE_SIDE_EFFECTS (t
) = 1;
3345 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
3347 t
= build (MODIFY_EXPR
, TREE_TYPE (fpr
), fpr
,
3348 build_int_cst (NULL_TREE
, n_fpr
* 16 + 8*REGPARM_MAX
));
3349 TREE_SIDE_EFFECTS (t
) = 1;
3350 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
3352 /* Find the overflow area. */
3353 t
= make_tree (TREE_TYPE (ovf
), virtual_incoming_args_rtx
);
3355 t
= build (PLUS_EXPR
, TREE_TYPE (ovf
), t
,
3356 build_int_cst (NULL_TREE
, words
* UNITS_PER_WORD
));
3357 t
= build (MODIFY_EXPR
, TREE_TYPE (ovf
), ovf
, t
);
3358 TREE_SIDE_EFFECTS (t
) = 1;
3359 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
3361 /* Find the register save area.
3362 Prologue of the function save it right above stack frame. */
3363 t
= make_tree (TREE_TYPE (sav
), frame_pointer_rtx
);
3364 t
= build (MODIFY_EXPR
, TREE_TYPE (sav
), sav
, t
);
3365 TREE_SIDE_EFFECTS (t
) = 1;
3366 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
3369 /* Implement va_arg. */
3372 ix86_gimplify_va_arg (tree valist
, tree type
, tree
*pre_p
, tree
*post_p
)
3374 static const int intreg
[6] = { 0, 1, 2, 3, 4, 5 };
3375 tree f_gpr
, f_fpr
, f_ovf
, f_sav
;
3376 tree gpr
, fpr
, ovf
, sav
, t
;
3378 tree lab_false
, lab_over
= NULL_TREE
;
3383 enum machine_mode nat_mode
;
3385 /* Only 64bit target needs something special. */
3387 return std_gimplify_va_arg_expr (valist
, type
, pre_p
, post_p
);
3389 f_gpr
= TYPE_FIELDS (TREE_TYPE (va_list_type_node
));
3390 f_fpr
= TREE_CHAIN (f_gpr
);
3391 f_ovf
= TREE_CHAIN (f_fpr
);
3392 f_sav
= TREE_CHAIN (f_ovf
);
3394 valist
= build_va_arg_indirect_ref (valist
);
3395 gpr
= build (COMPONENT_REF
, TREE_TYPE (f_gpr
), valist
, f_gpr
, NULL_TREE
);
3396 fpr
= build (COMPONENT_REF
, TREE_TYPE (f_fpr
), valist
, f_fpr
, NULL_TREE
);
3397 ovf
= build (COMPONENT_REF
, TREE_TYPE (f_ovf
), valist
, f_ovf
, NULL_TREE
);
3398 sav
= build (COMPONENT_REF
, TREE_TYPE (f_sav
), valist
, f_sav
, NULL_TREE
);
3400 indirect_p
= pass_by_reference (NULL
, TYPE_MODE (type
), type
, false);
3402 type
= build_pointer_type (type
);
3403 size
= int_size_in_bytes (type
);
3404 rsize
= (size
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
3406 nat_mode
= type_natural_mode (type
);
3407 container
= construct_container (nat_mode
, TYPE_MODE (type
), type
, 0,
3408 REGPARM_MAX
, SSE_REGPARM_MAX
, intreg
, 0);
3410 /* Pull the value out of the saved registers. */
3412 addr
= create_tmp_var (ptr_type_node
, "addr");
3413 DECL_POINTER_ALIAS_SET (addr
) = get_varargs_alias_set ();
3417 int needed_intregs
, needed_sseregs
;
3419 tree int_addr
, sse_addr
;
3421 lab_false
= create_artificial_label ();
3422 lab_over
= create_artificial_label ();
3424 examine_argument (nat_mode
, type
, 0, &needed_intregs
, &needed_sseregs
);
3426 need_temp
= (!REG_P (container
)
3427 && ((needed_intregs
&& TYPE_ALIGN (type
) > 64)
3428 || TYPE_ALIGN (type
) > 128));
3430 /* In case we are passing structure, verify that it is consecutive block
3431 on the register save area. If not we need to do moves. */
3432 if (!need_temp
&& !REG_P (container
))
3434 /* Verify that all registers are strictly consecutive */
3435 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container
, 0, 0), 0))))
3439 for (i
= 0; i
< XVECLEN (container
, 0) && !need_temp
; i
++)
3441 rtx slot
= XVECEXP (container
, 0, i
);
3442 if (REGNO (XEXP (slot
, 0)) != FIRST_SSE_REG
+ (unsigned int) i
3443 || INTVAL (XEXP (slot
, 1)) != i
* 16)
3451 for (i
= 0; i
< XVECLEN (container
, 0) && !need_temp
; i
++)
3453 rtx slot
= XVECEXP (container
, 0, i
);
3454 if (REGNO (XEXP (slot
, 0)) != (unsigned int) i
3455 || INTVAL (XEXP (slot
, 1)) != i
* 8)
3467 int_addr
= create_tmp_var (ptr_type_node
, "int_addr");
3468 DECL_POINTER_ALIAS_SET (int_addr
) = get_varargs_alias_set ();
3469 sse_addr
= create_tmp_var (ptr_type_node
, "sse_addr");
3470 DECL_POINTER_ALIAS_SET (sse_addr
) = get_varargs_alias_set ();
3473 /* First ensure that we fit completely in registers. */
3476 t
= build_int_cst (TREE_TYPE (gpr
),
3477 (REGPARM_MAX
- needed_intregs
+ 1) * 8);
3478 t
= build2 (GE_EXPR
, boolean_type_node
, gpr
, t
);
3479 t2
= build1 (GOTO_EXPR
, void_type_node
, lab_false
);
3480 t
= build (COND_EXPR
, void_type_node
, t
, t2
, NULL_TREE
);
3481 gimplify_and_add (t
, pre_p
);
3485 t
= build_int_cst (TREE_TYPE (fpr
),
3486 (SSE_REGPARM_MAX
- needed_sseregs
+ 1) * 16
3488 t
= build2 (GE_EXPR
, boolean_type_node
, fpr
, t
);
3489 t2
= build1 (GOTO_EXPR
, void_type_node
, lab_false
);
3490 t
= build (COND_EXPR
, void_type_node
, t
, t2
, NULL_TREE
);
3491 gimplify_and_add (t
, pre_p
);
3494 /* Compute index to start of area used for integer regs. */
3497 /* int_addr = gpr + sav; */
3498 t
= fold_convert (ptr_type_node
, gpr
);
3499 t
= build2 (PLUS_EXPR
, ptr_type_node
, sav
, t
);
3500 t
= build2 (MODIFY_EXPR
, void_type_node
, int_addr
, t
);
3501 gimplify_and_add (t
, pre_p
);
3505 /* sse_addr = fpr + sav; */
3506 t
= fold_convert (ptr_type_node
, fpr
);
3507 t
= build2 (PLUS_EXPR
, ptr_type_node
, sav
, t
);
3508 t
= build2 (MODIFY_EXPR
, void_type_node
, sse_addr
, t
);
3509 gimplify_and_add (t
, pre_p
);
3514 tree temp
= create_tmp_var (type
, "va_arg_tmp");
3517 t
= build1 (ADDR_EXPR
, build_pointer_type (type
), temp
);
3518 t
= build2 (MODIFY_EXPR
, void_type_node
, addr
, t
);
3519 gimplify_and_add (t
, pre_p
);
3521 for (i
= 0; i
< XVECLEN (container
, 0); i
++)
3523 rtx slot
= XVECEXP (container
, 0, i
);
3524 rtx reg
= XEXP (slot
, 0);
3525 enum machine_mode mode
= GET_MODE (reg
);
3526 tree piece_type
= lang_hooks
.types
.type_for_mode (mode
, 1);
3527 tree addr_type
= build_pointer_type (piece_type
);
3530 tree dest_addr
, dest
;
3532 if (SSE_REGNO_P (REGNO (reg
)))
3534 src_addr
= sse_addr
;
3535 src_offset
= (REGNO (reg
) - FIRST_SSE_REG
) * 16;
3539 src_addr
= int_addr
;
3540 src_offset
= REGNO (reg
) * 8;
3542 src_addr
= fold_convert (addr_type
, src_addr
);
3543 src_addr
= fold (build2 (PLUS_EXPR
, addr_type
, src_addr
,
3544 size_int (src_offset
)));
3545 src
= build_va_arg_indirect_ref (src_addr
);
3547 dest_addr
= fold_convert (addr_type
, addr
);
3548 dest_addr
= fold (build2 (PLUS_EXPR
, addr_type
, dest_addr
,
3549 size_int (INTVAL (XEXP (slot
, 1)))));
3550 dest
= build_va_arg_indirect_ref (dest_addr
);
3552 t
= build2 (MODIFY_EXPR
, void_type_node
, dest
, src
);
3553 gimplify_and_add (t
, pre_p
);
3559 t
= build2 (PLUS_EXPR
, TREE_TYPE (gpr
), gpr
,
3560 build_int_cst (TREE_TYPE (gpr
), needed_intregs
* 8));
3561 t
= build2 (MODIFY_EXPR
, TREE_TYPE (gpr
), gpr
, t
);
3562 gimplify_and_add (t
, pre_p
);
3566 t
= build2 (PLUS_EXPR
, TREE_TYPE (fpr
), fpr
,
3567 build_int_cst (TREE_TYPE (fpr
), needed_sseregs
* 16));
3568 t
= build2 (MODIFY_EXPR
, TREE_TYPE (fpr
), fpr
, t
);
3569 gimplify_and_add (t
, pre_p
);
3572 t
= build1 (GOTO_EXPR
, void_type_node
, lab_over
);
3573 gimplify_and_add (t
, pre_p
);
3575 t
= build1 (LABEL_EXPR
, void_type_node
, lab_false
);
3576 append_to_statement_list (t
, pre_p
);
3579 /* ... otherwise out of the overflow area. */
3581 /* Care for on-stack alignment if needed. */
3582 if (FUNCTION_ARG_BOUNDARY (VOIDmode
, type
) <= 64)
3586 HOST_WIDE_INT align
= FUNCTION_ARG_BOUNDARY (VOIDmode
, type
) / 8;
3587 t
= build (PLUS_EXPR
, TREE_TYPE (ovf
), ovf
,
3588 build_int_cst (TREE_TYPE (ovf
), align
- 1));
3589 t
= build (BIT_AND_EXPR
, TREE_TYPE (t
), t
,
3590 build_int_cst (TREE_TYPE (t
), -align
));
3592 gimplify_expr (&t
, pre_p
, NULL
, is_gimple_val
, fb_rvalue
);
3594 t2
= build2 (MODIFY_EXPR
, void_type_node
, addr
, t
);
3595 gimplify_and_add (t2
, pre_p
);
3597 t
= build2 (PLUS_EXPR
, TREE_TYPE (t
), t
,
3598 build_int_cst (TREE_TYPE (t
), rsize
* UNITS_PER_WORD
));
3599 t
= build2 (MODIFY_EXPR
, TREE_TYPE (ovf
), ovf
, t
);
3600 gimplify_and_add (t
, pre_p
);
3604 t
= build1 (LABEL_EXPR
, void_type_node
, lab_over
);
3605 append_to_statement_list (t
, pre_p
);
3608 ptrtype
= build_pointer_type (type
);
3609 addr
= fold_convert (ptrtype
, addr
);
3612 addr
= build_va_arg_indirect_ref (addr
);
3613 return build_va_arg_indirect_ref (addr
);
3616 /* Return nonzero if OPNUM's MEM should be matched
3617 in movabs* patterns. */
3620 ix86_check_movabs (rtx insn
, int opnum
)
3624 set
= PATTERN (insn
);
3625 if (GET_CODE (set
) == PARALLEL
)
3626 set
= XVECEXP (set
, 0, 0);
3627 if (GET_CODE (set
) != SET
)
3629 mem
= XEXP (set
, opnum
);
3630 while (GET_CODE (mem
) == SUBREG
)
3631 mem
= SUBREG_REG (mem
);
3632 if (GET_CODE (mem
) != MEM
)
3634 return (volatile_ok
|| !MEM_VOLATILE_P (mem
));
/* Initialize the table of extra 80387 mathematical constants.  */

static void
init_ext_80387_constants (void)
{
  static const char * cst[5] =
  {
    "0.3010299956639811952256464283594894482",  /* 0: fldlg2  */
    "0.6931471805599453094286904741849753009",  /* 1: fldln2  */
    "1.4426950408889634073876517827983434472",  /* 2: fldl2e  */
    "3.3219280948873623478083405569094566090",  /* 3: fldl2t  */
    "3.1415926535897932385128089594061862044",  /* 4: fldpi   */
  };
  int i;

  for (i = 0; i < 5; i++)
    {
      real_from_string (&ext_80387_constants_table[i], cst[i]);
      /* Ensure each constant is rounded to XFmode precision.  */
      real_convert (&ext_80387_constants_table[i],
		    XFmode, &ext_80387_constants_table[i]);
    }

  ext_80387_constants_init = 1;
}
/* Return true if the constant is something that can be loaded with
   a special instruction.  */

int
standard_80387_constant_p (rtx x)
{
  if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
    return -1;

  if (x == CONST0_RTX (GET_MODE (x)))
    return 1;
  if (x == CONST1_RTX (GET_MODE (x)))
    return 2;

  /* For XFmode constants, try to find a special 80387 instruction when
     optimizing for size or on those CPUs that benefit from them.  */
  if (GET_MODE (x) == XFmode
      && (optimize_size || x86_ext_80387_constants & TUNEMASK))
    {
      REAL_VALUE_TYPE r;
      int i;

      if (! ext_80387_constants_init)
	init_ext_80387_constants ();

      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      for (i = 0; i < 5; i++)
	if (real_identical (&r, &ext_80387_constants_table[i]))
	  return i + 3;
    }

  return 0;
}
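
/* Worked example (editor's note, not in the original i386.c): if X is an
   XFmode CONST_DOUBLE equal to pi, the loop above matches entry 4 of
   ext_80387_constants_table and the constant is reported as loadable, so
   it can be materialized with fldpi instead of a memory load.  */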
3697 /* Return the opcode of the special instruction to be used to load
3701 standard_80387_constant_opcode (rtx x
)
3703 switch (standard_80387_constant_p (x
))
3723 /* Return the CONST_DOUBLE representing the 80387 constant that is
3724 loaded by the specified special instruction. The argument IDX
3725 matches the return value from standard_80387_constant_p. */
3728 standard_80387_constant_rtx (int idx
)
3732 if (! ext_80387_constants_init
)
3733 init_ext_80387_constants ();
3749 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table
[i
],
/* Return 1 if X is FP constant we can load to SSE register w/o using memory.  */

int
standard_sse_constant_p (rtx x)
{
  if (x == const0_rtx)
    return 1;
  return (x == CONST0_RTX (GET_MODE (x)));
}
/* Returns 1 if OP contains a symbol reference.  */

int
symbolic_reference_mentioned_p (rtx op)
{
  const char *fmt;
  int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	    if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
	return 1;
    }

  return 0;
}
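
/* Worked example (editor's note, not in the original i386.c): for the RTX
   (plus (reg) (symbol_ref "x")) the walk above finds the SYMBOL_REF in the
   second operand and returns 1, while a purely constant address such as
   (const_int 42) returns 0.  */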
/* Return 1 if it is appropriate to emit `ret' instructions in the
   body of a function.  Do this only if the epilogue is simple, needing a
   couple of insns.  Prior to reloading, we can't tell how many registers
   must be saved, so return 0 then.  Return 0 if there is no frame
   marker to de-allocate.  */

int
ix86_can_use_return_insn_p (void)
{
  struct ix86_frame frame;

  if (! reload_completed || frame_pointer_needed)
    return 0;

  /* Don't allow more than 32 pop, since that's all we can do
     with one instruction.  */
  if (current_function_pops_args
      && current_function_args_size >= 32768)
    return 0;

  ix86_compute_frame_layout (&frame);
  return frame.to_allocate == 0 && frame.nregs == 0;
}
/* Value should be nonzero if functions must have frame pointers.
   Zero means the frame pointer need not be set up (and parms may
   be accessed via the stack pointer) in functions that seem suitable.  */

int
ix86_frame_pointer_required (void)
{
  /* If we accessed previous frames, then the generated code expects
     to be able to access the saved ebp value in our frame.  */
  if (cfun->machine->accesses_prev_frame)
    return 1;

  /* Several x86 os'es need a frame pointer for other reasons,
     usually pertaining to setjmp.  */
  if (SUBTARGET_FRAME_POINTER_REQUIRED)
    return 1;

  /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
     the frame pointer by default.  Turn it back on now if we've not
     got a leaf function.  */
  if (TARGET_OMIT_LEAF_FRAME_POINTER
      && (!current_function_is_leaf))
    return 1;

  if (current_function_profile)
    return 1;

  return 0;
}

/* Record that the current function accesses previous call frames.  */

void
ix86_setup_frame_addresses (void)
{
  cfun->machine->accesses_prev_frame = 1;
}
#if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

static int pic_labels_used;

/* Fills in the label name that should be used for a pc thunk for
   the given register.  */

get_pc_thunk_name (char name[32], unsigned int regno)

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
/* This function generates code for -fpic that loads %ebx with
   the return address of the caller and then returns.  */

ix86_file_end (void)

  for (regno = 0; regno < 8; ++regno)

      if (! ((pic_labels_used >> regno) & 1))

      get_pc_thunk_name (name, regno);

      if (USE_HIDDEN_LINKONCE)

          decl = build_decl (FUNCTION_DECL, get_identifier (name),

          TREE_PUBLIC (decl) = 1;
          TREE_STATIC (decl) = 1;
          DECL_ONE_ONLY (decl) = 1;

          (*targetm.asm_out.unique_section) (decl, 0);
          named_section (decl, NULL, 0);

          (*targetm.asm_out.globalize_label) (asm_out_file, name);
          fputs ("\t.hidden\t", asm_out_file);
          assemble_name (asm_out_file, name);
          fputc ('\n', asm_out_file);
          ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);

          ASM_OUTPUT_LABEL (asm_out_file, name);

      xops[0] = gen_rtx_REG (SImode, regno);
      xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
      output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
      output_asm_insn ("ret", xops);

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
/* Emit code for the SET_GOT patterns.  */

output_set_got (rtx dest)

  xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);

  if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)

      xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());

        output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);

      output_asm_insn ("call\t%a2", xops);

      /* Output the "canonical" label name ("Lxx$pb") here too.  This
         is what will be referred to by the Mach-O PIC subsystem.  */
      ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());

      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (XEXP (xops[2], 0)));

        output_asm_insn ("pop{l}\t%0", xops);

      get_pc_thunk_name (name, REGNO (dest));
      pic_labels_used |= 1 << REGNO (dest);

      xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      xops[2] = gen_rtx_MEM (QImode, xops[2]);
      output_asm_insn ("call\t%X2", xops);

  if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
    output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
  else if (!TARGET_MACHO)
    output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
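
/* Editorial example (illustrative only): with TARGET_DEEP_BRANCH_PREDICTION
   and -fpic the code above typically assembles to

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   where the thunk emitted by ix86_file_end is just

   __i686.get_pc_thunk.bx:
	movl	(%esp), %ebx
	ret

   Without deep branch prediction a call to a local label followed by a
   "popl %ebx" and the "addl $_GLOBAL_OFFSET_TABLE_+[.-label]" form shown
   above is used instead.  */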
/* Generate a "push" pattern for input ARG.  */

  return gen_rtx_SET (VOIDmode,
                      gen_rtx_PRE_DEC (Pmode,
                                       stack_pointer_rtx)),

/* Return >= 0 if there is an unused call-clobbered register available
   for the entire function.  */

ix86_select_alt_pic_regnum (void)

  if (current_function_is_leaf && !current_function_profile)

      for (i = 2; i >= 0; --i)
        if (!regs_ever_live[i])

  return INVALID_REGNUM;
/* Return 1 if we need to save REGNO.  */

ix86_save_reg (unsigned int regno, int maybe_eh_return)

  if (pic_offset_table_rtx
      && regno == REAL_PIC_OFFSET_TABLE_REGNUM
      && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
          || current_function_profile
          || current_function_calls_eh_return
          || current_function_uses_const_pool))

      if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)

  if (current_function_calls_eh_return && maybe_eh_return)

          unsigned test = EH_RETURN_DATA_REGNO (i);
          if (test == INVALID_REGNUM)

  return (regs_ever_live[regno]
          && !call_used_regs[regno]
          && !fixed_regs[regno]
          && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
/* Return number of registers to be saved on the stack.  */

ix86_nsaved_regs (void)

  for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
    if (ix86_save_reg (regno, true))
/* Return the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

ix86_initial_elimination_offset (int from, int to)

  struct ix86_frame frame;
  ix86_compute_frame_layout (&frame);

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset;
  else if (from == FRAME_POINTER_REGNUM
           && to == HARD_FRAME_POINTER_REGNUM)
    return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;

      if (to != STACK_POINTER_REGNUM)
      else if (from == ARG_POINTER_REGNUM)
        return frame.stack_pointer_offset;
      else if (from != FRAME_POINTER_REGNUM)
        return frame.stack_pointer_offset - frame.frame_pointer_offset;
/* Fill structure ix86_frame about frame of currently computed function.  */

ix86_compute_frame_layout (struct ix86_frame *frame)

  HOST_WIDE_INT total_size;
  unsigned int stack_alignment_needed;
  HOST_WIDE_INT offset;
  unsigned int preferred_alignment;
  HOST_WIDE_INT size = get_frame_size ();

  frame->nregs = ix86_nsaved_regs ();

  stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
  preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;

  /* During reload iteration the number of registers saved can change.
     Recompute the value as needed.  Do not recompute when the number of
     registers didn't change, as reload does multiple calls to the function
     and does not expect the decision to change within a single iteration.  */
      && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)

      int count = frame->nregs;

      cfun->machine->use_fast_prologue_epilogue_nregs = count;
      /* The fast prologue uses move instead of push to save registers.  This
         is significantly longer, but also executes faster as modern hardware
         can execute the moves in parallel, but can't do that for push/pop.

         Be careful about choosing what prologue to emit: when the function
         takes many instructions to execute we may use the slow version as
         well as when the function is known to be outside a hot spot (this is
         known with feedback only).  Weight the size of the function by the
         number of registers to save, as it is cheap to use one or two push
         instructions but very slow to use many of them.  */
        count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
      if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
          || (flag_branch_probabilities
              && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
        cfun->machine->use_fast_prologue_epilogue = false;
        cfun->machine->use_fast_prologue_epilogue
          = !expensive_function_p (count);

  if (TARGET_PROLOGUE_USING_MOVE
      && cfun->machine->use_fast_prologue_epilogue)
    frame->save_regs_using_mov = true;
    frame->save_regs_using_mov = false;

  /* Skip return address and saved base pointer.  */
  offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;

  frame->hard_frame_pointer_offset = offset;

  /* Do some sanity checking of stack_alignment_needed and
     preferred_alignment, since the i386 port is the only one using those
     features that may break easily.  */
  if (size && !stack_alignment_needed)
  if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
  if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
  if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)

  if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
    stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;

  /* Register save area */
  offset += frame->nregs * UNITS_PER_WORD;

  if (ix86_save_varrargs_registers)

      offset += X86_64_VARARGS_SIZE;
      frame->va_arg_size = X86_64_VARARGS_SIZE;

    frame->va_arg_size = 0;

  /* Align start of frame for local function.  */
  frame->padding1 = ((offset + stack_alignment_needed - 1)
                     & -stack_alignment_needed) - offset;

  offset += frame->padding1;

  /* Frame pointer points here.  */
  frame->frame_pointer_offset = offset;

  /* Add outgoing arguments area.  Can be skipped if we eliminated
     all the function calls as dead code.
     Skipping is however impossible when function calls alloca.  Alloca
     expander assumes that last current_function_outgoing_args_size
     of stack frame are unused.  */
  if (ACCUMULATE_OUTGOING_ARGS
      && (!current_function_is_leaf || current_function_calls_alloca))

      offset += current_function_outgoing_args_size;
      frame->outgoing_arguments_size = current_function_outgoing_args_size;

    frame->outgoing_arguments_size = 0;

  /* Align stack boundary.  Only needed if we're calling another function
  if (!current_function_is_leaf || current_function_calls_alloca)
    frame->padding2 = ((offset + preferred_alignment - 1)
                       & -preferred_alignment) - offset;
    frame->padding2 = 0;

  offset += frame->padding2;

  /* We've reached end of stack frame.  */
  frame->stack_pointer_offset = offset;

  /* Size prologue needs to allocate.  */
  frame->to_allocate =
    (size + frame->padding1 + frame->padding2
     + frame->outgoing_arguments_size + frame->va_arg_size);

  if ((!frame->to_allocate && frame->nregs <= 1)
      || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
    frame->save_regs_using_mov = false;

  if (TARGET_RED_ZONE && current_function_sp_is_unchanging
      && current_function_is_leaf)

      frame->red_zone_size = frame->to_allocate;
      if (frame->save_regs_using_mov)
        frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
      if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
        frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;

    frame->red_zone_size = 0;
  frame->to_allocate -= frame->red_zone_size;
  frame->stack_pointer_offset -= frame->red_zone_size;

  fprintf (stderr, "nregs: %i\n", frame->nregs);
  fprintf (stderr, "size: %i\n", size);
  fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
  fprintf (stderr, "padding1: %i\n", frame->padding1);
  fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
  fprintf (stderr, "padding2: %i\n", frame->padding2);
  fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
  fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
  fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
  fprintf (stderr, "hard_frame_pointer_offset: %i\n",
           frame->hard_frame_pointer_offset);
  fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
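
/* Editorial sketch (not part of the original source): the offsets computed
   above describe a frame laid out roughly as follows, growing downwards:

	return address
	saved %ebp (if frame_pointer_needed)    <- hard_frame_pointer_offset
	saved registers (nregs words)
	va_arg register save area
	padding1 (stack_alignment_needed)
	local variables                         <- frame_pointer_offset
	outgoing arguments
	padding2 (preferred_alignment)          <- stack_pointer_offset

   The padding fields use the usual round-up-to-alignment idiom; a
   hypothetical stand-alone helper showing the same arithmetic:  */

static long
round_up_to_alignment (long offset, long align)
{
  /* ALIGN must be a power of two, as in the padding1/padding2 code above.  */
  return (offset + align - 1) & -align;
}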
/* Emit code to save registers in the prologue.  */

ix86_emit_save_regs (void)

  for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
    if (ix86_save_reg (regno, true))

        insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
        RTX_FRAME_RELATED_P (insn) = 1;
/* Emit code to save registers using MOV insns.  First register
   is stored at POINTER + OFFSET.  */

ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (ix86_save_reg (regno, true))

        insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
                               gen_rtx_REG (Pmode, regno));
        RTX_FRAME_RELATED_P (insn) = 1;
        offset += UNITS_PER_WORD;
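
/* Editorial example (illustrative only): for a function saving %esi and
   %edi, ix86_emit_save_regs produces push insns,

	pushl	%esi
	pushl	%edi

   while the mov-based path used by the fast prologue stores into the
   already-allocated frame, e.g.

	movl	%esi, -4(%ebp)
	movl	%edi, -8(%ebp)

   which is larger but lets the stores execute in parallel, as explained
   in ix86_compute_frame_layout.  Exact registers and offsets depend on
   the frame layout.  */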
/* Expand prologue or epilogue stack adjustment.
   The pattern exists to put a dependency on all ebp-based memory accesses.
   STYLE should be negative if instructions should be marked as frame related,
   zero if %r11 register is live and cannot be freely used and positive

pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)

    insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
  else if (x86_64_immediate_operand (offset, DImode))
    insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));

      /* r11 is used by indirect sibcall return as well, set before the
         epilogue and used after the epilogue.  ATM indirect sibcall
         shouldn't be used together with huge frame sizes in one
         function because of the frame_size check in sibcall.c.  */
      r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
      insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
        RTX_FRAME_RELATED_P (insn) = 1;
      insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,

    RTX_FRAME_RELATED_P (insn) = 1;
/* Expand the prologue into a bunch of separate insns.  */

ix86_expand_prologue (void)

  struct ix86_frame frame;
  HOST_WIDE_INT allocate;

  ix86_compute_frame_layout (&frame);

  /* Note: AT&T enter does NOT have reversed args.  Enter is probably
     slower on all targets.  Also sdb doesn't like it.  */

  if (frame_pointer_needed)

      insn = emit_insn (gen_push (hard_frame_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;

      insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;

  allocate = frame.to_allocate;

  if (!frame.save_regs_using_mov)
    ix86_emit_save_regs ();
    allocate += frame.nregs * UNITS_PER_WORD;

  /* When using red zone we may start register saving before allocating
     the stack frame saving one cycle of the prologue.  */
  if (TARGET_RED_ZONE && frame.save_regs_using_mov)
    ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
                                   : stack_pointer_rtx,
                                   -frame.nregs * UNITS_PER_WORD);

  else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
    pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                               GEN_INT (-allocate), -1);

      /* Only valid for Win32.  */
      rtx eax = gen_rtx_REG (SImode, 0);
      bool eax_live = ix86_eax_live_at_start_p ();

          emit_insn (gen_push (eax));

      emit_move_insn (eax, GEN_INT (allocate));

      insn = emit_insn (gen_allocate_stack_worker (eax));
      RTX_FRAME_RELATED_P (insn) = 1;
      t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
      t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                            t, REG_NOTES (insn));

          if (frame_pointer_needed)
            t = plus_constant (hard_frame_pointer_rtx,
                               - frame.nregs * UNITS_PER_WORD);
            t = plus_constant (stack_pointer_rtx, allocate);
          emit_move_insn (eax, gen_rtx_MEM (SImode, t));

  if (frame.save_regs_using_mov && !TARGET_RED_ZONE)

      if (!frame_pointer_needed || !frame.to_allocate)
        ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
        ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
                                       -frame.nregs * UNITS_PER_WORD);

  pic_reg_used = false;
  if (pic_offset_table_rtx
      && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
          || current_function_profile))

      unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();

      if (alt_pic_reg_used != INVALID_REGNUM)
        REGNO (pic_offset_table_rtx) = alt_pic_reg_used;

      pic_reg_used = true;

      insn = emit_insn (gen_set_got (pic_offset_table_rtx));

      /* Even with accurate pre-reload life analysis, we can wind up
         deleting all references to the pic register after reload.
         Consider if cross-jumping unifies two sides of a branch
         controlled by a comparison vs the only read from a global.
         In which case, allow the set_got to be deleted, though we're
         too late to do anything about the ebx save in the prologue.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);

  /* Prevent function calls from being scheduled before the call to mcount.
     In the pic_reg_used case, make sure that the got load isn't deleted.  */
  if (current_function_profile)
    emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
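
/* Editorial example (illustrative only): with a frame pointer and the
   push-based save path, the insns emitted above typically assemble to

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx			# saved registers
	subl	$28, %esp		# frame.to_allocate
	call	__i686.get_pc_thunk.bx	# only if the PIC register is live
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   "enter" is deliberately never used, as noted at the top of the
   function.  */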
/* Emit code to restore saved registers using MOV insns.  First register
   is restored from POINTER + OFFSET.  */

ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
                                  int maybe_eh_return)

  rtx base_address = gen_rtx_MEM (Pmode, pointer);

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (ix86_save_reg (regno, maybe_eh_return))

        /* Ensure that adjust_address won't be forced to produce pointer
           out of range allowed by x86-64 instruction set.  */
        if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))

            r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
            emit_move_insn (r11, GEN_INT (offset));
            emit_insn (gen_adddi3 (r11, r11, pointer));
            base_address = gen_rtx_MEM (Pmode, r11);

        emit_move_insn (gen_rtx_REG (Pmode, regno),
                        adjust_address (base_address, Pmode, offset));
        offset += UNITS_PER_WORD;
/* Restore function stack, frame, and registers.  */

ix86_expand_epilogue (int style)

  int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
  struct ix86_frame frame;
  HOST_WIDE_INT offset;

  ix86_compute_frame_layout (&frame);

  /* Calculate start of saved registers relative to ebp.  Special care
     must be taken for the normal return case of a function using
     eh_return: the eax and edx registers are marked as saved, but not
     restored along this path.  */
  offset = frame.nregs;
  if (current_function_calls_eh_return && style != 2)

  offset *= -UNITS_PER_WORD;

  /* If we're only restoring one register and sp is not valid then
     use a move instruction to restore the register, since it's
     less work than reloading sp and popping the register.

     The default code results in a stack adjustment using an add/lea
     instruction, while this code results in a LEAVE instruction (or
     discrete equivalent), so it is profitable in some other cases as
     well.  Especially when there are no registers to restore.  We also
     use this code when TARGET_USE_LEAVE and there is exactly one
     register to pop.  This heuristic may need some tuning in future.  */
  if ((!sp_valid && frame.nregs <= 1)
      || (TARGET_EPILOGUE_USING_MOVE
          && cfun->machine->use_fast_prologue_epilogue
          && (frame.nregs > 1 || frame.to_allocate))
      || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
      || (frame_pointer_needed && TARGET_USE_LEAVE
          && cfun->machine->use_fast_prologue_epilogue
          && frame.nregs == 1)
      || current_function_calls_eh_return)

      /* Restore registers.  We can use ebp or esp to address the memory
         locations.  If both are available, default to ebp, since offsets
         are known to be small.  Only exception is esp pointing directly
         to the end of block of saved registers, where we may simplify
         addressing.  */
      if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
        ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
                                          frame.to_allocate, style == 2);
        ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
                                          offset, style == 2);

      /* eh_return epilogues need %ecx added to the stack pointer.  */

          rtx tmp, sa = EH_RETURN_STACKADJ_RTX;

          if (frame_pointer_needed)

              tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
              tmp = plus_constant (tmp, UNITS_PER_WORD);
              emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));

              tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
              emit_move_insn (hard_frame_pointer_rtx, tmp);

              pro_epilogue_adjust_stack (stack_pointer_rtx, sa,

              tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
              tmp = plus_constant (tmp, (frame.to_allocate
                                         + frame.nregs * UNITS_PER_WORD));
              emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));

      else if (!frame_pointer_needed)
        pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                   GEN_INT (frame.to_allocate
                                            + frame.nregs * UNITS_PER_WORD),
      /* If not an i386, mov & pop is faster than "leave".  */
      else if (TARGET_USE_LEAVE || optimize_size
               || !cfun->machine->use_fast_prologue_epilogue)
        emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());

          pro_epilogue_adjust_stack (stack_pointer_rtx,
                                     hard_frame_pointer_rtx,

            emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
            emit_insn (gen_popsi1 (hard_frame_pointer_rtx));

      /* First step is to deallocate the stack frame so that we can
         pop the registers.  */

      if (!frame_pointer_needed)

        pro_epilogue_adjust_stack (stack_pointer_rtx,
                                   hard_frame_pointer_rtx,
                                   GEN_INT (offset), style);
      else if (frame.to_allocate)
        pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
                                   GEN_INT (frame.to_allocate), style);

      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        if (ix86_save_reg (regno, false))

              emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
              emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));

      if (frame_pointer_needed)

          /* Leave results in shorter dependency chains on CPUs that are
             able to grok it fast.  */
          if (TARGET_USE_LEAVE)
            emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
          else if (TARGET_64BIT)
            emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
            emit_insn (gen_popsi1 (hard_frame_pointer_rtx));

  /* Sibcall epilogues don't want a return instruction.  */

  if (current_function_pops_args && current_function_args_size)

      rtx popc = GEN_INT (current_function_pops_args);

      /* i386 can only pop 64K bytes.  If asked to pop more, pop
         return address, do explicit add, and jump indirectly to the
         caller.  */
      if (current_function_pops_args >= 65536)

          rtx ecx = gen_rtx_REG (SImode, 2);

          /* There is no "pascal" calling convention in the 64-bit ABI.  */

          emit_insn (gen_popsi1 (ecx));
          emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
          emit_jump_insn (gen_return_indirect_internal (ecx));

        emit_jump_insn (gen_return_pop_internal (popc));

    emit_jump_insn (gen_return_internal ());
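
/* Editorial example (illustrative only): typical code for the epilogue
   paths above.  With a frame pointer and TARGET_USE_LEAVE:

	leave
	ret

   Without a frame pointer the frame is released and registers popped:

	addl	$28, %esp
	popl	%ebx
	ret

   A function that pops its own arguments returns with "ret $N", and if
   more than 64K bytes have to be popped the return address is first
   popped into %ecx, the stack adjusted with an add, and control returned
   through an indirect jump, exactly as coded above.  */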
/* Reset from the function's potential modifications.  */

ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                               HOST_WIDE_INT size ATTRIBUTE_UNUSED)

  if (pic_offset_table_rtx)
    REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return 0 if the structure of the address is
   grossly off.  Return -1 if the address contains ASHIFT, so it is not
   strictly valid, but still used for computing length of lea instruction.  */

ix86_decompose_address (rtx addr, struct ix86_address *out)

  rtx base = NULL_RTX;
  rtx index = NULL_RTX;
  rtx disp = NULL_RTX;
  HOST_WIDE_INT scale = 1;
  rtx scale_rtx = NULL_RTX;
  enum ix86_address_seg seg = SEG_DEFAULT;

  if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
  else if (GET_CODE (addr) == PLUS)

          addends[n++] = XEXP (op, 1);
      while (GET_CODE (op) == PLUS);

      for (i = n; i >= 0; --i)

          switch (GET_CODE (op))

              index = XEXP (op, 0);
              scale_rtx = XEXP (op, 1);

              if (XINT (op, 1) == UNSPEC_TP
                  && TARGET_TLS_DIRECT_SEG_REFS
                  && seg == SEG_DEFAULT)
                seg = TARGET_64BIT ? SEG_FS : SEG_GS;

  else if (GET_CODE (addr) == MULT)

      index = XEXP (addr, 0);       /* index*scale */
      scale_rtx = XEXP (addr, 1);

  else if (GET_CODE (addr) == ASHIFT)

      /* We're called for lea too, which implements ashift on occasion.  */
      index = XEXP (addr, 0);
      tmp = XEXP (addr, 1);
      if (GET_CODE (tmp) != CONST_INT)
      scale = INTVAL (tmp);
      if ((unsigned HOST_WIDE_INT) scale > 3)

    disp = addr;                    /* displacement */

  /* Extract the integral value of scale.  */

      if (GET_CODE (scale_rtx) != CONST_INT)
      scale = INTVAL (scale_rtx);

  /* Allow arg pointer and stack pointer as index if there is no scaling.  */
  if (base && index && scale == 1
      && (index == arg_pointer_rtx
          || index == frame_pointer_rtx
          || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM)))

  /* Special case: %ebp cannot be encoded as a base without a displacement.  */
  if ((base == hard_frame_pointer_rtx
       || base == frame_pointer_rtx
       || base == arg_pointer_rtx) && !disp)

  /* Special case: on K6, [%esi] makes the instruction vector decoded.
     Avoid this by transforming to [%esi+0].  */
  if (ix86_tune == PROCESSOR_K6 && !optimize_size
      && base && !index && !disp
      && REGNO_REG_CLASS (REGNO (base)) == SIREG)

  /* Special case: encode reg+reg instead of reg*2.  */
  if (!base && index && scale && scale == 2)
    base = index, scale = 1;

  /* Special case: scaling cannot be encoded without base or displacement.  */
  if (!base && !disp && index && scale != 1)
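
/* Editorial sketch (hypothetical helpers, not a GCC interface): the
   canonical form handled above is "base + index*scale + disp" with an
   optional segment override.  Two small stand-alone illustrations of the
   rules enforced by the code:  */

static int
x86_scale_valid_p (long scale)
{
  /* Only these scale factors can be encoded in a SIB byte.  */
  return scale == 1 || scale == 2 || scale == 4 || scale == 8;
}

static long
x86_ashift_count_to_scale (int count)
{
  /* lea implements shifts by 0..3, which map onto scales 1, 2, 4, 8;
     larger shift counts are rejected above.  */
  return 1L << count;
}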
/* Return cost of the memory address x.
   For i386, it is better to use a complex address than let gcc copy
   the address into a reg and make a new pseudo.  But not if the address
   requires two regs - that would mean more pseudos with longer

ix86_address_cost (rtx x)

  struct ix86_address parts;

  if (!ix86_decompose_address (x, &parts))

  /* More complex memory references are better.  */
  if (parts.disp && parts.disp != const0_rtx)
  if (parts.seg != SEG_DEFAULT)

  /* Attempt to minimize number of registers in the address.  */
      && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
          && (!REG_P (parts.index)
              || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
      && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
      && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
      && parts.base != parts.index)

  /* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
     since its predecode logic can't detect the length of instructions
     and it degenerates to vector decoded.  Increase cost of such
     addresses here.  The penalty is minimally 2 cycles.  It may be
     worthwhile to split such addresses or even refuse such addresses at all.

     Following addressing modes are affected:

     The first and last case may be avoidable by explicitly coding the zero in
     the memory address, but I don't have an AMD-K6 machine handy to check this
      && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
          || (parts.disp && !parts.base && parts.index && parts.scale != 1)
          || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
/* If X is a machine specific address (i.e. a symbol or label being
   referenced as a displacement from the GOT implemented using an
   UNSPEC), then return the base term.  Otherwise return X.  */

ix86_find_base_term (rtx x)

      if (GET_CODE (x) != CONST)
      if (GET_CODE (term) == PLUS
          && (GET_CODE (XEXP (term, 1)) == CONST_INT
              || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
        term = XEXP (term, 0);
      if (GET_CODE (term) != UNSPEC
          || XINT (term, 1) != UNSPEC_GOTPCREL)

      term = XVECEXP (term, 0, 0);

      if (GET_CODE (term) != SYMBOL_REF
          && GET_CODE (term) != LABEL_REF)

  term = ix86_delegitimize_address (x);

  if (GET_CODE (term) != SYMBOL_REF
      && GET_CODE (term) != LABEL_REF)
/* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
   this is used to form addresses to local data when -fPIC is in
   effect.  */

darwin_local_data_pic (rtx disp)

  if (GET_CODE (disp) == MINUS)

      if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
          || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
        if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)

            const char *sym_name = XSTR (XEXP (disp, 1), 0);
            if (! strcmp (sym_name, "<pic base>"))
/* Determine if a given RTX is a valid constant.  We already know this
   satisfies CONSTANT_P.  */

legitimate_constant_p (rtx x)

  switch (GET_CODE (x))

      if (GET_CODE (x) == PLUS)

          if (GET_CODE (XEXP (x, 1)) != CONST_INT)

      if (TARGET_MACHO && darwin_local_data_pic (x))

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
        switch (XINT (x, 1))

            return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
            return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);

      /* We must have drilled down to a symbol.  */
      if (!symbolic_operand (x, Pmode))

      /* TLS symbols are never valid.  */
      if (tls_symbolic_operand (x, Pmode))

  /* Otherwise we handle everything else in the move patterns.  */

/* Determine if it's legal to put X into the constant pool.  This
   is not possible for the address of thread-local symbols, which
   is checked above.  */

ix86_cannot_force_const_mem (rtx x)

  return !legitimate_constant_p (x);

/* Determine if a given RTX is a valid constant address.  */

constant_address_p (rtx x)

  return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

legitimate_pic_operand_p (rtx x)

  switch (GET_CODE (x))

      inner = XEXP (x, 0);

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (inner) == UNSPEC)
        switch (XINT (inner, 1))

            return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);

      return legitimate_pic_address_disp_p (x);
/* Determine if a given CONST RTX is a valid memory displacement
   in PIC mode.  */

legitimate_pic_address_disp_p (rtx disp)

  /* In 64bit mode we can allow direct addresses of symbols and labels
     when they are not dynamic symbols.  */

      /* TLS references should always be enclosed in UNSPEC.  */
      if (tls_symbolic_operand (disp, GET_MODE (disp)))
      if (GET_CODE (disp) == SYMBOL_REF
          && ix86_cmodel == CM_SMALL_PIC
          && SYMBOL_REF_LOCAL_P (disp))
      if (GET_CODE (disp) == LABEL_REF)
      if (GET_CODE (disp) == CONST
          && GET_CODE (XEXP (disp, 0)) == PLUS)

          rtx op0 = XEXP (XEXP (disp, 0), 0);
          rtx op1 = XEXP (XEXP (disp, 0), 1);

          /* TLS references should always be enclosed in UNSPEC.  */
          if (tls_symbolic_operand (op0, GET_MODE (op0)))
          if (((GET_CODE (op0) == SYMBOL_REF
                && ix86_cmodel == CM_SMALL_PIC
                && SYMBOL_REF_LOCAL_P (op0))
               || GET_CODE (op0) == LABEL_REF)
              && GET_CODE (op1) == CONST_INT
              && INTVAL (op1) < 16*1024*1024
              && INTVAL (op1) >= -16*1024*1024)

  if (GET_CODE (disp) != CONST)
  disp = XEXP (disp, 0);

      /* It is unsafe to allow PLUS expressions here; that would defeat the
         limit on the allowed distance into the GOT table.  We should not
         need these anyway.  */
      if (GET_CODE (disp) != UNSPEC
          || XINT (disp, 1) != UNSPEC_GOTPCREL)

      if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
          && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)

  if (GET_CODE (disp) == PLUS)

      if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
      disp = XEXP (disp, 0);

  if (TARGET_MACHO && darwin_local_data_pic (disp))

  if (GET_CODE (disp) != UNSPEC)

  switch (XINT (disp, 1))

      return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
      if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
          || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
        return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
    case UNSPEC_GOTTPOFF:
    case UNSPEC_GOTNTPOFF:
    case UNSPEC_INDNTPOFF:
      return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
      return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
      return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
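
/* Editorial examples (illustrative only): the displacements accepted above
   correspond to assembler forms such as

	movl	foo@GOT(%ebx), %eax	# UNSPEC_GOT
	leal	foo@GOTOFF(%ebx), %eax	# UNSPEC_GOTOFF
	movq	foo@GOTPCREL(%rip), %rax	# x86-64 small PIC

   plus the TLS variants (@GOTTPOFF, @GOTNTPOFF, @INDNTPOFF) handled by the
   last few cases, where %ebx holds the GOT pointer set up by
   output_set_got.  */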
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the machine mode
   for the MEM expression that wants to use this address.

   It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
   convert common non-canonical forms to canonical form so that they will
   be recognized.  */

legitimate_address_p (enum machine_mode mode, rtx addr, int strict)

  struct ix86_address parts;
  rtx base, index, disp;
  HOST_WIDE_INT scale;
  const char *reason = NULL;
  rtx reason_rtx = NULL_RTX;

  if (TARGET_DEBUG_ADDR)

             "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
             GET_MODE_NAME (mode), strict);

  if (ix86_decompose_address (addr, &parts) <= 0)

      reason = "decomposition failed";

  index = parts.index;
  scale = parts.scale;

  /* Validate base register.

     Don't allow SUBREG's here, it can lead to spill failures when the base
     is one word out of a two word structure, which is represented
     internally.  */

      if (GET_CODE (base) != REG)

          reason = "base is not a register";

      if (GET_MODE (base) != Pmode)

          reason = "base is not in Pmode";

      if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
          || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))

          reason = "base is not valid";

  /* Validate index register.

     Don't allow SUBREG's here, it can lead to spill failures when the index
     is one word out of a two word structure, which is represented
     internally.  */

      if (GET_CODE (index) != REG)

          reason = "index is not a register";

      if (GET_MODE (index) != Pmode)

          reason = "index is not in Pmode";

      if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index))
          || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index)))

          reason = "index is not valid";

  /* Validate scale factor.  */

      reason_rtx = GEN_INT (scale);

          reason = "scale without index";

      if (scale != 2 && scale != 4 && scale != 8)

          reason = "scale is not a valid multiplier";

  /* Validate displacement.  */

      if (GET_CODE (disp) == CONST
          && GET_CODE (XEXP (disp, 0)) == UNSPEC)
        switch (XINT (XEXP (disp, 0), 1))

          case UNSPEC_GOTPCREL:
            goto is_legitimate_pic;

          case UNSPEC_GOTTPOFF:
          case UNSPEC_GOTNTPOFF:
          case UNSPEC_INDNTPOFF:

            reason = "invalid address unspec";

      else if (flag_pic && (SYMBOLIC_CONST (disp)
                            && !machopic_operand_p (disp)

          if (TARGET_64BIT && (index || base))

              /* foo@dtpoff(%rX) is ok.  */
              if (GET_CODE (disp) != CONST
                  || GET_CODE (XEXP (disp, 0)) != PLUS
                  || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
                  || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
                  || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
                      && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))

                  reason = "non-constant pic memory reference";

          else if (! legitimate_pic_address_disp_p (disp))

              reason = "displacement is an invalid pic construct";

          /* This code used to verify that a symbolic pic displacement
             includes the pic_offset_table_rtx register.

             While this is a good idea, unfortunately these constructs may
             be created by the "adds using lea" optimization for incorrect

             This code is nonsensical, but results in addressing the
             GOT table with pic_offset_table_rtx base.  We can't
             just refuse it easily, since it gets matched by the
             "addsi3" pattern, that later gets split to lea in the
             case the output register differs from the input.  While this
             can be handled by a separate addsi pattern for this case
             that never results in lea, this seems to be the easier and
             correct fix for the crash, so disable this test.  */

      else if (GET_CODE (disp) != LABEL_REF
               && GET_CODE (disp) != CONST_INT
               && (GET_CODE (disp) != CONST
                   || !legitimate_constant_p (disp))
               && (GET_CODE (disp) != SYMBOL_REF
                   || !legitimate_constant_p (disp)))

          reason = "displacement is not constant";

      else if (TARGET_64BIT
               && !x86_64_immediate_operand (disp, VOIDmode))

          reason = "displacement is out of range";

  /* Everything looks valid.  */
  if (TARGET_DEBUG_ADDR)
    fprintf (stderr, "Success.\n");

  if (TARGET_DEBUG_ADDR)

      fprintf (stderr, "Error: %s\n", reason);
      debug_rtx (reason_rtx);
/* Return a unique alias set for the GOT.  */

static HOST_WIDE_INT
ix86_GOT_alias_set (void)

  static HOST_WIDE_INT set = -1;

    set = new_alias_set ();
/* Return a legitimate reference for ORIG (an address) using the
   register REG.  If REG is 0, a new pseudo is generated.

   There are two types of references that must be handled:

   1. Global data references must load the address from the GOT, via
      the PIC reg.  An insn is emitted to do this load, and the reg is

   2. Static data references, constant pool addresses, and code labels
      compute the address as an offset from the GOT, whose base is in
      the PIC reg.  Static data objects have SYMBOL_FLAG_LOCAL set to
      differentiate them from global data objects.  The returned
      address is the PIC reg + an unspec constant.

   GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
   reg also appears in the address.  */

legitimize_pic_address (rtx orig, rtx reg)

        reg = gen_reg_rtx (Pmode);
      /* Use the generic Mach-O PIC machinery.  */
      return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);

  if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
  else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))

      /* This symbol may be referenced via a displacement from the PIC
         base address (@GOTOFF).  */

      if (reload_in_progress)
        regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
      if (GET_CODE (addr) == CONST)
        addr = XEXP (addr, 0);
      if (GET_CODE (addr) == PLUS)

          new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
          new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));

        new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
      new = gen_rtx_CONST (Pmode, new);
      new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);

          emit_move_insn (reg, new);

  else if (GET_CODE (addr) == SYMBOL_REF)

          new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
          new = gen_rtx_CONST (Pmode, new);
          new = gen_const_mem (Pmode, new);
          set_mem_alias_set (new, ix86_GOT_alias_set ());

            reg = gen_reg_rtx (Pmode);
          /* Use gen_movsi directly, otherwise the address is loaded
             into a register for CSE.  We don't want to CSE these
             addresses; instead we CSE addresses from the GOT table,
             so skip this.  */
          emit_insn (gen_movsi (reg, new));

          /* This symbol must be referenced via a load from the
             Global Offset Table (@GOT).  */

          if (reload_in_progress)
            regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
          new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
          new = gen_rtx_CONST (Pmode, new);
          new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
          new = gen_const_mem (Pmode, new);
          set_mem_alias_set (new, ix86_GOT_alias_set ());

            reg = gen_reg_rtx (Pmode);
          emit_move_insn (reg, new);

      if (GET_CODE (addr) == CONST)

          addr = XEXP (addr, 0);

          /* We must match stuff we generate before.  Assume the only
             unspecs that can get here are ours.  Not that we could do
             anything with them anyway....  */
          if (GET_CODE (addr) == UNSPEC
              || (GET_CODE (addr) == PLUS
                  && GET_CODE (XEXP (addr, 0)) == UNSPEC))
          if (GET_CODE (addr) != PLUS)

      if (GET_CODE (addr) == PLUS)

          rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);

          /* Check first to see if this is a constant offset from a @GOTOFF
             symbol reference.  */
          if (local_symbolic_operand (op0, Pmode)
              && GET_CODE (op1) == CONST_INT)

              if (reload_in_progress)
                regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
              new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
              new = gen_rtx_PLUS (Pmode, new, op1);
              new = gen_rtx_CONST (Pmode, new);
              new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);

                  emit_move_insn (reg, new);

              if (INTVAL (op1) < -16*1024*1024
                  || INTVAL (op1) >= 16*1024*1024)
                new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);

              base = legitimize_pic_address (XEXP (addr, 0), reg);
              new  = legitimize_pic_address (XEXP (addr, 1),
                                             base == reg ? NULL_RTX : reg);

              if (GET_CODE (new) == CONST_INT)
                new = plus_constant (base, INTVAL (new));

                  if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))

                      base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
                      new = XEXP (new, 1);

                  new = gen_rtx_PLUS (Pmode, base, new);
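
/* Editorial example (illustrative only): the two reference types described
   in the comment before this function typically end up as

	movl	foo@GOT(%ebx), %eax	# global data: address loaded from the GOT
	leal	bar@GOTOFF(%ebx), %eax	# local data: PIC register + offset

   on IA-32, and as movq foo@GOTPCREL(%rip) loads on x86-64.  */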
/* Load the thread pointer.  If TO_REG is true, force it into a register.  */

get_thread_pointer (int to_reg)

  tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);

  reg = gen_reg_rtx (Pmode);
  insn = gen_rtx_SET (VOIDmode, reg, tp);
  insn = emit_insn (insn);
/* A subroutine of legitimize_address and ix86_expand_move.  FOR_MOV is
   false if we expect this to be used for a memory address and true if
   we expect to load the address into a register.  */

legitimize_tls_address (rtx x, enum tls_model model, int for_mov)

  rtx dest, base, off, pic;

    case TLS_MODEL_GLOBAL_DYNAMIC:
      dest = gen_reg_rtx (Pmode);

          rtx rax = gen_rtx_REG (Pmode, 0), insns;

          emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
          insns = get_insns ();

          emit_libcall_block (insns, dest, rax, x);

        emit_insn (gen_tls_global_dynamic_32 (dest, x));

    case TLS_MODEL_LOCAL_DYNAMIC:
      base = gen_reg_rtx (Pmode);

          rtx rax = gen_rtx_REG (Pmode, 0), insns, note;

          emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
          insns = get_insns ();

          note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
          note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
          emit_libcall_block (insns, base, rax, note);

        emit_insn (gen_tls_local_dynamic_base_32 (base));

      off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
      off = gen_rtx_CONST (Pmode, off);

      return gen_rtx_PLUS (Pmode, base, off);

    case TLS_MODEL_INITIAL_EXEC:

          type = UNSPEC_GOTNTPOFF;

          if (reload_in_progress)
            regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
          pic = pic_offset_table_rtx;
          type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;

      else if (!TARGET_GNU_TLS)

          pic = gen_reg_rtx (Pmode);
          emit_insn (gen_set_got (pic));
          type = UNSPEC_GOTTPOFF;

          type = UNSPEC_INDNTPOFF;

      off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
      off = gen_rtx_CONST (Pmode, off);
        off = gen_rtx_PLUS (Pmode, pic, off);
      off = gen_const_mem (Pmode, off);
      set_mem_alias_set (off, ix86_GOT_alias_set ());

      if (TARGET_64BIT || TARGET_GNU_TLS)

          base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
          off = force_reg (Pmode, off);
          return gen_rtx_PLUS (Pmode, base, off);

          base = get_thread_pointer (true);
          dest = gen_reg_rtx (Pmode);
          emit_insn (gen_subsi3 (dest, base, off));

    case TLS_MODEL_LOCAL_EXEC:
      off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
                            (TARGET_64BIT || TARGET_GNU_TLS)
                            ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
      off = gen_rtx_CONST (Pmode, off);

      if (TARGET_64BIT || TARGET_GNU_TLS)

          base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
          return gen_rtx_PLUS (Pmode, base, off);

          base = get_thread_pointer (true);
          dest = gen_reg_rtx (Pmode);
          emit_insn (gen_subsi3 (dest, base, off));
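
/* Editorial example (illustrative only, GNU TLS on IA-32; exact sequences
   depend on the assembler and model): get_thread_pointer reads the thread
   pointer from %gs:0, so a local-exec access to a variable x may assemble
   to

	movl	%gs:0, %eax
	movl	x@ntpoff(%eax), %eax

   while an initial-exec access first loads the offset from the GOT:

	movl	x@gotntpoff(%ebx), %ecx
	movl	%gs:(%ecx), %eax

   On x86-64 the %fs segment is used instead, matching the SEG_FS choice in
   ix86_decompose_address.  */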
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the 80386, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in a general reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in a general reg.

   When -fpic is used, special handling is needed for symbolic references.
   See comments by legitimize_pic_address in i386.c for details.  */

legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)

  if (TARGET_DEBUG_ADDR)

      fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
               GET_MODE_NAME (mode));

  log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
    return legitimize_tls_address (x, log, false);
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))

      rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
      return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));

  if (flag_pic && SYMBOLIC_CONST (x))
    return legitimize_pic_address (x, 0);

  /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
  if (GET_CODE (x) == ASHIFT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4)

      x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
                        GEN_INT (1 << log));

  if (GET_CODE (x) == PLUS)

      /* Canonicalize shifts by 0, 1, 2, 3 into multiply.  */

      if (GET_CODE (XEXP (x, 0)) == ASHIFT
          && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
          && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)

          XEXP (x, 0) = gen_rtx_MULT (Pmode,
                                      force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
                                      GEN_INT (1 << log));

      if (GET_CODE (XEXP (x, 1)) == ASHIFT
          && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
          && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)

          XEXP (x, 1) = gen_rtx_MULT (Pmode,
                                      force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
                                      GEN_INT (1 << log));

      /* Put multiply first if it isn't already.  */
      if (GET_CODE (XEXP (x, 1)) == MULT)

          rtx tmp = XEXP (x, 0);
          XEXP (x, 0) = XEXP (x, 1);

      /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
         into (plus (plus (mult (reg) (const)) (reg)) (const)).  This can be
         created by virtual register instantiation, register elimination, and
         similar optimizations.  */
      if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)

          x = gen_rtx_PLUS (Pmode,
                            gen_rtx_PLUS (Pmode, XEXP (x, 0),
                                          XEXP (XEXP (x, 1), 0)),
                            XEXP (XEXP (x, 1), 1));

      /* Canonicalize
         (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
         into (plus (plus (mult (reg) (const)) (reg)) (const)).  */
      else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
               && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
               && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
               && CONSTANT_P (XEXP (x, 1)))

          rtx other = NULL_RTX;

          if (GET_CODE (XEXP (x, 1)) == CONST_INT)

              constant = XEXP (x, 1);
              other = XEXP (XEXP (XEXP (x, 0), 1), 1);

          else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)

              constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
              other = XEXP (x, 1);

              x = gen_rtx_PLUS (Pmode,
                                gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
                                              XEXP (XEXP (XEXP (x, 0), 1), 0)),
                                plus_constant (other, INTVAL (constant)));

      if (changed && legitimate_address_p (mode, x, FALSE))

      if (GET_CODE (XEXP (x, 0)) == MULT)

          XEXP (x, 0) = force_operand (XEXP (x, 0), 0);

      if (GET_CODE (XEXP (x, 1)) == MULT)

          XEXP (x, 1) = force_operand (XEXP (x, 1), 0);

          && GET_CODE (XEXP (x, 1)) == REG
          && GET_CODE (XEXP (x, 0)) == REG)

      if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))

          x = legitimize_pic_address (x, 0);

      if (changed && legitimate_address_p (mode, x, FALSE))

      if (GET_CODE (XEXP (x, 0)) == REG)

          rtx temp = gen_reg_rtx (Pmode);
          rtx val  = force_operand (XEXP (x, 1), temp);
            emit_move_insn (temp, val);

      else if (GET_CODE (XEXP (x, 1)) == REG)

          rtx temp = gen_reg_rtx (Pmode);
          rtx val  = force_operand (XEXP (x, 0), temp);
            emit_move_insn (temp, val);
/* Print an integer constant expression in assembler syntax.  Addition
   and subtraction are the only arithmetic that may appear in these
   expressions.  FILE is the stdio stream to write to, X is the rtx, and
   CODE is the operand print code from the output string.  */

output_pic_addr_const (FILE *file, rtx x, int code)

  switch (GET_CODE (x))

      /* Mark the decl as referenced so that cgraph will output the function.  */
      if (SYMBOL_REF_DECL (x))
        mark_decl_referenced (SYMBOL_REF_DECL (x));

      assemble_name (file, XSTR (x, 0));
      if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
        fputs ("@PLT", file);

      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
      assemble_name (asm_out_file, buf);

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));

      /* This used to output parentheses around the expression,
         but that does not work on the 386 (either ATT or BSD assembler).  */
      output_pic_addr_const (file, XEXP (x, 0), code);

      if (GET_MODE (x) == VOIDmode)

          /* We can use %d if the number is <32 bits and positive.  */
          if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
            fprintf (file, "0x%lx%08lx",
                     (unsigned long) CONST_DOUBLE_HIGH (x),
                     (unsigned long) CONST_DOUBLE_LOW (x));
            fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));

        /* We can't handle floating point constants;
           PRINT_OPERAND must handle them.  */
        output_operand_lossage ("floating constant misused");

      /* Some assemblers need integer constants to appear first.  */
      if (GET_CODE (XEXP (x, 0)) == CONST_INT)

          output_pic_addr_const (file, XEXP (x, 0), code);
          output_pic_addr_const (file, XEXP (x, 1), code);

      else if (GET_CODE (XEXP (x, 1)) == CONST_INT)

          output_pic_addr_const (file, XEXP (x, 1), code);
          output_pic_addr_const (file, XEXP (x, 0), code);

      putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
      output_pic_addr_const (file, XEXP (x, 0), code);
      output_pic_addr_const (file, XEXP (x, 1), code);
      putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);

      if (XVECLEN (x, 0) != 1)
      output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
      switch (XINT (x, 1))

          fputs ("@GOT", file);
          fputs ("@GOTOFF", file);
        case UNSPEC_GOTPCREL:
          fputs ("@GOTPCREL(%rip)", file);
        case UNSPEC_GOTTPOFF:
          /* FIXME: This might be @TPOFF in Sun ld too.  */
          fputs ("@GOTTPOFF", file);
          fputs ("@TPOFF", file);
          fputs ("@TPOFF", file);
          fputs ("@NTPOFF", file);
          fputs ("@DTPOFF", file);
        case UNSPEC_GOTNTPOFF:
            fputs ("@GOTTPOFF(%rip)", file);
            fputs ("@GOTNTPOFF", file);
        case UNSPEC_INDNTPOFF:
          fputs ("@INDNTPOFF", file);
          output_operand_lossage ("invalid UNSPEC as operand");

      output_operand_lossage ("invalid expression as operand");
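
/* Editorial sketch (hypothetical types, not a GCC interface): a flat view
   of the UNSPEC -> relocation-suffix mapping implemented above for the
   most common cases; the real code also varies some of them with
   TARGET_64BIT.  */

enum example_pic_suffix_kind
{
  EXAMPLE_GOT, EXAMPLE_GOTOFF, EXAMPLE_GOTPCREL,
  EXAMPLE_DTPOFF, EXAMPLE_NTPOFF, EXAMPLE_INDNTPOFF
};

static const char *
example_pic_suffix (enum example_pic_suffix_kind kind)
{
  switch (kind)
    {
    case EXAMPLE_GOT:       return "@GOT";
    case EXAMPLE_GOTOFF:    return "@GOTOFF";
    case EXAMPLE_GOTPCREL:  return "@GOTPCREL(%rip)";
    case EXAMPLE_DTPOFF:    return "@DTPOFF";
    case EXAMPLE_NTPOFF:    return "@NTPOFF";
    case EXAMPLE_INDNTPOFF: return "@INDNTPOFF";
    default:                return "";
    }
}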
/* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

i386_output_dwarf_dtprel (FILE *file, int size, rtx x)

      fputs (ASM_LONG, file);
  output_addr_const (file, x);
  fputs ("@DTPOFF", file);

    fputs (", 0", file);
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize PIC+GOTOFF and turn it back
   into a direct symbol reference.  */

ix86_delegitimize_address (rtx orig_x)

  if (GET_CODE (x) == MEM)

      if (GET_CODE (x) != CONST
          || GET_CODE (XEXP (x, 0)) != UNSPEC
          || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
          || GET_CODE (orig_x) != MEM)
      return XVECEXP (XEXP (x, 0), 0, 0);

  if (GET_CODE (x) != PLUS
      || GET_CODE (XEXP (x, 1)) != CONST)

  if (GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
    /* %ebx + GOT/GOTOFF */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)

      /* %ebx + %reg * scale + GOT/GOTOFF */

      if (GET_CODE (XEXP (y, 0)) == REG
          && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
      else if (GET_CODE (XEXP (y, 1)) == REG
               && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)

      if (GET_CODE (y) != REG
          && GET_CODE (y) != MULT
          && GET_CODE (y) != ASHIFT)

  x = XEXP (XEXP (x, 1), 0);
  if (GET_CODE (x) == UNSPEC
      && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
          || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))

        return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
      return XVECEXP (x, 0, 0);

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == UNSPEC
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
          || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
              && GET_CODE (orig_x) != MEM)))

      x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));

        return gen_rtx_PLUS (Pmode, y, x);
put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,

  if (mode == CCFPmode || mode == CCFPUmode)

      enum rtx_code second_code, bypass_code;
      ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
      if (bypass_code != UNKNOWN || second_code != UNKNOWN)
      code = ix86_fp_compare_code_to_integer (code);

    code = reverse_condition (code);

      if (mode != CCmode && mode != CCNOmode && mode != CCGCmode)

      /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
         Those same assemblers have the same but opposite lossage on cmov.  */
      suffix = fp ? "nbe" : "a";

      if (mode == CCNOmode || mode == CCGOCmode)
      else if (mode == CCmode || mode == CCGCmode)

      if (mode == CCNOmode || mode == CCGOCmode)
      else if (mode == CCmode || mode == CCGCmode)

      suffix = fp ? "nb" : "ae";

      if (mode != CCmode && mode != CCGCmode && mode != CCNOmode)

      suffix = fp ? "u" : "p";
      suffix = fp ? "nu" : "np";

  fputs (suffix, file);
/* Print the name of register X to FILE based on its machine mode and number.
   If CODE is 'w', pretend the mode is HImode.
   If CODE is 'b', pretend the mode is QImode.
   If CODE is 'k', pretend the mode is SImode.
   If CODE is 'q', pretend the mode is DImode.
   If CODE is 'h', pretend the reg is the `high' byte register.
   If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.  */

print_reg (rtx x, int code, FILE *file)

  if (REGNO (x) == ARG_POINTER_REGNUM
      || REGNO (x) == FRAME_POINTER_REGNUM
      || REGNO (x) == FLAGS_REG
      || REGNO (x) == FPSR_REG)

  if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)

  if (code == 'w' || MMX_REG_P (x))
  else if (code == 'b')
  else if (code == 'k')
  else if (code == 'q')
  else if (code == 'y')
  else if (code == 'h')
    code = GET_MODE_SIZE (GET_MODE (x));

  /* Irritatingly, AMD extended registers use a different naming convention
     from the normal registers.  */
  if (REX_INT_REG_P (x))

          error ("extended registers have no high halves");
          fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
          fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
          fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
          fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
          error ("unsupported operand size for extended register");

      if (STACK_TOP_P (x))

          fputs ("st(0)", file);

      if (! ANY_FP_REG_P (x))
        putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
      fputs (hi_reg_name[REGNO (x)], file);
      if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
      fputs (qi_reg_name[REGNO (x)], file);
      if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
      fputs (qi_high_reg_name[REGNO (x)], file);
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_local_dynamic_base pattern.  */

get_some_local_dynamic_name (void)
  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
  if (GET_CODE (x) == SYMBOL_REF
      && local_dynamic_symbolic_operand (x, Pmode))
      cfun->machine->some_ld_name = XSTR (x, 0);
   L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
   C -- print opcode suffix for set/cmov insn.
   c -- like C, but print reversed condition
   F,f -- likewise, but for floating point.
   O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
   R -- print the prefix for register names.
   z -- print the opcode suffix for the size of the current operand.
   * -- print a star (in certain assembler syntax)
   A -- print an absolute memory reference.
   w -- print the operand as if it's a "word" (HImode) even if it isn't.
   s -- print a shift double count, followed by the assembler's argument
   b -- print the QImode name of the register for the indicated operand.
        %b0 would print %al if operands[0] is reg 0.
   w -- likewise, print the HImode name of the register.
   k -- likewise, print the SImode name of the register.
   q -- likewise, print the DImode name of the register.
   h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
   y -- print "st(0)" instead of "st" as a register.
   D -- print condition for SSE cmp instruction.
   P -- if PIC, print an @PLT suffix.
   X -- don't print any sort of PIC '@' suffix for a symbol.
   & -- print some in-use local-dynamic symbol name.
   H -- print a memory address offset by 8; used for sse high-parts

print_operand (FILE *file, rtx x, int code)
      if (ASSEMBLER_DIALECT == ASM_ATT)

      assemble_name (file, get_some_local_dynamic_name ());

      if (ASSEMBLER_DIALECT == ASM_ATT)
      else if (ASSEMBLER_DIALECT == ASM_INTEL)
          /* Intel syntax.  For absolute addresses, registers should not
             be surrounded by braces.  */
          if (GET_CODE (x) != REG)
              PRINT_OPERAND (file, x, 0);
      PRINT_OPERAND (file, x, 0);

      if (ASSEMBLER_DIALECT == ASM_ATT)
      if (ASSEMBLER_DIALECT == ASM_ATT)
      if (ASSEMBLER_DIALECT == ASM_ATT)
      if (ASSEMBLER_DIALECT == ASM_ATT)
      if (ASSEMBLER_DIALECT == ASM_ATT)
      if (ASSEMBLER_DIALECT == ASM_ATT)

      /* 387 opcodes don't get size suffixes if the operands are
      if (STACK_REG_P (x))

      /* Likewise if using Intel opcodes.  */
      if (ASSEMBLER_DIALECT == ASM_INTEL)

      /* This is the size of op from size of operand.  */
      switch (GET_MODE_SIZE (GET_MODE (x)))
#ifdef HAVE_GAS_FILDS_FISTS
          if (GET_MODE (x) == SFmode)
          if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
#ifdef GAS_MNEMONICS

      if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
          PRINT_OPERAND (file, x, 0);

      /* Little bit of braindamage here.  The SSE compare instructions
         use completely different names for the comparisons than the
         fp conditional moves do.  */
      switch (GET_CODE (x))
          fputs ("unord", file);
          fputs ("neq", file);
          fputs ("nlt", file);
          fputs ("nle", file);
          fputs ("ord", file);

#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
      if (ASSEMBLER_DIALECT == ASM_ATT)
          switch (GET_MODE (x))
            case HImode: putc ('w', file); break;
            case SFmode: putc ('l', file); break;
            case DFmode: putc ('q', file); break;
      put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
      if (ASSEMBLER_DIALECT == ASM_ATT)
      put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);

      /* Like above, but reverse condition */
      /* Check to see if the argument to %c is really a constant
         and not a condition code which needs to be reversed.  */
      if (!COMPARISON_P (x))
          output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
      put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
      if (ASSEMBLER_DIALECT == ASM_ATT)
      put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);

      /* It doesn't actually matter what mode we use here, as we're
         only going to use this for printing.  */
      x = adjust_address_nv (x, DImode, 8);

      if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)

      x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
          int pred_val = INTVAL (XEXP (x, 0));

          if (pred_val < REG_BR_PROB_BASE * 45 / 100
              || pred_val > REG_BR_PROB_BASE * 55 / 100)
              int taken = pred_val > REG_BR_PROB_BASE / 2;
              int cputaken = final_forward_branch_p (current_output_insn) == 0;

              /* Emit hints only in the case the default branch prediction
                 heuristics would fail.  */
              if (taken != cputaken)
                  /* We use 3e (DS) prefix for taken branches and
                     2e (CS) prefix for not taken branches.  */
                    fputs ("ds ; ", file);
                    fputs ("cs ; ", file);
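
/* Editorial sketch (not part of GCC): the hint decision above restated as a
   stand-alone function.  REG_BR_PROB_BASE is 10000 in GCC; the PROB_BASE
   constant and the helper names below are assumptions used only for this
   illustration.  Guarded out of the build.  */
#if 0
#include <stdio.h>

#define PROB_BASE 10000   /* stands in for REG_BR_PROB_BASE */

/* Return the prefix to emit before the jump, or NULL for no hint.
   PRED_VAL is the profile probability of the branch being taken, scaled by
   PROB_BASE; BACKWARD_BRANCH models the CPU's static rule "backward taken,
   forward not taken".  */
static const char *
branch_hint_prefix (int pred_val, int backward_branch)
{
  /* Predictions close to 50/50 are not worth a prefix byte.  */
  if (pred_val >= PROB_BASE * 45 / 100 && pred_val <= PROB_BASE * 55 / 100)
    return NULL;

  int taken = pred_val > PROB_BASE / 2;
  int cputaken = backward_branch;   /* what the CPU would guess anyway */

  /* Emit a hint only where the static heuristic would guess wrong:
     0x3e (DS) marks the branch as taken, 0x2e (CS) as not taken.  */
  if (taken == cputaken)
    return NULL;
  return taken ? "ds ; " : "cs ; ";
}

int
main (void)
{
  /* A usually-taken forward branch gets the "ds ; " taken hint.  */
  printf ("%s\n", branch_hint_prefix (9000, 0));
  return 0;
}
#endif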
          output_operand_lossage ("invalid operand code '%c'", code);

  if (GET_CODE (x) == REG)
    print_reg (x, code, file);

  else if (GET_CODE (x) == MEM)
      /* No `byte ptr' prefix for call instructions.  */
      if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
          switch (GET_MODE_SIZE (GET_MODE (x)))
            case 1: size = "BYTE"; break;
            case 2: size = "WORD"; break;
            case 4: size = "DWORD"; break;
            case 8: size = "QWORD"; break;
            case 12: size = "XWORD"; break;
            case 16: size = "XMMWORD"; break;

          /* Check for explicit size override (codes 'b', 'w' and 'k').  */
          else if (code == 'w')
          else if (code == 'k')
          fputs (" PTR ", file);

      /* Avoid (%rip) for call operands.  */
      if (CONSTANT_ADDRESS_P (x) && code == 'P'
          && GET_CODE (x) != CONST_INT)
        output_addr_const (file, x);
      else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
        output_operand_lossage ("invalid constraints for operand");

  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      REAL_VALUE_TO_TARGET_SINGLE (r, l);

      if (ASSEMBLER_DIALECT == ASM_ATT)
      fprintf (file, "0x%08lx", l);

  /* These float cases don't actually occur as immediate operands.  */
  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
      real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
      fprintf (file, "%s", dstr);

  else if (GET_CODE (x) == CONST_DOUBLE
           && GET_MODE (x) == XFmode)
      real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
      fprintf (file, "%s", dstr);

      if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
          if (ASSEMBLER_DIALECT == ASM_ATT)
      else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
               || GET_CODE (x) == LABEL_REF)
          if (ASSEMBLER_DIALECT == ASM_ATT)
            fputs ("OFFSET FLAT:", file);

      if (GET_CODE (x) == CONST_INT)
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
        output_pic_addr_const (file, x, code);
        output_addr_const (file, x);
/* Print a memory operand whose address is ADDR.  */

print_operand_address (FILE *file, rtx addr)
  struct ix86_address parts;
  rtx base, index, disp;

  if (! ix86_decompose_address (addr, &parts))

  index = parts.index;
  scale = parts.scale;

      if (USER_LABEL_PREFIX[0] == 0)
      fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);

  if (!base && !index)
      /* Displacement only requires special attention.  */
      if (GET_CODE (disp) == CONST_INT)
          if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
              if (USER_LABEL_PREFIX[0] == 0)
              fputs ("ds:", file);
          fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
        output_pic_addr_const (file, disp, 0);
        output_addr_const (file, disp);

      /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode.  */
          && ((GET_CODE (disp) == SYMBOL_REF
               && ! tls_symbolic_operand (disp, GET_MODE (disp)))
              || GET_CODE (disp) == LABEL_REF
              || (GET_CODE (disp) == CONST
                  && GET_CODE (XEXP (disp, 0)) == PLUS
                  && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
                      || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
                  && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
        fputs ("(%rip)", file);

      if (ASSEMBLER_DIALECT == ASM_ATT)
                output_pic_addr_const (file, disp, 0);
              else if (GET_CODE (disp) == LABEL_REF)
                output_asm_label (disp);
                output_addr_const (file, disp);

            print_reg (base, 0, file);
              print_reg (index, 0, file);
                fprintf (file, ",%d", scale);

          rtx offset = NULL_RTX;

          /* Pull out the offset of a symbol; print any symbol itself.  */
          if (GET_CODE (disp) == CONST
              && GET_CODE (XEXP (disp, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
              offset = XEXP (XEXP (disp, 0), 1);
              disp = gen_rtx_CONST (VOIDmode,
                                    XEXP (XEXP (disp, 0), 0));

                output_pic_addr_const (file, disp, 0);
              else if (GET_CODE (disp) == LABEL_REF)
                output_asm_label (disp);
              else if (GET_CODE (disp) == CONST_INT)
                output_addr_const (file, disp);

              print_reg (base, 0, file);
                  if (INTVAL (offset) >= 0)
                  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
                fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));

              print_reg (index, 0, file);
                fprintf (file, "*%d", scale);
output_addr_const_extra (FILE *file, rtx x)

  if (GET_CODE (x) != UNSPEC)

  op = XVECEXP (x, 0, 0);
  switch (XINT (x, 1))
    case UNSPEC_GOTTPOFF:
      output_addr_const (file, op);
      /* FIXME: This might be @TPOFF in Sun ld.  */
      fputs ("@GOTTPOFF", file);

      output_addr_const (file, op);
      fputs ("@TPOFF", file);

      output_addr_const (file, op);
        fputs ("@TPOFF", file);
        fputs ("@NTPOFF", file);

      output_addr_const (file, op);
      fputs ("@DTPOFF", file);

    case UNSPEC_GOTNTPOFF:
      output_addr_const (file, op);
        fputs ("@GOTTPOFF(%rip)", file);
        fputs ("@GOTNTPOFF", file);

    case UNSPEC_INDNTPOFF:
      output_addr_const (file, op);
      fputs ("@INDNTPOFF", file);
/* Split one or more DImode RTL references into pairs of SImode
   references.  The RTL can be REG, offsettable MEM, integer constant, or
   CONST_DOUBLE.  "operands" is a pointer to an array of DImode RTL to
   split and "num" is its length.  lo_half and hi_half are output arrays
   that parallel "operands".  */

split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
      rtx op = operands[num];

      /* simplify_subreg refuses to split volatile memory addresses,
         but we still have to handle it.  */
      if (GET_CODE (op) == MEM)
          lo_half[num] = adjust_address (op, SImode, 0);
          hi_half[num] = adjust_address (op, SImode, 4);
          lo_half[num] = simplify_gen_subreg (SImode, op,
                                              GET_MODE (op) == VOIDmode
                                              ? DImode : GET_MODE (op), 0);
          hi_half[num] = simplify_gen_subreg (SImode, op,
                                              GET_MODE (op) == VOIDmode
                                              ? DImode : GET_MODE (op), 4);
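
/* Editorial sketch (not part of GCC): what split_di computes for the simple
   value case -- the low and high SImode halves of a 64-bit quantity, found
   at byte offsets 0 and 4 on this little-endian target.  Guarded out of the
   build; the function name is illustrative only.  */
#if 0
#include <stdint.h>
#include <stdio.h>

static void
split_di_value (uint64_t op, uint32_t *lo_half, uint32_t *hi_half)
{
  *lo_half = (uint32_t) (op & 0xffffffffu);   /* like adjust_address (op, SImode, 0) */
  *hi_half = (uint32_t) (op >> 32);           /* like adjust_address (op, SImode, 4) */
}

int
main (void)
{
  uint32_t lo, hi;
  split_di_value (0x1122334455667788ull, &lo, &hi);
  printf ("lo=%08x hi=%08x\n", lo, hi);       /* lo=55667788 hi=11223344 */
  return 0;
}
#endif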
/* Split one or more TImode RTL references into pairs of DImode
   references.  The RTL can be REG, offsettable MEM, integer constant, or
   CONST_DOUBLE.  "operands" is a pointer to an array of TImode RTL to
   split and "num" is its length.  lo_half and hi_half are output arrays
   that parallel "operands".  */

split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
      rtx op = operands[num];

      /* simplify_subreg refuses to split volatile memory addresses, but we
         still have to handle it.  */
      if (GET_CODE (op) == MEM)
          lo_half[num] = adjust_address (op, DImode, 0);
          hi_half[num] = adjust_address (op, DImode, 8);
          lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
          hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
/* Output code to perform a 387 binary operation in INSN, one of PLUS,
   MINUS, MULT or DIV.  OPERANDS are the insn operands, where operands[3]
   is the expression of the binary operation.  The output may either be
   emitted here, or returned to the caller, like all output_* functions.

   There is no guarantee that the operands are the same mode, as they
   might be within FLOAT or FLOAT_EXTEND expressions.  */

#ifndef SYSV386_COMPAT
/* Set to 1 for compatibility with brain-damaged assemblers.  No-one
   wants to fix the assemblers because that causes incompatibility
   with gcc.  No-one wants to fix gcc because that causes
   incompatibility with assemblers...  You can use the option of
   -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way.  */
#define SYSV386_COMPAT 1

output_387_binary_op (rtx insn, rtx *operands)
  static char buf[30];

  int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);

#ifdef ENABLE_CHECKING
  /* Even if we do not want to check the inputs, this documents the input
     constraints, which helps in understanding the following code.  */
  if (STACK_REG_P (operands[0])
      && ((REG_P (operands[1])
           && REGNO (operands[0]) == REGNO (operands[1])
           && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
          || (REG_P (operands[2])
              && REGNO (operands[0]) == REGNO (operands[2])
              && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
      && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))

  switch (GET_CODE (operands[3]))
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
          || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
          || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
          || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
      if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
          || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)

      if (GET_MODE (operands[0]) == SFmode)
        strcat (buf, "ss\t{%2, %0|%0, %2}");
        strcat (buf, "sd\t{%2, %0|%0, %2}");

  switch (GET_CODE (operands[3]))
      if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
          rtx temp = operands[2];
          operands[2] = operands[1];

      /* We know operands[0] == operands[1].  */

      if (GET_CODE (operands[2]) == MEM)

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
          if (STACK_TOP_P (operands[0]))
            /* How is it that we are storing to a dead operand[2]?
               Well, presumably operands[1] is dead too.  We can't
               store the result to st(0) as st(0) gets popped on this
               instruction.  Instead store to operands[2] (which I
               think has to be st(1)).  st(1) will be popped later.
               gcc <= 2.8.1 didn't have this check and generated
               assembly code that the Unixware assembler rejected.  */
            p = "p\t{%0, %2|%2, %0}";  /* st(1) = st(0) op st(1); pop */
            p = "p\t{%2, %0|%0, %2}";  /* st(r1) = st(r1) op st(0); pop */

      if (STACK_TOP_P (operands[0]))
        p = "\t{%y2, %0|%0, %y2}";     /* st(0) = st(0) op st(r2) */
        p = "\t{%2, %0|%0, %2}";       /* st(r1) = st(r1) op st(0) */

      if (GET_CODE (operands[1]) == MEM)

      if (GET_CODE (operands[2]) == MEM)

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
          /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
             derived assemblers, confusingly reverse the direction of
             the operation for fsub{r} and fdiv{r} when the
             destination register is not st(0).  The Intel assembler
             doesn't have this brain damage.  Read !SYSV386_COMPAT to
             figure out what the hardware really does.  */
          if (STACK_TOP_P (operands[0]))
            p = "{p\t%0, %2|rp\t%2, %0}";
            p = "{rp\t%2, %0|p\t%0, %2}";
          if (STACK_TOP_P (operands[0]))
            /* As above for fmul/fadd, we can't store to st(0).  */
            p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
            p = "p\t{%2, %0|%0, %2}";  /* st(r1) = st(r1) op st(0); pop */

      if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
          if (STACK_TOP_P (operands[0]))
            p = "{rp\t%0, %1|p\t%1, %0}";
            p = "{p\t%1, %0|rp\t%0, %1}";
          if (STACK_TOP_P (operands[0]))
            p = "p\t{%0, %1|%1, %0}";  /* st(1) = st(1) op st(0); pop */
            p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */

      if (STACK_TOP_P (operands[0]))
          if (STACK_TOP_P (operands[1]))
            p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
            p = "r\t{%y1, %0|%0, %y1}";        /* st(0) = st(r1) op st(0) */
      else if (STACK_TOP_P (operands[1]))
            p = "{\t%1, %0|r\t%0, %1}";
            p = "r\t{%1, %0|%0, %1}";  /* st(r2) = st(0) op st(r2) */
            p = "{r\t%2, %0|\t%0, %2}";
            p = "\t{%2, %0|%0, %2}";   /* st(r1) = st(r1) op st(0) */
/* Output code to initialize the control word copies used by the trunc?f?i
   and rounding patterns.  CURRENT_MODE is set to the current control word,
   while NEW_MODE is set to the new control word.  */

emit_i387_cw_initialization (rtx current_mode, rtx new_mode, int mode)
  rtx reg = gen_reg_rtx (HImode);

  emit_insn (gen_x86_fnstcw_1 (current_mode));
  emit_move_insn (reg, current_mode);

  if (!TARGET_PARTIAL_REG_STALL && !optimize_size
          /* round down toward -oo */
          emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));

          /* round up toward +oo */
          emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));

          /* round toward zero (truncate) */
          emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));

        case I387_CW_MASK_PM:
          /* mask precision exception for nearbyint() */
          emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));

          /* round down toward -oo */
          emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
          emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));

          /* round up toward +oo */
          emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
          emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));

          /* round toward zero (truncate) */
          emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));

        case I387_CW_MASK_PM:
          /* mask precision exception for nearbyint() */
          emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));

  emit_move_insn (new_mode, reg);
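
/* Editorial sketch (not part of GCC): the control-word bit layout the
   constants above rely on.  Bits 10-11 (mask 0x0c00) select the x87
   rounding mode and bit 5 (0x0020) masks the precision exception used for
   nearbyint().  Guarded out of the build; names are illustrative only.  */
#if 0
#include <stdio.h>

enum x87_rounding { ROUND_NEAREST = 0x0000, ROUND_DOWN = 0x0400,
                    ROUND_UP = 0x0800, ROUND_CHOP = 0x0c00 };

static unsigned short
set_rounding (unsigned short cw, enum x87_rounding mode)
{
  /* The andhi3/iorhi3 pair emitted above: clear bits 10-11, then set them.  */
  return (unsigned short) ((cw & ~0x0c00) | mode);
}

int
main (void)
{
  unsigned short cw = 0x037f;   /* the x87 power-up default control word */
  printf ("truncate: %04x\n", set_rounding (cw, ROUND_CHOP));   /* 0f7f */
  printf ("mask PM:  %04x\n", (unsigned short) (cw | 0x0020));
  return 0;
}
#endif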
/* Output code for INSN to convert a float to a signed int.  OPERANDS
   are the insn operands.  The output may be [HSD]Imode and the input
   operand may be [SDX]Fmode.  */

output_fix_trunc (rtx insn, rtx *operands)
  int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
  int dimode_p = GET_MODE (operands[0]) == DImode;

  /* Jump through a hoop or two for DImode, since the hardware has no
     non-popping instruction.  We used to do this a different way, but
     that was somewhat fragile and broke with post-reload splitters.  */
  if (dimode_p && !stack_top_dies)
    output_asm_insn ("fld\t%y1", operands);

  if (!STACK_TOP_P (operands[1]))

  if (GET_CODE (operands[0]) != MEM)

  output_asm_insn ("fldcw\t%3", operands);
  if (stack_top_dies || dimode_p)
    output_asm_insn ("fistp%z0\t%0", operands);
    output_asm_insn ("fist%z0\t%0", operands);
  output_asm_insn ("fldcw\t%2", operands);
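
/* Editorial sketch (not part of GCC): the effect of the fldcw/fistp/fldcw
   sequence above, expressed with C99 <fenv.h>.  The control word is switched
   to round-toward-zero for the store and then restored, which is what a C
   float-to-integer conversion requires.  Guarded out of the build; link the
   extracted program with -lm.  */
#if 0
#include <fenv.h>
#include <math.h>
#include <stdio.h>

static long
trunc_to_long (double x)
{
  int old = fegetround ();
  fesetround (FE_TOWARDZERO);   /* fldcw %3: load the truncating control word */
  long r = lrint (x);           /* fistp:   store using the current rounding mode */
  fesetround (old);             /* fldcw %2: restore the original control word */
  return r;
}

int
main (void)
{
  printf ("%ld %ld\n", trunc_to_long (2.9), trunc_to_long (-2.9));   /* 2 -2 */
  return 0;
}
#endif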
/* Output code for INSN to compare OPERANDS.  EFLAGS_P is 1 when fcomi
   should be used.  UNORDERED_P is true when fucom should be used.  */

output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
  rtx cmp_op0, cmp_op1;
  int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);

      cmp_op0 = operands[0];
      cmp_op1 = operands[1];
      cmp_op0 = operands[1];
      cmp_op1 = operands[2];

      if (GET_MODE (operands[0]) == SFmode)
          return "ucomiss\t{%1, %0|%0, %1}";
          return "comiss\t{%1, %0|%0, %1}";
          return "ucomisd\t{%1, %0|%0, %1}";
          return "comisd\t{%1, %0|%0, %1}";

  if (! STACK_TOP_P (cmp_op0))

  stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;

  if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
          output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
          return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
        return "ftst\n\tfnstsw\t%0";

  if (STACK_REG_P (cmp_op1)
      && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
      && REGNO (cmp_op1) != FIRST_STACK_REG)
      /* If both the top of the 387 stack dies, and the other operand
         is also a stack register that dies, then this must be a
         `fcompp' float compare */

          /* There is no double popping fcomi variant.  Fortunately,
             eflags is immune from the fstp's cc clobbering.  */
            output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
            output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
          return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
            return "fucompp\n\tfnstsw\t%0";
            return "fcompp\n\tfnstsw\t%0";

      /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies.  */
      static const char * const alt[16] =
        "fcom%z2\t%y2\n\tfnstsw\t%0",
        "fcomp%z2\t%y2\n\tfnstsw\t%0",
        "fucom%z2\t%y2\n\tfnstsw\t%0",
        "fucomp%z2\t%y2\n\tfnstsw\t%0",

        "ficom%z2\t%y2\n\tfnstsw\t%0",
        "ficomp%z2\t%y2\n\tfnstsw\t%0",

        "fcomi\t{%y1, %0|%0, %y1}",
        "fcomip\t{%y1, %0|%0, %y1}",
        "fucomi\t{%y1, %0|%0, %y1}",
        "fucomip\t{%y1, %0|%0, %y1}",

      mask  = eflags_p << 3;
      mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
      mask |= unordered_p << 1;
      mask |= stack_top_dies;
ix86_output_addr_vec_elt (FILE *file, int value)
  const char *directive = ASM_LONG;

    directive = ASM_QUAD;

  fprintf (file, "%s%s%d\n", directive, LPREFIX, value);

ix86_output_addr_diff_elt (FILE *file, int value, int rel)
    fprintf (file, "%s%s%d-%s%d\n",
             ASM_LONG, LPREFIX, value, LPREFIX, rel);
  else if (HAVE_AS_GOTOFF_IN_DATA)
    fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
  else if (TARGET_MACHO)
      fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
      machopic_output_function_base_name (file);
      fprintf (file, "\n");
    asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
                 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
/* Generate either "mov $0, reg" or "xor reg, reg", as appropriate

ix86_expand_clear (rtx dest)

  /* We play register width games, which are only valid after reload.  */
  if (!reload_completed)

  /* Avoid HImode and its attendant prefix byte.  */
  if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
    dest = gen_rtx_REG (SImode, REGNO (dest));

  tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);

  /* This predicate should match that for movsi_xor and movdi_xor_rex64.  */
  if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
      rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
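
/* Editorial sketch (not part of GCC): why the xor form is preferred when
   optimizing for size.  "xorl %eax,%eax" encodes in 2 bytes but clobbers
   EFLAGS, while "movl $0,%eax" takes 5 bytes and leaves the flags alone --
   hence the explicit CCmode clobber added to the parallel above.  Guarded
   out of the build; the helper names are illustrative only.  */
#if 0
static inline unsigned int
clear_with_xor (void)
{
  unsigned int r;
  __asm__ ("xorl %0, %0" : "=r" (r) : : "cc");   /* 2 bytes, clobbers EFLAGS */
  return r;
}

static inline unsigned int
clear_with_mov (void)
{
  unsigned int r;
  __asm__ ("movl $0, %0" : "=r" (r));            /* 5 bytes, flags preserved */
  return r;
}
#endif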
/* X is an unchanging MEM.  If it is a constant pool reference, return
   the constant pool rtx, else NULL.  */

maybe_get_pool_constant (rtx x)
  x = ix86_delegitimize_address (XEXP (x, 0));

  if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
    return get_pool_constant (x);
ix86_expand_move (enum machine_mode mode, rtx operands[])
  int strict = (reload_in_progress || reload_completed);
  enum tls_model model;

  if (GET_CODE (op1) == SYMBOL_REF)
      model = SYMBOL_REF_TLS_MODEL (op1);
          op1 = legitimize_tls_address (op1, model, true);
          op1 = force_operand (op1, op0);
  else if (GET_CODE (op1) == CONST
           && GET_CODE (XEXP (op1, 0)) == PLUS
           && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
      model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
          rtx addend = XEXP (XEXP (op1, 0), 1);
          op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
          op1 = force_operand (op1, NULL);
          op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
                                     op0, 1, OPTAB_DIRECT);

  if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
          rtx temp = ((reload_in_progress
                       || ((op0 && GET_CODE (op0) == REG)
                      ? op0 : gen_reg_rtx (Pmode));
          op1 = machopic_indirect_data_reference (op1, temp);
          op1 = machopic_legitimize_pic_address (op1, mode,
                                                 temp == op1 ? 0 : temp);
      else if (MACHOPIC_INDIRECT)
        op1 = machopic_indirect_data_reference (op1, 0);
      if (GET_CODE (op0) == MEM)
        op1 = force_reg (Pmode, op1);
        op1 = legitimize_address (op1, op1, Pmode);
#endif /* TARGET_MACHO */

      if (GET_CODE (op0) == MEM
          && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
              || !push_operand (op0, mode))
          && GET_CODE (op1) == MEM)
        op1 = force_reg (mode, op1);

      if (push_operand (op0, mode)
          && ! general_no_elim_operand (op1, mode))
        op1 = copy_to_mode_reg (mode, op1);

      /* Force large constants in 64-bit compilation into registers
         to get them CSEed.  */
      if (TARGET_64BIT && mode == DImode
          && immediate_operand (op1, mode)
          && !x86_64_zext_immediate_operand (op1, VOIDmode)
          && !register_operand (op0, mode)
          && optimize && !reload_completed && !reload_in_progress)
        op1 = copy_to_mode_reg (mode, op1);

      if (FLOAT_MODE_P (mode))
          /* If we are loading a floating point constant to a register,
             force the value to memory now, since we'll get better code
             out the back end.  */
          else if (GET_CODE (op1) == CONST_DOUBLE)
              op1 = validize_mem (force_const_mem (mode, op1));
              if (!register_operand (op0, mode))
                  rtx temp = gen_reg_rtx (mode);
                  emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
                  emit_move_insn (op0, temp);

  emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
  rtx op0 = operands[0], op1 = operands[1];

  /* Force constants other than zero into memory.  We do not know how
     the instructions used to build constants modify the upper 64 bits
     of the register; once we have that information we may be able
     to handle some of them more efficiently.  */
  if ((reload_in_progress | reload_completed) == 0
      && register_operand (op0, mode)
      && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
    op1 = validize_mem (force_const_mem (mode, op1));

  /* Make operand1 a register if it isn't already.  */
      && !register_operand (op0, mode)
      && !register_operand (op1, mode))
      emit_move_insn (op0, force_reg (GET_MODE (op0), op1));

  emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
/* Implement the movmisalign patterns for SSE.  Non-SSE modes go
   straight to ix86_expand_vector_move.  */

ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])

      /* If we're optimizing for size, movups is the smallest.  */
          op0 = gen_lowpart (V4SFmode, op0);
          op1 = gen_lowpart (V4SFmode, op1);
          emit_insn (gen_sse_movups (op0, op1));

      /* ??? If we have typed data, then it would appear that using
         movdqu is the only way to get unaligned data loaded with
      if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
          op0 = gen_lowpart (V16QImode, op0);
          op1 = gen_lowpart (V16QImode, op1);
          emit_insn (gen_sse2_movdqu (op0, op1));

      if (TARGET_SSE2 && mode == V2DFmode)
          /* When SSE registers are split into halves, we can avoid
             writing to the top half twice.  */
          if (TARGET_SSE_SPLIT_REGS)
              emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
              /* ??? Not sure about the best option for the Intel chips.
                 The following would seem to satisfy; the register is
                 entirely cleared, breaking the dependency chain.  We
                 then store to the upper half, with a dependency depth
                 of one.  A rumor has it that Intel recommends two movsd
                 followed by an unpacklpd, but this is unconfirmed.  And
                 given that the dependency depth of the unpacklpd would
                 still be one, I'm not sure why this would be better.  */
              zero = CONST0_RTX (V2DFmode);

          m = adjust_address (op1, DFmode, 0);
          emit_insn (gen_sse2_loadlpd (op0, zero, m));
          m = adjust_address (op1, DFmode, 8);
          emit_insn (gen_sse2_loadhpd (op0, op0, m));

          if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
            emit_move_insn (op0, CONST0_RTX (mode));
            emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));

          m = adjust_address (op1, V2SFmode, 0);
          emit_insn (gen_sse_loadlps (op0, op0, m));
          m = adjust_address (op1, V2SFmode, 8);
          emit_insn (gen_sse_loadhps (op0, op0, m));

  else if (MEM_P (op0))
      /* If we're optimizing for size, movups is the smallest.  */
          op0 = gen_lowpart (V4SFmode, op0);
          op1 = gen_lowpart (V4SFmode, op1);
          emit_insn (gen_sse_movups (op0, op1));

      /* ??? Similar to above, only less clear because of quote
         typeless stores unquote.  */
      if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
          && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
          op0 = gen_lowpart (V16QImode, op0);
          op1 = gen_lowpart (V16QImode, op1);
          emit_insn (gen_sse2_movdqu (op0, op1));

      if (TARGET_SSE2 && mode == V2DFmode)
          m = adjust_address (op0, DFmode, 0);
          emit_insn (gen_sse2_storelpd (m, op1));
          m = adjust_address (op0, DFmode, 8);
          emit_insn (gen_sse2_storehpd (m, op1));

          if (mode != V4SFmode)
            op1 = gen_lowpart (V4SFmode, op1);
          m = adjust_address (op0, V2SFmode, 0);
          emit_insn (gen_sse_storelps (m, op1));
          m = adjust_address (op0, V2SFmode, 8);
          emit_insn (gen_sse_storehps (m, op1));
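
/* Editorial sketch (not part of GCC): the two unaligned-load strategies
   chosen above, written with SSE intrinsics.  A single movups is smallest;
   on chips that split 128-bit operations, loading the two 64-bit halves
   (movlps/movhps) avoids the slow full-width unaligned access.  Guarded out
   of the build; compile the extracted code with -msse.  */
#if 0
#include <xmmintrin.h>

static __m128
load_unaligned_whole (const float *p)
{
  return _mm_loadu_ps (p);                        /* movups */
}

static __m128
load_unaligned_halves (const float *p)
{
  __m128 x = _mm_setzero_ps ();                   /* break the dependency chain */
  x = _mm_loadl_pi (x, (const __m64 *) p);        /* movlps: low 8 bytes */
  x = _mm_loadh_pi (x, (const __m64 *) (p + 2));  /* movhps: high 8 bytes */
  return x;
}
#endif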
/* Fix up OPERANDS to satisfy ix86_binary_operator_ok.  Return the
   destination to use for the operation.  If different from the true
   destination in operands[0], a copy operation will be required.  */

ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
  int matching_memory;
  rtx src1, src2, dst;

  /* Recognize <var1> = <value> <op> <var1> for commutative operators */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && (rtx_equal_p (dst, src2)
          || immediate_operand (src1, mode)))

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  matching_memory = 0;
  if (GET_CODE (dst) == MEM)
      if (rtx_equal_p (dst, src1))
        matching_memory = 1;
      else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
               && rtx_equal_p (dst, src2))
        matching_memory = 2;
        dst = gen_reg_rtx (mode);

  /* Both source operands cannot be in memory.  */
  if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
      if (matching_memory != 2)
        src2 = force_reg (mode, src2);
        src1 = force_reg (mode, src1);

  /* If the operation is not commutable, source 1 cannot be a constant
     or non-matching memory.  */
  if ((CONSTANT_P (src1)
       || (!matching_memory && GET_CODE (src1) == MEM))
      && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
    src1 = force_reg (mode, src1);

  /* If optimizing, copy to regs to improve CSE */
  if (optimize && ! no_new_pseudos)
      if (GET_CODE (dst) == MEM)
        dst = gen_reg_rtx (mode);
      if (GET_CODE (src1) == MEM)
        src1 = force_reg (mode, src1);
      if (GET_CODE (src2) == MEM)
        src2 = force_reg (mode, src2);

  src1 = operands[1] = src1;
  src2 = operands[2] = src2;
/* Similarly, but assume that the destination has already been

ix86_fixup_binary_operands_no_copy (enum rtx_code code,
                                    enum machine_mode mode, rtx operands[])
  rtx dst = ix86_fixup_binary_operands (code, mode, operands);
  gcc_assert (dst == operands[0]);
/* Attempt to expand a binary operator.  Make the expansion closer to the
   actual machine than just general_operand, which will allow 3 separate
   memory references (one output, two input) in a single insn.  */

ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
  rtx src1, src2, dst, op, clob;

  dst = ix86_fixup_binary_operands (code, mode, operands);

  /* Emit the instruction.  */

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
  if (reload_in_progress)
      /* Reload doesn't know about the flags register, and doesn't know that
         it doesn't want to clobber it.  We can only do this with PLUS.  */
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
/* Return TRUE or FALSE depending on whether the binary operator meets the
   appropriate constraints.  */

ix86_binary_operator_ok (enum rtx_code code,
                         enum machine_mode mode ATTRIBUTE_UNUSED,
  /* Both source operands cannot be in memory.  */
  if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)

  /* If the operation is not commutable, source 1 cannot be a constant.  */
  if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)

  /* If the destination is memory, we must have a matching source operand.  */
  if (GET_CODE (operands[0]) == MEM
      && ! (rtx_equal_p (operands[0], operands[1])
            || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
                && rtx_equal_p (operands[0], operands[2]))))

  /* If the operation is not commutable and the source 1 is memory, we must
     have a matching destination.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && ! rtx_equal_p (operands[0], operands[1]))
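
/* Editorial sketch (not part of GCC): the machine constraint the checks
   above enforce.  x86 ALU instructions are two-address -- the destination
   must also be one of the sources -- and at most one operand may live in
   memory, which is what the "+rm"/"ri" constraint pairing below expresses.
   Guarded out of the build; the helper name is illustrative only.  */
#if 0
static void
add_in_place (unsigned int *dst, unsigned int src)
{
  /* addl src, *dst : the destination doubles as the first source operand.  */
  __asm__ ("addl %1, %0" : "+rm" (*dst) : "ri" (src) : "cc");
}
#endif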
/* Attempt to expand a unary operator.  Make the expansion closer to the
   actual machine than just general_operand, which will allow 2 separate
   memory references (one output, one input) in a single insn.  */

ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
  int matching_memory;
  rtx src, dst, op, clob;

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  matching_memory = 0;
      if (rtx_equal_p (dst, src))
        matching_memory = 1;
        dst = gen_reg_rtx (mode);

  /* When the source operand is memory, the destination must match.  */
  if (MEM_P (src) && !matching_memory)
    src = force_reg (mode, src);

  /* If optimizing, copy to regs to improve CSE.  */
  if (optimize && ! no_new_pseudos)
      if (GET_CODE (dst) == MEM)
        dst = gen_reg_rtx (mode);
      if (GET_CODE (src) == MEM)
        src = force_reg (mode, src);

  /* Emit the instruction.  */

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
  if (reload_in_progress || code == NOT)
      /* Reload doesn't know about the flags register, and doesn't know that
         it doesn't want to clobber it.  */
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
/* Return TRUE or FALSE depending on whether the unary operator meets the
   appropriate constraints.  */

ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
                        enum machine_mode mode ATTRIBUTE_UNUSED,
                        rtx operands[2] ATTRIBUTE_UNUSED)
  /* If one of the operands is memory, source and destination must match.  */
  if ((GET_CODE (operands[0]) == MEM
       || GET_CODE (operands[1]) == MEM)
      && ! rtx_equal_p (operands[0], operands[1]))
/* Generate code for floating point ABS or NEG.  */

ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
  rtx mask, set, use, clob, dst, src;
  bool matching_memory;
  bool use_sse = false;
  bool vector_mode = VECTOR_MODE_P (mode);
  enum machine_mode elt_mode = mode;
  enum machine_mode vec_mode = VOIDmode;

      elt_mode = GET_MODE_INNER (mode);
      if (TARGET_SSE_MATH)
          vec_mode = V4SFmode;
      else if (mode == DFmode && TARGET_SSE2)
          vec_mode = V2DFmode;

  /* NEG and ABS performed with SSE use bitwise mask operations.
     Create the appropriate mask now.  */
      HOST_WIDE_INT hi, lo;

      /* Find the sign bit, sign extended to 2*HWI.  */
      if (elt_mode == SFmode)
        lo = 0x80000000, hi = lo < 0;
      else if (HOST_BITS_PER_WIDE_INT >= 64)
        lo = (HOST_WIDE_INT)1 << shift, hi = -1;
        lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);

      /* If we're looking for the absolute value, then we want

      /* Force this value into the low part of a fp vector constant.  */
      mask = immed_double_const (lo, hi, elt_mode == SFmode ? SImode : DImode);
      mask = gen_lowpart (elt_mode, mask);

            v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
                           CONST0_RTX (SFmode), CONST0_RTX (SFmode));
            v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
            v = gen_rtvec (4, mask, mask, mask, mask);
            v = gen_rtvec (2, mask, mask);

      mask = gen_rtx_CONST_VECTOR (vec_mode, v);
      mask = force_reg (vec_mode, mask);

      /* When not using SSE, we don't use the mask, but prefer to keep the
         same general form of the insn pattern to reduce duplication when
         it comes time to split.  */

  /* If the destination is memory, and we don't have matching source
     operands, do things in registers.  */
  matching_memory = false;
      if (rtx_equal_p (dst, src) && (!optimize || no_new_pseudos))
        matching_memory = true;
        dst = gen_reg_rtx (mode);

  if (MEM_P (src) && !matching_memory)
    src = force_reg (mode, src);

      set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
      set = gen_rtx_SET (VOIDmode, dst, set);
      set = gen_rtx_fmt_e (code, mode, src);
      set = gen_rtx_SET (VOIDmode, dst, set);
      use = gen_rtx_USE (VOIDmode, mask);
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
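
/* Editorial sketch (not part of GCC): the bitwise trick the SSE path above
   sets up.  Negation flips the sign bit with XOR; absolute value clears it
   with AND.  The masks correspond to the lo/hi constants built for SFmode
   above.  Guarded out of the build; names are illustrative only.  */
#if 0
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static float
sse_style_negsf (float x)
{
  uint32_t bits;
  memcpy (&bits, &x, sizeof bits);
  bits ^= 0x80000000u;              /* xorps with the sign-bit mask */
  memcpy (&x, &bits, sizeof bits);
  return x;
}

static float
sse_style_abssf (float x)
{
  uint32_t bits;
  memcpy (&bits, &x, sizeof bits);
  bits &= 0x7fffffffu;              /* andps with the inverted mask */
  memcpy (&x, &bits, sizeof bits);
  return x;
}

int
main (void)
{
  printf ("%g %g\n", sse_style_negsf (1.5f), sse_style_abssf (-2.25f));  /* -1.5 2.25 */
  return 0;
}
#endif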
/* Return TRUE or FALSE depending on whether the first SET in INSN
   has source and destination with matching CC modes, and that the
   CC mode is at least as constrained as REQ_MODE.  */

ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
  enum machine_mode set_mode;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  if (GET_CODE (set) != SET)
  if (GET_CODE (SET_SRC (set)) != COMPARE)

  set_mode = GET_MODE (SET_DEST (set));

      if (req_mode != CCNOmode
          && (req_mode != CCmode
              || XEXP (SET_SRC (set), 1) != const0_rtx))
      if (req_mode == CCGCmode)
      if (req_mode == CCGOCmode || req_mode == CCNOmode)
      if (req_mode == CCZmode)

  return (GET_MODE (SET_SRC (set)) == set_mode);
/* Generate insn patterns to do an integer compare of OPERANDS.  */

ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
  enum machine_mode cmpmode;

  cmpmode = SELECT_CC_MODE (code, op0, op1);
  flags = gen_rtx_REG (cmpmode, FLAGS_REG);

  /* This is very simple, but making the interface the same as in the
     FP case makes the rest of the code easier.  */
  tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
/* Figure out whether to use ordered or unordered fp comparisons.
   Return the appropriate mode to use.  */

ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
  /* ??? In order to make all comparisons reversible, we do all comparisons
     non-trapping when compiling for IEEE.  Once gcc is able to distinguish
     all forms of trapping and nontrapping comparisons, we can make inequality
     comparisons trapping again, since it results in better code when using
     FCOM based compares.  */
  return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    return ix86_fp_compare_mode (code);

      /* Only the zero flag is needed.  */
    case NE:                  /* ZF!=0 */

      /* Codes needing the carry flag.  */
    case GEU:                 /* CF=0 */
    case GTU:                 /* CF=0 & ZF=0 */
    case LTU:                 /* CF=1 */
    case LEU:                 /* CF=1 | ZF=1 */

      /* Codes possibly doable only with the sign flag when
         comparing against zero.  */
    case GE:                  /* SF=OF   or   SF=0 */
    case LT:                  /* SF<>OF  or   SF=1 */
      if (op1 == const0_rtx)
      /* For other cases the carry flag is not required.  */

      /* Codes doable only with the sign flag when comparing
         against zero, but we miss the jump instruction for it,
         so we need to use relational tests against overflow,
         which thus needs to be zero.  */
    case GT:                  /* ZF=0 & SF=OF */
    case LE:                  /* ZF=1 | SF<>OF */
      if (op1 == const0_rtx)

      /* strcmp pattern does (use flags) and combine may ask us for proper
/* Return the fixed registers used for condition codes.  */

ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)

/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return

static enum machine_mode
ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)

  if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)

  if ((m1 == CCGCmode && m2 == CCGOCmode)
      || (m1 == CCGOCmode && m2 == CCGCmode))

      /* These are only compatible with themselves, which we already
/* Return true if we should use an FCOMI instruction for this fp comparison.  */

ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
  enum rtx_code swapped_code = swap_condition (code);
  return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
          || (ix86_fp_comparison_cost (swapped_code)
              == ix86_fp_comparison_fcomi_cost (swapped_code)));
/* Swap, force into registers, or otherwise massage the two operands
   to a fp comparison.  The operands are updated in place; the new
   comparison code is returned.  */

static enum rtx_code
ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
  enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
  rtx op0 = *pop0, op1 = *pop1;
  enum machine_mode op_mode = GET_MODE (op0);
  int is_sse = SSE_REG_P (op0) || SSE_REG_P (op1);

  /* All of the unordered compare instructions only work on registers.
     The same is true of the fcomi compare instructions.  The same is
     true of the XFmode compare instructions if not comparing with
     zero (ftst insn is used in this case).  */

      && (fpcmp_mode == CCFPUmode
          || (op_mode == XFmode
              && ! (standard_80387_constant_p (op0) == 1
                    || standard_80387_constant_p (op1) == 1))
          || ix86_use_fcomi_compare (code)))
      op0 = force_reg (op_mode, op0);
      op1 = force_reg (op_mode, op1);

      /* %%% We only allow op1 in memory; op0 must be st(0).  So swap
         things around if they appear profitable, otherwise force op0

      if (standard_80387_constant_p (op0) == 0
          || (GET_CODE (op0) == MEM
              && ! (standard_80387_constant_p (op1) == 0
                    || GET_CODE (op1) == MEM)))
          tmp = op0, op0 = op1, op1 = tmp;
          code = swap_condition (code);

      if (GET_CODE (op0) != REG)
        op0 = force_reg (op_mode, op0);

      if (CONSTANT_P (op1))
          int tmp = standard_80387_constant_p (op1);
            op1 = validize_mem (force_const_mem (op_mode, op1));
                op1 = force_reg (op_mode, op1);
        op1 = force_reg (op_mode, op1);

  /* Try to rearrange the comparison to make it cheaper.  */
  if (ix86_fp_comparison_cost (code)
      > ix86_fp_comparison_cost (swap_condition (code))
      && (GET_CODE (op1) == REG || !no_new_pseudos))
      tmp = op0, op0 = op1, op1 = tmp;
      code = swap_condition (code);
      if (GET_CODE (op0) != REG)
        op0 = force_reg (op_mode, op0);
/* Convert the comparison codes we use to represent FP comparisons to an
   integer code that will result in a proper branch.  Return UNKNOWN if no
   such code

ix86_fp_compare_code_to_integer (enum rtx_code code)
/* Split comparison code CODE into comparisons we can do using branch
   instructions.  BYPASS_CODE is the comparison code for the branch that will
   branch around FIRST_CODE and SECOND_CODE.  If one of the branches is not
   required, its value is set to UNKNOWN.
   We never require more than two branches.  */

ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
                          enum rtx_code *first_code,
                          enum rtx_code *second_code)
  *bypass_code = UNKNOWN;
  *second_code = UNKNOWN;

  /* The fcomi comparison sets flags as follows:

    case GT:                  /* GTU - CF=0 & ZF=0 */
    case GE:                  /* GEU - CF=0 */
    case ORDERED:             /* PF=0 */
    case UNORDERED:           /* PF=1 */
    case UNEQ:                /* EQ - ZF=1 */
    case UNLT:                /* LTU - CF=1 */
    case UNLE:                /* LEU - CF=1 | ZF=1 */
    case LTGT:                /* EQ - ZF=0 */

    case LT:                  /* LTU - CF=1 - fails on unordered */
        *bypass_code = UNORDERED;
    case LE:                  /* LEU - CF=1 | ZF=1 - fails on unordered */
        *bypass_code = UNORDERED;
    case EQ:                  /* EQ - ZF=1 - fails on unordered */
        *bypass_code = UNORDERED;
    case NE:                  /* NE - ZF=0 - fails on unordered */
        *second_code = UNORDERED;
    case UNGE:                /* GEU - CF=0 - fails on unordered */
        *second_code = UNORDERED;
    case UNGT:                /* GTU - CF=0 & ZF=0 - fails on unordered */
        *second_code = UNORDERED;

  if (!TARGET_IEEE_FP)
      *second_code = UNKNOWN;
      *bypass_code = UNKNOWN;
/* Return the cost of a comparison done with fcom + arithmetic operations
   on AX.  All of the following functions use the number of instructions as
   the cost metric.  In the future this should be tweaked to compute bytes
   for optimize_size and to take into account the performance of the various
   instructions on the various CPUs.  */

ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
  if (!TARGET_IEEE_FP)

  /* The cost of code output by ix86_expand_fp_compare.  */

/* Return the cost of a comparison done using the fcomi operation.
   See ix86_fp_comparison_arithmetics_cost for the metrics.  */

ix86_fp_comparison_fcomi_cost (enum rtx_code code)
  enum rtx_code bypass_code, first_code, second_code;
  /* Return an arbitrarily high cost when the instruction is not supported;
     this prevents gcc from using it.  */

  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
  return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;

/* Return the cost of a comparison done using the sahf operation.
   See ix86_fp_comparison_arithmetics_cost for the metrics.  */

ix86_fp_comparison_sahf_cost (enum rtx_code code)
  enum rtx_code bypass_code, first_code, second_code;
  /* Return an arbitrarily high cost when the instruction is not preferred;
     this keeps gcc from using it.  */
  if (!TARGET_USE_SAHF && !optimize_size)

  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
  return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;

/* Compute the cost of the comparison done using any method.
   See ix86_fp_comparison_arithmetics_cost for the metrics.  */

ix86_fp_comparison_cost (enum rtx_code code)
  int fcomi_cost, sahf_cost, arithmetics_cost = 1024;

  fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
  sahf_cost = ix86_fp_comparison_sahf_cost (code);

  min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
  if (min > sahf_cost)
  if (min > fcomi_cost)
/* Generate insn patterns to do a floating point compare of OPERANDS.  */

ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
                        rtx *second_test, rtx *bypass_test)
  enum machine_mode fpcmp_mode, intcmp_mode;

  int cost = ix86_fp_comparison_cost (code);
  enum rtx_code bypass_code, first_code, second_code;

  fpcmp_mode = ix86_fp_compare_mode (code);
  code = ix86_prepare_fp_compare_args (code, &op0, &op1);

    *second_test = NULL_RTX;
    *bypass_test = NULL_RTX;

  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);

  /* Do fcomi/sahf based test when profitable.  */
  if ((bypass_code == UNKNOWN || bypass_test)
      && (second_code == UNKNOWN || second_test)
      && ix86_fp_comparison_arithmetics_cost (code) > cost)
          tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
          tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),

          tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
          tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
            scratch = gen_reg_rtx (HImode);
          emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
          emit_insn (gen_x86_sahf_1 (scratch));

      /* The FP codes work out to act like unsigned.  */
      intcmp_mode = fpcmp_mode;

      if (bypass_code != UNKNOWN)
        *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
                                       gen_rtx_REG (intcmp_mode, FLAGS_REG),
      if (second_code != UNKNOWN)
        *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
                                       gen_rtx_REG (intcmp_mode, FLAGS_REG),

      /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first.  */
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
        scratch = gen_reg_rtx (HImode);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));

      /* In the unordered case, we have to check C2 for NaN's, which
         doesn't happen to work out to anything nice combination-wise.
         So do some bit twiddling on the value we've got in AH to come
         up with an appropriate set of condition codes.  */

      intcmp_mode = CCNOmode;
          if (code == GT || !TARGET_IEEE_FP)
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
              intcmp_mode = CCmode;

          if (code == LT && TARGET_IEEE_FP)
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
              intcmp_mode = CCmode;
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));

          if (code == GE || !TARGET_IEEE_FP)
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,

          if (code == LE && TARGET_IEEE_FP)
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
              intcmp_mode = CCmode;
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));

          if (code == EQ && TARGET_IEEE_FP)
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
              intcmp_mode = CCmode;
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));

          if (code == NE && TARGET_IEEE_FP)
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));

          emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
          emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode,
                         gen_rtx_REG (intcmp_mode, FLAGS_REG),
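
/* Editorial sketch (not part of GCC): the AH bit layout after "fnstsw %ax"
   that the 0x01/0x04/0x40/0x45 masks above test.  C0, C2 and C3 report the
   fcom result; a following sahf would copy them into CF, PF and ZF.  Guarded
   out of the build; names are illustrative only.  */
#if 0
#include <stdio.h>

#define FP_C0 0x01   /* "below"      -> CF after sahf */
#define FP_C2 0x04   /* "unordered"  -> PF after sahf */
#define FP_C3 0x40   /* "equal"      -> ZF after sahf */

/* Classify an fcom result byte the way the GT/UNORDERED tests above do.  */
static const char *
classify (unsigned char ah)
{
  if ((ah & (FP_C0 | FP_C2 | FP_C3)) == 0)   /* test $0x45: none set -> a > b */
    return "greater";
  if (ah & FP_C2)
    return "unordered";
  return (ah & FP_C3) ? "equal" : "less";
}

int
main (void)
{
  printf ("%s %s\n", classify (0x00), classify (FP_C2));   /* greater unordered */
  return 0;
}
#endif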
ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
  op0 = ix86_compare_op0;
  op1 = ix86_compare_op1;

    *second_test = NULL_RTX;
    *bypass_test = NULL_RTX;

  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
                                  second_test, bypass_test);
    ret = ix86_expand_int_compare (code, op0, op1);
/* Return true if the CODE will result in a nontrivial jump sequence.  */

ix86_fp_jump_nontrivial_p (enum rtx_code code)
  enum rtx_code bypass_code, first_code, second_code;

  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
  return bypass_code != UNKNOWN || second_code != UNKNOWN;
ix86_expand_branch (enum rtx_code code, rtx label)

  switch (GET_MODE (ix86_compare_op0))
      tmp = ix86_expand_compare (code, NULL, NULL);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                  gen_rtx_LABEL_REF (VOIDmode, label),
      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));

        enum rtx_code bypass_code, first_code, second_code;

        code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,

        ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);

        /* Check whether we will use the natural sequence with one jump.  If
           so, we can expand the jump early.  Otherwise delay expansion by
           creating a compound insn so as not to confuse the optimizers.  */
        if (bypass_code == UNKNOWN && second_code == UNKNOWN
            ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
                                  gen_rtx_LABEL_REF (VOIDmode, label),
                                  pc_rtx, NULL_RTX, NULL_RTX);
            tmp = gen_rtx_fmt_ee (code, VOIDmode,
                                  ix86_compare_op0, ix86_compare_op1);
            tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                        gen_rtx_LABEL_REF (VOIDmode, label),
            tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);

            use_fcomi = ix86_use_fcomi_compare (code);
            vec = rtvec_alloc (3 + !use_fcomi);
            RTVEC_ELT (vec, 0) = tmp;
              = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
              = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
              = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
            emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));

      /* Expand a DImode branch into multiple compare+branch.  */
        rtx lo[2], hi[2], label2;
        enum rtx_code code1, code2, code3;

        if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
            tmp = ix86_compare_op0;
            ix86_compare_op0 = ix86_compare_op1;
            ix86_compare_op1 = tmp;
            code = swap_condition (code);
        split_di (&ix86_compare_op0, 1, lo+0, hi+0);
        split_di (&ix86_compare_op1, 1, lo+1, hi+1);

        /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
           avoid two branches.  This costs one extra insn, so disable when
           optimizing for size.  */

        if ((code == EQ || code == NE)
             || hi[1] == const0_rtx || lo[1] == const0_rtx))

            if (hi[1] != const0_rtx)
              xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
                                   NULL_RTX, 0, OPTAB_WIDEN);

            if (lo[1] != const0_rtx)
              xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
                                   NULL_RTX, 0, OPTAB_WIDEN);

            tmp = expand_binop (SImode, ior_optab, xor1, xor0,
                                NULL_RTX, 0, OPTAB_WIDEN);

            ix86_compare_op0 = tmp;
            ix86_compare_op1 = const0_rtx;
            ix86_expand_branch (code, label);
      /* Otherwise, if we are doing less-than or greater-or-equal-than,
         op1 is a constant and the low word is zero, then we can just
         examine the high word.  */
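      /* E.g. (illustrative only) for "a < 0x1200000000" the low word of
         the constant is zero, so it is enough to test hi(a) < 0x12; the
         low word of a cannot change the outcome.  */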
      if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
        switch (code)
          case LT: case LTU: case GE: case GEU:
            ix86_compare_op0 = hi[0];
            ix86_compare_op1 = hi[1];
            ix86_expand_branch (code, label);
      /* Otherwise, we need two or three jumps.  */

      label2 = gen_label_rtx ();

      code2 = swap_condition (code);
      code3 = unsigned_condition (code);

      switch (code)
        case LT: case GT: case LTU: case GTU:
        case LE:  code1 = LT;  code2 = GT;  break;
        case GE:  code1 = GT;  code2 = LT;  break;
        case LEU: code1 = LTU; code2 = GTU; break;
        case GEU: code1 = GTU; code2 = LTU; break;

        case EQ:  code1 = UNKNOWN; code2 = NE;  break;
        case NE:  code2 = UNKNOWN; break;
       *  if (hi(a) < hi(b)) goto true;
       *  if (hi(a) > hi(b)) goto false;
       *  if (lo(a) < lo(b)) goto true;

      ix86_compare_op0 = hi[0];
      ix86_compare_op1 = hi[1];

      if (code1 != UNKNOWN)
        ix86_expand_branch (code1, label);
      if (code2 != UNKNOWN)
        ix86_expand_branch (code2, label2);

      ix86_compare_op0 = lo[0];
      ix86_compare_op1 = lo[1];
      ix86_expand_branch (code3, label);

      if (code2 != UNKNOWN)
        emit_label (label2);
8971 /* Split branch based on floating point condition. */
8973 ix86_split_fp_branch (enum rtx_code code
, rtx op1
, rtx op2
,
8974 rtx target1
, rtx target2
, rtx tmp
, rtx pushed
)
8977 rtx label
= NULL_RTX
;
8979 int bypass_probability
= -1, second_probability
= -1, probability
= -1;
8982 if (target2
!= pc_rtx
)
8985 code
= reverse_condition_maybe_unordered (code
);
8990 condition
= ix86_expand_fp_compare (code
, op1
, op2
,
8991 tmp
, &second
, &bypass
);
8993 /* Remove pushed operand from stack. */
8995 ix86_free_from_memory (GET_MODE (pushed
));
8997 if (split_branch_probability
>= 0)
8999 /* Distribute the probabilities across the jumps.
9000 Assume the BYPASS and SECOND to be always test
9002 probability
= split_branch_probability
;
      /* A value of 1 is low enough to make no need for the probability
         to be updated.  Later we may run some experiments and see
         if unordered values are more frequent in practice.  */
9008 bypass_probability
= 1;
9010 second_probability
= 1;
9012 if (bypass
!= NULL_RTX
)
9014 label
= gen_label_rtx ();
9015 i
= emit_jump_insn (gen_rtx_SET
9017 gen_rtx_IF_THEN_ELSE (VOIDmode
,
9019 gen_rtx_LABEL_REF (VOIDmode
,
9022 if (bypass_probability
>= 0)
9024 = gen_rtx_EXPR_LIST (REG_BR_PROB
,
9025 GEN_INT (bypass_probability
),
9028 i
= emit_jump_insn (gen_rtx_SET
9030 gen_rtx_IF_THEN_ELSE (VOIDmode
,
9031 condition
, target1
, target2
)));
9032 if (probability
>= 0)
9034 = gen_rtx_EXPR_LIST (REG_BR_PROB
,
9035 GEN_INT (probability
),
9037 if (second
!= NULL_RTX
)
9039 i
= emit_jump_insn (gen_rtx_SET
9041 gen_rtx_IF_THEN_ELSE (VOIDmode
, second
, target1
,
9043 if (second_probability
>= 0)
9045 = gen_rtx_EXPR_LIST (REG_BR_PROB
,
9046 GEN_INT (second_probability
),
9049 if (label
!= NULL_RTX
)
9054 ix86_expand_setcc (enum rtx_code code
, rtx dest
)
9056 rtx ret
, tmp
, tmpreg
, equiv
;
9057 rtx second_test
, bypass_test
;
9059 if (GET_MODE (ix86_compare_op0
) == DImode
9061 return 0; /* FAIL */
9063 if (GET_MODE (dest
) != QImode
)
9066 ret
= ix86_expand_compare (code
, &second_test
, &bypass_test
);
9067 PUT_MODE (ret
, QImode
);
9072 emit_insn (gen_rtx_SET (VOIDmode
, tmp
, ret
));
9073 if (bypass_test
|| second_test
)
9075 rtx test
= second_test
;
9077 rtx tmp2
= gen_reg_rtx (QImode
);
9084 PUT_CODE (test
, reverse_condition_maybe_unordered (GET_CODE (test
)));
9086 PUT_MODE (test
, QImode
);
9087 emit_insn (gen_rtx_SET (VOIDmode
, tmp2
, test
));
9090 emit_insn (gen_andqi3 (tmp
, tmpreg
, tmp2
));
9092 emit_insn (gen_iorqi3 (tmp
, tmpreg
, tmp2
));
9095 /* Attach a REG_EQUAL note describing the comparison result. */
9096 equiv
= simplify_gen_relational (code
, QImode
,
9097 GET_MODE (ix86_compare_op0
),
9098 ix86_compare_op0
, ix86_compare_op1
);
9099 set_unique_reg_note (get_last_insn (), REG_EQUAL
, equiv
);
9101 return 1; /* DONE */
/* Expand a comparison setting or clearing the carry flag.  Return true
   when successful and set *POP for the operation.  */
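/* As a rough illustration (not necessarily the exact insns emitted), an
   unsigned "x < y ? a : b" can then be compiled without a branch as

        cmpl    y, x            AT&T syntax; sets carry when x < y
        sbbl    %eax, %eax      eax = carry ? -1 : 0
        andl    $(a-b), %eax
        addl    $b, %eax        eax = carry ? a : b

   which is why the callers below only want LTU/GEU style results.  */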
9107 ix86_expand_carry_flag_compare (enum rtx_code code
, rtx op0
, rtx op1
, rtx
*pop
)
  enum machine_mode mode =
    GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);

  /* Do not handle DImode compares that go through the special path.  Also
     we can't deal with FP compares yet.  This is possible to add.  */
9114 if ((mode
== DImode
&& !TARGET_64BIT
))
9116 if (FLOAT_MODE_P (mode
))
9118 rtx second_test
= NULL
, bypass_test
= NULL
;
9119 rtx compare_op
, compare_seq
;
  /* Shortcut: the following common codes never translate
     into carry-flag compares.  */
9122 if (code
== EQ
|| code
== NE
|| code
== UNEQ
|| code
== LTGT
9123 || code
== ORDERED
|| code
== UNORDERED
)
9126 /* These comparisons require zero flag; swap operands so they won't. */
9127 if ((code
== GT
|| code
== UNLE
|| code
== LE
|| code
== UNGT
)
9133 code
= swap_condition (code
);
      /* Try to expand the comparison and verify that we end up with a
         carry-flag-based comparison.  This fails to be true only when
         we decide to expand the comparison using arithmetic, which is
         not too common a scenario.  */
9140 compare_op
= ix86_expand_fp_compare (code
, op0
, op1
, NULL_RTX
,
9141 &second_test
, &bypass_test
);
9142 compare_seq
= get_insns ();
9145 if (second_test
|| bypass_test
)
9147 if (GET_MODE (XEXP (compare_op
, 0)) == CCFPmode
9148 || GET_MODE (XEXP (compare_op
, 0)) == CCFPUmode
)
9149 code
= ix86_fp_compare_code_to_integer (GET_CODE (compare_op
));
9151 code
= GET_CODE (compare_op
);
9152 if (code
!= LTU
&& code
!= GEU
)
9154 emit_insn (compare_seq
);
9158 if (!INTEGRAL_MODE_P (mode
))
9166 /* Convert a==0 into (unsigned)a<1. */
9169 if (op1
!= const0_rtx
)
9172 code
= (code
== EQ
? LTU
: GEU
);
9175 /* Convert a>b into b<a or a>=b-1. */
9178 if (GET_CODE (op1
) == CONST_INT
)
9180 op1
= gen_int_mode (INTVAL (op1
) + 1, GET_MODE (op0
));
9181 /* Bail out on overflow. We still can swap operands but that
9182 would force loading of the constant into register. */
9183 if (op1
== const0_rtx
9184 || !x86_64_immediate_operand (op1
, GET_MODE (op1
)))
9186 code
= (code
== GTU
? GEU
: LTU
);
9193 code
= (code
== GTU
? LTU
: GEU
);
9197 /* Convert a>=0 into (unsigned)a<0x80000000. */
9200 if (mode
== DImode
|| op1
!= const0_rtx
)
9202 op1
= gen_int_mode (1 << (GET_MODE_BITSIZE (mode
) - 1), mode
);
9203 code
= (code
== LT
? GEU
: LTU
);
9207 if (mode
== DImode
|| op1
!= constm1_rtx
)
9209 op1
= gen_int_mode (1 << (GET_MODE_BITSIZE (mode
) - 1), mode
);
9210 code
= (code
== LE
? GEU
: LTU
);
9216 /* Swapping operands may cause constant to appear as first operand. */
9217 if (!nonimmediate_operand (op0
, VOIDmode
))
9221 op0
= force_reg (mode
, op0
);
9223 ix86_compare_op0
= op0
;
9224 ix86_compare_op1
= op1
;
9225 *pop
= ix86_expand_compare (code
, NULL
, NULL
);
9226 if (GET_CODE (*pop
) != LTU
&& GET_CODE (*pop
) != GEU
)
9232 ix86_expand_int_movcc (rtx operands
[])
9234 enum rtx_code code
= GET_CODE (operands
[1]), compare_code
;
9235 rtx compare_seq
, compare_op
;
9236 rtx second_test
, bypass_test
;
9237 enum machine_mode mode
= GET_MODE (operands
[0]);
9238 bool sign_bit_compare_p
= false;;
9241 compare_op
= ix86_expand_compare (code
, &second_test
, &bypass_test
);
9242 compare_seq
= get_insns ();
9245 compare_code
= GET_CODE (compare_op
);
9247 if ((ix86_compare_op1
== const0_rtx
&& (code
== GE
|| code
== LT
))
9248 || (ix86_compare_op1
== constm1_rtx
&& (code
== GT
|| code
== LE
)))
9249 sign_bit_compare_p
= true;
9251 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
9252 HImode insns, we'd be swallowed in word prefix ops. */
9254 if ((mode
!= HImode
|| TARGET_FAST_PREFIX
)
9255 && (mode
!= DImode
|| TARGET_64BIT
)
9256 && GET_CODE (operands
[2]) == CONST_INT
9257 && GET_CODE (operands
[3]) == CONST_INT
)
9259 rtx out
= operands
[0];
9260 HOST_WIDE_INT ct
= INTVAL (operands
[2]);
9261 HOST_WIDE_INT cf
= INTVAL (operands
[3]);
      /* Sign bit compares are better done using shifts than we do by using
         sbb.  */
9267 if (sign_bit_compare_p
9268 || ix86_expand_carry_flag_compare (code
, ix86_compare_op0
,
9269 ix86_compare_op1
, &compare_op
))
9271 /* Detect overlap between destination and compare sources. */
9274 if (!sign_bit_compare_p
)
9278 compare_code
= GET_CODE (compare_op
);
9280 if (GET_MODE (XEXP (compare_op
, 0)) == CCFPmode
9281 || GET_MODE (XEXP (compare_op
, 0)) == CCFPUmode
)
9284 compare_code
= ix86_fp_compare_code_to_integer (compare_code
);
          /* To simplify the rest of the code, restrict to the GEU case.  */
9288 if (compare_code
== LTU
)
9290 HOST_WIDE_INT tmp
= ct
;
9293 compare_code
= reverse_condition (compare_code
);
9294 code
= reverse_condition (code
);
9299 PUT_CODE (compare_op
,
9300 reverse_condition_maybe_unordered
9301 (GET_CODE (compare_op
)));
9303 PUT_CODE (compare_op
, reverse_condition (GET_CODE (compare_op
)));
9307 if (reg_overlap_mentioned_p (out
, ix86_compare_op0
)
9308 || reg_overlap_mentioned_p (out
, ix86_compare_op1
))
9309 tmp
= gen_reg_rtx (mode
);
9312 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp
, compare_op
));
9314 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode
, tmp
), compare_op
));
9318 if (code
== GT
|| code
== GE
)
9319 code
= reverse_condition (code
);
9322 HOST_WIDE_INT tmp
= ct
;
9327 tmp
= emit_store_flag (tmp
, code
, ix86_compare_op0
,
9328 ix86_compare_op1
, VOIDmode
, 0, -1);
9341 tmp
= expand_simple_binop (mode
, PLUS
,
9343 copy_rtx (tmp
), 1, OPTAB_DIRECT
);
9354 tmp
= expand_simple_binop (mode
, IOR
,
9356 copy_rtx (tmp
), 1, OPTAB_DIRECT
);
9358 else if (diff
== -1 && ct
)
9368 tmp
= expand_simple_unop (mode
, NOT
, tmp
, copy_rtx (tmp
), 1);
9370 tmp
= expand_simple_binop (mode
, PLUS
,
9371 copy_rtx (tmp
), GEN_INT (cf
),
9372 copy_rtx (tmp
), 1, OPTAB_DIRECT
);
9380 * andl cf - ct, dest
9390 tmp
= expand_simple_unop (mode
, NOT
, tmp
, copy_rtx (tmp
), 1);
9393 tmp
= expand_simple_binop (mode
, AND
,
9395 gen_int_mode (cf
- ct
, mode
),
9396 copy_rtx (tmp
), 1, OPTAB_DIRECT
);
9398 tmp
= expand_simple_binop (mode
, PLUS
,
9399 copy_rtx (tmp
), GEN_INT (ct
),
9400 copy_rtx (tmp
), 1, OPTAB_DIRECT
);
9403 if (!rtx_equal_p (tmp
, out
))
9404 emit_move_insn (copy_rtx (out
), copy_rtx (tmp
));
9406 return 1; /* DONE */
9412 tmp
= ct
, ct
= cf
, cf
= tmp
;
9414 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0
)))
          /* We may be reversing an unordered compare to a normal compare,
             which is not valid in general (we may convert a non-trapping
             condition into a trapping one), however on i386 we currently
             emit all comparisons unordered.  */
9420 compare_code
= reverse_condition_maybe_unordered (compare_code
);
9421 code
= reverse_condition_maybe_unordered (code
);
9425 compare_code
= reverse_condition (compare_code
);
9426 code
= reverse_condition (code
);
9430 compare_code
= UNKNOWN
;
9431 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0
)) == MODE_INT
9432 && GET_CODE (ix86_compare_op1
) == CONST_INT
)
9434 if (ix86_compare_op1
== const0_rtx
9435 && (code
== LT
|| code
== GE
))
9436 compare_code
= code
;
9437 else if (ix86_compare_op1
== constm1_rtx
)
9441 else if (code
== GT
)
9446 /* Optimize dest = (op0 < 0) ? -1 : cf. */
9447 if (compare_code
!= UNKNOWN
9448 && GET_MODE (ix86_compare_op0
) == GET_MODE (out
)
9449 && (cf
== -1 || ct
== -1))
          /* If the lea code below could be used, only optimize
             if it results in a 2-insn sequence.  */
9454 if (! (diff
== 1 || diff
== 2 || diff
== 4 || diff
== 8
9455 || diff
== 3 || diff
== 5 || diff
== 9)
9456 || (compare_code
== LT
&& ct
== -1)
9457 || (compare_code
== GE
&& cf
== -1))
9460 * notl op1 (if necessary)
9468 code
= reverse_condition (code
);
9471 out
= emit_store_flag (out
, code
, ix86_compare_op0
,
9472 ix86_compare_op1
, VOIDmode
, 0, -1);
9474 out
= expand_simple_binop (mode
, IOR
,
9476 out
, 1, OPTAB_DIRECT
);
9477 if (out
!= operands
[0])
9478 emit_move_insn (operands
[0], out
);
9480 return 1; /* DONE */
9485 if ((diff
== 1 || diff
== 2 || diff
== 4 || diff
== 8
9486 || diff
== 3 || diff
== 5 || diff
== 9)
9487 && ((mode
!= QImode
&& mode
!= HImode
) || !TARGET_PARTIAL_REG_STALL
)
9489 || x86_64_immediate_operand (GEN_INT (cf
), VOIDmode
)))
           *    lea cf(dest*(ct-cf)),dest
           *
           * This also catches the degenerate setcc-only case.
           */
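          /* For example (illustration only), "dest = cond ? 5 : 2" has
             diff == 3 and can become

                 setcc %al                      ; 1 or 0, widened into %eax
                 leal  2(%eax,%eax,2), %eax     ; %eax = %eax*3 + 2

             with no jump and no conditional move.  */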
9505 out
= emit_store_flag (out
, code
, ix86_compare_op0
,
9506 ix86_compare_op1
, VOIDmode
, 0, 1);
          /* On x86_64 the lea instruction operates on Pmode, so we need
             to get the arithmetic done in the proper mode to match.  */
9512 tmp
= copy_rtx (out
);
9516 out1
= copy_rtx (out
);
9517 tmp
= gen_rtx_MULT (mode
, out1
, GEN_INT (diff
& ~1));
9521 tmp
= gen_rtx_PLUS (mode
, tmp
, out1
);
9527 tmp
= gen_rtx_PLUS (mode
, tmp
, GEN_INT (cf
));
9530 if (!rtx_equal_p (tmp
, out
))
9533 out
= force_operand (tmp
, copy_rtx (out
));
9535 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (out
), copy_rtx (tmp
)));
9537 if (!rtx_equal_p (out
, operands
[0]))
9538 emit_move_insn (operands
[0], copy_rtx (out
));
9540 return 1; /* DONE */
      /*
       * General case:                  Jumpful:
       *   xorl dest,dest               cmpl op1, op2
       *   cmpl op1, op2                movl ct, dest
       *   decl dest                    movl cf, dest
       *   andl (cf-ct),dest            1:
       *
       * This is reasonably steep, but branch mispredict costs are
       * high on modern cpus, so consider failing only if optimizing
       * for size.
       */
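      /* Concretely (an illustration only), "dest = cond ? 7 : 12" becomes

             dest = cond ? 0 : -1;      (setcc followed by decrement)
             dest &= 12 - 7;            (0 or 5)
             dest += 7;                 (7 or 12)

         trading a few ALU operations for a possibly mispredicted jump.  */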
9559 if ((!TARGET_CMOVE
|| (mode
== QImode
&& TARGET_PARTIAL_REG_STALL
))
9560 && BRANCH_COST
>= 2)
9566 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0
)))
            /* We may be reversing an unordered compare to a normal compare,
               which is not valid in general (we may convert a non-trapping
               condition into a trapping one), however on i386 we currently
               emit all comparisons unordered.  */
9571 code
= reverse_condition_maybe_unordered (code
);
9574 code
= reverse_condition (code
);
9575 if (compare_code
!= UNKNOWN
)
9576 compare_code
= reverse_condition (compare_code
);
9580 if (compare_code
!= UNKNOWN
)
          /* notl op1   (if needed)

             For x < 0 (resp. x <= -1) there will be no notl,
             so if possible swap the constants to get rid of the
             complement.
             True/false will be -1/0 while code below (store flag
             followed by decrement) is 0/-1, so the constants need
             to be exchanged once more.  */
9594 if (compare_code
== GE
|| !cf
)
9596 code
= reverse_condition (code
);
9601 HOST_WIDE_INT tmp
= cf
;
9606 out
= emit_store_flag (out
, code
, ix86_compare_op0
,
9607 ix86_compare_op1
, VOIDmode
, 0, -1);
9611 out
= emit_store_flag (out
, code
, ix86_compare_op0
,
9612 ix86_compare_op1
, VOIDmode
, 0, 1);
9614 out
= expand_simple_binop (mode
, PLUS
, copy_rtx (out
), constm1_rtx
,
9615 copy_rtx (out
), 1, OPTAB_DIRECT
);
9618 out
= expand_simple_binop (mode
, AND
, copy_rtx (out
),
9619 gen_int_mode (cf
- ct
, mode
),
9620 copy_rtx (out
), 1, OPTAB_DIRECT
);
9622 out
= expand_simple_binop (mode
, PLUS
, copy_rtx (out
), GEN_INT (ct
),
9623 copy_rtx (out
), 1, OPTAB_DIRECT
);
9624 if (!rtx_equal_p (out
, operands
[0]))
9625 emit_move_insn (operands
[0], copy_rtx (out
));
9627 return 1; /* DONE */
9631 if (!TARGET_CMOVE
|| (mode
== QImode
&& TARGET_PARTIAL_REG_STALL
))
      /* Try a few more things with specific constants and a variable.  */
9636 rtx var
, orig_out
, out
, tmp
;
9638 if (BRANCH_COST
<= 2)
9639 return 0; /* FAIL */
9641 /* If one of the two operands is an interesting constant, load a
9642 constant with the above and mask it in with a logical operation. */
9644 if (GET_CODE (operands
[2]) == CONST_INT
)
9647 if (INTVAL (operands
[2]) == 0 && operands
[3] != constm1_rtx
)
9648 operands
[3] = constm1_rtx
, op
= and_optab
;
9649 else if (INTVAL (operands
[2]) == -1 && operands
[3] != const0_rtx
)
9650 operands
[3] = const0_rtx
, op
= ior_optab
;
9652 return 0; /* FAIL */
9654 else if (GET_CODE (operands
[3]) == CONST_INT
)
9657 if (INTVAL (operands
[3]) == 0 && operands
[2] != constm1_rtx
)
9658 operands
[2] = constm1_rtx
, op
= and_optab
;
9659 else if (INTVAL (operands
[3]) == -1 && operands
[3] != const0_rtx
)
9660 operands
[2] = const0_rtx
, op
= ior_optab
;
9662 return 0; /* FAIL */
9665 return 0; /* FAIL */
9667 orig_out
= operands
[0];
9668 tmp
= gen_reg_rtx (mode
);
9671 /* Recurse to get the constant loaded. */
9672 if (ix86_expand_int_movcc (operands
) == 0)
9673 return 0; /* FAIL */
9675 /* Mask in the interesting variable. */
9676 out
= expand_binop (mode
, op
, var
, tmp
, orig_out
, 0,
9678 if (!rtx_equal_p (out
, orig_out
))
9679 emit_move_insn (copy_rtx (orig_out
), copy_rtx (out
));
9681 return 1; /* DONE */
9685 * For comparison with above,
9695 if (! nonimmediate_operand (operands
[2], mode
))
9696 operands
[2] = force_reg (mode
, operands
[2]);
9697 if (! nonimmediate_operand (operands
[3], mode
))
9698 operands
[3] = force_reg (mode
, operands
[3]);
9700 if (bypass_test
&& reg_overlap_mentioned_p (operands
[0], operands
[3]))
9702 rtx tmp
= gen_reg_rtx (mode
);
9703 emit_move_insn (tmp
, operands
[3]);
9706 if (second_test
&& reg_overlap_mentioned_p (operands
[0], operands
[2]))
9708 rtx tmp
= gen_reg_rtx (mode
);
9709 emit_move_insn (tmp
, operands
[2]);
9713 if (! register_operand (operands
[2], VOIDmode
)
9715 || ! register_operand (operands
[3], VOIDmode
)))
9716 operands
[2] = force_reg (mode
, operands
[2]);
9719 && ! register_operand (operands
[3], VOIDmode
))
9720 operands
[3] = force_reg (mode
, operands
[3]);
9722 emit_insn (compare_seq
);
9723 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0],
9724 gen_rtx_IF_THEN_ELSE (mode
,
9725 compare_op
, operands
[2],
9728 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (operands
[0]),
9729 gen_rtx_IF_THEN_ELSE (mode
,
9731 copy_rtx (operands
[3]),
9732 copy_rtx (operands
[0]))));
9734 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (operands
[0]),
9735 gen_rtx_IF_THEN_ELSE (mode
,
9737 copy_rtx (operands
[2]),
9738 copy_rtx (operands
[0]))));
9740 return 1; /* DONE */
9744 ix86_expand_fp_movcc (rtx operands
[])
9746 enum machine_mode mode
= GET_MODE (operands
[0]);
9747 enum rtx_code code
= GET_CODE (operands
[1]);
9748 rtx tmp
, compare_op
, second_test
, bypass_test
;
9750 if (TARGET_SSE_MATH
&& SSE_FLOAT_MODE_P (mode
))
9752 rtx cmp_op0
, cmp_op1
, if_true
, if_false
;
9754 enum machine_mode vmode
, cmode
;
9755 bool is_minmax
= false;
9757 cmp_op0
= ix86_compare_op0
;
9758 cmp_op1
= ix86_compare_op1
;
9759 if_true
= operands
[2];
9760 if_false
= operands
[3];
      /* Since we have no cmove for SSE registers, don't force bad register
         allocation just to gain access to it.  Deny movcc when the
         comparison mode doesn't match the move mode.  */
9765 cmode
= GET_MODE (cmp_op0
);
9766 if (cmode
== VOIDmode
)
9767 cmode
= GET_MODE (cmp_op1
);
9771 /* We have no LTGT as an operator. We could implement it with
9772 NE & ORDERED, but this requires an extra temporary. It's
9773 not clear that it's worth it. */
9774 if (code
== LTGT
|| code
== UNEQ
)
9777 /* Massage condition to satisfy sse_comparison_operator. Try
9778 to canonicalize the destination operand to be first in the
9779 comparison - this helps reload to avoid extra moves. */
9780 if (!sse_comparison_operator (operands
[1], VOIDmode
)
9781 || (COMMUTATIVE_P (operands
[1])
9782 && rtx_equal_p (operands
[0], cmp_op1
)))
9787 code
= swap_condition (code
);
      /* Detect conditional moves that exactly match min/max operational
         semantics.  Note that this is IEEE safe, as long as we don't
         interchange the operands.  Which is why we keep this in the form
         of an IF_THEN_ELSE instead of reducing to SMIN/SMAX.  */
9794 if ((code
== LT
|| code
== UNGE
) && REG_P (cmp_op0
) && REG_P (cmp_op1
))
9796 if (((cmp_op0
== if_true
&& cmp_op1
== if_false
)
9797 || (cmp_op0
== if_false
&& cmp_op1
== if_true
)))
9812 else if (mode
== DFmode
)
9817 cmp_op0
= force_reg (mode
, cmp_op0
);
9818 if (!nonimmediate_operand (cmp_op1
, mode
))
9819 cmp_op1
= force_reg (mode
, cmp_op1
);
9821 tmp
= gen_rtx_fmt_ee (code
, mode
, cmp_op0
, cmp_op1
);
9822 gcc_assert (sse_comparison_operator (tmp
, VOIDmode
));
9824 tmp
= gen_rtx_IF_THEN_ELSE (mode
, tmp
, if_true
, if_false
);
9825 tmp
= gen_rtx_SET (VOIDmode
, operands
[0], tmp
);
9829 clob
= gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (vmode
));
9830 tmp
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, tmp
, clob
));
9837 /* The floating point conditional move instructions don't directly
9838 support conditions resulting from a signed integer comparison. */
9840 compare_op
= ix86_expand_compare (code
, &second_test
, &bypass_test
);
9842 /* The floating point conditional move instructions don't directly
9843 support signed integer comparisons. */
9845 if (!fcmov_comparison_operator (compare_op
, VOIDmode
))
9847 if (second_test
!= NULL
|| bypass_test
!= NULL
)
9849 tmp
= gen_reg_rtx (QImode
);
9850 ix86_expand_setcc (code
, tmp
);
9852 ix86_compare_op0
= tmp
;
9853 ix86_compare_op1
= const0_rtx
;
9854 compare_op
= ix86_expand_compare (code
, &second_test
, &bypass_test
);
9856 if (bypass_test
&& reg_overlap_mentioned_p (operands
[0], operands
[3]))
9858 tmp
= gen_reg_rtx (mode
);
9859 emit_move_insn (tmp
, operands
[3]);
9862 if (second_test
&& reg_overlap_mentioned_p (operands
[0], operands
[2]))
9864 tmp
= gen_reg_rtx (mode
);
9865 emit_move_insn (tmp
, operands
[2]);
9869 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0],
9870 gen_rtx_IF_THEN_ELSE (mode
, compare_op
,
9871 operands
[2], operands
[3])));
9873 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0],
9874 gen_rtx_IF_THEN_ELSE (mode
, bypass_test
,
9875 operands
[3], operands
[0])));
9877 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0],
9878 gen_rtx_IF_THEN_ELSE (mode
, second_test
,
9879 operands
[2], operands
[0])));
9885 ix86_split_sse_movcc (rtx operands
[])
9887 rtx dest
, scratch
, cmp
, op_true
, op_false
, x
;
9888 enum machine_mode mode
, vmode
;
  /* Note that the operator CMP has been set up with matching constraints
     such that dest is valid for the comparison.  Unless one of the true
     or false operands is zero, the true operand has already been placed
     in SCRATCH.  */
9895 scratch
= operands
[1];
9896 op_true
= operands
[2];
9897 op_false
= operands
[3];
9900 mode
= GET_MODE (dest
);
9901 vmode
= GET_MODE (scratch
);
9903 emit_insn (gen_rtx_SET (VOIDmode
, dest
, cmp
));
9905 dest
= simplify_gen_subreg (vmode
, dest
, mode
, 0);
9907 if (op_false
== CONST0_RTX (mode
))
9909 op_true
= simplify_gen_subreg (vmode
, op_true
, mode
, 0);
9910 x
= gen_rtx_AND (vmode
, dest
, op_true
);
9911 emit_insn (gen_rtx_SET (VOIDmode
, dest
, x
));
9915 op_false
= simplify_gen_subreg (vmode
, op_false
, mode
, 0);
9917 if (op_true
== CONST0_RTX (mode
))
9919 x
= gen_rtx_NOT (vmode
, dest
);
9920 x
= gen_rtx_AND (vmode
, x
, op_false
);
9921 emit_insn (gen_rtx_SET (VOIDmode
, dest
, x
));
9925 x
= gen_rtx_AND (vmode
, scratch
, dest
);
9926 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, x
));
9928 x
= gen_rtx_NOT (vmode
, dest
);
9929 x
= gen_rtx_AND (vmode
, x
, op_false
);
9930 emit_insn (gen_rtx_SET (VOIDmode
, dest
, x
));
9932 x
= gen_rtx_IOR (vmode
, dest
, scratch
);
9933 emit_insn (gen_rtx_SET (VOIDmode
, dest
, x
));
/* Expand conditional increment or decrement using adc/sbb instructions.
   The default case using setcc followed by the conditional move can be
   done by generic code.  */
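/* For instance (sketch only), with x preloaded with y, an unsigned
   "x = y + (a < b)" can be emitted as

        cmpl    b, a            AT&T syntax; carry set when a < b
        adcl    $0, x           x = y + carry

   and "x = y - (a < b)" uses sbb in the same way.  */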
9942 ix86_expand_int_addcc (rtx operands
[])
9944 enum rtx_code code
= GET_CODE (operands
[1]);
9946 rtx val
= const0_rtx
;
9948 enum machine_mode mode
= GET_MODE (operands
[0]);
9950 if (operands
[3] != const1_rtx
9951 && operands
[3] != constm1_rtx
)
9953 if (!ix86_expand_carry_flag_compare (code
, ix86_compare_op0
,
9954 ix86_compare_op1
, &compare_op
))
9956 code
= GET_CODE (compare_op
);
9958 if (GET_MODE (XEXP (compare_op
, 0)) == CCFPmode
9959 || GET_MODE (XEXP (compare_op
, 0)) == CCFPUmode
)
9962 code
= ix86_fp_compare_code_to_integer (code
);
9969 PUT_CODE (compare_op
,
9970 reverse_condition_maybe_unordered
9971 (GET_CODE (compare_op
)));
9973 PUT_CODE (compare_op
, reverse_condition (GET_CODE (compare_op
)));
9975 PUT_MODE (compare_op
, mode
);
9977 /* Construct either adc or sbb insn. */
9978 if ((code
== LTU
) == (operands
[3] == constm1_rtx
))
9980 switch (GET_MODE (operands
[0]))
9983 emit_insn (gen_subqi3_carry (operands
[0], operands
[2], val
, compare_op
));
9986 emit_insn (gen_subhi3_carry (operands
[0], operands
[2], val
, compare_op
));
9989 emit_insn (gen_subsi3_carry (operands
[0], operands
[2], val
, compare_op
));
9992 emit_insn (gen_subdi3_carry_rex64 (operands
[0], operands
[2], val
, compare_op
));
10000 switch (GET_MODE (operands
[0]))
10003 emit_insn (gen_addqi3_carry (operands
[0], operands
[2], val
, compare_op
));
10006 emit_insn (gen_addhi3_carry (operands
[0], operands
[2], val
, compare_op
));
10009 emit_insn (gen_addsi3_carry (operands
[0], operands
[2], val
, compare_op
));
10012 emit_insn (gen_adddi3_carry_rex64 (operands
[0], operands
[2], val
, compare_op
));
10018 return 1; /* DONE */
/* Split operands 0 and 1 into SImode parts.  Similar to split_di, but
   works for floating point parameters and nonoffsettable memories.
   For pushes, it returns just stack offsets; the values will be saved
   in the right order.  At most three parts are generated.  */
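/* E.g. (illustrative): on a 32-bit target a DImode register pair splits
   into two consecutive SImode hard registers, an offsettable DFmode
   memory splits into SImode memories at offsets 0 and 4, and an XFmode
   constant splits into three SImode immediates.  */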
10028 ix86_split_to_parts (rtx operand
, rtx
*parts
, enum machine_mode mode
)
10033 size
= mode
==XFmode
? 3 : GET_MODE_SIZE (mode
) / 4;
10035 size
= (GET_MODE_SIZE (mode
) + 4) / 8;
10037 if (GET_CODE (operand
) == REG
&& MMX_REGNO_P (REGNO (operand
)))
10039 if (size
< 2 || size
> 3)
  /* Optimize constant pool references to immediates.  This is used by fp
     moves, which force all constants to memory to allow combining.  */
10044 if (GET_CODE (operand
) == MEM
&& MEM_READONLY_P (operand
))
10046 rtx tmp
= maybe_get_pool_constant (operand
);
10051 if (GET_CODE (operand
) == MEM
&& !offsettable_memref_p (operand
))
      /* The only non-offsettable memories we handle are pushes.  */
10054 if (! push_operand (operand
, VOIDmode
))
10057 operand
= copy_rtx (operand
);
10058 PUT_MODE (operand
, Pmode
);
10059 parts
[0] = parts
[1] = parts
[2] = operand
;
10061 else if (!TARGET_64BIT
)
10063 if (mode
== DImode
)
10064 split_di (&operand
, 1, &parts
[0], &parts
[1]);
10067 if (REG_P (operand
))
10069 if (!reload_completed
)
10071 parts
[0] = gen_rtx_REG (SImode
, REGNO (operand
) + 0);
10072 parts
[1] = gen_rtx_REG (SImode
, REGNO (operand
) + 1);
10074 parts
[2] = gen_rtx_REG (SImode
, REGNO (operand
) + 2);
10076 else if (offsettable_memref_p (operand
))
10078 operand
= adjust_address (operand
, SImode
, 0);
10079 parts
[0] = operand
;
10080 parts
[1] = adjust_address (operand
, SImode
, 4);
10082 parts
[2] = adjust_address (operand
, SImode
, 8);
10084 else if (GET_CODE (operand
) == CONST_DOUBLE
)
10089 REAL_VALUE_FROM_CONST_DOUBLE (r
, operand
);
10093 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r
, l
);
10094 parts
[2] = gen_int_mode (l
[2], SImode
);
10097 REAL_VALUE_TO_TARGET_DOUBLE (r
, l
);
10102 parts
[1] = gen_int_mode (l
[1], SImode
);
10103 parts
[0] = gen_int_mode (l
[0], SImode
);
10111 if (mode
== TImode
)
10112 split_ti (&operand
, 1, &parts
[0], &parts
[1]);
10113 if (mode
== XFmode
|| mode
== TFmode
)
10115 enum machine_mode upper_mode
= mode
==XFmode
? SImode
: DImode
;
10116 if (REG_P (operand
))
10118 if (!reload_completed
)
10120 parts
[0] = gen_rtx_REG (DImode
, REGNO (operand
) + 0);
10121 parts
[1] = gen_rtx_REG (upper_mode
, REGNO (operand
) + 1);
10123 else if (offsettable_memref_p (operand
))
10125 operand
= adjust_address (operand
, DImode
, 0);
10126 parts
[0] = operand
;
10127 parts
[1] = adjust_address (operand
, upper_mode
, 8);
10129 else if (GET_CODE (operand
) == CONST_DOUBLE
)
10134 REAL_VALUE_FROM_CONST_DOUBLE (r
, operand
);
10135 real_to_target (l
, &r
, mode
);
10137 /* Do not use shift by 32 to avoid warning on 32bit systems. */
10138 if (HOST_BITS_PER_WIDE_INT
>= 64)
10141 ((l
[0] & (((HOST_WIDE_INT
) 2 << 31) - 1))
10142 + ((((HOST_WIDE_INT
) l
[1]) << 31) << 1),
10145 parts
[0] = immed_double_const (l
[0], l
[1], DImode
);
10147 if (upper_mode
== SImode
)
10148 parts
[1] = gen_int_mode (l
[2], SImode
);
10149 else if (HOST_BITS_PER_WIDE_INT
>= 64)
10152 ((l
[2] & (((HOST_WIDE_INT
) 2 << 31) - 1))
10153 + ((((HOST_WIDE_INT
) l
[3]) << 31) << 1),
10156 parts
[1] = immed_double_const (l
[2], l
[3], DImode
);
/* Emit insns to perform a move or push of DI, DF, and XF values.
   Return false when normal moves are needed; true when all required
   insns have been emitted.  Operands 2-4 contain the input values
   in the correct order; operands 5-7 contain the output values.  */
10172 ix86_split_long_move (rtx operands
[])
10177 int collisions
= 0;
10178 enum machine_mode mode
= GET_MODE (operands
[0]);
  /* The DFmode expanders may ask us to move a double.
     For a 64-bit target this is a single move.  By hiding the fact
     here we simplify the i386.md splitters.  */
10183 if (GET_MODE_SIZE (GET_MODE (operands
[0])) == 8 && TARGET_64BIT
)
  /* Optimize constant pool references to immediates.  This is used by
     fp moves, which force all constants to memory to allow combining.  */
10188 if (GET_CODE (operands
[1]) == MEM
10189 && GET_CODE (XEXP (operands
[1], 0)) == SYMBOL_REF
10190 && CONSTANT_POOL_ADDRESS_P (XEXP (operands
[1], 0)))
10191 operands
[1] = get_pool_constant (XEXP (operands
[1], 0));
10192 if (push_operand (operands
[0], VOIDmode
))
10194 operands
[0] = copy_rtx (operands
[0]);
10195 PUT_MODE (operands
[0], Pmode
);
10198 operands
[0] = gen_lowpart (DImode
, operands
[0]);
10199 operands
[1] = gen_lowpart (DImode
, operands
[1]);
10200 emit_move_insn (operands
[0], operands
[1]);
10204 /* The only non-offsettable memory we handle is push. */
10205 if (push_operand (operands
[0], VOIDmode
))
10207 else if (GET_CODE (operands
[0]) == MEM
10208 && ! offsettable_memref_p (operands
[0]))
10211 nparts
= ix86_split_to_parts (operands
[1], part
[1], GET_MODE (operands
[0]));
10212 ix86_split_to_parts (operands
[0], part
[0], GET_MODE (operands
[0]));
  /* When emitting a push, take care of source operands on the stack.  */
10215 if (push
&& GET_CODE (operands
[1]) == MEM
10216 && reg_overlap_mentioned_p (stack_pointer_rtx
, operands
[1]))
10219 part
[1][1] = change_address (part
[1][1], GET_MODE (part
[1][1]),
10220 XEXP (part
[1][2], 0));
10221 part
[1][0] = change_address (part
[1][0], GET_MODE (part
[1][0]),
10222 XEXP (part
[1][1], 0));
  /* We need to do the copy in the right order in case an address register
     of the source overlaps the destination.  */
10227 if (REG_P (part
[0][0]) && GET_CODE (part
[1][0]) == MEM
)
10229 if (reg_overlap_mentioned_p (part
[0][0], XEXP (part
[1][0], 0)))
10231 if (reg_overlap_mentioned_p (part
[0][1], XEXP (part
[1][0], 0)))
10234 && reg_overlap_mentioned_p (part
[0][2], XEXP (part
[1][0], 0)))
10237 /* Collision in the middle part can be handled by reordering. */
10238 if (collisions
== 1 && nparts
== 3
10239 && reg_overlap_mentioned_p (part
[0][1], XEXP (part
[1][0], 0)))
10242 tmp
= part
[0][1]; part
[0][1] = part
[0][2]; part
[0][2] = tmp
;
10243 tmp
= part
[1][1]; part
[1][1] = part
[1][2]; part
[1][2] = tmp
;
      /* If there are more collisions, we can't handle them by reordering.
         Do an lea to the last part and use only one colliding move.  */
10248 else if (collisions
> 1)
10254 base
= part
[0][nparts
- 1];
10256 /* Handle the case when the last part isn't valid for lea.
10257 Happens in 64-bit mode storing the 12-byte XFmode. */
10258 if (GET_MODE (base
) != Pmode
)
10259 base
= gen_rtx_REG (Pmode
, REGNO (base
));
10261 emit_insn (gen_rtx_SET (VOIDmode
, base
, XEXP (part
[1][0], 0)));
10262 part
[1][0] = replace_equiv_address (part
[1][0], base
);
10263 part
[1][1] = replace_equiv_address (part
[1][1],
10264 plus_constant (base
, UNITS_PER_WORD
));
10266 part
[1][2] = replace_equiv_address (part
[1][2],
10267 plus_constant (base
, 8));
10277 if (TARGET_128BIT_LONG_DOUBLE
&& mode
== XFmode
)
10278 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, GEN_INT (-4)));
10279 emit_move_insn (part
[0][2], part
[1][2]);
      /* In 64-bit mode we don't have a 32-bit push available.  In case this
         is a register, it is OK - we will just use the larger counterpart.
         We also retype memory - these come from an attempt to avoid the REX
         prefix on moving the second half of a TFmode value.  */
10288 if (GET_MODE (part
[1][1]) == SImode
)
10290 if (GET_CODE (part
[1][1]) == MEM
)
10291 part
[1][1] = adjust_address (part
[1][1], DImode
, 0);
10292 else if (REG_P (part
[1][1]))
10293 part
[1][1] = gen_rtx_REG (DImode
, REGNO (part
[1][1]));
10296 if (GET_MODE (part
[1][0]) == SImode
)
10297 part
[1][0] = part
[1][1];
10300 emit_move_insn (part
[0][1], part
[1][1]);
10301 emit_move_insn (part
[0][0], part
[1][0]);
  /* Choose the correct order so as not to overwrite the source before it
     is copied.  */
10306 if ((REG_P (part
[0][0])
10307 && REG_P (part
[1][1])
10308 && (REGNO (part
[0][0]) == REGNO (part
[1][1])
10310 && REGNO (part
[0][0]) == REGNO (part
[1][2]))))
10312 && reg_overlap_mentioned_p (part
[0][0], XEXP (part
[1][0], 0))))
10316 operands
[2] = part
[0][2];
10317 operands
[3] = part
[0][1];
10318 operands
[4] = part
[0][0];
10319 operands
[5] = part
[1][2];
10320 operands
[6] = part
[1][1];
10321 operands
[7] = part
[1][0];
10325 operands
[2] = part
[0][1];
10326 operands
[3] = part
[0][0];
10327 operands
[5] = part
[1][1];
10328 operands
[6] = part
[1][0];
10335 operands
[2] = part
[0][0];
10336 operands
[3] = part
[0][1];
10337 operands
[4] = part
[0][2];
10338 operands
[5] = part
[1][0];
10339 operands
[6] = part
[1][1];
10340 operands
[7] = part
[1][2];
10344 operands
[2] = part
[0][0];
10345 operands
[3] = part
[0][1];
10346 operands
[5] = part
[1][0];
10347 operands
[6] = part
[1][1];
10351 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
10354 if (GET_CODE (operands
[5]) == CONST_INT
10355 && operands
[5] != const0_rtx
10356 && REG_P (operands
[2]))
10358 if (GET_CODE (operands
[6]) == CONST_INT
10359 && INTVAL (operands
[6]) == INTVAL (operands
[5]))
10360 operands
[6] = operands
[2];
10363 && GET_CODE (operands
[7]) == CONST_INT
10364 && INTVAL (operands
[7]) == INTVAL (operands
[5]))
10365 operands
[7] = operands
[2];
10369 && GET_CODE (operands
[6]) == CONST_INT
10370 && operands
[6] != const0_rtx
10371 && REG_P (operands
[3])
10372 && GET_CODE (operands
[7]) == CONST_INT
10373 && INTVAL (operands
[7]) == INTVAL (operands
[6]))
10374 operands
[7] = operands
[3];
10377 emit_move_insn (operands
[2], operands
[5]);
10378 emit_move_insn (operands
[3], operands
[6]);
10380 emit_move_insn (operands
[4], operands
[7]);
/* Helper function of ix86_split_ashldi used to generate an SImode
   left shift by a constant, either using a single shift or
   a sequence of add instructions.  */
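/* E.g. (with made-up costs): if ix86_cost->add is 1 and
   ix86_cost->shift_const is 4, a left shift by 3 becomes three
   "addl %reg, %reg" instructions (each add doubles the value), while a
   shift by 5 keeps the single "shll $5" form.  */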
10390 ix86_expand_ashlsi3_const (rtx operand
, int count
)
10393 emit_insn (gen_addsi3 (operand
, operand
, operand
));
10394 else if (!optimize_size
10395 && count
* ix86_cost
->add
<= ix86_cost
->shift_const
)
10398 for (i
=0; i
<count
; i
++)
10399 emit_insn (gen_addsi3 (operand
, operand
, operand
));
10402 emit_insn (gen_ashlsi3 (operand
, operand
, GEN_INT (count
)));
10406 ix86_split_ashldi (rtx
*operands
, rtx scratch
)
10408 rtx low
[2], high
[2];
10411 if (GET_CODE (operands
[2]) == CONST_INT
)
10413 split_di (operands
, 2, low
, high
);
10414 count
= INTVAL (operands
[2]) & 63;
10418 emit_move_insn (high
[0], low
[1]);
10419 emit_move_insn (low
[0], const0_rtx
);
10422 ix86_expand_ashlsi3_const (high
[0], count
- 32);
10426 if (!rtx_equal_p (operands
[0], operands
[1]))
10427 emit_move_insn (operands
[0], operands
[1]);
10428 emit_insn (gen_x86_shld_1 (high
[0], low
[0], GEN_INT (count
)));
10429 ix86_expand_ashlsi3_const (low
[0], count
);
10434 split_di (operands
, 1, low
, high
);
10436 if (operands
[1] == const1_rtx
)
      /* Assuming we've chosen QImode-capable registers, then 1LL << N
         can be done with two 32-bit shifts, no branches, no cmoves.  */
10440 if (ANY_QI_REG_P (low
[0]) && ANY_QI_REG_P (high
[0]))
10442 rtx s
, d
, flags
= gen_rtx_REG (CCZmode
, FLAGS_REG
);
10444 ix86_expand_clear (low
[0]);
10445 ix86_expand_clear (high
[0]);
10446 emit_insn (gen_testqi_ccz_1 (operands
[2], GEN_INT (32)));
10448 d
= gen_lowpart (QImode
, low
[0]);
10449 d
= gen_rtx_STRICT_LOW_PART (VOIDmode
, d
);
10450 s
= gen_rtx_EQ (QImode
, flags
, const0_rtx
);
10451 emit_insn (gen_rtx_SET (VOIDmode
, d
, s
));
10453 d
= gen_lowpart (QImode
, high
[0]);
10454 d
= gen_rtx_STRICT_LOW_PART (VOIDmode
, d
);
10455 s
= gen_rtx_NE (QImode
, flags
, const0_rtx
);
10456 emit_insn (gen_rtx_SET (VOIDmode
, d
, s
));
          /* Otherwise, we can get the same results by manually performing
             a bit extract operation on bit 5, and then performing the two
             shifts.  The two methods of getting 0/1 into low/high are
             exactly the same size.  Avoiding the shift in the bit extract
             case helps pentium4 a bit; no one else seems to care much
             either way.  */
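          /* Sketch of that idea: for 1LL << n,

                 high = (n >> 5) & 1;      is n >= 32 ?
                 low  = high ^ 1;
                 low <<= n;  high <<= n;   hardware uses n mod 32

             which yields the correct 64-bit power of two without a
             branch.  */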
10468 if (TARGET_PARTIAL_REG_STALL
&& !optimize_size
)
10469 x
= gen_rtx_ZERO_EXTEND (SImode
, operands
[2]);
10471 x
= gen_lowpart (SImode
, operands
[2]);
10472 emit_insn (gen_rtx_SET (VOIDmode
, high
[0], x
));
10474 emit_insn (gen_lshrsi3 (high
[0], high
[0], GEN_INT (5)));
10475 emit_insn (gen_andsi3 (high
[0], high
[0], GEN_INT (1)));
10476 emit_move_insn (low
[0], high
[0]);
10477 emit_insn (gen_xorsi3 (low
[0], low
[0], GEN_INT (1)));
10480 emit_insn (gen_ashlsi3 (low
[0], low
[0], operands
[2]));
10481 emit_insn (gen_ashlsi3 (high
[0], high
[0], operands
[2]));
10485 if (operands
[1] == constm1_rtx
)
      /* For -1LL << N, we can avoid the shld instruction, because we
         know that we're shifting 0...31 ones into a -1.  */
10489 emit_move_insn (low
[0], constm1_rtx
);
10491 emit_move_insn (high
[0], low
[0]);
10493 emit_move_insn (high
[0], constm1_rtx
);
10497 if (!rtx_equal_p (operands
[0], operands
[1]))
10498 emit_move_insn (operands
[0], operands
[1]);
10500 split_di (operands
, 1, low
, high
);
10501 emit_insn (gen_x86_shld_1 (high
[0], low
[0], operands
[2]));
10504 emit_insn (gen_ashlsi3 (low
[0], low
[0], operands
[2]));
10506 if (TARGET_CMOVE
&& scratch
)
10508 ix86_expand_clear (scratch
);
10509 emit_insn (gen_x86_shift_adj_1 (high
[0], low
[0], operands
[2], scratch
));
10512 emit_insn (gen_x86_shift_adj_2 (high
[0], low
[0], operands
[2]));
10516 ix86_split_ashrdi (rtx
*operands
, rtx scratch
)
10518 rtx low
[2], high
[2];
10521 if (GET_CODE (operands
[2]) == CONST_INT
)
10523 split_di (operands
, 2, low
, high
);
10524 count
= INTVAL (operands
[2]) & 63;
10528 emit_move_insn (high
[0], high
[1]);
10529 emit_insn (gen_ashrsi3 (high
[0], high
[0], GEN_INT (31)));
10530 emit_move_insn (low
[0], high
[0]);
10533 else if (count
>= 32)
10535 emit_move_insn (low
[0], high
[1]);
10536 emit_move_insn (high
[0], low
[0]);
10537 emit_insn (gen_ashrsi3 (high
[0], high
[0], GEN_INT (31)));
10539 emit_insn (gen_ashrsi3 (low
[0], low
[0], GEN_INT (count
- 32)));
10543 if (!rtx_equal_p (operands
[0], operands
[1]))
10544 emit_move_insn (operands
[0], operands
[1]);
10545 emit_insn (gen_x86_shrd_1 (low
[0], high
[0], GEN_INT (count
)));
10546 emit_insn (gen_ashrsi3 (high
[0], high
[0], GEN_INT (count
)));
10551 if (!rtx_equal_p (operands
[0], operands
[1]))
10552 emit_move_insn (operands
[0], operands
[1]);
10554 split_di (operands
, 1, low
, high
);
10556 emit_insn (gen_x86_shrd_1 (low
[0], high
[0], operands
[2]));
10557 emit_insn (gen_ashrsi3 (high
[0], high
[0], operands
[2]));
10559 if (TARGET_CMOVE
&& scratch
)
10561 emit_move_insn (scratch
, high
[0]);
10562 emit_insn (gen_ashrsi3 (scratch
, scratch
, GEN_INT (31)));
10563 emit_insn (gen_x86_shift_adj_1 (low
[0], high
[0], operands
[2],
10567 emit_insn (gen_x86_shift_adj_3 (low
[0], high
[0], operands
[2]));
10572 ix86_split_lshrdi (rtx
*operands
, rtx scratch
)
10574 rtx low
[2], high
[2];
10577 if (GET_CODE (operands
[2]) == CONST_INT
)
10579 split_di (operands
, 2, low
, high
);
10580 count
= INTVAL (operands
[2]) & 63;
10584 emit_move_insn (low
[0], high
[1]);
10585 ix86_expand_clear (high
[0]);
10588 emit_insn (gen_lshrsi3 (low
[0], low
[0], GEN_INT (count
- 32)));
10592 if (!rtx_equal_p (operands
[0], operands
[1]))
10593 emit_move_insn (operands
[0], operands
[1]);
10594 emit_insn (gen_x86_shrd_1 (low
[0], high
[0], GEN_INT (count
)));
10595 emit_insn (gen_lshrsi3 (high
[0], high
[0], GEN_INT (count
)));
10600 if (!rtx_equal_p (operands
[0], operands
[1]))
10601 emit_move_insn (operands
[0], operands
[1]);
10603 split_di (operands
, 1, low
, high
);
10605 emit_insn (gen_x86_shrd_1 (low
[0], high
[0], operands
[2]));
10606 emit_insn (gen_lshrsi3 (high
[0], high
[0], operands
[2]));
10608 /* Heh. By reversing the arguments, we can reuse this pattern. */
10609 if (TARGET_CMOVE
&& scratch
)
10611 ix86_expand_clear (scratch
);
10612 emit_insn (gen_x86_shift_adj_1 (low
[0], high
[0], operands
[2],
10616 emit_insn (gen_x86_shift_adj_2 (low
[0], high
[0], operands
[2]));
/* Helper function for the string operations below.  Test whether VARIABLE
   is aligned to VALUE bytes.  If it is, jump to the returned label.  */
10623 ix86_expand_aligntest (rtx variable
, int value
)
10625 rtx label
= gen_label_rtx ();
10626 rtx tmpcount
= gen_reg_rtx (GET_MODE (variable
));
10627 if (GET_MODE (variable
) == DImode
)
10628 emit_insn (gen_anddi3 (tmpcount
, variable
, GEN_INT (value
)));
10630 emit_insn (gen_andsi3 (tmpcount
, variable
, GEN_INT (value
)));
10631 emit_cmp_and_jump_insns (tmpcount
, const0_rtx
, EQ
, 0, GET_MODE (variable
),
10636 /* Adjust COUNTER by the VALUE. */
10638 ix86_adjust_counter (rtx countreg
, HOST_WIDE_INT value
)
10640 if (GET_MODE (countreg
) == DImode
)
10641 emit_insn (gen_adddi3 (countreg
, countreg
, GEN_INT (-value
)));
10643 emit_insn (gen_addsi3 (countreg
, countreg
, GEN_INT (-value
)));
/* Zero-extend the possibly SImode EXP to a Pmode register.  */
10648 ix86_zero_extend_to_Pmode (rtx exp
)
10651 if (GET_MODE (exp
) == VOIDmode
)
10652 return force_reg (Pmode
, exp
);
10653 if (GET_MODE (exp
) == Pmode
)
10654 return copy_to_mode_reg (Pmode
, exp
);
10655 r
= gen_reg_rtx (Pmode
);
10656 emit_insn (gen_zero_extendsidi2 (r
, exp
));
10660 /* Expand string move (memcpy) operation. Use i386 string operations when
10661 profitable. expand_clrmem contains similar code. */
10663 ix86_expand_movmem (rtx dst
, rtx src
, rtx count_exp
, rtx align_exp
)
10665 rtx srcreg
, destreg
, countreg
, srcexp
, destexp
;
10666 enum machine_mode counter_mode
;
10667 HOST_WIDE_INT align
= 0;
10668 unsigned HOST_WIDE_INT count
= 0;
10670 if (GET_CODE (align_exp
) == CONST_INT
)
10671 align
= INTVAL (align_exp
);
10673 /* Can't use any of this if the user has appropriated esi or edi. */
10674 if (global_regs
[4] || global_regs
[5])
10677 /* This simple hack avoids all inlining code and simplifies code below. */
10678 if (!TARGET_ALIGN_STRINGOPS
)
10681 if (GET_CODE (count_exp
) == CONST_INT
)
10683 count
= INTVAL (count_exp
);
10684 if (!TARGET_INLINE_ALL_STRINGOPS
&& count
> 64)
  /* Figure out the proper mode for the counter.  For 32 bits it is always
     SImode; for 64 bits use SImode when possible, otherwise DImode.
     Set count to the number of bytes copied when known at compile time.  */
10692 || GET_MODE (count_exp
) == SImode
10693 || x86_64_zext_immediate_operand (count_exp
, VOIDmode
))
10694 counter_mode
= SImode
;
10696 counter_mode
= DImode
;
10698 if (counter_mode
!= SImode
&& counter_mode
!= DImode
)
10701 destreg
= copy_to_mode_reg (Pmode
, XEXP (dst
, 0));
10702 if (destreg
!= XEXP (dst
, 0))
10703 dst
= replace_equiv_address_nv (dst
, destreg
);
10704 srcreg
= copy_to_mode_reg (Pmode
, XEXP (src
, 0));
10705 if (srcreg
!= XEXP (src
, 0))
10706 src
= replace_equiv_address_nv (src
, srcreg
);
  /* When optimizing for size, emit a simple rep ; movsb instruction for
     counts not divisible by 4.  */
10711 if ((!optimize
|| optimize_size
) && (count
== 0 || (count
& 0x03)))
10713 emit_insn (gen_cld ());
10714 countreg
= ix86_zero_extend_to_Pmode (count_exp
);
10715 destexp
= gen_rtx_PLUS (Pmode
, destreg
, countreg
);
10716 srcexp
= gen_rtx_PLUS (Pmode
, srcreg
, countreg
);
10717 emit_insn (gen_rep_mov (destreg
, dst
, srcreg
, src
, countreg
,
  /* For constant aligned (or small unaligned) copies use rep movsl
     followed by code copying the rest.  For PentiumPro ensure 8 byte
     alignment to allow rep movsl acceleration.  */
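  /* For example (not the literal insn stream): a 23-byte copy with
     4-byte alignment on a 32-bit target becomes

         movl $5, %ecx ; rep movsl      20 bytes
         movsw                           2 bytes
         movsb                           1 byte

     i.e. one rep string operation plus a short fixed tail.  */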
10725 else if (count
!= 0
10727 || (!TARGET_PENTIUMPRO
&& !TARGET_64BIT
&& align
>= 4)
10728 || optimize_size
|| count
< (unsigned int) 64))
10730 unsigned HOST_WIDE_INT offset
= 0;
10731 int size
= TARGET_64BIT
&& !optimize_size
? 8 : 4;
10732 rtx srcmem
, dstmem
;
10734 emit_insn (gen_cld ());
10735 if (count
& ~(size
- 1))
10737 countreg
= copy_to_mode_reg (counter_mode
,
10738 GEN_INT ((count
>> (size
== 4 ? 2 : 3))
10739 & (TARGET_64BIT
? -1 : 0x3fffffff)));
10740 countreg
= ix86_zero_extend_to_Pmode (countreg
);
10742 destexp
= gen_rtx_ASHIFT (Pmode
, countreg
,
10743 GEN_INT (size
== 4 ? 2 : 3));
10744 srcexp
= gen_rtx_PLUS (Pmode
, destexp
, srcreg
);
10745 destexp
= gen_rtx_PLUS (Pmode
, destexp
, destreg
);
10747 emit_insn (gen_rep_mov (destreg
, dst
, srcreg
, src
,
10748 countreg
, destexp
, srcexp
));
10749 offset
= count
& ~(size
- 1);
10751 if (size
== 8 && (count
& 0x04))
10753 srcmem
= adjust_automodify_address_nv (src
, SImode
, srcreg
,
10755 dstmem
= adjust_automodify_address_nv (dst
, SImode
, destreg
,
10757 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10762 srcmem
= adjust_automodify_address_nv (src
, HImode
, srcreg
,
10764 dstmem
= adjust_automodify_address_nv (dst
, HImode
, destreg
,
10766 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10771 srcmem
= adjust_automodify_address_nv (src
, QImode
, srcreg
,
10773 dstmem
= adjust_automodify_address_nv (dst
, QImode
, destreg
,
10775 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
  /* The generic code based on the glibc implementation:
     - align destination to 4 bytes (8 byte alignment is used for
       PentiumPro, allowing accelerated copying there)
     - copy the data using rep movsl
     - copy the rest.  */
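  /* Roughly (an outline, not the exact insn stream):

         if (dst & 1) { copy 1 byte;  count -= 1; }
         if (dst & 2) { copy 2 bytes; count -= 2; }
         if (dst & 4) { copy 4 bytes; count -= 4; }   (8-byte variant)
         ecx = count >> 2;  rep movsl;
         copy the remaining 0..3 (or 0..7) bytes.  */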
10787 rtx srcmem
, dstmem
;
10788 int desired_alignment
= (TARGET_PENTIUMPRO
10789 && (count
== 0 || count
>= (unsigned int) 260)
10790 ? 8 : UNITS_PER_WORD
);
10791 /* Get rid of MEM_OFFSETs, they won't be accurate. */
10792 dst
= change_address (dst
, BLKmode
, destreg
);
10793 src
= change_address (src
, BLKmode
, srcreg
);
      /* In case we don't know anything about the alignment, default to
         the library version, since it is usually equally fast and results
         in shorter code.

         Also emit a call when we know that the count is large and call
         overhead will not be important.  */
10801 if (!TARGET_INLINE_ALL_STRINGOPS
10802 && (align
< UNITS_PER_WORD
|| !TARGET_REP_MOVL_OPTIMAL
))
10805 if (TARGET_SINGLE_STRINGOP
)
10806 emit_insn (gen_cld ());
10808 countreg2
= gen_reg_rtx (Pmode
);
10809 countreg
= copy_to_mode_reg (counter_mode
, count_exp
);
      /* We don't use loops to align destination and to copy parts smaller
         than 4 bytes, because gcc is able to optimize such code better (in
         case the destination or the count really is aligned, gcc is often
         able to predict the branches) and also it is friendlier to the
         hardware branch prediction.

         Using loops is beneficial for the generic case, because we can
         handle small counts using the loops.  Many CPUs (such as Athlon)
         have large REP prefix setup costs.

         This is quite costly.  Maybe we can revisit this decision later or
         add some customizability to this code.  */
10824 if (count
== 0 && align
< desired_alignment
)
10826 label
= gen_label_rtx ();
10827 emit_cmp_and_jump_insns (countreg
, GEN_INT (desired_alignment
- 1),
10828 LEU
, 0, counter_mode
, 1, label
);
10832 rtx label
= ix86_expand_aligntest (destreg
, 1);
10833 srcmem
= change_address (src
, QImode
, srcreg
);
10834 dstmem
= change_address (dst
, QImode
, destreg
);
10835 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10836 ix86_adjust_counter (countreg
, 1);
10837 emit_label (label
);
10838 LABEL_NUSES (label
) = 1;
10842 rtx label
= ix86_expand_aligntest (destreg
, 2);
10843 srcmem
= change_address (src
, HImode
, srcreg
);
10844 dstmem
= change_address (dst
, HImode
, destreg
);
10845 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10846 ix86_adjust_counter (countreg
, 2);
10847 emit_label (label
);
10848 LABEL_NUSES (label
) = 1;
10850 if (align
<= 4 && desired_alignment
> 4)
10852 rtx label
= ix86_expand_aligntest (destreg
, 4);
10853 srcmem
= change_address (src
, SImode
, srcreg
);
10854 dstmem
= change_address (dst
, SImode
, destreg
);
10855 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10856 ix86_adjust_counter (countreg
, 4);
10857 emit_label (label
);
10858 LABEL_NUSES (label
) = 1;
10861 if (label
&& desired_alignment
> 4 && !TARGET_64BIT
)
10863 emit_label (label
);
10864 LABEL_NUSES (label
) = 1;
10867 if (!TARGET_SINGLE_STRINGOP
)
10868 emit_insn (gen_cld ());
10871 emit_insn (gen_lshrdi3 (countreg2
, ix86_zero_extend_to_Pmode (countreg
),
10873 destexp
= gen_rtx_ASHIFT (Pmode
, countreg2
, GEN_INT (3));
10877 emit_insn (gen_lshrsi3 (countreg2
, countreg
, const2_rtx
));
10878 destexp
= gen_rtx_ASHIFT (Pmode
, countreg2
, const2_rtx
);
10880 srcexp
= gen_rtx_PLUS (Pmode
, destexp
, srcreg
);
10881 destexp
= gen_rtx_PLUS (Pmode
, destexp
, destreg
);
10882 emit_insn (gen_rep_mov (destreg
, dst
, srcreg
, src
,
10883 countreg2
, destexp
, srcexp
));
10887 emit_label (label
);
10888 LABEL_NUSES (label
) = 1;
10890 if (TARGET_64BIT
&& align
> 4 && count
!= 0 && (count
& 4))
10892 srcmem
= change_address (src
, SImode
, srcreg
);
10893 dstmem
= change_address (dst
, SImode
, destreg
);
10894 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10896 if ((align
<= 4 || count
== 0) && TARGET_64BIT
)
10898 rtx label
= ix86_expand_aligntest (countreg
, 4);
10899 srcmem
= change_address (src
, SImode
, srcreg
);
10900 dstmem
= change_address (dst
, SImode
, destreg
);
10901 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10902 emit_label (label
);
10903 LABEL_NUSES (label
) = 1;
10905 if (align
> 2 && count
!= 0 && (count
& 2))
10907 srcmem
= change_address (src
, HImode
, srcreg
);
10908 dstmem
= change_address (dst
, HImode
, destreg
);
10909 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10911 if (align
<= 2 || count
== 0)
10913 rtx label
= ix86_expand_aligntest (countreg
, 2);
10914 srcmem
= change_address (src
, HImode
, srcreg
);
10915 dstmem
= change_address (dst
, HImode
, destreg
);
10916 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10917 emit_label (label
);
10918 LABEL_NUSES (label
) = 1;
10920 if (align
> 1 && count
!= 0 && (count
& 1))
10922 srcmem
= change_address (src
, QImode
, srcreg
);
10923 dstmem
= change_address (dst
, QImode
, destreg
);
10924 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10926 if (align
<= 1 || count
== 0)
10928 rtx label
= ix86_expand_aligntest (countreg
, 1);
10929 srcmem
= change_address (src
, QImode
, srcreg
);
10930 dstmem
= change_address (dst
, QImode
, destreg
);
10931 emit_insn (gen_strmov (destreg
, dstmem
, srcreg
, srcmem
));
10932 emit_label (label
);
10933 LABEL_NUSES (label
) = 1;
10940 /* Expand string clear operation (bzero). Use i386 string operations when
10941 profitable. expand_movmem contains similar code. */
10943 ix86_expand_clrmem (rtx dst
, rtx count_exp
, rtx align_exp
)
10945 rtx destreg
, zeroreg
, countreg
, destexp
;
10946 enum machine_mode counter_mode
;
10947 HOST_WIDE_INT align
= 0;
10948 unsigned HOST_WIDE_INT count
= 0;
10950 if (GET_CODE (align_exp
) == CONST_INT
)
10951 align
= INTVAL (align_exp
);
10953 /* Can't use any of this if the user has appropriated esi. */
10954 if (global_regs
[4])
10957 /* This simple hack avoids all inlining code and simplifies code below. */
10958 if (!TARGET_ALIGN_STRINGOPS
)
10961 if (GET_CODE (count_exp
) == CONST_INT
)
10963 count
= INTVAL (count_exp
);
10964 if (!TARGET_INLINE_ALL_STRINGOPS
&& count
> 64)
10967 /* Figure out proper mode for counter. For 32bits it is always SImode,
10968 for 64bits use SImode when possible, otherwise DImode.
10969 Set count to number of bytes copied when known at compile time. */
10971 || GET_MODE (count_exp
) == SImode
10972 || x86_64_zext_immediate_operand (count_exp
, VOIDmode
))
10973 counter_mode
= SImode
;
10975 counter_mode
= DImode
;
10977 destreg
= copy_to_mode_reg (Pmode
, XEXP (dst
, 0));
10978 if (destreg
!= XEXP (dst
, 0))
10979 dst
= replace_equiv_address_nv (dst
, destreg
);
  /* When optimizing for size, emit a simple rep ; movsb instruction for
     counts not divisible by 4.  The movl $N, %ecx; rep; stosb
     sequence is 7 bytes long, so if optimizing for size and count is
     small enough that some stosl, stosw and stosb instructions without
     rep are shorter, fall back into the next if.  */
10988 if ((!optimize
|| optimize_size
)
10991 && (!optimize_size
|| (count
& 0x03) + (count
>> 2) > 7))))
10993 emit_insn (gen_cld ());
10995 countreg
= ix86_zero_extend_to_Pmode (count_exp
);
10996 zeroreg
= copy_to_mode_reg (QImode
, const0_rtx
);
10997 destexp
= gen_rtx_PLUS (Pmode
, destreg
, countreg
);
10998 emit_insn (gen_rep_stos (destreg
, countreg
, dst
, zeroreg
, destexp
));
11000 else if (count
!= 0
11002 || (!TARGET_PENTIUMPRO
&& !TARGET_64BIT
&& align
>= 4)
11003 || optimize_size
|| count
< (unsigned int) 64))
11005 int size
= TARGET_64BIT
&& !optimize_size
? 8 : 4;
11006 unsigned HOST_WIDE_INT offset
= 0;
11008 emit_insn (gen_cld ());
11010 zeroreg
= copy_to_mode_reg (size
== 4 ? SImode
: DImode
, const0_rtx
);
11011 if (count
& ~(size
- 1))
11013 unsigned HOST_WIDE_INT repcount
;
11014 unsigned int max_nonrep
;
11016 repcount
= count
>> (size
== 4 ? 2 : 3);
11018 repcount
&= 0x3fffffff;
          /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
             movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
             bytes.  In both cases the latter seems to be faster for small
             values of N.  */
          max_nonrep = size == 4 ? 7 : 4;
= size
== 4 ? 7 : 4;
11025 if (!optimize_size
)
11028 case PROCESSOR_PENTIUM4
:
11029 case PROCESSOR_NOCONA
:
11036 if (repcount
<= max_nonrep
)
11037 while (repcount
-- > 0)
11039 rtx mem
= adjust_automodify_address_nv (dst
,
11040 GET_MODE (zeroreg
),
11042 emit_insn (gen_strset (destreg
, mem
, zeroreg
));
11047 countreg
= copy_to_mode_reg (counter_mode
, GEN_INT (repcount
));
11048 countreg
= ix86_zero_extend_to_Pmode (countreg
);
11049 destexp
= gen_rtx_ASHIFT (Pmode
, countreg
,
11050 GEN_INT (size
== 4 ? 2 : 3));
11051 destexp
= gen_rtx_PLUS (Pmode
, destexp
, destreg
);
11052 emit_insn (gen_rep_stos (destreg
, countreg
, dst
, zeroreg
,
11054 offset
= count
& ~(size
- 1);
11057 if (size
== 8 && (count
& 0x04))
11059 rtx mem
= adjust_automodify_address_nv (dst
, SImode
, destreg
,
11061 emit_insn (gen_strset (destreg
, mem
,
11062 gen_rtx_SUBREG (SImode
, zeroreg
, 0)));
11067 rtx mem
= adjust_automodify_address_nv (dst
, HImode
, destreg
,
11069 emit_insn (gen_strset (destreg
, mem
,
11070 gen_rtx_SUBREG (HImode
, zeroreg
, 0)));
11075 rtx mem
= adjust_automodify_address_nv (dst
, QImode
, destreg
,
11077 emit_insn (gen_strset (destreg
, mem
,
11078 gen_rtx_SUBREG (QImode
, zeroreg
, 0)));
11085 /* Compute desired alignment of the string operation. */
11086 int desired_alignment
= (TARGET_PENTIUMPRO
11087 && (count
== 0 || count
>= (unsigned int) 260)
11088 ? 8 : UNITS_PER_WORD
);
      /* In case we don't know anything about the alignment, default to
         the library version, since it is usually equally fast and results
         in shorter code.

         Also emit a call when we know that the count is large and call
         overhead will not be important.  */
11096 if (!TARGET_INLINE_ALL_STRINGOPS
11097 && (align
< UNITS_PER_WORD
|| !TARGET_REP_MOVL_OPTIMAL
))
11100 if (TARGET_SINGLE_STRINGOP
)
11101 emit_insn (gen_cld ());
11103 countreg2
= gen_reg_rtx (Pmode
);
11104 countreg
= copy_to_mode_reg (counter_mode
, count_exp
);
11105 zeroreg
= copy_to_mode_reg (Pmode
, const0_rtx
);
11106 /* Get rid of MEM_OFFSET, it won't be accurate. */
11107 dst
= change_address (dst
, BLKmode
, destreg
);
11109 if (count
== 0 && align
< desired_alignment
)
11111 label
= gen_label_rtx ();
11112 emit_cmp_and_jump_insns (countreg
, GEN_INT (desired_alignment
- 1),
11113 LEU
, 0, counter_mode
, 1, label
);
11117 rtx label
= ix86_expand_aligntest (destreg
, 1);
11118 emit_insn (gen_strset (destreg
, dst
,
11119 gen_rtx_SUBREG (QImode
, zeroreg
, 0)));
11120 ix86_adjust_counter (countreg
, 1);
11121 emit_label (label
);
11122 LABEL_NUSES (label
) = 1;
11126 rtx label
= ix86_expand_aligntest (destreg
, 2);
11127 emit_insn (gen_strset (destreg
, dst
,
11128 gen_rtx_SUBREG (HImode
, zeroreg
, 0)));
11129 ix86_adjust_counter (countreg
, 2);
11130 emit_label (label
);
11131 LABEL_NUSES (label
) = 1;
11133 if (align
<= 4 && desired_alignment
> 4)
11135 rtx label
= ix86_expand_aligntest (destreg
, 4);
11136 emit_insn (gen_strset (destreg
, dst
,
11138 ? gen_rtx_SUBREG (SImode
, zeroreg
, 0)
11140 ix86_adjust_counter (countreg
, 4);
11141 emit_label (label
);
11142 LABEL_NUSES (label
) = 1;
11145 if (label
&& desired_alignment
> 4 && !TARGET_64BIT
)
11147 emit_label (label
);
11148 LABEL_NUSES (label
) = 1;
11152 if (!TARGET_SINGLE_STRINGOP
)
11153 emit_insn (gen_cld ());
11156 emit_insn (gen_lshrdi3 (countreg2
, ix86_zero_extend_to_Pmode (countreg
),
11158 destexp
= gen_rtx_ASHIFT (Pmode
, countreg2
, GEN_INT (3));
11162 emit_insn (gen_lshrsi3 (countreg2
, countreg
, const2_rtx
));
11163 destexp
= gen_rtx_ASHIFT (Pmode
, countreg2
, const2_rtx
);
11165 destexp
= gen_rtx_PLUS (Pmode
, destexp
, destreg
);
11166 emit_insn (gen_rep_stos (destreg
, countreg2
, dst
, zeroreg
, destexp
));
11170 emit_label (label
);
11171 LABEL_NUSES (label
) = 1;
11174 if (TARGET_64BIT
&& align
> 4 && count
!= 0 && (count
& 4))
11175 emit_insn (gen_strset (destreg
, dst
,
11176 gen_rtx_SUBREG (SImode
, zeroreg
, 0)));
11177 if (TARGET_64BIT
&& (align
<= 4 || count
== 0))
11179 rtx label
= ix86_expand_aligntest (countreg
, 4);
11180 emit_insn (gen_strset (destreg
, dst
,
11181 gen_rtx_SUBREG (SImode
, zeroreg
, 0)));
11182 emit_label (label
);
11183 LABEL_NUSES (label
) = 1;
11185 if (align
> 2 && count
!= 0 && (count
& 2))
11186 emit_insn (gen_strset (destreg
, dst
,
11187 gen_rtx_SUBREG (HImode
, zeroreg
, 0)));
11188 if (align
<= 2 || count
== 0)
11190 rtx label
= ix86_expand_aligntest (countreg
, 2);
11191 emit_insn (gen_strset (destreg
, dst
,
11192 gen_rtx_SUBREG (HImode
, zeroreg
, 0)));
11193 emit_label (label
);
11194 LABEL_NUSES (label
) = 1;
11196 if (align
> 1 && count
!= 0 && (count
& 1))
11197 emit_insn (gen_strset (destreg
, dst
,
11198 gen_rtx_SUBREG (QImode
, zeroreg
, 0)));
11199 if (align
<= 1 || count
== 0)
11201 rtx label
= ix86_expand_aligntest (countreg
, 1);
11202 emit_insn (gen_strset (destreg
, dst
,
11203 gen_rtx_SUBREG (QImode
, zeroreg
, 0)));
11204 emit_label (label
);
11205 LABEL_NUSES (label
) = 1;
11211 /* Expand strlen. */
11213 ix86_expand_strlen (rtx out
, rtx src
, rtx eoschar
, rtx align
)
11215 rtx addr
, scratch1
, scratch2
, scratch3
, scratch4
;
  /* The generic case of the strlen expander is long.  Avoid expanding it
     unless TARGET_INLINE_ALL_STRINGOPS.  */
11220 if (TARGET_UNROLL_STRLEN
&& eoschar
== const0_rtx
&& optimize
> 1
11221 && !TARGET_INLINE_ALL_STRINGOPS
11223 && (GET_CODE (align
) != CONST_INT
|| INTVAL (align
) < 4))
11226 addr
= force_reg (Pmode
, XEXP (src
, 0));
11227 scratch1
= gen_reg_rtx (Pmode
);
11229 if (TARGET_UNROLL_STRLEN
&& eoschar
== const0_rtx
&& optimize
> 1
      /* Well it seems that some optimizer does not combine a call like
	 foo(strlen(bar), strlen(bar));
	 when the move and the subtraction are done here.  It does calculate
	 the length just once when these instructions are done inside of
	 output_strlen_unroll().  But I think since &bar[strlen(bar)] is
	 often used and I use one fewer register for the lifetime of
	 output_strlen_unroll() this is better.  */
11240 emit_move_insn (out
, addr
);
11242 ix86_expand_strlensi_unroll_1 (out
, src
, align
);
11244 /* strlensi_unroll_1 returns the address of the zero at the end of
11245 the string, like memchr(), so compute the length by subtracting
11246 the start address. */
11248 emit_insn (gen_subdi3 (out
, out
, addr
));
11250 emit_insn (gen_subsi3 (out
, out
, addr
));
11255 scratch2
= gen_reg_rtx (Pmode
);
11256 scratch3
= gen_reg_rtx (Pmode
);
11257 scratch4
= force_reg (Pmode
, constm1_rtx
);
11259 emit_move_insn (scratch3
, addr
);
11260 eoschar
= force_reg (QImode
, eoschar
);
11262 emit_insn (gen_cld ());
11263 src
= replace_equiv_address_nv (src
, scratch3
);
11265 /* If .md starts supporting :P, this can be done in .md. */
11266 unspec
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (4, src
, eoschar
, align
,
11267 scratch4
), UNSPEC_SCAS
);
11268 emit_insn (gen_strlenqi_1 (scratch1
, scratch3
, unspec
));
11271 emit_insn (gen_one_cmpldi2 (scratch2
, scratch1
));
11272 emit_insn (gen_adddi3 (out
, scratch2
, constm1_rtx
));
11276 emit_insn (gen_one_cmplsi2 (scratch2
, scratch1
));
11277 emit_insn (gen_addsi3 (out
, scratch2
, constm1_rtx
));
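      /* Why the one_cmpl/add pair recovers the length: repnz scasb starts
	 with %ecx = -1 and decrements it once per byte scanned, including
	 the terminating zero.  For a string of length L it therefore stops
	 with %ecx = -(L + 2), so ~%ecx = L + 1 and ~%ecx - 1 = L, which is
	 exactly what the two insns above compute into OUT.  */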
/* Expand the appropriate insns for doing strlen if not just doing
   repnz; scasb

   out = result, initialized with the start address
   align_rtx = alignment of the address.
   scratch = scratch register, initialized with the start address when
	not aligned, otherwise undefined

   This is just the body.  It needs the initializations mentioned above and
   some address computing at the end.  These things are done in i386.md.  */
11295 ix86_expand_strlensi_unroll_1 (rtx out
, rtx src
, rtx align_rtx
)
11299 rtx align_2_label
= NULL_RTX
;
11300 rtx align_3_label
= NULL_RTX
;
11301 rtx align_4_label
= gen_label_rtx ();
11302 rtx end_0_label
= gen_label_rtx ();
11304 rtx tmpreg
= gen_reg_rtx (SImode
);
11305 rtx scratch
= gen_reg_rtx (SImode
);
11309 if (GET_CODE (align_rtx
) == CONST_INT
)
11310 align
= INTVAL (align_rtx
);
11312 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
11314 /* Is there a known alignment and is it less than 4? */
11317 rtx scratch1
= gen_reg_rtx (Pmode
);
11318 emit_move_insn (scratch1
, out
);
11319 /* Is there a known alignment and is it not 2? */
11322 align_3_label
= gen_label_rtx (); /* Label when aligned to 3-byte */
11323 align_2_label
= gen_label_rtx (); /* Label when aligned to 2-byte */
11325 /* Leave just the 3 lower bits. */
11326 align_rtx
= expand_binop (Pmode
, and_optab
, scratch1
, GEN_INT (3),
11327 NULL_RTX
, 0, OPTAB_WIDEN
);
11329 emit_cmp_and_jump_insns (align_rtx
, const0_rtx
, EQ
, NULL
,
11330 Pmode
, 1, align_4_label
);
11331 emit_cmp_and_jump_insns (align_rtx
, const2_rtx
, EQ
, NULL
,
11332 Pmode
, 1, align_2_label
);
11333 emit_cmp_and_jump_insns (align_rtx
, const2_rtx
, GTU
, NULL
,
11334 Pmode
, 1, align_3_label
);
	  /* Since the alignment is 2, we have to check 2 or 0 bytes;
	     check whether it is aligned to a 4-byte boundary.  */
11341 align_rtx
= expand_binop (Pmode
, and_optab
, scratch1
, const2_rtx
,
11342 NULL_RTX
, 0, OPTAB_WIDEN
);
11344 emit_cmp_and_jump_insns (align_rtx
, const0_rtx
, EQ
, NULL
,
11345 Pmode
, 1, align_4_label
);
11348 mem
= change_address (src
, QImode
, out
);
  /* Now compare the bytes.  */

  /* Compare the first n unaligned bytes on a byte-by-byte basis.  */
11353 emit_cmp_and_jump_insns (mem
, const0_rtx
, EQ
, NULL
,
11354 QImode
, 1, end_0_label
);
11356 /* Increment the address. */
11358 emit_insn (gen_adddi3 (out
, out
, const1_rtx
));
11360 emit_insn (gen_addsi3 (out
, out
, const1_rtx
));
11362 /* Not needed with an alignment of 2 */
11365 emit_label (align_2_label
);
11367 emit_cmp_and_jump_insns (mem
, const0_rtx
, EQ
, NULL
, QImode
, 1,
11371 emit_insn (gen_adddi3 (out
, out
, const1_rtx
));
11373 emit_insn (gen_addsi3 (out
, out
, const1_rtx
));
11375 emit_label (align_3_label
);
11378 emit_cmp_and_jump_insns (mem
, const0_rtx
, EQ
, NULL
, QImode
, 1,
11382 emit_insn (gen_adddi3 (out
, out
, const1_rtx
));
11384 emit_insn (gen_addsi3 (out
, out
, const1_rtx
));
  /* Generate the loop to check 4 bytes at a time.  It is not a good idea
     to align this loop.  It gives only huge programs, but does not help
     to converge faster.  */
11390 emit_label (align_4_label
);
11392 mem
= change_address (src
, SImode
, out
);
11393 emit_move_insn (scratch
, mem
);
11395 emit_insn (gen_adddi3 (out
, out
, GEN_INT (4)));
11397 emit_insn (gen_addsi3 (out
, out
, GEN_INT (4)));
  /* This formula yields a nonzero result iff one of the bytes is zero.
     This saves three branches inside the loop and many cycles.  */

  emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
  emit_insn (gen_one_cmplsi2 (scratch, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
  emit_insn (gen_andsi3 (tmpreg, tmpreg,
			 gen_int_mode (0x80808080, SImode)));
  emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
			   align_4_label);
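  /* Illustration only (not used by the compiler): at the C level the test
     emitted above is the classic "does this word contain a zero byte"
     trick,

	static unsigned int
	word_has_zero_byte (unsigned int x)
	{
	  return (x - 0x01010101U) & ~x & 0x80808080U;
	}

     which is nonzero iff at least one of the four bytes of X is zero.  */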
11412 rtx reg
= gen_reg_rtx (SImode
);
11413 rtx reg2
= gen_reg_rtx (Pmode
);
11414 emit_move_insn (reg
, tmpreg
);
11415 emit_insn (gen_lshrsi3 (reg
, reg
, GEN_INT (16)));
11417 /* If zero is not in the first two bytes, move two bytes forward. */
11418 emit_insn (gen_testsi_ccno_1 (tmpreg
, GEN_INT (0x8080)));
11419 tmp
= gen_rtx_REG (CCNOmode
, FLAGS_REG
);
11420 tmp
= gen_rtx_EQ (VOIDmode
, tmp
, const0_rtx
);
11421 emit_insn (gen_rtx_SET (VOIDmode
, tmpreg
,
11422 gen_rtx_IF_THEN_ELSE (SImode
, tmp
,
11425 /* Emit lea manually to avoid clobbering of flags. */
11426 emit_insn (gen_rtx_SET (SImode
, reg2
,
11427 gen_rtx_PLUS (Pmode
, out
, const2_rtx
)));
11429 tmp
= gen_rtx_REG (CCNOmode
, FLAGS_REG
);
11430 tmp
= gen_rtx_EQ (VOIDmode
, tmp
, const0_rtx
);
11431 emit_insn (gen_rtx_SET (VOIDmode
, out
,
11432 gen_rtx_IF_THEN_ELSE (Pmode
, tmp
,
11439 rtx end_2_label
= gen_label_rtx ();
11440 /* Is zero in the first two bytes? */
11442 emit_insn (gen_testsi_ccno_1 (tmpreg
, GEN_INT (0x8080)));
11443 tmp
= gen_rtx_REG (CCNOmode
, FLAGS_REG
);
11444 tmp
= gen_rtx_NE (VOIDmode
, tmp
, const0_rtx
);
11445 tmp
= gen_rtx_IF_THEN_ELSE (VOIDmode
, tmp
,
11446 gen_rtx_LABEL_REF (VOIDmode
, end_2_label
),
11448 tmp
= emit_jump_insn (gen_rtx_SET (VOIDmode
, pc_rtx
, tmp
));
11449 JUMP_LABEL (tmp
) = end_2_label
;
11451 /* Not in the first two. Move two bytes forward. */
11452 emit_insn (gen_lshrsi3 (tmpreg
, tmpreg
, GEN_INT (16)));
11454 emit_insn (gen_adddi3 (out
, out
, const2_rtx
));
11456 emit_insn (gen_addsi3 (out
, out
, const2_rtx
));
11458 emit_label (end_2_label
);
11462 /* Avoid branch in fixing the byte. */
11463 tmpreg
= gen_lowpart (QImode
, tmpreg
);
11464 emit_insn (gen_addqi3_cc (tmpreg
, tmpreg
, tmpreg
));
11465 cmp
= gen_rtx_LTU (Pmode
, gen_rtx_REG (CCmode
, 17), const0_rtx
);
11467 emit_insn (gen_subdi3_carry_rex64 (out
, out
, GEN_INT (3), cmp
));
11469 emit_insn (gen_subsi3_carry (out
, out
, GEN_INT (3), cmp
));
11471 emit_label (end_0_label
);
11475 ix86_expand_call (rtx retval
, rtx fnaddr
, rtx callarg1
,
11476 rtx callarg2 ATTRIBUTE_UNUSED
,
11477 rtx pop
, int sibcall
)
11479 rtx use
= NULL
, call
;
11481 if (pop
== const0_rtx
)
11483 if (TARGET_64BIT
&& pop
)
11487 if (flag_pic
&& GET_CODE (XEXP (fnaddr
, 0)) == SYMBOL_REF
)
11488 fnaddr
= machopic_indirect_call_target (fnaddr
);
11490 /* Static functions and indirect calls don't need the pic register. */
11491 if (! TARGET_64BIT
&& flag_pic
11492 && GET_CODE (XEXP (fnaddr
, 0)) == SYMBOL_REF
11493 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr
, 0)))
11494 use_reg (&use
, pic_offset_table_rtx
);
11496 if (TARGET_64BIT
&& INTVAL (callarg2
) >= 0)
11498 rtx al
= gen_rtx_REG (QImode
, 0);
11499 emit_move_insn (al
, callarg2
);
11500 use_reg (&use
, al
);
11502 #endif /* TARGET_MACHO */
11504 if (! call_insn_operand (XEXP (fnaddr
, 0), Pmode
))
11506 fnaddr
= copy_to_mode_reg (Pmode
, XEXP (fnaddr
, 0));
11507 fnaddr
= gen_rtx_MEM (QImode
, fnaddr
);
11509 if (sibcall
&& TARGET_64BIT
11510 && !constant_call_address_operand (XEXP (fnaddr
, 0), Pmode
))
11513 addr
= copy_to_mode_reg (Pmode
, XEXP (fnaddr
, 0));
11514 fnaddr
= gen_rtx_REG (Pmode
, FIRST_REX_INT_REG
+ 3 /* R11 */);
11515 emit_move_insn (fnaddr
, addr
);
11516 fnaddr
= gen_rtx_MEM (QImode
, fnaddr
);
11519 call
= gen_rtx_CALL (VOIDmode
, fnaddr
, callarg1
);
11521 call
= gen_rtx_SET (VOIDmode
, retval
, call
);
11524 pop
= gen_rtx_PLUS (Pmode
, stack_pointer_rtx
, pop
);
11525 pop
= gen_rtx_SET (VOIDmode
, stack_pointer_rtx
, pop
);
11526 call
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, call
, pop
));
11529 call
= emit_call_insn (call
);
11531 CALL_INSN_FUNCTION_USAGE (call
) = use
;
/* Clear stack slot assignments remembered from previous functions.
   This is called from INIT_EXPANDERS once before RTL is emitted for each
   function.  */

static struct machine_function *
ix86_init_machine_status (void)
{
  struct machine_function *f;

  f = ggc_alloc_cleared (sizeof (struct machine_function));
  f->use_fast_prologue_epilogue_nregs = -1;

  return f;
}
11550 /* Return a MEM corresponding to a stack slot with mode MODE.
11551 Allocate a new slot if necessary.
11553 The RTL for a function can have several slots available: N is
11554 which slot to use. */
11557 assign_386_stack_local (enum machine_mode mode
, int n
)
11559 struct stack_local_entry
*s
;
11561 if (n
< 0 || n
>= MAX_386_STACK_LOCALS
)
11564 for (s
= ix86_stack_locals
; s
; s
= s
->next
)
11565 if (s
->mode
== mode
&& s
->n
== n
)
11568 s
= (struct stack_local_entry
*)
11569 ggc_alloc (sizeof (struct stack_local_entry
));
11572 s
->rtl
= assign_stack_local (mode
, GET_MODE_SIZE (mode
), 0);
11574 s
->next
= ix86_stack_locals
;
11575 ix86_stack_locals
= s
;
11579 /* Construct the SYMBOL_REF for the tls_get_addr function. */
11581 static GTY(()) rtx ix86_tls_symbol
;
11583 ix86_tls_get_addr (void)
11586 if (!ix86_tls_symbol
)
11588 ix86_tls_symbol
= gen_rtx_SYMBOL_REF (Pmode
,
11589 (TARGET_GNU_TLS
&& !TARGET_64BIT
)
11590 ? "___tls_get_addr"
11591 : "__tls_get_addr");
11594 return ix86_tls_symbol
;
11597 /* Calculate the length of the memory address in the instruction
11598 encoding. Does not include the one-byte modrm, opcode, or prefix. */
11601 memory_address_length (rtx addr
)
11603 struct ix86_address parts
;
11604 rtx base
, index
, disp
;
11607 if (GET_CODE (addr
) == PRE_DEC
11608 || GET_CODE (addr
) == POST_INC
11609 || GET_CODE (addr
) == PRE_MODIFY
11610 || GET_CODE (addr
) == POST_MODIFY
)
11613 if (! ix86_decompose_address (addr
, &parts
))
11617 index
= parts
.index
;
11622 - esp as the base always wants an index,
11623 - ebp as the base always wants a displacement. */
11625 /* Register Indirect. */
11626 if (base
&& !index
&& !disp
)
11628 /* esp (for its index) and ebp (for its displacement) need
11629 the two-byte modrm form. */
11630 if (addr
== stack_pointer_rtx
11631 || addr
== arg_pointer_rtx
11632 || addr
== frame_pointer_rtx
11633 || addr
== hard_frame_pointer_rtx
)
11637 /* Direct Addressing. */
11638 else if (disp
&& !base
&& !index
)
11643 /* Find the length of the displacement constant. */
11646 if (GET_CODE (disp
) == CONST_INT
11647 && CONST_OK_FOR_LETTER_P (INTVAL (disp
), 'K')
11653 /* ebp always wants a displacement. */
11654 else if (base
== hard_frame_pointer_rtx
)
11657 /* An index requires the two-byte modrm form.... */
11659 /* ...like esp, which always wants an index. */
11660 || base
== stack_pointer_rtx
11661 || base
== arg_pointer_rtx
11662 || base
== frame_pointer_rtx
)
/* Compute the default value for the "length_immediate" attribute.  When
   SHORTFORM is set, expect that the insn has an 8-bit immediate
   alternative.  */
11672 ix86_attr_length_immediate_default (rtx insn
, int shortform
)
11676 extract_insn_cached (insn
);
11677 for (i
= recog_data
.n_operands
- 1; i
>= 0; --i
)
11678 if (CONSTANT_P (recog_data
.operand
[i
]))
11683 && GET_CODE (recog_data
.operand
[i
]) == CONST_INT
11684 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data
.operand
[i
]), 'K'))
11688 switch (get_attr_mode (insn
))
      /* Immediates for DImode instructions are encoded as 32-bit
	 sign-extended values.  */
11704 fatal_insn ("unknown insn mode", insn
);
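/* Note on the DImode case above: x86-64 has no 64-bit immediate forms for
   ordinary ALU instructions, so e.g. "addq $0x12345678, %rax" still uses a
   4-byte immediate, and a constant outside the signed 32-bit range has to
   be loaded into a register (movabs) first.  */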
11710 /* Compute default value for "length_address" attribute. */
11712 ix86_attr_length_address_default (rtx insn
)
11716 if (get_attr_type (insn
) == TYPE_LEA
)
11718 rtx set
= PATTERN (insn
);
11719 if (GET_CODE (set
) == SET
)
11721 else if (GET_CODE (set
) == PARALLEL
11722 && GET_CODE (XVECEXP (set
, 0, 0)) == SET
)
11723 set
= XVECEXP (set
, 0, 0);
11726 #ifdef ENABLE_CHECKING
11732 return memory_address_length (SET_SRC (set
));
11735 extract_insn_cached (insn
);
11736 for (i
= recog_data
.n_operands
- 1; i
>= 0; --i
)
11737 if (GET_CODE (recog_data
.operand
[i
]) == MEM
)
11739 return memory_address_length (XEXP (recog_data
.operand
[i
], 0));
11745 /* Return the maximum number of instructions a cpu can issue. */
11748 ix86_issue_rate (void)
11752 case PROCESSOR_PENTIUM
:
11756 case PROCESSOR_PENTIUMPRO
:
11757 case PROCESSOR_PENTIUM4
:
11758 case PROCESSOR_ATHLON
:
11760 case PROCESSOR_NOCONA
:
11768 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
11769 by DEP_INSN and nothing set by DEP_INSN. */
11772 ix86_flags_dependant (rtx insn
, rtx dep_insn
, enum attr_type insn_type
)
11776 /* Simplify the test for uninteresting insns. */
11777 if (insn_type
!= TYPE_SETCC
11778 && insn_type
!= TYPE_ICMOV
11779 && insn_type
!= TYPE_FCMOV
11780 && insn_type
!= TYPE_IBR
)
11783 if ((set
= single_set (dep_insn
)) != 0)
11785 set
= SET_DEST (set
);
11788 else if (GET_CODE (PATTERN (dep_insn
)) == PARALLEL
11789 && XVECLEN (PATTERN (dep_insn
), 0) == 2
11790 && GET_CODE (XVECEXP (PATTERN (dep_insn
), 0, 0)) == SET
11791 && GET_CODE (XVECEXP (PATTERN (dep_insn
), 0, 1)) == SET
)
      set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
      set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
11799 if (GET_CODE (set
) != REG
|| REGNO (set
) != FLAGS_REG
)
11802 /* This test is true if the dependent insn reads the flags but
11803 not any other potentially set register. */
11804 if (!reg_overlap_mentioned_p (set
, PATTERN (insn
)))
11807 if (set2
&& reg_overlap_mentioned_p (set2
, PATTERN (insn
)))
11813 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
11814 address with operands set by DEP_INSN. */
11817 ix86_agi_dependant (rtx insn
, rtx dep_insn
, enum attr_type insn_type
)
11821 if (insn_type
== TYPE_LEA
11824 addr
= PATTERN (insn
);
11825 if (GET_CODE (addr
) == SET
)
11827 else if (GET_CODE (addr
) == PARALLEL
11828 && GET_CODE (XVECEXP (addr
, 0, 0)) == SET
)
11829 addr
= XVECEXP (addr
, 0, 0);
11832 addr
= SET_SRC (addr
);
11837 extract_insn_cached (insn
);
11838 for (i
= recog_data
.n_operands
- 1; i
>= 0; --i
)
11839 if (GET_CODE (recog_data
.operand
[i
]) == MEM
)
11841 addr
= XEXP (recog_data
.operand
[i
], 0);
11848 return modified_in_p (addr
, dep_insn
);
11852 ix86_adjust_cost (rtx insn
, rtx link
, rtx dep_insn
, int cost
)
11854 enum attr_type insn_type
, dep_insn_type
;
11855 enum attr_memory memory
;
11857 int dep_insn_code_number
;
11859 /* Anti and output dependencies have zero cost on all CPUs. */
11860 if (REG_NOTE_KIND (link
) != 0)
11863 dep_insn_code_number
= recog_memoized (dep_insn
);
11865 /* If we can't recognize the insns, we can't really do anything. */
11866 if (dep_insn_code_number
< 0 || recog_memoized (insn
) < 0)
11869 insn_type
= get_attr_type (insn
);
11870 dep_insn_type
= get_attr_type (dep_insn
);
11874 case PROCESSOR_PENTIUM
:
11875 /* Address Generation Interlock adds a cycle of latency. */
11876 if (ix86_agi_dependant (insn
, dep_insn
, insn_type
))
11879 /* ??? Compares pair with jump/setcc. */
11880 if (ix86_flags_dependant (insn
, dep_insn
, insn_type
))
      /* Floating point stores require the value to be ready one cycle
	 earlier.  */
11884 if (insn_type
== TYPE_FMOV
11885 && get_attr_memory (insn
) == MEMORY_STORE
11886 && !ix86_agi_dependant (insn
, dep_insn
, insn_type
))
11890 case PROCESSOR_PENTIUMPRO
:
11891 memory
= get_attr_memory (insn
);
11893 /* INT->FP conversion is expensive. */
11894 if (get_attr_fp_int_src (dep_insn
))
11897 /* There is one cycle extra latency between an FP op and a store. */
11898 if (insn_type
== TYPE_FMOV
11899 && (set
= single_set (dep_insn
)) != NULL_RTX
11900 && (set2
= single_set (insn
)) != NULL_RTX
11901 && rtx_equal_p (SET_DEST (set
), SET_SRC (set2
))
11902 && GET_CODE (SET_DEST (set2
)) == MEM
)
      /* Show the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction,
	 when the previous instruction is not needed to compute the
	 address.  */
11908 if ((memory
== MEMORY_LOAD
|| memory
== MEMORY_BOTH
)
11909 && !ix86_agi_dependant (insn
, dep_insn
, insn_type
))
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
11913 if (dep_insn_type
== TYPE_IMOV
11914 || dep_insn_type
== TYPE_FMOV
)
11922 memory
= get_attr_memory (insn
);
      /* The esp dependency is resolved before the instruction is really
	 finished.  */
11926 if ((insn_type
== TYPE_PUSH
|| insn_type
== TYPE_POP
)
11927 && (dep_insn_type
== TYPE_PUSH
|| dep_insn_type
== TYPE_POP
))
11930 /* INT->FP conversion is expensive. */
11931 if (get_attr_fp_int_src (dep_insn
))
      /* Show the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction,
	 when the previous instruction is not needed to compute the
	 address.  */
11937 if ((memory
== MEMORY_LOAD
|| memory
== MEMORY_BOTH
)
11938 && !ix86_agi_dependant (insn
, dep_insn
, insn_type
))
	  /* Claim moves to take one cycle, as the core can issue one load
	     at a time and the next load can start a cycle later.  */
11942 if (dep_insn_type
== TYPE_IMOV
11943 || dep_insn_type
== TYPE_FMOV
)
11952 case PROCESSOR_ATHLON
:
11954 memory
= get_attr_memory (insn
);
      /* Show the ability of the reorder buffer to hide the latency of a
	 load by executing it in parallel with the previous instruction,
	 when the previous instruction is not needed to compute the
	 address.  */
11959 if ((memory
== MEMORY_LOAD
|| memory
== MEMORY_BOTH
)
11960 && !ix86_agi_dependant (insn
, dep_insn
, insn_type
))
11962 enum attr_unit unit
= get_attr_unit (insn
);
	  /* Because of the difference between the length of integer and
	     floating unit pipeline preparation stages, the memory operands
	     for floating point are cheaper.

	     ??? For Athlon the difference is most probably 2.  */
11970 if (unit
== UNIT_INTEGER
|| unit
== UNIT_UNKNOWN
)
11973 loadcost
= TARGET_ATHLON
? 2 : 0;
11975 if (cost
>= loadcost
)
/* How many alternative schedules to try.  This should be as wide as the
   scheduling freedom in the DFA, but no wider.  Making this value too
   large results in extra work for the scheduler.  */
11993 ia32_multipass_dfa_lookahead (void)
11995 if (ix86_tune
== PROCESSOR_PENTIUM
)
11998 if (ix86_tune
== PROCESSOR_PENTIUMPRO
11999 || ix86_tune
== PROCESSOR_K6
)
/* Compute the alignment given to a constant that is being placed in memory.
   EXP is the constant and ALIGN is the alignment that the object would
   ordinarily have.
   The value of this function is used instead of that alignment to align
   the object.  */
12014 ix86_constant_alignment (tree exp
, int align
)
12016 if (TREE_CODE (exp
) == REAL_CST
)
12018 if (TYPE_MODE (TREE_TYPE (exp
)) == DFmode
&& align
< 64)
12020 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp
))) && align
< 128)
12023 else if (!optimize_size
&& TREE_CODE (exp
) == STRING_CST
12024 && TREE_STRING_LENGTH (exp
) >= 31 && align
< BITS_PER_WORD
)
12025 return BITS_PER_WORD
;
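/* For example, a DFmode ("double") constant that would otherwise get only
   32-bit alignment is bumped to 64 bits so it can be accessed without a
   split load, and string constants of 31 or more characters are aligned to
   a word boundary (when not optimizing for size) to speed up block copies
   of them.  */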
12030 /* Compute the alignment for a static variable.
12031 TYPE is the data type, and ALIGN is the alignment that
12032 the object would ordinarily have. The value of this function is used
12033 instead of that alignment to align the object. */
12036 ix86_data_alignment (tree type
, int align
)
12038 if (AGGREGATE_TYPE_P (type
)
12039 && TYPE_SIZE (type
)
12040 && TREE_CODE (TYPE_SIZE (type
)) == INTEGER_CST
12041 && (TREE_INT_CST_LOW (TYPE_SIZE (type
)) >= 256
12042 || TREE_INT_CST_HIGH (TYPE_SIZE (type
))) && align
< 256)
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
12049 if (AGGREGATE_TYPE_P (type
)
12050 && TYPE_SIZE (type
)
12051 && TREE_CODE (TYPE_SIZE (type
)) == INTEGER_CST
12052 && (TREE_INT_CST_LOW (TYPE_SIZE (type
)) >= 128
12053 || TREE_INT_CST_HIGH (TYPE_SIZE (type
))) && align
< 128)
12057 if (TREE_CODE (type
) == ARRAY_TYPE
)
12059 if (TYPE_MODE (TREE_TYPE (type
)) == DFmode
&& align
< 64)
12061 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type
))) && align
< 128)
12064 else if (TREE_CODE (type
) == COMPLEX_TYPE
)
12067 if (TYPE_MODE (type
) == DCmode
&& align
< 64)
12069 if (TYPE_MODE (type
) == XCmode
&& align
< 128)
12072 else if ((TREE_CODE (type
) == RECORD_TYPE
12073 || TREE_CODE (type
) == UNION_TYPE
12074 || TREE_CODE (type
) == QUAL_UNION_TYPE
)
12075 && TYPE_FIELDS (type
))
12077 if (DECL_MODE (TYPE_FIELDS (type
)) == DFmode
&& align
< 64)
12079 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type
))) && align
< 128)
12082 else if (TREE_CODE (type
) == REAL_TYPE
|| TREE_CODE (type
) == VECTOR_TYPE
12083 || TREE_CODE (type
) == INTEGER_TYPE
)
12085 if (TYPE_MODE (type
) == DFmode
&& align
< 64)
12087 if (ALIGN_MODE_128 (TYPE_MODE (type
)) && align
< 128)
12094 /* Compute the alignment for a local variable.
12095 TYPE is the data type, and ALIGN is the alignment that
12096 the object would ordinarily have. The value of this macro is used
12097 instead of that alignment to align the object. */
12100 ix86_local_alignment (tree type
, int align
)
  /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
     to a 16-byte boundary.  */
12106 if (AGGREGATE_TYPE_P (type
)
12107 && TYPE_SIZE (type
)
12108 && TREE_CODE (TYPE_SIZE (type
)) == INTEGER_CST
12109 && (TREE_INT_CST_LOW (TYPE_SIZE (type
)) >= 16
12110 || TREE_INT_CST_HIGH (TYPE_SIZE (type
))) && align
< 128)
12113 if (TREE_CODE (type
) == ARRAY_TYPE
)
12115 if (TYPE_MODE (TREE_TYPE (type
)) == DFmode
&& align
< 64)
12117 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type
))) && align
< 128)
12120 else if (TREE_CODE (type
) == COMPLEX_TYPE
)
12122 if (TYPE_MODE (type
) == DCmode
&& align
< 64)
12124 if (TYPE_MODE (type
) == XCmode
&& align
< 128)
12127 else if ((TREE_CODE (type
) == RECORD_TYPE
12128 || TREE_CODE (type
) == UNION_TYPE
12129 || TREE_CODE (type
) == QUAL_UNION_TYPE
)
12130 && TYPE_FIELDS (type
))
12132 if (DECL_MODE (TYPE_FIELDS (type
)) == DFmode
&& align
< 64)
12134 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type
))) && align
< 128)
12137 else if (TREE_CODE (type
) == REAL_TYPE
|| TREE_CODE (type
) == VECTOR_TYPE
12138 || TREE_CODE (type
) == INTEGER_TYPE
)
12141 if (TYPE_MODE (type
) == DFmode
&& align
< 64)
12143 if (ALIGN_MODE_128 (TYPE_MODE (type
)) && align
< 128)
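/* Example of the effect of the x86-64 aggregate rule above: a local
   "char buf[32]" that would ordinarily get 8-byte alignment is given
   16-byte alignment instead, so SSE instructions can access it with
   aligned loads and stores.  */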
12149 /* Emit RTL insns to initialize the variable parts of a trampoline.
12150 FNADDR is an RTX for the address of the function's pure code.
12151 CXT is an RTX for the static chain value for the function. */
12153 x86_initialize_trampoline (rtx tramp
, rtx fnaddr
, rtx cxt
)
      /* Compute offset from the end of the jmp to the target function.  */
      rtx disp = expand_binop (SImode, sub_optab, fnaddr,
			       plus_constant (tramp, 10),
			       NULL_RTX, 1, OPTAB_DIRECT);
      emit_move_insn (gen_rtx_MEM (QImode, tramp),
		      gen_int_mode (0xb9, QImode));
      emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
      emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
		      gen_int_mode (0xe9, QImode));
      emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
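      /* The ten bytes stored above form the 32-bit trampoline.  Assuming
	 the usual encodings for the opcodes written (0xb9, 0xe9):

	    offset 0:  b9 <cxt>    movl  $<static chain>, %ecx
	    offset 5:  e9 <disp>   jmp   <function>   (rel32 from offset 10)

	 i.e. the trampoline loads the static chain into %ecx and jumps to
	 the real function.  */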
      /* Try to load the address using the shorter movl instead of movabs.
	 We may want to support movq for kernel mode, but the kernel does
	 not use trampolines at the moment.  */
12174 if (x86_64_zext_immediate_operand (fnaddr
, VOIDmode
))
12176 fnaddr
= copy_to_mode_reg (DImode
, fnaddr
);
12177 emit_move_insn (gen_rtx_MEM (HImode
, plus_constant (tramp
, offset
)),
12178 gen_int_mode (0xbb41, HImode
));
12179 emit_move_insn (gen_rtx_MEM (SImode
, plus_constant (tramp
, offset
+ 2)),
12180 gen_lowpart (SImode
, fnaddr
));
12185 emit_move_insn (gen_rtx_MEM (HImode
, plus_constant (tramp
, offset
)),
12186 gen_int_mode (0xbb49, HImode
));
12187 emit_move_insn (gen_rtx_MEM (DImode
, plus_constant (tramp
, offset
+ 2)),
12191 /* Load static chain using movabs to r10. */
12192 emit_move_insn (gen_rtx_MEM (HImode
, plus_constant (tramp
, offset
)),
12193 gen_int_mode (0xba49, HImode
));
12194 emit_move_insn (gen_rtx_MEM (DImode
, plus_constant (tramp
, offset
+ 2)),
      /* Jump to r11.  */
12198 emit_move_insn (gen_rtx_MEM (HImode
, plus_constant (tramp
, offset
)),
12199 gen_int_mode (0xff49, HImode
));
12200 emit_move_insn (gen_rtx_MEM (QImode
, plus_constant (tramp
, offset
+2)),
12201 gen_int_mode (0xe3, QImode
));
12203 if (offset
> TRAMPOLINE_SIZE
)
12207 #ifdef ENABLE_EXECUTE_STACK
12208 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__enable_execute_stack"),
12209 LCT_NORMAL
, VOIDmode
, 1, tramp
, Pmode
);
12213 /* Codes for all the SSE/MMX builtins. */
12216 IX86_BUILTIN_ADDPS
,
12217 IX86_BUILTIN_ADDSS
,
12218 IX86_BUILTIN_DIVPS
,
12219 IX86_BUILTIN_DIVSS
,
12220 IX86_BUILTIN_MULPS
,
12221 IX86_BUILTIN_MULSS
,
12222 IX86_BUILTIN_SUBPS
,
12223 IX86_BUILTIN_SUBSS
,
12225 IX86_BUILTIN_CMPEQPS
,
12226 IX86_BUILTIN_CMPLTPS
,
12227 IX86_BUILTIN_CMPLEPS
,
12228 IX86_BUILTIN_CMPGTPS
,
12229 IX86_BUILTIN_CMPGEPS
,
12230 IX86_BUILTIN_CMPNEQPS
,
12231 IX86_BUILTIN_CMPNLTPS
,
12232 IX86_BUILTIN_CMPNLEPS
,
12233 IX86_BUILTIN_CMPNGTPS
,
12234 IX86_BUILTIN_CMPNGEPS
,
12235 IX86_BUILTIN_CMPORDPS
,
12236 IX86_BUILTIN_CMPUNORDPS
,
12237 IX86_BUILTIN_CMPNEPS
,
12238 IX86_BUILTIN_CMPEQSS
,
12239 IX86_BUILTIN_CMPLTSS
,
12240 IX86_BUILTIN_CMPLESS
,
12241 IX86_BUILTIN_CMPNEQSS
,
12242 IX86_BUILTIN_CMPNLTSS
,
12243 IX86_BUILTIN_CMPNLESS
,
12244 IX86_BUILTIN_CMPNGTSS
,
12245 IX86_BUILTIN_CMPNGESS
,
12246 IX86_BUILTIN_CMPORDSS
,
12247 IX86_BUILTIN_CMPUNORDSS
,
12248 IX86_BUILTIN_CMPNESS
,
12250 IX86_BUILTIN_COMIEQSS
,
12251 IX86_BUILTIN_COMILTSS
,
12252 IX86_BUILTIN_COMILESS
,
12253 IX86_BUILTIN_COMIGTSS
,
12254 IX86_BUILTIN_COMIGESS
,
12255 IX86_BUILTIN_COMINEQSS
,
12256 IX86_BUILTIN_UCOMIEQSS
,
12257 IX86_BUILTIN_UCOMILTSS
,
12258 IX86_BUILTIN_UCOMILESS
,
12259 IX86_BUILTIN_UCOMIGTSS
,
12260 IX86_BUILTIN_UCOMIGESS
,
12261 IX86_BUILTIN_UCOMINEQSS
,
12263 IX86_BUILTIN_CVTPI2PS
,
12264 IX86_BUILTIN_CVTPS2PI
,
12265 IX86_BUILTIN_CVTSI2SS
,
12266 IX86_BUILTIN_CVTSI642SS
,
12267 IX86_BUILTIN_CVTSS2SI
,
12268 IX86_BUILTIN_CVTSS2SI64
,
12269 IX86_BUILTIN_CVTTPS2PI
,
12270 IX86_BUILTIN_CVTTSS2SI
,
12271 IX86_BUILTIN_CVTTSS2SI64
,
12273 IX86_BUILTIN_MAXPS
,
12274 IX86_BUILTIN_MAXSS
,
12275 IX86_BUILTIN_MINPS
,
12276 IX86_BUILTIN_MINSS
,
12278 IX86_BUILTIN_LOADUPS
,
12279 IX86_BUILTIN_STOREUPS
,
12280 IX86_BUILTIN_MOVSS
,
12282 IX86_BUILTIN_MOVHLPS
,
12283 IX86_BUILTIN_MOVLHPS
,
12284 IX86_BUILTIN_LOADHPS
,
12285 IX86_BUILTIN_LOADLPS
,
12286 IX86_BUILTIN_STOREHPS
,
12287 IX86_BUILTIN_STORELPS
,
12289 IX86_BUILTIN_MASKMOVQ
,
12290 IX86_BUILTIN_MOVMSKPS
,
12291 IX86_BUILTIN_PMOVMSKB
,
12293 IX86_BUILTIN_MOVNTPS
,
12294 IX86_BUILTIN_MOVNTQ
,
12296 IX86_BUILTIN_LOADDQU
,
12297 IX86_BUILTIN_STOREDQU
,
12299 IX86_BUILTIN_PACKSSWB
,
12300 IX86_BUILTIN_PACKSSDW
,
12301 IX86_BUILTIN_PACKUSWB
,
12303 IX86_BUILTIN_PADDB
,
12304 IX86_BUILTIN_PADDW
,
12305 IX86_BUILTIN_PADDD
,
12306 IX86_BUILTIN_PADDQ
,
12307 IX86_BUILTIN_PADDSB
,
12308 IX86_BUILTIN_PADDSW
,
12309 IX86_BUILTIN_PADDUSB
,
12310 IX86_BUILTIN_PADDUSW
,
12311 IX86_BUILTIN_PSUBB
,
12312 IX86_BUILTIN_PSUBW
,
12313 IX86_BUILTIN_PSUBD
,
12314 IX86_BUILTIN_PSUBQ
,
12315 IX86_BUILTIN_PSUBSB
,
12316 IX86_BUILTIN_PSUBSW
,
12317 IX86_BUILTIN_PSUBUSB
,
12318 IX86_BUILTIN_PSUBUSW
,
12321 IX86_BUILTIN_PANDN
,
12325 IX86_BUILTIN_PAVGB
,
12326 IX86_BUILTIN_PAVGW
,
12328 IX86_BUILTIN_PCMPEQB
,
12329 IX86_BUILTIN_PCMPEQW
,
12330 IX86_BUILTIN_PCMPEQD
,
12331 IX86_BUILTIN_PCMPGTB
,
12332 IX86_BUILTIN_PCMPGTW
,
12333 IX86_BUILTIN_PCMPGTD
,
12335 IX86_BUILTIN_PMADDWD
,
12337 IX86_BUILTIN_PMAXSW
,
12338 IX86_BUILTIN_PMAXUB
,
12339 IX86_BUILTIN_PMINSW
,
12340 IX86_BUILTIN_PMINUB
,
12342 IX86_BUILTIN_PMULHUW
,
12343 IX86_BUILTIN_PMULHW
,
12344 IX86_BUILTIN_PMULLW
,
12346 IX86_BUILTIN_PSADBW
,
12347 IX86_BUILTIN_PSHUFW
,
12349 IX86_BUILTIN_PSLLW
,
12350 IX86_BUILTIN_PSLLD
,
12351 IX86_BUILTIN_PSLLQ
,
12352 IX86_BUILTIN_PSRAW
,
12353 IX86_BUILTIN_PSRAD
,
12354 IX86_BUILTIN_PSRLW
,
12355 IX86_BUILTIN_PSRLD
,
12356 IX86_BUILTIN_PSRLQ
,
12357 IX86_BUILTIN_PSLLWI
,
12358 IX86_BUILTIN_PSLLDI
,
12359 IX86_BUILTIN_PSLLQI
,
12360 IX86_BUILTIN_PSRAWI
,
12361 IX86_BUILTIN_PSRADI
,
12362 IX86_BUILTIN_PSRLWI
,
12363 IX86_BUILTIN_PSRLDI
,
12364 IX86_BUILTIN_PSRLQI
,
12366 IX86_BUILTIN_PUNPCKHBW
,
12367 IX86_BUILTIN_PUNPCKHWD
,
12368 IX86_BUILTIN_PUNPCKHDQ
,
12369 IX86_BUILTIN_PUNPCKLBW
,
12370 IX86_BUILTIN_PUNPCKLWD
,
12371 IX86_BUILTIN_PUNPCKLDQ
,
12373 IX86_BUILTIN_SHUFPS
,
12375 IX86_BUILTIN_RCPPS
,
12376 IX86_BUILTIN_RCPSS
,
12377 IX86_BUILTIN_RSQRTPS
,
12378 IX86_BUILTIN_RSQRTSS
,
12379 IX86_BUILTIN_SQRTPS
,
12380 IX86_BUILTIN_SQRTSS
,
12382 IX86_BUILTIN_UNPCKHPS
,
12383 IX86_BUILTIN_UNPCKLPS
,
12385 IX86_BUILTIN_ANDPS
,
12386 IX86_BUILTIN_ANDNPS
,
12388 IX86_BUILTIN_XORPS
,
12391 IX86_BUILTIN_LDMXCSR
,
12392 IX86_BUILTIN_STMXCSR
,
12393 IX86_BUILTIN_SFENCE
,
12395 /* 3DNow! Original */
12396 IX86_BUILTIN_FEMMS
,
12397 IX86_BUILTIN_PAVGUSB
,
12398 IX86_BUILTIN_PF2ID
,
12399 IX86_BUILTIN_PFACC
,
12400 IX86_BUILTIN_PFADD
,
12401 IX86_BUILTIN_PFCMPEQ
,
12402 IX86_BUILTIN_PFCMPGE
,
12403 IX86_BUILTIN_PFCMPGT
,
12404 IX86_BUILTIN_PFMAX
,
12405 IX86_BUILTIN_PFMIN
,
12406 IX86_BUILTIN_PFMUL
,
12407 IX86_BUILTIN_PFRCP
,
12408 IX86_BUILTIN_PFRCPIT1
,
12409 IX86_BUILTIN_PFRCPIT2
,
12410 IX86_BUILTIN_PFRSQIT1
,
12411 IX86_BUILTIN_PFRSQRT
,
12412 IX86_BUILTIN_PFSUB
,
12413 IX86_BUILTIN_PFSUBR
,
12414 IX86_BUILTIN_PI2FD
,
12415 IX86_BUILTIN_PMULHRW
,
12417 /* 3DNow! Athlon Extensions */
12418 IX86_BUILTIN_PF2IW
,
12419 IX86_BUILTIN_PFNACC
,
12420 IX86_BUILTIN_PFPNACC
,
12421 IX86_BUILTIN_PI2FW
,
12422 IX86_BUILTIN_PSWAPDSI
,
12423 IX86_BUILTIN_PSWAPDSF
,
12426 IX86_BUILTIN_ADDPD
,
12427 IX86_BUILTIN_ADDSD
,
12428 IX86_BUILTIN_DIVPD
,
12429 IX86_BUILTIN_DIVSD
,
12430 IX86_BUILTIN_MULPD
,
12431 IX86_BUILTIN_MULSD
,
12432 IX86_BUILTIN_SUBPD
,
12433 IX86_BUILTIN_SUBSD
,
12435 IX86_BUILTIN_CMPEQPD
,
12436 IX86_BUILTIN_CMPLTPD
,
12437 IX86_BUILTIN_CMPLEPD
,
12438 IX86_BUILTIN_CMPGTPD
,
12439 IX86_BUILTIN_CMPGEPD
,
12440 IX86_BUILTIN_CMPNEQPD
,
12441 IX86_BUILTIN_CMPNLTPD
,
12442 IX86_BUILTIN_CMPNLEPD
,
12443 IX86_BUILTIN_CMPNGTPD
,
12444 IX86_BUILTIN_CMPNGEPD
,
12445 IX86_BUILTIN_CMPORDPD
,
12446 IX86_BUILTIN_CMPUNORDPD
,
12447 IX86_BUILTIN_CMPNEPD
,
12448 IX86_BUILTIN_CMPEQSD
,
12449 IX86_BUILTIN_CMPLTSD
,
12450 IX86_BUILTIN_CMPLESD
,
12451 IX86_BUILTIN_CMPNEQSD
,
12452 IX86_BUILTIN_CMPNLTSD
,
12453 IX86_BUILTIN_CMPNLESD
,
12454 IX86_BUILTIN_CMPORDSD
,
12455 IX86_BUILTIN_CMPUNORDSD
,
12456 IX86_BUILTIN_CMPNESD
,
12458 IX86_BUILTIN_COMIEQSD
,
12459 IX86_BUILTIN_COMILTSD
,
12460 IX86_BUILTIN_COMILESD
,
12461 IX86_BUILTIN_COMIGTSD
,
12462 IX86_BUILTIN_COMIGESD
,
12463 IX86_BUILTIN_COMINEQSD
,
12464 IX86_BUILTIN_UCOMIEQSD
,
12465 IX86_BUILTIN_UCOMILTSD
,
12466 IX86_BUILTIN_UCOMILESD
,
12467 IX86_BUILTIN_UCOMIGTSD
,
12468 IX86_BUILTIN_UCOMIGESD
,
12469 IX86_BUILTIN_UCOMINEQSD
,
12471 IX86_BUILTIN_MAXPD
,
12472 IX86_BUILTIN_MAXSD
,
12473 IX86_BUILTIN_MINPD
,
12474 IX86_BUILTIN_MINSD
,
12476 IX86_BUILTIN_ANDPD
,
12477 IX86_BUILTIN_ANDNPD
,
12479 IX86_BUILTIN_XORPD
,
12481 IX86_BUILTIN_SQRTPD
,
12482 IX86_BUILTIN_SQRTSD
,
12484 IX86_BUILTIN_UNPCKHPD
,
12485 IX86_BUILTIN_UNPCKLPD
,
12487 IX86_BUILTIN_SHUFPD
,
12489 IX86_BUILTIN_LOADUPD
,
12490 IX86_BUILTIN_STOREUPD
,
12491 IX86_BUILTIN_MOVSD
,
12493 IX86_BUILTIN_LOADHPD
,
12494 IX86_BUILTIN_LOADLPD
,
12496 IX86_BUILTIN_CVTDQ2PD
,
12497 IX86_BUILTIN_CVTDQ2PS
,
12499 IX86_BUILTIN_CVTPD2DQ
,
12500 IX86_BUILTIN_CVTPD2PI
,
12501 IX86_BUILTIN_CVTPD2PS
,
12502 IX86_BUILTIN_CVTTPD2DQ
,
12503 IX86_BUILTIN_CVTTPD2PI
,
12505 IX86_BUILTIN_CVTPI2PD
,
12506 IX86_BUILTIN_CVTSI2SD
,
12507 IX86_BUILTIN_CVTSI642SD
,
12509 IX86_BUILTIN_CVTSD2SI
,
12510 IX86_BUILTIN_CVTSD2SI64
,
12511 IX86_BUILTIN_CVTSD2SS
,
12512 IX86_BUILTIN_CVTSS2SD
,
12513 IX86_BUILTIN_CVTTSD2SI
,
12514 IX86_BUILTIN_CVTTSD2SI64
,
12516 IX86_BUILTIN_CVTPS2DQ
,
12517 IX86_BUILTIN_CVTPS2PD
,
12518 IX86_BUILTIN_CVTTPS2DQ
,
12520 IX86_BUILTIN_MOVNTI
,
12521 IX86_BUILTIN_MOVNTPD
,
12522 IX86_BUILTIN_MOVNTDQ
,
12525 IX86_BUILTIN_MASKMOVDQU
,
12526 IX86_BUILTIN_MOVMSKPD
,
12527 IX86_BUILTIN_PMOVMSKB128
,
12529 IX86_BUILTIN_PACKSSWB128
,
12530 IX86_BUILTIN_PACKSSDW128
,
12531 IX86_BUILTIN_PACKUSWB128
,
12533 IX86_BUILTIN_PADDB128
,
12534 IX86_BUILTIN_PADDW128
,
12535 IX86_BUILTIN_PADDD128
,
12536 IX86_BUILTIN_PADDQ128
,
12537 IX86_BUILTIN_PADDSB128
,
12538 IX86_BUILTIN_PADDSW128
,
12539 IX86_BUILTIN_PADDUSB128
,
12540 IX86_BUILTIN_PADDUSW128
,
12541 IX86_BUILTIN_PSUBB128
,
12542 IX86_BUILTIN_PSUBW128
,
12543 IX86_BUILTIN_PSUBD128
,
12544 IX86_BUILTIN_PSUBQ128
,
12545 IX86_BUILTIN_PSUBSB128
,
12546 IX86_BUILTIN_PSUBSW128
,
12547 IX86_BUILTIN_PSUBUSB128
,
12548 IX86_BUILTIN_PSUBUSW128
,
12550 IX86_BUILTIN_PAND128
,
12551 IX86_BUILTIN_PANDN128
,
12552 IX86_BUILTIN_POR128
,
12553 IX86_BUILTIN_PXOR128
,
12555 IX86_BUILTIN_PAVGB128
,
12556 IX86_BUILTIN_PAVGW128
,
12558 IX86_BUILTIN_PCMPEQB128
,
12559 IX86_BUILTIN_PCMPEQW128
,
12560 IX86_BUILTIN_PCMPEQD128
,
12561 IX86_BUILTIN_PCMPGTB128
,
12562 IX86_BUILTIN_PCMPGTW128
,
12563 IX86_BUILTIN_PCMPGTD128
,
12565 IX86_BUILTIN_PMADDWD128
,
12567 IX86_BUILTIN_PMAXSW128
,
12568 IX86_BUILTIN_PMAXUB128
,
12569 IX86_BUILTIN_PMINSW128
,
12570 IX86_BUILTIN_PMINUB128
,
12572 IX86_BUILTIN_PMULUDQ
,
12573 IX86_BUILTIN_PMULUDQ128
,
12574 IX86_BUILTIN_PMULHUW128
,
12575 IX86_BUILTIN_PMULHW128
,
12576 IX86_BUILTIN_PMULLW128
,
12578 IX86_BUILTIN_PSADBW128
,
12579 IX86_BUILTIN_PSHUFHW
,
12580 IX86_BUILTIN_PSHUFLW
,
12581 IX86_BUILTIN_PSHUFD
,
12583 IX86_BUILTIN_PSLLW128
,
12584 IX86_BUILTIN_PSLLD128
,
12585 IX86_BUILTIN_PSLLQ128
,
12586 IX86_BUILTIN_PSRAW128
,
12587 IX86_BUILTIN_PSRAD128
,
12588 IX86_BUILTIN_PSRLW128
,
12589 IX86_BUILTIN_PSRLD128
,
12590 IX86_BUILTIN_PSRLQ128
,
12591 IX86_BUILTIN_PSLLDQI128
,
12592 IX86_BUILTIN_PSLLWI128
,
12593 IX86_BUILTIN_PSLLDI128
,
12594 IX86_BUILTIN_PSLLQI128
,
12595 IX86_BUILTIN_PSRAWI128
,
12596 IX86_BUILTIN_PSRADI128
,
12597 IX86_BUILTIN_PSRLDQI128
,
12598 IX86_BUILTIN_PSRLWI128
,
12599 IX86_BUILTIN_PSRLDI128
,
12600 IX86_BUILTIN_PSRLQI128
,
12602 IX86_BUILTIN_PUNPCKHBW128
,
12603 IX86_BUILTIN_PUNPCKHWD128
,
12604 IX86_BUILTIN_PUNPCKHDQ128
,
12605 IX86_BUILTIN_PUNPCKHQDQ128
,
12606 IX86_BUILTIN_PUNPCKLBW128
,
12607 IX86_BUILTIN_PUNPCKLWD128
,
12608 IX86_BUILTIN_PUNPCKLDQ128
,
12609 IX86_BUILTIN_PUNPCKLQDQ128
,
12611 IX86_BUILTIN_CLFLUSH
,
12612 IX86_BUILTIN_MFENCE
,
12613 IX86_BUILTIN_LFENCE
,
12615 /* Prescott New Instructions. */
12616 IX86_BUILTIN_ADDSUBPS
,
12617 IX86_BUILTIN_HADDPS
,
12618 IX86_BUILTIN_HSUBPS
,
12619 IX86_BUILTIN_MOVSHDUP
,
12620 IX86_BUILTIN_MOVSLDUP
,
12621 IX86_BUILTIN_ADDSUBPD
,
12622 IX86_BUILTIN_HADDPD
,
12623 IX86_BUILTIN_HSUBPD
,
12624 IX86_BUILTIN_LDDQU
,
12626 IX86_BUILTIN_MONITOR
,
12627 IX86_BUILTIN_MWAIT
,
12629 IX86_BUILTIN_VEC_INIT_V2SI
,
12630 IX86_BUILTIN_VEC_INIT_V4HI
,
12631 IX86_BUILTIN_VEC_INIT_V8QI
,
12632 IX86_BUILTIN_VEC_EXT_V2DF
,
12633 IX86_BUILTIN_VEC_EXT_V2DI
,
12634 IX86_BUILTIN_VEC_EXT_V4SF
,
12635 IX86_BUILTIN_VEC_EXT_V4SI
,
12636 IX86_BUILTIN_VEC_EXT_V8HI
,
12637 IX86_BUILTIN_VEC_EXT_V4HI
,
12638 IX86_BUILTIN_VEC_SET_V8HI
,
12639 IX86_BUILTIN_VEC_SET_V4HI
,
#define def_builtin(MASK, NAME, TYPE, CODE)				\
do {									\
  if ((MASK) & target_flags						\
      && (!((MASK) & MASK_64BIT) || TARGET_64BIT))			\
    lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD,	\
				 NULL, NULL_TREE);			\
} while (0)
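/* Example of how def_builtin is typically invoked (the type node name here
   is illustrative; the real calls are made while initializing the ix86
   builtins):

     def_builtin (MASK_SSE, "__builtin_ia32_addps",
		  v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);

   The MASK argument keeps the builtin from being registered when the
   corresponding ISA extension is not enabled.  */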
12652 /* Bits for builtin_description.flag. */
/* Set when we don't support the comparison natively, and should
   swap the comparison operands in order to support it.  */
12656 #define BUILTIN_DESC_SWAP_OPERANDS 1
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum ix86_builtins code;
  const enum rtx_code comparison;
  const unsigned int flag;
};
12668 static const struct builtin_description bdesc_comi
[] =
12670 { MASK_SSE
, CODE_FOR_sse_comi
, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS
, UNEQ
, 0 },
12671 { MASK_SSE
, CODE_FOR_sse_comi
, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS
, UNLT
, 0 },
12672 { MASK_SSE
, CODE_FOR_sse_comi
, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS
, UNLE
, 0 },
12673 { MASK_SSE
, CODE_FOR_sse_comi
, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS
, GT
, 0 },
12674 { MASK_SSE
, CODE_FOR_sse_comi
, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS
, GE
, 0 },
12675 { MASK_SSE
, CODE_FOR_sse_comi
, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS
, LTGT
, 0 },
12676 { MASK_SSE
, CODE_FOR_sse_ucomi
, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS
, UNEQ
, 0 },
12677 { MASK_SSE
, CODE_FOR_sse_ucomi
, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS
, UNLT
, 0 },
12678 { MASK_SSE
, CODE_FOR_sse_ucomi
, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS
, UNLE
, 0 },
12679 { MASK_SSE
, CODE_FOR_sse_ucomi
, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS
, GT
, 0 },
12680 { MASK_SSE
, CODE_FOR_sse_ucomi
, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS
, GE
, 0 },
12681 { MASK_SSE
, CODE_FOR_sse_ucomi
, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS
, LTGT
, 0 },
12682 { MASK_SSE2
, CODE_FOR_sse2_comi
, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD
, UNEQ
, 0 },
12683 { MASK_SSE2
, CODE_FOR_sse2_comi
, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD
, UNLT
, 0 },
12684 { MASK_SSE2
, CODE_FOR_sse2_comi
, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD
, UNLE
, 0 },
12685 { MASK_SSE2
, CODE_FOR_sse2_comi
, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD
, GT
, 0 },
12686 { MASK_SSE2
, CODE_FOR_sse2_comi
, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD
, GE
, 0 },
12687 { MASK_SSE2
, CODE_FOR_sse2_comi
, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD
, LTGT
, 0 },
12688 { MASK_SSE2
, CODE_FOR_sse2_ucomi
, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD
, UNEQ
, 0 },
12689 { MASK_SSE2
, CODE_FOR_sse2_ucomi
, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD
, UNLT
, 0 },
12690 { MASK_SSE2
, CODE_FOR_sse2_ucomi
, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD
, UNLE
, 0 },
12691 { MASK_SSE2
, CODE_FOR_sse2_ucomi
, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD
, GT
, 0 },
12692 { MASK_SSE2
, CODE_FOR_sse2_ucomi
, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD
, GE
, 0 },
12693 { MASK_SSE2
, CODE_FOR_sse2_ucomi
, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD
, LTGT
, 0 },
12696 static const struct builtin_description bdesc_2arg
[] =
12699 { MASK_SSE
, CODE_FOR_addv4sf3
, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS
, 0, 0 },
12700 { MASK_SSE
, CODE_FOR_subv4sf3
, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS
, 0, 0 },
12701 { MASK_SSE
, CODE_FOR_mulv4sf3
, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS
, 0, 0 },
12702 { MASK_SSE
, CODE_FOR_divv4sf3
, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS
, 0, 0 },
12703 { MASK_SSE
, CODE_FOR_sse_vmaddv4sf3
, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS
, 0, 0 },
12704 { MASK_SSE
, CODE_FOR_sse_vmsubv4sf3
, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS
, 0, 0 },
12705 { MASK_SSE
, CODE_FOR_sse_vmmulv4sf3
, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS
, 0, 0 },
12706 { MASK_SSE
, CODE_FOR_sse_vmdivv4sf3
, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS
, 0, 0 },
12708 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS
, EQ
, 0 },
12709 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS
, LT
, 0 },
12710 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS
, LE
, 0 },
12711 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS
, LT
,
12712 BUILTIN_DESC_SWAP_OPERANDS
},
12713 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS
, LE
,
12714 BUILTIN_DESC_SWAP_OPERANDS
},
12715 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS
, UNORDERED
, 0 },
12716 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS
, NE
, 0 },
12717 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS
, UNGE
, 0 },
12718 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS
, UNGT
, 0 },
12719 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS
, UNGE
,
12720 BUILTIN_DESC_SWAP_OPERANDS
},
12721 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS
, UNGT
,
12722 BUILTIN_DESC_SWAP_OPERANDS
},
12723 { MASK_SSE
, CODE_FOR_sse_maskcmpv4sf3
, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS
, ORDERED
, 0 },
12724 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS
, EQ
, 0 },
12725 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS
, LT
, 0 },
12726 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS
, LE
, 0 },
12727 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS
, UNORDERED
, 0 },
12728 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS
, NE
, 0 },
12729 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS
, UNGE
, 0 },
12730 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS
, UNGT
, 0 },
12731 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS
, UNGE
,
12732 BUILTIN_DESC_SWAP_OPERANDS
},
12733 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS
, UNGT
,
12734 BUILTIN_DESC_SWAP_OPERANDS
},
12735 { MASK_SSE
, CODE_FOR_sse_vmmaskcmpv4sf3
, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS
, UNORDERED
, 0 },
12737 { MASK_SSE
, CODE_FOR_sminv4sf3
, "__builtin_ia32_minps", IX86_BUILTIN_MINPS
, 0, 0 },
12738 { MASK_SSE
, CODE_FOR_smaxv4sf3
, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS
, 0, 0 },
12739 { MASK_SSE
, CODE_FOR_sse_vmsminv4sf3
, "__builtin_ia32_minss", IX86_BUILTIN_MINSS
, 0, 0 },
12740 { MASK_SSE
, CODE_FOR_sse_vmsmaxv4sf3
, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS
, 0, 0 },
12742 { MASK_SSE
, CODE_FOR_andv4sf3
, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS
, 0, 0 },
12743 { MASK_SSE
, CODE_FOR_sse_nandv4sf3
, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS
, 0, 0 },
12744 { MASK_SSE
, CODE_FOR_iorv4sf3
, "__builtin_ia32_orps", IX86_BUILTIN_ORPS
, 0, 0 },
12745 { MASK_SSE
, CODE_FOR_xorv4sf3
, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS
, 0, 0 },
12747 { MASK_SSE
, CODE_FOR_sse_movss
, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS
, 0, 0 },
12748 { MASK_SSE
, CODE_FOR_sse_movhlps
, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS
, 0, 0 },
12749 { MASK_SSE
, CODE_FOR_sse_movlhps
, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS
, 0, 0 },
12750 { MASK_SSE
, CODE_FOR_sse_unpckhps
, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS
, 0, 0 },
12751 { MASK_SSE
, CODE_FOR_sse_unpcklps
, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS
, 0, 0 },
12754 { MASK_MMX
, CODE_FOR_mmx_addv8qi3
, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB
, 0, 0 },
12755 { MASK_MMX
, CODE_FOR_mmx_addv4hi3
, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW
, 0, 0 },
12756 { MASK_MMX
, CODE_FOR_mmx_addv2si3
, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD
, 0, 0 },
12757 { MASK_MMX
, CODE_FOR_mmx_adddi3
, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ
, 0, 0 },
12758 { MASK_MMX
, CODE_FOR_mmx_subv8qi3
, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB
, 0, 0 },
12759 { MASK_MMX
, CODE_FOR_mmx_subv4hi3
, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW
, 0, 0 },
12760 { MASK_MMX
, CODE_FOR_mmx_subv2si3
, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD
, 0, 0 },
12761 { MASK_MMX
, CODE_FOR_mmx_subdi3
, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ
, 0, 0 },
12763 { MASK_MMX
, CODE_FOR_mmx_ssaddv8qi3
, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB
, 0, 0 },
12764 { MASK_MMX
, CODE_FOR_mmx_ssaddv4hi3
, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW
, 0, 0 },
12765 { MASK_MMX
, CODE_FOR_mmx_sssubv8qi3
, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB
, 0, 0 },
12766 { MASK_MMX
, CODE_FOR_mmx_sssubv4hi3
, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW
, 0, 0 },
12767 { MASK_MMX
, CODE_FOR_mmx_usaddv8qi3
, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB
, 0, 0 },
12768 { MASK_MMX
, CODE_FOR_mmx_usaddv4hi3
, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW
, 0, 0 },
12769 { MASK_MMX
, CODE_FOR_mmx_ussubv8qi3
, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB
, 0, 0 },
12770 { MASK_MMX
, CODE_FOR_mmx_ussubv4hi3
, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW
, 0, 0 },
12772 { MASK_MMX
, CODE_FOR_mmx_mulv4hi3
, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW
, 0, 0 },
12773 { MASK_MMX
, CODE_FOR_mmx_smulv4hi3_highpart
, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW
, 0, 0 },
12774 { MASK_SSE
| MASK_3DNOW_A
, CODE_FOR_mmx_umulv4hi3_highpart
, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW
, 0, 0 },
12776 { MASK_MMX
, CODE_FOR_mmx_andv2si3
, "__builtin_ia32_pand", IX86_BUILTIN_PAND
, 0, 0 },
12777 { MASK_MMX
, CODE_FOR_mmx_nandv2si3
, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN
, 0, 0 },
12778 { MASK_MMX
, CODE_FOR_mmx_iorv2si3
, "__builtin_ia32_por", IX86_BUILTIN_POR
, 0, 0 },
12779 { MASK_MMX
, CODE_FOR_mmx_xorv2si3
, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR
, 0, 0 },
12781 { MASK_SSE
| MASK_3DNOW_A
, CODE_FOR_mmx_uavgv8qi3
, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB
, 0, 0 },
12782 { MASK_SSE
| MASK_3DNOW_A
, CODE_FOR_mmx_uavgv4hi3
, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW
, 0, 0 },
12784 { MASK_MMX
, CODE_FOR_mmx_eqv8qi3
, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB
, 0, 0 },
12785 { MASK_MMX
, CODE_FOR_mmx_eqv4hi3
, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW
, 0, 0 },
12786 { MASK_MMX
, CODE_FOR_mmx_eqv2si3
, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD
, 0, 0 },
12787 { MASK_MMX
, CODE_FOR_mmx_gtv8qi3
, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB
, 0, 0 },
12788 { MASK_MMX
, CODE_FOR_mmx_gtv4hi3
, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW
, 0, 0 },
12789 { MASK_MMX
, CODE_FOR_mmx_gtv2si3
, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD
, 0, 0 },
12791 { MASK_SSE
| MASK_3DNOW_A
, CODE_FOR_mmx_umaxv8qi3
, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB
, 0, 0 },
12792 { MASK_SSE
| MASK_3DNOW_A
, CODE_FOR_mmx_smaxv4hi3
, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW
, 0, 0 },
12793 { MASK_SSE
| MASK_3DNOW_A
, CODE_FOR_mmx_uminv8qi3
, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB
, 0, 0 },
12794 { MASK_SSE
| MASK_3DNOW_A
, CODE_FOR_mmx_sminv4hi3
, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW
, 0, 0 },
12796 { MASK_MMX
, CODE_FOR_mmx_punpckhbw
, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW
, 0, 0 },
12797 { MASK_MMX
, CODE_FOR_mmx_punpckhwd
, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD
, 0, 0 },
12798 { MASK_MMX
, CODE_FOR_mmx_punpckhdq
, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ
, 0, 0 },
12799 { MASK_MMX
, CODE_FOR_mmx_punpcklbw
, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW
, 0, 0 },
12800 { MASK_MMX
, CODE_FOR_mmx_punpcklwd
, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD
, 0, 0 },
12801 { MASK_MMX
, CODE_FOR_mmx_punpckldq
, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ
, 0, 0 },
12804 { MASK_MMX
, CODE_FOR_mmx_packsswb
, 0, IX86_BUILTIN_PACKSSWB
, 0, 0 },
12805 { MASK_MMX
, CODE_FOR_mmx_packssdw
, 0, IX86_BUILTIN_PACKSSDW
, 0, 0 },
12806 { MASK_MMX
, CODE_FOR_mmx_packuswb
, 0, IX86_BUILTIN_PACKUSWB
, 0, 0 },
12808 { MASK_SSE
, CODE_FOR_sse_cvtpi2ps
, 0, IX86_BUILTIN_CVTPI2PS
, 0, 0 },
12809 { MASK_SSE
, CODE_FOR_sse_cvtsi2ss
, 0, IX86_BUILTIN_CVTSI2SS
, 0, 0 },
12810 { MASK_SSE
| MASK_64BIT
, CODE_FOR_sse_cvtsi2ssq
, 0, IX86_BUILTIN_CVTSI642SS
, 0, 0 },
12812 { MASK_MMX
, CODE_FOR_mmx_ashlv4hi3
, 0, IX86_BUILTIN_PSLLW
, 0, 0 },
12813 { MASK_MMX
, CODE_FOR_mmx_ashlv4hi3
, 0, IX86_BUILTIN_PSLLWI
, 0, 0 },
12814 { MASK_MMX
, CODE_FOR_mmx_ashlv2si3
, 0, IX86_BUILTIN_PSLLD
, 0, 0 },
12815 { MASK_MMX
, CODE_FOR_mmx_ashlv2si3
, 0, IX86_BUILTIN_PSLLDI
, 0, 0 },
12816 { MASK_MMX
, CODE_FOR_mmx_ashldi3
, 0, IX86_BUILTIN_PSLLQ
, 0, 0 },
12817 { MASK_MMX
, CODE_FOR_mmx_ashldi3
, 0, IX86_BUILTIN_PSLLQI
, 0, 0 },
12819 { MASK_MMX
, CODE_FOR_mmx_lshrv4hi3
, 0, IX86_BUILTIN_PSRLW
, 0, 0 },
12820 { MASK_MMX
, CODE_FOR_mmx_lshrv4hi3
, 0, IX86_BUILTIN_PSRLWI
, 0, 0 },
12821 { MASK_MMX
, CODE_FOR_mmx_lshrv2si3
, 0, IX86_BUILTIN_PSRLD
, 0, 0 },
12822 { MASK_MMX
, CODE_FOR_mmx_lshrv2si3
, 0, IX86_BUILTIN_PSRLDI
, 0, 0 },
12823 { MASK_MMX
, CODE_FOR_mmx_lshrdi3
, 0, IX86_BUILTIN_PSRLQ
, 0, 0 },
12824 { MASK_MMX
, CODE_FOR_mmx_lshrdi3
, 0, IX86_BUILTIN_PSRLQI
, 0, 0 },
12826 { MASK_MMX
, CODE_FOR_mmx_ashrv4hi3
, 0, IX86_BUILTIN_PSRAW
, 0, 0 },
12827 { MASK_MMX
, CODE_FOR_mmx_ashrv4hi3
, 0, IX86_BUILTIN_PSRAWI
, 0, 0 },
12828 { MASK_MMX
, CODE_FOR_mmx_ashrv2si3
, 0, IX86_BUILTIN_PSRAD
, 0, 0 },
12829 { MASK_MMX
, CODE_FOR_mmx_ashrv2si3
, 0, IX86_BUILTIN_PSRADI
, 0, 0 },
12831 { MASK_SSE
| MASK_3DNOW_A
, CODE_FOR_mmx_psadbw
, 0, IX86_BUILTIN_PSADBW
, 0, 0 },
12832 { MASK_MMX
, CODE_FOR_mmx_pmaddwd
, 0, IX86_BUILTIN_PMADDWD
, 0, 0 },
12835 { MASK_SSE2
, CODE_FOR_addv2df3
, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD
, 0, 0 },
12836 { MASK_SSE2
, CODE_FOR_subv2df3
, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD
, 0, 0 },
12837 { MASK_SSE2
, CODE_FOR_mulv2df3
, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD
, 0, 0 },
12838 { MASK_SSE2
, CODE_FOR_divv2df3
, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD
, 0, 0 },
12839 { MASK_SSE2
, CODE_FOR_sse2_vmaddv2df3
, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD
, 0, 0 },
12840 { MASK_SSE2
, CODE_FOR_sse2_vmsubv2df3
, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD
, 0, 0 },
12841 { MASK_SSE2
, CODE_FOR_sse2_vmmulv2df3
, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD
, 0, 0 },
12842 { MASK_SSE2
, CODE_FOR_sse2_vmdivv2df3
, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD
, 0, 0 },
12844 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD
, EQ
, 0 },
12845 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD
, LT
, 0 },
12846 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD
, LE
, 0 },
12847 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD
, LT
,
12848 BUILTIN_DESC_SWAP_OPERANDS
},
12849 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD
, LE
,
12850 BUILTIN_DESC_SWAP_OPERANDS
},
12851 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD
, UNORDERED
, 0 },
12852 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD
, NE
, 0 },
12853 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD
, UNGE
, 0 },
12854 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD
, UNGT
, 0 },
12855 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD
, UNGE
,
12856 BUILTIN_DESC_SWAP_OPERANDS
},
12857 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD
, UNGT
,
12858 BUILTIN_DESC_SWAP_OPERANDS
},
12859 { MASK_SSE2
, CODE_FOR_sse2_maskcmpv2df3
, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD
, ORDERED
, 0 },
12860   { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
12861   { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
12862   { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
12863   { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
12864   { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
12865   { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
12866   { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
12867   { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
12869   { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
12870   { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
12871   { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
12872   { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
12874   { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
12875   { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
12876   { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
12877   { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
12879   { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
12880   { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
12881   { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
12884   { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
12885   { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
12886   { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
12887   { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
12888   { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
12889   { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
12890   { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
12891   { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
12893   { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
12894   { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
12895   { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
12896   { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
12897   { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
12898   { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
12899   { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
12900   { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
12902   { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
12903   { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
12905   { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
12906   { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
12907   { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
12908   { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
12910   { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
12911   { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
12913   { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
12914   { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
12915   { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
12916   { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
12917   { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
12918   { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
12920   { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
12921   { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
12922   { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
12923   { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
12925   { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
12926   { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
12927   { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
12928   { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
12929   { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
12930   { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
12931   { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
12932   { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
12934   { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
12935   { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
12936   { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
12938   { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
12939   { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
12941   { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
12942   { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
12944   { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
12945   { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
12946   { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
12948   { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
12949   { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
12950   { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
12952   { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
12953   { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
12955   { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
12957   { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
12958   { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
12959   { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
12960   { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
12963   { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
12964   { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
12965   { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
12966   { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
12967   { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
12968   { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
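/* Illustrative sketch, not part of the original source: each row above pairs
   a target mask, an insn pattern and a builtin code.  Rows with an explicit
   name are registered directly; rows whose name field is 0 (e.g. the
   shift-by-immediate entries) get their name and type from the explicit
   def_builtin calls in ix86_init_mmx_sse_builtins below.  A user-level call
   such as

     __v2df c = __builtin_ia32_addpd (a, b);   assuming SSE2 is enabled

   ends up in ix86_expand_binop_builtin, which emits CODE_FOR_addv2df3.  */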
12971 static const struct builtin_description bdesc_1arg[] =
12973   { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
12974   { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
12976   { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
12977   { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
12978   { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
12980   { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
12981   { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
12982   { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
12983   { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
12984   { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
12985   { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
12987   { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
12988   { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
12990   { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
12992   { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
12993   { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
12995   { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
12996   { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
12997   { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
12998   { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
12999   { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
13001   { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
13003   { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
13004   { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
13005   { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
13006   { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
13008   { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
13009   { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
13010   { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
13013   { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
13014   { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
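/* Illustrative sketch, not part of the original source: the one-operand
   table is expanded through ix86_expand_unop_builtin.  For instance a
   hypothetical

     __v4sf r = __builtin_ia32_sqrtps (x);

   goes through CODE_FOR_sqrtv4sf2; its name and the v4sf_ftype_v4sf
   signature come from the explicit def_builtin call further below.  */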
13018 ix86_init_builtins (void)
13021   ix86_init_mmx_sse_builtins ();
13024 /* Set up all the MMX/SSE builtins.  This is not called if TARGET_MMX
13025    is zero.  Otherwise, if TARGET_SSE is not set, only expand the MMX
13028 ix86_init_mmx_sse_builtins (void)
13030   const struct builtin_description *d;
13033   tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
13034   tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
13035   tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
13036   tree V2DI_type_node
13037     = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
13038   tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
13039   tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
13040   tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
13041   tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
13042   tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
13043   tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
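/* Illustrative note, not part of the original source: these tree nodes are
   the internal counterparts of the vector typedefs the intrinsic headers
   use; V4SF_type_node, for example, corresponds to something along the
   lines of

     typedef float __v4sf __attribute__ ((__vector_size__ (16)));

   which is what <xmmintrin.h> builds __m128 from.  */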
13045   tree pchar_type_node = build_pointer_type (char_type_node);
13046   tree pcchar_type_node = build_pointer_type (
13047 			     build_type_variant (char_type_node, 1, 0));
13048   tree pfloat_type_node = build_pointer_type (float_type_node);
13049   tree pcfloat_type_node = build_pointer_type (
13050 			      build_type_variant (float_type_node, 1, 0));
13051   tree pv2si_type_node = build_pointer_type (V2SI_type_node);
13052   tree pv2di_type_node = build_pointer_type (V2DI_type_node);
13053   tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
13056   tree int_ftype_v4sf_v4sf
13057     = build_function_type_list (integer_type_node,
13058 				V4SF_type_node, V4SF_type_node, NULL_TREE);
13059   tree v4si_ftype_v4sf_v4sf
13060     = build_function_type_list (V4SI_type_node,
13061 				V4SF_type_node, V4SF_type_node, NULL_TREE);
13062   /* MMX/SSE/integer conversions.  */
13063   tree int_ftype_v4sf
13064     = build_function_type_list (integer_type_node,
13065 				V4SF_type_node, NULL_TREE);
13066   tree int64_ftype_v4sf
13067     = build_function_type_list (long_long_integer_type_node,
13068 				V4SF_type_node, NULL_TREE);
13069   tree int_ftype_v8qi
13070     = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
13071   tree v4sf_ftype_v4sf_int
13072     = build_function_type_list (V4SF_type_node,
13073 				V4SF_type_node, integer_type_node, NULL_TREE);
13074   tree v4sf_ftype_v4sf_int64
13075     = build_function_type_list (V4SF_type_node,
13076 				V4SF_type_node, long_long_integer_type_node,
13078   tree v4sf_ftype_v4sf_v2si
13079     = build_function_type_list (V4SF_type_node,
13080 				V4SF_type_node, V2SI_type_node, NULL_TREE);
13082   /* Miscellaneous.  */
13083   tree v8qi_ftype_v4hi_v4hi
13084     = build_function_type_list (V8QI_type_node,
13085 				V4HI_type_node, V4HI_type_node, NULL_TREE);
13086   tree v4hi_ftype_v2si_v2si
13087     = build_function_type_list (V4HI_type_node,
13088 				V2SI_type_node, V2SI_type_node, NULL_TREE);
13089   tree v4sf_ftype_v4sf_v4sf_int
13090     = build_function_type_list (V4SF_type_node,
13091 				V4SF_type_node, V4SF_type_node,
13092 				integer_type_node, NULL_TREE);
13093   tree v2si_ftype_v4hi_v4hi
13094     = build_function_type_list (V2SI_type_node,
13095 				V4HI_type_node, V4HI_type_node, NULL_TREE);
13096   tree v4hi_ftype_v4hi_int
13097     = build_function_type_list (V4HI_type_node,
13098 				V4HI_type_node, integer_type_node, NULL_TREE);
13099   tree v4hi_ftype_v4hi_di
13100     = build_function_type_list (V4HI_type_node,
13101 				V4HI_type_node, long_long_unsigned_type_node,
13103   tree v2si_ftype_v2si_di
13104     = build_function_type_list (V2SI_type_node,
13105 				V2SI_type_node, long_long_unsigned_type_node,
13107   tree void_ftype_void
13108     = build_function_type (void_type_node, void_list_node);
13109   tree void_ftype_unsigned
13110     = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
13111   tree void_ftype_unsigned_unsigned
13112     = build_function_type_list (void_type_node, unsigned_type_node,
13113 				unsigned_type_node, NULL_TREE);
13114   tree void_ftype_pcvoid_unsigned_unsigned
13115     = build_function_type_list (void_type_node, const_ptr_type_node,
13116 				unsigned_type_node, unsigned_type_node,
13118   tree unsigned_ftype_void
13119     = build_function_type (unsigned_type_node, void_list_node);
13120   tree v2si_ftype_v4sf
13121     = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
13122   /* Loads/stores.  */
13123   tree void_ftype_v8qi_v8qi_pchar
13124     = build_function_type_list (void_type_node,
13125 				V8QI_type_node, V8QI_type_node,
13126 				pchar_type_node, NULL_TREE);
13127   tree v4sf_ftype_pcfloat
13128     = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
13129   /* @@@ the type is bogus */
13130   tree v4sf_ftype_v4sf_pv2si
13131     = build_function_type_list (V4SF_type_node,
13132 				V4SF_type_node, pv2si_type_node, NULL_TREE);
13133   tree void_ftype_pv2si_v4sf
13134     = build_function_type_list (void_type_node,
13135 				pv2si_type_node, V4SF_type_node, NULL_TREE);
13136   tree void_ftype_pfloat_v4sf
13137     = build_function_type_list (void_type_node,
13138 				pfloat_type_node, V4SF_type_node, NULL_TREE);
13139   tree void_ftype_pdi_di
13140     = build_function_type_list (void_type_node,
13141 				pdi_type_node, long_long_unsigned_type_node,
13143   tree void_ftype_pv2di_v2di
13144     = build_function_type_list (void_type_node,
13145 				pv2di_type_node, V2DI_type_node, NULL_TREE);
13146   /* Normal vector unops.  */
13147   tree v4sf_ftype_v4sf
13148     = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
13150   /* Normal vector binops.  */
13151   tree v4sf_ftype_v4sf_v4sf
13152     = build_function_type_list (V4SF_type_node,
13153 				V4SF_type_node, V4SF_type_node, NULL_TREE);
13154   tree v8qi_ftype_v8qi_v8qi
13155     = build_function_type_list (V8QI_type_node,
13156 				V8QI_type_node, V8QI_type_node, NULL_TREE);
13157   tree v4hi_ftype_v4hi_v4hi
13158     = build_function_type_list (V4HI_type_node,
13159 				V4HI_type_node, V4HI_type_node, NULL_TREE);
13160   tree v2si_ftype_v2si_v2si
13161     = build_function_type_list (V2SI_type_node,
13162 				V2SI_type_node, V2SI_type_node, NULL_TREE);
13163   tree di_ftype_di_di
13164     = build_function_type_list (long_long_unsigned_type_node,
13165 				long_long_unsigned_type_node,
13166 				long_long_unsigned_type_node, NULL_TREE);
13168   tree v2si_ftype_v2sf
13169     = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
13170   tree v2sf_ftype_v2si
13171     = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
13172   tree v2si_ftype_v2si
13173     = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
13174   tree v2sf_ftype_v2sf
13175     = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
13176   tree v2sf_ftype_v2sf_v2sf
13177     = build_function_type_list (V2SF_type_node,
13178 				V2SF_type_node, V2SF_type_node, NULL_TREE);
13179   tree v2si_ftype_v2sf_v2sf
13180     = build_function_type_list (V2SI_type_node,
13181 				V2SF_type_node, V2SF_type_node, NULL_TREE);
13182   tree pint_type_node = build_pointer_type (integer_type_node);
13183   tree pdouble_type_node = build_pointer_type (double_type_node);
13184   tree pcdouble_type_node = build_pointer_type (
13185 				build_type_variant (double_type_node, 1, 0));
13186   tree int_ftype_v2df_v2df
13187     = build_function_type_list (integer_type_node,
13188 				V2DF_type_node, V2DF_type_node, NULL_TREE);
13190   tree ti_ftype_ti_ti
13191     = build_function_type_list (intTI_type_node,
13192 				intTI_type_node, intTI_type_node, NULL_TREE);
13193   tree void_ftype_pcvoid
13194     = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
13195   tree v4sf_ftype_v4si
13196     = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
13197   tree v4si_ftype_v4sf
13198     = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
13199   tree v2df_ftype_v4si
13200     = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
13201   tree v4si_ftype_v2df
13202     = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
13203   tree v2si_ftype_v2df
13204     = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
13205   tree v4sf_ftype_v2df
13206     = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
13207   tree v2df_ftype_v2si
13208     = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
13209   tree v2df_ftype_v4sf
13210     = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
13211   tree int_ftype_v2df
13212     = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
13213   tree int64_ftype_v2df
13214     = build_function_type_list (long_long_integer_type_node,
13215 				V2DF_type_node, NULL_TREE);
13216   tree v2df_ftype_v2df_int
13217     = build_function_type_list (V2DF_type_node,
13218 				V2DF_type_node, integer_type_node, NULL_TREE);
13219   tree v2df_ftype_v2df_int64
13220     = build_function_type_list (V2DF_type_node,
13221 				V2DF_type_node, long_long_integer_type_node,
13223   tree v4sf_ftype_v4sf_v2df
13224     = build_function_type_list (V4SF_type_node,
13225 				V4SF_type_node, V2DF_type_node, NULL_TREE);
13226   tree v2df_ftype_v2df_v4sf
13227     = build_function_type_list (V2DF_type_node,
13228 				V2DF_type_node, V4SF_type_node, NULL_TREE);
13229   tree v2df_ftype_v2df_v2df_int
13230     = build_function_type_list (V2DF_type_node,
13231 				V2DF_type_node, V2DF_type_node,
13234   tree v2df_ftype_v2df_pcdouble
13235     = build_function_type_list (V2DF_type_node,
13236 				V2DF_type_node, pcdouble_type_node, NULL_TREE);
13237   tree void_ftype_pdouble_v2df
13238     = build_function_type_list (void_type_node,
13239 				pdouble_type_node, V2DF_type_node, NULL_TREE);
13240   tree void_ftype_pint_int
13241     = build_function_type_list (void_type_node,
13242 				pint_type_node, integer_type_node, NULL_TREE);
13243   tree void_ftype_v16qi_v16qi_pchar
13244     = build_function_type_list (void_type_node,
13245 				V16QI_type_node, V16QI_type_node,
13246 				pchar_type_node, NULL_TREE);
13247   tree v2df_ftype_pcdouble
13248     = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
13249   tree v2df_ftype_v2df_v2df
13250     = build_function_type_list (V2DF_type_node,
13251 				V2DF_type_node, V2DF_type_node, NULL_TREE);
13252   tree v16qi_ftype_v16qi_v16qi
13253     = build_function_type_list (V16QI_type_node,
13254 				V16QI_type_node, V16QI_type_node, NULL_TREE);
13255   tree v8hi_ftype_v8hi_v8hi
13256     = build_function_type_list (V8HI_type_node,
13257 				V8HI_type_node, V8HI_type_node, NULL_TREE);
13258   tree v4si_ftype_v4si_v4si
13259     = build_function_type_list (V4SI_type_node,
13260 				V4SI_type_node, V4SI_type_node, NULL_TREE);
13261   tree v2di_ftype_v2di_v2di
13262     = build_function_type_list (V2DI_type_node,
13263 				V2DI_type_node, V2DI_type_node, NULL_TREE);
13264   tree v2di_ftype_v2df_v2df
13265     = build_function_type_list (V2DI_type_node,
13266 				V2DF_type_node, V2DF_type_node, NULL_TREE);
13267   tree v2df_ftype_v2df
13268     = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
13269   tree v2di_ftype_v2di_int
13270     = build_function_type_list (V2DI_type_node,
13271 				V2DI_type_node, integer_type_node, NULL_TREE);
13272   tree v4si_ftype_v4si_int
13273     = build_function_type_list (V4SI_type_node,
13274 				V4SI_type_node, integer_type_node, NULL_TREE);
13275   tree v8hi_ftype_v8hi_int
13276     = build_function_type_list (V8HI_type_node,
13277 				V8HI_type_node, integer_type_node, NULL_TREE);
13278   tree v8hi_ftype_v8hi_v2di
13279     = build_function_type_list (V8HI_type_node,
13280 				V8HI_type_node, V2DI_type_node, NULL_TREE);
13281   tree v4si_ftype_v4si_v2di
13282     = build_function_type_list (V4SI_type_node,
13283 				V4SI_type_node, V2DI_type_node, NULL_TREE);
13284   tree v4si_ftype_v8hi_v8hi
13285     = build_function_type_list (V4SI_type_node,
13286 				V8HI_type_node, V8HI_type_node, NULL_TREE);
13287   tree di_ftype_v8qi_v8qi
13288     = build_function_type_list (long_long_unsigned_type_node,
13289 				V8QI_type_node, V8QI_type_node, NULL_TREE);
13290   tree di_ftype_v2si_v2si
13291     = build_function_type_list (long_long_unsigned_type_node,
13292 				V2SI_type_node, V2SI_type_node, NULL_TREE);
13293   tree v2di_ftype_v16qi_v16qi
13294     = build_function_type_list (V2DI_type_node,
13295 				V16QI_type_node, V16QI_type_node, NULL_TREE);
13296   tree v2di_ftype_v4si_v4si
13297     = build_function_type_list (V2DI_type_node,
13298 				V4SI_type_node, V4SI_type_node, NULL_TREE);
13299   tree int_ftype_v16qi
13300     = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
13301   tree v16qi_ftype_pcchar
13302     = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
13303   tree void_ftype_pchar_v16qi
13304     = build_function_type_list (void_type_node,
13305 				pchar_type_node, V16QI_type_node, NULL_TREE);
13308   tree float128_type;
13311   /* The __float80 type.  */
13312   if (TYPE_MODE (long_double_type_node) == XFmode)
13313     (*lang_hooks.types.register_builtin_type) (long_double_type_node,
13317       /* The __float80 type.  */
13318       float80_type = make_node (REAL_TYPE);
13319       TYPE_PRECISION (float80_type) = 80;
13320       layout_type (float80_type);
13321       (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
13324   float128_type = make_node (REAL_TYPE);
13325   TYPE_PRECISION (float128_type) = 128;
13326   layout_type (float128_type);
13327   (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
13329   /* Add all builtins that are more or less simple operations on two
13331   for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13333       /* Use one of the operands; the target can have a different mode for
13334 	 mask-generating compares.  */
13335       enum machine_mode mode;
13340       mode = insn_data[d->icode].operand[1].mode;
13345 	  type = v16qi_ftype_v16qi_v16qi;
13348 	  type = v8hi_ftype_v8hi_v8hi;
13351 	  type = v4si_ftype_v4si_v4si;
13354 	  type = v2di_ftype_v2di_v2di;
13357 	  type = v2df_ftype_v2df_v2df;
13360 	  type = ti_ftype_ti_ti;
13363 	  type = v4sf_ftype_v4sf_v4sf;
13366 	  type = v8qi_ftype_v8qi_v8qi;
13369 	  type = v4hi_ftype_v4hi_v4hi;
13372 	  type = v2si_ftype_v2si_v2si;
13375 	  type = di_ftype_di_di;
13382       /* Override for comparisons.  */
13383       if (d->icode == CODE_FOR_sse_maskcmpv4sf3
13384 	  || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
13385 	type = v4si_ftype_v4sf_v4sf;
13387       if (d->icode == CODE_FOR_sse2_maskcmpv2df3
13388 	  || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
13389 	type = v2di_ftype_v2df_v2df;
13391       def_builtin (d->mask, d->name, type, d->code);
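/* Illustrative note, not part of the original source: the switch on MODE
   above (its case labels are among the lines omitted from this listing)
   derives the builtin signature from operand 1 of the named pattern, and
   the comparison override then gives the packed compares an integer-vector
   result.  A hypothetical __builtin_ia32_cmpeqps therefore ends up with the
   v4si_ftype_v4sf_v4sf signature rather than v4sf_ftype_v4sf_v4sf.  */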
13394   /* Add the remaining MMX insns with somewhat more complicated types.  */
13395   def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
13396   def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
13397   def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
13398   def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
13400   def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
13401   def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
13402   def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
13404   def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
13405   def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
13407   def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
13408   def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
13410   /* comi/ucomi insns.  */
13411   for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
13412     if (d->mask == MASK_SSE2)
13413       def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
13415       def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
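/* Illustrative note, not part of the original source: the comi/ucomi
   builtins return a plain int, e.g. a hypothetical

     int lt = __builtin_ia32_comilt (a, b);    a, b of type __v4sf

   which ix86_expand_sse_comi below turns into a (u)comiss/(u)comisd
   followed by setting the condition into a QImode subreg of the result.  */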
13417   def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
13418   def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
13419   def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
13421   def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
13422   def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
13423   def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
13424   def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
13425   def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
13426   def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
13427   def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
13428   def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
13429   def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
13430   def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
13431   def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
13433   def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
13435   def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
13436   def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
13438   def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
13439   def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
13440   def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
13441   def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
13443   def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
13444   def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
13445   def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
13446   def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
13448   def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
13450   def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
13452   def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
13453   def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
13454   def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
13455   def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
13456   def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
13457   def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
13459   def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
13461   /* Original 3DNow!  */
13462   def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
13463   def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
13464   def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
13465   def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
13466   def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
13467   def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
13468   def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
13469   def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
13470   def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
13471   def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
13472   def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
13473   def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
13474   def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
13475   def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
13476   def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
13477   def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
13478   def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
13479   def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
13480   def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
13481   def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
13483   /* 3DNow! extension as used in the Athlon CPU.  */
13484   def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
13485   def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
13486   def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
13487   def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
13488   def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
13489   def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
13492   def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
13494   def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
13495   def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
13497   def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
13498   def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
13500   def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
13501   def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
13502   def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
13503   def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
13504   def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
13506   def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
13507   def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
13508   def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
13509   def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
13511   def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
13512   def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
13514   def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
13516   def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
13517   def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
13519   def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
13520   def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
13521   def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
13522   def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
13523   def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
13525   def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
13527   def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
13528   def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
13529   def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
13530   def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
13532   def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
13533   def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
13534   def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
13536   def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
13537   def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
13538   def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
13539   def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
13541   def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
13542   def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
13543   def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
13545   def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
13546   def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
13548   def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
13549   def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
13551   def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
13552   def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
13553   def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
13555   def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
13556   def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
13557   def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
13559   def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
13560   def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
13562   def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
13563   def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
13564   def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
13565   def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
13567   def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
13568   def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
13569   def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
13570   def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
13572   def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
13573   def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
13575   def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
13577   /* Prescott New Instructions.  */
13578   def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
13579 	       void_ftype_pcvoid_unsigned_unsigned,
13580 	       IX86_BUILTIN_MONITOR);
13581   def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
13582 	       void_ftype_unsigned_unsigned,
13583 	       IX86_BUILTIN_MWAIT);
13584   def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
13586 	       IX86_BUILTIN_MOVSHDUP);
13587   def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
13589 	       IX86_BUILTIN_MOVSLDUP);
13590   def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
13591 	       v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
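/* Illustrative sketch, not part of the original source: the SSE3
   monitor/mwait builtins are what <pmmintrin.h> wraps; assuming code is
   compiled with -msse3, a hypothetical use would be

     static void wait_for_store (void const *p)
     {
       __builtin_ia32_monitor (p, 0, 0);
       __builtin_ia32_mwait (0, 0);
     }
*/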
13593   /* Access to the vec_init patterns.  */
13594   ftype = build_function_type_list (V2SI_type_node, integer_type_node,
13595 				    integer_type_node, NULL_TREE);
13596   def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
13597 	       ftype, IX86_BUILTIN_VEC_INIT_V2SI);
13599   ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
13600 				    short_integer_type_node,
13601 				    short_integer_type_node,
13602 				    short_integer_type_node, NULL_TREE);
13603   def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
13604 	       ftype, IX86_BUILTIN_VEC_INIT_V4HI);
13606   ftype = build_function_type_list (V8QI_type_node, char_type_node,
13607 				    char_type_node, char_type_node,
13608 				    char_type_node, char_type_node,
13609 				    char_type_node, char_type_node,
13610 				    char_type_node, NULL_TREE);
13611   def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
13612 	       ftype, IX86_BUILTIN_VEC_INIT_V8QI);
13614   /* Access to the vec_extract patterns.  */
13615   ftype = build_function_type_list (double_type_node, V2DF_type_node,
13616 				    integer_type_node, NULL_TREE);
13617   def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
13618 	       ftype, IX86_BUILTIN_VEC_EXT_V2DF);
13620   ftype = build_function_type_list (long_long_integer_type_node,
13621 				    V2DI_type_node, integer_type_node,
13623   def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
13624 	       ftype, IX86_BUILTIN_VEC_EXT_V2DI);
13626   ftype = build_function_type_list (float_type_node, V4SF_type_node,
13627 				    integer_type_node, NULL_TREE);
13628   def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
13629 	       ftype, IX86_BUILTIN_VEC_EXT_V4SF);
13631   ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
13632 				    integer_type_node, NULL_TREE);
13633   def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
13634 	       ftype, IX86_BUILTIN_VEC_EXT_V4SI);
13636   ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
13637 				    integer_type_node, NULL_TREE);
13638   def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
13639 	       ftype, IX86_BUILTIN_VEC_EXT_V8HI);
13641   ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
13642 				    integer_type_node, NULL_TREE);
13643   def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
13644 	       ftype, IX86_BUILTIN_VEC_EXT_V4HI);
13646   /* Access to the vec_set patterns.  */
13647   ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
13649 				    integer_type_node, NULL_TREE);
13650   def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
13651 	       ftype, IX86_BUILTIN_VEC_SET_V8HI);
13653   ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
13655 				    integer_type_node, NULL_TREE);
13656   def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
13657 	       ftype, IX86_BUILTIN_VEC_SET_V4HI);
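/* Illustrative sketch, not part of the original source: these are the
   builtins the intrinsic headers use for element access, e.g. a hypothetical
   wrapper along the lines of

     static __inline float first_elt (__v4sf x)
     {
       return __builtin_ia32_vec_ext_v4sf (x, 0);
     }

   The vec_init forms exist mainly so that MMX values can be constructed
   without the compiler emitting MMX moves on its own, as the comment before
   ix86_expand_vec_init_builtin explains further below.  */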
13660 /* Errors in the source file can cause expand_expr to return const0_rtx
13661    where we expect a vector.  To avoid crashing, use one of the vector
13662    clear instructions.  */
13664 safe_vector_operand (rtx x, enum machine_mode mode)
13666   if (x == const0_rtx)
13667     x = CONST0_RTX (mode);
13671 /* Subroutine of ix86_expand_builtin to take care of binop insns.  */
13674 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
13677   tree arg0 = TREE_VALUE (arglist);
13678   tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13679   rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13680   rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13681   enum machine_mode tmode = insn_data[icode].operand[0].mode;
13682   enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13683   enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13685   if (VECTOR_MODE_P (mode0))
13686     op0 = safe_vector_operand (op0, mode0);
13687   if (VECTOR_MODE_P (mode1))
13688     op1 = safe_vector_operand (op1, mode1);
13690   if (optimize || !target
13691       || GET_MODE (target) != tmode
13692       || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13693     target = gen_reg_rtx (tmode);
13695   if (GET_MODE (op1) == SImode && mode1 == TImode)
13697       rtx x = gen_reg_rtx (V4SImode);
13698       emit_insn (gen_sse2_loadd (x, op1));
13699       op1 = gen_lowpart (TImode, x);
13702   /* In case the insn wants input operands in modes different from
13703      the result, abort.  */
13704   if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode)
13705       || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode))
13708   if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13709     op0 = copy_to_mode_reg (mode0, op0);
13710   if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13711     op1 = copy_to_mode_reg (mode1, op1);
13713   /* ??? Using ix86_fixup_binary_operands is problematic when
13714      we've got mismatched modes.  Fake it.  */
13720   if (tmode == mode0 && tmode == mode1)
13722       target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
13726   else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
13728       op0 = force_reg (mode0, op0);
13729       op1 = force_reg (mode1, op1);
13730       target = gen_reg_rtx (tmode);
13733   pat = GEN_FCN (icode) (target, op0, op1);
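/* Illustrative sketch, not part of the original source: for a call such as
   __builtin_ia32_addps (a, b), ICODE is CODE_FOR_addv4sf3, so the code above
   expands the two arguments, forces them into operands accepted by the
   pattern's predicates, and finally emits roughly

     (set (reg:V4SF target) (plus:V4SF (reg:V4SF op0) (reg:V4SF op1)))

   via GEN_FCN (CODE_FOR_addv4sf3).  The SImode/TImode special case covers
   builtins whose insn expects the second operand in TImode while the user
   passes a plain int.  */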
13740 /* Subroutine of ix86_expand_builtin to take care of stores.  */
13743 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
13746   tree arg0 = TREE_VALUE (arglist);
13747   tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13748   rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13749   rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13750   enum machine_mode mode0 = insn_data[icode].operand[0].mode;
13751   enum machine_mode mode1 = insn_data[icode].operand[1].mode;
13753   if (VECTOR_MODE_P (mode1))
13754     op1 = safe_vector_operand (op1, mode1);
13756   op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13757   op1 = copy_to_mode_reg (mode1, op1);
13759   pat = GEN_FCN (icode) (op0, op1);
13765 /* Subroutine of ix86_expand_builtin to take care of unop insns.  */
13768 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
13769 			  rtx target, int do_load)
13772   tree arg0 = TREE_VALUE (arglist);
13773   rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13774   enum machine_mode tmode = insn_data[icode].operand[0].mode;
13775   enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13777   if (optimize || !target
13778       || GET_MODE (target) != tmode
13779       || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13780     target = gen_reg_rtx (tmode);
13782     op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13785       if (VECTOR_MODE_P (mode0))
13786 	op0 = safe_vector_operand (op0, mode0);
13788       if ((optimize && !register_operand (op0, mode0))
13789 	  || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13790 	op0 = copy_to_mode_reg (mode0, op0);
13793   pat = GEN_FCN (icode) (target, op0);
13800 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
13801    sqrtss, rsqrtss, rcpss.  */
13804 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
13807   tree arg0 = TREE_VALUE (arglist);
13808   rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13809   enum machine_mode tmode = insn_data[icode].operand[0].mode;
13810   enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13812   if (optimize || !target
13813       || GET_MODE (target) != tmode
13814       || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13815     target = gen_reg_rtx (tmode);
13817   if (VECTOR_MODE_P (mode0))
13818     op0 = safe_vector_operand (op0, mode0);
13820   if ((optimize && !register_operand (op0, mode0))
13821       || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13822     op0 = copy_to_mode_reg (mode0, op0);
13825   if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
13826     op1 = copy_to_mode_reg (mode0, op1);
13828   pat = GEN_FCN (icode) (target, op0, op1);
13835 /* Subroutine of ix86_expand_builtin to take care of comparison insns.  */
13838 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
13842   tree arg0 = TREE_VALUE (arglist);
13843   tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13844   rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13845   rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13847   enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
13848   enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
13849   enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
13850   enum rtx_code comparison = d->comparison;
13852   if (VECTOR_MODE_P (mode0))
13853     op0 = safe_vector_operand (op0, mode0);
13854   if (VECTOR_MODE_P (mode1))
13855     op1 = safe_vector_operand (op1, mode1);
13857   /* Swap operands if we have a comparison that isn't available in
13859   if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
13861       rtx tmp = gen_reg_rtx (mode1);
13862       emit_move_insn (tmp, op1);
13867   if (optimize || !target
13868       || GET_MODE (target) != tmode
13869       || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
13870     target = gen_reg_rtx (tmode);
13872   if ((optimize && !register_operand (op0, mode0))
13873       || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
13874     op0 = copy_to_mode_reg (mode0, op0);
13875   if ((optimize && !register_operand (op1, mode1))
13876       || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
13877     op1 = copy_to_mode_reg (mode1, op1);
13879   op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13880   pat = GEN_FCN (d->icode) (target, op0, op1, op2);
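/* Illustrative note, not part of the original source: the swap branch above
   only shows op1 being copied aside; the lines that complete the swap are
   among those omitted from this listing.  The net effect is that a
   hypothetical __builtin_ia32_cmpgtps (a, b) is emitted as the available LT
   compare with (b, a) as its operands, matching the table entries marked
   BUILTIN_DESC_SWAP_OPERANDS earlier.  */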
13887 /* Subroutine of ix86_expand_builtin to take care of comi insns.  */
13890 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
13894   tree arg0 = TREE_VALUE (arglist);
13895   tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13896   rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13897   rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13899   enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
13900   enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
13901   enum rtx_code comparison = d->comparison;
13903   if (VECTOR_MODE_P (mode0))
13904     op0 = safe_vector_operand (op0, mode0);
13905   if (VECTOR_MODE_P (mode1))
13906     op1 = safe_vector_operand (op1, mode1);
13908   /* Swap operands if we have a comparison that isn't available in
13910   if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
13917   target = gen_reg_rtx (SImode);
13918   emit_move_insn (target, const0_rtx);
13919   target = gen_rtx_SUBREG (QImode, target, 0);
13921   if ((optimize && !register_operand (op0, mode0))
13922       || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
13923     op0 = copy_to_mode_reg (mode0, op0);
13924   if ((optimize && !register_operand (op1, mode1))
13925       || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
13926     op1 = copy_to_mode_reg (mode1, op1);
13928   op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13929   pat = GEN_FCN (d->icode) (op0, op1);
13933   emit_insn (gen_rtx_SET (VOIDmode,
13934 			  gen_rtx_STRICT_LOW_PART (VOIDmode, target),
13935 			  gen_rtx_fmt_ee (comparison, QImode,
13939   return SUBREG_REG (target);
13942 /* Return the integer constant in ARG.  Constrain it to be in the range
13943    of the subparts of VEC_TYPE; issue an error if not.  */
13946 get_element_number (tree vec_type, tree arg)
13948   unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
13950   if (!host_integerp (arg, 1)
13951       || (elt = tree_low_cst (arg, 1), elt > max))
13953       error ("selector must be an integer constant in the range 0..%i", max);
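/* Illustrative note, not part of the original source: for example a call
   like __builtin_ia32_vec_ext_v4sf (x, 7) is rejected here with
   "selector must be an integer constant in the range 0..3", since a V4SF
   vector has TYPE_VECTOR_SUBPARTS of 4 and so MAX is 3.  */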
13960 /* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
13961    ix86_expand_vector_init.  We DO have language-level syntax for this, in
13962    the form of (type){ init-list }.  Except that since we can't place emms
13963    instructions from inside the compiler, we can't allow the use of MMX
13964    registers unless the user explicitly asks for it.  So we do *not* define
13965    vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md.  Instead
13966    we have builtins invoked by mmintrin.h that gives us license to emit
13967    these sorts of instructions.  */
13970 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
13972   enum machine_mode tmode = TYPE_MODE (type);
13973   enum machine_mode inner_mode = GET_MODE_INNER (tmode);
13974   int i, n_elt = GET_MODE_NUNITS (tmode);
13975   rtvec v = rtvec_alloc (n_elt);
13977   gcc_assert (VECTOR_MODE_P (tmode));
13979   for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
13981       rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
13982       RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
13985   gcc_assert (arglist == NULL);
13987   if (!target || !register_operand (target, tmode))
13988     target = gen_reg_rtx (tmode);
13990   ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
13994 /* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
13995    ix86_expand_vector_extract.  They would be redundant (for non-MMX) if we
13996    had a language-level syntax for referencing vector elements.  */
13999 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
14001   enum machine_mode tmode, mode0;
14006   arg0 = TREE_VALUE (arglist);
14007   arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14009   op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14010   elt = get_element_number (TREE_TYPE (arg0), arg1);
14012   tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14013   mode0 = TYPE_MODE (TREE_TYPE (arg0));
14014   gcc_assert (VECTOR_MODE_P (mode0));
14016   op0 = force_reg (mode0, op0);
14018   if (optimize || !target || !register_operand (target, tmode))
14019     target = gen_reg_rtx (tmode);
14021   ix86_expand_vector_extract (true, target, op0, elt);
14026 /* A subroutine of ix86_expand_builtin.  These builtins are a wrapper around
14027    ix86_expand_vector_set.  They would be redundant (for non-MMX) if we had
14028    a language-level syntax for referencing vector elements.  */
14031 ix86_expand_vec_set_builtin (tree arglist)
14033   enum machine_mode tmode, mode1;
14034   tree arg0, arg1, arg2;
14038   arg0 = TREE_VALUE (arglist);
14039   arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14040   arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14042   tmode = TYPE_MODE (TREE_TYPE (arg0));
14043   mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14044   gcc_assert (VECTOR_MODE_P (tmode));
14046   op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
14047   op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
14048   elt = get_element_number (TREE_TYPE (arg0), arg2);
14050   if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14051     op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14053   op0 = force_reg (tmode, op0);
14054   op1 = force_reg (mode1, op1);
14056   ix86_expand_vector_set (true, op0, op1, elt);
14061 /* Expand an expression EXP that calls a built-in function,
14062    with result going to TARGET if that's convenient
14063    (and in mode MODE if that's convenient).
14064    SUBTARGET may be used as the target for computing one of EXP's operands.
14065    IGNORE is nonzero if the value is to be ignored.  */
14068 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
14069 		     enum machine_mode mode ATTRIBUTE_UNUSED,
14070 		     int ignore ATTRIBUTE_UNUSED)
14072   const struct builtin_description *d;
14074   enum insn_code icode;
14075   tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
14076   tree arglist = TREE_OPERAND (exp, 1);
14077   tree arg0, arg1, arg2;
14078   rtx op0, op1, op2, pat;
14079   enum machine_mode tmode, mode0, mode1, mode2;
14080   unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14084     case IX86_BUILTIN_EMMS:
14085       emit_insn (gen_mmx_emms ());
14088     case IX86_BUILTIN_SFENCE:
14089       emit_insn (gen_sse_sfence ());
14092     case IX86_BUILTIN_MASKMOVQ:
14093     case IX86_BUILTIN_MASKMOVDQU:
14094       icode = (fcode == IX86_BUILTIN_MASKMOVQ
14095 	       ? CODE_FOR_mmx_maskmovq
14096 	       : CODE_FOR_sse2_maskmovdqu);
14097       /* Note the arg order is different from the operand order.  */
14098       arg1 = TREE_VALUE (arglist);
14099       arg2 = TREE_VALUE (TREE_CHAIN (arglist));
14100       arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14101       op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14102       op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14103       op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14104       mode0 = insn_data[icode].operand[0].mode;
14105       mode1 = insn_data[icode].operand[1].mode;
14106       mode2 = insn_data[icode].operand[2].mode;
14108       op0 = force_reg (Pmode, op0);
14109       op0 = gen_rtx_MEM (mode1, op0);
14111       if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14112 	op0 = copy_to_mode_reg (mode0, op0);
14113       if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14114 	op1 = copy_to_mode_reg (mode1, op1);
14115       if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
14116 	op2 = copy_to_mode_reg (mode2, op2);
14117       pat = GEN_FCN (icode) (op0, op1, op2);
14123     case IX86_BUILTIN_SQRTSS:
14124       return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
14125     case IX86_BUILTIN_RSQRTSS:
14126       return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
14127     case IX86_BUILTIN_RCPSS:
14128       return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
14130     case IX86_BUILTIN_LOADUPS:
14131       return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
14133     case IX86_BUILTIN_STOREUPS:
14134       return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
14136     case IX86_BUILTIN_LOADHPS:
14137     case IX86_BUILTIN_LOADLPS:
14138     case IX86_BUILTIN_LOADHPD:
14139     case IX86_BUILTIN_LOADLPD:
14140       icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
14141 	       : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
14142 	       : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
14143 	       : CODE_FOR_sse2_loadlpd);
14144       arg0 = TREE_VALUE (arglist);
14145       arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14146       op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14147       op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14148       tmode = insn_data[icode].operand[0].mode;
14149       mode0 = insn_data[icode].operand[1].mode;
14150       mode1 = insn_data[icode].operand[2].mode;
14152       op0 = force_reg (mode0, op0);
14153       op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
14154       if (optimize || target == 0
14155 	  || GET_MODE (target) != tmode
14156 	  || !register_operand (target, tmode))
14157 	target = gen_reg_rtx (tmode);
14158       pat = GEN_FCN (icode) (target, op0, op1);
14164     case IX86_BUILTIN_STOREHPS:
14165     case IX86_BUILTIN_STORELPS:
14166       icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
14167 	       : CODE_FOR_sse_storelps);
14168       arg0 = TREE_VALUE (arglist);
14169       arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14170       op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14171       op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14172       mode0 = insn_data[icode].operand[0].mode;
14173       mode1 = insn_data[icode].operand[1].mode;
14175       op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14176       op1 = force_reg (mode1, op1);
14178       pat = GEN_FCN (icode) (op0, op1);
14184     case IX86_BUILTIN_MOVNTPS:
14185       return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
14186     case IX86_BUILTIN_MOVNTQ:
14187       return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
14189     case IX86_BUILTIN_LDMXCSR:
14190       op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
14191       target = assign_386_stack_local (SImode, 0);
14192       emit_move_insn (target, op0);
14193       emit_insn (gen_sse_ldmxcsr (target));
14196     case IX86_BUILTIN_STMXCSR:
14197       target = assign_386_stack_local (SImode, 0);
14198       emit_insn (gen_sse_stmxcsr (target));
14199       return copy_to_mode_reg (SImode, target);
14201 case IX86_BUILTIN_SHUFPS
:
14202 case IX86_BUILTIN_SHUFPD
:
14203 icode
= (fcode
== IX86_BUILTIN_SHUFPS
14204 ? CODE_FOR_sse_shufps
14205 : CODE_FOR_sse2_shufpd
);
14206 arg0
= TREE_VALUE (arglist
);
14207 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
14208 arg2
= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist
)));
14209 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
14210 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
14211 op2
= expand_expr (arg2
, NULL_RTX
, VOIDmode
, 0);
14212 tmode
= insn_data
[icode
].operand
[0].mode
;
14213 mode0
= insn_data
[icode
].operand
[1].mode
;
14214 mode1
= insn_data
[icode
].operand
[2].mode
;
14215 mode2
= insn_data
[icode
].operand
[3].mode
;
14217 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14218 op0
= copy_to_mode_reg (mode0
, op0
);
14219 if ((optimize
&& !register_operand (op1
, mode1
))
14220 || !(*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14221 op1
= copy_to_mode_reg (mode1
, op1
);
14222 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
14224 /* @@@ better error message */
14225 error ("mask must be an immediate");
14226 return gen_reg_rtx (tmode
);
14228 if (optimize
|| target
== 0
14229 || GET_MODE (target
) != tmode
14230 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14231 target
= gen_reg_rtx (tmode
);
14232 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
);
14238 case IX86_BUILTIN_PSHUFW
:
14239 case IX86_BUILTIN_PSHUFD
:
14240 case IX86_BUILTIN_PSHUFHW
:
14241 case IX86_BUILTIN_PSHUFLW
:
14242 icode
= ( fcode
== IX86_BUILTIN_PSHUFHW
? CODE_FOR_sse2_pshufhw
14243 : fcode
== IX86_BUILTIN_PSHUFLW
? CODE_FOR_sse2_pshuflw
14244 : fcode
== IX86_BUILTIN_PSHUFD
? CODE_FOR_sse2_pshufd
14245 : CODE_FOR_mmx_pshufw
);
14246 arg0
= TREE_VALUE (arglist
);
14247 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
14248 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
14249 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
14250 tmode
= insn_data
[icode
].operand
[0].mode
;
14251 mode1
= insn_data
[icode
].operand
[1].mode
;
14252 mode2
= insn_data
[icode
].operand
[2].mode
;
14254 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode1
))
14255 op0
= copy_to_mode_reg (mode1
, op0
);
14256 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode2
))
14258 /* @@@ better error message */
14259 error ("mask must be an immediate");
14263 || GET_MODE (target
) != tmode
14264 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14265 target
= gen_reg_rtx (tmode
);
14266 pat
= GEN_FCN (icode
) (target
, op0
, op1
);
14272 case IX86_BUILTIN_PSLLDQI128
:
14273 case IX86_BUILTIN_PSRLDQI128
:
14274 icode
= ( fcode
== IX86_BUILTIN_PSLLDQI128
? CODE_FOR_sse2_ashlti3
14275 : CODE_FOR_sse2_lshrti3
);
14276 arg0
= TREE_VALUE (arglist
);
14277 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
14278 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
14279 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
14280 tmode
= insn_data
[icode
].operand
[0].mode
;
14281 mode1
= insn_data
[icode
].operand
[1].mode
;
14282 mode2
= insn_data
[icode
].operand
[2].mode
;
14284 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode1
))
14286 op0
= copy_to_reg (op0
);
14287 op0
= simplify_gen_subreg (mode1
, op0
, GET_MODE (op0
), 0);
14289 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode2
))
14291 error ("shift must be an immediate");
14294 target
= gen_reg_rtx (V2DImode
);
14295 pat
= GEN_FCN (icode
) (simplify_gen_subreg (tmode
, target
, V2DImode
, 0), op0
, op1
);
14301 case IX86_BUILTIN_FEMMS
:
14302 emit_insn (gen_mmx_femms ());
14305 case IX86_BUILTIN_PAVGUSB
:
14306 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3
, arglist
, target
);
14308 case IX86_BUILTIN_PF2ID
:
14309 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id
, arglist
, target
, 0);
14311 case IX86_BUILTIN_PFACC
:
14312 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3
, arglist
, target
);
14314 case IX86_BUILTIN_PFADD
:
14315 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3
, arglist
, target
);
14317 case IX86_BUILTIN_PFCMPEQ
:
14318 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3
, arglist
, target
);
14320 case IX86_BUILTIN_PFCMPGE
:
14321 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3
, arglist
, target
);
14323 case IX86_BUILTIN_PFCMPGT
:
14324 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3
, arglist
, target
);
14326 case IX86_BUILTIN_PFMAX
:
14327 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3
, arglist
, target
);
14329 case IX86_BUILTIN_PFMIN
:
14330 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3
, arglist
, target
);
14332 case IX86_BUILTIN_PFMUL
:
14333 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3
, arglist
, target
);
14335 case IX86_BUILTIN_PFRCP
:
14336 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2
, arglist
, target
, 0);
14338 case IX86_BUILTIN_PFRCPIT1
:
14339 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3
, arglist
, target
);
14341 case IX86_BUILTIN_PFRCPIT2
:
14342 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3
, arglist
, target
);
14344 case IX86_BUILTIN_PFRSQIT1
:
14345 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3
, arglist
, target
);
14347 case IX86_BUILTIN_PFRSQRT
:
14348 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2
, arglist
, target
, 0);
14350 case IX86_BUILTIN_PFSUB
:
14351 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3
, arglist
, target
);
14353 case IX86_BUILTIN_PFSUBR
:
14354 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3
, arglist
, target
);
14356 case IX86_BUILTIN_PI2FD
:
14357 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2
, arglist
, target
, 0);
14359 case IX86_BUILTIN_PMULHRW
:
14360 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3
, arglist
, target
);
14362 case IX86_BUILTIN_PF2IW
:
14363 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw
, arglist
, target
, 0);
14365 case IX86_BUILTIN_PFNACC
:
14366 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3
, arglist
, target
);
14368 case IX86_BUILTIN_PFPNACC
:
14369 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3
, arglist
, target
);
14371 case IX86_BUILTIN_PI2FW
:
14372 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw
, arglist
, target
, 0);
14374 case IX86_BUILTIN_PSWAPDSI
:
14375 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2
, arglist
, target
, 0);
14377 case IX86_BUILTIN_PSWAPDSF
:
14378 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2
, arglist
, target
, 0);
14380 case IX86_BUILTIN_SQRTSD
:
14381 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2
, arglist
, target
);
14382 case IX86_BUILTIN_LOADUPD
:
14383 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd
, arglist
, target
, 1);
14384 case IX86_BUILTIN_STOREUPD
:
14385 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd
, arglist
);
14387 case IX86_BUILTIN_MFENCE
:
14388 emit_insn (gen_sse2_mfence ());
14390 case IX86_BUILTIN_LFENCE
:
14391 emit_insn (gen_sse2_lfence ());
14394 case IX86_BUILTIN_CLFLUSH
:
14395 arg0
= TREE_VALUE (arglist
);
14396 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
14397 icode
= CODE_FOR_sse2_clflush
;
14398 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, Pmode
))
14399 op0
= copy_to_mode_reg (Pmode
, op0
);
14401 emit_insn (gen_sse2_clflush (op0
));
14404 case IX86_BUILTIN_MOVNTPD
:
14405 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df
, arglist
);
14406 case IX86_BUILTIN_MOVNTDQ
:
14407 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di
, arglist
);
14408 case IX86_BUILTIN_MOVNTI
:
14409 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi
, arglist
);
14411 case IX86_BUILTIN_LOADDQU
:
14412 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu
, arglist
, target
, 1);
14413 case IX86_BUILTIN_STOREDQU
:
14414 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu
, arglist
);
14416 case IX86_BUILTIN_MONITOR
:
14417 arg0
= TREE_VALUE (arglist
);
14418 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
14419 arg2
= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist
)));
14420 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
14421 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
14422 op2
= expand_expr (arg2
, NULL_RTX
, VOIDmode
, 0);
14424 op0
= copy_to_mode_reg (SImode
, op0
);
14426 op1
= copy_to_mode_reg (SImode
, op1
);
14428 op2
= copy_to_mode_reg (SImode
, op2
);
14429 emit_insn (gen_sse3_monitor (op0
, op1
, op2
));
14432 case IX86_BUILTIN_MWAIT
:
14433 arg0
= TREE_VALUE (arglist
);
14434 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
14435 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
14436 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
14438 op0
= copy_to_mode_reg (SImode
, op0
);
14440 op1
= copy_to_mode_reg (SImode
, op1
);
14441 emit_insn (gen_sse3_mwait (op0
, op1
));
14444 case IX86_BUILTIN_LDDQU
:
14445 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu
, arglist
,
14448 case IX86_BUILTIN_VEC_INIT_V2SI
:
14449 case IX86_BUILTIN_VEC_INIT_V4HI
:
14450 case IX86_BUILTIN_VEC_INIT_V8QI
:
14451 return ix86_expand_vec_init_builtin (TREE_TYPE (exp
), arglist
, target
);
14453 case IX86_BUILTIN_VEC_EXT_V2DF
:
14454 case IX86_BUILTIN_VEC_EXT_V2DI
:
14455 case IX86_BUILTIN_VEC_EXT_V4SF
:
14456 case IX86_BUILTIN_VEC_EXT_V4SI
:
14457 case IX86_BUILTIN_VEC_EXT_V8HI
:
14458 case IX86_BUILTIN_VEC_EXT_V4HI
:
14459 return ix86_expand_vec_ext_builtin (arglist
, target
);
14461 case IX86_BUILTIN_VEC_SET_V8HI
:
14462 case IX86_BUILTIN_VEC_SET_V4HI
:
14463 return ix86_expand_vec_set_builtin (arglist
);
  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      {
        /* Compares are treated specially.  */
        if (d->icode == CODE_FOR_sse_maskcmpv4sf3
            || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
            || d->icode == CODE_FOR_sse2_maskcmpv2df3
            || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
          return ix86_expand_sse_compare (d, arglist, target);

        return ix86_expand_binop_builtin (d->icode, arglist, target);
      }

  for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return ix86_expand_unop_builtin (d->icode, arglist, target, 0);

  for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
    if (d->code == fcode)
      return ix86_expand_sse_comi (d, arglist, target);

  gcc_unreachable ();
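
/* Illustrative sketch (not part of GCC itself): how one of the builtins
   dispatched above is typically reached from user code.  Assuming the usual
   <xmmintrin.h> definitions, _mm_sqrt_ss expands to __builtin_ia32_sqrtss,
   which arrives here as IX86_BUILTIN_SQRTSS and is expanded through
   ix86_expand_unop1_builtin with CODE_FOR_sse_vmsqrtv4sf2:

       #include <xmmintrin.h>

       float
       sqrt_of_first_element (__m128 v)
       {
         float out;
         _mm_store_ss (&out, _mm_sqrt_ss (v));   (* compiles to a single sqrtss *)
         return out;
       }

   The intrinsic-to-builtin mapping is an assumption based on the common
   xmmintrin.h implementation; this file only expands the builtin itself.  */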
/* Store OPERAND to the memory after reload is completed.  This means
   that we can't easily use assign_stack_local.  */

ix86_force_to_memory (enum machine_mode mode, rtx operand)
  if (!reload_completed)
  if (TARGET_RED_ZONE)
      result = gen_rtx_MEM (mode,
                            gen_rtx_PLUS (Pmode,
                                          GEN_INT (-RED_ZONE_SIZE)));
      emit_move_insn (result, operand);
  else if (!TARGET_RED_ZONE && TARGET_64BIT)
          operand = gen_lowpart (DImode, operand);
          gen_rtx_SET (VOIDmode,
                       gen_rtx_MEM (DImode,
                                    gen_rtx_PRE_DEC (DImode,
                                                     stack_pointer_rtx)),
      result = gen_rtx_MEM (mode, stack_pointer_rtx);
          split_di (&operand, 1, operands, operands + 1);
          gen_rtx_SET (VOIDmode,
                       gen_rtx_MEM (SImode,
                                    gen_rtx_PRE_DEC (Pmode,
                                                     stack_pointer_rtx)),
          gen_rtx_SET (VOIDmode,
                       gen_rtx_MEM (SImode,
                                    gen_rtx_PRE_DEC (Pmode,
                                                     stack_pointer_rtx)),
          /* It is better to store HImodes as SImodes.  */
          if (!TARGET_PARTIAL_REG_STALL)
            operand = gen_lowpart (SImode, operand);
          gen_rtx_SET (VOIDmode,
                       gen_rtx_MEM (GET_MODE (operand),
                                    gen_rtx_PRE_DEC (SImode,
                                                     stack_pointer_rtx)),
      result = gen_rtx_MEM (mode, stack_pointer_rtx);

/* Free operand from the memory.  */

ix86_free_from_memory (enum machine_mode mode)
  if (!TARGET_RED_ZONE)
      if (mode == DImode || TARGET_64BIT)
      else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
      /* Use LEA to deallocate stack space.  In peephole2 it will be converted
         to pop or add instruction if registers are available.  */
      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                              gen_rtx_PLUS (Pmode, stack_pointer_rtx,
/* Put float CONST_DOUBLE in the constant pool instead of fp regs.
   QImode must go into class Q_REGS.
   Narrow ALL_REGS to GENERAL_REGS.  This supports allowing movsf and
   movdf to do mem-to-mem moves through integer regs.  */

ix86_preferred_reload_class (rtx x, enum reg_class class)
  if (class == NO_REGS)
  if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x)))
  if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
      /* SSE can't load any constant directly yet.  */
      if (SSE_CLASS_P (class))
      /* Floats can load 0 and 1.  */
      if (MAYBE_FLOAT_CLASS_P (class) && standard_80387_constant_p (x))
      /* Limit class to non-SSE.  Use GENERAL_REGS if possible.  */
      if (MAYBE_SSE_CLASS_P (class))
        return (reg_class_subset_p (class, GENERAL_REGS)
                ? GENERAL_REGS : FLOAT_REGS);
  /* General regs can load everything.  */
  if (reg_class_subset_p (class, GENERAL_REGS))
  /* In case we haven't resolved FLOAT or SSE yet, give up.  */
  if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class))
  if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
  if (GET_MODE (x) == QImode && ! reg_class_subset_p (class, Q_REGS))
/* If we are copying between general and FP registers, we need a memory
   location.  The same is true for SSE and MMX registers.

   The macro can't work reliably when one of the CLASSES is a class containing
   registers from multiple units (SSE, MMX, integer).  We avoid this by never
   combining those units in a single alternative in the machine description.
   Ensure that this constraint holds to avoid unexpected surprises.

   When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
   enforce these sanity checks.  */

ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
                              enum machine_mode mode, int strict)
  if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
      || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
      || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
      || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
      || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
      || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))

  if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))

  /* ??? This is a lie.  We do have moves between mmx/general, and for
     mmx/sse2.  But by saying we need secondary memory we discourage the
     register allocator from using the mmx registers unless needed.  */
  if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))

  if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
      /* SSE1 doesn't have any direct moves from other classes.  */

      /* If the target says that inter-unit moves are more expensive
         than moving through memory, then don't generate them.  */
      if (!TARGET_INTER_UNIT_MOVES && !optimize_size)

      /* Between SSE and general, we have moves no larger than word size.  */
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)

      /* ??? For the cost of one register reformat penalty, we could use
         the same instructions to move SFmode and DFmode data, but the
         relevant move patterns don't support those alternatives.  */
      if (mode == SFmode || mode == DFmode)
/* Return the cost of moving data from a register in class CLASS1 to
   one in class CLASS2.

   It is not required that the cost always equal 2 when FROM is the same as TO;
   on some machines it is expensive to move between registers if they are not
   general registers.  */

ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
                         enum reg_class class2)
  /* In case we require secondary memory, compute cost of the store followed
     by load.  In order to avoid bad register allocation choices, we need
     for this to be *at least* as high as the symmetric MEMORY_MOVE_COST.  */

  if (ix86_secondary_memory_needed (class1, class2, mode, 0))
      cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
                   MEMORY_MOVE_COST (mode, class1, 1));
      cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
                   MEMORY_MOVE_COST (mode, class2, 1));

      /* In case of copying from general_purpose_register we may emit multiple
         stores followed by single load causing memory size mismatch stall.
         Count this as arbitrarily high cost of 20.  */
      if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))

      /* In the case of FP/MMX moves, the registers actually overlap, and we
         have to switch modes in order to treat them differently.  */
      if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
          || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))

  /* Moves between SSE/MMX and integer unit are expensive.  */
  if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
      || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
    return ix86_cost->mmxsse_to_integer;
  if (MAYBE_FLOAT_CLASS_P (class1))
    return ix86_cost->fp_move;
  if (MAYBE_SSE_CLASS_P (class1))
    return ix86_cost->sse_move;
  if (MAYBE_MMX_CLASS_P (class1))
    return ix86_cost->mmx_move;
/* Return 1 if hard register REGNO can hold a value of machine-mode MODE.  */

ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
  /* Flags and only flags can only hold CCmode values.  */
  if (CC_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;
  if (GET_MODE_CLASS (mode) == MODE_CC
      || GET_MODE_CLASS (mode) == MODE_RANDOM
      || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
  if (FP_REGNO_P (regno))
    return VALID_FP_MODE_P (mode);
  if (SSE_REGNO_P (regno))
      /* We implement the move patterns for all vector modes into and
         out of SSE registers, even when no operation instructions
         are available.  */
      return (VALID_SSE_REG_MODE (mode)
              || VALID_SSE2_REG_MODE (mode)
              || VALID_MMX_REG_MODE (mode)
              || VALID_MMX_REG_MODE_3DNOW (mode));
  if (MMX_REGNO_P (regno))
      /* We implement the move patterns for 3DNOW modes even in MMX mode,
         so if the register is available at all, then we can move data of
         the given mode into or out of it.  */
      return (VALID_MMX_REG_MODE (mode)
              || VALID_MMX_REG_MODE_3DNOW (mode));
  /* We handle both integer and floats in the general purpose registers.
     In future we should be able to handle vector modes as well.  */
  if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
  /* Take care for QImode values - they can be in non-QI regs, but then
     they do cause partial register stalls.  */
  if (regno < 4 || mode != QImode || TARGET_64BIT)
  return reload_in_progress || reload_completed || !TARGET_PARTIAL_REG_STALL;
/* Return the cost of moving data of mode M between a
   register and memory.  A value of 2 is the default; this cost is
   relative to those in `REGISTER_MOVE_COST'.

   If moving between registers and memory is more expensive than
   between two registers, you should define this macro to express the
   relative cost.

   Model also increased moving costs of QImode registers in non
   Q_REGS classes.  */

ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
  if (FLOAT_CLASS_P (class))
      return in ? ix86_cost->fp_load[index] : ix86_cost->fp_store[index];
  if (SSE_CLASS_P (class))
      switch (GET_MODE_SIZE (mode))
      return in ? ix86_cost->sse_load[index] : ix86_cost->sse_store[index];
  if (MMX_CLASS_P (class))
      switch (GET_MODE_SIZE (mode))
      return in ? ix86_cost->mmx_load[index] : ix86_cost->mmx_store[index];
  switch (GET_MODE_SIZE (mode))
        return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
                : ix86_cost->movzbl_load);
        return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
                : ix86_cost->int_store[0] + 4);
      return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
      /* Compute number of 32bit moves needed.  TFmode is moved as XFmode.  */
      if (mode == TFmode)
      return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
              * (((int) GET_MODE_SIZE (mode)
                  + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
14880 /* Compute a (partial) cost for rtx X. Return true if the complete
14881 cost has been computed, and false if subexpressions should be
14882 scanned. In either case, *TOTAL contains the cost result. */
14885 ix86_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
14887 enum machine_mode mode
= GET_MODE (x
);
14895 if (TARGET_64BIT
&& !x86_64_immediate_operand (x
, VOIDmode
))
14897 else if (TARGET_64BIT
&& !x86_64_zext_immediate_operand (x
, VOIDmode
))
14899 else if (flag_pic
&& SYMBOLIC_CONST (x
)
14901 || (!GET_CODE (x
) != LABEL_REF
14902 && (GET_CODE (x
) != SYMBOL_REF
14903 || !SYMBOL_REF_LOCAL_P (x
)))))
14910 if (mode
== VOIDmode
)
14913 switch (standard_80387_constant_p (x
))
14918 default: /* Other constants */
14923 /* Start with (MEM (SYMBOL_REF)), since that's where
14924 it'll probably end up. Add a penalty for size. */
14925 *total
= (COSTS_N_INSNS (1)
14926 + (flag_pic
!= 0 && !TARGET_64BIT
)
14927 + (mode
== SFmode
? 0 : mode
== DFmode
? 1 : 2));
14933 /* The zero extensions is often completely free on x86_64, so make
14934 it as cheap as possible. */
14935 if (TARGET_64BIT
&& mode
== DImode
14936 && GET_MODE (XEXP (x
, 0)) == SImode
)
14938 else if (TARGET_ZERO_EXTEND_WITH_AND
)
14939 *total
= COSTS_N_INSNS (ix86_cost
->add
);
14941 *total
= COSTS_N_INSNS (ix86_cost
->movzx
);
14945 *total
= COSTS_N_INSNS (ix86_cost
->movsx
);
14949 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
14950 && (GET_MODE (XEXP (x
, 0)) != DImode
|| TARGET_64BIT
))
14952 HOST_WIDE_INT value
= INTVAL (XEXP (x
, 1));
14955 *total
= COSTS_N_INSNS (ix86_cost
->add
);
14958 if ((value
== 2 || value
== 3)
14959 && ix86_cost
->lea
<= ix86_cost
->shift_const
)
14961 *total
= COSTS_N_INSNS (ix86_cost
->lea
);
14971 if (!TARGET_64BIT
&& GET_MODE (XEXP (x
, 0)) == DImode
)
14973 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
14975 if (INTVAL (XEXP (x
, 1)) > 32)
14976 *total
= COSTS_N_INSNS(ix86_cost
->shift_const
+ 2);
14978 *total
= COSTS_N_INSNS(ix86_cost
->shift_const
* 2);
14982 if (GET_CODE (XEXP (x
, 1)) == AND
)
14983 *total
= COSTS_N_INSNS(ix86_cost
->shift_var
* 2);
14985 *total
= COSTS_N_INSNS(ix86_cost
->shift_var
* 6 + 2);
14990 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
14991 *total
= COSTS_N_INSNS (ix86_cost
->shift_const
);
14993 *total
= COSTS_N_INSNS (ix86_cost
->shift_var
);
14998 if (FLOAT_MODE_P (mode
))
15000 *total
= COSTS_N_INSNS (ix86_cost
->fmul
);
15005 rtx op0
= XEXP (x
, 0);
15006 rtx op1
= XEXP (x
, 1);
15008 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
15010 unsigned HOST_WIDE_INT value
= INTVAL (XEXP (x
, 1));
15011 for (nbits
= 0; value
!= 0; value
&= value
- 1)
15015 /* This is arbitrary. */
15018 /* Compute costs correctly for widening multiplication. */
15019 if ((GET_CODE (op0
) == SIGN_EXTEND
|| GET_CODE (op1
) == ZERO_EXTEND
)
15020 && GET_MODE_SIZE (GET_MODE (XEXP (op0
, 0))) * 2
15021 == GET_MODE_SIZE (mode
))
15023 int is_mulwiden
= 0;
15024 enum machine_mode inner_mode
= GET_MODE (op0
);
15026 if (GET_CODE (op0
) == GET_CODE (op1
))
15027 is_mulwiden
= 1, op1
= XEXP (op1
, 0);
15028 else if (GET_CODE (op1
) == CONST_INT
)
15030 if (GET_CODE (op0
) == SIGN_EXTEND
)
15031 is_mulwiden
= trunc_int_for_mode (INTVAL (op1
), inner_mode
)
15034 is_mulwiden
= !(INTVAL (op1
) & ~GET_MODE_MASK (inner_mode
));
15038 op0
= XEXP (op0
, 0), mode
= GET_MODE (op0
);
15041 *total
= COSTS_N_INSNS (ix86_cost
->mult_init
[MODE_INDEX (mode
)]
15042 + nbits
* ix86_cost
->mult_bit
)
15043 + rtx_cost (op0
, outer_code
) + rtx_cost (op1
, outer_code
);
15052 if (FLOAT_MODE_P (mode
))
15053 *total
= COSTS_N_INSNS (ix86_cost
->fdiv
);
15055 *total
= COSTS_N_INSNS (ix86_cost
->divide
[MODE_INDEX (mode
)]);
15059 if (FLOAT_MODE_P (mode
))
15060 *total
= COSTS_N_INSNS (ix86_cost
->fadd
);
15061 else if (GET_MODE_CLASS (mode
) == MODE_INT
15062 && GET_MODE_BITSIZE (mode
) <= GET_MODE_BITSIZE (Pmode
))
15064 if (GET_CODE (XEXP (x
, 0)) == PLUS
15065 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
15066 && GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)) == CONST_INT
15067 && CONSTANT_P (XEXP (x
, 1)))
15069 HOST_WIDE_INT val
= INTVAL (XEXP (XEXP (XEXP (x
, 0), 0), 1));
15070 if (val
== 2 || val
== 4 || val
== 8)
15072 *total
= COSTS_N_INSNS (ix86_cost
->lea
);
15073 *total
+= rtx_cost (XEXP (XEXP (x
, 0), 1), outer_code
);
15074 *total
+= rtx_cost (XEXP (XEXP (XEXP (x
, 0), 0), 0),
15076 *total
+= rtx_cost (XEXP (x
, 1), outer_code
);
15080 else if (GET_CODE (XEXP (x
, 0)) == MULT
15081 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)
15083 HOST_WIDE_INT val
= INTVAL (XEXP (XEXP (x
, 0), 1));
15084 if (val
== 2 || val
== 4 || val
== 8)
15086 *total
= COSTS_N_INSNS (ix86_cost
->lea
);
15087 *total
+= rtx_cost (XEXP (XEXP (x
, 0), 0), outer_code
);
15088 *total
+= rtx_cost (XEXP (x
, 1), outer_code
);
15092 else if (GET_CODE (XEXP (x
, 0)) == PLUS
)
15094 *total
= COSTS_N_INSNS (ix86_cost
->lea
);
15095 *total
+= rtx_cost (XEXP (XEXP (x
, 0), 0), outer_code
);
15096 *total
+= rtx_cost (XEXP (XEXP (x
, 0), 1), outer_code
);
15097 *total
+= rtx_cost (XEXP (x
, 1), outer_code
);
15104 if (FLOAT_MODE_P (mode
))
15106 *total
= COSTS_N_INSNS (ix86_cost
->fadd
);
15114 if (!TARGET_64BIT
&& mode
== DImode
)
15116 *total
= (COSTS_N_INSNS (ix86_cost
->add
) * 2
15117 + (rtx_cost (XEXP (x
, 0), outer_code
)
15118 << (GET_MODE (XEXP (x
, 0)) != DImode
))
15119 + (rtx_cost (XEXP (x
, 1), outer_code
)
15120 << (GET_MODE (XEXP (x
, 1)) != DImode
)));
15126 if (FLOAT_MODE_P (mode
))
15128 *total
= COSTS_N_INSNS (ix86_cost
->fchs
);
15134 if (!TARGET_64BIT
&& mode
== DImode
)
15135 *total
= COSTS_N_INSNS (ix86_cost
->add
* 2);
15137 *total
= COSTS_N_INSNS (ix86_cost
->add
);
15141 if (GET_CODE (XEXP (x
, 0)) == ZERO_EXTRACT
15142 && XEXP (XEXP (x
, 0), 1) == const1_rtx
15143 && GET_CODE (XEXP (XEXP (x
, 0), 2)) == CONST_INT
15144 && XEXP (x
, 1) == const0_rtx
)
15146 /* This kind of construct is implemented using test[bwl].
15147 Treat it as if we had an AND. */
15148 *total
= (COSTS_N_INSNS (ix86_cost
->add
)
15149 + rtx_cost (XEXP (XEXP (x
, 0), 0), outer_code
)
15150 + rtx_cost (const1_rtx
, outer_code
));
15156 if (!TARGET_SSE_MATH
15158 || (mode
== DFmode
&& !TARGET_SSE2
))
15163 if (FLOAT_MODE_P (mode
))
15164 *total
= COSTS_N_INSNS (ix86_cost
->fabs
);
15168 if (FLOAT_MODE_P (mode
))
15169 *total
= COSTS_N_INSNS (ix86_cost
->fsqrt
);
15173 if (XINT (x
, 1) == UNSPEC_TP
)
15184 static int current_machopic_label_num
;
15186 /* Given a symbol name and its associated stub, write out the
15187 definition of the stub. */
15190 machopic_output_stub (FILE *file
, const char *symb
, const char *stub
)
15192 unsigned int length
;
15193 char *binder_name
, *symbol_name
, lazy_ptr_name
[32];
15194 int label
= ++current_machopic_label_num
;
15196 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
15197 symb
= (*targetm
.strip_name_encoding
) (symb
);
15199 length
= strlen (stub
);
15200 binder_name
= alloca (length
+ 32);
15201 GEN_BINDER_NAME_FOR_STUB (binder_name
, stub
, length
);
15203 length
= strlen (symb
);
15204 symbol_name
= alloca (length
+ 32);
15205 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name
, symb
, length
);
15207 sprintf (lazy_ptr_name
, "L%d$lz", label
);
15210 machopic_picsymbol_stub_section ();
15212 machopic_symbol_stub_section ();
15214 fprintf (file
, "%s:\n", stub
);
15215 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
15219 fprintf (file
, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label
, label
);
15220 fprintf (file
, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name
, label
);
15221 fprintf (file
, "\tjmp %%edx\n");
15224 fprintf (file
, "\tjmp *%s\n", lazy_ptr_name
);
15226 fprintf (file
, "%s:\n", binder_name
);
15230 fprintf (file
, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name
, label
);
15231 fprintf (file
, "\tpushl %%eax\n");
15234 fprintf (file
, "\t pushl $%s\n", lazy_ptr_name
);
15236 fprintf (file
, "\tjmp dyld_stub_binding_helper\n");
15238 machopic_lazy_symbol_ptr_section ();
15239 fprintf (file
, "%s:\n", lazy_ptr_name
);
15240 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
15241 fprintf (file
, "\t.long %s\n", binder_name
);
15243 #endif /* TARGET_MACHO */
15245 /* Order the registers for register allocator. */
15248 x86_order_regs_for_local_alloc (void)
15253 /* First allocate the local general purpose registers. */
15254 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
15255 if (GENERAL_REGNO_P (i
) && call_used_regs
[i
])
15256 reg_alloc_order
[pos
++] = i
;
15258 /* Global general purpose registers. */
15259 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
15260 if (GENERAL_REGNO_P (i
) && !call_used_regs
[i
])
15261 reg_alloc_order
[pos
++] = i
;
15263 /* x87 registers come first in case we are doing FP math
15265 if (!TARGET_SSE_MATH
)
15266 for (i
= FIRST_STACK_REG
; i
<= LAST_STACK_REG
; i
++)
15267 reg_alloc_order
[pos
++] = i
;
15269 /* SSE registers. */
15270 for (i
= FIRST_SSE_REG
; i
<= LAST_SSE_REG
; i
++)
15271 reg_alloc_order
[pos
++] = i
;
15272 for (i
= FIRST_REX_SSE_REG
; i
<= LAST_REX_SSE_REG
; i
++)
15273 reg_alloc_order
[pos
++] = i
;
15275 /* x87 registers. */
15276 if (TARGET_SSE_MATH
)
15277 for (i
= FIRST_STACK_REG
; i
<= LAST_STACK_REG
; i
++)
15278 reg_alloc_order
[pos
++] = i
;
15280 for (i
= FIRST_MMX_REG
; i
<= LAST_MMX_REG
; i
++)
15281 reg_alloc_order
[pos
++] = i
;
15283 /* Initialize the rest of array as we do not allocate some registers
15285 while (pos
< FIRST_PSEUDO_REGISTER
)
15286 reg_alloc_order
[pos
++] = 0;
15289 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
15290 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
15293 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
15294 struct attribute_spec.handler. */
15296 ix86_handle_struct_attribute (tree
*node
, tree name
,
15297 tree args ATTRIBUTE_UNUSED
,
15298 int flags ATTRIBUTE_UNUSED
, bool *no_add_attrs
)
15301 if (DECL_P (*node
))
15303 if (TREE_CODE (*node
) == TYPE_DECL
)
15304 type
= &TREE_TYPE (*node
);
15309 if (!(type
&& (TREE_CODE (*type
) == RECORD_TYPE
15310 || TREE_CODE (*type
) == UNION_TYPE
)))
15312 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name
));
15313 *no_add_attrs
= true;
15316 else if ((is_attribute_p ("ms_struct", name
)
15317 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type
)))
15318 || ((is_attribute_p ("gcc_struct", name
)
15319 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type
)))))
15321 warning ("%qs incompatible attribute ignored",
15322 IDENTIFIER_POINTER (name
));
15323 *no_add_attrs
= true;
15330 ix86_ms_bitfield_layout_p (tree record_type
)
15332 return (TARGET_USE_MS_BITFIELD_LAYOUT
&&
15333 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type
)))
15334 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type
));
15337 /* Returns an expression indicating where the this parameter is
15338 located on entry to the FUNCTION. */
15341 x86_this_parameter (tree function
)
15343 tree type
= TREE_TYPE (function
);
15347 int n
= aggregate_value_p (TREE_TYPE (type
), type
) != 0;
15348 return gen_rtx_REG (DImode
, x86_64_int_parameter_registers
[n
]);
15351 if (ix86_function_regparm (type
, function
) > 0)
15355 parm
= TYPE_ARG_TYPES (type
);
15356 /* Figure out whether or not the function has a variable number of
15358 for (; parm
; parm
= TREE_CHAIN (parm
))
15359 if (TREE_VALUE (parm
) == void_type_node
)
15361 /* If not, the this parameter is in the first argument. */
15365 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type
)))
15367 return gen_rtx_REG (SImode
, regno
);
15371 if (aggregate_value_p (TREE_TYPE (type
), type
))
15372 return gen_rtx_MEM (SImode
, plus_constant (stack_pointer_rtx
, 8));
15374 return gen_rtx_MEM (SImode
, plus_constant (stack_pointer_rtx
, 4));
15377 /* Determine whether x86_output_mi_thunk can succeed. */
15380 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED
,
15381 HOST_WIDE_INT delta ATTRIBUTE_UNUSED
,
15382 HOST_WIDE_INT vcall_offset
, tree function
)
15384 /* 64-bit can handle anything. */
15388 /* For 32-bit, everything's fine if we have one free register. */
15389 if (ix86_function_regparm (TREE_TYPE (function
), function
) < 3)
15392 /* Need a free register for vcall_offset. */
15396 /* Need a free register for GOT references. */
15397 if (flag_pic
&& !(*targetm
.binds_local_p
) (function
))
15400 /* Otherwise ok. */
15404 /* Output the assembler code for a thunk function. THUNK_DECL is the
15405 declaration for the thunk function itself, FUNCTION is the decl for
15406 the target function. DELTA is an immediate constant offset to be
15407 added to THIS. If VCALL_OFFSET is nonzero, the word at
15408 *(*this + vcall_offset) should be added to THIS. */
15411 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED
,
15412 tree thunk ATTRIBUTE_UNUSED
, HOST_WIDE_INT delta
,
15413 HOST_WIDE_INT vcall_offset
, tree function
)
15416 rtx
this = x86_this_parameter (function
);
15419 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
15420 pull it in now and let DELTA benefit. */
15423 else if (vcall_offset
)
15425 /* Put the this parameter into %eax. */
15427 xops
[1] = this_reg
= gen_rtx_REG (Pmode
, 0);
15428 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops
);
15431 this_reg
= NULL_RTX
;
15433 /* Adjust the this parameter by a fixed constant. */
15436 xops
[0] = GEN_INT (delta
);
15437 xops
[1] = this_reg
? this_reg
: this;
15440 if (!x86_64_general_operand (xops
[0], DImode
))
15442 tmp
= gen_rtx_REG (DImode
, FIRST_REX_INT_REG
+ 2 /* R10 */);
15444 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops
);
15448 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops
);
15451 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops
);
15454 /* Adjust the this parameter by a value stored in the vtable. */
15458 tmp
= gen_rtx_REG (DImode
, FIRST_REX_INT_REG
+ 2 /* R10 */);
15461 int tmp_regno
= 2 /* ECX */;
15462 if (lookup_attribute ("fastcall",
15463 TYPE_ATTRIBUTES (TREE_TYPE (function
))))
15464 tmp_regno
= 0 /* EAX */;
15465 tmp
= gen_rtx_REG (SImode
, tmp_regno
);
15468 xops
[0] = gen_rtx_MEM (Pmode
, this_reg
);
15471 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops
);
15473 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops
);
15475 /* Adjust the this parameter. */
15476 xops
[0] = gen_rtx_MEM (Pmode
, plus_constant (tmp
, vcall_offset
));
15477 if (TARGET_64BIT
&& !memory_operand (xops
[0], Pmode
))
15479 rtx tmp2
= gen_rtx_REG (DImode
, FIRST_REX_INT_REG
+ 3 /* R11 */);
15480 xops
[0] = GEN_INT (vcall_offset
);
15482 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops
);
15483 xops
[0] = gen_rtx_MEM (Pmode
, gen_rtx_PLUS (Pmode
, tmp
, tmp2
));
15485 xops
[1] = this_reg
;
15487 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops
);
15489 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops
);
15492 /* If necessary, drop THIS back to its stack slot. */
15493 if (this_reg
&& this_reg
!= this)
15495 xops
[0] = this_reg
;
15497 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops
);
15500 xops
[0] = XEXP (DECL_RTL (function
), 0);
15503 if (!flag_pic
|| (*targetm
.binds_local_p
) (function
))
15504 output_asm_insn ("jmp\t%P0", xops
);
15507 tmp
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, xops
[0]), UNSPEC_GOTPCREL
);
15508 tmp
= gen_rtx_CONST (Pmode
, tmp
);
15509 tmp
= gen_rtx_MEM (QImode
, tmp
);
15511 output_asm_insn ("jmp\t%A0", xops
);
15516 if (!flag_pic
|| (*targetm
.binds_local_p
) (function
))
15517 output_asm_insn ("jmp\t%P0", xops
);
15522 rtx sym_ref
= XEXP (DECL_RTL (function
), 0);
15523 tmp
= (gen_rtx_SYMBOL_REF
15525 machopic_indirection_name (sym_ref
, /*stub_p=*/true)));
15526 tmp
= gen_rtx_MEM (QImode
, tmp
);
15528 output_asm_insn ("jmp\t%0", xops
);
15531 #endif /* TARGET_MACHO */
15533 tmp
= gen_rtx_REG (SImode
, 2 /* ECX */);
15534 output_set_got (tmp
);
15537 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops
);
15538 output_asm_insn ("jmp\t{*}%1", xops
);
x86_file_start (void)
  default_file_start ();
  if (X86_FILE_START_VERSION_DIRECTIVE)
    fputs ("\t.version\t\"01.01\"\n", asm_out_file);
  if (X86_FILE_START_FLTUSED)
    fputs ("\t.global\t__fltused\n", asm_out_file);
  if (ix86_asm_dialect == ASM_INTEL)
    fputs ("\t.intel_syntax\n", asm_out_file);

x86_field_alignment (tree field, int computed)
  enum machine_mode mode;
  tree type = TREE_TYPE (field);

  if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
  mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
                    ? get_inner_array_type (type) : type);
  if (mode == DFmode || mode == DCmode
      || GET_MODE_CLASS (mode) == MODE_INT
      || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
    return MIN (32, computed);
15572 /* Output assembler code to FILE to increment profiler label # LABELNO
15573 for profiling a function entry. */
15575 x86_function_profiler (FILE *file
, int labelno ATTRIBUTE_UNUSED
)
15580 #ifndef NO_PROFILE_COUNTERS
15581 fprintf (file
, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX
, labelno
);
15583 fprintf (file
, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME
);
15587 #ifndef NO_PROFILE_COUNTERS
15588 fprintf (file
, "\tmovq\t$%sP%d,%%r11\n", LPREFIX
, labelno
);
15590 fprintf (file
, "\tcall\t%s\n", MCOUNT_NAME
);
15594 #ifndef NO_PROFILE_COUNTERS
15595 fprintf (file
, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
15596 LPREFIX
, labelno
, PROFILE_COUNT_REGISTER
);
15598 fprintf (file
, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME
);
15602 #ifndef NO_PROFILE_COUNTERS
15603 fprintf (file
, "\tmovl\t$%sP%d,%%%s\n", LPREFIX
, labelno
,
15604 PROFILE_COUNT_REGISTER
);
15606 fprintf (file
, "\tcall\t%s\n", MCOUNT_NAME
);
/* We don't have exact information about the insn sizes, but we may assume
   quite safely that we are informed about all 1 byte insns and memory
   address sizes.  This is enough to eliminate unnecessary padding in
   the vast majority of cases.  */

min_insn_size (rtx insn)
  if (!INSN_P (insn) || !active_insn_p (insn))

  /* Discard alignments we've emitted and jump instructions.  */
  if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
      && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
  if (GET_CODE (insn) == JUMP_INSN
      && (GET_CODE (PATTERN (insn)) == ADDR_VEC
          || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))

  /* Important case - calls are always 5 bytes.
     It is common to have many calls in a row.  */
  if (GET_CODE (insn) == CALL_INSN
      && symbolic_reference_mentioned_p (PATTERN (insn))
      && !SIBLING_CALL_P (insn))
  if (get_attr_length (insn) <= 1)

  /* For normal instructions we may rely on the sizes of addresses
     and the presence of a symbol to require 4 bytes of encoding.
     This is not the case for jumps where references are PC relative.  */
  if (GET_CODE (insn) != JUMP_INSN)
      l = get_attr_length_address (insn);
      if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
15656 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
15660 ix86_avoid_jump_misspredicts (void)
15662 rtx insn
, start
= get_insns ();
15663 int nbytes
= 0, njumps
= 0;
15666 /* Look for all minimal intervals of instructions containing 4 jumps.
15667 The intervals are bounded by START and INSN. NBYTES is the total
15668 size of instructions in the interval including INSN and not including
15669 START. When the NBYTES is smaller than 16 bytes, it is possible
15670 that the end of START and INSN ends up in the same 16byte page.
15672 The smallest offset in the page INSN can start is the case where START
15673 ends on the offset 0. Offset of INSN is then NBYTES - sizeof (INSN).
15674 We add p2align to 16byte window with maxskip 17 - NBYTES + sizeof (INSN).
15676 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
15679 nbytes
+= min_insn_size (insn
);
15681 fprintf(dump_file
, "Insn %i estimated to %i bytes\n",
15682 INSN_UID (insn
), min_insn_size (insn
));
15683 if ((GET_CODE (insn
) == JUMP_INSN
15684 && GET_CODE (PATTERN (insn
)) != ADDR_VEC
15685 && GET_CODE (PATTERN (insn
)) != ADDR_DIFF_VEC
)
15686 || GET_CODE (insn
) == CALL_INSN
)
15693 start
= NEXT_INSN (start
);
15694 if ((GET_CODE (start
) == JUMP_INSN
15695 && GET_CODE (PATTERN (start
)) != ADDR_VEC
15696 && GET_CODE (PATTERN (start
)) != ADDR_DIFF_VEC
)
15697 || GET_CODE (start
) == CALL_INSN
)
15698 njumps
--, isjump
= 1;
15701 nbytes
-= min_insn_size (start
);
15706 fprintf (dump_file
, "Interval %i to %i has %i bytes\n",
15707 INSN_UID (start
), INSN_UID (insn
), nbytes
);
15709 if (njumps
== 3 && isjump
&& nbytes
< 16)
15711 int padsize
= 15 - nbytes
+ min_insn_size (insn
);
15714 fprintf (dump_file
, "Padding insn %i by %i bytes!\n",
15715 INSN_UID (insn
), padsize
);
15716 emit_insn_before (gen_align (GEN_INT (padsize
)), insn
);
15721 /* AMD Athlon works faster
15722 when RET is not destination of conditional jump or directly preceded
15723 by other jump instruction. We avoid the penalty by inserting NOP just
15724 before the RET instructions in such cases. */
15726 ix86_pad_returns (void)
15731 FOR_EACH_EDGE (e
, ei
, EXIT_BLOCK_PTR
->preds
)
15733 basic_block bb
= e
->src
;
15734 rtx ret
= BB_END (bb
);
15736 bool replace
= false;
15738 if (GET_CODE (ret
) != JUMP_INSN
|| GET_CODE (PATTERN (ret
)) != RETURN
15739 || !maybe_hot_bb_p (bb
))
15741 for (prev
= PREV_INSN (ret
); prev
; prev
= PREV_INSN (prev
))
15742 if (active_insn_p (prev
) || GET_CODE (prev
) == CODE_LABEL
)
15744 if (prev
&& GET_CODE (prev
) == CODE_LABEL
)
15749 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
15750 if (EDGE_FREQUENCY (e
) && e
->src
->index
>= 0
15751 && !(e
->flags
& EDGE_FALLTHRU
))
15756 prev
= prev_active_insn (ret
);
15758 && ((GET_CODE (prev
) == JUMP_INSN
&& any_condjump_p (prev
))
15759 || GET_CODE (prev
) == CALL_INSN
))
15761 /* Empty functions get branch mispredict even when the jump destination
15762 is not visible to us. */
15763 if (!prev
&& cfun
->function_frequency
> FUNCTION_FREQUENCY_UNLIKELY_EXECUTED
)
15768 emit_insn_before (gen_return_internal_long (), ret
);
15774 /* Implement machine specific optimizations. We implement padding of returns
15775 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
15779 if (TARGET_ATHLON_K8
&& optimize
&& !optimize_size
)
15780 ix86_pad_returns ();
15781 if (TARGET_FOUR_JUMP_LIMIT
&& optimize
&& !optimize_size
)
15782 ix86_avoid_jump_misspredicts ();
/* Return nonzero when a QImode register that must be represented via a REX
   prefix is used.  */

x86_extended_QIreg_mentioned_p (rtx insn)
  extract_insn_cached (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    if (REG_P (recog_data.operand[i])
        && REGNO (recog_data.operand[i]) >= 4)

/* Return nonzero when P points to register encoded via REX prefix.
   Called via for_each_rtx.  */

extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
  unsigned int regno;

  regno = REGNO (*p);
  return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);

/* Return true when INSN mentions a register that must be encoded using a REX
   prefix.  */

x86_extended_reg_mentioned_p (rtx insn)
  return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
/* Generate an unsigned DImode/SImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

x86_emit_floatuns (rtx operands[2])
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode, inmode;

  inmode = GET_MODE (operands[1]);
  if (inmode != SImode
      && inmode != DImode)

  in = force_reg (inmode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i1 = gen_reg_rtx (Pmode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));

  emit_label (neglab);

  i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
  i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
  i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
  expand_float (f0, i0, 0);
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
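
/* Illustrative sketch (not part of GCC): the halving trick used above,
   written as plain C for a hypothetical uint64_t -> double conversion, to
   show what the emitted RTL computes.  Non-negative inputs are converted
   directly as signed values; otherwise the input is shifted right by one
   with the dropped bit OR-ed back in (so rounding of the final result is
   unaffected), converted, and then doubled:

       #include <stdint.h>

       double
       floatuns_sketch (uint64_t u)
       {
         if ((int64_t) u >= 0)
           return (double) (int64_t) u;
         uint64_t half = (u >> 1) | (u & 1);
         return 2.0 * (double) (int64_t) half;
       }
*/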
15858 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15859 with all elements equal to VAR. Return true if successful. */
15862 ix86_expand_vector_init_duplicate (bool mmx_ok
, enum machine_mode mode
,
15863 rtx target
, rtx val
)
15865 enum machine_mode smode
, wsmode
, wvmode
;
15872 if (!mmx_ok
&& !TARGET_SSE
)
15880 val
= force_reg (GET_MODE_INNER (mode
), val
);
15881 x
= gen_rtx_VEC_DUPLICATE (mode
, val
);
15882 emit_insn (gen_rtx_SET (VOIDmode
, target
, x
));
15888 val
= gen_lowpart (SImode
, val
);
15889 x
= gen_rtx_TRUNCATE (HImode
, val
);
15890 x
= gen_rtx_VEC_DUPLICATE (mode
, x
);
15891 emit_insn (gen_rtx_SET (VOIDmode
, target
, x
));
15912 /* Replicate the value once into the next wider mode and recurse. */
15913 val
= convert_modes (wsmode
, smode
, val
, true);
15914 x
= expand_simple_binop (wsmode
, ASHIFT
, val
,
15915 GEN_INT (GET_MODE_BITSIZE (smode
)),
15916 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
15917 val
= expand_simple_binop (wsmode
, IOR
, val
, x
, x
, 1, OPTAB_LIB_WIDEN
);
15919 x
= gen_reg_rtx (wvmode
);
15920 if (!ix86_expand_vector_init_duplicate (mmx_ok
, wvmode
, x
, val
))
15921 gcc_unreachable ();
15922 emit_move_insn (target
, gen_lowpart (mode
, x
));
15930 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15931 whose low element is VAR, and other elements are zero. Return true
15935 ix86_expand_vector_init_low_nonzero (bool mmx_ok
, enum machine_mode mode
,
15936 rtx target
, rtx var
)
15938 enum machine_mode vsimode
;
15945 if (!mmx_ok
&& !TARGET_SSE
)
15951 var
= force_reg (GET_MODE_INNER (mode
), var
);
15952 x
= gen_rtx_VEC_CONCAT (mode
, var
, CONST0_RTX (GET_MODE_INNER (mode
)));
15953 emit_insn (gen_rtx_SET (VOIDmode
, target
, x
));
15958 var
= force_reg (GET_MODE_INNER (mode
), var
);
15959 x
= gen_rtx_VEC_DUPLICATE (mode
, var
);
15960 x
= gen_rtx_VEC_MERGE (mode
, x
, CONST0_RTX (mode
), const1_rtx
);
15961 emit_insn (gen_rtx_SET (VOIDmode
, target
, x
));
15966 vsimode
= V4SImode
;
15972 vsimode
= V2SImode
;
15975 /* Zero extend the variable element to SImode and recurse. */
15976 var
= convert_modes (SImode
, GET_MODE_INNER (mode
), var
, true);
15978 x
= gen_reg_rtx (vsimode
);
15979 if (!ix86_expand_vector_init_low_nonzero (mmx_ok
, vsimode
, x
, var
))
15980 gcc_unreachable ();
15982 emit_move_insn (target
, gen_lowpart (mode
, x
));
15990 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15991 consisting of the values in VALS. It is known that all elements
15992 except ONE_VAR are constants. Return true if successful. */
15995 ix86_expand_vector_init_one_var (bool mmx_ok
, enum machine_mode mode
,
15996 rtx target
, rtx vals
, int one_var
)
15998 rtx var
= XVECEXP (vals
, 0, one_var
);
15999 enum machine_mode wmode
;
16002 XVECEXP (vals
, 0, one_var
) = CONST0_RTX (GET_MODE_INNER (mode
));
16003 const_vec
= gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0));
16011 /* For the two element vectors, it's just as easy to use
16012 the general case. */
16028 /* There's no way to set one QImode entry easily. Combine
16029 the variable value with its adjacent constant value, and
16030 promote to an HImode set. */
16031 x
= XVECEXP (vals
, 0, one_var
^ 1);
16034 var
= convert_modes (HImode
, QImode
, var
, true);
16035 var
= expand_simple_binop (HImode
, ASHIFT
, var
, GEN_INT (8),
16036 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
16037 x
= GEN_INT (INTVAL (x
) & 0xff);
16041 var
= convert_modes (HImode
, QImode
, var
, true);
16042 x
= gen_int_mode (INTVAL (x
) << 8, HImode
);
16044 if (x
!= const0_rtx
)
16045 var
= expand_simple_binop (HImode
, IOR
, var
, x
, var
,
16046 1, OPTAB_LIB_WIDEN
);
16048 x
= gen_reg_rtx (wmode
);
16049 emit_move_insn (x
, gen_lowpart (wmode
, const_vec
));
16050 ix86_expand_vector_set (mmx_ok
, x
, var
, one_var
>> 1);
16052 emit_move_insn (target
, gen_lowpart (mode
, x
));
16059 emit_move_insn (target
, const_vec
);
16060 ix86_expand_vector_set (mmx_ok
, target
, var
, one_var
);
16064 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
16065 all values variable, and none identical. */
16068 ix86_expand_vector_init_general (bool mmx_ok
, enum machine_mode mode
,
16069 rtx target
, rtx vals
)
16071 enum machine_mode half_mode
= GET_MODE_INNER (mode
);
16072 rtx op0
= NULL
, op1
= NULL
;
16073 bool use_vec_concat
= false;
16079 if (!mmx_ok
&& !TARGET_SSE
)
16085 /* For the two element vectors, we always implement VEC_CONCAT. */
16086 op0
= XVECEXP (vals
, 0, 0);
16087 op1
= XVECEXP (vals
, 0, 1);
16088 use_vec_concat
= true;
16092 half_mode
= V2SFmode
;
16095 half_mode
= V2SImode
;
16101 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
16102 Recurse to load the two halves. */
16104 op0
= gen_reg_rtx (half_mode
);
16105 v
= gen_rtvec (2, XVECEXP (vals
, 0, 0), XVECEXP (vals
, 0, 1));
16106 ix86_expand_vector_init (false, op0
, gen_rtx_PARALLEL (half_mode
, v
));
16108 op1
= gen_reg_rtx (half_mode
);
16109 v
= gen_rtvec (2, XVECEXP (vals
, 0, 2), XVECEXP (vals
, 0, 3));
16110 ix86_expand_vector_init (false, op1
, gen_rtx_PARALLEL (half_mode
, v
));
16112 use_vec_concat
= true;
16123 gcc_unreachable ();
16126 if (use_vec_concat
)
16128 if (!register_operand (op0
, half_mode
))
16129 op0
= force_reg (half_mode
, op0
);
16130 if (!register_operand (op1
, half_mode
))
16131 op1
= force_reg (half_mode
, op1
);
16133 emit_insn (gen_rtx_SET (VOIDmode
, target
,
16134 gen_rtx_VEC_CONCAT (mode
, op0
, op1
)));
16138 int i
, j
, n_elts
, n_words
, n_elt_per_word
;
16139 enum machine_mode inner_mode
;
16140 rtx words
[4], shift
;
16142 inner_mode
= GET_MODE_INNER (mode
);
16143 n_elts
= GET_MODE_NUNITS (mode
);
16144 n_words
= GET_MODE_SIZE (mode
) / UNITS_PER_WORD
;
16145 n_elt_per_word
= n_elts
/ n_words
;
16146 shift
= GEN_INT (GET_MODE_BITSIZE (inner_mode
));
16148 for (i
= 0; i
< n_words
; ++i
)
16150 rtx word
= NULL_RTX
;
16152 for (j
= 0; j
< n_elt_per_word
; ++j
)
16154 rtx elt
= XVECEXP (vals
, 0, (i
+1)*n_elt_per_word
- j
- 1);
16155 elt
= convert_modes (word_mode
, inner_mode
, elt
, true);
16161 word
= expand_simple_binop (word_mode
, ASHIFT
, word
, shift
,
16162 word
, 1, OPTAB_LIB_WIDEN
);
16163 word
= expand_simple_binop (word_mode
, IOR
, word
, elt
,
16164 word
, 1, OPTAB_LIB_WIDEN
);
16172 emit_move_insn (target
, gen_lowpart (mode
, words
[0]));
16173 else if (n_words
== 2)
16175 rtx tmp
= gen_reg_rtx (mode
);
16176 emit_insn (gen_rtx_CLOBBER (VOIDmode
, tmp
));
16177 emit_move_insn (gen_lowpart (word_mode
, tmp
), words
[0]);
16178 emit_move_insn (gen_highpart (word_mode
, tmp
), words
[1]);
16179 emit_move_insn (target
, tmp
);
16181 else if (n_words
== 4)
16183 rtx tmp
= gen_reg_rtx (V4SImode
);
16184 vals
= gen_rtx_PARALLEL (V4SImode
, gen_rtvec_v (4, words
));
16185 ix86_expand_vector_init_general (false, V4SImode
, tmp
, vals
);
16186 emit_move_insn (target
, gen_lowpart (mode
, tmp
));
16189 gcc_unreachable ();
/* Initialize vector TARGET via VALS.  Suppress the use of MMX
   instructions unless MMX_OK is true.  */

ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;

  for (i = 0; i < n_elts; ++i)
      x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
        n_var++, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
        all_const_zero = false;
      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))

  /* Constants are best loaded from the constant pool.  */
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));

  /* If all values are identical, broadcast the value.  */
      && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
                                            XVECEXP (vals, 0, 0)))

  /* Values where only one field is non-constant are best loaded from
     the pool and overwritten via move later.  */
      if (all_const_zero && one_var == 0
          && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
                                                  XVECEXP (vals, 0, 0)))

      if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))

  ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
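
/* Illustrative sketch (not part of GCC): source-level code that is expanded
   through ix86_expand_vector_init (reaching the backend via the vec_init
   patterns, to the best of our understanding).  An all-constant initializer
   takes the constant-pool path checked first above; initializers with
   variable elements fall through to the one-var or general cases:

       typedef int v4si __attribute__ ((vector_size (16)));

       v4si
       make_vec (int a, int b)
       {
         v4si v = { a, b, 0, 0 };   two variable elements: general case
         return v;
       }
*/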
16248 ix86_expand_vector_set (bool mmx_ok
, rtx target
, rtx val
, int elt
)
16250 enum machine_mode mode
= GET_MODE (target
);
16251 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
16252 bool use_vec_merge
= false;
16268 /* For the two element vectors, we implement a VEC_CONCAT with
16269 the extraction of the other element. */
16271 tmp
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (1, GEN_INT (1 - elt
)));
16272 tmp
= gen_rtx_VEC_SELECT (inner_mode
, target
, tmp
);
16275 op0
= val
, op1
= tmp
;
16277 op0
= tmp
, op1
= val
;
16279 tmp
= gen_rtx_VEC_CONCAT (mode
, op0
, op1
);
16280 emit_insn (gen_rtx_SET (VOIDmode
, target
, tmp
));

    case V4SFmode:
      switch (elt)
	{
	case 0:
	  use_vec_merge = true;
	  break;

	case 1:
	  /* tmp = op0 = A B C D */
	  tmp = copy_to_reg (target);

	  /* op0 = C C D D */
	  emit_insn (gen_sse_unpcklps (target, target, target));

	  /* op0 = C C D X */
	  ix86_expand_vector_set (false, target, val, 0);

	  /* op0 = A B X D */
	  emit_insn (gen_sse_shufps_1 (target, target, tmp,
				       GEN_INT (1), GEN_INT (0),
				       GEN_INT (2+4), GEN_INT (3+4)));
	  return;

	case 2:
	  tmp = copy_to_reg (target);
	  ix86_expand_vector_set (false, target, val, 0);
	  emit_insn (gen_sse_shufps_1 (target, target, tmp,
				       GEN_INT (0), GEN_INT (1),
				       GEN_INT (0+4), GEN_INT (3+4)));
	  return;

	case 3:
	  tmp = copy_to_reg (target);
	  ix86_expand_vector_set (false, target, val, 0);
	  emit_insn (gen_sse_shufps_1 (target, target, tmp,
				       GEN_INT (0), GEN_INT (1),
				       GEN_INT (2+4), GEN_INT (0+4)));
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case V4SImode:
      /* Element 0 handled by vec_merge below.  */
      if (elt == 0)
	{
	  use_vec_merge = true;
	  break;
	}

      if (TARGET_SSE2)
	{
	  /* With SSE2, use integer shuffles to swap element 0 and ELT,
	     store into element 0, then shuffle them back.  */
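	  /* For instance, ELT == 2 gives the permutation { 2, 1, 0, 3 }:
	     the first pshufd brings the old element 2 into lane 0, the
	     recursive call overwrites lane 0, and the same (self-inverse)
	     pshufd puts everything back in place.  */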

	  rtx order[4];

	  order[0] = GEN_INT (elt);
	  order[1] = const1_rtx;
	  order[2] = const2_rtx;
	  order[3] = GEN_INT (3);
	  order[elt] = const0_rtx;

	  emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
					order[1], order[2], order[3]));

	  ix86_expand_vector_set (false, target, val, 0);

	  emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
					order[1], order[2], order[3]));
	}
      else
	{
	  /* For SSE1, we have to reuse the V4SF code.  */
	  ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
				  gen_lowpart (SFmode, val), elt);
	}
      return;

    case V8HImode:
      use_vec_merge = TARGET_SSE2;
      break;

    case V4HImode:
      use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    default:
      break;
    }

  if (use_vec_merge)
    {
      tmp = gen_rtx_VEC_DUPLICATE (mode, val);
      tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);

      emit_move_insn (mem, target);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (tmp, val);

      emit_move_insn (target, mem);
    }
}

void
ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  bool use_vec_extr = false;
  rtx tmp;

  switch (mode)
    {
    case V2SImode:
    case V2SFmode:
      if (!mmx_ok)
	break;
      /* FALLTHRU */

    case V2DFmode:
    case V2DImode:
      use_vec_extr = true;
      break;

    case V4SFmode:
      switch (elt)
	{
	case 0:
	  tmp = vec;
	  break;

	case 1:
	case 3:
	  tmp = gen_reg_rtx (mode);
	  emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
				       GEN_INT (elt), GEN_INT (elt),
				       GEN_INT (elt+4), GEN_INT (elt+4)));
	  break;

	case 2:
	  tmp = gen_reg_rtx (mode);
	  emit_insn (gen_sse_unpckhps (tmp, vec, vec));
	  break;

	default:
	  gcc_unreachable ();
	}
      vec = tmp;
      use_vec_extr = true;
      elt = 0;
      break;

    case V4SImode:
      if (TARGET_SSE2)
	{
	  switch (elt)
	    {
	    case 0:
	      tmp = vec;
	      break;

	    case 1:
	    case 3:
	      tmp = gen_reg_rtx (mode);
	      emit_insn (gen_sse2_pshufd_1 (tmp, vec,
					    GEN_INT (elt), GEN_INT (elt),
					    GEN_INT (elt), GEN_INT (elt)));
	      break;

	    case 2:
	      tmp = gen_reg_rtx (mode);
	      emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  vec = tmp;
	  use_vec_extr = true;
	  elt = 0;
	}
      else
	{
	  /* For SSE1, we have to reuse the V4SF code.  */
	  ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
				      gen_lowpart (V4SFmode, vec), elt);
	  return;
	}
      break;

    case V8HImode:
      use_vec_extr = TARGET_SSE2;
      break;

    case V4HImode:
      use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
      break;

    case V16QImode:
    case V8QImode:
      /* ??? Could extract the appropriate HImode element and shift.  */
    default:
      break;
    }

  if (use_vec_extr)
    {
      tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
      tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);

      /* Let the rtl optimizers know about the zero extension performed.  */
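      /* (pextrw, which implements the HImode vec_select here, zero-extends
	 the element into a 32-bit register, so exposing the ZERO_EXTEND
	 lets later passes delete redundant extensions.)  */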
      if (inner_mode == HImode)
	{
	  tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
	  target = gen_lowpart (SImode, target);
	}

      emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
    }
  else
    {
      rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);

      emit_move_insn (mem, vec);

      tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
      emit_move_insn (target, tmp);
    }
}

/* Implements target hook vector_mode_supported_p.  */

static bool
ix86_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
    return true;
  if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
    return true;
  if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
    return true;
  if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
    return true;
  return false;
}

/* Worker function for TARGET_MD_ASM_CLOBBERS.

   We do this in the new i386 backend to maintain source compatibility
   with the old cc0-based compiler.  */
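/* The three strings below name the flags, x87 status word and direction
   flag registers of this port, so every asm statement is treated as if
   it clobbered them.  */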
static tree
ix86_md_asm_clobbers (tree clobbers)
{
  clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
			clobbers);
  clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
			clobbers);
  clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
			clobbers);
  return clobbers;
}

/* Worker function for REVERSE_CONDITION.  */
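/* FP comparisons must not be reversed naively: with IEEE NaNs the
   inverse of e.g. GT is UNLE, not LE, so the maybe_unordered variant is
   used for the FP condition-code modes.  */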
enum rtx_code
ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
{
  return (mode != CCFPmode && mode != CCFPUmode
	  ? reverse_condition (code)
	  : reverse_condition_maybe_unordered (code));
}

/* Output code to perform an x87 FP register move, from OPERANDS[1]
   to OPERANDS[0].  */
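/* A dead source lets us use the popping store forms: ffreep when the
   destination is the top of stack and the target supports it, fstp
   otherwise; a load (fld) or plain store is used for the other cases.  */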
const char *
output_387_reg_move (rtx insn, rtx *operands)
{
  if (REG_P (operands[1])
      && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
    {
      if (REGNO (operands[0]) == FIRST_STACK_REG
	  && TARGET_USE_FFREEP)
	return "ffreep\t%y0";
      return "fstp\t%y0";
    }

  if (STACK_TOP_P (operands[0]))
    return "fld%z1\t%y1";

  return "fst\t%y0";
}

/* Output code to perform a conditional jump to LABEL, if C2 flag in
   FP status register is set.  */
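/* C2 is bit 10 of the FP status word, i.e. bit 2 (mask 0x04) of the byte
   stored by fnstsw into the high half of REG.  With SAHF that byte is
   copied into EFLAGS, where C2 lands in PF; the UNORDERED test on CCmode
   is how PF is tested there.  Without SAHF the 0x04 bit is tested
   directly.  */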
static void
ix86_emit_fp_unordered_jump (rtx label)
{
  rtx reg = gen_reg_rtx (HImode);
  rtx temp;

  emit_insn (gen_x86_fnstsw_1 (reg));

  if (TARGET_USE_SAHF)
    {
      emit_insn (gen_x86_sahf_1 (reg));

      temp = gen_rtx_REG (CCmode, FLAGS_REG);
      temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
    }
  else
    {
      emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));

      temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
      temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
    }

  temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
			       gen_rtx_LABEL_REF (VOIDmode, label),
			       pc_rtx);
  temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
  emit_jump_insn (temp);
}

/* Output code to perform a log1p XFmode calculation.  */
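/* fyl2xp1 is only accurate for small arguments (roughly |OP1| below
   1 - sqrt(2)/2, which is the 0.2928... constant below), so larger
   inputs take the fallback path that computes fyl2x (ln2, 1 + OP1)
   instead.  */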
void ix86_emit_i387_log1p (rtx op0, rtx op1)
{
  rtx label1 = gen_label_rtx ();
  rtx label2 = gen_label_rtx ();

  rtx tmp = gen_reg_rtx (XFmode);
  rtx tmp2 = gen_reg_rtx (XFmode);

  emit_insn (gen_absxf2 (tmp, op1));
  emit_insn (gen_cmpxf (tmp,
    CONST_DOUBLE_FROM_REAL_VALUE (
       REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
       XFmode)));
  emit_jump_insn (gen_bge (label1));

  emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
  emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
  emit_jump (label2);

  emit_label (label1);
  emit_move_insn (tmp, CONST1_RTX (XFmode));
  emit_insn (gen_addxf3 (tmp, op1, tmp));
  emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
  emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));

  emit_label (label2);
}

/* Solaris named-section hook.  Parameters are as for
   named_section_real.  */

static void
i386_solaris_elf_named_section (const char *name, unsigned int flags,
				tree decl)
{
  /* With Binutils 2.15, the "@unwind" marker must be specified on
     every occurrence of the ".eh_frame" section, not just the first
     one.  */
  if (TARGET_64BIT
      && strcmp (name, ".eh_frame") == 0)
    {
      fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
	       flags & SECTION_WRITE ? "aw" : "a");
      return;
    }
  default_elf_asm_named_section (name, flags, decl);
}

#include "gt-i386.h"