]> gcc.gnu.org Git - gcc.git/blob - gcc/config/i386/i386.c
i386.c (ix86_expand_vector_init_one_var): Fix typo in QImode expansion to ix86_expand...
[gcc.git] / gcc / config / i386 / i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51
52 #ifndef CHECK_STACK_LIMIT
53 #define CHECK_STACK_LIMIT (-1)
54 #endif
55
56 /* Return index of given mode in mult and division cost tables. */
57 #define MODE_INDEX(mode) \
58 ((mode) == QImode ? 0 \
59 : (mode) == HImode ? 1 \
60 : (mode) == SImode ? 2 \
61 : (mode) == DImode ? 3 \
62 : 4)
63
64 /* Processor costs (relative to an add) */
65 static const
66 struct processor_costs size_cost = { /* costs for tunning for size */
67 2, /* cost of an add instruction */
68 3, /* cost of a lea instruction */
69 2, /* variable shift costs */
70 3, /* constant shift costs */
71 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
72 0, /* cost of multiply per each bit set */
73 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
74 3, /* cost of movsx */
75 3, /* cost of movzx */
76 0, /* "large" insn */
77 2, /* MOVE_RATIO */
78 2, /* cost for loading QImode using movzbl */
79 {2, 2, 2}, /* cost of loading integer registers
80 in QImode, HImode and SImode.
81 Relative to reg-reg move (2). */
82 {2, 2, 2}, /* cost of storing integer registers */
83 2, /* cost of reg,reg fld/fst */
84 {2, 2, 2}, /* cost of loading fp registers
85 in SFmode, DFmode and XFmode */
86 {2, 2, 2}, /* cost of loading integer registers */
87 3, /* cost of moving MMX register */
88 {3, 3}, /* cost of loading MMX registers
89 in SImode and DImode */
90 {3, 3}, /* cost of storing MMX registers
91 in SImode and DImode */
92 3, /* cost of moving SSE register */
93 {3, 3, 3}, /* cost of loading SSE registers
94 in SImode, DImode and TImode */
95 {3, 3, 3}, /* cost of storing SSE registers
96 in SImode, DImode and TImode */
97 3, /* MMX or SSE register to integer */
98 0, /* size of prefetch block */
99 0, /* number of parallel prefetches */
100 1, /* Branch cost */
101 2, /* cost of FADD and FSUB insns. */
102 2, /* cost of FMUL instruction. */
103 2, /* cost of FDIV instruction. */
104 2, /* cost of FABS instruction. */
105 2, /* cost of FCHS instruction. */
106 2, /* cost of FSQRT instruction. */
107 };
108
109 /* Processor costs (relative to an add) */
110 static const
111 struct processor_costs i386_cost = { /* 386 specific costs */
112 1, /* cost of an add instruction */
113 1, /* cost of a lea instruction */
114 3, /* variable shift costs */
115 2, /* constant shift costs */
116 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
117 1, /* cost of multiply per each bit set */
118 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
119 3, /* cost of movsx */
120 2, /* cost of movzx */
121 15, /* "large" insn */
122 3, /* MOVE_RATIO */
123 4, /* cost for loading QImode using movzbl */
124 {2, 4, 2}, /* cost of loading integer registers
125 in QImode, HImode and SImode.
126 Relative to reg-reg move (2). */
127 {2, 4, 2}, /* cost of storing integer registers */
128 2, /* cost of reg,reg fld/fst */
129 {8, 8, 8}, /* cost of loading fp registers
130 in SFmode, DFmode and XFmode */
131 {8, 8, 8}, /* cost of loading integer registers */
132 2, /* cost of moving MMX register */
133 {4, 8}, /* cost of loading MMX registers
134 in SImode and DImode */
135 {4, 8}, /* cost of storing MMX registers
136 in SImode and DImode */
137 2, /* cost of moving SSE register */
138 {4, 8, 16}, /* cost of loading SSE registers
139 in SImode, DImode and TImode */
140 {4, 8, 16}, /* cost of storing SSE registers
141 in SImode, DImode and TImode */
142 3, /* MMX or SSE register to integer */
143 0, /* size of prefetch block */
144 0, /* number of parallel prefetches */
145 1, /* Branch cost */
146 23, /* cost of FADD and FSUB insns. */
147 27, /* cost of FMUL instruction. */
148 88, /* cost of FDIV instruction. */
149 22, /* cost of FABS instruction. */
150 24, /* cost of FCHS instruction. */
151 122, /* cost of FSQRT instruction. */
152 };
153
154 static const
155 struct processor_costs i486_cost = { /* 486 specific costs */
156 1, /* cost of an add instruction */
157 1, /* cost of a lea instruction */
158 3, /* variable shift costs */
159 2, /* constant shift costs */
160 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
161 1, /* cost of multiply per each bit set */
162 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
163 3, /* cost of movsx */
164 2, /* cost of movzx */
165 15, /* "large" insn */
166 3, /* MOVE_RATIO */
167 4, /* cost for loading QImode using movzbl */
168 {2, 4, 2}, /* cost of loading integer registers
169 in QImode, HImode and SImode.
170 Relative to reg-reg move (2). */
171 {2, 4, 2}, /* cost of storing integer registers */
172 2, /* cost of reg,reg fld/fst */
173 {8, 8, 8}, /* cost of loading fp registers
174 in SFmode, DFmode and XFmode */
175 {8, 8, 8}, /* cost of loading integer registers */
176 2, /* cost of moving MMX register */
177 {4, 8}, /* cost of loading MMX registers
178 in SImode and DImode */
179 {4, 8}, /* cost of storing MMX registers
180 in SImode and DImode */
181 2, /* cost of moving SSE register */
182 {4, 8, 16}, /* cost of loading SSE registers
183 in SImode, DImode and TImode */
184 {4, 8, 16}, /* cost of storing SSE registers
185 in SImode, DImode and TImode */
186 3, /* MMX or SSE register to integer */
187 0, /* size of prefetch block */
188 0, /* number of parallel prefetches */
189 1, /* Branch cost */
190 8, /* cost of FADD and FSUB insns. */
191 16, /* cost of FMUL instruction. */
192 73, /* cost of FDIV instruction. */
193 3, /* cost of FABS instruction. */
194 3, /* cost of FCHS instruction. */
195 83, /* cost of FSQRT instruction. */
196 };
197
198 static const
199 struct processor_costs pentium_cost = {
200 1, /* cost of an add instruction */
201 1, /* cost of a lea instruction */
202 4, /* variable shift costs */
203 1, /* constant shift costs */
204 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
205 0, /* cost of multiply per each bit set */
206 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
207 3, /* cost of movsx */
208 2, /* cost of movzx */
209 8, /* "large" insn */
210 6, /* MOVE_RATIO */
211 6, /* cost for loading QImode using movzbl */
212 {2, 4, 2}, /* cost of loading integer registers
213 in QImode, HImode and SImode.
214 Relative to reg-reg move (2). */
215 {2, 4, 2}, /* cost of storing integer registers */
216 2, /* cost of reg,reg fld/fst */
217 {2, 2, 6}, /* cost of loading fp registers
218 in SFmode, DFmode and XFmode */
219 {4, 4, 6}, /* cost of loading integer registers */
220 8, /* cost of moving MMX register */
221 {8, 8}, /* cost of loading MMX registers
222 in SImode and DImode */
223 {8, 8}, /* cost of storing MMX registers
224 in SImode and DImode */
225 2, /* cost of moving SSE register */
226 {4, 8, 16}, /* cost of loading SSE registers
227 in SImode, DImode and TImode */
228 {4, 8, 16}, /* cost of storing SSE registers
229 in SImode, DImode and TImode */
230 3, /* MMX or SSE register to integer */
231 0, /* size of prefetch block */
232 0, /* number of parallel prefetches */
233 2, /* Branch cost */
234 3, /* cost of FADD and FSUB insns. */
235 3, /* cost of FMUL instruction. */
236 39, /* cost of FDIV instruction. */
237 1, /* cost of FABS instruction. */
238 1, /* cost of FCHS instruction. */
239 70, /* cost of FSQRT instruction. */
240 };
241
242 static const
243 struct processor_costs pentiumpro_cost = {
244 1, /* cost of an add instruction */
245 1, /* cost of a lea instruction */
246 1, /* variable shift costs */
247 1, /* constant shift costs */
248 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
249 0, /* cost of multiply per each bit set */
250 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
251 1, /* cost of movsx */
252 1, /* cost of movzx */
253 8, /* "large" insn */
254 6, /* MOVE_RATIO */
255 2, /* cost for loading QImode using movzbl */
256 {4, 4, 4}, /* cost of loading integer registers
257 in QImode, HImode and SImode.
258 Relative to reg-reg move (2). */
259 {2, 2, 2}, /* cost of storing integer registers */
260 2, /* cost of reg,reg fld/fst */
261 {2, 2, 6}, /* cost of loading fp registers
262 in SFmode, DFmode and XFmode */
263 {4, 4, 6}, /* cost of loading integer registers */
264 2, /* cost of moving MMX register */
265 {2, 2}, /* cost of loading MMX registers
266 in SImode and DImode */
267 {2, 2}, /* cost of storing MMX registers
268 in SImode and DImode */
269 2, /* cost of moving SSE register */
270 {2, 2, 8}, /* cost of loading SSE registers
271 in SImode, DImode and TImode */
272 {2, 2, 8}, /* cost of storing SSE registers
273 in SImode, DImode and TImode */
274 3, /* MMX or SSE register to integer */
275 32, /* size of prefetch block */
276 6, /* number of parallel prefetches */
277 2, /* Branch cost */
278 3, /* cost of FADD and FSUB insns. */
279 5, /* cost of FMUL instruction. */
280 56, /* cost of FDIV instruction. */
281 2, /* cost of FABS instruction. */
282 2, /* cost of FCHS instruction. */
283 56, /* cost of FSQRT instruction. */
284 };
285
286 static const
287 struct processor_costs k6_cost = {
288 1, /* cost of an add instruction */
289 2, /* cost of a lea instruction */
290 1, /* variable shift costs */
291 1, /* constant shift costs */
292 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
293 0, /* cost of multiply per each bit set */
294 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
295 2, /* cost of movsx */
296 2, /* cost of movzx */
297 8, /* "large" insn */
298 4, /* MOVE_RATIO */
299 3, /* cost for loading QImode using movzbl */
300 {4, 5, 4}, /* cost of loading integer registers
301 in QImode, HImode and SImode.
302 Relative to reg-reg move (2). */
303 {2, 3, 2}, /* cost of storing integer registers */
304 4, /* cost of reg,reg fld/fst */
305 {6, 6, 6}, /* cost of loading fp registers
306 in SFmode, DFmode and XFmode */
307 {4, 4, 4}, /* cost of loading integer registers */
308 2, /* cost of moving MMX register */
309 {2, 2}, /* cost of loading MMX registers
310 in SImode and DImode */
311 {2, 2}, /* cost of storing MMX registers
312 in SImode and DImode */
313 2, /* cost of moving SSE register */
314 {2, 2, 8}, /* cost of loading SSE registers
315 in SImode, DImode and TImode */
316 {2, 2, 8}, /* cost of storing SSE registers
317 in SImode, DImode and TImode */
318 6, /* MMX or SSE register to integer */
319 32, /* size of prefetch block */
320 1, /* number of parallel prefetches */
321 1, /* Branch cost */
322 2, /* cost of FADD and FSUB insns. */
323 2, /* cost of FMUL instruction. */
324 56, /* cost of FDIV instruction. */
325 2, /* cost of FABS instruction. */
326 2, /* cost of FCHS instruction. */
327 56, /* cost of FSQRT instruction. */
328 };
329
330 static const
331 struct processor_costs athlon_cost = {
332 1, /* cost of an add instruction */
333 2, /* cost of a lea instruction */
334 1, /* variable shift costs */
335 1, /* constant shift costs */
336 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
337 0, /* cost of multiply per each bit set */
338 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
339 1, /* cost of movsx */
340 1, /* cost of movzx */
341 8, /* "large" insn */
342 9, /* MOVE_RATIO */
343 4, /* cost for loading QImode using movzbl */
344 {3, 4, 3}, /* cost of loading integer registers
345 in QImode, HImode and SImode.
346 Relative to reg-reg move (2). */
347 {3, 4, 3}, /* cost of storing integer registers */
348 4, /* cost of reg,reg fld/fst */
349 {4, 4, 12}, /* cost of loading fp registers
350 in SFmode, DFmode and XFmode */
351 {6, 6, 8}, /* cost of loading integer registers */
352 2, /* cost of moving MMX register */
353 {4, 4}, /* cost of loading MMX registers
354 in SImode and DImode */
355 {4, 4}, /* cost of storing MMX registers
356 in SImode and DImode */
357 2, /* cost of moving SSE register */
358 {4, 4, 6}, /* cost of loading SSE registers
359 in SImode, DImode and TImode */
360 {4, 4, 5}, /* cost of storing SSE registers
361 in SImode, DImode and TImode */
362 5, /* MMX or SSE register to integer */
363 64, /* size of prefetch block */
364 6, /* number of parallel prefetches */
365 5, /* Branch cost */
366 4, /* cost of FADD and FSUB insns. */
367 4, /* cost of FMUL instruction. */
368 24, /* cost of FDIV instruction. */
369 2, /* cost of FABS instruction. */
370 2, /* cost of FCHS instruction. */
371 35, /* cost of FSQRT instruction. */
372 };
373
374 static const
375 struct processor_costs k8_cost = {
376 1, /* cost of an add instruction */
377 2, /* cost of a lea instruction */
378 1, /* variable shift costs */
379 1, /* constant shift costs */
380 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
381 0, /* cost of multiply per each bit set */
382 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
383 1, /* cost of movsx */
384 1, /* cost of movzx */
385 8, /* "large" insn */
386 9, /* MOVE_RATIO */
387 4, /* cost for loading QImode using movzbl */
388 {3, 4, 3}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {3, 4, 3}, /* cost of storing integer registers */
392 4, /* cost of reg,reg fld/fst */
393 {4, 4, 12}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {6, 6, 8}, /* cost of loading integer registers */
396 2, /* cost of moving MMX register */
397 {3, 3}, /* cost of loading MMX registers
398 in SImode and DImode */
399 {4, 4}, /* cost of storing MMX registers
400 in SImode and DImode */
401 2, /* cost of moving SSE register */
402 {4, 3, 6}, /* cost of loading SSE registers
403 in SImode, DImode and TImode */
404 {4, 4, 5}, /* cost of storing SSE registers
405 in SImode, DImode and TImode */
406 5, /* MMX or SSE register to integer */
407 64, /* size of prefetch block */
408 6, /* number of parallel prefetches */
409 5, /* Branch cost */
410 4, /* cost of FADD and FSUB insns. */
411 4, /* cost of FMUL instruction. */
412 19, /* cost of FDIV instruction. */
413 2, /* cost of FABS instruction. */
414 2, /* cost of FCHS instruction. */
415 35, /* cost of FSQRT instruction. */
416 };
417
418 static const
419 struct processor_costs pentium4_cost = {
420 1, /* cost of an add instruction */
421 3, /* cost of a lea instruction */
422 4, /* variable shift costs */
423 4, /* constant shift costs */
424 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
425 0, /* cost of multiply per each bit set */
426 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
427 1, /* cost of movsx */
428 1, /* cost of movzx */
429 16, /* "large" insn */
430 6, /* MOVE_RATIO */
431 2, /* cost for loading QImode using movzbl */
432 {4, 5, 4}, /* cost of loading integer registers
433 in QImode, HImode and SImode.
434 Relative to reg-reg move (2). */
435 {2, 3, 2}, /* cost of storing integer registers */
436 2, /* cost of reg,reg fld/fst */
437 {2, 2, 6}, /* cost of loading fp registers
438 in SFmode, DFmode and XFmode */
439 {4, 4, 6}, /* cost of loading integer registers */
440 2, /* cost of moving MMX register */
441 {2, 2}, /* cost of loading MMX registers
442 in SImode and DImode */
443 {2, 2}, /* cost of storing MMX registers
444 in SImode and DImode */
445 12, /* cost of moving SSE register */
446 {12, 12, 12}, /* cost of loading SSE registers
447 in SImode, DImode and TImode */
448 {2, 2, 8}, /* cost of storing SSE registers
449 in SImode, DImode and TImode */
450 10, /* MMX or SSE register to integer */
451 64, /* size of prefetch block */
452 6, /* number of parallel prefetches */
453 2, /* Branch cost */
454 5, /* cost of FADD and FSUB insns. */
455 7, /* cost of FMUL instruction. */
456 43, /* cost of FDIV instruction. */
457 2, /* cost of FABS instruction. */
458 2, /* cost of FCHS instruction. */
459 43, /* cost of FSQRT instruction. */
460 };
461
462 static const
463 struct processor_costs nocona_cost = {
464 1, /* cost of an add instruction */
465 1, /* cost of a lea instruction */
466 1, /* variable shift costs */
467 1, /* constant shift costs */
468 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
469 0, /* cost of multiply per each bit set */
470 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
471 1, /* cost of movsx */
472 1, /* cost of movzx */
473 16, /* "large" insn */
474 9, /* MOVE_RATIO */
475 4, /* cost for loading QImode using movzbl */
476 {4, 4, 4}, /* cost of loading integer registers
477 in QImode, HImode and SImode.
478 Relative to reg-reg move (2). */
479 {4, 4, 4}, /* cost of storing integer registers */
480 3, /* cost of reg,reg fld/fst */
481 {12, 12, 12}, /* cost of loading fp registers
482 in SFmode, DFmode and XFmode */
483 {4, 4, 4}, /* cost of loading integer registers */
484 6, /* cost of moving MMX register */
485 {12, 12}, /* cost of loading MMX registers
486 in SImode and DImode */
487 {12, 12}, /* cost of storing MMX registers
488 in SImode and DImode */
489 6, /* cost of moving SSE register */
490 {12, 12, 12}, /* cost of loading SSE registers
491 in SImode, DImode and TImode */
492 {12, 12, 12}, /* cost of storing SSE registers
493 in SImode, DImode and TImode */
494 8, /* MMX or SSE register to integer */
495 128, /* size of prefetch block */
496 8, /* number of parallel prefetches */
497 1, /* Branch cost */
498 6, /* cost of FADD and FSUB insns. */
499 8, /* cost of FMUL instruction. */
500 40, /* cost of FDIV instruction. */
501 3, /* cost of FABS instruction. */
502 3, /* cost of FCHS instruction. */
503 44, /* cost of FSQRT instruction. */
504 };
505
506 const struct processor_costs *ix86_cost = &pentium_cost;
507
508 /* Processor feature/optimization bitmasks. */
509 #define m_386 (1<<PROCESSOR_I386)
510 #define m_486 (1<<PROCESSOR_I486)
511 #define m_PENT (1<<PROCESSOR_PENTIUM)
512 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
513 #define m_K6 (1<<PROCESSOR_K6)
514 #define m_ATHLON (1<<PROCESSOR_ATHLON)
515 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
516 #define m_K8 (1<<PROCESSOR_K8)
517 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
518 #define m_NOCONA (1<<PROCESSOR_NOCONA)
519
520 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
521 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
522 const int x86_zero_extend_with_and = m_486 | m_PENT;
523 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
524 const int x86_double_with_add = ~m_386;
525 const int x86_use_bit_test = m_386;
526 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
527 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
528 const int x86_3dnow_a = m_ATHLON_K8;
529 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
530 /* Branch hints were put in P4 based on simulation result. But
531 after P4 was made, no performance benefit was observed with
532 branch hints. It also increases the code size. As the result,
533 icc never generates branch hints. */
534 const int x86_branch_hints = 0;
535 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
536 const int x86_partial_reg_stall = m_PPRO;
537 const int x86_use_loop = m_K6;
538 const int x86_use_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
539 const int x86_use_mov0 = m_K6;
540 const int x86_use_cltd = ~(m_PENT | m_K6);
541 const int x86_read_modify_write = ~m_PENT;
542 const int x86_read_modify = ~(m_PENT | m_PPRO);
543 const int x86_split_long_moves = m_PPRO;
544 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
545 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
546 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
547 const int x86_qimode_math = ~(0);
548 const int x86_promote_qi_regs = 0;
549 const int x86_himode_math = ~(m_PPRO);
550 const int x86_promote_hi_regs = m_PPRO;
551 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
552 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
553 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
554 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
555 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
556 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
557 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
558 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
559 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
560 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
561 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
562 const int x86_shift1 = ~m_486;
563 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
564 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
565 /* Set for machines where the type and dependencies are resolved on SSE
566 register parts instead of whole registers, so we may maintain just
567 lower part of scalar values in proper format leaving the upper part
568 undefined. */
569 const int x86_sse_split_regs = m_ATHLON_K8;
570 const int x86_sse_typeless_stores = m_ATHLON_K8;
571 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
572 const int x86_use_ffreep = m_ATHLON_K8;
573 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
574 const int x86_inter_unit_moves = ~(m_ATHLON_K8);
575 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
576 /* Some CPU cores are not able to predict more than 4 branch instructions in
577 the 16 byte window. */
578 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
579 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
580 const int x86_use_bt = m_ATHLON_K8;
581
582 /* In case the average insn count for single function invocation is
583 lower than this constant, emit fast (but longer) prologue and
584 epilogue code. */
585 #define FAST_PROLOGUE_INSN_COUNT 20
586
587 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
588 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
589 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
590 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
591
592 /* Array of the smallest class containing reg number REGNO, indexed by
593 REGNO. Used by REGNO_REG_CLASS in i386.h. */
594
595 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
596 {
597 /* ax, dx, cx, bx */
598 AREG, DREG, CREG, BREG,
599 /* si, di, bp, sp */
600 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
601 /* FP registers */
602 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
603 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
604 /* arg pointer */
605 NON_Q_REGS,
606 /* flags, fpsr, dirflag, frame */
607 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
608 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
609 SSE_REGS, SSE_REGS,
610 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
611 MMX_REGS, MMX_REGS,
612 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
613 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
614 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
615 SSE_REGS, SSE_REGS,
616 };
617
618 /* The "default" register map used in 32bit mode. */
619
620 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
621 {
622 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
623 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
624 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
625 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
626 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
627 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
628 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
629 };
630
631 static int const x86_64_int_parameter_registers[6] =
632 {
633 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
634 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
635 };
636
637 static int const x86_64_int_return_registers[4] =
638 {
639 0 /*RAX*/, 1 /*RDI*/, 5 /*RDI*/, 4 /*RSI*/
640 };
641
642 /* The "default" register map used in 64bit mode. */
643 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
644 {
645 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
646 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
647 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
648 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
649 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
650 8,9,10,11,12,13,14,15, /* extended integer registers */
651 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
652 };
653
654 /* Define the register numbers to be used in Dwarf debugging information.
655 The SVR4 reference port C compiler uses the following register numbers
656 in its Dwarf output code:
657 0 for %eax (gcc regno = 0)
658 1 for %ecx (gcc regno = 2)
659 2 for %edx (gcc regno = 1)
660 3 for %ebx (gcc regno = 3)
661 4 for %esp (gcc regno = 7)
662 5 for %ebp (gcc regno = 6)
663 6 for %esi (gcc regno = 4)
664 7 for %edi (gcc regno = 5)
665 The following three DWARF register numbers are never generated by
666 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
667 believes these numbers have these meanings.
668 8 for %eip (no gcc equivalent)
669 9 for %eflags (gcc regno = 17)
670 10 for %trapno (no gcc equivalent)
671 It is not at all clear how we should number the FP stack registers
672 for the x86 architecture. If the version of SDB on x86/svr4 were
673 a bit less brain dead with respect to floating-point then we would
674 have a precedent to follow with respect to DWARF register numbers
675 for x86 FP registers, but the SDB on x86/svr4 is so completely
676 broken with respect to FP registers that it is hardly worth thinking
677 of it as something to strive for compatibility with.
678 The version of x86/svr4 SDB I have at the moment does (partially)
679 seem to believe that DWARF register number 11 is associated with
680 the x86 register %st(0), but that's about all. Higher DWARF
681 register numbers don't seem to be associated with anything in
682 particular, and even for DWARF regno 11, SDB only seems to under-
683 stand that it should say that a variable lives in %st(0) (when
684 asked via an `=' command) if we said it was in DWARF regno 11,
685 but SDB still prints garbage when asked for the value of the
686 variable in question (via a `/' command).
687 (Also note that the labels SDB prints for various FP stack regs
688 when doing an `x' command are all wrong.)
689 Note that these problems generally don't affect the native SVR4
690 C compiler because it doesn't allow the use of -O with -g and
691 because when it is *not* optimizing, it allocates a memory
692 location for each floating-point variable, and the memory
693 location is what gets described in the DWARF AT_location
694 attribute for the variable in question.
695 Regardless of the severe mental illness of the x86/svr4 SDB, we
696 do something sensible here and we use the following DWARF
697 register numbers. Note that these are all stack-top-relative
698 numbers.
699 11 for %st(0) (gcc regno = 8)
700 12 for %st(1) (gcc regno = 9)
701 13 for %st(2) (gcc regno = 10)
702 14 for %st(3) (gcc regno = 11)
703 15 for %st(4) (gcc regno = 12)
704 16 for %st(5) (gcc regno = 13)
705 17 for %st(6) (gcc regno = 14)
706 18 for %st(7) (gcc regno = 15)
707 */
708 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
709 {
710 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
711 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
712 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
713 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
714 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
715 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
716 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
717 };
718
719 /* Test and compare insns in i386.md store the information needed to
720 generate branch and scc insns here. */
721
722 rtx ix86_compare_op0 = NULL_RTX;
723 rtx ix86_compare_op1 = NULL_RTX;
724
725 #define MAX_386_STACK_LOCALS 3
726 /* Size of the register save area. */
727 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
728
729 /* Define the structure for the machine field in struct function. */
730
731 struct stack_local_entry GTY(())
732 {
733 unsigned short mode;
734 unsigned short n;
735 rtx rtl;
736 struct stack_local_entry *next;
737 };
738
739 /* Structure describing stack frame layout.
740 Stack grows downward:
741
742 [arguments]
743 <- ARG_POINTER
744 saved pc
745
746 saved frame pointer if frame_pointer_needed
747 <- HARD_FRAME_POINTER
748 [saved regs]
749
750 [padding1] \
751 )
752 [va_arg registers] (
753 > to_allocate <- FRAME_POINTER
754 [frame] (
755 )
756 [padding2] /
757 */
758 struct ix86_frame
759 {
760 int nregs;
761 int padding1;
762 int va_arg_size;
763 HOST_WIDE_INT frame;
764 int padding2;
765 int outgoing_arguments_size;
766 int red_zone_size;
767
768 HOST_WIDE_INT to_allocate;
769 /* The offsets relative to ARG_POINTER. */
770 HOST_WIDE_INT frame_pointer_offset;
771 HOST_WIDE_INT hard_frame_pointer_offset;
772 HOST_WIDE_INT stack_pointer_offset;
773
774 /* When save_regs_using_mov is set, emit prologue using
775 move instead of push instructions. */
776 bool save_regs_using_mov;
777 };
778
779 /* Used to enable/disable debugging features. */
780 const char *ix86_debug_arg_string, *ix86_debug_addr_string;
781 /* Code model option as passed by user. */
782 const char *ix86_cmodel_string;
783 /* Parsed value. */
784 enum cmodel ix86_cmodel;
785 /* Asm dialect. */
786 const char *ix86_asm_string;
787 enum asm_dialect ix86_asm_dialect = ASM_ATT;
788 /* TLS dialext. */
789 const char *ix86_tls_dialect_string;
790 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
791
792 /* Which unit we are generating floating point math for. */
793 enum fpmath_unit ix86_fpmath;
794
795 /* Which cpu are we scheduling for. */
796 enum processor_type ix86_tune;
797 /* Which instruction set architecture to use. */
798 enum processor_type ix86_arch;
799
800 /* Strings to hold which cpu and instruction set architecture to use. */
801 const char *ix86_tune_string; /* for -mtune=<xxx> */
802 const char *ix86_arch_string; /* for -march=<xxx> */
803 const char *ix86_fpmath_string; /* for -mfpmath=<xxx> */
804
805 /* # of registers to use to pass arguments. */
806 const char *ix86_regparm_string;
807
808 /* true if sse prefetch instruction is not NOOP. */
809 int x86_prefetch_sse;
810
811 /* ix86_regparm_string as a number */
812 int ix86_regparm;
813
814 /* Alignment to use for loops and jumps: */
815
816 /* Power of two alignment for loops. */
817 const char *ix86_align_loops_string;
818
819 /* Power of two alignment for non-loop jumps. */
820 const char *ix86_align_jumps_string;
821
822 /* Power of two alignment for stack boundary in bytes. */
823 const char *ix86_preferred_stack_boundary_string;
824
825 /* Preferred alignment for stack boundary in bits. */
826 unsigned int ix86_preferred_stack_boundary;
827
828 /* Values 1-5: see jump.c */
829 int ix86_branch_cost;
830 const char *ix86_branch_cost_string;
831
832 /* Power of two alignment for functions. */
833 const char *ix86_align_funcs_string;
834
835 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
836 char internal_label_prefix[16];
837 int internal_label_prefix_len;
838 \f
839 static void output_pic_addr_const (FILE *, rtx, int);
840 static void put_condition_code (enum rtx_code, enum machine_mode,
841 int, int, FILE *);
842 static const char *get_some_local_dynamic_name (void);
843 static int get_some_local_dynamic_name_1 (rtx *, void *);
844 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
845 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
846 rtx *);
847 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
848 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
849 enum machine_mode);
850 static rtx get_thread_pointer (int);
851 static rtx legitimize_tls_address (rtx, enum tls_model, int);
852 static void get_pc_thunk_name (char [32], unsigned int);
853 static rtx gen_push (rtx);
854 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
855 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
856 static struct machine_function * ix86_init_machine_status (void);
857 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
858 static int ix86_nsaved_regs (void);
859 static void ix86_emit_save_regs (void);
860 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
861 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
862 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
863 static HOST_WIDE_INT ix86_GOT_alias_set (void);
864 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
865 static rtx ix86_expand_aligntest (rtx, int);
866 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
867 static int ix86_issue_rate (void);
868 static int ix86_adjust_cost (rtx, rtx, rtx, int);
869 static int ia32_multipass_dfa_lookahead (void);
870 static void ix86_init_mmx_sse_builtins (void);
871 static rtx x86_this_parameter (tree);
872 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
873 HOST_WIDE_INT, tree);
874 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
875 static void x86_file_start (void);
876 static void ix86_reorg (void);
877 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
878 static tree ix86_build_builtin_va_list (void);
879 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
880 tree, int *, int);
881 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
882 static bool ix86_vector_mode_supported_p (enum machine_mode);
883
884 static int ix86_address_cost (rtx);
885 static bool ix86_cannot_force_const_mem (rtx);
886 static rtx ix86_delegitimize_address (rtx);
887
888 struct builtin_description;
889 static rtx ix86_expand_sse_comi (const struct builtin_description *,
890 tree, rtx);
891 static rtx ix86_expand_sse_compare (const struct builtin_description *,
892 tree, rtx);
893 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
894 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
895 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
896 static rtx ix86_expand_store_builtin (enum insn_code, tree);
897 static rtx safe_vector_operand (rtx, enum machine_mode);
898 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
899 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
900 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
901 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
902 static int ix86_fp_comparison_cost (enum rtx_code code);
903 static unsigned int ix86_select_alt_pic_regnum (void);
904 static int ix86_save_reg (unsigned int, int);
905 static void ix86_compute_frame_layout (struct ix86_frame *);
906 static int ix86_comp_type_attributes (tree, tree);
907 static int ix86_function_regparm (tree, tree);
908 const struct attribute_spec ix86_attribute_table[];
909 static bool ix86_function_ok_for_sibcall (tree, tree);
910 static tree ix86_handle_cdecl_attribute (tree *, tree, tree, int, bool *);
911 static tree ix86_handle_regparm_attribute (tree *, tree, tree, int, bool *);
912 static int ix86_value_regno (enum machine_mode);
913 static bool contains_128bit_aligned_vector_p (tree);
914 static rtx ix86_struct_value_rtx (tree, int);
915 static bool ix86_ms_bitfield_layout_p (tree);
916 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
917 static int extended_reg_mentioned_1 (rtx *, void *);
918 static bool ix86_rtx_costs (rtx, int, int, int *);
919 static int min_insn_size (rtx);
920 static tree ix86_md_asm_clobbers (tree clobbers);
921 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
922 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
923 tree, bool);
924 static void ix86_init_builtins (void);
925 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
926
927 /* This function is only used on Solaris. */
928 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
929 ATTRIBUTE_UNUSED;
930
931 /* Register class used for passing given 64bit part of the argument.
932 These represent classes as documented by the PS ABI, with the exception
933 of SSESF, SSEDF classes, that are basically SSE class, just gcc will
934 use SF or DFmode move instead of DImode to avoid reformatting penalties.
935
936 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
937 whenever possible (upper half does contain padding).
938 */
939 enum x86_64_reg_class
940 {
941 X86_64_NO_CLASS,
942 X86_64_INTEGER_CLASS,
943 X86_64_INTEGERSI_CLASS,
944 X86_64_SSE_CLASS,
945 X86_64_SSESF_CLASS,
946 X86_64_SSEDF_CLASS,
947 X86_64_SSEUP_CLASS,
948 X86_64_X87_CLASS,
949 X86_64_X87UP_CLASS,
950 X86_64_COMPLEX_X87_CLASS,
951 X86_64_MEMORY_CLASS
952 };
953 static const char * const x86_64_reg_class_name[] = {
954 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
955 "sseup", "x87", "x87up", "cplx87", "no"
956 };
957
958 #define MAX_CLASSES 4
959
960 /* Table of constants used by fldpi, fldln2, etc.... */
961 static REAL_VALUE_TYPE ext_80387_constants_table [5];
962 static bool ext_80387_constants_init = 0;
963 static void init_ext_80387_constants (void);
964 \f
965 /* Initialize the GCC target structure. */
966 #undef TARGET_ATTRIBUTE_TABLE
967 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
968 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
969 # undef TARGET_MERGE_DECL_ATTRIBUTES
970 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
971 #endif
972
973 #undef TARGET_COMP_TYPE_ATTRIBUTES
974 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
975
976 #undef TARGET_INIT_BUILTINS
977 #define TARGET_INIT_BUILTINS ix86_init_builtins
978 #undef TARGET_EXPAND_BUILTIN
979 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
980
981 #undef TARGET_ASM_FUNCTION_EPILOGUE
982 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
983
984 #undef TARGET_ASM_OPEN_PAREN
985 #define TARGET_ASM_OPEN_PAREN ""
986 #undef TARGET_ASM_CLOSE_PAREN
987 #define TARGET_ASM_CLOSE_PAREN ""
988
989 #undef TARGET_ASM_ALIGNED_HI_OP
990 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
991 #undef TARGET_ASM_ALIGNED_SI_OP
992 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
993 #ifdef ASM_QUAD
994 #undef TARGET_ASM_ALIGNED_DI_OP
995 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
996 #endif
997
998 #undef TARGET_ASM_UNALIGNED_HI_OP
999 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1000 #undef TARGET_ASM_UNALIGNED_SI_OP
1001 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1002 #undef TARGET_ASM_UNALIGNED_DI_OP
1003 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1004
1005 #undef TARGET_SCHED_ADJUST_COST
1006 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1007 #undef TARGET_SCHED_ISSUE_RATE
1008 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1009 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1010 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1011 ia32_multipass_dfa_lookahead
1012
1013 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1014 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1015
1016 #ifdef HAVE_AS_TLS
1017 #undef TARGET_HAVE_TLS
1018 #define TARGET_HAVE_TLS true
1019 #endif
1020 #undef TARGET_CANNOT_FORCE_CONST_MEM
1021 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1022
1023 #undef TARGET_DELEGITIMIZE_ADDRESS
1024 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1025
1026 #undef TARGET_MS_BITFIELD_LAYOUT_P
1027 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1028
1029 #undef TARGET_ASM_OUTPUT_MI_THUNK
1030 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1031 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1032 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1033
1034 #undef TARGET_ASM_FILE_START
1035 #define TARGET_ASM_FILE_START x86_file_start
1036
1037 #undef TARGET_RTX_COSTS
1038 #define TARGET_RTX_COSTS ix86_rtx_costs
1039 #undef TARGET_ADDRESS_COST
1040 #define TARGET_ADDRESS_COST ix86_address_cost
1041
1042 #undef TARGET_FIXED_CONDITION_CODE_REGS
1043 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1044 #undef TARGET_CC_MODES_COMPATIBLE
1045 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1046
1047 #undef TARGET_MACHINE_DEPENDENT_REORG
1048 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1049
1050 #undef TARGET_BUILD_BUILTIN_VA_LIST
1051 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1052
1053 #undef TARGET_MD_ASM_CLOBBERS
1054 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1055
1056 #undef TARGET_PROMOTE_PROTOTYPES
1057 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1058 #undef TARGET_STRUCT_VALUE_RTX
1059 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1060 #undef TARGET_SETUP_INCOMING_VARARGS
1061 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1062 #undef TARGET_MUST_PASS_IN_STACK
1063 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1064 #undef TARGET_PASS_BY_REFERENCE
1065 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1066
1067 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1068 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1069
1070 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1071 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1072
1073 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1074 #undef TARGET_INSERT_ATTRIBUTES
1075 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1076 #endif
1077
1078 struct gcc_target targetm = TARGET_INITIALIZER;
1079
1080 \f
1081 /* The svr4 ABI for the i386 says that records and unions are returned
1082 in memory. */
1083 #ifndef DEFAULT_PCC_STRUCT_RETURN
1084 #define DEFAULT_PCC_STRUCT_RETURN 1
1085 #endif
1086
1087 /* Sometimes certain combinations of command options do not make
1088 sense on a particular target machine. You can define a macro
1089 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1090 defined, is executed once just after all the command options have
1091 been parsed.
1092
1093 Don't use this macro to turn on various extra optimizations for
1094 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1095
1096 void
1097 override_options (void)
1098 {
1099 int i;
1100 int ix86_tune_defaulted = 0;
1101
1102 /* Comes from final.c -- no real reason to change it. */
1103 #define MAX_CODE_ALIGN 16
1104
1105 static struct ptt
1106 {
1107 const struct processor_costs *cost; /* Processor costs */
1108 const int target_enable; /* Target flags to enable. */
1109 const int target_disable; /* Target flags to disable. */
1110 const int align_loop; /* Default alignments. */
1111 const int align_loop_max_skip;
1112 const int align_jump;
1113 const int align_jump_max_skip;
1114 const int align_func;
1115 }
1116 const processor_target_table[PROCESSOR_max] =
1117 {
1118 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1119 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1120 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1121 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1122 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1123 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1124 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1125 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1126 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1127 };
1128
1129 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1130 static struct pta
1131 {
1132 const char *const name; /* processor name or nickname. */
1133 const enum processor_type processor;
1134 const enum pta_flags
1135 {
1136 PTA_SSE = 1,
1137 PTA_SSE2 = 2,
1138 PTA_SSE3 = 4,
1139 PTA_MMX = 8,
1140 PTA_PREFETCH_SSE = 16,
1141 PTA_3DNOW = 32,
1142 PTA_3DNOW_A = 64,
1143 PTA_64BIT = 128
1144 } flags;
1145 }
1146 const processor_alias_table[] =
1147 {
1148 {"i386", PROCESSOR_I386, 0},
1149 {"i486", PROCESSOR_I486, 0},
1150 {"i586", PROCESSOR_PENTIUM, 0},
1151 {"pentium", PROCESSOR_PENTIUM, 0},
1152 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1153 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1154 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1155 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1156 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1157 {"i686", PROCESSOR_PENTIUMPRO, 0},
1158 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1159 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1160 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1161 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1162 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1163 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1164 | PTA_MMX | PTA_PREFETCH_SSE},
1165 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1166 | PTA_MMX | PTA_PREFETCH_SSE},
1167 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1168 | PTA_MMX | PTA_PREFETCH_SSE},
1169 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1170 | PTA_MMX | PTA_PREFETCH_SSE},
1171 {"k6", PROCESSOR_K6, PTA_MMX},
1172 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1173 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1174 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1175 | PTA_3DNOW_A},
1176 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1177 | PTA_3DNOW | PTA_3DNOW_A},
1178 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1179 | PTA_3DNOW_A | PTA_SSE},
1180 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1181 | PTA_3DNOW_A | PTA_SSE},
1182 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1183 | PTA_3DNOW_A | PTA_SSE},
1184 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1185 | PTA_SSE | PTA_SSE2 },
1186 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1187 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1188 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1189 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1190 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1191 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1192 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1193 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1194 };
1195
1196 int const pta_size = ARRAY_SIZE (processor_alias_table);
1197
1198 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1199 SUBTARGET_OVERRIDE_OPTIONS;
1200 #endif
1201
1202 /* Set the default values for switches whose default depends on TARGET_64BIT
1203 in case they weren't overwritten by command line options. */
1204 if (TARGET_64BIT)
1205 {
1206 if (flag_omit_frame_pointer == 2)
1207 flag_omit_frame_pointer = 1;
1208 if (flag_asynchronous_unwind_tables == 2)
1209 flag_asynchronous_unwind_tables = 1;
1210 if (flag_pcc_struct_return == 2)
1211 flag_pcc_struct_return = 0;
1212 }
1213 else
1214 {
1215 if (flag_omit_frame_pointer == 2)
1216 flag_omit_frame_pointer = 0;
1217 if (flag_asynchronous_unwind_tables == 2)
1218 flag_asynchronous_unwind_tables = 0;
1219 if (flag_pcc_struct_return == 2)
1220 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1221 }
1222
1223 if (!ix86_tune_string && ix86_arch_string)
1224 ix86_tune_string = ix86_arch_string;
1225 if (!ix86_tune_string)
1226 {
1227 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1228 ix86_tune_defaulted = 1;
1229 }
1230 if (!ix86_arch_string)
1231 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1232
1233 if (ix86_cmodel_string != 0)
1234 {
1235 if (!strcmp (ix86_cmodel_string, "small"))
1236 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1237 else if (flag_pic)
1238 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1239 else if (!strcmp (ix86_cmodel_string, "32"))
1240 ix86_cmodel = CM_32;
1241 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1242 ix86_cmodel = CM_KERNEL;
1243 else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
1244 ix86_cmodel = CM_MEDIUM;
1245 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1246 ix86_cmodel = CM_LARGE;
1247 else
1248 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1249 }
1250 else
1251 {
1252 ix86_cmodel = CM_32;
1253 if (TARGET_64BIT)
1254 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1255 }
1256 if (ix86_asm_string != 0)
1257 {
1258 if (!strcmp (ix86_asm_string, "intel"))
1259 ix86_asm_dialect = ASM_INTEL;
1260 else if (!strcmp (ix86_asm_string, "att"))
1261 ix86_asm_dialect = ASM_ATT;
1262 else
1263 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1264 }
1265 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1266 error ("code model %qs not supported in the %s bit mode",
1267 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1268 if (ix86_cmodel == CM_LARGE)
1269 sorry ("code model %<large%> not supported yet");
1270 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1271 sorry ("%i-bit mode not compiled in",
1272 (target_flags & MASK_64BIT) ? 64 : 32);
1273
1274 for (i = 0; i < pta_size; i++)
1275 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1276 {
1277 ix86_arch = processor_alias_table[i].processor;
1278 /* Default cpu tuning to the architecture. */
1279 ix86_tune = ix86_arch;
1280 if (processor_alias_table[i].flags & PTA_MMX
1281 && !(target_flags_explicit & MASK_MMX))
1282 target_flags |= MASK_MMX;
1283 if (processor_alias_table[i].flags & PTA_3DNOW
1284 && !(target_flags_explicit & MASK_3DNOW))
1285 target_flags |= MASK_3DNOW;
1286 if (processor_alias_table[i].flags & PTA_3DNOW_A
1287 && !(target_flags_explicit & MASK_3DNOW_A))
1288 target_flags |= MASK_3DNOW_A;
1289 if (processor_alias_table[i].flags & PTA_SSE
1290 && !(target_flags_explicit & MASK_SSE))
1291 target_flags |= MASK_SSE;
1292 if (processor_alias_table[i].flags & PTA_SSE2
1293 && !(target_flags_explicit & MASK_SSE2))
1294 target_flags |= MASK_SSE2;
1295 if (processor_alias_table[i].flags & PTA_SSE3
1296 && !(target_flags_explicit & MASK_SSE3))
1297 target_flags |= MASK_SSE3;
1298 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1299 x86_prefetch_sse = true;
1300 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1301 error ("CPU you selected does not support x86-64 "
1302 "instruction set");
1303 break;
1304 }
1305
1306 if (i == pta_size)
1307 error ("bad value (%s) for -march= switch", ix86_arch_string);
1308
1309 for (i = 0; i < pta_size; i++)
1310 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1311 {
1312 ix86_tune = processor_alias_table[i].processor;
1313 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1314 {
1315 if (ix86_tune_defaulted)
1316 {
1317 ix86_tune_string = "x86-64";
1318 for (i = 0; i < pta_size; i++)
1319 if (! strcmp (ix86_tune_string,
1320 processor_alias_table[i].name))
1321 break;
1322 ix86_tune = processor_alias_table[i].processor;
1323 }
1324 else
1325 error ("CPU you selected does not support x86-64 "
1326 "instruction set");
1327 }
1328 /* Intel CPUs have always interpreted SSE prefetch instructions as
1329 NOPs; so, we can enable SSE prefetch instructions even when
1330 -mtune (rather than -march) points us to a processor that has them.
1331 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1332 higher processors. */
1333 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1334 x86_prefetch_sse = true;
1335 break;
1336 }
1337 if (i == pta_size)
1338 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1339
1340 if (optimize_size)
1341 ix86_cost = &size_cost;
1342 else
1343 ix86_cost = processor_target_table[ix86_tune].cost;
1344 target_flags |= processor_target_table[ix86_tune].target_enable;
1345 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1346
1347 /* Arrange to set up i386_stack_locals for all functions. */
1348 init_machine_status = ix86_init_machine_status;
1349
1350 /* Validate -mregparm= value. */
1351 if (ix86_regparm_string)
1352 {
1353 i = atoi (ix86_regparm_string);
1354 if (i < 0 || i > REGPARM_MAX)
1355 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1356 else
1357 ix86_regparm = i;
1358 }
1359 else
1360 if (TARGET_64BIT)
1361 ix86_regparm = REGPARM_MAX;
1362
1363 /* If the user has provided any of the -malign-* options,
1364 warn and use that value only if -falign-* is not set.
1365 Remove this code in GCC 3.2 or later. */
1366 if (ix86_align_loops_string)
1367 {
1368 warning ("-malign-loops is obsolete, use -falign-loops");
1369 if (align_loops == 0)
1370 {
1371 i = atoi (ix86_align_loops_string);
1372 if (i < 0 || i > MAX_CODE_ALIGN)
1373 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1374 else
1375 align_loops = 1 << i;
1376 }
1377 }
1378
1379 if (ix86_align_jumps_string)
1380 {
1381 warning ("-malign-jumps is obsolete, use -falign-jumps");
1382 if (align_jumps == 0)
1383 {
1384 i = atoi (ix86_align_jumps_string);
1385 if (i < 0 || i > MAX_CODE_ALIGN)
1386 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1387 else
1388 align_jumps = 1 << i;
1389 }
1390 }
1391
1392 if (ix86_align_funcs_string)
1393 {
1394 warning ("-malign-functions is obsolete, use -falign-functions");
1395 if (align_functions == 0)
1396 {
1397 i = atoi (ix86_align_funcs_string);
1398 if (i < 0 || i > MAX_CODE_ALIGN)
1399 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1400 else
1401 align_functions = 1 << i;
1402 }
1403 }
1404
1405 /* Default align_* from the processor table. */
1406 if (align_loops == 0)
1407 {
1408 align_loops = processor_target_table[ix86_tune].align_loop;
1409 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1410 }
1411 if (align_jumps == 0)
1412 {
1413 align_jumps = processor_target_table[ix86_tune].align_jump;
1414 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1415 }
1416 if (align_functions == 0)
1417 {
1418 align_functions = processor_target_table[ix86_tune].align_func;
1419 }
1420
1421 /* Validate -mpreferred-stack-boundary= value, or provide default.
1422 The default of 128 bits is for Pentium III's SSE __m128, but we
1423 don't want additional code to keep the stack aligned when
1424 optimizing for code size. */
1425 ix86_preferred_stack_boundary = (optimize_size
1426 ? TARGET_64BIT ? 128 : 32
1427 : 128);
1428 if (ix86_preferred_stack_boundary_string)
1429 {
1430 i = atoi (ix86_preferred_stack_boundary_string);
1431 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1432 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1433 TARGET_64BIT ? 4 : 2);
1434 else
1435 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
1436 }
1437
1438 /* Validate -mbranch-cost= value, or provide default. */
1439 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1440 if (ix86_branch_cost_string)
1441 {
1442 i = atoi (ix86_branch_cost_string);
1443 if (i < 0 || i > 5)
1444 error ("-mbranch-cost=%d is not between 0 and 5", i);
1445 else
1446 ix86_branch_cost = i;
1447 }
1448
1449 if (ix86_tls_dialect_string)
1450 {
1451 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1452 ix86_tls_dialect = TLS_DIALECT_GNU;
1453 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1454 ix86_tls_dialect = TLS_DIALECT_SUN;
1455 else
1456 error ("bad value (%s) for -mtls-dialect= switch",
1457 ix86_tls_dialect_string);
1458 }
1459
1460 /* Keep nonleaf frame pointers. */
1461 if (flag_omit_frame_pointer)
1462 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
1463 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
1464 flag_omit_frame_pointer = 1;
1465
1466 /* If we're doing fast math, we don't care about comparison order
1467 wrt NaNs. This lets us use a shorter comparison sequence. */
1468 if (flag_unsafe_math_optimizations)
1469 target_flags &= ~MASK_IEEE_FP;
1470
1471 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1472 since the insns won't need emulation. */
1473 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1474 target_flags &= ~MASK_NO_FANCY_MATH_387;
1475
1476 /* Likewise, if the target doesn't have a 387, or we've specified
1477 software floating point, don't use 387 inline instrinsics. */
1478 if (!TARGET_80387)
1479 target_flags |= MASK_NO_FANCY_MATH_387;
1480
1481 /* Turn on SSE2 builtins for -msse3. */
1482 if (TARGET_SSE3)
1483 target_flags |= MASK_SSE2;
1484
1485 /* Turn on SSE builtins for -msse2. */
1486 if (TARGET_SSE2)
1487 target_flags |= MASK_SSE;
1488
1489 /* Turn on MMX builtins for -msse. */
1490 if (TARGET_SSE)
1491 {
1492 target_flags |= MASK_MMX & ~target_flags_explicit;
1493 x86_prefetch_sse = true;
1494 }
1495
1496 /* Turn on MMX builtins for 3Dnow. */
1497 if (TARGET_3DNOW)
1498 target_flags |= MASK_MMX;
1499
1500 if (TARGET_64BIT)
1501 {
1502 if (TARGET_ALIGN_DOUBLE)
1503 error ("-malign-double makes no sense in the 64bit mode");
1504 if (TARGET_RTD)
1505 error ("-mrtd calling convention not supported in the 64bit mode");
1506
1507 /* Enable by default the SSE and MMX builtins. Do allow the user to
1508 explicitly disable any of these. In particular, disabling SSE and
1509 MMX for kernel code is extremely useful. */
1510 target_flags
1511 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
1512 & ~target_flags_explicit);
1513
1514 if (TARGET_SSE)
1515 ix86_fpmath = FPMATH_SSE;
1516 }
1517 else
1518 {
1519 ix86_fpmath = FPMATH_387;
1520 /* i386 ABI does not specify red zone. It still makes sense to use it
1521 when programmer takes care to stack from being destroyed. */
1522 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1523 target_flags |= MASK_NO_RED_ZONE;
1524 }
1525
1526 if (ix86_fpmath_string != 0)
1527 {
1528 if (! strcmp (ix86_fpmath_string, "387"))
1529 ix86_fpmath = FPMATH_387;
1530 else if (! strcmp (ix86_fpmath_string, "sse"))
1531 {
1532 if (!TARGET_SSE)
1533 {
1534 warning ("SSE instruction set disabled, using 387 arithmetics");
1535 ix86_fpmath = FPMATH_387;
1536 }
1537 else
1538 ix86_fpmath = FPMATH_SSE;
1539 }
1540 else if (! strcmp (ix86_fpmath_string, "387,sse")
1541 || ! strcmp (ix86_fpmath_string, "sse,387"))
1542 {
1543 if (!TARGET_SSE)
1544 {
1545 warning ("SSE instruction set disabled, using 387 arithmetics");
1546 ix86_fpmath = FPMATH_387;
1547 }
1548 else if (!TARGET_80387)
1549 {
1550 warning ("387 instruction set disabled, using SSE arithmetics");
1551 ix86_fpmath = FPMATH_SSE;
1552 }
1553 else
1554 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1555 }
1556 else
1557 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1558 }
1559
1560 /* If the i387 is disabled, then do not return values in it. */
1561 if (!TARGET_80387)
1562 target_flags &= ~MASK_FLOAT_RETURNS;
1563
1564 if ((x86_accumulate_outgoing_args & TUNEMASK)
1565 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1566 && !optimize_size)
1567 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1568
1569 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1570 {
1571 char *p;
1572 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1573 p = strchr (internal_label_prefix, 'X');
1574 internal_label_prefix_len = p - internal_label_prefix;
1575 *p = '\0';
1576 }
1577
1578 /* When scheduling description is not available, disable scheduler pass
1579 so it won't slow down the compilation and make x87 code slower. */
1580 if (!TARGET_SCHEDULE)
1581 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1582 }
1583 \f
1584 void
1585 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1586 {
1587 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1588 make the problem with not enough registers even worse. */
1589 #ifdef INSN_SCHEDULING
1590 if (level > 1)
1591 flag_schedule_insns = 0;
1592 #endif
1593
1594 /* The default values of these switches depend on the TARGET_64BIT
1595 that is not known at this moment. Mark these values with 2 and
1596 let user the to override these. In case there is no command line option
1597 specifying them, we will set the defaults in override_options. */
1598 if (optimize >= 1)
1599 flag_omit_frame_pointer = 2;
1600 flag_pcc_struct_return = 2;
1601 flag_asynchronous_unwind_tables = 2;
1602 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
1603 SUBTARGET_OPTIMIZATION_OPTIONS;
1604 #endif
1605 }
1606 \f
1607 /* Table of valid machine attributes. */
1608 const struct attribute_spec ix86_attribute_table[] =
1609 {
1610 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1611 /* Stdcall attribute says callee is responsible for popping arguments
1612 if they are not variable. */
1613 { "stdcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1614 /* Fastcall attribute says callee is responsible for popping arguments
1615 if they are not variable. */
1616 { "fastcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1617 /* Cdecl attribute says the callee is a normal C declaration */
1618 { "cdecl", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1619 /* Regparm attribute specifies how many integer arguments are to be
1620 passed in registers. */
1621 { "regparm", 1, 1, false, true, true, ix86_handle_regparm_attribute },
1622 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1623 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1624 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1625 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1626 #endif
1627 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1628 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1629 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1630 SUBTARGET_ATTRIBUTE_TABLE,
1631 #endif
1632 { NULL, 0, 0, false, false, false, NULL }
1633 };
1634
1635 /* Decide whether we can make a sibling call to a function. DECL is the
1636 declaration of the function being targeted by the call and EXP is the
1637 CALL_EXPR representing the call. */
1638
1639 static bool
1640 ix86_function_ok_for_sibcall (tree decl, tree exp)
1641 {
1642 /* If we are generating position-independent code, we cannot sibcall
1643 optimize any indirect call, or a direct call to a global function,
1644 as the PLT requires %ebx be live. */
1645 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1646 return false;
1647
1648 /* If we are returning floats on the 80387 register stack, we cannot
1649 make a sibcall from a function that doesn't return a float to a
1650 function that does or, conversely, from a function that does return
1651 a float to a function that doesn't; the necessary stack adjustment
1652 would not be executed. */
1653 if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp)))
1654 != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)))))
1655 return false;
1656
1657 /* If this call is indirect, we'll need to be able to use a call-clobbered
1658 register for the address of the target function. Make sure that all
1659 such registers are not used for passing parameters. */
1660 if (!decl && !TARGET_64BIT)
1661 {
1662 tree type;
1663
1664 /* We're looking at the CALL_EXPR, we need the type of the function. */
1665 type = TREE_OPERAND (exp, 0); /* pointer expression */
1666 type = TREE_TYPE (type); /* pointer type */
1667 type = TREE_TYPE (type); /* function type */
1668
1669 if (ix86_function_regparm (type, NULL) >= 3)
1670 {
1671 /* ??? Need to count the actual number of registers to be used,
1672 not the possible number of registers. Fix later. */
1673 return false;
1674 }
1675 }
1676
1677 /* Otherwise okay. That also includes certain types of indirect calls. */
1678 return true;
1679 }
1680
1681 /* Handle a "cdecl", "stdcall", or "fastcall" attribute;
1682 arguments as in struct attribute_spec.handler. */
1683 static tree
1684 ix86_handle_cdecl_attribute (tree *node, tree name,
1685 tree args ATTRIBUTE_UNUSED,
1686 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1687 {
1688 if (TREE_CODE (*node) != FUNCTION_TYPE
1689 && TREE_CODE (*node) != METHOD_TYPE
1690 && TREE_CODE (*node) != FIELD_DECL
1691 && TREE_CODE (*node) != TYPE_DECL)
1692 {
1693 warning ("%qs attribute only applies to functions",
1694 IDENTIFIER_POINTER (name));
1695 *no_add_attrs = true;
1696 }
1697 else
1698 {
1699 if (is_attribute_p ("fastcall", name))
1700 {
1701 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1702 {
1703 error ("fastcall and stdcall attributes are not compatible");
1704 }
1705 else if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
1706 {
1707 error ("fastcall and regparm attributes are not compatible");
1708 }
1709 }
1710 else if (is_attribute_p ("stdcall", name))
1711 {
1712 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1713 {
1714 error ("fastcall and stdcall attributes are not compatible");
1715 }
1716 }
1717 }
1718
1719 if (TARGET_64BIT)
1720 {
1721 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
1722 *no_add_attrs = true;
1723 }
1724
1725 return NULL_TREE;
1726 }
1727
1728 /* Handle a "regparm" attribute;
1729 arguments as in struct attribute_spec.handler. */
1730 static tree
1731 ix86_handle_regparm_attribute (tree *node, tree name, tree args,
1732 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1733 {
1734 if (TREE_CODE (*node) != FUNCTION_TYPE
1735 && TREE_CODE (*node) != METHOD_TYPE
1736 && TREE_CODE (*node) != FIELD_DECL
1737 && TREE_CODE (*node) != TYPE_DECL)
1738 {
1739 warning ("%qs attribute only applies to functions",
1740 IDENTIFIER_POINTER (name));
1741 *no_add_attrs = true;
1742 }
1743 else
1744 {
1745 tree cst;
1746
1747 cst = TREE_VALUE (args);
1748 if (TREE_CODE (cst) != INTEGER_CST)
1749 {
1750 warning ("%qs attribute requires an integer constant argument",
1751 IDENTIFIER_POINTER (name));
1752 *no_add_attrs = true;
1753 }
1754 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
1755 {
1756 warning ("argument to %qs attribute larger than %d",
1757 IDENTIFIER_POINTER (name), REGPARM_MAX);
1758 *no_add_attrs = true;
1759 }
1760
1761 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1762 {
1763 error ("fastcall and regparm attributes are not compatible");
1764 }
1765 }
1766
1767 return NULL_TREE;
1768 }
1769
1770 /* Return 0 if the attributes for two types are incompatible, 1 if they
1771 are compatible, and 2 if they are nearly compatible (which causes a
1772 warning to be generated). */
1773
1774 static int
1775 ix86_comp_type_attributes (tree type1, tree type2)
1776 {
1777 /* Check for mismatch of non-default calling convention. */
1778 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
1779
1780 if (TREE_CODE (type1) != FUNCTION_TYPE)
1781 return 1;
1782
1783 /* Check for mismatched fastcall types */
1784 if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
1785 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
1786 return 0;
1787
1788 /* Check for mismatched return types (cdecl vs stdcall). */
1789 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
1790 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
1791 return 0;
1792 if (ix86_function_regparm (type1, NULL)
1793 != ix86_function_regparm (type2, NULL))
1794 return 0;
1795 return 1;
1796 }
1797 \f
1798 /* Return the regparm value for a fuctio with the indicated TYPE and DECL.
1799 DECL may be NULL when calling function indirectly
1800 or considering a libcall. */
1801
1802 static int
1803 ix86_function_regparm (tree type, tree decl)
1804 {
1805 tree attr;
1806 int regparm = ix86_regparm;
1807 bool user_convention = false;
1808
1809 if (!TARGET_64BIT)
1810 {
1811 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
1812 if (attr)
1813 {
1814 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
1815 user_convention = true;
1816 }
1817
1818 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
1819 {
1820 regparm = 2;
1821 user_convention = true;
1822 }
1823
1824 /* Use register calling convention for local functions when possible. */
1825 if (!TARGET_64BIT && !user_convention && decl
1826 && flag_unit_at_a_time && !profile_flag)
1827 {
1828 struct cgraph_local_info *i = cgraph_local_info (decl);
1829 if (i && i->local)
1830 {
1831 /* We can't use regparm(3) for nested functions as these use
1832 static chain pointer in third argument. */
1833 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
1834 regparm = 2;
1835 else
1836 regparm = 3;
1837 }
1838 }
1839 }
1840 return regparm;
1841 }
1842
1843 /* Return true if EAX is live at the start of the function. Used by
1844 ix86_expand_prologue to determine if we need special help before
1845 calling allocate_stack_worker. */
1846
1847 static bool
1848 ix86_eax_live_at_start_p (void)
1849 {
1850 /* Cheat. Don't bother working forward from ix86_function_regparm
1851 to the function type to whether an actual argument is located in
1852 eax. Instead just look at cfg info, which is still close enough
1853 to correct at this point. This gives false positives for broken
1854 functions that might use uninitialized data that happens to be
1855 allocated in eax, but who cares? */
1856 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0);
1857 }
1858
1859 /* Value is the number of bytes of arguments automatically
1860 popped when returning from a subroutine call.
1861 FUNDECL is the declaration node of the function (as a tree),
1862 FUNTYPE is the data type of the function (as a tree),
1863 or for a library call it is an identifier node for the subroutine name.
1864 SIZE is the number of bytes of arguments passed on the stack.
1865
1866 On the 80386, the RTD insn may be used to pop them if the number
1867 of args is fixed, but if the number is variable then the caller
1868 must pop them all. RTD can't be used for library calls now
1869 because the library is compiled with the Unix compiler.
1870 Use of RTD is a selectable option, since it is incompatible with
1871 standard Unix calling sequences. If the option is not selected,
1872 the caller must always pop the args.
1873
1874 The attribute stdcall is equivalent to RTD on a per module basis. */
1875
1876 int
1877 ix86_return_pops_args (tree fundecl, tree funtype, int size)
1878 {
1879 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
1880
1881 /* Cdecl functions override -mrtd, and never pop the stack. */
1882 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
1883
1884 /* Stdcall and fastcall functions will pop the stack if not
1885 variable args. */
1886 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
1887 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
1888 rtd = 1;
1889
1890 if (rtd
1891 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
1892 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
1893 == void_type_node)))
1894 return size;
1895 }
1896
1897 /* Lose any fake structure return argument if it is passed on the stack. */
1898 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
1899 && !TARGET_64BIT
1900 && !KEEP_AGGREGATE_RETURN_POINTER)
1901 {
1902 int nregs = ix86_function_regparm (funtype, fundecl);
1903
1904 if (!nregs)
1905 return GET_MODE_SIZE (Pmode);
1906 }
1907
1908 return 0;
1909 }
1910 \f
1911 /* Argument support functions. */
1912
1913 /* Return true when register may be used to pass function parameters. */
1914 bool
1915 ix86_function_arg_regno_p (int regno)
1916 {
1917 int i;
1918 if (!TARGET_64BIT)
1919 return (regno < REGPARM_MAX
1920 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
1921 if (SSE_REGNO_P (regno) && TARGET_SSE)
1922 return true;
1923 /* RAX is used as hidden argument to va_arg functions. */
1924 if (!regno)
1925 return true;
1926 for (i = 0; i < REGPARM_MAX; i++)
1927 if (regno == x86_64_int_parameter_registers[i])
1928 return true;
1929 return false;
1930 }
1931
1932 /* Return if we do not know how to pass TYPE solely in registers. */
1933
1934 static bool
1935 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
1936 {
1937 if (must_pass_in_stack_var_size_or_pad (mode, type))
1938 return true;
1939
1940 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
1941 The layout_type routine is crafty and tries to trick us into passing
1942 currently unsupported vector types on the stack by using TImode. */
1943 return (!TARGET_64BIT && mode == TImode
1944 && type && TREE_CODE (type) != VECTOR_TYPE);
1945 }
1946
1947 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1948 for a call to a function whose data type is FNTYPE.
1949 For a library call, FNTYPE is 0. */
1950
1951 void
1952 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
1953 tree fntype, /* tree ptr for function decl */
1954 rtx libname, /* SYMBOL_REF of library name or 0 */
1955 tree fndecl)
1956 {
1957 static CUMULATIVE_ARGS zero_cum;
1958 tree param, next_param;
1959
1960 if (TARGET_DEBUG_ARG)
1961 {
1962 fprintf (stderr, "\ninit_cumulative_args (");
1963 if (fntype)
1964 fprintf (stderr, "fntype code = %s, ret code = %s",
1965 tree_code_name[(int) TREE_CODE (fntype)],
1966 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
1967 else
1968 fprintf (stderr, "no fntype");
1969
1970 if (libname)
1971 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
1972 }
1973
1974 *cum = zero_cum;
1975
1976 /* Set up the number of registers to use for passing arguments. */
1977 if (fntype)
1978 cum->nregs = ix86_function_regparm (fntype, fndecl);
1979 else
1980 cum->nregs = ix86_regparm;
1981 if (TARGET_SSE)
1982 cum->sse_nregs = SSE_REGPARM_MAX;
1983 if (TARGET_MMX)
1984 cum->mmx_nregs = MMX_REGPARM_MAX;
1985 cum->warn_sse = true;
1986 cum->warn_mmx = true;
1987 cum->maybe_vaarg = false;
1988
1989 /* Use ecx and edx registers if function has fastcall attribute */
1990 if (fntype && !TARGET_64BIT)
1991 {
1992 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
1993 {
1994 cum->nregs = 2;
1995 cum->fastcall = 1;
1996 }
1997 }
1998
1999 /* Determine if this function has variable arguments. This is
2000 indicated by the last argument being 'void_type_mode' if there
2001 are no variable arguments. If there are variable arguments, then
2002 we won't pass anything in registers in 32-bit mode. */
2003
2004 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2005 {
2006 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2007 param != 0; param = next_param)
2008 {
2009 next_param = TREE_CHAIN (param);
2010 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2011 {
2012 if (!TARGET_64BIT)
2013 {
2014 cum->nregs = 0;
2015 cum->sse_nregs = 0;
2016 cum->mmx_nregs = 0;
2017 cum->warn_sse = 0;
2018 cum->warn_mmx = 0;
2019 cum->fastcall = 0;
2020 }
2021 cum->maybe_vaarg = true;
2022 }
2023 }
2024 }
2025 if ((!fntype && !libname)
2026 || (fntype && !TYPE_ARG_TYPES (fntype)))
2027 cum->maybe_vaarg = 1;
2028
2029 if (TARGET_DEBUG_ARG)
2030 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2031
2032 return;
2033 }
2034
2035 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2036 But in the case of vector types, it is some vector mode.
2037
2038 When we have only some of our vector isa extensions enabled, then there
2039 are some modes for which vector_mode_supported_p is false. For these
2040 modes, the generic vector support in gcc will choose some non-vector mode
2041 in order to implement the type. By computing the natural mode, we'll
2042 select the proper ABI location for the operand and not depend on whatever
2043 the middle-end decides to do with these vector types. */
2044
2045 static enum machine_mode
2046 type_natural_mode (tree type)
2047 {
2048 enum machine_mode mode = TYPE_MODE (type);
2049
2050 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2051 {
2052 HOST_WIDE_INT size = int_size_in_bytes (type);
2053 if ((size == 8 || size == 16)
2054 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2055 && TYPE_VECTOR_SUBPARTS (type) > 1)
2056 {
2057 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2058
2059 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2060 mode = MIN_MODE_VECTOR_FLOAT;
2061 else
2062 mode = MIN_MODE_VECTOR_INT;
2063
2064 /* Get the mode which has this inner mode and number of units. */
2065 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2066 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2067 && GET_MODE_INNER (mode) == innermode)
2068 return mode;
2069
2070 abort ();
2071 }
2072 }
2073
2074 return mode;
2075 }
2076
2077 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2078 this may not agree with the mode that the type system has chosen for the
2079 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2080 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2081
2082 static rtx
2083 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2084 unsigned int regno)
2085 {
2086 rtx tmp;
2087
2088 if (orig_mode != BLKmode)
2089 tmp = gen_rtx_REG (orig_mode, regno);
2090 else
2091 {
2092 tmp = gen_rtx_REG (mode, regno);
2093 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2094 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2095 }
2096
2097 return tmp;
2098 }
2099
2100 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
2101 of this code is to classify each 8bytes of incoming argument by the register
2102 class and assign registers accordingly. */
2103
2104 /* Return the union class of CLASS1 and CLASS2.
2105 See the x86-64 PS ABI for details. */
2106
2107 static enum x86_64_reg_class
2108 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2109 {
2110 /* Rule #1: If both classes are equal, this is the resulting class. */
2111 if (class1 == class2)
2112 return class1;
2113
2114 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2115 the other class. */
2116 if (class1 == X86_64_NO_CLASS)
2117 return class2;
2118 if (class2 == X86_64_NO_CLASS)
2119 return class1;
2120
2121 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2122 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2123 return X86_64_MEMORY_CLASS;
2124
2125 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2126 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2127 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2128 return X86_64_INTEGERSI_CLASS;
2129 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2130 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2131 return X86_64_INTEGER_CLASS;
2132
2133 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
2134 MEMORY is used. */
2135 if (class1 == X86_64_X87_CLASS
2136 || class1 == X86_64_X87UP_CLASS
2137 || class1 == X86_64_COMPLEX_X87_CLASS
2138 || class2 == X86_64_X87_CLASS
2139 || class2 == X86_64_X87UP_CLASS
2140 || class2 == X86_64_COMPLEX_X87_CLASS)
2141 return X86_64_MEMORY_CLASS;
2142
2143 /* Rule #6: Otherwise class SSE is used. */
2144 return X86_64_SSE_CLASS;
2145 }
2146
2147 /* Classify the argument of type TYPE and mode MODE.
2148 CLASSES will be filled by the register class used to pass each word
2149 of the operand. The number of words is returned. In case the parameter
2150 should be passed in memory, 0 is returned. As a special case for zero
2151 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2152
2153 BIT_OFFSET is used internally for handling records and specifies offset
2154 of the offset in bits modulo 256 to avoid overflow cases.
2155
2156 See the x86-64 PS ABI for details.
2157 */
2158
2159 static int
2160 classify_argument (enum machine_mode mode, tree type,
2161 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2162 {
2163 HOST_WIDE_INT bytes =
2164 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2165 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2166
2167 /* Variable sized entities are always passed/returned in memory. */
2168 if (bytes < 0)
2169 return 0;
2170
2171 if (mode != VOIDmode
2172 && targetm.calls.must_pass_in_stack (mode, type))
2173 return 0;
2174
2175 if (type && AGGREGATE_TYPE_P (type))
2176 {
2177 int i;
2178 tree field;
2179 enum x86_64_reg_class subclasses[MAX_CLASSES];
2180
2181 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2182 if (bytes > 16)
2183 return 0;
2184
2185 for (i = 0; i < words; i++)
2186 classes[i] = X86_64_NO_CLASS;
2187
2188 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2189 signalize memory class, so handle it as special case. */
2190 if (!words)
2191 {
2192 classes[0] = X86_64_NO_CLASS;
2193 return 1;
2194 }
2195
2196 /* Classify each field of record and merge classes. */
2197 if (TREE_CODE (type) == RECORD_TYPE)
2198 {
2199 /* For classes first merge in the field of the subclasses. */
2200 if (TYPE_BINFO (type))
2201 {
2202 tree binfo, base_binfo;
2203 int basenum;
2204
2205 for (binfo = TYPE_BINFO (type), basenum = 0;
2206 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2207 {
2208 int num;
2209 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2210 tree type = BINFO_TYPE (base_binfo);
2211
2212 num = classify_argument (TYPE_MODE (type),
2213 type, subclasses,
2214 (offset + bit_offset) % 256);
2215 if (!num)
2216 return 0;
2217 for (i = 0; i < num; i++)
2218 {
2219 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2220 classes[i + pos] =
2221 merge_classes (subclasses[i], classes[i + pos]);
2222 }
2223 }
2224 }
2225 /* And now merge the fields of structure. */
2226 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2227 {
2228 if (TREE_CODE (field) == FIELD_DECL)
2229 {
2230 int num;
2231
2232 /* Bitfields are always classified as integer. Handle them
2233 early, since later code would consider them to be
2234 misaligned integers. */
2235 if (DECL_BIT_FIELD (field))
2236 {
2237 for (i = int_bit_position (field) / 8 / 8;
2238 i < (int_bit_position (field)
2239 + tree_low_cst (DECL_SIZE (field), 0)
2240 + 63) / 8 / 8; i++)
2241 classes[i] =
2242 merge_classes (X86_64_INTEGER_CLASS,
2243 classes[i]);
2244 }
2245 else
2246 {
2247 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2248 TREE_TYPE (field), subclasses,
2249 (int_bit_position (field)
2250 + bit_offset) % 256);
2251 if (!num)
2252 return 0;
2253 for (i = 0; i < num; i++)
2254 {
2255 int pos =
2256 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2257 classes[i + pos] =
2258 merge_classes (subclasses[i], classes[i + pos]);
2259 }
2260 }
2261 }
2262 }
2263 }
2264 /* Arrays are handled as small records. */
2265 else if (TREE_CODE (type) == ARRAY_TYPE)
2266 {
2267 int num;
2268 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2269 TREE_TYPE (type), subclasses, bit_offset);
2270 if (!num)
2271 return 0;
2272
2273 /* The partial classes are now full classes. */
2274 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2275 subclasses[0] = X86_64_SSE_CLASS;
2276 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2277 subclasses[0] = X86_64_INTEGER_CLASS;
2278
2279 for (i = 0; i < words; i++)
2280 classes[i] = subclasses[i % num];
2281 }
2282 /* Unions are similar to RECORD_TYPE but offset is always 0. */
2283 else if (TREE_CODE (type) == UNION_TYPE
2284 || TREE_CODE (type) == QUAL_UNION_TYPE)
2285 {
2286 /* For classes first merge in the field of the subclasses. */
2287 if (TYPE_BINFO (type))
2288 {
2289 tree binfo, base_binfo;
2290 int basenum;
2291
2292 for (binfo = TYPE_BINFO (type), basenum = 0;
2293 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2294 {
2295 int num;
2296 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2297 tree type = BINFO_TYPE (base_binfo);
2298
2299 num = classify_argument (TYPE_MODE (type),
2300 type, subclasses,
2301 (offset + (bit_offset % 64)) % 256);
2302 if (!num)
2303 return 0;
2304 for (i = 0; i < num; i++)
2305 {
2306 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2307 classes[i + pos] =
2308 merge_classes (subclasses[i], classes[i + pos]);
2309 }
2310 }
2311 }
2312 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2313 {
2314 if (TREE_CODE (field) == FIELD_DECL)
2315 {
2316 int num;
2317 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2318 TREE_TYPE (field), subclasses,
2319 bit_offset);
2320 if (!num)
2321 return 0;
2322 for (i = 0; i < num; i++)
2323 classes[i] = merge_classes (subclasses[i], classes[i]);
2324 }
2325 }
2326 }
2327 else
2328 abort ();
2329
2330 /* Final merger cleanup. */
2331 for (i = 0; i < words; i++)
2332 {
2333 /* If one class is MEMORY, everything should be passed in
2334 memory. */
2335 if (classes[i] == X86_64_MEMORY_CLASS)
2336 return 0;
2337
2338 /* The X86_64_SSEUP_CLASS should be always preceded by
2339 X86_64_SSE_CLASS. */
2340 if (classes[i] == X86_64_SSEUP_CLASS
2341 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2342 classes[i] = X86_64_SSE_CLASS;
2343
2344 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2345 if (classes[i] == X86_64_X87UP_CLASS
2346 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2347 classes[i] = X86_64_SSE_CLASS;
2348 }
2349 return words;
2350 }
2351
2352 /* Compute alignment needed. We align all types to natural boundaries with
2353 exception of XFmode that is aligned to 64bits. */
2354 if (mode != VOIDmode && mode != BLKmode)
2355 {
2356 int mode_alignment = GET_MODE_BITSIZE (mode);
2357
2358 if (mode == XFmode)
2359 mode_alignment = 128;
2360 else if (mode == XCmode)
2361 mode_alignment = 256;
2362 if (COMPLEX_MODE_P (mode))
2363 mode_alignment /= 2;
2364 /* Misaligned fields are always returned in memory. */
2365 if (bit_offset % mode_alignment)
2366 return 0;
2367 }
2368
2369 /* for V1xx modes, just use the base mode */
2370 if (VECTOR_MODE_P (mode)
2371 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2372 mode = GET_MODE_INNER (mode);
2373
2374 /* Classification of atomic types. */
2375 switch (mode)
2376 {
2377 case DImode:
2378 case SImode:
2379 case HImode:
2380 case QImode:
2381 case CSImode:
2382 case CHImode:
2383 case CQImode:
2384 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2385 classes[0] = X86_64_INTEGERSI_CLASS;
2386 else
2387 classes[0] = X86_64_INTEGER_CLASS;
2388 return 1;
2389 case CDImode:
2390 case TImode:
2391 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2392 return 2;
2393 case CTImode:
2394 return 0;
2395 case SFmode:
2396 if (!(bit_offset % 64))
2397 classes[0] = X86_64_SSESF_CLASS;
2398 else
2399 classes[0] = X86_64_SSE_CLASS;
2400 return 1;
2401 case DFmode:
2402 classes[0] = X86_64_SSEDF_CLASS;
2403 return 1;
2404 case XFmode:
2405 classes[0] = X86_64_X87_CLASS;
2406 classes[1] = X86_64_X87UP_CLASS;
2407 return 2;
2408 case TFmode:
2409 classes[0] = X86_64_SSE_CLASS;
2410 classes[1] = X86_64_SSEUP_CLASS;
2411 return 2;
2412 case SCmode:
2413 classes[0] = X86_64_SSE_CLASS;
2414 return 1;
2415 case DCmode:
2416 classes[0] = X86_64_SSEDF_CLASS;
2417 classes[1] = X86_64_SSEDF_CLASS;
2418 return 2;
2419 case XCmode:
2420 classes[0] = X86_64_COMPLEX_X87_CLASS;
2421 return 1;
2422 case TCmode:
2423 /* This modes is larger than 16 bytes. */
2424 return 0;
2425 case V4SFmode:
2426 case V4SImode:
2427 case V16QImode:
2428 case V8HImode:
2429 case V2DFmode:
2430 case V2DImode:
2431 classes[0] = X86_64_SSE_CLASS;
2432 classes[1] = X86_64_SSEUP_CLASS;
2433 return 2;
2434 case V2SFmode:
2435 case V2SImode:
2436 case V4HImode:
2437 case V8QImode:
2438 classes[0] = X86_64_SSE_CLASS;
2439 return 1;
2440 case BLKmode:
2441 case VOIDmode:
2442 return 0;
2443 default:
2444 if (VECTOR_MODE_P (mode))
2445 {
2446 if (bytes > 16)
2447 return 0;
2448 if (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT)
2449 {
2450 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2451 classes[0] = X86_64_INTEGERSI_CLASS;
2452 else
2453 classes[0] = X86_64_INTEGER_CLASS;
2454 classes[1] = X86_64_INTEGER_CLASS;
2455 return 1 + (bytes > 8);
2456 }
2457 }
2458 abort ();
2459 }
2460 }
2461
2462 /* Examine the argument and return set number of register required in each
2463 class. Return 0 iff parameter should be passed in memory. */
2464 static int
2465 examine_argument (enum machine_mode mode, tree type, int in_return,
2466 int *int_nregs, int *sse_nregs)
2467 {
2468 enum x86_64_reg_class class[MAX_CLASSES];
2469 int n = classify_argument (mode, type, class, 0);
2470
2471 *int_nregs = 0;
2472 *sse_nregs = 0;
2473 if (!n)
2474 return 0;
2475 for (n--; n >= 0; n--)
2476 switch (class[n])
2477 {
2478 case X86_64_INTEGER_CLASS:
2479 case X86_64_INTEGERSI_CLASS:
2480 (*int_nregs)++;
2481 break;
2482 case X86_64_SSE_CLASS:
2483 case X86_64_SSESF_CLASS:
2484 case X86_64_SSEDF_CLASS:
2485 (*sse_nregs)++;
2486 break;
2487 case X86_64_NO_CLASS:
2488 case X86_64_SSEUP_CLASS:
2489 break;
2490 case X86_64_X87_CLASS:
2491 case X86_64_X87UP_CLASS:
2492 if (!in_return)
2493 return 0;
2494 break;
2495 case X86_64_COMPLEX_X87_CLASS:
2496 return in_return ? 2 : 0;
2497 case X86_64_MEMORY_CLASS:
2498 abort ();
2499 }
2500 return 1;
2501 }
2502
2503 /* Construct container for the argument used by GCC interface. See
2504 FUNCTION_ARG for the detailed description. */
2505
2506 static rtx
2507 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2508 tree type, int in_return, int nintregs, int nsseregs,
2509 const int *intreg, int sse_regno)
2510 {
2511 enum machine_mode tmpmode;
2512 int bytes =
2513 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2514 enum x86_64_reg_class class[MAX_CLASSES];
2515 int n;
2516 int i;
2517 int nexps = 0;
2518 int needed_sseregs, needed_intregs;
2519 rtx exp[MAX_CLASSES];
2520 rtx ret;
2521
2522 n = classify_argument (mode, type, class, 0);
2523 if (TARGET_DEBUG_ARG)
2524 {
2525 if (!n)
2526 fprintf (stderr, "Memory class\n");
2527 else
2528 {
2529 fprintf (stderr, "Classes:");
2530 for (i = 0; i < n; i++)
2531 {
2532 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2533 }
2534 fprintf (stderr, "\n");
2535 }
2536 }
2537 if (!n)
2538 return NULL;
2539 if (!examine_argument (mode, type, in_return, &needed_intregs,
2540 &needed_sseregs))
2541 return NULL;
2542 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2543 return NULL;
2544
2545 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2546 some less clueful developer tries to use floating-point anyway. */
2547 if (needed_sseregs && !TARGET_SSE)
2548 {
2549 static bool issued_error;
2550 if (!issued_error)
2551 {
2552 issued_error = true;
2553 if (in_return)
2554 error ("SSE register return with SSE disabled");
2555 else
2556 error ("SSE register argument with SSE disabled");
2557 }
2558 return NULL;
2559 }
2560
2561 /* First construct simple cases. Avoid SCmode, since we want to use
2562 single register to pass this type. */
2563 if (n == 1 && mode != SCmode)
2564 switch (class[0])
2565 {
2566 case X86_64_INTEGER_CLASS:
2567 case X86_64_INTEGERSI_CLASS:
2568 return gen_rtx_REG (mode, intreg[0]);
2569 case X86_64_SSE_CLASS:
2570 case X86_64_SSESF_CLASS:
2571 case X86_64_SSEDF_CLASS:
2572 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2573 case X86_64_X87_CLASS:
2574 case X86_64_COMPLEX_X87_CLASS:
2575 return gen_rtx_REG (mode, FIRST_STACK_REG);
2576 case X86_64_NO_CLASS:
2577 /* Zero sized array, struct or class. */
2578 return NULL;
2579 default:
2580 abort ();
2581 }
2582 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2583 && mode != BLKmode)
2584 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2585 if (n == 2
2586 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2587 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2588 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2589 && class[1] == X86_64_INTEGER_CLASS
2590 && (mode == CDImode || mode == TImode || mode == TFmode)
2591 && intreg[0] + 1 == intreg[1])
2592 return gen_rtx_REG (mode, intreg[0]);
2593
2594 /* Otherwise figure out the entries of the PARALLEL. */
2595 for (i = 0; i < n; i++)
2596 {
2597 switch (class[i])
2598 {
2599 case X86_64_NO_CLASS:
2600 break;
2601 case X86_64_INTEGER_CLASS:
2602 case X86_64_INTEGERSI_CLASS:
2603 /* Merge TImodes on aligned occasions here too. */
2604 if (i * 8 + 8 > bytes)
2605 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2606 else if (class[i] == X86_64_INTEGERSI_CLASS)
2607 tmpmode = SImode;
2608 else
2609 tmpmode = DImode;
2610 /* We've requested 24 bytes we don't have mode for. Use DImode. */
2611 if (tmpmode == BLKmode)
2612 tmpmode = DImode;
2613 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2614 gen_rtx_REG (tmpmode, *intreg),
2615 GEN_INT (i*8));
2616 intreg++;
2617 break;
2618 case X86_64_SSESF_CLASS:
2619 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2620 gen_rtx_REG (SFmode,
2621 SSE_REGNO (sse_regno)),
2622 GEN_INT (i*8));
2623 sse_regno++;
2624 break;
2625 case X86_64_SSEDF_CLASS:
2626 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2627 gen_rtx_REG (DFmode,
2628 SSE_REGNO (sse_regno)),
2629 GEN_INT (i*8));
2630 sse_regno++;
2631 break;
2632 case X86_64_SSE_CLASS:
2633 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2634 tmpmode = TImode;
2635 else
2636 tmpmode = DImode;
2637 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2638 gen_rtx_REG (tmpmode,
2639 SSE_REGNO (sse_regno)),
2640 GEN_INT (i*8));
2641 if (tmpmode == TImode)
2642 i++;
2643 sse_regno++;
2644 break;
2645 default:
2646 abort ();
2647 }
2648 }
2649 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2650 for (i = 0; i < nexps; i++)
2651 XVECEXP (ret, 0, i) = exp [i];
2652 return ret;
2653 }
2654
2655 /* Update the data in CUM to advance over an argument
2656 of mode MODE and data type TYPE.
2657 (TYPE is null for libcalls where that information may not be available.) */
2658
2659 void
2660 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2661 tree type, int named)
2662 {
2663 int bytes =
2664 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2665 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2666
2667 if (type)
2668 mode = type_natural_mode (type);
2669
2670 if (TARGET_DEBUG_ARG)
2671 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
2672 "mode=%s, named=%d)\n\n",
2673 words, cum->words, cum->nregs, cum->sse_nregs,
2674 GET_MODE_NAME (mode), named);
2675
2676 if (TARGET_64BIT)
2677 {
2678 int int_nregs, sse_nregs;
2679 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
2680 cum->words += words;
2681 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
2682 {
2683 cum->nregs -= int_nregs;
2684 cum->sse_nregs -= sse_nregs;
2685 cum->regno += int_nregs;
2686 cum->sse_regno += sse_nregs;
2687 }
2688 else
2689 cum->words += words;
2690 }
2691 else
2692 {
2693 switch (mode)
2694 {
2695 default:
2696 break;
2697
2698 case BLKmode:
2699 if (bytes < 0)
2700 break;
2701 /* FALLTHRU */
2702
2703 case DImode:
2704 case SImode:
2705 case HImode:
2706 case QImode:
2707 cum->words += words;
2708 cum->nregs -= words;
2709 cum->regno += words;
2710
2711 if (cum->nregs <= 0)
2712 {
2713 cum->nregs = 0;
2714 cum->regno = 0;
2715 }
2716 break;
2717
2718 case TImode:
2719 case V16QImode:
2720 case V8HImode:
2721 case V4SImode:
2722 case V2DImode:
2723 case V4SFmode:
2724 case V2DFmode:
2725 if (!type || !AGGREGATE_TYPE_P (type))
2726 {
2727 cum->sse_words += words;
2728 cum->sse_nregs -= 1;
2729 cum->sse_regno += 1;
2730 if (cum->sse_nregs <= 0)
2731 {
2732 cum->sse_nregs = 0;
2733 cum->sse_regno = 0;
2734 }
2735 }
2736 break;
2737
2738 case V8QImode:
2739 case V4HImode:
2740 case V2SImode:
2741 case V2SFmode:
2742 if (!type || !AGGREGATE_TYPE_P (type))
2743 {
2744 cum->mmx_words += words;
2745 cum->mmx_nregs -= 1;
2746 cum->mmx_regno += 1;
2747 if (cum->mmx_nregs <= 0)
2748 {
2749 cum->mmx_nregs = 0;
2750 cum->mmx_regno = 0;
2751 }
2752 }
2753 break;
2754 }
2755 }
2756 }
2757
2758 /* Define where to put the arguments to a function.
2759 Value is zero to push the argument on the stack,
2760 or a hard register in which to store the argument.
2761
2762 MODE is the argument's machine mode.
2763 TYPE is the data type of the argument (as a tree).
2764 This is null for libcalls where that information may
2765 not be available.
2766 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2767 the preceding args and about the function being called.
2768 NAMED is nonzero if this argument is a named parameter
2769 (otherwise it is an extra parameter matching an ellipsis). */
2770
2771 rtx
2772 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
2773 tree type, int named)
2774 {
2775 enum machine_mode mode = orig_mode;
2776 rtx ret = NULL_RTX;
2777 int bytes =
2778 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2779 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2780 static bool warnedsse, warnedmmx;
2781
2782 /* To simplify the code below, represent vector types with a vector mode
2783 even if MMX/SSE are not active. */
2784 if (type && TREE_CODE (type) == VECTOR_TYPE)
2785 mode = type_natural_mode (type);
2786
2787 /* Handle a hidden AL argument containing number of registers for varargs
2788 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
2789 any AL settings. */
2790 if (mode == VOIDmode)
2791 {
2792 if (TARGET_64BIT)
2793 return GEN_INT (cum->maybe_vaarg
2794 ? (cum->sse_nregs < 0
2795 ? SSE_REGPARM_MAX
2796 : cum->sse_regno)
2797 : -1);
2798 else
2799 return constm1_rtx;
2800 }
2801 if (TARGET_64BIT)
2802 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
2803 cum->sse_nregs,
2804 &x86_64_int_parameter_registers [cum->regno],
2805 cum->sse_regno);
2806 else
2807 switch (mode)
2808 {
2809 /* For now, pass fp/complex values on the stack. */
2810 default:
2811 break;
2812
2813 case BLKmode:
2814 if (bytes < 0)
2815 break;
2816 /* FALLTHRU */
2817 case DImode:
2818 case SImode:
2819 case HImode:
2820 case QImode:
2821 if (words <= cum->nregs)
2822 {
2823 int regno = cum->regno;
2824
2825 /* Fastcall allocates the first two DWORD (SImode) or
2826 smaller arguments to ECX and EDX. */
2827 if (cum->fastcall)
2828 {
2829 if (mode == BLKmode || mode == DImode)
2830 break;
2831
2832 /* ECX not EAX is the first allocated register. */
2833 if (regno == 0)
2834 regno = 2;
2835 }
2836 ret = gen_rtx_REG (mode, regno);
2837 }
2838 break;
2839 case TImode:
2840 case V16QImode:
2841 case V8HImode:
2842 case V4SImode:
2843 case V2DImode:
2844 case V4SFmode:
2845 case V2DFmode:
2846 if (!type || !AGGREGATE_TYPE_P (type))
2847 {
2848 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
2849 {
2850 warnedsse = true;
2851 warning ("SSE vector argument without SSE enabled "
2852 "changes the ABI");
2853 }
2854 if (cum->sse_nregs)
2855 ret = gen_reg_or_parallel (mode, orig_mode,
2856 cum->sse_regno + FIRST_SSE_REG);
2857 }
2858 break;
2859 case V8QImode:
2860 case V4HImode:
2861 case V2SImode:
2862 case V2SFmode:
2863 if (!type || !AGGREGATE_TYPE_P (type))
2864 {
2865 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
2866 {
2867 warnedmmx = true;
2868 warning ("MMX vector argument without MMX enabled "
2869 "changes the ABI");
2870 }
2871 if (cum->mmx_nregs)
2872 ret = gen_reg_or_parallel (mode, orig_mode,
2873 cum->mmx_regno + FIRST_MMX_REG);
2874 }
2875 break;
2876 }
2877
2878 if (TARGET_DEBUG_ARG)
2879 {
2880 fprintf (stderr,
2881 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
2882 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
2883
2884 if (ret)
2885 print_simple_rtl (stderr, ret);
2886 else
2887 fprintf (stderr, ", stack");
2888
2889 fprintf (stderr, " )\n");
2890 }
2891
2892 return ret;
2893 }
2894
2895 /* A C expression that indicates when an argument must be passed by
2896 reference. If nonzero for an argument, a copy of that argument is
2897 made in memory and a pointer to the argument is passed instead of
2898 the argument itself. The pointer is passed in whatever way is
2899 appropriate for passing a pointer to that type. */
2900
2901 static bool
2902 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2903 enum machine_mode mode ATTRIBUTE_UNUSED,
2904 tree type, bool named ATTRIBUTE_UNUSED)
2905 {
2906 if (!TARGET_64BIT)
2907 return 0;
2908
2909 if (type && int_size_in_bytes (type) == -1)
2910 {
2911 if (TARGET_DEBUG_ARG)
2912 fprintf (stderr, "function_arg_pass_by_reference\n");
2913 return 1;
2914 }
2915
2916 return 0;
2917 }
2918
2919 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
2920 ABI. Only called if TARGET_SSE. */
2921 static bool
2922 contains_128bit_aligned_vector_p (tree type)
2923 {
2924 enum machine_mode mode = TYPE_MODE (type);
2925 if (SSE_REG_MODE_P (mode)
2926 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
2927 return true;
2928 if (TYPE_ALIGN (type) < 128)
2929 return false;
2930
2931 if (AGGREGATE_TYPE_P (type))
2932 {
2933 /* Walk the aggregates recursively. */
2934 if (TREE_CODE (type) == RECORD_TYPE
2935 || TREE_CODE (type) == UNION_TYPE
2936 || TREE_CODE (type) == QUAL_UNION_TYPE)
2937 {
2938 tree field;
2939
2940 if (TYPE_BINFO (type))
2941 {
2942 tree binfo, base_binfo;
2943 int i;
2944
2945 for (binfo = TYPE_BINFO (type), i = 0;
2946 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2947 if (contains_128bit_aligned_vector_p (BINFO_TYPE (base_binfo)))
2948 return true;
2949 }
2950 /* And now merge the fields of structure. */
2951 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2952 {
2953 if (TREE_CODE (field) == FIELD_DECL
2954 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
2955 return true;
2956 }
2957 }
2958 /* Just for use if some languages passes arrays by value. */
2959 else if (TREE_CODE (type) == ARRAY_TYPE)
2960 {
2961 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
2962 return true;
2963 }
2964 else
2965 abort ();
2966 }
2967 return false;
2968 }
2969
2970 /* Gives the alignment boundary, in bits, of an argument with the
2971 specified mode and type. */
2972
2973 int
2974 ix86_function_arg_boundary (enum machine_mode mode, tree type)
2975 {
2976 int align;
2977 if (type)
2978 align = TYPE_ALIGN (type);
2979 else
2980 align = GET_MODE_ALIGNMENT (mode);
2981 if (align < PARM_BOUNDARY)
2982 align = PARM_BOUNDARY;
2983 if (!TARGET_64BIT)
2984 {
2985 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
2986 make an exception for SSE modes since these require 128bit
2987 alignment.
2988
2989 The handling here differs from field_alignment. ICC aligns MMX
2990 arguments to 4 byte boundaries, while structure fields are aligned
2991 to 8 byte boundaries. */
2992 if (!TARGET_SSE)
2993 align = PARM_BOUNDARY;
2994 else if (!type)
2995 {
2996 if (!SSE_REG_MODE_P (mode))
2997 align = PARM_BOUNDARY;
2998 }
2999 else
3000 {
3001 if (!contains_128bit_aligned_vector_p (type))
3002 align = PARM_BOUNDARY;
3003 }
3004 }
3005 if (align > 128)
3006 align = 128;
3007 return align;
3008 }
3009
3010 /* Return true if N is a possible register number of function value. */
3011 bool
3012 ix86_function_value_regno_p (int regno)
3013 {
3014 if (!TARGET_64BIT)
3015 {
3016 return ((regno) == 0
3017 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3018 || ((regno) == FIRST_SSE_REG && TARGET_SSE));
3019 }
3020 return ((regno) == 0 || (regno) == FIRST_FLOAT_REG
3021 || ((regno) == FIRST_SSE_REG && TARGET_SSE)
3022 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387));
3023 }
3024
3025 /* Define how to find the value returned by a function.
3026 VALTYPE is the data type of the value (as a tree).
3027 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3028 otherwise, FUNC is 0. */
3029 rtx
3030 ix86_function_value (tree valtype)
3031 {
3032 enum machine_mode natmode = type_natural_mode (valtype);
3033
3034 if (TARGET_64BIT)
3035 {
3036 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3037 1, REGPARM_MAX, SSE_REGPARM_MAX,
3038 x86_64_int_return_registers, 0);
3039 /* For zero sized structures, construct_container return NULL, but we
3040 need to keep rest of compiler happy by returning meaningful value. */
3041 if (!ret)
3042 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3043 return ret;
3044 }
3045 else
3046 return gen_rtx_REG (TYPE_MODE (valtype), ix86_value_regno (natmode));
3047 }
3048
3049 /* Return false iff type is returned in memory. */
3050 int
3051 ix86_return_in_memory (tree type)
3052 {
3053 int needed_intregs, needed_sseregs, size;
3054 enum machine_mode mode = type_natural_mode (type);
3055
3056 if (TARGET_64BIT)
3057 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3058
3059 if (mode == BLKmode)
3060 return 1;
3061
3062 size = int_size_in_bytes (type);
3063
3064 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3065 return 0;
3066
3067 if (VECTOR_MODE_P (mode) || mode == TImode)
3068 {
3069 /* User-created vectors small enough to fit in EAX. */
3070 if (size < 8)
3071 return 0;
3072
3073 /* MMX/3dNow values are returned on the stack, since we've
3074 got to EMMS/FEMMS before returning. */
3075 if (size == 8)
3076 return 1;
3077
3078 /* SSE values are returned in XMM0, except when it doesn't exist. */
3079 if (size == 16)
3080 return (TARGET_SSE ? 0 : 1);
3081 }
3082
3083 if (mode == XFmode)
3084 return 0;
3085
3086 if (size > 12)
3087 return 1;
3088 return 0;
3089 }
3090
3091 /* When returning SSE vector types, we have a choice of either
3092 (1) being abi incompatible with a -march switch, or
3093 (2) generating an error.
3094 Given no good solution, I think the safest thing is one warning.
3095 The user won't be able to use -Werror, but....
3096
3097 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3098 called in response to actually generating a caller or callee that
3099 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3100 via aggregate_value_p for general type probing from tree-ssa. */
3101
3102 static rtx
3103 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3104 {
3105 static bool warned;
3106
3107 if (!TARGET_SSE && type && !warned)
3108 {
3109 /* Look at the return type of the function, not the function type. */
3110 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3111
3112 if (mode == TImode
3113 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3114 {
3115 warned = true;
3116 warning ("SSE vector return without SSE enabled changes the ABI");
3117 }
3118 }
3119
3120 return NULL;
3121 }
3122
3123 /* Define how to find the value returned by a library function
3124 assuming the value has mode MODE. */
3125 rtx
3126 ix86_libcall_value (enum machine_mode mode)
3127 {
3128 if (TARGET_64BIT)
3129 {
3130 switch (mode)
3131 {
3132 case SFmode:
3133 case SCmode:
3134 case DFmode:
3135 case DCmode:
3136 case TFmode:
3137 return gen_rtx_REG (mode, FIRST_SSE_REG);
3138 case XFmode:
3139 case XCmode:
3140 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3141 case TCmode:
3142 return NULL;
3143 default:
3144 return gen_rtx_REG (mode, 0);
3145 }
3146 }
3147 else
3148 return gen_rtx_REG (mode, ix86_value_regno (mode));
3149 }
3150
3151 /* Given a mode, return the register to use for a return value. */
3152
3153 static int
3154 ix86_value_regno (enum machine_mode mode)
3155 {
3156 /* Floating point return values in %st(0). */
3157 if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387)
3158 return FIRST_FLOAT_REG;
3159 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3160 we prevent this case when sse is not available. */
3161 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3162 return FIRST_SSE_REG;
3163 /* Everything else in %eax. */
3164 return 0;
3165 }
3166 \f
3167 /* Create the va_list data type. */
3168
3169 static tree
3170 ix86_build_builtin_va_list (void)
3171 {
3172 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3173
3174 /* For i386 we use plain pointer to argument area. */
3175 if (!TARGET_64BIT)
3176 return build_pointer_type (char_type_node);
3177
3178 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3179 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3180
3181 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3182 unsigned_type_node);
3183 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3184 unsigned_type_node);
3185 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3186 ptr_type_node);
3187 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3188 ptr_type_node);
3189
3190 DECL_FIELD_CONTEXT (f_gpr) = record;
3191 DECL_FIELD_CONTEXT (f_fpr) = record;
3192 DECL_FIELD_CONTEXT (f_ovf) = record;
3193 DECL_FIELD_CONTEXT (f_sav) = record;
3194
3195 TREE_CHAIN (record) = type_decl;
3196 TYPE_NAME (record) = type_decl;
3197 TYPE_FIELDS (record) = f_gpr;
3198 TREE_CHAIN (f_gpr) = f_fpr;
3199 TREE_CHAIN (f_fpr) = f_ovf;
3200 TREE_CHAIN (f_ovf) = f_sav;
3201
3202 layout_type (record);
3203
3204 /* The correct type is an array type of one element. */
3205 return build_array_type (record, build_index_type (size_zero_node));
3206 }
3207
3208 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3209
3210 static void
3211 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3212 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3213 int no_rtl)
3214 {
3215 CUMULATIVE_ARGS next_cum;
3216 rtx save_area = NULL_RTX, mem;
3217 rtx label;
3218 rtx label_ref;
3219 rtx tmp_reg;
3220 rtx nsse_reg;
3221 int set;
3222 tree fntype;
3223 int stdarg_p;
3224 int i;
3225
3226 if (!TARGET_64BIT)
3227 return;
3228
3229 /* Indicate to allocate space on the stack for varargs save area. */
3230 ix86_save_varrargs_registers = 1;
3231
3232 cfun->stack_alignment_needed = 128;
3233
3234 fntype = TREE_TYPE (current_function_decl);
3235 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3236 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3237 != void_type_node));
3238
3239 /* For varargs, we do not want to skip the dummy va_dcl argument.
3240 For stdargs, we do want to skip the last named argument. */
3241 next_cum = *cum;
3242 if (stdarg_p)
3243 function_arg_advance (&next_cum, mode, type, 1);
3244
3245 if (!no_rtl)
3246 save_area = frame_pointer_rtx;
3247
3248 set = get_varargs_alias_set ();
3249
3250 for (i = next_cum.regno; i < ix86_regparm; i++)
3251 {
3252 mem = gen_rtx_MEM (Pmode,
3253 plus_constant (save_area, i * UNITS_PER_WORD));
3254 set_mem_alias_set (mem, set);
3255 emit_move_insn (mem, gen_rtx_REG (Pmode,
3256 x86_64_int_parameter_registers[i]));
3257 }
3258
3259 if (next_cum.sse_nregs)
3260 {
3261 /* Now emit code to save SSE registers. The AX parameter contains number
3262 of SSE parameter registers used to call this function. We use
3263 sse_prologue_save insn template that produces computed jump across
3264 SSE saves. We need some preparation work to get this working. */
3265
3266 label = gen_label_rtx ();
3267 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3268
3269 /* Compute address to jump to :
3270 label - 5*eax + nnamed_sse_arguments*5 */
3271 tmp_reg = gen_reg_rtx (Pmode);
3272 nsse_reg = gen_reg_rtx (Pmode);
3273 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3274 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3275 gen_rtx_MULT (Pmode, nsse_reg,
3276 GEN_INT (4))));
3277 if (next_cum.sse_regno)
3278 emit_move_insn
3279 (nsse_reg,
3280 gen_rtx_CONST (DImode,
3281 gen_rtx_PLUS (DImode,
3282 label_ref,
3283 GEN_INT (next_cum.sse_regno * 4))));
3284 else
3285 emit_move_insn (nsse_reg, label_ref);
3286 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3287
3288 /* Compute address of memory block we save into. We always use pointer
3289 pointing 127 bytes after first byte to store - this is needed to keep
3290 instruction size limited by 4 bytes. */
3291 tmp_reg = gen_reg_rtx (Pmode);
3292 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3293 plus_constant (save_area,
3294 8 * REGPARM_MAX + 127)));
3295 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3296 set_mem_alias_set (mem, set);
3297 set_mem_align (mem, BITS_PER_WORD);
3298
3299 /* And finally do the dirty job! */
3300 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3301 GEN_INT (next_cum.sse_regno), label));
3302 }
3303
3304 }
3305
3306 /* Implement va_start. */
3307
3308 void
3309 ix86_va_start (tree valist, rtx nextarg)
3310 {
3311 HOST_WIDE_INT words, n_gpr, n_fpr;
3312 tree f_gpr, f_fpr, f_ovf, f_sav;
3313 tree gpr, fpr, ovf, sav, t;
3314
3315 /* Only 64bit target needs something special. */
3316 if (!TARGET_64BIT)
3317 {
3318 std_expand_builtin_va_start (valist, nextarg);
3319 return;
3320 }
3321
3322 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3323 f_fpr = TREE_CHAIN (f_gpr);
3324 f_ovf = TREE_CHAIN (f_fpr);
3325 f_sav = TREE_CHAIN (f_ovf);
3326
3327 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3328 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3329 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3330 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3331 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3332
3333 /* Count number of gp and fp argument registers used. */
3334 words = current_function_args_info.words;
3335 n_gpr = current_function_args_info.regno;
3336 n_fpr = current_function_args_info.sse_regno;
3337
3338 if (TARGET_DEBUG_ARG)
3339 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3340 (int) words, (int) n_gpr, (int) n_fpr);
3341
3342 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3343 build_int_cst (NULL_TREE, n_gpr * 8));
3344 TREE_SIDE_EFFECTS (t) = 1;
3345 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3346
3347 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3348 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3349 TREE_SIDE_EFFECTS (t) = 1;
3350 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3351
3352 /* Find the overflow area. */
3353 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3354 if (words != 0)
3355 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3356 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3357 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3358 TREE_SIDE_EFFECTS (t) = 1;
3359 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3360
3361 /* Find the register save area.
3362 Prologue of the function save it right above stack frame. */
3363 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3364 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3365 TREE_SIDE_EFFECTS (t) = 1;
3366 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3367 }
3368
3369 /* Implement va_arg. */
3370
3371 tree
3372 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3373 {
3374 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3375 tree f_gpr, f_fpr, f_ovf, f_sav;
3376 tree gpr, fpr, ovf, sav, t;
3377 int size, rsize;
3378 tree lab_false, lab_over = NULL_TREE;
3379 tree addr, t2;
3380 rtx container;
3381 int indirect_p = 0;
3382 tree ptrtype;
3383 enum machine_mode nat_mode;
3384
3385 /* Only 64bit target needs something special. */
3386 if (!TARGET_64BIT)
3387 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3388
3389 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3390 f_fpr = TREE_CHAIN (f_gpr);
3391 f_ovf = TREE_CHAIN (f_fpr);
3392 f_sav = TREE_CHAIN (f_ovf);
3393
3394 valist = build_va_arg_indirect_ref (valist);
3395 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3396 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3397 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3398 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3399
3400 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3401 if (indirect_p)
3402 type = build_pointer_type (type);
3403 size = int_size_in_bytes (type);
3404 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3405
3406 nat_mode = type_natural_mode (type);
3407 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3408 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3409
3410 /* Pull the value out of the saved registers. */
3411
3412 addr = create_tmp_var (ptr_type_node, "addr");
3413 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3414
3415 if (container)
3416 {
3417 int needed_intregs, needed_sseregs;
3418 bool need_temp;
3419 tree int_addr, sse_addr;
3420
3421 lab_false = create_artificial_label ();
3422 lab_over = create_artificial_label ();
3423
3424 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3425
3426 need_temp = (!REG_P (container)
3427 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3428 || TYPE_ALIGN (type) > 128));
3429
3430 /* In case we are passing structure, verify that it is consecutive block
3431 on the register save area. If not we need to do moves. */
3432 if (!need_temp && !REG_P (container))
3433 {
3434 /* Verify that all registers are strictly consecutive */
3435 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3436 {
3437 int i;
3438
3439 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3440 {
3441 rtx slot = XVECEXP (container, 0, i);
3442 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3443 || INTVAL (XEXP (slot, 1)) != i * 16)
3444 need_temp = 1;
3445 }
3446 }
3447 else
3448 {
3449 int i;
3450
3451 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3452 {
3453 rtx slot = XVECEXP (container, 0, i);
3454 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3455 || INTVAL (XEXP (slot, 1)) != i * 8)
3456 need_temp = 1;
3457 }
3458 }
3459 }
3460 if (!need_temp)
3461 {
3462 int_addr = addr;
3463 sse_addr = addr;
3464 }
3465 else
3466 {
3467 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3468 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3469 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3470 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3471 }
3472
3473 /* First ensure that we fit completely in registers. */
3474 if (needed_intregs)
3475 {
3476 t = build_int_cst (TREE_TYPE (gpr),
3477 (REGPARM_MAX - needed_intregs + 1) * 8);
3478 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3479 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3480 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3481 gimplify_and_add (t, pre_p);
3482 }
3483 if (needed_sseregs)
3484 {
3485 t = build_int_cst (TREE_TYPE (fpr),
3486 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3487 + REGPARM_MAX * 8);
3488 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3489 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3490 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3491 gimplify_and_add (t, pre_p);
3492 }
3493
3494 /* Compute index to start of area used for integer regs. */
3495 if (needed_intregs)
3496 {
3497 /* int_addr = gpr + sav; */
3498 t = fold_convert (ptr_type_node, gpr);
3499 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3500 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3501 gimplify_and_add (t, pre_p);
3502 }
3503 if (needed_sseregs)
3504 {
3505 /* sse_addr = fpr + sav; */
3506 t = fold_convert (ptr_type_node, fpr);
3507 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3508 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3509 gimplify_and_add (t, pre_p);
3510 }
3511 if (need_temp)
3512 {
3513 int i;
3514 tree temp = create_tmp_var (type, "va_arg_tmp");
3515
3516 /* addr = &temp; */
3517 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3518 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3519 gimplify_and_add (t, pre_p);
3520
3521 for (i = 0; i < XVECLEN (container, 0); i++)
3522 {
3523 rtx slot = XVECEXP (container, 0, i);
3524 rtx reg = XEXP (slot, 0);
3525 enum machine_mode mode = GET_MODE (reg);
3526 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3527 tree addr_type = build_pointer_type (piece_type);
3528 tree src_addr, src;
3529 int src_offset;
3530 tree dest_addr, dest;
3531
3532 if (SSE_REGNO_P (REGNO (reg)))
3533 {
3534 src_addr = sse_addr;
3535 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3536 }
3537 else
3538 {
3539 src_addr = int_addr;
3540 src_offset = REGNO (reg) * 8;
3541 }
3542 src_addr = fold_convert (addr_type, src_addr);
3543 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
3544 size_int (src_offset)));
3545 src = build_va_arg_indirect_ref (src_addr);
3546
3547 dest_addr = fold_convert (addr_type, addr);
3548 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
3549 size_int (INTVAL (XEXP (slot, 1)))));
3550 dest = build_va_arg_indirect_ref (dest_addr);
3551
3552 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
3553 gimplify_and_add (t, pre_p);
3554 }
3555 }
3556
3557 if (needed_intregs)
3558 {
3559 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3560 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
3561 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3562 gimplify_and_add (t, pre_p);
3563 }
3564 if (needed_sseregs)
3565 {
3566 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3567 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
3568 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3569 gimplify_and_add (t, pre_p);
3570 }
3571
3572 t = build1 (GOTO_EXPR, void_type_node, lab_over);
3573 gimplify_and_add (t, pre_p);
3574
3575 t = build1 (LABEL_EXPR, void_type_node, lab_false);
3576 append_to_statement_list (t, pre_p);
3577 }
3578
3579 /* ... otherwise out of the overflow area. */
3580
3581 /* Care for on-stack alignment if needed. */
3582 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
3583 t = ovf;
3584 else
3585 {
3586 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
3587 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
3588 build_int_cst (TREE_TYPE (ovf), align - 1));
3589 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3590 build_int_cst (TREE_TYPE (t), -align));
3591 }
3592 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
3593
3594 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
3595 gimplify_and_add (t2, pre_p);
3596
3597 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
3598 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
3599 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3600 gimplify_and_add (t, pre_p);
3601
3602 if (container)
3603 {
3604 t = build1 (LABEL_EXPR, void_type_node, lab_over);
3605 append_to_statement_list (t, pre_p);
3606 }
3607
3608 ptrtype = build_pointer_type (type);
3609 addr = fold_convert (ptrtype, addr);
3610
3611 if (indirect_p)
3612 addr = build_va_arg_indirect_ref (addr);
3613 return build_va_arg_indirect_ref (addr);
3614 }
3615 \f
3616 /* Return nonzero if OPNUM's MEM should be matched
3617 in movabs* patterns. */
3618
3619 int
3620 ix86_check_movabs (rtx insn, int opnum)
3621 {
3622 rtx set, mem;
3623
3624 set = PATTERN (insn);
3625 if (GET_CODE (set) == PARALLEL)
3626 set = XVECEXP (set, 0, 0);
3627 if (GET_CODE (set) != SET)
3628 abort ();
3629 mem = XEXP (set, opnum);
3630 while (GET_CODE (mem) == SUBREG)
3631 mem = SUBREG_REG (mem);
3632 if (GET_CODE (mem) != MEM)
3633 abort ();
3634 return (volatile_ok || !MEM_VOLATILE_P (mem));
3635 }
3636 \f
3637 /* Initialize the table of extra 80387 mathematical constants. */
3638
3639 static void
3640 init_ext_80387_constants (void)
3641 {
3642 static const char * cst[5] =
3643 {
3644 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
3645 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
3646 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
3647 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
3648 "3.1415926535897932385128089594061862044", /* 4: fldpi */
3649 };
3650 int i;
3651
3652 for (i = 0; i < 5; i++)
3653 {
3654 real_from_string (&ext_80387_constants_table[i], cst[i]);
3655 /* Ensure each constant is rounded to XFmode precision. */
3656 real_convert (&ext_80387_constants_table[i],
3657 XFmode, &ext_80387_constants_table[i]);
3658 }
3659
3660 ext_80387_constants_init = 1;
3661 }
3662
3663 /* Return true if the constant is something that can be loaded with
3664 a special instruction. */
3665
3666 int
3667 standard_80387_constant_p (rtx x)
3668 {
3669 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
3670 return -1;
3671
3672 if (x == CONST0_RTX (GET_MODE (x)))
3673 return 1;
3674 if (x == CONST1_RTX (GET_MODE (x)))
3675 return 2;
3676
3677 /* For XFmode constants, try to find a special 80387 instruction when
3678 optimizing for size or on those CPUs that benefit from them. */
3679 if (GET_MODE (x) == XFmode
3680 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
3681 {
3682 REAL_VALUE_TYPE r;
3683 int i;
3684
3685 if (! ext_80387_constants_init)
3686 init_ext_80387_constants ();
3687
3688 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3689 for (i = 0; i < 5; i++)
3690 if (real_identical (&r, &ext_80387_constants_table[i]))
3691 return i + 3;
3692 }
3693
3694 return 0;
3695 }
3696
3697 /* Return the opcode of the special instruction to be used to load
3698 the constant X. */
3699
3700 const char *
3701 standard_80387_constant_opcode (rtx x)
3702 {
3703 switch (standard_80387_constant_p (x))
3704 {
3705 case 1:
3706 return "fldz";
3707 case 2:
3708 return "fld1";
3709 case 3:
3710 return "fldlg2";
3711 case 4:
3712 return "fldln2";
3713 case 5:
3714 return "fldl2e";
3715 case 6:
3716 return "fldl2t";
3717 case 7:
3718 return "fldpi";
3719 }
3720 abort ();
3721 }
3722
3723 /* Return the CONST_DOUBLE representing the 80387 constant that is
3724 loaded by the specified special instruction. The argument IDX
3725 matches the return value from standard_80387_constant_p. */
3726
3727 rtx
3728 standard_80387_constant_rtx (int idx)
3729 {
3730 int i;
3731
3732 if (! ext_80387_constants_init)
3733 init_ext_80387_constants ();
3734
3735 switch (idx)
3736 {
3737 case 3:
3738 case 4:
3739 case 5:
3740 case 6:
3741 case 7:
3742 i = idx - 3;
3743 break;
3744
3745 default:
3746 abort ();
3747 }
3748
3749 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
3750 XFmode);
3751 }
3752
3753 /* Return 1 if X is FP constant we can load to SSE register w/o using memory.
3754 */
3755 int
3756 standard_sse_constant_p (rtx x)
3757 {
3758 if (x == const0_rtx)
3759 return 1;
3760 return (x == CONST0_RTX (GET_MODE (x)));
3761 }
3762
3763 /* Returns 1 if OP contains a symbol reference */
3764
3765 int
3766 symbolic_reference_mentioned_p (rtx op)
3767 {
3768 const char *fmt;
3769 int i;
3770
3771 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3772 return 1;
3773
3774 fmt = GET_RTX_FORMAT (GET_CODE (op));
3775 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3776 {
3777 if (fmt[i] == 'E')
3778 {
3779 int j;
3780
3781 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3782 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3783 return 1;
3784 }
3785
3786 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3787 return 1;
3788 }
3789
3790 return 0;
3791 }
3792
3793 /* Return 1 if it is appropriate to emit `ret' instructions in the
3794 body of a function. Do this only if the epilogue is simple, needing a
3795 couple of insns. Prior to reloading, we can't tell how many registers
3796 must be saved, so return 0 then. Return 0 if there is no frame
3797 marker to de-allocate. */
3798
3799 int
3800 ix86_can_use_return_insn_p (void)
3801 {
3802 struct ix86_frame frame;
3803
3804 if (! reload_completed || frame_pointer_needed)
3805 return 0;
3806
3807 /* Don't allow more than 32 pop, since that's all we can do
3808 with one instruction. */
3809 if (current_function_pops_args
3810 && current_function_args_size >= 32768)
3811 return 0;
3812
3813 ix86_compute_frame_layout (&frame);
3814 return frame.to_allocate == 0 && frame.nregs == 0;
3815 }
3816 \f
3817 /* Value should be nonzero if functions must have frame pointers.
3818 Zero means the frame pointer need not be set up (and parms may
3819 be accessed via the stack pointer) in functions that seem suitable. */
3820
3821 int
3822 ix86_frame_pointer_required (void)
3823 {
3824 /* If we accessed previous frames, then the generated code expects
3825 to be able to access the saved ebp value in our frame. */
3826 if (cfun->machine->accesses_prev_frame)
3827 return 1;
3828
3829 /* Several x86 os'es need a frame pointer for other reasons,
3830 usually pertaining to setjmp. */
3831 if (SUBTARGET_FRAME_POINTER_REQUIRED)
3832 return 1;
3833
3834 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
3835 the frame pointer by default. Turn it back on now if we've not
3836 got a leaf function. */
3837 if (TARGET_OMIT_LEAF_FRAME_POINTER
3838 && (!current_function_is_leaf))
3839 return 1;
3840
3841 if (current_function_profile)
3842 return 1;
3843
3844 return 0;
3845 }
3846
3847 /* Record that the current function accesses previous call frames. */
3848
3849 void
3850 ix86_setup_frame_addresses (void)
3851 {
3852 cfun->machine->accesses_prev_frame = 1;
3853 }
3854 \f
3855 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
3856 # define USE_HIDDEN_LINKONCE 1
3857 #else
3858 # define USE_HIDDEN_LINKONCE 0
3859 #endif
3860
3861 static int pic_labels_used;
3862
3863 /* Fills in the label name that should be used for a pc thunk for
3864 the given register. */
3865
3866 static void
3867 get_pc_thunk_name (char name[32], unsigned int regno)
3868 {
3869 if (USE_HIDDEN_LINKONCE)
3870 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
3871 else
3872 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
3873 }
3874
3875
3876 /* This function generates code for -fpic that loads %ebx with
3877 the return address of the caller and then returns. */
3878
3879 void
3880 ix86_file_end (void)
3881 {
3882 rtx xops[2];
3883 int regno;
3884
3885 for (regno = 0; regno < 8; ++regno)
3886 {
3887 char name[32];
3888
3889 if (! ((pic_labels_used >> regno) & 1))
3890 continue;
3891
3892 get_pc_thunk_name (name, regno);
3893
3894 if (USE_HIDDEN_LINKONCE)
3895 {
3896 tree decl;
3897
3898 decl = build_decl (FUNCTION_DECL, get_identifier (name),
3899 error_mark_node);
3900 TREE_PUBLIC (decl) = 1;
3901 TREE_STATIC (decl) = 1;
3902 DECL_ONE_ONLY (decl) = 1;
3903
3904 (*targetm.asm_out.unique_section) (decl, 0);
3905 named_section (decl, NULL, 0);
3906
3907 (*targetm.asm_out.globalize_label) (asm_out_file, name);
3908 fputs ("\t.hidden\t", asm_out_file);
3909 assemble_name (asm_out_file, name);
3910 fputc ('\n', asm_out_file);
3911 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
3912 }
3913 else
3914 {
3915 text_section ();
3916 ASM_OUTPUT_LABEL (asm_out_file, name);
3917 }
3918
3919 xops[0] = gen_rtx_REG (SImode, regno);
3920 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
3921 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
3922 output_asm_insn ("ret", xops);
3923 }
3924
3925 if (NEED_INDICATE_EXEC_STACK)
3926 file_end_indicate_exec_stack ();
3927 }
3928
3929 /* Emit code for the SET_GOT patterns. */
3930
3931 const char *
3932 output_set_got (rtx dest)
3933 {
3934 rtx xops[3];
3935
3936 xops[0] = dest;
3937 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
3938
3939 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
3940 {
3941 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
3942
3943 if (!flag_pic)
3944 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
3945 else
3946 output_asm_insn ("call\t%a2", xops);
3947
3948 #if TARGET_MACHO
3949 /* Output the "canonical" label name ("Lxx$pb") here too. This
3950 is what will be referred to by the Mach-O PIC subsystem. */
3951 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
3952 #endif
3953 (*targetm.asm_out.internal_label) (asm_out_file, "L",
3954 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
3955
3956 if (flag_pic)
3957 output_asm_insn ("pop{l}\t%0", xops);
3958 }
3959 else
3960 {
3961 char name[32];
3962 get_pc_thunk_name (name, REGNO (dest));
3963 pic_labels_used |= 1 << REGNO (dest);
3964
3965 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3966 xops[2] = gen_rtx_MEM (QImode, xops[2]);
3967 output_asm_insn ("call\t%X2", xops);
3968 }
3969
3970 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
3971 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
3972 else if (!TARGET_MACHO)
3973 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
3974
3975 return "";
3976 }
3977
3978 /* Generate an "push" pattern for input ARG. */
3979
3980 static rtx
3981 gen_push (rtx arg)
3982 {
3983 return gen_rtx_SET (VOIDmode,
3984 gen_rtx_MEM (Pmode,
3985 gen_rtx_PRE_DEC (Pmode,
3986 stack_pointer_rtx)),
3987 arg);
3988 }
3989
3990 /* Return >= 0 if there is an unused call-clobbered register available
3991 for the entire function. */
3992
3993 static unsigned int
3994 ix86_select_alt_pic_regnum (void)
3995 {
3996 if (current_function_is_leaf && !current_function_profile)
3997 {
3998 int i;
3999 for (i = 2; i >= 0; --i)
4000 if (!regs_ever_live[i])
4001 return i;
4002 }
4003
4004 return INVALID_REGNUM;
4005 }
4006
4007 /* Return 1 if we need to save REGNO. */
4008 static int
4009 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4010 {
4011 if (pic_offset_table_rtx
4012 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4013 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4014 || current_function_profile
4015 || current_function_calls_eh_return
4016 || current_function_uses_const_pool))
4017 {
4018 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4019 return 0;
4020 return 1;
4021 }
4022
4023 if (current_function_calls_eh_return && maybe_eh_return)
4024 {
4025 unsigned i;
4026 for (i = 0; ; i++)
4027 {
4028 unsigned test = EH_RETURN_DATA_REGNO (i);
4029 if (test == INVALID_REGNUM)
4030 break;
4031 if (test == regno)
4032 return 1;
4033 }
4034 }
4035
4036 return (regs_ever_live[regno]
4037 && !call_used_regs[regno]
4038 && !fixed_regs[regno]
4039 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4040 }
4041
4042 /* Return number of registers to be saved on the stack. */
4043
4044 static int
4045 ix86_nsaved_regs (void)
4046 {
4047 int nregs = 0;
4048 int regno;
4049
4050 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4051 if (ix86_save_reg (regno, true))
4052 nregs++;
4053 return nregs;
4054 }
4055
4056 /* Return the offset between two registers, one to be eliminated, and the other
4057 its replacement, at the start of a routine. */
4058
4059 HOST_WIDE_INT
4060 ix86_initial_elimination_offset (int from, int to)
4061 {
4062 struct ix86_frame frame;
4063 ix86_compute_frame_layout (&frame);
4064
4065 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4066 return frame.hard_frame_pointer_offset;
4067 else if (from == FRAME_POINTER_REGNUM
4068 && to == HARD_FRAME_POINTER_REGNUM)
4069 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4070 else
4071 {
4072 if (to != STACK_POINTER_REGNUM)
4073 abort ();
4074 else if (from == ARG_POINTER_REGNUM)
4075 return frame.stack_pointer_offset;
4076 else if (from != FRAME_POINTER_REGNUM)
4077 abort ();
4078 else
4079 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4080 }
4081 }
4082
4083 /* Fill structure ix86_frame about frame of currently computed function. */
4084
4085 static void
4086 ix86_compute_frame_layout (struct ix86_frame *frame)
4087 {
4088 HOST_WIDE_INT total_size;
4089 unsigned int stack_alignment_needed;
4090 HOST_WIDE_INT offset;
4091 unsigned int preferred_alignment;
4092 HOST_WIDE_INT size = get_frame_size ();
4093
4094 frame->nregs = ix86_nsaved_regs ();
4095 total_size = size;
4096
4097 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4098 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4099
4100 /* During reload iteration the amount of registers saved can change.
4101 Recompute the value as needed. Do not recompute when amount of registers
4102 didn't change as reload does mutiple calls to the function and does not
4103 expect the decision to change within single iteration. */
4104 if (!optimize_size
4105 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4106 {
4107 int count = frame->nregs;
4108
4109 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4110 /* The fast prologue uses move instead of push to save registers. This
4111 is significantly longer, but also executes faster as modern hardware
4112 can execute the moves in parallel, but can't do that for push/pop.
4113
4114 Be careful about choosing what prologue to emit: When function takes
4115 many instructions to execute we may use slow version as well as in
4116 case function is known to be outside hot spot (this is known with
4117 feedback only). Weight the size of function by number of registers
4118 to save as it is cheap to use one or two push instructions but very
4119 slow to use many of them. */
4120 if (count)
4121 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4122 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4123 || (flag_branch_probabilities
4124 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4125 cfun->machine->use_fast_prologue_epilogue = false;
4126 else
4127 cfun->machine->use_fast_prologue_epilogue
4128 = !expensive_function_p (count);
4129 }
4130 if (TARGET_PROLOGUE_USING_MOVE
4131 && cfun->machine->use_fast_prologue_epilogue)
4132 frame->save_regs_using_mov = true;
4133 else
4134 frame->save_regs_using_mov = false;
4135
4136
4137 /* Skip return address and saved base pointer. */
4138 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4139
4140 frame->hard_frame_pointer_offset = offset;
4141
4142 /* Do some sanity checking of stack_alignment_needed and
4143 preferred_alignment, since i386 port is the only using those features
4144 that may break easily. */
4145
4146 if (size && !stack_alignment_needed)
4147 abort ();
4148 if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
4149 abort ();
4150 if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4151 abort ();
4152 if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4153 abort ();
4154
4155 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4156 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4157
4158 /* Register save area */
4159 offset += frame->nregs * UNITS_PER_WORD;
4160
4161 /* Va-arg area */
4162 if (ix86_save_varrargs_registers)
4163 {
4164 offset += X86_64_VARARGS_SIZE;
4165 frame->va_arg_size = X86_64_VARARGS_SIZE;
4166 }
4167 else
4168 frame->va_arg_size = 0;
4169
4170 /* Align start of frame for local function. */
4171 frame->padding1 = ((offset + stack_alignment_needed - 1)
4172 & -stack_alignment_needed) - offset;
4173
4174 offset += frame->padding1;
4175
4176 /* Frame pointer points here. */
4177 frame->frame_pointer_offset = offset;
4178
4179 offset += size;
4180
4181 /* Add outgoing arguments area. Can be skipped if we eliminated
4182 all the function calls as dead code.
4183 Skipping is however impossible when function calls alloca. Alloca
4184 expander assumes that last current_function_outgoing_args_size
4185 of stack frame are unused. */
4186 if (ACCUMULATE_OUTGOING_ARGS
4187 && (!current_function_is_leaf || current_function_calls_alloca))
4188 {
4189 offset += current_function_outgoing_args_size;
4190 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4191 }
4192 else
4193 frame->outgoing_arguments_size = 0;
4194
4195 /* Align stack boundary. Only needed if we're calling another function
4196 or using alloca. */
4197 if (!current_function_is_leaf || current_function_calls_alloca)
4198 frame->padding2 = ((offset + preferred_alignment - 1)
4199 & -preferred_alignment) - offset;
4200 else
4201 frame->padding2 = 0;
4202
4203 offset += frame->padding2;
4204
4205 /* We've reached end of stack frame. */
4206 frame->stack_pointer_offset = offset;
4207
4208 /* Size prologue needs to allocate. */
4209 frame->to_allocate =
4210 (size + frame->padding1 + frame->padding2
4211 + frame->outgoing_arguments_size + frame->va_arg_size);
4212
4213 if ((!frame->to_allocate && frame->nregs <= 1)
4214 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4215 frame->save_regs_using_mov = false;
4216
4217 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4218 && current_function_is_leaf)
4219 {
4220 frame->red_zone_size = frame->to_allocate;
4221 if (frame->save_regs_using_mov)
4222 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4223 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4224 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4225 }
4226 else
4227 frame->red_zone_size = 0;
4228 frame->to_allocate -= frame->red_zone_size;
4229 frame->stack_pointer_offset -= frame->red_zone_size;
4230 #if 0
4231 fprintf (stderr, "nregs: %i\n", frame->nregs);
4232 fprintf (stderr, "size: %i\n", size);
4233 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4234 fprintf (stderr, "padding1: %i\n", frame->padding1);
4235 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4236 fprintf (stderr, "padding2: %i\n", frame->padding2);
4237 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4238 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4239 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4240 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4241 frame->hard_frame_pointer_offset);
4242 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4243 #endif
4244 }
4245
4246 /* Emit code to save registers in the prologue. */
4247
4248 static void
4249 ix86_emit_save_regs (void)
4250 {
4251 int regno;
4252 rtx insn;
4253
4254 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4255 if (ix86_save_reg (regno, true))
4256 {
4257 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4258 RTX_FRAME_RELATED_P (insn) = 1;
4259 }
4260 }
4261
4262 /* Emit code to save registers using MOV insns. First register
4263 is restored from POINTER + OFFSET. */
4264 static void
4265 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4266 {
4267 int regno;
4268 rtx insn;
4269
4270 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4271 if (ix86_save_reg (regno, true))
4272 {
4273 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4274 Pmode, offset),
4275 gen_rtx_REG (Pmode, regno));
4276 RTX_FRAME_RELATED_P (insn) = 1;
4277 offset += UNITS_PER_WORD;
4278 }
4279 }
4280
4281 /* Expand prologue or epilogue stack adjustment.
4282 The pattern exist to put a dependency on all ebp-based memory accesses.
4283 STYLE should be negative if instructions should be marked as frame related,
4284 zero if %r11 register is live and cannot be freely used and positive
4285 otherwise. */
4286
4287 static void
4288 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4289 {
4290 rtx insn;
4291
4292 if (! TARGET_64BIT)
4293 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4294 else if (x86_64_immediate_operand (offset, DImode))
4295 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4296 else
4297 {
4298 rtx r11;
4299 /* r11 is used by indirect sibcall return as well, set before the
4300 epilogue and used after the epilogue. ATM indirect sibcall
4301 shouldn't be used together with huge frame sizes in one
4302 function because of the frame_size check in sibcall.c. */
4303 if (style == 0)
4304 abort ();
4305 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4306 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4307 if (style < 0)
4308 RTX_FRAME_RELATED_P (insn) = 1;
4309 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4310 offset));
4311 }
4312 if (style < 0)
4313 RTX_FRAME_RELATED_P (insn) = 1;
4314 }
4315
4316 /* Expand the prologue into a bunch of separate insns. */
4317
4318 void
4319 ix86_expand_prologue (void)
4320 {
4321 rtx insn;
4322 bool pic_reg_used;
4323 struct ix86_frame frame;
4324 HOST_WIDE_INT allocate;
4325
4326 ix86_compute_frame_layout (&frame);
4327
4328 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4329 slower on all targets. Also sdb doesn't like it. */
4330
4331 if (frame_pointer_needed)
4332 {
4333 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4334 RTX_FRAME_RELATED_P (insn) = 1;
4335
4336 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4337 RTX_FRAME_RELATED_P (insn) = 1;
4338 }
4339
4340 allocate = frame.to_allocate;
4341
4342 if (!frame.save_regs_using_mov)
4343 ix86_emit_save_regs ();
4344 else
4345 allocate += frame.nregs * UNITS_PER_WORD;
4346
4347 /* When using red zone we may start register saving before allocating
4348 the stack frame saving one cycle of the prologue. */
4349 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4350 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4351 : stack_pointer_rtx,
4352 -frame.nregs * UNITS_PER_WORD);
4353
4354 if (allocate == 0)
4355 ;
4356 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4357 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4358 GEN_INT (-allocate), -1);
4359 else
4360 {
4361 /* Only valid for Win32. */
4362 rtx eax = gen_rtx_REG (SImode, 0);
4363 bool eax_live = ix86_eax_live_at_start_p ();
4364 rtx t;
4365
4366 if (TARGET_64BIT)
4367 abort ();
4368
4369 if (eax_live)
4370 {
4371 emit_insn (gen_push (eax));
4372 allocate -= 4;
4373 }
4374
4375 emit_move_insn (eax, GEN_INT (allocate));
4376
4377 insn = emit_insn (gen_allocate_stack_worker (eax));
4378 RTX_FRAME_RELATED_P (insn) = 1;
4379 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4380 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4381 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4382 t, REG_NOTES (insn));
4383
4384 if (eax_live)
4385 {
4386 if (frame_pointer_needed)
4387 t = plus_constant (hard_frame_pointer_rtx,
4388 allocate
4389 - frame.to_allocate
4390 - frame.nregs * UNITS_PER_WORD);
4391 else
4392 t = plus_constant (stack_pointer_rtx, allocate);
4393 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4394 }
4395 }
4396
4397 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4398 {
4399 if (!frame_pointer_needed || !frame.to_allocate)
4400 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4401 else
4402 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4403 -frame.nregs * UNITS_PER_WORD);
4404 }
4405
4406 pic_reg_used = false;
4407 if (pic_offset_table_rtx
4408 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4409 || current_function_profile))
4410 {
4411 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4412
4413 if (alt_pic_reg_used != INVALID_REGNUM)
4414 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4415
4416 pic_reg_used = true;
4417 }
4418
4419 if (pic_reg_used)
4420 {
4421 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4422
4423 /* Even with accurate pre-reload life analysis, we can wind up
4424 deleting all references to the pic register after reload.
4425 Consider if cross-jumping unifies two sides of a branch
4426 controlled by a comparison vs the only read from a global.
4427 In which case, allow the set_got to be deleted, though we're
4428 too late to do anything about the ebx save in the prologue. */
4429 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
4430 }
4431
4432 /* Prevent function calls from be scheduled before the call to mcount.
4433 In the pic_reg_used case, make sure that the got load isn't deleted. */
4434 if (current_function_profile)
4435 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
4436 }
4437
4438 /* Emit code to restore saved registers using MOV insns. First register
4439 is restored from POINTER + OFFSET. */
4440 static void
4441 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4442 int maybe_eh_return)
4443 {
4444 int regno;
4445 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4446
4447 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4448 if (ix86_save_reg (regno, maybe_eh_return))
4449 {
4450 /* Ensure that adjust_address won't be forced to produce pointer
4451 out of range allowed by x86-64 instruction set. */
4452 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4453 {
4454 rtx r11;
4455
4456 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4457 emit_move_insn (r11, GEN_INT (offset));
4458 emit_insn (gen_adddi3 (r11, r11, pointer));
4459 base_address = gen_rtx_MEM (Pmode, r11);
4460 offset = 0;
4461 }
4462 emit_move_insn (gen_rtx_REG (Pmode, regno),
4463 adjust_address (base_address, Pmode, offset));
4464 offset += UNITS_PER_WORD;
4465 }
4466 }
4467
4468 /* Restore function stack, frame, and registers. */
4469
4470 void
4471 ix86_expand_epilogue (int style)
4472 {
4473 int regno;
4474 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4475 struct ix86_frame frame;
4476 HOST_WIDE_INT offset;
4477
4478 ix86_compute_frame_layout (&frame);
4479
4480 /* Calculate start of saved registers relative to ebp. Special care
4481 must be taken for the normal return case of a function using
4482 eh_return: the eax and edx registers are marked as saved, but not
4483 restored along this path. */
4484 offset = frame.nregs;
4485 if (current_function_calls_eh_return && style != 2)
4486 offset -= 2;
4487 offset *= -UNITS_PER_WORD;
4488
4489 /* If we're only restoring one register and sp is not valid then
4490 using a move instruction to restore the register since it's
4491 less work than reloading sp and popping the register.
4492
4493 The default code result in stack adjustment using add/lea instruction,
4494 while this code results in LEAVE instruction (or discrete equivalent),
4495 so it is profitable in some other cases as well. Especially when there
4496 are no registers to restore. We also use this code when TARGET_USE_LEAVE
4497 and there is exactly one register to pop. This heuristic may need some
4498 tuning in future. */
4499 if ((!sp_valid && frame.nregs <= 1)
4500 || (TARGET_EPILOGUE_USING_MOVE
4501 && cfun->machine->use_fast_prologue_epilogue
4502 && (frame.nregs > 1 || frame.to_allocate))
4503 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4504 || (frame_pointer_needed && TARGET_USE_LEAVE
4505 && cfun->machine->use_fast_prologue_epilogue
4506 && frame.nregs == 1)
4507 || current_function_calls_eh_return)
4508 {
4509 /* Restore registers. We can use ebp or esp to address the memory
4510 locations. If both are available, default to ebp, since offsets
4511 are known to be small. Only exception is esp pointing directly to the
4512 end of block of saved registers, where we may simplify addressing
4513 mode. */
4514
4515 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4516 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4517 frame.to_allocate, style == 2);
4518 else
4519 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4520 offset, style == 2);
4521
4522 /* eh_return epilogues need %ecx added to the stack pointer. */
4523 if (style == 2)
4524 {
4525 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4526
4527 if (frame_pointer_needed)
4528 {
4529 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4530 tmp = plus_constant (tmp, UNITS_PER_WORD);
4531 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4532
4533 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4534 emit_move_insn (hard_frame_pointer_rtx, tmp);
4535
4536 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4537 const0_rtx, style);
4538 }
4539 else
4540 {
4541 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4542 tmp = plus_constant (tmp, (frame.to_allocate
4543 + frame.nregs * UNITS_PER_WORD));
4544 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
4545 }
4546 }
4547 else if (!frame_pointer_needed)
4548 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4549 GEN_INT (frame.to_allocate
4550 + frame.nregs * UNITS_PER_WORD),
4551 style);
4552 /* If not an i386, mov & pop is faster than "leave". */
4553 else if (TARGET_USE_LEAVE || optimize_size
4554 || !cfun->machine->use_fast_prologue_epilogue)
4555 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4556 else
4557 {
4558 pro_epilogue_adjust_stack (stack_pointer_rtx,
4559 hard_frame_pointer_rtx,
4560 const0_rtx, style);
4561 if (TARGET_64BIT)
4562 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4563 else
4564 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4565 }
4566 }
4567 else
4568 {
4569 /* First step is to deallocate the stack frame so that we can
4570 pop the registers. */
4571 if (!sp_valid)
4572 {
4573 if (!frame_pointer_needed)
4574 abort ();
4575 pro_epilogue_adjust_stack (stack_pointer_rtx,
4576 hard_frame_pointer_rtx,
4577 GEN_INT (offset), style);
4578 }
4579 else if (frame.to_allocate)
4580 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4581 GEN_INT (frame.to_allocate), style);
4582
4583 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4584 if (ix86_save_reg (regno, false))
4585 {
4586 if (TARGET_64BIT)
4587 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
4588 else
4589 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
4590 }
4591 if (frame_pointer_needed)
4592 {
4593 /* Leave results in shorter dependency chains on CPUs that are
4594 able to grok it fast. */
4595 if (TARGET_USE_LEAVE)
4596 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4597 else if (TARGET_64BIT)
4598 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4599 else
4600 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4601 }
4602 }
4603
4604 /* Sibcall epilogues don't want a return instruction. */
4605 if (style == 0)
4606 return;
4607
4608 if (current_function_pops_args && current_function_args_size)
4609 {
4610 rtx popc = GEN_INT (current_function_pops_args);
4611
4612 /* i386 can only pop 64K bytes. If asked to pop more, pop
4613 return address, do explicit add, and jump indirectly to the
4614 caller. */
4615
4616 if (current_function_pops_args >= 65536)
4617 {
4618 rtx ecx = gen_rtx_REG (SImode, 2);
4619
4620 /* There is no "pascal" calling convention in 64bit ABI. */
4621 if (TARGET_64BIT)
4622 abort ();
4623
4624 emit_insn (gen_popsi1 (ecx));
4625 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
4626 emit_jump_insn (gen_return_indirect_internal (ecx));
4627 }
4628 else
4629 emit_jump_insn (gen_return_pop_internal (popc));
4630 }
4631 else
4632 emit_jump_insn (gen_return_internal ());
4633 }
4634
4635 /* Reset from the function's potential modifications. */
4636
4637 static void
4638 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4639 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4640 {
4641 if (pic_offset_table_rtx)
4642 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
4643 }
4644 \f
4645 /* Extract the parts of an RTL expression that is a valid memory address
4646 for an instruction. Return 0 if the structure of the address is
4647 grossly off. Return -1 if the address contains ASHIFT, so it is not
4648 strictly valid, but still used for computing length of lea instruction. */
4649
4650 int
4651 ix86_decompose_address (rtx addr, struct ix86_address *out)
4652 {
4653 rtx base = NULL_RTX;
4654 rtx index = NULL_RTX;
4655 rtx disp = NULL_RTX;
4656 HOST_WIDE_INT scale = 1;
4657 rtx scale_rtx = NULL_RTX;
4658 int retval = 1;
4659 enum ix86_address_seg seg = SEG_DEFAULT;
4660
4661 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
4662 base = addr;
4663 else if (GET_CODE (addr) == PLUS)
4664 {
4665 rtx addends[4], op;
4666 int n = 0, i;
4667
4668 op = addr;
4669 do
4670 {
4671 if (n >= 4)
4672 return 0;
4673 addends[n++] = XEXP (op, 1);
4674 op = XEXP (op, 0);
4675 }
4676 while (GET_CODE (op) == PLUS);
4677 if (n >= 4)
4678 return 0;
4679 addends[n] = op;
4680
4681 for (i = n; i >= 0; --i)
4682 {
4683 op = addends[i];
4684 switch (GET_CODE (op))
4685 {
4686 case MULT:
4687 if (index)
4688 return 0;
4689 index = XEXP (op, 0);
4690 scale_rtx = XEXP (op, 1);
4691 break;
4692
4693 case UNSPEC:
4694 if (XINT (op, 1) == UNSPEC_TP
4695 && TARGET_TLS_DIRECT_SEG_REFS
4696 && seg == SEG_DEFAULT)
4697 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
4698 else
4699 return 0;
4700 break;
4701
4702 case REG:
4703 case SUBREG:
4704 if (!base)
4705 base = op;
4706 else if (!index)
4707 index = op;
4708 else
4709 return 0;
4710 break;
4711
4712 case CONST:
4713 case CONST_INT:
4714 case SYMBOL_REF:
4715 case LABEL_REF:
4716 if (disp)
4717 return 0;
4718 disp = op;
4719 break;
4720
4721 default:
4722 return 0;
4723 }
4724 }
4725 }
4726 else if (GET_CODE (addr) == MULT)
4727 {
4728 index = XEXP (addr, 0); /* index*scale */
4729 scale_rtx = XEXP (addr, 1);
4730 }
4731 else if (GET_CODE (addr) == ASHIFT)
4732 {
4733 rtx tmp;
4734
4735 /* We're called for lea too, which implements ashift on occasion. */
4736 index = XEXP (addr, 0);
4737 tmp = XEXP (addr, 1);
4738 if (GET_CODE (tmp) != CONST_INT)
4739 return 0;
4740 scale = INTVAL (tmp);
4741 if ((unsigned HOST_WIDE_INT) scale > 3)
4742 return 0;
4743 scale = 1 << scale;
4744 retval = -1;
4745 }
4746 else
4747 disp = addr; /* displacement */
4748
4749 /* Extract the integral value of scale. */
4750 if (scale_rtx)
4751 {
4752 if (GET_CODE (scale_rtx) != CONST_INT)
4753 return 0;
4754 scale = INTVAL (scale_rtx);
4755 }
4756
4757 /* Allow arg pointer and stack pointer as index if there is not scaling. */
4758 if (base && index && scale == 1
4759 && (index == arg_pointer_rtx
4760 || index == frame_pointer_rtx
4761 || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM)))
4762 {
4763 rtx tmp = base;
4764 base = index;
4765 index = tmp;
4766 }
4767
4768 /* Special case: %ebp cannot be encoded as a base without a displacement. */
4769 if ((base == hard_frame_pointer_rtx
4770 || base == frame_pointer_rtx
4771 || base == arg_pointer_rtx) && !disp)
4772 disp = const0_rtx;
4773
4774 /* Special case: on K6, [%esi] makes the instruction vector decoded.
4775 Avoid this by transforming to [%esi+0]. */
4776 if (ix86_tune == PROCESSOR_K6 && !optimize_size
4777 && base && !index && !disp
4778 && REG_P (base)
4779 && REGNO_REG_CLASS (REGNO (base)) == SIREG)
4780 disp = const0_rtx;
4781
4782 /* Special case: encode reg+reg instead of reg*2. */
4783 if (!base && index && scale && scale == 2)
4784 base = index, scale = 1;
4785
4786 /* Special case: scaling cannot be encoded without base or displacement. */
4787 if (!base && !disp && index && scale != 1)
4788 disp = const0_rtx;
4789
4790 out->base = base;
4791 out->index = index;
4792 out->disp = disp;
4793 out->scale = scale;
4794 out->seg = seg;
4795
4796 return retval;
4797 }
4798 \f
4799 /* Return cost of the memory address x.
4800 For i386, it is better to use a complex address than let gcc copy
4801 the address into a reg and make a new pseudo. But not if the address
4802 requires to two regs - that would mean more pseudos with longer
4803 lifetimes. */
4804 static int
4805 ix86_address_cost (rtx x)
4806 {
4807 struct ix86_address parts;
4808 int cost = 1;
4809
4810 if (!ix86_decompose_address (x, &parts))
4811 abort ();
4812
4813 /* More complex memory references are better. */
4814 if (parts.disp && parts.disp != const0_rtx)
4815 cost--;
4816 if (parts.seg != SEG_DEFAULT)
4817 cost--;
4818
4819 /* Attempt to minimize number of registers in the address. */
4820 if ((parts.base
4821 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
4822 || (parts.index
4823 && (!REG_P (parts.index)
4824 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
4825 cost++;
4826
4827 if (parts.base
4828 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
4829 && parts.index
4830 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
4831 && parts.base != parts.index)
4832 cost++;
4833
4834 /* AMD-K6 don't like addresses with ModR/M set to 00_xxx_100b,
4835 since it's predecode logic can't detect the length of instructions
4836 and it degenerates to vector decoded. Increase cost of such
4837 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
4838 to split such addresses or even refuse such addresses at all.
4839
4840 Following addressing modes are affected:
4841 [base+scale*index]
4842 [scale*index+disp]
4843 [base+index]
4844
4845 The first and last case may be avoidable by explicitly coding the zero in
4846 memory address, but I don't have AMD-K6 machine handy to check this
4847 theory. */
4848
4849 if (TARGET_K6
4850 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
4851 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
4852 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
4853 cost += 10;
4854
4855 return cost;
4856 }
4857 \f
4858 /* If X is a machine specific address (i.e. a symbol or label being
4859 referenced as a displacement from the GOT implemented using an
4860 UNSPEC), then return the base term. Otherwise return X. */
4861
4862 rtx
4863 ix86_find_base_term (rtx x)
4864 {
4865 rtx term;
4866
4867 if (TARGET_64BIT)
4868 {
4869 if (GET_CODE (x) != CONST)
4870 return x;
4871 term = XEXP (x, 0);
4872 if (GET_CODE (term) == PLUS
4873 && (GET_CODE (XEXP (term, 1)) == CONST_INT
4874 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
4875 term = XEXP (term, 0);
4876 if (GET_CODE (term) != UNSPEC
4877 || XINT (term, 1) != UNSPEC_GOTPCREL)
4878 return x;
4879
4880 term = XVECEXP (term, 0, 0);
4881
4882 if (GET_CODE (term) != SYMBOL_REF
4883 && GET_CODE (term) != LABEL_REF)
4884 return x;
4885
4886 return term;
4887 }
4888
4889 term = ix86_delegitimize_address (x);
4890
4891 if (GET_CODE (term) != SYMBOL_REF
4892 && GET_CODE (term) != LABEL_REF)
4893 return x;
4894
4895 return term;
4896 }
4897
4898 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
4899 this is used for to form addresses to local data when -fPIC is in
4900 use. */
4901
4902 static bool
4903 darwin_local_data_pic (rtx disp)
4904 {
4905 if (GET_CODE (disp) == MINUS)
4906 {
4907 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
4908 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
4909 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
4910 {
4911 const char *sym_name = XSTR (XEXP (disp, 1), 0);
4912 if (! strcmp (sym_name, "<pic base>"))
4913 return true;
4914 }
4915 }
4916
4917 return false;
4918 }
4919 \f
4920 /* Determine if a given RTX is a valid constant. We already know this
4921 satisfies CONSTANT_P. */
4922
4923 bool
4924 legitimate_constant_p (rtx x)
4925 {
4926 switch (GET_CODE (x))
4927 {
4928 case CONST:
4929 x = XEXP (x, 0);
4930
4931 if (GET_CODE (x) == PLUS)
4932 {
4933 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4934 return false;
4935 x = XEXP (x, 0);
4936 }
4937
4938 if (TARGET_MACHO && darwin_local_data_pic (x))
4939 return true;
4940
4941 /* Only some unspecs are valid as "constants". */
4942 if (GET_CODE (x) == UNSPEC)
4943 switch (XINT (x, 1))
4944 {
4945 case UNSPEC_TPOFF:
4946 case UNSPEC_NTPOFF:
4947 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4948 case UNSPEC_DTPOFF:
4949 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4950 default:
4951 return false;
4952 }
4953
4954 /* We must have drilled down to a symbol. */
4955 if (!symbolic_operand (x, Pmode))
4956 return false;
4957 /* FALLTHRU */
4958
4959 case SYMBOL_REF:
4960 /* TLS symbols are never valid. */
4961 if (tls_symbolic_operand (x, Pmode))
4962 return false;
4963 break;
4964
4965 default:
4966 break;
4967 }
4968
4969 /* Otherwise we handle everything else in the move patterns. */
4970 return true;
4971 }
4972
4973 /* Determine if it's legal to put X into the constant pool. This
4974 is not possible for the address of thread-local symbols, which
4975 is checked above. */
4976
4977 static bool
4978 ix86_cannot_force_const_mem (rtx x)
4979 {
4980 return !legitimate_constant_p (x);
4981 }
4982
4983 /* Determine if a given RTX is a valid constant address. */
4984
4985 bool
4986 constant_address_p (rtx x)
4987 {
4988 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
4989 }
4990
4991 /* Nonzero if the constant value X is a legitimate general operand
4992 when generating PIC code. It is given that flag_pic is on and
4993 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
4994
4995 bool
4996 legitimate_pic_operand_p (rtx x)
4997 {
4998 rtx inner;
4999
5000 switch (GET_CODE (x))
5001 {
5002 case CONST:
5003 inner = XEXP (x, 0);
5004
5005 /* Only some unspecs are valid as "constants". */
5006 if (GET_CODE (inner) == UNSPEC)
5007 switch (XINT (inner, 1))
5008 {
5009 case UNSPEC_TPOFF:
5010 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5011 default:
5012 return false;
5013 }
5014 /* FALLTHRU */
5015
5016 case SYMBOL_REF:
5017 case LABEL_REF:
5018 return legitimate_pic_address_disp_p (x);
5019
5020 default:
5021 return true;
5022 }
5023 }
5024
5025 /* Determine if a given CONST RTX is a valid memory displacement
5026 in PIC mode. */
5027
5028 int
5029 legitimate_pic_address_disp_p (rtx disp)
5030 {
5031 bool saw_plus;
5032
5033 /* In 64bit mode we can allow direct addresses of symbols and labels
5034 when they are not dynamic symbols. */
5035 if (TARGET_64BIT)
5036 {
5037 /* TLS references should always be enclosed in UNSPEC. */
5038 if (tls_symbolic_operand (disp, GET_MODE (disp)))
5039 return 0;
5040 if (GET_CODE (disp) == SYMBOL_REF
5041 && ix86_cmodel == CM_SMALL_PIC
5042 && SYMBOL_REF_LOCAL_P (disp))
5043 return 1;
5044 if (GET_CODE (disp) == LABEL_REF)
5045 return 1;
5046 if (GET_CODE (disp) == CONST
5047 && GET_CODE (XEXP (disp, 0)) == PLUS)
5048 {
5049 rtx op0 = XEXP (XEXP (disp, 0), 0);
5050 rtx op1 = XEXP (XEXP (disp, 0), 1);
5051
5052 /* TLS references should always be enclosed in UNSPEC. */
5053 if (tls_symbolic_operand (op0, GET_MODE (op0)))
5054 return 0;
5055 if (((GET_CODE (op0) == SYMBOL_REF
5056 && ix86_cmodel == CM_SMALL_PIC
5057 && SYMBOL_REF_LOCAL_P (op0))
5058 || GET_CODE (op0) == LABEL_REF)
5059 && GET_CODE (op1) == CONST_INT
5060 && INTVAL (op1) < 16*1024*1024
5061 && INTVAL (op1) >= -16*1024*1024)
5062 return 1;
5063 }
5064 }
5065 if (GET_CODE (disp) != CONST)
5066 return 0;
5067 disp = XEXP (disp, 0);
5068
5069 if (TARGET_64BIT)
5070 {
5071 /* We are unsafe to allow PLUS expressions. This limit allowed distance
5072 of GOT tables. We should not need these anyway. */
5073 if (GET_CODE (disp) != UNSPEC
5074 || XINT (disp, 1) != UNSPEC_GOTPCREL)
5075 return 0;
5076
5077 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5078 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5079 return 0;
5080 return 1;
5081 }
5082
5083 saw_plus = false;
5084 if (GET_CODE (disp) == PLUS)
5085 {
5086 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5087 return 0;
5088 disp = XEXP (disp, 0);
5089 saw_plus = true;
5090 }
5091
5092 if (TARGET_MACHO && darwin_local_data_pic (disp))
5093 return 1;
5094
5095 if (GET_CODE (disp) != UNSPEC)
5096 return 0;
5097
5098 switch (XINT (disp, 1))
5099 {
5100 case UNSPEC_GOT:
5101 if (saw_plus)
5102 return false;
5103 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5104 case UNSPEC_GOTOFF:
5105 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5106 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5107 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5108 return false;
5109 case UNSPEC_GOTTPOFF:
5110 case UNSPEC_GOTNTPOFF:
5111 case UNSPEC_INDNTPOFF:
5112 if (saw_plus)
5113 return false;
5114 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5115 case UNSPEC_NTPOFF:
5116 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5117 case UNSPEC_DTPOFF:
5118 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5119 }
5120
5121 return 0;
5122 }
5123
5124 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5125 memory address for an instruction. The MODE argument is the machine mode
5126 for the MEM expression that wants to use this address.
5127
5128 It only recognizes address in canonical form. LEGITIMIZE_ADDRESS should
5129 convert common non-canonical forms to canonical form so that they will
5130 be recognized. */
5131
5132 int
5133 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5134 {
5135 struct ix86_address parts;
5136 rtx base, index, disp;
5137 HOST_WIDE_INT scale;
5138 const char *reason = NULL;
5139 rtx reason_rtx = NULL_RTX;
5140
5141 if (TARGET_DEBUG_ADDR)
5142 {
5143 fprintf (stderr,
5144 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5145 GET_MODE_NAME (mode), strict);
5146 debug_rtx (addr);
5147 }
5148
5149 if (ix86_decompose_address (addr, &parts) <= 0)
5150 {
5151 reason = "decomposition failed";
5152 goto report_error;
5153 }
5154
5155 base = parts.base;
5156 index = parts.index;
5157 disp = parts.disp;
5158 scale = parts.scale;
5159
5160 /* Validate base register.
5161
5162 Don't allow SUBREG's here, it can lead to spill failures when the base
5163 is one word out of a two word structure, which is represented internally
5164 as a DImode int. */
5165
5166 if (base)
5167 {
5168 reason_rtx = base;
5169
5170 if (GET_CODE (base) != REG)
5171 {
5172 reason = "base is not a register";
5173 goto report_error;
5174 }
5175
5176 if (GET_MODE (base) != Pmode)
5177 {
5178 reason = "base is not in Pmode";
5179 goto report_error;
5180 }
5181
5182 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
5183 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
5184 {
5185 reason = "base is not valid";
5186 goto report_error;
5187 }
5188 }
5189
5190 /* Validate index register.
5191
5192 Don't allow SUBREG's here, it can lead to spill failures when the index
5193 is one word out of a two word structure, which is represented internally
5194 as a DImode int. */
5195
5196 if (index)
5197 {
5198 reason_rtx = index;
5199
5200 if (GET_CODE (index) != REG)
5201 {
5202 reason = "index is not a register";
5203 goto report_error;
5204 }
5205
5206 if (GET_MODE (index) != Pmode)
5207 {
5208 reason = "index is not in Pmode";
5209 goto report_error;
5210 }
5211
5212 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index))
5213 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index)))
5214 {
5215 reason = "index is not valid";
5216 goto report_error;
5217 }
5218 }
5219
5220 /* Validate scale factor. */
5221 if (scale != 1)
5222 {
5223 reason_rtx = GEN_INT (scale);
5224 if (!index)
5225 {
5226 reason = "scale without index";
5227 goto report_error;
5228 }
5229
5230 if (scale != 2 && scale != 4 && scale != 8)
5231 {
5232 reason = "scale is not a valid multiplier";
5233 goto report_error;
5234 }
5235 }
5236
5237 /* Validate displacement. */
5238 if (disp)
5239 {
5240 reason_rtx = disp;
5241
5242 if (GET_CODE (disp) == CONST
5243 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5244 switch (XINT (XEXP (disp, 0), 1))
5245 {
5246 case UNSPEC_GOT:
5247 case UNSPEC_GOTOFF:
5248 case UNSPEC_GOTPCREL:
5249 if (!flag_pic)
5250 abort ();
5251 goto is_legitimate_pic;
5252
5253 case UNSPEC_GOTTPOFF:
5254 case UNSPEC_GOTNTPOFF:
5255 case UNSPEC_INDNTPOFF:
5256 case UNSPEC_NTPOFF:
5257 case UNSPEC_DTPOFF:
5258 break;
5259
5260 default:
5261 reason = "invalid address unspec";
5262 goto report_error;
5263 }
5264
5265 else if (flag_pic && (SYMBOLIC_CONST (disp)
5266 #if TARGET_MACHO
5267 && !machopic_operand_p (disp)
5268 #endif
5269 ))
5270 {
5271 is_legitimate_pic:
5272 if (TARGET_64BIT && (index || base))
5273 {
5274 /* foo@dtpoff(%rX) is ok. */
5275 if (GET_CODE (disp) != CONST
5276 || GET_CODE (XEXP (disp, 0)) != PLUS
5277 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5278 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5279 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5280 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5281 {
5282 reason = "non-constant pic memory reference";
5283 goto report_error;
5284 }
5285 }
5286 else if (! legitimate_pic_address_disp_p (disp))
5287 {
5288 reason = "displacement is an invalid pic construct";
5289 goto report_error;
5290 }
5291
5292 /* This code used to verify that a symbolic pic displacement
5293 includes the pic_offset_table_rtx register.
5294
5295 While this is good idea, unfortunately these constructs may
5296 be created by "adds using lea" optimization for incorrect
5297 code like:
5298
5299 int a;
5300 int foo(int i)
5301 {
5302 return *(&a+i);
5303 }
5304
5305 This code is nonsensical, but results in addressing
5306 GOT table with pic_offset_table_rtx base. We can't
5307 just refuse it easily, since it gets matched by
5308 "addsi3" pattern, that later gets split to lea in the
5309 case output register differs from input. While this
5310 can be handled by separate addsi pattern for this case
5311 that never results in lea, this seems to be easier and
5312 correct fix for crash to disable this test. */
5313 }
5314 else if (GET_CODE (disp) != LABEL_REF
5315 && GET_CODE (disp) != CONST_INT
5316 && (GET_CODE (disp) != CONST
5317 || !legitimate_constant_p (disp))
5318 && (GET_CODE (disp) != SYMBOL_REF
5319 || !legitimate_constant_p (disp)))
5320 {
5321 reason = "displacement is not constant";
5322 goto report_error;
5323 }
5324 else if (TARGET_64BIT
5325 && !x86_64_immediate_operand (disp, VOIDmode))
5326 {
5327 reason = "displacement is out of range";
5328 goto report_error;
5329 }
5330 }
5331
5332 /* Everything looks valid. */
5333 if (TARGET_DEBUG_ADDR)
5334 fprintf (stderr, "Success.\n");
5335 return TRUE;
5336
5337 report_error:
5338 if (TARGET_DEBUG_ADDR)
5339 {
5340 fprintf (stderr, "Error: %s\n", reason);
5341 debug_rtx (reason_rtx);
5342 }
5343 return FALSE;
5344 }
5345 \f
5346 /* Return an unique alias set for the GOT. */
5347
5348 static HOST_WIDE_INT
5349 ix86_GOT_alias_set (void)
5350 {
5351 static HOST_WIDE_INT set = -1;
5352 if (set == -1)
5353 set = new_alias_set ();
5354 return set;
5355 }
5356
5357 /* Return a legitimate reference for ORIG (an address) using the
5358 register REG. If REG is 0, a new pseudo is generated.
5359
5360 There are two types of references that must be handled:
5361
5362 1. Global data references must load the address from the GOT, via
5363 the PIC reg. An insn is emitted to do this load, and the reg is
5364 returned.
5365
5366 2. Static data references, constant pool addresses, and code labels
5367 compute the address as an offset from the GOT, whose base is in
5368 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5369 differentiate them from global data objects. The returned
5370 address is the PIC reg + an unspec constant.
5371
5372 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5373 reg also appears in the address. */
5374
5375 static rtx
5376 legitimize_pic_address (rtx orig, rtx reg)
5377 {
5378 rtx addr = orig;
5379 rtx new = orig;
5380 rtx base;
5381
5382 #if TARGET_MACHO
5383 if (reg == 0)
5384 reg = gen_reg_rtx (Pmode);
5385 /* Use the generic Mach-O PIC machinery. */
5386 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5387 #endif
5388
5389 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5390 new = addr;
5391 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5392 {
5393 /* This symbol may be referenced via a displacement from the PIC
5394 base address (@GOTOFF). */
5395
5396 if (reload_in_progress)
5397 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5398 if (GET_CODE (addr) == CONST)
5399 addr = XEXP (addr, 0);
5400 if (GET_CODE (addr) == PLUS)
5401 {
5402 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5403 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5404 }
5405 else
5406 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5407 new = gen_rtx_CONST (Pmode, new);
5408 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5409
5410 if (reg != 0)
5411 {
5412 emit_move_insn (reg, new);
5413 new = reg;
5414 }
5415 }
5416 else if (GET_CODE (addr) == SYMBOL_REF)
5417 {
5418 if (TARGET_64BIT)
5419 {
5420 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5421 new = gen_rtx_CONST (Pmode, new);
5422 new = gen_const_mem (Pmode, new);
5423 set_mem_alias_set (new, ix86_GOT_alias_set ());
5424
5425 if (reg == 0)
5426 reg = gen_reg_rtx (Pmode);
5427 /* Use directly gen_movsi, otherwise the address is loaded
5428 into register for CSE. We don't want to CSE this addresses,
5429 instead we CSE addresses from the GOT table, so skip this. */
5430 emit_insn (gen_movsi (reg, new));
5431 new = reg;
5432 }
5433 else
5434 {
5435 /* This symbol must be referenced via a load from the
5436 Global Offset Table (@GOT). */
5437
5438 if (reload_in_progress)
5439 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5440 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5441 new = gen_rtx_CONST (Pmode, new);
5442 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5443 new = gen_const_mem (Pmode, new);
5444 set_mem_alias_set (new, ix86_GOT_alias_set ());
5445
5446 if (reg == 0)
5447 reg = gen_reg_rtx (Pmode);
5448 emit_move_insn (reg, new);
5449 new = reg;
5450 }
5451 }
5452 else
5453 {
5454 if (GET_CODE (addr) == CONST)
5455 {
5456 addr = XEXP (addr, 0);
5457
5458 /* We must match stuff we generate before. Assume the only
5459 unspecs that can get here are ours. Not that we could do
5460 anything with them anyway.... */
5461 if (GET_CODE (addr) == UNSPEC
5462 || (GET_CODE (addr) == PLUS
5463 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5464 return orig;
5465 if (GET_CODE (addr) != PLUS)
5466 abort ();
5467 }
5468 if (GET_CODE (addr) == PLUS)
5469 {
5470 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5471
5472 /* Check first to see if this is a constant offset from a @GOTOFF
5473 symbol reference. */
5474 if (local_symbolic_operand (op0, Pmode)
5475 && GET_CODE (op1) == CONST_INT)
5476 {
5477 if (!TARGET_64BIT)
5478 {
5479 if (reload_in_progress)
5480 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5481 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
5482 UNSPEC_GOTOFF);
5483 new = gen_rtx_PLUS (Pmode, new, op1);
5484 new = gen_rtx_CONST (Pmode, new);
5485 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5486
5487 if (reg != 0)
5488 {
5489 emit_move_insn (reg, new);
5490 new = reg;
5491 }
5492 }
5493 else
5494 {
5495 if (INTVAL (op1) < -16*1024*1024
5496 || INTVAL (op1) >= 16*1024*1024)
5497 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
5498 }
5499 }
5500 else
5501 {
5502 base = legitimize_pic_address (XEXP (addr, 0), reg);
5503 new = legitimize_pic_address (XEXP (addr, 1),
5504 base == reg ? NULL_RTX : reg);
5505
5506 if (GET_CODE (new) == CONST_INT)
5507 new = plus_constant (base, INTVAL (new));
5508 else
5509 {
5510 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
5511 {
5512 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
5513 new = XEXP (new, 1);
5514 }
5515 new = gen_rtx_PLUS (Pmode, base, new);
5516 }
5517 }
5518 }
5519 }
5520 return new;
5521 }
5522 \f
5523 /* Load the thread pointer. If TO_REG is true, force it into a register. */
5524
5525 static rtx
5526 get_thread_pointer (int to_reg)
5527 {
5528 rtx tp, reg, insn;
5529
5530 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
5531 if (!to_reg)
5532 return tp;
5533
5534 reg = gen_reg_rtx (Pmode);
5535 insn = gen_rtx_SET (VOIDmode, reg, tp);
5536 insn = emit_insn (insn);
5537
5538 return reg;
5539 }
5540
5541 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
5542 false if we expect this to be used for a memory address and true if
5543 we expect to load the address into a register. */
5544
5545 static rtx
5546 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
5547 {
5548 rtx dest, base, off, pic;
5549 int type;
5550
5551 switch (model)
5552 {
5553 case TLS_MODEL_GLOBAL_DYNAMIC:
5554 dest = gen_reg_rtx (Pmode);
5555 if (TARGET_64BIT)
5556 {
5557 rtx rax = gen_rtx_REG (Pmode, 0), insns;
5558
5559 start_sequence ();
5560 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
5561 insns = get_insns ();
5562 end_sequence ();
5563
5564 emit_libcall_block (insns, dest, rax, x);
5565 }
5566 else
5567 emit_insn (gen_tls_global_dynamic_32 (dest, x));
5568 break;
5569
5570 case TLS_MODEL_LOCAL_DYNAMIC:
5571 base = gen_reg_rtx (Pmode);
5572 if (TARGET_64BIT)
5573 {
5574 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
5575
5576 start_sequence ();
5577 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
5578 insns = get_insns ();
5579 end_sequence ();
5580
5581 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
5582 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
5583 emit_libcall_block (insns, base, rax, note);
5584 }
5585 else
5586 emit_insn (gen_tls_local_dynamic_base_32 (base));
5587
5588 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
5589 off = gen_rtx_CONST (Pmode, off);
5590
5591 return gen_rtx_PLUS (Pmode, base, off);
5592
5593 case TLS_MODEL_INITIAL_EXEC:
5594 if (TARGET_64BIT)
5595 {
5596 pic = NULL;
5597 type = UNSPEC_GOTNTPOFF;
5598 }
5599 else if (flag_pic)
5600 {
5601 if (reload_in_progress)
5602 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5603 pic = pic_offset_table_rtx;
5604 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
5605 }
5606 else if (!TARGET_GNU_TLS)
5607 {
5608 pic = gen_reg_rtx (Pmode);
5609 emit_insn (gen_set_got (pic));
5610 type = UNSPEC_GOTTPOFF;
5611 }
5612 else
5613 {
5614 pic = NULL;
5615 type = UNSPEC_INDNTPOFF;
5616 }
5617
5618 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
5619 off = gen_rtx_CONST (Pmode, off);
5620 if (pic)
5621 off = gen_rtx_PLUS (Pmode, pic, off);
5622 off = gen_const_mem (Pmode, off);
5623 set_mem_alias_set (off, ix86_GOT_alias_set ());
5624
5625 if (TARGET_64BIT || TARGET_GNU_TLS)
5626 {
5627 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5628 off = force_reg (Pmode, off);
5629 return gen_rtx_PLUS (Pmode, base, off);
5630 }
5631 else
5632 {
5633 base = get_thread_pointer (true);
5634 dest = gen_reg_rtx (Pmode);
5635 emit_insn (gen_subsi3 (dest, base, off));
5636 }
5637 break;
5638
5639 case TLS_MODEL_LOCAL_EXEC:
5640 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
5641 (TARGET_64BIT || TARGET_GNU_TLS)
5642 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
5643 off = gen_rtx_CONST (Pmode, off);
5644
5645 if (TARGET_64BIT || TARGET_GNU_TLS)
5646 {
5647 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5648 return gen_rtx_PLUS (Pmode, base, off);
5649 }
5650 else
5651 {
5652 base = get_thread_pointer (true);
5653 dest = gen_reg_rtx (Pmode);
5654 emit_insn (gen_subsi3 (dest, base, off));
5655 }
5656 break;
5657
5658 default:
5659 abort ();
5660 }
5661
5662 return dest;
5663 }
5664
5665 /* Try machine-dependent ways of modifying an illegitimate address
5666 to be legitimate. If we find one, return the new, valid address.
5667 This macro is used in only one place: `memory_address' in explow.c.
5668
5669 OLDX is the address as it was before break_out_memory_refs was called.
5670 In some cases it is useful to look at this to decide what needs to be done.
5671
5672 MODE and WIN are passed so that this macro can use
5673 GO_IF_LEGITIMATE_ADDRESS.
5674
5675 It is always safe for this macro to do nothing. It exists to recognize
5676 opportunities to optimize the output.
5677
5678 For the 80386, we handle X+REG by loading X into a register R and
5679 using R+REG. R will go in a general reg and indexing will be used.
5680 However, if REG is a broken-out memory address or multiplication,
5681 nothing needs to be done because REG can certainly go in a general reg.
5682
5683 When -fpic is used, special handling is needed for symbolic references.
5684 See comments by legitimize_pic_address in i386.c for details. */
5685
5686 rtx
5687 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
5688 {
5689 int changed = 0;
5690 unsigned log;
5691
5692 if (TARGET_DEBUG_ADDR)
5693 {
5694 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
5695 GET_MODE_NAME (mode));
5696 debug_rtx (x);
5697 }
5698
5699 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
5700 if (log)
5701 return legitimize_tls_address (x, log, false);
5702 if (GET_CODE (x) == CONST
5703 && GET_CODE (XEXP (x, 0)) == PLUS
5704 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5705 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
5706 {
5707 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
5708 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
5709 }
5710
5711 if (flag_pic && SYMBOLIC_CONST (x))
5712 return legitimize_pic_address (x, 0);
5713
5714 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
5715 if (GET_CODE (x) == ASHIFT
5716 && GET_CODE (XEXP (x, 1)) == CONST_INT
5717 && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
5718 {
5719 changed = 1;
5720 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
5721 GEN_INT (1 << log));
5722 }
5723
5724 if (GET_CODE (x) == PLUS)
5725 {
5726 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
5727
5728 if (GET_CODE (XEXP (x, 0)) == ASHIFT
5729 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5730 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
5731 {
5732 changed = 1;
5733 XEXP (x, 0) = gen_rtx_MULT (Pmode,
5734 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
5735 GEN_INT (1 << log));
5736 }
5737
5738 if (GET_CODE (XEXP (x, 1)) == ASHIFT
5739 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5740 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)
5741 {
5742 changed = 1;
5743 XEXP (x, 1) = gen_rtx_MULT (Pmode,
5744 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
5745 GEN_INT (1 << log));
5746 }
5747
5748 /* Put multiply first if it isn't already. */
5749 if (GET_CODE (XEXP (x, 1)) == MULT)
5750 {
5751 rtx tmp = XEXP (x, 0);
5752 XEXP (x, 0) = XEXP (x, 1);
5753 XEXP (x, 1) = tmp;
5754 changed = 1;
5755 }
5756
5757 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
5758 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
5759 created by virtual register instantiation, register elimination, and
5760 similar optimizations. */
5761 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
5762 {
5763 changed = 1;
5764 x = gen_rtx_PLUS (Pmode,
5765 gen_rtx_PLUS (Pmode, XEXP (x, 0),
5766 XEXP (XEXP (x, 1), 0)),
5767 XEXP (XEXP (x, 1), 1));
5768 }
5769
5770 /* Canonicalize
5771 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
5772 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
5773 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
5774 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5775 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
5776 && CONSTANT_P (XEXP (x, 1)))
5777 {
5778 rtx constant;
5779 rtx other = NULL_RTX;
5780
5781 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5782 {
5783 constant = XEXP (x, 1);
5784 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
5785 }
5786 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
5787 {
5788 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
5789 other = XEXP (x, 1);
5790 }
5791 else
5792 constant = 0;
5793
5794 if (constant)
5795 {
5796 changed = 1;
5797 x = gen_rtx_PLUS (Pmode,
5798 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
5799 XEXP (XEXP (XEXP (x, 0), 1), 0)),
5800 plus_constant (other, INTVAL (constant)));
5801 }
5802 }
5803
5804 if (changed && legitimate_address_p (mode, x, FALSE))
5805 return x;
5806
5807 if (GET_CODE (XEXP (x, 0)) == MULT)
5808 {
5809 changed = 1;
5810 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
5811 }
5812
5813 if (GET_CODE (XEXP (x, 1)) == MULT)
5814 {
5815 changed = 1;
5816 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
5817 }
5818
5819 if (changed
5820 && GET_CODE (XEXP (x, 1)) == REG
5821 && GET_CODE (XEXP (x, 0)) == REG)
5822 return x;
5823
5824 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
5825 {
5826 changed = 1;
5827 x = legitimize_pic_address (x, 0);
5828 }
5829
5830 if (changed && legitimate_address_p (mode, x, FALSE))
5831 return x;
5832
5833 if (GET_CODE (XEXP (x, 0)) == REG)
5834 {
5835 rtx temp = gen_reg_rtx (Pmode);
5836 rtx val = force_operand (XEXP (x, 1), temp);
5837 if (val != temp)
5838 emit_move_insn (temp, val);
5839
5840 XEXP (x, 1) = temp;
5841 return x;
5842 }
5843
5844 else if (GET_CODE (XEXP (x, 1)) == REG)
5845 {
5846 rtx temp = gen_reg_rtx (Pmode);
5847 rtx val = force_operand (XEXP (x, 0), temp);
5848 if (val != temp)
5849 emit_move_insn (temp, val);
5850
5851 XEXP (x, 0) = temp;
5852 return x;
5853 }
5854 }
5855
5856 return x;
5857 }
5858 \f
5859 /* Print an integer constant expression in assembler syntax. Addition
5860 and subtraction are the only arithmetic that may appear in these
5861 expressions. FILE is the stdio stream to write to, X is the rtx, and
5862 CODE is the operand print code from the output string. */
5863
5864 static void
5865 output_pic_addr_const (FILE *file, rtx x, int code)
5866 {
5867 char buf[256];
5868
5869 switch (GET_CODE (x))
5870 {
5871 case PC:
5872 if (flag_pic)
5873 putc ('.', file);
5874 else
5875 abort ();
5876 break;
5877
5878 case SYMBOL_REF:
5879 /* Mark the decl as referenced so that cgraph will output the function. */
5880 if (SYMBOL_REF_DECL (x))
5881 mark_decl_referenced (SYMBOL_REF_DECL (x));
5882
5883 assemble_name (file, XSTR (x, 0));
5884 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
5885 fputs ("@PLT", file);
5886 break;
5887
5888 case LABEL_REF:
5889 x = XEXP (x, 0);
5890 /* FALLTHRU */
5891 case CODE_LABEL:
5892 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
5893 assemble_name (asm_out_file, buf);
5894 break;
5895
5896 case CONST_INT:
5897 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5898 break;
5899
5900 case CONST:
5901 /* This used to output parentheses around the expression,
5902 but that does not work on the 386 (either ATT or BSD assembler). */
5903 output_pic_addr_const (file, XEXP (x, 0), code);
5904 break;
5905
5906 case CONST_DOUBLE:
5907 if (GET_MODE (x) == VOIDmode)
5908 {
5909 /* We can use %d if the number is <32 bits and positive. */
5910 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
5911 fprintf (file, "0x%lx%08lx",
5912 (unsigned long) CONST_DOUBLE_HIGH (x),
5913 (unsigned long) CONST_DOUBLE_LOW (x));
5914 else
5915 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
5916 }
5917 else
5918 /* We can't handle floating point constants;
5919 PRINT_OPERAND must handle them. */
5920 output_operand_lossage ("floating constant misused");
5921 break;
5922
5923 case PLUS:
5924 /* Some assemblers need integer constants to appear first. */
5925 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5926 {
5927 output_pic_addr_const (file, XEXP (x, 0), code);
5928 putc ('+', file);
5929 output_pic_addr_const (file, XEXP (x, 1), code);
5930 }
5931 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5932 {
5933 output_pic_addr_const (file, XEXP (x, 1), code);
5934 putc ('+', file);
5935 output_pic_addr_const (file, XEXP (x, 0), code);
5936 }
5937 else
5938 abort ();
5939 break;
5940
5941 case MINUS:
5942 if (!TARGET_MACHO)
5943 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
5944 output_pic_addr_const (file, XEXP (x, 0), code);
5945 putc ('-', file);
5946 output_pic_addr_const (file, XEXP (x, 1), code);
5947 if (!TARGET_MACHO)
5948 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
5949 break;
5950
5951 case UNSPEC:
5952 if (XVECLEN (x, 0) != 1)
5953 abort ();
5954 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
5955 switch (XINT (x, 1))
5956 {
5957 case UNSPEC_GOT:
5958 fputs ("@GOT", file);
5959 break;
5960 case UNSPEC_GOTOFF:
5961 fputs ("@GOTOFF", file);
5962 break;
5963 case UNSPEC_GOTPCREL:
5964 fputs ("@GOTPCREL(%rip)", file);
5965 break;
5966 case UNSPEC_GOTTPOFF:
5967 /* FIXME: This might be @TPOFF in Sun ld too. */
5968 fputs ("@GOTTPOFF", file);
5969 break;
5970 case UNSPEC_TPOFF:
5971 fputs ("@TPOFF", file);
5972 break;
5973 case UNSPEC_NTPOFF:
5974 if (TARGET_64BIT)
5975 fputs ("@TPOFF", file);
5976 else
5977 fputs ("@NTPOFF", file);
5978 break;
5979 case UNSPEC_DTPOFF:
5980 fputs ("@DTPOFF", file);
5981 break;
5982 case UNSPEC_GOTNTPOFF:
5983 if (TARGET_64BIT)
5984 fputs ("@GOTTPOFF(%rip)", file);
5985 else
5986 fputs ("@GOTNTPOFF", file);
5987 break;
5988 case UNSPEC_INDNTPOFF:
5989 fputs ("@INDNTPOFF", file);
5990 break;
5991 default:
5992 output_operand_lossage ("invalid UNSPEC as operand");
5993 break;
5994 }
5995 break;
5996
5997 default:
5998 output_operand_lossage ("invalid expression as operand");
5999 }
6000 }
6001
6002 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
6003 We need to emit DTP-relative relocations. */
6004
6005 void
6006 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6007 {
6008 fputs (ASM_LONG, file);
6009 output_addr_const (file, x);
6010 fputs ("@DTPOFF", file);
6011 switch (size)
6012 {
6013 case 4:
6014 break;
6015 case 8:
6016 fputs (", 0", file);
6017 break;
6018 default:
6019 abort ();
6020 }
6021 }
6022
6023 /* In the name of slightly smaller debug output, and to cater to
6024 general assembler losage, recognize PIC+GOTOFF and turn it back
6025 into a direct symbol reference. */
6026
6027 static rtx
6028 ix86_delegitimize_address (rtx orig_x)
6029 {
6030 rtx x = orig_x, y;
6031
6032 if (GET_CODE (x) == MEM)
6033 x = XEXP (x, 0);
6034
6035 if (TARGET_64BIT)
6036 {
6037 if (GET_CODE (x) != CONST
6038 || GET_CODE (XEXP (x, 0)) != UNSPEC
6039 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6040 || GET_CODE (orig_x) != MEM)
6041 return orig_x;
6042 return XVECEXP (XEXP (x, 0), 0, 0);
6043 }
6044
6045 if (GET_CODE (x) != PLUS
6046 || GET_CODE (XEXP (x, 1)) != CONST)
6047 return orig_x;
6048
6049 if (GET_CODE (XEXP (x, 0)) == REG
6050 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6051 /* %ebx + GOT/GOTOFF */
6052 y = NULL;
6053 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6054 {
6055 /* %ebx + %reg * scale + GOT/GOTOFF */
6056 y = XEXP (x, 0);
6057 if (GET_CODE (XEXP (y, 0)) == REG
6058 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6059 y = XEXP (y, 1);
6060 else if (GET_CODE (XEXP (y, 1)) == REG
6061 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6062 y = XEXP (y, 0);
6063 else
6064 return orig_x;
6065 if (GET_CODE (y) != REG
6066 && GET_CODE (y) != MULT
6067 && GET_CODE (y) != ASHIFT)
6068 return orig_x;
6069 }
6070 else
6071 return orig_x;
6072
6073 x = XEXP (XEXP (x, 1), 0);
6074 if (GET_CODE (x) == UNSPEC
6075 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6076 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6077 {
6078 if (y)
6079 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6080 return XVECEXP (x, 0, 0);
6081 }
6082
6083 if (GET_CODE (x) == PLUS
6084 && GET_CODE (XEXP (x, 0)) == UNSPEC
6085 && GET_CODE (XEXP (x, 1)) == CONST_INT
6086 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6087 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6088 && GET_CODE (orig_x) != MEM)))
6089 {
6090 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6091 if (y)
6092 return gen_rtx_PLUS (Pmode, y, x);
6093 return x;
6094 }
6095
6096 return orig_x;
6097 }
6098 \f
6099 static void
6100 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6101 int fp, FILE *file)
6102 {
6103 const char *suffix;
6104
6105 if (mode == CCFPmode || mode == CCFPUmode)
6106 {
6107 enum rtx_code second_code, bypass_code;
6108 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6109 if (bypass_code != UNKNOWN || second_code != UNKNOWN)
6110 abort ();
6111 code = ix86_fp_compare_code_to_integer (code);
6112 mode = CCmode;
6113 }
6114 if (reverse)
6115 code = reverse_condition (code);
6116
6117 switch (code)
6118 {
6119 case EQ:
6120 suffix = "e";
6121 break;
6122 case NE:
6123 suffix = "ne";
6124 break;
6125 case GT:
6126 if (mode != CCmode && mode != CCNOmode && mode != CCGCmode)
6127 abort ();
6128 suffix = "g";
6129 break;
6130 case GTU:
6131 /* ??? Use "nbe" instead of "a" for fcmov losage on some assemblers.
6132 Those same assemblers have the same but opposite losage on cmov. */
6133 if (mode != CCmode)
6134 abort ();
6135 suffix = fp ? "nbe" : "a";
6136 break;
6137 case LT:
6138 if (mode == CCNOmode || mode == CCGOCmode)
6139 suffix = "s";
6140 else if (mode == CCmode || mode == CCGCmode)
6141 suffix = "l";
6142 else
6143 abort ();
6144 break;
6145 case LTU:
6146 if (mode != CCmode)
6147 abort ();
6148 suffix = "b";
6149 break;
6150 case GE:
6151 if (mode == CCNOmode || mode == CCGOCmode)
6152 suffix = "ns";
6153 else if (mode == CCmode || mode == CCGCmode)
6154 suffix = "ge";
6155 else
6156 abort ();
6157 break;
6158 case GEU:
6159 /* ??? As above. */
6160 if (mode != CCmode)
6161 abort ();
6162 suffix = fp ? "nb" : "ae";
6163 break;
6164 case LE:
6165 if (mode != CCmode && mode != CCGCmode && mode != CCNOmode)
6166 abort ();
6167 suffix = "le";
6168 break;
6169 case LEU:
6170 if (mode != CCmode)
6171 abort ();
6172 suffix = "be";
6173 break;
6174 case UNORDERED:
6175 suffix = fp ? "u" : "p";
6176 break;
6177 case ORDERED:
6178 suffix = fp ? "nu" : "np";
6179 break;
6180 default:
6181 abort ();
6182 }
6183 fputs (suffix, file);
6184 }
6185
6186 /* Print the name of register X to FILE based on its machine mode and number.
6187 If CODE is 'w', pretend the mode is HImode.
6188 If CODE is 'b', pretend the mode is QImode.
6189 If CODE is 'k', pretend the mode is SImode.
6190 If CODE is 'q', pretend the mode is DImode.
6191 If CODE is 'h', pretend the reg is the `high' byte register.
6192 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
6193
6194 void
6195 print_reg (rtx x, int code, FILE *file)
6196 {
6197 if (REGNO (x) == ARG_POINTER_REGNUM
6198 || REGNO (x) == FRAME_POINTER_REGNUM
6199 || REGNO (x) == FLAGS_REG
6200 || REGNO (x) == FPSR_REG)
6201 abort ();
6202
6203 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6204 putc ('%', file);
6205
6206 if (code == 'w' || MMX_REG_P (x))
6207 code = 2;
6208 else if (code == 'b')
6209 code = 1;
6210 else if (code == 'k')
6211 code = 4;
6212 else if (code == 'q')
6213 code = 8;
6214 else if (code == 'y')
6215 code = 3;
6216 else if (code == 'h')
6217 code = 0;
6218 else
6219 code = GET_MODE_SIZE (GET_MODE (x));
6220
6221 /* Irritatingly, AMD extended registers use different naming convention
6222 from the normal registers. */
6223 if (REX_INT_REG_P (x))
6224 {
6225 if (!TARGET_64BIT)
6226 abort ();
6227 switch (code)
6228 {
6229 case 0:
6230 error ("extended registers have no high halves");
6231 break;
6232 case 1:
6233 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6234 break;
6235 case 2:
6236 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6237 break;
6238 case 4:
6239 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6240 break;
6241 case 8:
6242 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6243 break;
6244 default:
6245 error ("unsupported operand size for extended register");
6246 break;
6247 }
6248 return;
6249 }
6250 switch (code)
6251 {
6252 case 3:
6253 if (STACK_TOP_P (x))
6254 {
6255 fputs ("st(0)", file);
6256 break;
6257 }
6258 /* FALLTHRU */
6259 case 8:
6260 case 4:
6261 case 12:
6262 if (! ANY_FP_REG_P (x))
6263 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6264 /* FALLTHRU */
6265 case 16:
6266 case 2:
6267 normal:
6268 fputs (hi_reg_name[REGNO (x)], file);
6269 break;
6270 case 1:
6271 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6272 goto normal;
6273 fputs (qi_reg_name[REGNO (x)], file);
6274 break;
6275 case 0:
6276 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6277 goto normal;
6278 fputs (qi_high_reg_name[REGNO (x)], file);
6279 break;
6280 default:
6281 abort ();
6282 }
6283 }
6284
6285 /* Locate some local-dynamic symbol still in use by this function
6286 so that we can print its name in some tls_local_dynamic_base
6287 pattern. */
6288
6289 static const char *
6290 get_some_local_dynamic_name (void)
6291 {
6292 rtx insn;
6293
6294 if (cfun->machine->some_ld_name)
6295 return cfun->machine->some_ld_name;
6296
6297 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6298 if (INSN_P (insn)
6299 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6300 return cfun->machine->some_ld_name;
6301
6302 abort ();
6303 }
6304
6305 static int
6306 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6307 {
6308 rtx x = *px;
6309
6310 if (GET_CODE (x) == SYMBOL_REF
6311 && local_dynamic_symbolic_operand (x, Pmode))
6312 {
6313 cfun->machine->some_ld_name = XSTR (x, 0);
6314 return 1;
6315 }
6316
6317 return 0;
6318 }
6319
6320 /* Meaning of CODE:
6321 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
6322 C -- print opcode suffix for set/cmov insn.
6323 c -- like C, but print reversed condition
6324 F,f -- likewise, but for floating-point.
6325 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
6326 otherwise nothing
6327 R -- print the prefix for register names.
6328 z -- print the opcode suffix for the size of the current operand.
6329 * -- print a star (in certain assembler syntax)
6330 A -- print an absolute memory reference.
6331 w -- print the operand as if it's a "word" (HImode) even if it isn't.
6332 s -- print a shift double count, followed by the assemblers argument
6333 delimiter.
6334 b -- print the QImode name of the register for the indicated operand.
6335 %b0 would print %al if operands[0] is reg 0.
6336 w -- likewise, print the HImode name of the register.
6337 k -- likewise, print the SImode name of the register.
6338 q -- likewise, print the DImode name of the register.
6339 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
6340 y -- print "st(0)" instead of "st" as a register.
6341 D -- print condition for SSE cmp instruction.
6342 P -- if PIC, print an @PLT suffix.
6343 X -- don't print any sort of PIC '@' suffix for a symbol.
6344 & -- print some in-use local-dynamic symbol name.
6345 H -- print a memory address offset by 8; used for sse high-parts
6346 */
6347
6348 void
6349 print_operand (FILE *file, rtx x, int code)
6350 {
6351 if (code)
6352 {
6353 switch (code)
6354 {
6355 case '*':
6356 if (ASSEMBLER_DIALECT == ASM_ATT)
6357 putc ('*', file);
6358 return;
6359
6360 case '&':
6361 assemble_name (file, get_some_local_dynamic_name ());
6362 return;
6363
6364 case 'A':
6365 if (ASSEMBLER_DIALECT == ASM_ATT)
6366 putc ('*', file);
6367 else if (ASSEMBLER_DIALECT == ASM_INTEL)
6368 {
6369 /* Intel syntax. For absolute addresses, registers should not
6370 be surrounded by braces. */
6371 if (GET_CODE (x) != REG)
6372 {
6373 putc ('[', file);
6374 PRINT_OPERAND (file, x, 0);
6375 putc (']', file);
6376 return;
6377 }
6378 }
6379 else
6380 abort ();
6381
6382 PRINT_OPERAND (file, x, 0);
6383 return;
6384
6385
6386 case 'L':
6387 if (ASSEMBLER_DIALECT == ASM_ATT)
6388 putc ('l', file);
6389 return;
6390
6391 case 'W':
6392 if (ASSEMBLER_DIALECT == ASM_ATT)
6393 putc ('w', file);
6394 return;
6395
6396 case 'B':
6397 if (ASSEMBLER_DIALECT == ASM_ATT)
6398 putc ('b', file);
6399 return;
6400
6401 case 'Q':
6402 if (ASSEMBLER_DIALECT == ASM_ATT)
6403 putc ('l', file);
6404 return;
6405
6406 case 'S':
6407 if (ASSEMBLER_DIALECT == ASM_ATT)
6408 putc ('s', file);
6409 return;
6410
6411 case 'T':
6412 if (ASSEMBLER_DIALECT == ASM_ATT)
6413 putc ('t', file);
6414 return;
6415
6416 case 'z':
6417 /* 387 opcodes don't get size suffixes if the operands are
6418 registers. */
6419 if (STACK_REG_P (x))
6420 return;
6421
6422 /* Likewise if using Intel opcodes. */
6423 if (ASSEMBLER_DIALECT == ASM_INTEL)
6424 return;
6425
6426 /* This is the size of op from size of operand. */
6427 switch (GET_MODE_SIZE (GET_MODE (x)))
6428 {
6429 case 2:
6430 #ifdef HAVE_GAS_FILDS_FISTS
6431 putc ('s', file);
6432 #endif
6433 return;
6434
6435 case 4:
6436 if (GET_MODE (x) == SFmode)
6437 {
6438 putc ('s', file);
6439 return;
6440 }
6441 else
6442 putc ('l', file);
6443 return;
6444
6445 case 12:
6446 case 16:
6447 putc ('t', file);
6448 return;
6449
6450 case 8:
6451 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
6452 {
6453 #ifdef GAS_MNEMONICS
6454 putc ('q', file);
6455 #else
6456 putc ('l', file);
6457 putc ('l', file);
6458 #endif
6459 }
6460 else
6461 putc ('l', file);
6462 return;
6463
6464 default:
6465 abort ();
6466 }
6467
6468 case 'b':
6469 case 'w':
6470 case 'k':
6471 case 'q':
6472 case 'h':
6473 case 'y':
6474 case 'X':
6475 case 'P':
6476 break;
6477
6478 case 's':
6479 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
6480 {
6481 PRINT_OPERAND (file, x, 0);
6482 putc (',', file);
6483 }
6484 return;
6485
6486 case 'D':
6487 /* Little bit of braindamage here. The SSE compare instructions
6488 does use completely different names for the comparisons that the
6489 fp conditional moves. */
6490 switch (GET_CODE (x))
6491 {
6492 case EQ:
6493 case UNEQ:
6494 fputs ("eq", file);
6495 break;
6496 case LT:
6497 case UNLT:
6498 fputs ("lt", file);
6499 break;
6500 case LE:
6501 case UNLE:
6502 fputs ("le", file);
6503 break;
6504 case UNORDERED:
6505 fputs ("unord", file);
6506 break;
6507 case NE:
6508 case LTGT:
6509 fputs ("neq", file);
6510 break;
6511 case UNGE:
6512 case GE:
6513 fputs ("nlt", file);
6514 break;
6515 case UNGT:
6516 case GT:
6517 fputs ("nle", file);
6518 break;
6519 case ORDERED:
6520 fputs ("ord", file);
6521 break;
6522 default:
6523 abort ();
6524 break;
6525 }
6526 return;
6527 case 'O':
6528 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6529 if (ASSEMBLER_DIALECT == ASM_ATT)
6530 {
6531 switch (GET_MODE (x))
6532 {
6533 case HImode: putc ('w', file); break;
6534 case SImode:
6535 case SFmode: putc ('l', file); break;
6536 case DImode:
6537 case DFmode: putc ('q', file); break;
6538 default: abort ();
6539 }
6540 putc ('.', file);
6541 }
6542 #endif
6543 return;
6544 case 'C':
6545 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
6546 return;
6547 case 'F':
6548 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6549 if (ASSEMBLER_DIALECT == ASM_ATT)
6550 putc ('.', file);
6551 #endif
6552 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
6553 return;
6554
6555 /* Like above, but reverse condition */
6556 case 'c':
6557 /* Check to see if argument to %c is really a constant
6558 and not a condition code which needs to be reversed. */
6559 if (!COMPARISON_P (x))
6560 {
6561 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
6562 return;
6563 }
6564 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
6565 return;
6566 case 'f':
6567 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6568 if (ASSEMBLER_DIALECT == ASM_ATT)
6569 putc ('.', file);
6570 #endif
6571 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
6572 return;
6573
6574 case 'H':
6575 /* It doesn't actually matter what mode we use here, as we're
6576 only going to use this for printing. */
6577 x = adjust_address_nv (x, DImode, 8);
6578 break;
6579
6580 case '+':
6581 {
6582 rtx x;
6583
6584 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
6585 return;
6586
6587 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
6588 if (x)
6589 {
6590 int pred_val = INTVAL (XEXP (x, 0));
6591
6592 if (pred_val < REG_BR_PROB_BASE * 45 / 100
6593 || pred_val > REG_BR_PROB_BASE * 55 / 100)
6594 {
6595 int taken = pred_val > REG_BR_PROB_BASE / 2;
6596 int cputaken = final_forward_branch_p (current_output_insn) == 0;
6597
6598 /* Emit hints only in the case default branch prediction
6599 heuristics would fail. */
6600 if (taken != cputaken)
6601 {
6602 /* We use 3e (DS) prefix for taken branches and
6603 2e (CS) prefix for not taken branches. */
6604 if (taken)
6605 fputs ("ds ; ", file);
6606 else
6607 fputs ("cs ; ", file);
6608 }
6609 }
6610 }
6611 return;
6612 }
6613 default:
6614 output_operand_lossage ("invalid operand code '%c'", code);
6615 }
6616 }
6617
6618 if (GET_CODE (x) == REG)
6619 print_reg (x, code, file);
6620
6621 else if (GET_CODE (x) == MEM)
6622 {
6623 /* No `byte ptr' prefix for call instructions. */
6624 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
6625 {
6626 const char * size;
6627 switch (GET_MODE_SIZE (GET_MODE (x)))
6628 {
6629 case 1: size = "BYTE"; break;
6630 case 2: size = "WORD"; break;
6631 case 4: size = "DWORD"; break;
6632 case 8: size = "QWORD"; break;
6633 case 12: size = "XWORD"; break;
6634 case 16: size = "XMMWORD"; break;
6635 default:
6636 abort ();
6637 }
6638
6639 /* Check for explicit size override (codes 'b', 'w' and 'k') */
6640 if (code == 'b')
6641 size = "BYTE";
6642 else if (code == 'w')
6643 size = "WORD";
6644 else if (code == 'k')
6645 size = "DWORD";
6646
6647 fputs (size, file);
6648 fputs (" PTR ", file);
6649 }
6650
6651 x = XEXP (x, 0);
6652 /* Avoid (%rip) for call operands. */
6653 if (CONSTANT_ADDRESS_P (x) && code == 'P'
6654 && GET_CODE (x) != CONST_INT)
6655 output_addr_const (file, x);
6656 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
6657 output_operand_lossage ("invalid constraints for operand");
6658 else
6659 output_address (x);
6660 }
6661
6662 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
6663 {
6664 REAL_VALUE_TYPE r;
6665 long l;
6666
6667 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6668 REAL_VALUE_TO_TARGET_SINGLE (r, l);
6669
6670 if (ASSEMBLER_DIALECT == ASM_ATT)
6671 putc ('$', file);
6672 fprintf (file, "0x%08lx", l);
6673 }
6674
6675 /* These float cases don't actually occur as immediate operands. */
6676 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
6677 {
6678 char dstr[30];
6679
6680 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6681 fprintf (file, "%s", dstr);
6682 }
6683
6684 else if (GET_CODE (x) == CONST_DOUBLE
6685 && GET_MODE (x) == XFmode)
6686 {
6687 char dstr[30];
6688
6689 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6690 fprintf (file, "%s", dstr);
6691 }
6692
6693 else
6694 {
6695 if (code != 'P')
6696 {
6697 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
6698 {
6699 if (ASSEMBLER_DIALECT == ASM_ATT)
6700 putc ('$', file);
6701 }
6702 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
6703 || GET_CODE (x) == LABEL_REF)
6704 {
6705 if (ASSEMBLER_DIALECT == ASM_ATT)
6706 putc ('$', file);
6707 else
6708 fputs ("OFFSET FLAT:", file);
6709 }
6710 }
6711 if (GET_CODE (x) == CONST_INT)
6712 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6713 else if (flag_pic)
6714 output_pic_addr_const (file, x, code);
6715 else
6716 output_addr_const (file, x);
6717 }
6718 }
6719 \f
6720 /* Print a memory operand whose address is ADDR. */
6721
6722 void
6723 print_operand_address (FILE *file, rtx addr)
6724 {
6725 struct ix86_address parts;
6726 rtx base, index, disp;
6727 int scale;
6728
6729 if (! ix86_decompose_address (addr, &parts))
6730 abort ();
6731
6732 base = parts.base;
6733 index = parts.index;
6734 disp = parts.disp;
6735 scale = parts.scale;
6736
6737 switch (parts.seg)
6738 {
6739 case SEG_DEFAULT:
6740 break;
6741 case SEG_FS:
6742 case SEG_GS:
6743 if (USER_LABEL_PREFIX[0] == 0)
6744 putc ('%', file);
6745 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
6746 break;
6747 default:
6748 abort ();
6749 }
6750
6751 if (!base && !index)
6752 {
6753 /* Displacement only requires special attention. */
6754
6755 if (GET_CODE (disp) == CONST_INT)
6756 {
6757 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
6758 {
6759 if (USER_LABEL_PREFIX[0] == 0)
6760 putc ('%', file);
6761 fputs ("ds:", file);
6762 }
6763 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
6764 }
6765 else if (flag_pic)
6766 output_pic_addr_const (file, disp, 0);
6767 else
6768 output_addr_const (file, disp);
6769
6770 /* Use one byte shorter RIP relative addressing for 64bit mode. */
6771 if (TARGET_64BIT
6772 && ((GET_CODE (disp) == SYMBOL_REF
6773 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
6774 || GET_CODE (disp) == LABEL_REF
6775 || (GET_CODE (disp) == CONST
6776 && GET_CODE (XEXP (disp, 0)) == PLUS
6777 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
6778 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
6779 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
6780 fputs ("(%rip)", file);
6781 }
6782 else
6783 {
6784 if (ASSEMBLER_DIALECT == ASM_ATT)
6785 {
6786 if (disp)
6787 {
6788 if (flag_pic)
6789 output_pic_addr_const (file, disp, 0);
6790 else if (GET_CODE (disp) == LABEL_REF)
6791 output_asm_label (disp);
6792 else
6793 output_addr_const (file, disp);
6794 }
6795
6796 putc ('(', file);
6797 if (base)
6798 print_reg (base, 0, file);
6799 if (index)
6800 {
6801 putc (',', file);
6802 print_reg (index, 0, file);
6803 if (scale != 1)
6804 fprintf (file, ",%d", scale);
6805 }
6806 putc (')', file);
6807 }
6808 else
6809 {
6810 rtx offset = NULL_RTX;
6811
6812 if (disp)
6813 {
6814 /* Pull out the offset of a symbol; print any symbol itself. */
6815 if (GET_CODE (disp) == CONST
6816 && GET_CODE (XEXP (disp, 0)) == PLUS
6817 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
6818 {
6819 offset = XEXP (XEXP (disp, 0), 1);
6820 disp = gen_rtx_CONST (VOIDmode,
6821 XEXP (XEXP (disp, 0), 0));
6822 }
6823
6824 if (flag_pic)
6825 output_pic_addr_const (file, disp, 0);
6826 else if (GET_CODE (disp) == LABEL_REF)
6827 output_asm_label (disp);
6828 else if (GET_CODE (disp) == CONST_INT)
6829 offset = disp;
6830 else
6831 output_addr_const (file, disp);
6832 }
6833
6834 putc ('[', file);
6835 if (base)
6836 {
6837 print_reg (base, 0, file);
6838 if (offset)
6839 {
6840 if (INTVAL (offset) >= 0)
6841 putc ('+', file);
6842 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6843 }
6844 }
6845 else if (offset)
6846 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6847 else
6848 putc ('0', file);
6849
6850 if (index)
6851 {
6852 putc ('+', file);
6853 print_reg (index, 0, file);
6854 if (scale != 1)
6855 fprintf (file, "*%d", scale);
6856 }
6857 putc (']', file);
6858 }
6859 }
6860 }
6861
6862 bool
6863 output_addr_const_extra (FILE *file, rtx x)
6864 {
6865 rtx op;
6866
6867 if (GET_CODE (x) != UNSPEC)
6868 return false;
6869
6870 op = XVECEXP (x, 0, 0);
6871 switch (XINT (x, 1))
6872 {
6873 case UNSPEC_GOTTPOFF:
6874 output_addr_const (file, op);
6875 /* FIXME: This might be @TPOFF in Sun ld. */
6876 fputs ("@GOTTPOFF", file);
6877 break;
6878 case UNSPEC_TPOFF:
6879 output_addr_const (file, op);
6880 fputs ("@TPOFF", file);
6881 break;
6882 case UNSPEC_NTPOFF:
6883 output_addr_const (file, op);
6884 if (TARGET_64BIT)
6885 fputs ("@TPOFF", file);
6886 else
6887 fputs ("@NTPOFF", file);
6888 break;
6889 case UNSPEC_DTPOFF:
6890 output_addr_const (file, op);
6891 fputs ("@DTPOFF", file);
6892 break;
6893 case UNSPEC_GOTNTPOFF:
6894 output_addr_const (file, op);
6895 if (TARGET_64BIT)
6896 fputs ("@GOTTPOFF(%rip)", file);
6897 else
6898 fputs ("@GOTNTPOFF", file);
6899 break;
6900 case UNSPEC_INDNTPOFF:
6901 output_addr_const (file, op);
6902 fputs ("@INDNTPOFF", file);
6903 break;
6904
6905 default:
6906 return false;
6907 }
6908
6909 return true;
6910 }
6911 \f
6912 /* Split one or more DImode RTL references into pairs of SImode
6913 references. The RTL can be REG, offsettable MEM, integer constant, or
6914 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
6915 split and "num" is its length. lo_half and hi_half are output arrays
6916 that parallel "operands". */
6917
6918 void
6919 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6920 {
6921 while (num--)
6922 {
6923 rtx op = operands[num];
6924
6925 /* simplify_subreg refuse to split volatile memory addresses,
6926 but we still have to handle it. */
6927 if (GET_CODE (op) == MEM)
6928 {
6929 lo_half[num] = adjust_address (op, SImode, 0);
6930 hi_half[num] = adjust_address (op, SImode, 4);
6931 }
6932 else
6933 {
6934 lo_half[num] = simplify_gen_subreg (SImode, op,
6935 GET_MODE (op) == VOIDmode
6936 ? DImode : GET_MODE (op), 0);
6937 hi_half[num] = simplify_gen_subreg (SImode, op,
6938 GET_MODE (op) == VOIDmode
6939 ? DImode : GET_MODE (op), 4);
6940 }
6941 }
6942 }
6943 /* Split one or more TImode RTL references into pairs of SImode
6944 references. The RTL can be REG, offsettable MEM, integer constant, or
6945 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
6946 split and "num" is its length. lo_half and hi_half are output arrays
6947 that parallel "operands". */
6948
6949 void
6950 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6951 {
6952 while (num--)
6953 {
6954 rtx op = operands[num];
6955
6956 /* simplify_subreg refuse to split volatile memory addresses, but we
6957 still have to handle it. */
6958 if (GET_CODE (op) == MEM)
6959 {
6960 lo_half[num] = adjust_address (op, DImode, 0);
6961 hi_half[num] = adjust_address (op, DImode, 8);
6962 }
6963 else
6964 {
6965 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
6966 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
6967 }
6968 }
6969 }
6970 \f
6971 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
6972 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
6973 is the expression of the binary operation. The output may either be
6974 emitted here, or returned to the caller, like all output_* functions.
6975
6976 There is no guarantee that the operands are the same mode, as they
6977 might be within FLOAT or FLOAT_EXTEND expressions. */
6978
6979 #ifndef SYSV386_COMPAT
6980 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
6981 wants to fix the assemblers because that causes incompatibility
6982 with gcc. No-one wants to fix gcc because that causes
6983 incompatibility with assemblers... You can use the option of
6984 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
6985 #define SYSV386_COMPAT 1
6986 #endif
6987
6988 const char *
6989 output_387_binary_op (rtx insn, rtx *operands)
6990 {
6991 static char buf[30];
6992 const char *p;
6993 const char *ssep;
6994 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
6995
6996 #ifdef ENABLE_CHECKING
6997 /* Even if we do not want to check the inputs, this documents input
6998 constraints. Which helps in understanding the following code. */
6999 if (STACK_REG_P (operands[0])
7000 && ((REG_P (operands[1])
7001 && REGNO (operands[0]) == REGNO (operands[1])
7002 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7003 || (REG_P (operands[2])
7004 && REGNO (operands[0]) == REGNO (operands[2])
7005 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7006 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7007 ; /* ok */
7008 else if (!is_sse)
7009 abort ();
7010 #endif
7011
7012 switch (GET_CODE (operands[3]))
7013 {
7014 case PLUS:
7015 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7016 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7017 p = "fiadd";
7018 else
7019 p = "fadd";
7020 ssep = "add";
7021 break;
7022
7023 case MINUS:
7024 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7025 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7026 p = "fisub";
7027 else
7028 p = "fsub";
7029 ssep = "sub";
7030 break;
7031
7032 case MULT:
7033 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7034 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7035 p = "fimul";
7036 else
7037 p = "fmul";
7038 ssep = "mul";
7039 break;
7040
7041 case DIV:
7042 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7043 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7044 p = "fidiv";
7045 else
7046 p = "fdiv";
7047 ssep = "div";
7048 break;
7049
7050 default:
7051 abort ();
7052 }
7053
7054 if (is_sse)
7055 {
7056 strcpy (buf, ssep);
7057 if (GET_MODE (operands[0]) == SFmode)
7058 strcat (buf, "ss\t{%2, %0|%0, %2}");
7059 else
7060 strcat (buf, "sd\t{%2, %0|%0, %2}");
7061 return buf;
7062 }
7063 strcpy (buf, p);
7064
7065 switch (GET_CODE (operands[3]))
7066 {
7067 case MULT:
7068 case PLUS:
7069 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7070 {
7071 rtx temp = operands[2];
7072 operands[2] = operands[1];
7073 operands[1] = temp;
7074 }
7075
7076 /* know operands[0] == operands[1]. */
7077
7078 if (GET_CODE (operands[2]) == MEM)
7079 {
7080 p = "%z2\t%2";
7081 break;
7082 }
7083
7084 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7085 {
7086 if (STACK_TOP_P (operands[0]))
7087 /* How is it that we are storing to a dead operand[2]?
7088 Well, presumably operands[1] is dead too. We can't
7089 store the result to st(0) as st(0) gets popped on this
7090 instruction. Instead store to operands[2] (which I
7091 think has to be st(1)). st(1) will be popped later.
7092 gcc <= 2.8.1 didn't have this check and generated
7093 assembly code that the Unixware assembler rejected. */
7094 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7095 else
7096 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7097 break;
7098 }
7099
7100 if (STACK_TOP_P (operands[0]))
7101 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7102 else
7103 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7104 break;
7105
7106 case MINUS:
7107 case DIV:
7108 if (GET_CODE (operands[1]) == MEM)
7109 {
7110 p = "r%z1\t%1";
7111 break;
7112 }
7113
7114 if (GET_CODE (operands[2]) == MEM)
7115 {
7116 p = "%z2\t%2";
7117 break;
7118 }
7119
7120 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7121 {
7122 #if SYSV386_COMPAT
7123 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7124 derived assemblers, confusingly reverse the direction of
7125 the operation for fsub{r} and fdiv{r} when the
7126 destination register is not st(0). The Intel assembler
7127 doesn't have this brain damage. Read !SYSV386_COMPAT to
7128 figure out what the hardware really does. */
7129 if (STACK_TOP_P (operands[0]))
7130 p = "{p\t%0, %2|rp\t%2, %0}";
7131 else
7132 p = "{rp\t%2, %0|p\t%0, %2}";
7133 #else
7134 if (STACK_TOP_P (operands[0]))
7135 /* As above for fmul/fadd, we can't store to st(0). */
7136 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7137 else
7138 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7139 #endif
7140 break;
7141 }
7142
7143 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7144 {
7145 #if SYSV386_COMPAT
7146 if (STACK_TOP_P (operands[0]))
7147 p = "{rp\t%0, %1|p\t%1, %0}";
7148 else
7149 p = "{p\t%1, %0|rp\t%0, %1}";
7150 #else
7151 if (STACK_TOP_P (operands[0]))
7152 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7153 else
7154 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7155 #endif
7156 break;
7157 }
7158
7159 if (STACK_TOP_P (operands[0]))
7160 {
7161 if (STACK_TOP_P (operands[1]))
7162 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7163 else
7164 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7165 break;
7166 }
7167 else if (STACK_TOP_P (operands[1]))
7168 {
7169 #if SYSV386_COMPAT
7170 p = "{\t%1, %0|r\t%0, %1}";
7171 #else
7172 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7173 #endif
7174 }
7175 else
7176 {
7177 #if SYSV386_COMPAT
7178 p = "{r\t%2, %0|\t%0, %2}";
7179 #else
7180 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7181 #endif
7182 }
7183 break;
7184
7185 default:
7186 abort ();
7187 }
7188
7189 strcat (buf, p);
7190 return buf;
7191 }
7192
7193 /* Output code to initialize control word copies used by trunc?f?i and
7194 rounding patterns. CURRENT_MODE is set to current control word,
7195 while NEW_MODE is set to new control word. */
7196
7197 void
7198 emit_i387_cw_initialization (rtx current_mode, rtx new_mode, int mode)
7199 {
7200 rtx reg = gen_reg_rtx (HImode);
7201
7202 emit_insn (gen_x86_fnstcw_1 (current_mode));
7203 emit_move_insn (reg, current_mode);
7204
7205 if (!TARGET_PARTIAL_REG_STALL && !optimize_size
7206 && !TARGET_64BIT)
7207 {
7208 switch (mode)
7209 {
7210 case I387_CW_FLOOR:
7211 /* round down toward -oo */
7212 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
7213 break;
7214
7215 case I387_CW_CEIL:
7216 /* round up toward +oo */
7217 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
7218 break;
7219
7220 case I387_CW_TRUNC:
7221 /* round toward zero (truncate) */
7222 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
7223 break;
7224
7225 case I387_CW_MASK_PM:
7226 /* mask precision exception for nearbyint() */
7227 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7228 break;
7229
7230 default:
7231 abort();
7232 }
7233 }
7234 else
7235 {
7236 switch (mode)
7237 {
7238 case I387_CW_FLOOR:
7239 /* round down toward -oo */
7240 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7241 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7242 break;
7243
7244 case I387_CW_CEIL:
7245 /* round up toward +oo */
7246 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7247 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7248 break;
7249
7250 case I387_CW_TRUNC:
7251 /* round toward zero (truncate) */
7252 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7253 break;
7254
7255 case I387_CW_MASK_PM:
7256 /* mask precision exception for nearbyint() */
7257 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7258 break;
7259
7260 default:
7261 abort();
7262 }
7263 }
7264
7265 emit_move_insn (new_mode, reg);
7266 }
7267
7268 /* Output code for INSN to convert a float to a signed int. OPERANDS
7269 are the insn operands. The output may be [HSD]Imode and the input
7270 operand may be [SDX]Fmode. */
7271
7272 const char *
7273 output_fix_trunc (rtx insn, rtx *operands)
7274 {
7275 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7276 int dimode_p = GET_MODE (operands[0]) == DImode;
7277
7278 /* Jump through a hoop or two for DImode, since the hardware has no
7279 non-popping instruction. We used to do this a different way, but
7280 that was somewhat fragile and broke with post-reload splitters. */
7281 if (dimode_p && !stack_top_dies)
7282 output_asm_insn ("fld\t%y1", operands);
7283
7284 if (!STACK_TOP_P (operands[1]))
7285 abort ();
7286
7287 if (GET_CODE (operands[0]) != MEM)
7288 abort ();
7289
7290 output_asm_insn ("fldcw\t%3", operands);
7291 if (stack_top_dies || dimode_p)
7292 output_asm_insn ("fistp%z0\t%0", operands);
7293 else
7294 output_asm_insn ("fist%z0\t%0", operands);
7295 output_asm_insn ("fldcw\t%2", operands);
7296
7297 return "";
7298 }
7299
7300 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
7301 should be used. UNORDERED_P is true when fucom should be used. */
7302
7303 const char *
7304 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
7305 {
7306 int stack_top_dies;
7307 rtx cmp_op0, cmp_op1;
7308 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
7309
7310 if (eflags_p)
7311 {
7312 cmp_op0 = operands[0];
7313 cmp_op1 = operands[1];
7314 }
7315 else
7316 {
7317 cmp_op0 = operands[1];
7318 cmp_op1 = operands[2];
7319 }
7320
7321 if (is_sse)
7322 {
7323 if (GET_MODE (operands[0]) == SFmode)
7324 if (unordered_p)
7325 return "ucomiss\t{%1, %0|%0, %1}";
7326 else
7327 return "comiss\t{%1, %0|%0, %1}";
7328 else
7329 if (unordered_p)
7330 return "ucomisd\t{%1, %0|%0, %1}";
7331 else
7332 return "comisd\t{%1, %0|%0, %1}";
7333 }
7334
7335 if (! STACK_TOP_P (cmp_op0))
7336 abort ();
7337
7338 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7339
7340 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
7341 {
7342 if (stack_top_dies)
7343 {
7344 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
7345 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
7346 }
7347 else
7348 return "ftst\n\tfnstsw\t%0";
7349 }
7350
7351 if (STACK_REG_P (cmp_op1)
7352 && stack_top_dies
7353 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
7354 && REGNO (cmp_op1) != FIRST_STACK_REG)
7355 {
7356 /* If both the top of the 387 stack dies, and the other operand
7357 is also a stack register that dies, then this must be a
7358 `fcompp' float compare */
7359
7360 if (eflags_p)
7361 {
7362 /* There is no double popping fcomi variant. Fortunately,
7363 eflags is immune from the fstp's cc clobbering. */
7364 if (unordered_p)
7365 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
7366 else
7367 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
7368 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
7369 }
7370 else
7371 {
7372 if (unordered_p)
7373 return "fucompp\n\tfnstsw\t%0";
7374 else
7375 return "fcompp\n\tfnstsw\t%0";
7376 }
7377 }
7378 else
7379 {
7380 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
7381
7382 static const char * const alt[16] =
7383 {
7384 "fcom%z2\t%y2\n\tfnstsw\t%0",
7385 "fcomp%z2\t%y2\n\tfnstsw\t%0",
7386 "fucom%z2\t%y2\n\tfnstsw\t%0",
7387 "fucomp%z2\t%y2\n\tfnstsw\t%0",
7388
7389 "ficom%z2\t%y2\n\tfnstsw\t%0",
7390 "ficomp%z2\t%y2\n\tfnstsw\t%0",
7391 NULL,
7392 NULL,
7393
7394 "fcomi\t{%y1, %0|%0, %y1}",
7395 "fcomip\t{%y1, %0|%0, %y1}",
7396 "fucomi\t{%y1, %0|%0, %y1}",
7397 "fucomip\t{%y1, %0|%0, %y1}",
7398
7399 NULL,
7400 NULL,
7401 NULL,
7402 NULL
7403 };
7404
7405 int mask;
7406 const char *ret;
7407
7408 mask = eflags_p << 3;
7409 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
7410 mask |= unordered_p << 1;
7411 mask |= stack_top_dies;
7412
7413 if (mask >= 16)
7414 abort ();
7415 ret = alt[mask];
7416 if (ret == NULL)
7417 abort ();
7418
7419 return ret;
7420 }
7421 }
7422
7423 void
7424 ix86_output_addr_vec_elt (FILE *file, int value)
7425 {
7426 const char *directive = ASM_LONG;
7427
7428 if (TARGET_64BIT)
7429 {
7430 #ifdef ASM_QUAD
7431 directive = ASM_QUAD;
7432 #else
7433 abort ();
7434 #endif
7435 }
7436
7437 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
7438 }
7439
7440 void
7441 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
7442 {
7443 if (TARGET_64BIT)
7444 fprintf (file, "%s%s%d-%s%d\n",
7445 ASM_LONG, LPREFIX, value, LPREFIX, rel);
7446 else if (HAVE_AS_GOTOFF_IN_DATA)
7447 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
7448 #if TARGET_MACHO
7449 else if (TARGET_MACHO)
7450 {
7451 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
7452 machopic_output_function_base_name (file);
7453 fprintf(file, "\n");
7454 }
7455 #endif
7456 else
7457 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
7458 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
7459 }
7460 \f
7461 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
7462 for the target. */
7463
7464 void
7465 ix86_expand_clear (rtx dest)
7466 {
7467 rtx tmp;
7468
7469 /* We play register width games, which are only valid after reload. */
7470 if (!reload_completed)
7471 abort ();
7472
7473 /* Avoid HImode and its attendant prefix byte. */
7474 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
7475 dest = gen_rtx_REG (SImode, REGNO (dest));
7476
7477 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
7478
7479 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
7480 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
7481 {
7482 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
7483 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
7484 }
7485
7486 emit_insn (tmp);
7487 }
7488
7489 /* X is an unchanging MEM. If it is a constant pool reference, return
7490 the constant pool rtx, else NULL. */
7491
7492 rtx
7493 maybe_get_pool_constant (rtx x)
7494 {
7495 x = ix86_delegitimize_address (XEXP (x, 0));
7496
7497 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
7498 return get_pool_constant (x);
7499
7500 return NULL_RTX;
7501 }
7502
7503 void
7504 ix86_expand_move (enum machine_mode mode, rtx operands[])
7505 {
7506 int strict = (reload_in_progress || reload_completed);
7507 rtx op0, op1;
7508 enum tls_model model;
7509
7510 op0 = operands[0];
7511 op1 = operands[1];
7512
7513 if (GET_CODE (op1) == SYMBOL_REF)
7514 {
7515 model = SYMBOL_REF_TLS_MODEL (op1);
7516 if (model)
7517 {
7518 op1 = legitimize_tls_address (op1, model, true);
7519 op1 = force_operand (op1, op0);
7520 if (op1 == op0)
7521 return;
7522 }
7523 }
7524 else if (GET_CODE (op1) == CONST
7525 && GET_CODE (XEXP (op1, 0)) == PLUS
7526 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
7527 {
7528 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
7529 if (model)
7530 {
7531 rtx addend = XEXP (XEXP (op1, 0), 1);
7532 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
7533 op1 = force_operand (op1, NULL);
7534 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
7535 op0, 1, OPTAB_DIRECT);
7536 if (op1 == op0)
7537 return;
7538 }
7539 }
7540
7541 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
7542 {
7543 #if TARGET_MACHO
7544 if (MACHOPIC_PURE)
7545 {
7546 rtx temp = ((reload_in_progress
7547 || ((op0 && GET_CODE (op0) == REG)
7548 && mode == Pmode))
7549 ? op0 : gen_reg_rtx (Pmode));
7550 op1 = machopic_indirect_data_reference (op1, temp);
7551 op1 = machopic_legitimize_pic_address (op1, mode,
7552 temp == op1 ? 0 : temp);
7553 }
7554 else if (MACHOPIC_INDIRECT)
7555 op1 = machopic_indirect_data_reference (op1, 0);
7556 if (op0 == op1)
7557 return;
7558 #else
7559 if (GET_CODE (op0) == MEM)
7560 op1 = force_reg (Pmode, op1);
7561 else
7562 op1 = legitimize_address (op1, op1, Pmode);
7563 #endif /* TARGET_MACHO */
7564 }
7565 else
7566 {
7567 if (GET_CODE (op0) == MEM
7568 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
7569 || !push_operand (op0, mode))
7570 && GET_CODE (op1) == MEM)
7571 op1 = force_reg (mode, op1);
7572
7573 if (push_operand (op0, mode)
7574 && ! general_no_elim_operand (op1, mode))
7575 op1 = copy_to_mode_reg (mode, op1);
7576
7577 /* Force large constants in 64bit compilation into register
7578 to get them CSEed. */
7579 if (TARGET_64BIT && mode == DImode
7580 && immediate_operand (op1, mode)
7581 && !x86_64_zext_immediate_operand (op1, VOIDmode)
7582 && !register_operand (op0, mode)
7583 && optimize && !reload_completed && !reload_in_progress)
7584 op1 = copy_to_mode_reg (mode, op1);
7585
7586 if (FLOAT_MODE_P (mode))
7587 {
7588 /* If we are loading a floating point constant to a register,
7589 force the value to memory now, since we'll get better code
7590 out the back end. */
7591
7592 if (strict)
7593 ;
7594 else if (GET_CODE (op1) == CONST_DOUBLE)
7595 {
7596 op1 = validize_mem (force_const_mem (mode, op1));
7597 if (!register_operand (op0, mode))
7598 {
7599 rtx temp = gen_reg_rtx (mode);
7600 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
7601 emit_move_insn (op0, temp);
7602 return;
7603 }
7604 }
7605 }
7606 }
7607
7608 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7609 }
7610
7611 void
7612 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
7613 {
7614 rtx op0 = operands[0], op1 = operands[1];
7615
7616 /* Force constants other than zero into memory. We do not know how
7617 the instructions used to build constants modify the upper 64 bits
7618 of the register, once we have that information we may be able
7619 to handle some of them more efficiently. */
7620 if ((reload_in_progress | reload_completed) == 0
7621 && register_operand (op0, mode)
7622 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
7623 op1 = validize_mem (force_const_mem (mode, op1));
7624
7625 /* Make operand1 a register if it isn't already. */
7626 if (!no_new_pseudos
7627 && !register_operand (op0, mode)
7628 && !register_operand (op1, mode))
7629 {
7630 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
7631 return;
7632 }
7633
7634 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7635 }
7636
7637 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
7638 straight to ix86_expand_vector_move. */
7639
7640 void
7641 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
7642 {
7643 rtx op0, op1, m;
7644
7645 op0 = operands[0];
7646 op1 = operands[1];
7647
7648 if (MEM_P (op1))
7649 {
7650 /* If we're optimizing for size, movups is the smallest. */
7651 if (optimize_size)
7652 {
7653 op0 = gen_lowpart (V4SFmode, op0);
7654 op1 = gen_lowpart (V4SFmode, op1);
7655 emit_insn (gen_sse_movups (op0, op1));
7656 return;
7657 }
7658
7659 /* ??? If we have typed data, then it would appear that using
7660 movdqu is the only way to get unaligned data loaded with
7661 integer type. */
7662 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7663 {
7664 op0 = gen_lowpart (V16QImode, op0);
7665 op1 = gen_lowpart (V16QImode, op1);
7666 emit_insn (gen_sse2_movdqu (op0, op1));
7667 return;
7668 }
7669
7670 if (TARGET_SSE2 && mode == V2DFmode)
7671 {
7672 rtx zero;
7673
7674 /* When SSE registers are split into halves, we can avoid
7675 writing to the top half twice. */
7676 if (TARGET_SSE_SPLIT_REGS)
7677 {
7678 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
7679 zero = op0;
7680 }
7681 else
7682 {
7683 /* ??? Not sure about the best option for the Intel chips.
7684 The following would seem to satisfy; the register is
7685 entirely cleared, breaking the dependency chain. We
7686 then store to the upper half, with a dependency depth
7687 of one. A rumor has it that Intel recommends two movsd
7688 followed by an unpacklpd, but this is unconfirmed. And
7689 given that the dependency depth of the unpacklpd would
7690 still be one, I'm not sure why this would be better. */
7691 zero = CONST0_RTX (V2DFmode);
7692 }
7693
7694 m = adjust_address (op1, DFmode, 0);
7695 emit_insn (gen_sse2_loadlpd (op0, zero, m));
7696 m = adjust_address (op1, DFmode, 8);
7697 emit_insn (gen_sse2_loadhpd (op0, op0, m));
7698 }
7699 else
7700 {
7701 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
7702 emit_move_insn (op0, CONST0_RTX (mode));
7703 else
7704 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
7705
7706 m = adjust_address (op1, V2SFmode, 0);
7707 emit_insn (gen_sse_loadlps (op0, op0, m));
7708 m = adjust_address (op1, V2SFmode, 8);
7709 emit_insn (gen_sse_loadhps (op0, op0, m));
7710 }
7711 }
7712 else if (MEM_P (op0))
7713 {
7714 /* If we're optimizing for size, movups is the smallest. */
7715 if (optimize_size)
7716 {
7717 op0 = gen_lowpart (V4SFmode, op0);
7718 op1 = gen_lowpart (V4SFmode, op1);
7719 emit_insn (gen_sse_movups (op0, op1));
7720 return;
7721 }
7722
7723 /* ??? Similar to above, only less clear because of quote
7724 typeless stores unquote. */
7725 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
7726 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7727 {
7728 op0 = gen_lowpart (V16QImode, op0);
7729 op1 = gen_lowpart (V16QImode, op1);
7730 emit_insn (gen_sse2_movdqu (op0, op1));
7731 return;
7732 }
7733
7734 if (TARGET_SSE2 && mode == V2DFmode)
7735 {
7736 m = adjust_address (op0, DFmode, 0);
7737 emit_insn (gen_sse2_storelpd (m, op1));
7738 m = adjust_address (op0, DFmode, 8);
7739 emit_insn (gen_sse2_storehpd (m, op1));
7740 }
7741 else
7742 {
7743 if (mode != V4SFmode)
7744 op1 = gen_lowpart (V4SFmode, op1);
7745 m = adjust_address (op0, V2SFmode, 0);
7746 emit_insn (gen_sse_storelps (m, op1));
7747 m = adjust_address (op0, V2SFmode, 8);
7748 emit_insn (gen_sse_storehps (m, op1));
7749 }
7750 }
7751 else
7752 gcc_unreachable ();
7753 }
7754
7755
7756 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
7757 destination to use for the operation. If different from the true
7758 destination in operands[0], a copy operation will be required. */
7759
7760 rtx
7761 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
7762 rtx operands[])
7763 {
7764 int matching_memory;
7765 rtx src1, src2, dst;
7766
7767 dst = operands[0];
7768 src1 = operands[1];
7769 src2 = operands[2];
7770
7771 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
7772 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7773 && (rtx_equal_p (dst, src2)
7774 || immediate_operand (src1, mode)))
7775 {
7776 rtx temp = src1;
7777 src1 = src2;
7778 src2 = temp;
7779 }
7780
7781 /* If the destination is memory, and we do not have matching source
7782 operands, do things in registers. */
7783 matching_memory = 0;
7784 if (GET_CODE (dst) == MEM)
7785 {
7786 if (rtx_equal_p (dst, src1))
7787 matching_memory = 1;
7788 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7789 && rtx_equal_p (dst, src2))
7790 matching_memory = 2;
7791 else
7792 dst = gen_reg_rtx (mode);
7793 }
7794
7795 /* Both source operands cannot be in memory. */
7796 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
7797 {
7798 if (matching_memory != 2)
7799 src2 = force_reg (mode, src2);
7800 else
7801 src1 = force_reg (mode, src1);
7802 }
7803
7804 /* If the operation is not commutable, source 1 cannot be a constant
7805 or non-matching memory. */
7806 if ((CONSTANT_P (src1)
7807 || (!matching_memory && GET_CODE (src1) == MEM))
7808 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7809 src1 = force_reg (mode, src1);
7810
7811 /* If optimizing, copy to regs to improve CSE */
7812 if (optimize && ! no_new_pseudos)
7813 {
7814 if (GET_CODE (dst) == MEM)
7815 dst = gen_reg_rtx (mode);
7816 if (GET_CODE (src1) == MEM)
7817 src1 = force_reg (mode, src1);
7818 if (GET_CODE (src2) == MEM)
7819 src2 = force_reg (mode, src2);
7820 }
7821
7822 src1 = operands[1] = src1;
7823 src2 = operands[2] = src2;
7824 return dst;
7825 }
7826
7827 /* Similarly, but assume that the destination has already been
7828 set up properly. */
7829
7830 void
7831 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
7832 enum machine_mode mode, rtx operands[])
7833 {
7834 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
7835 gcc_assert (dst == operands[0]);
7836 }
7837
7838 /* Attempt to expand a binary operator. Make the expansion closer to the
7839 actual machine, then just general_operand, which will allow 3 separate
7840 memory references (one output, two input) in a single insn. */
7841
7842 void
7843 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
7844 rtx operands[])
7845 {
7846 rtx src1, src2, dst, op, clob;
7847
7848 dst = ix86_fixup_binary_operands (code, mode, operands);
7849 src1 = operands[1];
7850 src2 = operands[2];
7851
7852 /* Emit the instruction. */
7853
7854 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
7855 if (reload_in_progress)
7856 {
7857 /* Reload doesn't know about the flags register, and doesn't know that
7858 it doesn't want to clobber it. We can only do this with PLUS. */
7859 if (code != PLUS)
7860 abort ();
7861 emit_insn (op);
7862 }
7863 else
7864 {
7865 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7866 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7867 }
7868
7869 /* Fix up the destination if needed. */
7870 if (dst != operands[0])
7871 emit_move_insn (operands[0], dst);
7872 }
7873
7874 /* Return TRUE or FALSE depending on whether the binary operator meets the
7875 appropriate constraints. */
7876
7877 int
7878 ix86_binary_operator_ok (enum rtx_code code,
7879 enum machine_mode mode ATTRIBUTE_UNUSED,
7880 rtx operands[3])
7881 {
7882 /* Both source operands cannot be in memory. */
7883 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
7884 return 0;
7885 /* If the operation is not commutable, source 1 cannot be a constant. */
7886 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7887 return 0;
7888 /* If the destination is memory, we must have a matching source operand. */
7889 if (GET_CODE (operands[0]) == MEM
7890 && ! (rtx_equal_p (operands[0], operands[1])
7891 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7892 && rtx_equal_p (operands[0], operands[2]))))
7893 return 0;
7894 /* If the operation is not commutable and the source 1 is memory, we must
7895 have a matching destination. */
7896 if (GET_CODE (operands[1]) == MEM
7897 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
7898 && ! rtx_equal_p (operands[0], operands[1]))
7899 return 0;
7900 return 1;
7901 }
7902
7903 /* Attempt to expand a unary operator. Make the expansion closer to the
7904 actual machine, then just general_operand, which will allow 2 separate
7905 memory references (one output, one input) in a single insn. */
7906
7907 void
7908 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
7909 rtx operands[])
7910 {
7911 int matching_memory;
7912 rtx src, dst, op, clob;
7913
7914 dst = operands[0];
7915 src = operands[1];
7916
7917 /* If the destination is memory, and we do not have matching source
7918 operands, do things in registers. */
7919 matching_memory = 0;
7920 if (MEM_P (dst))
7921 {
7922 if (rtx_equal_p (dst, src))
7923 matching_memory = 1;
7924 else
7925 dst = gen_reg_rtx (mode);
7926 }
7927
7928 /* When source operand is memory, destination must match. */
7929 if (MEM_P (src) && !matching_memory)
7930 src = force_reg (mode, src);
7931
7932 /* If optimizing, copy to regs to improve CSE. */
7933 if (optimize && ! no_new_pseudos)
7934 {
7935 if (GET_CODE (dst) == MEM)
7936 dst = gen_reg_rtx (mode);
7937 if (GET_CODE (src) == MEM)
7938 src = force_reg (mode, src);
7939 }
7940
7941 /* Emit the instruction. */
7942
7943 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
7944 if (reload_in_progress || code == NOT)
7945 {
7946 /* Reload doesn't know about the flags register, and doesn't know that
7947 it doesn't want to clobber it. */
7948 if (code != NOT)
7949 abort ();
7950 emit_insn (op);
7951 }
7952 else
7953 {
7954 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7955 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7956 }
7957
7958 /* Fix up the destination if needed. */
7959 if (dst != operands[0])
7960 emit_move_insn (operands[0], dst);
7961 }
7962
7963 /* Return TRUE or FALSE depending on whether the unary operator meets the
7964 appropriate constraints. */
7965
7966 int
7967 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
7968 enum machine_mode mode ATTRIBUTE_UNUSED,
7969 rtx operands[2] ATTRIBUTE_UNUSED)
7970 {
7971 /* If one of operands is memory, source and destination must match. */
7972 if ((GET_CODE (operands[0]) == MEM
7973 || GET_CODE (operands[1]) == MEM)
7974 && ! rtx_equal_p (operands[0], operands[1]))
7975 return FALSE;
7976 return TRUE;
7977 }
7978
7979 /* Generate code for floating point ABS or NEG. */
7980
7981 void
7982 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
7983 rtx operands[])
7984 {
7985 rtx mask, set, use, clob, dst, src;
7986 bool matching_memory;
7987 bool use_sse = false;
7988 bool vector_mode = VECTOR_MODE_P (mode);
7989 enum machine_mode elt_mode = mode;
7990 enum machine_mode vec_mode = VOIDmode;
7991
7992 if (vector_mode)
7993 {
7994 elt_mode = GET_MODE_INNER (mode);
7995 vec_mode = mode;
7996 use_sse = true;
7997 }
7998 if (TARGET_SSE_MATH)
7999 {
8000 if (mode == SFmode)
8001 {
8002 use_sse = true;
8003 vec_mode = V4SFmode;
8004 }
8005 else if (mode == DFmode && TARGET_SSE2)
8006 {
8007 use_sse = true;
8008 vec_mode = V2DFmode;
8009 }
8010 }
8011
8012 /* NEG and ABS performed with SSE use bitwise mask operations.
8013 Create the appropriate mask now. */
8014 if (use_sse)
8015 {
8016 HOST_WIDE_INT hi, lo;
8017 int shift = 63;
8018 rtvec v;
8019
8020 /* Find the sign bit, sign extended to 2*HWI. */
8021 if (elt_mode == SFmode)
8022 lo = 0x80000000, hi = lo < 0;
8023 else if (HOST_BITS_PER_WIDE_INT >= 64)
8024 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8025 else
8026 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8027
8028 /* If we're looking for the absolute value, then we want
8029 the compliment. */
8030 if (code == ABS)
8031 lo = ~lo, hi = ~hi;
8032
8033 /* Force this value into the low part of a fp vector constant. */
8034 mask = immed_double_const (lo, hi, elt_mode == SFmode ? SImode : DImode);
8035 mask = gen_lowpart (elt_mode, mask);
8036
8037 switch (mode)
8038 {
8039 case SFmode:
8040 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8041 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8042 break;
8043
8044 case DFmode:
8045 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8046 break;
8047
8048 case V4SFmode:
8049 v = gen_rtvec (4, mask, mask, mask, mask);
8050 break;
8051
8052 case V4DFmode:
8053 v = gen_rtvec (2, mask, mask);
8054 break;
8055
8056 default:
8057 gcc_unreachable ();
8058 }
8059
8060 mask = gen_rtx_CONST_VECTOR (vec_mode, v);
8061 mask = force_reg (vec_mode, mask);
8062 }
8063 else
8064 {
8065 /* When not using SSE, we don't use the mask, but prefer to keep the
8066 same general form of the insn pattern to reduce duplication when
8067 it comes time to split. */
8068 mask = const0_rtx;
8069 }
8070
8071 dst = operands[0];
8072 src = operands[1];
8073
8074 /* If the destination is memory, and we don't have matching source
8075 operands, do things in registers. */
8076 matching_memory = false;
8077 if (MEM_P (dst))
8078 {
8079 if (rtx_equal_p (dst, src) && (!optimize || no_new_pseudos))
8080 matching_memory = true;
8081 else
8082 dst = gen_reg_rtx (mode);
8083 }
8084 if (MEM_P (src) && !matching_memory)
8085 src = force_reg (mode, src);
8086
8087 if (vector_mode)
8088 {
8089 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
8090 set = gen_rtx_SET (VOIDmode, dst, set);
8091 emit_insn (set);
8092 }
8093 else
8094 {
8095 set = gen_rtx_fmt_e (code, mode, src);
8096 set = gen_rtx_SET (VOIDmode, dst, set);
8097 use = gen_rtx_USE (VOIDmode, mask);
8098 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8099 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
8100 }
8101
8102 if (dst != operands[0])
8103 emit_move_insn (operands[0], dst);
8104 }
8105
8106 /* Return TRUE or FALSE depending on whether the first SET in INSN
8107 has source and destination with matching CC modes, and that the
8108 CC mode is at least as constrained as REQ_MODE. */
8109
8110 int
8111 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
8112 {
8113 rtx set;
8114 enum machine_mode set_mode;
8115
8116 set = PATTERN (insn);
8117 if (GET_CODE (set) == PARALLEL)
8118 set = XVECEXP (set, 0, 0);
8119 if (GET_CODE (set) != SET)
8120 abort ();
8121 if (GET_CODE (SET_SRC (set)) != COMPARE)
8122 abort ();
8123
8124 set_mode = GET_MODE (SET_DEST (set));
8125 switch (set_mode)
8126 {
8127 case CCNOmode:
8128 if (req_mode != CCNOmode
8129 && (req_mode != CCmode
8130 || XEXP (SET_SRC (set), 1) != const0_rtx))
8131 return 0;
8132 break;
8133 case CCmode:
8134 if (req_mode == CCGCmode)
8135 return 0;
8136 /* FALLTHRU */
8137 case CCGCmode:
8138 if (req_mode == CCGOCmode || req_mode == CCNOmode)
8139 return 0;
8140 /* FALLTHRU */
8141 case CCGOCmode:
8142 if (req_mode == CCZmode)
8143 return 0;
8144 /* FALLTHRU */
8145 case CCZmode:
8146 break;
8147
8148 default:
8149 abort ();
8150 }
8151
8152 return (GET_MODE (SET_SRC (set)) == set_mode);
8153 }
8154
8155 /* Generate insn patterns to do an integer compare of OPERANDS. */
8156
8157 static rtx
8158 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
8159 {
8160 enum machine_mode cmpmode;
8161 rtx tmp, flags;
8162
8163 cmpmode = SELECT_CC_MODE (code, op0, op1);
8164 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
8165
8166 /* This is very simple, but making the interface the same as in the
8167 FP case makes the rest of the code easier. */
8168 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
8169 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
8170
8171 /* Return the test that should be put into the flags user, i.e.
8172 the bcc, scc, or cmov instruction. */
8173 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
8174 }
8175
8176 /* Figure out whether to use ordered or unordered fp comparisons.
8177 Return the appropriate mode to use. */
8178
8179 enum machine_mode
8180 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
8181 {
8182 /* ??? In order to make all comparisons reversible, we do all comparisons
8183 non-trapping when compiling for IEEE. Once gcc is able to distinguish
8184 all forms trapping and nontrapping comparisons, we can make inequality
8185 comparisons trapping again, since it results in better code when using
8186 FCOM based compares. */
8187 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
8188 }
8189
8190 enum machine_mode
8191 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
8192 {
8193 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8194 return ix86_fp_compare_mode (code);
8195 switch (code)
8196 {
8197 /* Only zero flag is needed. */
8198 case EQ: /* ZF=0 */
8199 case NE: /* ZF!=0 */
8200 return CCZmode;
8201 /* Codes needing carry flag. */
8202 case GEU: /* CF=0 */
8203 case GTU: /* CF=0 & ZF=0 */
8204 case LTU: /* CF=1 */
8205 case LEU: /* CF=1 | ZF=1 */
8206 return CCmode;
8207 /* Codes possibly doable only with sign flag when
8208 comparing against zero. */
8209 case GE: /* SF=OF or SF=0 */
8210 case LT: /* SF<>OF or SF=1 */
8211 if (op1 == const0_rtx)
8212 return CCGOCmode;
8213 else
8214 /* For other cases Carry flag is not required. */
8215 return CCGCmode;
8216 /* Codes doable only with sign flag when comparing
8217 against zero, but we miss jump instruction for it
8218 so we need to use relational tests against overflow
8219 that thus needs to be zero. */
8220 case GT: /* ZF=0 & SF=OF */
8221 case LE: /* ZF=1 | SF<>OF */
8222 if (op1 == const0_rtx)
8223 return CCNOmode;
8224 else
8225 return CCGCmode;
8226 /* strcmp pattern do (use flags) and combine may ask us for proper
8227 mode. */
8228 case USE:
8229 return CCmode;
8230 default:
8231 abort ();
8232 }
8233 }
8234
8235 /* Return the fixed registers used for condition codes. */
8236
8237 static bool
8238 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
8239 {
8240 *p1 = FLAGS_REG;
8241 *p2 = FPSR_REG;
8242 return true;
8243 }
8244
8245 /* If two condition code modes are compatible, return a condition code
8246 mode which is compatible with both. Otherwise, return
8247 VOIDmode. */
8248
8249 static enum machine_mode
8250 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
8251 {
8252 if (m1 == m2)
8253 return m1;
8254
8255 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
8256 return VOIDmode;
8257
8258 if ((m1 == CCGCmode && m2 == CCGOCmode)
8259 || (m1 == CCGOCmode && m2 == CCGCmode))
8260 return CCGCmode;
8261
8262 switch (m1)
8263 {
8264 default:
8265 abort ();
8266
8267 case CCmode:
8268 case CCGCmode:
8269 case CCGOCmode:
8270 case CCNOmode:
8271 case CCZmode:
8272 switch (m2)
8273 {
8274 default:
8275 return VOIDmode;
8276
8277 case CCmode:
8278 case CCGCmode:
8279 case CCGOCmode:
8280 case CCNOmode:
8281 case CCZmode:
8282 return CCmode;
8283 }
8284
8285 case CCFPmode:
8286 case CCFPUmode:
8287 /* These are only compatible with themselves, which we already
8288 checked above. */
8289 return VOIDmode;
8290 }
8291 }
8292
8293 /* Return true if we should use an FCOMI instruction for this fp comparison. */
8294
8295 int
8296 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
8297 {
8298 enum rtx_code swapped_code = swap_condition (code);
8299 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
8300 || (ix86_fp_comparison_cost (swapped_code)
8301 == ix86_fp_comparison_fcomi_cost (swapped_code)));
8302 }
8303
8304 /* Swap, force into registers, or otherwise massage the two operands
8305 to a fp comparison. The operands are updated in place; the new
8306 comparison code is returned. */
8307
8308 static enum rtx_code
8309 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
8310 {
8311 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
8312 rtx op0 = *pop0, op1 = *pop1;
8313 enum machine_mode op_mode = GET_MODE (op0);
8314 int is_sse = SSE_REG_P (op0) || SSE_REG_P (op1);
8315
8316 /* All of the unordered compare instructions only work on registers.
8317 The same is true of the fcomi compare instructions. The same is
8318 true of the XFmode compare instructions if not comparing with
8319 zero (ftst insn is used in this case). */
8320
8321 if (!is_sse
8322 && (fpcmp_mode == CCFPUmode
8323 || (op_mode == XFmode
8324 && ! (standard_80387_constant_p (op0) == 1
8325 || standard_80387_constant_p (op1) == 1))
8326 || ix86_use_fcomi_compare (code)))
8327 {
8328 op0 = force_reg (op_mode, op0);
8329 op1 = force_reg (op_mode, op1);
8330 }
8331 else
8332 {
8333 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
8334 things around if they appear profitable, otherwise force op0
8335 into a register. */
8336
8337 if (standard_80387_constant_p (op0) == 0
8338 || (GET_CODE (op0) == MEM
8339 && ! (standard_80387_constant_p (op1) == 0
8340 || GET_CODE (op1) == MEM)))
8341 {
8342 rtx tmp;
8343 tmp = op0, op0 = op1, op1 = tmp;
8344 code = swap_condition (code);
8345 }
8346
8347 if (GET_CODE (op0) != REG)
8348 op0 = force_reg (op_mode, op0);
8349
8350 if (CONSTANT_P (op1))
8351 {
8352 int tmp = standard_80387_constant_p (op1);
8353 if (tmp == 0)
8354 op1 = validize_mem (force_const_mem (op_mode, op1));
8355 else if (tmp == 1)
8356 {
8357 if (TARGET_CMOVE)
8358 op1 = force_reg (op_mode, op1);
8359 }
8360 else
8361 op1 = force_reg (op_mode, op1);
8362 }
8363 }
8364
8365 /* Try to rearrange the comparison to make it cheaper. */
8366 if (ix86_fp_comparison_cost (code)
8367 > ix86_fp_comparison_cost (swap_condition (code))
8368 && (GET_CODE (op1) == REG || !no_new_pseudos))
8369 {
8370 rtx tmp;
8371 tmp = op0, op0 = op1, op1 = tmp;
8372 code = swap_condition (code);
8373 if (GET_CODE (op0) != REG)
8374 op0 = force_reg (op_mode, op0);
8375 }
8376
8377 *pop0 = op0;
8378 *pop1 = op1;
8379 return code;
8380 }
8381
8382 /* Convert comparison codes we use to represent FP comparison to integer
8383 code that will result in proper branch. Return UNKNOWN if no such code
8384 is available. */
8385
8386 enum rtx_code
8387 ix86_fp_compare_code_to_integer (enum rtx_code code)
8388 {
8389 switch (code)
8390 {
8391 case GT:
8392 return GTU;
8393 case GE:
8394 return GEU;
8395 case ORDERED:
8396 case UNORDERED:
8397 return code;
8398 break;
8399 case UNEQ:
8400 return EQ;
8401 break;
8402 case UNLT:
8403 return LTU;
8404 break;
8405 case UNLE:
8406 return LEU;
8407 break;
8408 case LTGT:
8409 return NE;
8410 break;
8411 default:
8412 return UNKNOWN;
8413 }
8414 }
8415
8416 /* Split comparison code CODE into comparisons we can do using branch
8417 instructions. BYPASS_CODE is comparison code for branch that will
8418 branch around FIRST_CODE and SECOND_CODE. If some of branches
8419 is not required, set value to UNKNOWN.
8420 We never require more than two branches. */
8421
8422 void
8423 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
8424 enum rtx_code *first_code,
8425 enum rtx_code *second_code)
8426 {
8427 *first_code = code;
8428 *bypass_code = UNKNOWN;
8429 *second_code = UNKNOWN;
8430
8431 /* The fcomi comparison sets flags as follows:
8432
8433 cmp ZF PF CF
8434 > 0 0 0
8435 < 0 0 1
8436 = 1 0 0
8437 un 1 1 1 */
8438
8439 switch (code)
8440 {
8441 case GT: /* GTU - CF=0 & ZF=0 */
8442 case GE: /* GEU - CF=0 */
8443 case ORDERED: /* PF=0 */
8444 case UNORDERED: /* PF=1 */
8445 case UNEQ: /* EQ - ZF=1 */
8446 case UNLT: /* LTU - CF=1 */
8447 case UNLE: /* LEU - CF=1 | ZF=1 */
8448 case LTGT: /* EQ - ZF=0 */
8449 break;
8450 case LT: /* LTU - CF=1 - fails on unordered */
8451 *first_code = UNLT;
8452 *bypass_code = UNORDERED;
8453 break;
8454 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
8455 *first_code = UNLE;
8456 *bypass_code = UNORDERED;
8457 break;
8458 case EQ: /* EQ - ZF=1 - fails on unordered */
8459 *first_code = UNEQ;
8460 *bypass_code = UNORDERED;
8461 break;
8462 case NE: /* NE - ZF=0 - fails on unordered */
8463 *first_code = LTGT;
8464 *second_code = UNORDERED;
8465 break;
8466 case UNGE: /* GEU - CF=0 - fails on unordered */
8467 *first_code = GE;
8468 *second_code = UNORDERED;
8469 break;
8470 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
8471 *first_code = GT;
8472 *second_code = UNORDERED;
8473 break;
8474 default:
8475 abort ();
8476 }
8477 if (!TARGET_IEEE_FP)
8478 {
8479 *second_code = UNKNOWN;
8480 *bypass_code = UNKNOWN;
8481 }
8482 }
8483
8484 /* Return cost of comparison done fcom + arithmetics operations on AX.
8485 All following functions do use number of instructions as a cost metrics.
8486 In future this should be tweaked to compute bytes for optimize_size and
8487 take into account performance of various instructions on various CPUs. */
8488 static int
8489 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
8490 {
8491 if (!TARGET_IEEE_FP)
8492 return 4;
8493 /* The cost of code output by ix86_expand_fp_compare. */
8494 switch (code)
8495 {
8496 case UNLE:
8497 case UNLT:
8498 case LTGT:
8499 case GT:
8500 case GE:
8501 case UNORDERED:
8502 case ORDERED:
8503 case UNEQ:
8504 return 4;
8505 break;
8506 case LT:
8507 case NE:
8508 case EQ:
8509 case UNGE:
8510 return 5;
8511 break;
8512 case LE:
8513 case UNGT:
8514 return 6;
8515 break;
8516 default:
8517 abort ();
8518 }
8519 }
8520
8521 /* Return cost of comparison done using fcomi operation.
8522 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8523 static int
8524 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
8525 {
8526 enum rtx_code bypass_code, first_code, second_code;
8527 /* Return arbitrarily high cost when instruction is not supported - this
8528 prevents gcc from using it. */
8529 if (!TARGET_CMOVE)
8530 return 1024;
8531 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8532 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
8533 }
8534
8535 /* Return cost of comparison done using sahf operation.
8536 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8537 static int
8538 ix86_fp_comparison_sahf_cost (enum rtx_code code)
8539 {
8540 enum rtx_code bypass_code, first_code, second_code;
8541 /* Return arbitrarily high cost when instruction is not preferred - this
8542 avoids gcc from using it. */
8543 if (!TARGET_USE_SAHF && !optimize_size)
8544 return 1024;
8545 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8546 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
8547 }
8548
8549 /* Compute cost of the comparison done using any method.
8550 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8551 static int
8552 ix86_fp_comparison_cost (enum rtx_code code)
8553 {
8554 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
8555 int min;
8556
8557 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
8558 sahf_cost = ix86_fp_comparison_sahf_cost (code);
8559
8560 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
8561 if (min > sahf_cost)
8562 min = sahf_cost;
8563 if (min > fcomi_cost)
8564 min = fcomi_cost;
8565 return min;
8566 }
8567
8568 /* Generate insn patterns to do a floating point compare of OPERANDS. */
8569
8570 static rtx
8571 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
8572 rtx *second_test, rtx *bypass_test)
8573 {
8574 enum machine_mode fpcmp_mode, intcmp_mode;
8575 rtx tmp, tmp2;
8576 int cost = ix86_fp_comparison_cost (code);
8577 enum rtx_code bypass_code, first_code, second_code;
8578
8579 fpcmp_mode = ix86_fp_compare_mode (code);
8580 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
8581
8582 if (second_test)
8583 *second_test = NULL_RTX;
8584 if (bypass_test)
8585 *bypass_test = NULL_RTX;
8586
8587 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8588
8589 /* Do fcomi/sahf based test when profitable. */
8590 if ((bypass_code == UNKNOWN || bypass_test)
8591 && (second_code == UNKNOWN || second_test)
8592 && ix86_fp_comparison_arithmetics_cost (code) > cost)
8593 {
8594 if (TARGET_CMOVE)
8595 {
8596 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8597 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
8598 tmp);
8599 emit_insn (tmp);
8600 }
8601 else
8602 {
8603 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8604 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8605 if (!scratch)
8606 scratch = gen_reg_rtx (HImode);
8607 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8608 emit_insn (gen_x86_sahf_1 (scratch));
8609 }
8610
8611 /* The FP codes work out to act like unsigned. */
8612 intcmp_mode = fpcmp_mode;
8613 code = first_code;
8614 if (bypass_code != UNKNOWN)
8615 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
8616 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8617 const0_rtx);
8618 if (second_code != UNKNOWN)
8619 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
8620 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8621 const0_rtx);
8622 }
8623 else
8624 {
8625 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
8626 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8627 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8628 if (!scratch)
8629 scratch = gen_reg_rtx (HImode);
8630 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8631
8632 /* In the unordered case, we have to check C2 for NaN's, which
8633 doesn't happen to work out to anything nice combination-wise.
8634 So do some bit twiddling on the value we've got in AH to come
8635 up with an appropriate set of condition codes. */
8636
8637 intcmp_mode = CCNOmode;
8638 switch (code)
8639 {
8640 case GT:
8641 case UNGT:
8642 if (code == GT || !TARGET_IEEE_FP)
8643 {
8644 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8645 code = EQ;
8646 }
8647 else
8648 {
8649 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8650 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8651 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
8652 intcmp_mode = CCmode;
8653 code = GEU;
8654 }
8655 break;
8656 case LT:
8657 case UNLT:
8658 if (code == LT && TARGET_IEEE_FP)
8659 {
8660 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8661 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
8662 intcmp_mode = CCmode;
8663 code = EQ;
8664 }
8665 else
8666 {
8667 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
8668 code = NE;
8669 }
8670 break;
8671 case GE:
8672 case UNGE:
8673 if (code == GE || !TARGET_IEEE_FP)
8674 {
8675 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
8676 code = EQ;
8677 }
8678 else
8679 {
8680 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8681 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8682 GEN_INT (0x01)));
8683 code = NE;
8684 }
8685 break;
8686 case LE:
8687 case UNLE:
8688 if (code == LE && TARGET_IEEE_FP)
8689 {
8690 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8691 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8692 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8693 intcmp_mode = CCmode;
8694 code = LTU;
8695 }
8696 else
8697 {
8698 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8699 code = NE;
8700 }
8701 break;
8702 case EQ:
8703 case UNEQ:
8704 if (code == EQ && TARGET_IEEE_FP)
8705 {
8706 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8707 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8708 intcmp_mode = CCmode;
8709 code = EQ;
8710 }
8711 else
8712 {
8713 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8714 code = NE;
8715 break;
8716 }
8717 break;
8718 case NE:
8719 case LTGT:
8720 if (code == NE && TARGET_IEEE_FP)
8721 {
8722 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8723 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8724 GEN_INT (0x40)));
8725 code = NE;
8726 }
8727 else
8728 {
8729 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8730 code = EQ;
8731 }
8732 break;
8733
8734 case UNORDERED:
8735 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8736 code = NE;
8737 break;
8738 case ORDERED:
8739 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8740 code = EQ;
8741 break;
8742
8743 default:
8744 abort ();
8745 }
8746 }
8747
8748 /* Return the test that should be put into the flags user, i.e.
8749 the bcc, scc, or cmov instruction. */
8750 return gen_rtx_fmt_ee (code, VOIDmode,
8751 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8752 const0_rtx);
8753 }
8754
8755 rtx
8756 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
8757 {
8758 rtx op0, op1, ret;
8759 op0 = ix86_compare_op0;
8760 op1 = ix86_compare_op1;
8761
8762 if (second_test)
8763 *second_test = NULL_RTX;
8764 if (bypass_test)
8765 *bypass_test = NULL_RTX;
8766
8767 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8768 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
8769 second_test, bypass_test);
8770 else
8771 ret = ix86_expand_int_compare (code, op0, op1);
8772
8773 return ret;
8774 }
8775
8776 /* Return true if the CODE will result in nontrivial jump sequence. */
8777 bool
8778 ix86_fp_jump_nontrivial_p (enum rtx_code code)
8779 {
8780 enum rtx_code bypass_code, first_code, second_code;
8781 if (!TARGET_CMOVE)
8782 return true;
8783 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8784 return bypass_code != UNKNOWN || second_code != UNKNOWN;
8785 }
8786
8787 void
8788 ix86_expand_branch (enum rtx_code code, rtx label)
8789 {
8790 rtx tmp;
8791
8792 switch (GET_MODE (ix86_compare_op0))
8793 {
8794 case QImode:
8795 case HImode:
8796 case SImode:
8797 simple:
8798 tmp = ix86_expand_compare (code, NULL, NULL);
8799 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8800 gen_rtx_LABEL_REF (VOIDmode, label),
8801 pc_rtx);
8802 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
8803 return;
8804
8805 case SFmode:
8806 case DFmode:
8807 case XFmode:
8808 {
8809 rtvec vec;
8810 int use_fcomi;
8811 enum rtx_code bypass_code, first_code, second_code;
8812
8813 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
8814 &ix86_compare_op1);
8815
8816 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8817
8818 /* Check whether we will use the natural sequence with one jump. If
8819 so, we can expand jump early. Otherwise delay expansion by
8820 creating compound insn to not confuse optimizers. */
8821 if (bypass_code == UNKNOWN && second_code == UNKNOWN
8822 && TARGET_CMOVE)
8823 {
8824 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
8825 gen_rtx_LABEL_REF (VOIDmode, label),
8826 pc_rtx, NULL_RTX, NULL_RTX);
8827 }
8828 else
8829 {
8830 tmp = gen_rtx_fmt_ee (code, VOIDmode,
8831 ix86_compare_op0, ix86_compare_op1);
8832 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8833 gen_rtx_LABEL_REF (VOIDmode, label),
8834 pc_rtx);
8835 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
8836
8837 use_fcomi = ix86_use_fcomi_compare (code);
8838 vec = rtvec_alloc (3 + !use_fcomi);
8839 RTVEC_ELT (vec, 0) = tmp;
8840 RTVEC_ELT (vec, 1)
8841 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
8842 RTVEC_ELT (vec, 2)
8843 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
8844 if (! use_fcomi)
8845 RTVEC_ELT (vec, 3)
8846 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
8847
8848 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
8849 }
8850 return;
8851 }
8852
8853 case DImode:
8854 if (TARGET_64BIT)
8855 goto simple;
8856 /* Expand DImode branch into multiple compare+branch. */
8857 {
8858 rtx lo[2], hi[2], label2;
8859 enum rtx_code code1, code2, code3;
8860
8861 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
8862 {
8863 tmp = ix86_compare_op0;
8864 ix86_compare_op0 = ix86_compare_op1;
8865 ix86_compare_op1 = tmp;
8866 code = swap_condition (code);
8867 }
8868 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
8869 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
8870
8871 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
8872 avoid two branches. This costs one extra insn, so disable when
8873 optimizing for size. */
8874
8875 if ((code == EQ || code == NE)
8876 && (!optimize_size
8877 || hi[1] == const0_rtx || lo[1] == const0_rtx))
8878 {
8879 rtx xor0, xor1;
8880
8881 xor1 = hi[0];
8882 if (hi[1] != const0_rtx)
8883 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
8884 NULL_RTX, 0, OPTAB_WIDEN);
8885
8886 xor0 = lo[0];
8887 if (lo[1] != const0_rtx)
8888 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
8889 NULL_RTX, 0, OPTAB_WIDEN);
8890
8891 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
8892 NULL_RTX, 0, OPTAB_WIDEN);
8893
8894 ix86_compare_op0 = tmp;
8895 ix86_compare_op1 = const0_rtx;
8896 ix86_expand_branch (code, label);
8897 return;
8898 }
8899
8900 /* Otherwise, if we are doing less-than or greater-or-equal-than,
8901 op1 is a constant and the low word is zero, then we can just
8902 examine the high word. */
8903
8904 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
8905 switch (code)
8906 {
8907 case LT: case LTU: case GE: case GEU:
8908 ix86_compare_op0 = hi[0];
8909 ix86_compare_op1 = hi[1];
8910 ix86_expand_branch (code, label);
8911 return;
8912 default:
8913 break;
8914 }
8915
8916 /* Otherwise, we need two or three jumps. */
8917
8918 label2 = gen_label_rtx ();
8919
8920 code1 = code;
8921 code2 = swap_condition (code);
8922 code3 = unsigned_condition (code);
8923
8924 switch (code)
8925 {
8926 case LT: case GT: case LTU: case GTU:
8927 break;
8928
8929 case LE: code1 = LT; code2 = GT; break;
8930 case GE: code1 = GT; code2 = LT; break;
8931 case LEU: code1 = LTU; code2 = GTU; break;
8932 case GEU: code1 = GTU; code2 = LTU; break;
8933
8934 case EQ: code1 = UNKNOWN; code2 = NE; break;
8935 case NE: code2 = UNKNOWN; break;
8936
8937 default:
8938 abort ();
8939 }
8940
8941 /*
8942 * a < b =>
8943 * if (hi(a) < hi(b)) goto true;
8944 * if (hi(a) > hi(b)) goto false;
8945 * if (lo(a) < lo(b)) goto true;
8946 * false:
8947 */
8948
8949 ix86_compare_op0 = hi[0];
8950 ix86_compare_op1 = hi[1];
8951
8952 if (code1 != UNKNOWN)
8953 ix86_expand_branch (code1, label);
8954 if (code2 != UNKNOWN)
8955 ix86_expand_branch (code2, label2);
8956
8957 ix86_compare_op0 = lo[0];
8958 ix86_compare_op1 = lo[1];
8959 ix86_expand_branch (code3, label);
8960
8961 if (code2 != UNKNOWN)
8962 emit_label (label2);
8963 return;
8964 }
8965
8966 default:
8967 abort ();
8968 }
8969 }
8970
8971 /* Split branch based on floating point condition. */
8972 void
8973 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
8974 rtx target1, rtx target2, rtx tmp, rtx pushed)
8975 {
8976 rtx second, bypass;
8977 rtx label = NULL_RTX;
8978 rtx condition;
8979 int bypass_probability = -1, second_probability = -1, probability = -1;
8980 rtx i;
8981
8982 if (target2 != pc_rtx)
8983 {
8984 rtx tmp = target2;
8985 code = reverse_condition_maybe_unordered (code);
8986 target2 = target1;
8987 target1 = tmp;
8988 }
8989
8990 condition = ix86_expand_fp_compare (code, op1, op2,
8991 tmp, &second, &bypass);
8992
8993 /* Remove pushed operand from stack. */
8994 if (pushed)
8995 ix86_free_from_memory (GET_MODE (pushed));
8996
8997 if (split_branch_probability >= 0)
8998 {
8999 /* Distribute the probabilities across the jumps.
9000 Assume the BYPASS and SECOND to be always test
9001 for UNORDERED. */
9002 probability = split_branch_probability;
9003
9004 /* Value of 1 is low enough to make no need for probability
9005 to be updated. Later we may run some experiments and see
9006 if unordered values are more frequent in practice. */
9007 if (bypass)
9008 bypass_probability = 1;
9009 if (second)
9010 second_probability = 1;
9011 }
9012 if (bypass != NULL_RTX)
9013 {
9014 label = gen_label_rtx ();
9015 i = emit_jump_insn (gen_rtx_SET
9016 (VOIDmode, pc_rtx,
9017 gen_rtx_IF_THEN_ELSE (VOIDmode,
9018 bypass,
9019 gen_rtx_LABEL_REF (VOIDmode,
9020 label),
9021 pc_rtx)));
9022 if (bypass_probability >= 0)
9023 REG_NOTES (i)
9024 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9025 GEN_INT (bypass_probability),
9026 REG_NOTES (i));
9027 }
9028 i = emit_jump_insn (gen_rtx_SET
9029 (VOIDmode, pc_rtx,
9030 gen_rtx_IF_THEN_ELSE (VOIDmode,
9031 condition, target1, target2)));
9032 if (probability >= 0)
9033 REG_NOTES (i)
9034 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9035 GEN_INT (probability),
9036 REG_NOTES (i));
9037 if (second != NULL_RTX)
9038 {
9039 i = emit_jump_insn (gen_rtx_SET
9040 (VOIDmode, pc_rtx,
9041 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9042 target2)));
9043 if (second_probability >= 0)
9044 REG_NOTES (i)
9045 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9046 GEN_INT (second_probability),
9047 REG_NOTES (i));
9048 }
9049 if (label != NULL_RTX)
9050 emit_label (label);
9051 }
9052
9053 int
9054 ix86_expand_setcc (enum rtx_code code, rtx dest)
9055 {
9056 rtx ret, tmp, tmpreg, equiv;
9057 rtx second_test, bypass_test;
9058
9059 if (GET_MODE (ix86_compare_op0) == DImode
9060 && !TARGET_64BIT)
9061 return 0; /* FAIL */
9062
9063 if (GET_MODE (dest) != QImode)
9064 abort ();
9065
9066 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9067 PUT_MODE (ret, QImode);
9068
9069 tmp = dest;
9070 tmpreg = dest;
9071
9072 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
9073 if (bypass_test || second_test)
9074 {
9075 rtx test = second_test;
9076 int bypass = 0;
9077 rtx tmp2 = gen_reg_rtx (QImode);
9078 if (bypass_test)
9079 {
9080 if (second_test)
9081 abort ();
9082 test = bypass_test;
9083 bypass = 1;
9084 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
9085 }
9086 PUT_MODE (test, QImode);
9087 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
9088
9089 if (bypass)
9090 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
9091 else
9092 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
9093 }
9094
9095 /* Attach a REG_EQUAL note describing the comparison result. */
9096 equiv = simplify_gen_relational (code, QImode,
9097 GET_MODE (ix86_compare_op0),
9098 ix86_compare_op0, ix86_compare_op1);
9099 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
9100
9101 return 1; /* DONE */
9102 }
9103
9104 /* Expand comparison setting or clearing carry flag. Return true when
9105 successful and set pop for the operation. */
9106 static bool
9107 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
9108 {
9109 enum machine_mode mode =
9110 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
9111
9112 /* Do not handle DImode compares that go trought special path. Also we can't
9113 deal with FP compares yet. This is possible to add. */
9114 if ((mode == DImode && !TARGET_64BIT))
9115 return false;
9116 if (FLOAT_MODE_P (mode))
9117 {
9118 rtx second_test = NULL, bypass_test = NULL;
9119 rtx compare_op, compare_seq;
9120
9121 /* Shortcut: following common codes never translate into carry flag compares. */
9122 if (code == EQ || code == NE || code == UNEQ || code == LTGT
9123 || code == ORDERED || code == UNORDERED)
9124 return false;
9125
9126 /* These comparisons require zero flag; swap operands so they won't. */
9127 if ((code == GT || code == UNLE || code == LE || code == UNGT)
9128 && !TARGET_IEEE_FP)
9129 {
9130 rtx tmp = op0;
9131 op0 = op1;
9132 op1 = tmp;
9133 code = swap_condition (code);
9134 }
9135
9136 /* Try to expand the comparison and verify that we end up with carry flag
9137 based comparison. This is fails to be true only when we decide to expand
9138 comparison using arithmetic that is not too common scenario. */
9139 start_sequence ();
9140 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9141 &second_test, &bypass_test);
9142 compare_seq = get_insns ();
9143 end_sequence ();
9144
9145 if (second_test || bypass_test)
9146 return false;
9147 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9148 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9149 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
9150 else
9151 code = GET_CODE (compare_op);
9152 if (code != LTU && code != GEU)
9153 return false;
9154 emit_insn (compare_seq);
9155 *pop = compare_op;
9156 return true;
9157 }
9158 if (!INTEGRAL_MODE_P (mode))
9159 return false;
9160 switch (code)
9161 {
9162 case LTU:
9163 case GEU:
9164 break;
9165
9166 /* Convert a==0 into (unsigned)a<1. */
9167 case EQ:
9168 case NE:
9169 if (op1 != const0_rtx)
9170 return false;
9171 op1 = const1_rtx;
9172 code = (code == EQ ? LTU : GEU);
9173 break;
9174
9175 /* Convert a>b into b<a or a>=b-1. */
9176 case GTU:
9177 case LEU:
9178 if (GET_CODE (op1) == CONST_INT)
9179 {
9180 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
9181 /* Bail out on overflow. We still can swap operands but that
9182 would force loading of the constant into register. */
9183 if (op1 == const0_rtx
9184 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
9185 return false;
9186 code = (code == GTU ? GEU : LTU);
9187 }
9188 else
9189 {
9190 rtx tmp = op1;
9191 op1 = op0;
9192 op0 = tmp;
9193 code = (code == GTU ? LTU : GEU);
9194 }
9195 break;
9196
9197 /* Convert a>=0 into (unsigned)a<0x80000000. */
9198 case LT:
9199 case GE:
9200 if (mode == DImode || op1 != const0_rtx)
9201 return false;
9202 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9203 code = (code == LT ? GEU : LTU);
9204 break;
9205 case LE:
9206 case GT:
9207 if (mode == DImode || op1 != constm1_rtx)
9208 return false;
9209 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9210 code = (code == LE ? GEU : LTU);
9211 break;
9212
9213 default:
9214 return false;
9215 }
9216 /* Swapping operands may cause constant to appear as first operand. */
9217 if (!nonimmediate_operand (op0, VOIDmode))
9218 {
9219 if (no_new_pseudos)
9220 return false;
9221 op0 = force_reg (mode, op0);
9222 }
9223 ix86_compare_op0 = op0;
9224 ix86_compare_op1 = op1;
9225 *pop = ix86_expand_compare (code, NULL, NULL);
9226 if (GET_CODE (*pop) != LTU && GET_CODE (*pop) != GEU)
9227 abort ();
9228 return true;
9229 }
9230
9231 int
9232 ix86_expand_int_movcc (rtx operands[])
9233 {
9234 enum rtx_code code = GET_CODE (operands[1]), compare_code;
9235 rtx compare_seq, compare_op;
9236 rtx second_test, bypass_test;
9237 enum machine_mode mode = GET_MODE (operands[0]);
9238 bool sign_bit_compare_p = false;;
9239
9240 start_sequence ();
9241 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9242 compare_seq = get_insns ();
9243 end_sequence ();
9244
9245 compare_code = GET_CODE (compare_op);
9246
9247 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
9248 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
9249 sign_bit_compare_p = true;
9250
9251 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
9252 HImode insns, we'd be swallowed in word prefix ops. */
9253
9254 if ((mode != HImode || TARGET_FAST_PREFIX)
9255 && (mode != DImode || TARGET_64BIT)
9256 && GET_CODE (operands[2]) == CONST_INT
9257 && GET_CODE (operands[3]) == CONST_INT)
9258 {
9259 rtx out = operands[0];
9260 HOST_WIDE_INT ct = INTVAL (operands[2]);
9261 HOST_WIDE_INT cf = INTVAL (operands[3]);
9262 HOST_WIDE_INT diff;
9263
9264 diff = ct - cf;
9265 /* Sign bit compares are better done using shifts than we do by using
9266 sbb. */
9267 if (sign_bit_compare_p
9268 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9269 ix86_compare_op1, &compare_op))
9270 {
9271 /* Detect overlap between destination and compare sources. */
9272 rtx tmp = out;
9273
9274 if (!sign_bit_compare_p)
9275 {
9276 bool fpcmp = false;
9277
9278 compare_code = GET_CODE (compare_op);
9279
9280 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9281 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9282 {
9283 fpcmp = true;
9284 compare_code = ix86_fp_compare_code_to_integer (compare_code);
9285 }
9286
9287 /* To simplify rest of code, restrict to the GEU case. */
9288 if (compare_code == LTU)
9289 {
9290 HOST_WIDE_INT tmp = ct;
9291 ct = cf;
9292 cf = tmp;
9293 compare_code = reverse_condition (compare_code);
9294 code = reverse_condition (code);
9295 }
9296 else
9297 {
9298 if (fpcmp)
9299 PUT_CODE (compare_op,
9300 reverse_condition_maybe_unordered
9301 (GET_CODE (compare_op)));
9302 else
9303 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9304 }
9305 diff = ct - cf;
9306
9307 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
9308 || reg_overlap_mentioned_p (out, ix86_compare_op1))
9309 tmp = gen_reg_rtx (mode);
9310
9311 if (mode == DImode)
9312 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
9313 else
9314 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
9315 }
9316 else
9317 {
9318 if (code == GT || code == GE)
9319 code = reverse_condition (code);
9320 else
9321 {
9322 HOST_WIDE_INT tmp = ct;
9323 ct = cf;
9324 cf = tmp;
9325 diff = ct - cf;
9326 }
9327 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
9328 ix86_compare_op1, VOIDmode, 0, -1);
9329 }
9330
9331 if (diff == 1)
9332 {
9333 /*
9334 * cmpl op0,op1
9335 * sbbl dest,dest
9336 * [addl dest, ct]
9337 *
9338 * Size 5 - 8.
9339 */
9340 if (ct)
9341 tmp = expand_simple_binop (mode, PLUS,
9342 tmp, GEN_INT (ct),
9343 copy_rtx (tmp), 1, OPTAB_DIRECT);
9344 }
9345 else if (cf == -1)
9346 {
9347 /*
9348 * cmpl op0,op1
9349 * sbbl dest,dest
9350 * orl $ct, dest
9351 *
9352 * Size 8.
9353 */
9354 tmp = expand_simple_binop (mode, IOR,
9355 tmp, GEN_INT (ct),
9356 copy_rtx (tmp), 1, OPTAB_DIRECT);
9357 }
9358 else if (diff == -1 && ct)
9359 {
9360 /*
9361 * cmpl op0,op1
9362 * sbbl dest,dest
9363 * notl dest
9364 * [addl dest, cf]
9365 *
9366 * Size 8 - 11.
9367 */
9368 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9369 if (cf)
9370 tmp = expand_simple_binop (mode, PLUS,
9371 copy_rtx (tmp), GEN_INT (cf),
9372 copy_rtx (tmp), 1, OPTAB_DIRECT);
9373 }
9374 else
9375 {
9376 /*
9377 * cmpl op0,op1
9378 * sbbl dest,dest
9379 * [notl dest]
9380 * andl cf - ct, dest
9381 * [addl dest, ct]
9382 *
9383 * Size 8 - 11.
9384 */
9385
9386 if (cf == 0)
9387 {
9388 cf = ct;
9389 ct = 0;
9390 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9391 }
9392
9393 tmp = expand_simple_binop (mode, AND,
9394 copy_rtx (tmp),
9395 gen_int_mode (cf - ct, mode),
9396 copy_rtx (tmp), 1, OPTAB_DIRECT);
9397 if (ct)
9398 tmp = expand_simple_binop (mode, PLUS,
9399 copy_rtx (tmp), GEN_INT (ct),
9400 copy_rtx (tmp), 1, OPTAB_DIRECT);
9401 }
9402
9403 if (!rtx_equal_p (tmp, out))
9404 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
9405
9406 return 1; /* DONE */
9407 }
9408
9409 if (diff < 0)
9410 {
9411 HOST_WIDE_INT tmp;
9412 tmp = ct, ct = cf, cf = tmp;
9413 diff = -diff;
9414 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9415 {
9416 /* We may be reversing unordered compare to normal compare, that
9417 is not valid in general (we may convert non-trapping condition
9418 to trapping one), however on i386 we currently emit all
9419 comparisons unordered. */
9420 compare_code = reverse_condition_maybe_unordered (compare_code);
9421 code = reverse_condition_maybe_unordered (code);
9422 }
9423 else
9424 {
9425 compare_code = reverse_condition (compare_code);
9426 code = reverse_condition (code);
9427 }
9428 }
9429
9430 compare_code = UNKNOWN;
9431 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
9432 && GET_CODE (ix86_compare_op1) == CONST_INT)
9433 {
9434 if (ix86_compare_op1 == const0_rtx
9435 && (code == LT || code == GE))
9436 compare_code = code;
9437 else if (ix86_compare_op1 == constm1_rtx)
9438 {
9439 if (code == LE)
9440 compare_code = LT;
9441 else if (code == GT)
9442 compare_code = GE;
9443 }
9444 }
9445
9446 /* Optimize dest = (op0 < 0) ? -1 : cf. */
9447 if (compare_code != UNKNOWN
9448 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
9449 && (cf == -1 || ct == -1))
9450 {
9451 /* If lea code below could be used, only optimize
9452 if it results in a 2 insn sequence. */
9453
9454 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
9455 || diff == 3 || diff == 5 || diff == 9)
9456 || (compare_code == LT && ct == -1)
9457 || (compare_code == GE && cf == -1))
9458 {
9459 /*
9460 * notl op1 (if necessary)
9461 * sarl $31, op1
9462 * orl cf, op1
9463 */
9464 if (ct != -1)
9465 {
9466 cf = ct;
9467 ct = -1;
9468 code = reverse_condition (code);
9469 }
9470
9471 out = emit_store_flag (out, code, ix86_compare_op0,
9472 ix86_compare_op1, VOIDmode, 0, -1);
9473
9474 out = expand_simple_binop (mode, IOR,
9475 out, GEN_INT (cf),
9476 out, 1, OPTAB_DIRECT);
9477 if (out != operands[0])
9478 emit_move_insn (operands[0], out);
9479
9480 return 1; /* DONE */
9481 }
9482 }
9483
9484
9485 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
9486 || diff == 3 || diff == 5 || diff == 9)
9487 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
9488 && (mode != DImode
9489 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
9490 {
9491 /*
9492 * xorl dest,dest
9493 * cmpl op1,op2
9494 * setcc dest
9495 * lea cf(dest*(ct-cf)),dest
9496 *
9497 * Size 14.
9498 *
9499 * This also catches the degenerate setcc-only case.
9500 */
9501
9502 rtx tmp;
9503 int nops;
9504
9505 out = emit_store_flag (out, code, ix86_compare_op0,
9506 ix86_compare_op1, VOIDmode, 0, 1);
9507
9508 nops = 0;
9509 /* On x86_64 the lea instruction operates on Pmode, so we need
9510 to get arithmetics done in proper mode to match. */
9511 if (diff == 1)
9512 tmp = copy_rtx (out);
9513 else
9514 {
9515 rtx out1;
9516 out1 = copy_rtx (out);
9517 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
9518 nops++;
9519 if (diff & 1)
9520 {
9521 tmp = gen_rtx_PLUS (mode, tmp, out1);
9522 nops++;
9523 }
9524 }
9525 if (cf != 0)
9526 {
9527 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
9528 nops++;
9529 }
9530 if (!rtx_equal_p (tmp, out))
9531 {
9532 if (nops == 1)
9533 out = force_operand (tmp, copy_rtx (out));
9534 else
9535 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
9536 }
9537 if (!rtx_equal_p (out, operands[0]))
9538 emit_move_insn (operands[0], copy_rtx (out));
9539
9540 return 1; /* DONE */
9541 }
9542
9543 /*
9544 * General case: Jumpful:
9545 * xorl dest,dest cmpl op1, op2
9546 * cmpl op1, op2 movl ct, dest
9547 * setcc dest jcc 1f
9548 * decl dest movl cf, dest
9549 * andl (cf-ct),dest 1:
9550 * addl ct,dest
9551 *
9552 * Size 20. Size 14.
9553 *
9554 * This is reasonably steep, but branch mispredict costs are
9555 * high on modern cpus, so consider failing only if optimizing
9556 * for space.
9557 */
9558
9559 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9560 && BRANCH_COST >= 2)
9561 {
9562 if (cf == 0)
9563 {
9564 cf = ct;
9565 ct = 0;
9566 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9567 /* We may be reversing unordered compare to normal compare,
9568 that is not valid in general (we may convert non-trapping
9569 condition to trapping one), however on i386 we currently
9570 emit all comparisons unordered. */
9571 code = reverse_condition_maybe_unordered (code);
9572 else
9573 {
9574 code = reverse_condition (code);
9575 if (compare_code != UNKNOWN)
9576 compare_code = reverse_condition (compare_code);
9577 }
9578 }
9579
9580 if (compare_code != UNKNOWN)
9581 {
9582 /* notl op1 (if needed)
9583 sarl $31, op1
9584 andl (cf-ct), op1
9585 addl ct, op1
9586
9587 For x < 0 (resp. x <= -1) there will be no notl,
9588 so if possible swap the constants to get rid of the
9589 complement.
9590 True/false will be -1/0 while code below (store flag
9591 followed by decrement) is 0/-1, so the constants need
9592 to be exchanged once more. */
9593
9594 if (compare_code == GE || !cf)
9595 {
9596 code = reverse_condition (code);
9597 compare_code = LT;
9598 }
9599 else
9600 {
9601 HOST_WIDE_INT tmp = cf;
9602 cf = ct;
9603 ct = tmp;
9604 }
9605
9606 out = emit_store_flag (out, code, ix86_compare_op0,
9607 ix86_compare_op1, VOIDmode, 0, -1);
9608 }
9609 else
9610 {
9611 out = emit_store_flag (out, code, ix86_compare_op0,
9612 ix86_compare_op1, VOIDmode, 0, 1);
9613
9614 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
9615 copy_rtx (out), 1, OPTAB_DIRECT);
9616 }
9617
9618 out = expand_simple_binop (mode, AND, copy_rtx (out),
9619 gen_int_mode (cf - ct, mode),
9620 copy_rtx (out), 1, OPTAB_DIRECT);
9621 if (ct)
9622 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
9623 copy_rtx (out), 1, OPTAB_DIRECT);
9624 if (!rtx_equal_p (out, operands[0]))
9625 emit_move_insn (operands[0], copy_rtx (out));
9626
9627 return 1; /* DONE */
9628 }
9629 }
9630
9631 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9632 {
9633 /* Try a few things more with specific constants and a variable. */
9634
9635 optab op;
9636 rtx var, orig_out, out, tmp;
9637
9638 if (BRANCH_COST <= 2)
9639 return 0; /* FAIL */
9640
9641 /* If one of the two operands is an interesting constant, load a
9642 constant with the above and mask it in with a logical operation. */
9643
9644 if (GET_CODE (operands[2]) == CONST_INT)
9645 {
9646 var = operands[3];
9647 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
9648 operands[3] = constm1_rtx, op = and_optab;
9649 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
9650 operands[3] = const0_rtx, op = ior_optab;
9651 else
9652 return 0; /* FAIL */
9653 }
9654 else if (GET_CODE (operands[3]) == CONST_INT)
9655 {
9656 var = operands[2];
9657 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
9658 operands[2] = constm1_rtx, op = and_optab;
9659 else if (INTVAL (operands[3]) == -1 && operands[3] != const0_rtx)
9660 operands[2] = const0_rtx, op = ior_optab;
9661 else
9662 return 0; /* FAIL */
9663 }
9664 else
9665 return 0; /* FAIL */
9666
9667 orig_out = operands[0];
9668 tmp = gen_reg_rtx (mode);
9669 operands[0] = tmp;
9670
9671 /* Recurse to get the constant loaded. */
9672 if (ix86_expand_int_movcc (operands) == 0)
9673 return 0; /* FAIL */
9674
9675 /* Mask in the interesting variable. */
9676 out = expand_binop (mode, op, var, tmp, orig_out, 0,
9677 OPTAB_WIDEN);
9678 if (!rtx_equal_p (out, orig_out))
9679 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
9680
9681 return 1; /* DONE */
9682 }
9683
9684 /*
9685 * For comparison with above,
9686 *
9687 * movl cf,dest
9688 * movl ct,tmp
9689 * cmpl op1,op2
9690 * cmovcc tmp,dest
9691 *
9692 * Size 15.
9693 */
9694
9695 if (! nonimmediate_operand (operands[2], mode))
9696 operands[2] = force_reg (mode, operands[2]);
9697 if (! nonimmediate_operand (operands[3], mode))
9698 operands[3] = force_reg (mode, operands[3]);
9699
9700 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9701 {
9702 rtx tmp = gen_reg_rtx (mode);
9703 emit_move_insn (tmp, operands[3]);
9704 operands[3] = tmp;
9705 }
9706 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9707 {
9708 rtx tmp = gen_reg_rtx (mode);
9709 emit_move_insn (tmp, operands[2]);
9710 operands[2] = tmp;
9711 }
9712
9713 if (! register_operand (operands[2], VOIDmode)
9714 && (mode == QImode
9715 || ! register_operand (operands[3], VOIDmode)))
9716 operands[2] = force_reg (mode, operands[2]);
9717
9718 if (mode == QImode
9719 && ! register_operand (operands[3], VOIDmode))
9720 operands[3] = force_reg (mode, operands[3]);
9721
9722 emit_insn (compare_seq);
9723 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9724 gen_rtx_IF_THEN_ELSE (mode,
9725 compare_op, operands[2],
9726 operands[3])));
9727 if (bypass_test)
9728 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9729 gen_rtx_IF_THEN_ELSE (mode,
9730 bypass_test,
9731 copy_rtx (operands[3]),
9732 copy_rtx (operands[0]))));
9733 if (second_test)
9734 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9735 gen_rtx_IF_THEN_ELSE (mode,
9736 second_test,
9737 copy_rtx (operands[2]),
9738 copy_rtx (operands[0]))));
9739
9740 return 1; /* DONE */
9741 }
9742
9743 int
9744 ix86_expand_fp_movcc (rtx operands[])
9745 {
9746 enum machine_mode mode = GET_MODE (operands[0]);
9747 enum rtx_code code = GET_CODE (operands[1]);
9748 rtx tmp, compare_op, second_test, bypass_test;
9749
9750 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
9751 {
9752 rtx cmp_op0, cmp_op1, if_true, if_false;
9753 rtx clob;
9754 enum machine_mode vmode, cmode;
9755 bool is_minmax = false;
9756
9757 cmp_op0 = ix86_compare_op0;
9758 cmp_op1 = ix86_compare_op1;
9759 if_true = operands[2];
9760 if_false = operands[3];
9761
9762 /* Since we've no cmove for sse registers, don't force bad register
9763 allocation just to gain access to it. Deny movcc when the
9764 comparison mode doesn't match the move mode. */
9765 cmode = GET_MODE (cmp_op0);
9766 if (cmode == VOIDmode)
9767 cmode = GET_MODE (cmp_op1);
9768 if (cmode != mode)
9769 return 0;
9770
9771 /* We have no LTGT as an operator. We could implement it with
9772 NE & ORDERED, but this requires an extra temporary. It's
9773 not clear that it's worth it. */
9774 if (code == LTGT || code == UNEQ)
9775 return 0;
9776
9777 /* Massage condition to satisfy sse_comparison_operator. Try
9778 to canonicalize the destination operand to be first in the
9779 comparison - this helps reload to avoid extra moves. */
9780 if (!sse_comparison_operator (operands[1], VOIDmode)
9781 || (COMMUTATIVE_P (operands[1])
9782 && rtx_equal_p (operands[0], cmp_op1)))
9783 {
9784 tmp = cmp_op0;
9785 cmp_op0 = cmp_op1;
9786 cmp_op1 = tmp;
9787 code = swap_condition (code);
9788 }
9789
9790 /* Detect conditional moves that exactly match min/max operational
9791 semantics. Note that this is IEEE safe, as long as we don't
9792 interchange the operands. Which is why we keep this in the form
9793 if an IF_THEN_ELSE instead of reducing to SMIN/SMAX. */
9794 if ((code == LT || code == UNGE) && REG_P (cmp_op0) && REG_P (cmp_op1))
9795 {
9796 if (((cmp_op0 == if_true && cmp_op1 == if_false)
9797 || (cmp_op0 == if_false && cmp_op1 == if_true)))
9798 {
9799 is_minmax = true;
9800 if (code == UNGE)
9801 {
9802 code = LT;
9803 tmp = if_true;
9804 if_true = if_false;
9805 if_false = tmp;
9806 }
9807 }
9808 }
9809
9810 if (mode == SFmode)
9811 vmode = V4SFmode;
9812 else if (mode == DFmode)
9813 vmode = V2DFmode;
9814 else
9815 gcc_unreachable ();
9816
9817 cmp_op0 = force_reg (mode, cmp_op0);
9818 if (!nonimmediate_operand (cmp_op1, mode))
9819 cmp_op1 = force_reg (mode, cmp_op1);
9820
9821 tmp = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
9822 gcc_assert (sse_comparison_operator (tmp, VOIDmode));
9823
9824 tmp = gen_rtx_IF_THEN_ELSE (mode, tmp, if_true, if_false);
9825 tmp = gen_rtx_SET (VOIDmode, operands[0], tmp);
9826
9827 if (!is_minmax)
9828 {
9829 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (vmode));
9830 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
9831 }
9832
9833 emit_insn (tmp);
9834 return 1;
9835 }
9836
9837 /* The floating point conditional move instructions don't directly
9838 support conditions resulting from a signed integer comparison. */
9839
9840 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9841
9842 /* The floating point conditional move instructions don't directly
9843 support signed integer comparisons. */
9844
9845 if (!fcmov_comparison_operator (compare_op, VOIDmode))
9846 {
9847 if (second_test != NULL || bypass_test != NULL)
9848 abort ();
9849 tmp = gen_reg_rtx (QImode);
9850 ix86_expand_setcc (code, tmp);
9851 code = NE;
9852 ix86_compare_op0 = tmp;
9853 ix86_compare_op1 = const0_rtx;
9854 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9855 }
9856 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9857 {
9858 tmp = gen_reg_rtx (mode);
9859 emit_move_insn (tmp, operands[3]);
9860 operands[3] = tmp;
9861 }
9862 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9863 {
9864 tmp = gen_reg_rtx (mode);
9865 emit_move_insn (tmp, operands[2]);
9866 operands[2] = tmp;
9867 }
9868
9869 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9870 gen_rtx_IF_THEN_ELSE (mode, compare_op,
9871 operands[2], operands[3])));
9872 if (bypass_test)
9873 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9874 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
9875 operands[3], operands[0])));
9876 if (second_test)
9877 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9878 gen_rtx_IF_THEN_ELSE (mode, second_test,
9879 operands[2], operands[0])));
9880
9881 return 1;
9882 }
9883
9884 void
9885 ix86_split_sse_movcc (rtx operands[])
9886 {
9887 rtx dest, scratch, cmp, op_true, op_false, x;
9888 enum machine_mode mode, vmode;
9889
9890 /* Note that the operator CMP has been set up with matching constraints
9891 such that dest is valid for the comparison. Unless one of the true
9892 or false operands are zero, the true operand has already been placed
9893 in SCRATCH. */
9894 dest = operands[0];
9895 scratch = operands[1];
9896 op_true = operands[2];
9897 op_false = operands[3];
9898 cmp = operands[4];
9899
9900 mode = GET_MODE (dest);
9901 vmode = GET_MODE (scratch);
9902
9903 emit_insn (gen_rtx_SET (VOIDmode, dest, cmp));
9904
9905 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9906
9907 if (op_false == CONST0_RTX (mode))
9908 {
9909 op_true = simplify_gen_subreg (vmode, op_true, mode, 0);
9910 x = gen_rtx_AND (vmode, dest, op_true);
9911 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9912 }
9913 else
9914 {
9915 op_false = simplify_gen_subreg (vmode, op_false, mode, 0);
9916
9917 if (op_true == CONST0_RTX (mode))
9918 {
9919 x = gen_rtx_NOT (vmode, dest);
9920 x = gen_rtx_AND (vmode, x, op_false);
9921 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9922 }
9923 else
9924 {
9925 x = gen_rtx_AND (vmode, scratch, dest);
9926 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9927
9928 x = gen_rtx_NOT (vmode, dest);
9929 x = gen_rtx_AND (vmode, x, op_false);
9930 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9931
9932 x = gen_rtx_IOR (vmode, dest, scratch);
9933 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9934 }
9935 }
9936 }
9937
9938 /* Expand conditional increment or decrement using adb/sbb instructions.
9939 The default case using setcc followed by the conditional move can be
9940 done by generic code. */
9941 int
9942 ix86_expand_int_addcc (rtx operands[])
9943 {
9944 enum rtx_code code = GET_CODE (operands[1]);
9945 rtx compare_op;
9946 rtx val = const0_rtx;
9947 bool fpcmp = false;
9948 enum machine_mode mode = GET_MODE (operands[0]);
9949
9950 if (operands[3] != const1_rtx
9951 && operands[3] != constm1_rtx)
9952 return 0;
9953 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9954 ix86_compare_op1, &compare_op))
9955 return 0;
9956 code = GET_CODE (compare_op);
9957
9958 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9959 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9960 {
9961 fpcmp = true;
9962 code = ix86_fp_compare_code_to_integer (code);
9963 }
9964
9965 if (code != LTU)
9966 {
9967 val = constm1_rtx;
9968 if (fpcmp)
9969 PUT_CODE (compare_op,
9970 reverse_condition_maybe_unordered
9971 (GET_CODE (compare_op)));
9972 else
9973 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9974 }
9975 PUT_MODE (compare_op, mode);
9976
9977 /* Construct either adc or sbb insn. */
9978 if ((code == LTU) == (operands[3] == constm1_rtx))
9979 {
9980 switch (GET_MODE (operands[0]))
9981 {
9982 case QImode:
9983 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
9984 break;
9985 case HImode:
9986 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
9987 break;
9988 case SImode:
9989 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
9990 break;
9991 case DImode:
9992 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
9993 break;
9994 default:
9995 abort ();
9996 }
9997 }
9998 else
9999 {
10000 switch (GET_MODE (operands[0]))
10001 {
10002 case QImode:
10003 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
10004 break;
10005 case HImode:
10006 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
10007 break;
10008 case SImode:
10009 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
10010 break;
10011 case DImode:
10012 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10013 break;
10014 default:
10015 abort ();
10016 }
10017 }
10018 return 1; /* DONE */
10019 }
10020
10021
10022 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
10023 works for floating pointer parameters and nonoffsetable memories.
10024 For pushes, it returns just stack offsets; the values will be saved
10025 in the right order. Maximally three parts are generated. */
10026
10027 static int
10028 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
10029 {
10030 int size;
10031
10032 if (!TARGET_64BIT)
10033 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
10034 else
10035 size = (GET_MODE_SIZE (mode) + 4) / 8;
10036
10037 if (GET_CODE (operand) == REG && MMX_REGNO_P (REGNO (operand)))
10038 abort ();
10039 if (size < 2 || size > 3)
10040 abort ();
10041
10042 /* Optimize constant pool reference to immediates. This is used by fp
10043 moves, that force all constants to memory to allow combining. */
10044 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
10045 {
10046 rtx tmp = maybe_get_pool_constant (operand);
10047 if (tmp)
10048 operand = tmp;
10049 }
10050
10051 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
10052 {
10053 /* The only non-offsetable memories we handle are pushes. */
10054 if (! push_operand (operand, VOIDmode))
10055 abort ();
10056
10057 operand = copy_rtx (operand);
10058 PUT_MODE (operand, Pmode);
10059 parts[0] = parts[1] = parts[2] = operand;
10060 }
10061 else if (!TARGET_64BIT)
10062 {
10063 if (mode == DImode)
10064 split_di (&operand, 1, &parts[0], &parts[1]);
10065 else
10066 {
10067 if (REG_P (operand))
10068 {
10069 if (!reload_completed)
10070 abort ();
10071 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
10072 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
10073 if (size == 3)
10074 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
10075 }
10076 else if (offsettable_memref_p (operand))
10077 {
10078 operand = adjust_address (operand, SImode, 0);
10079 parts[0] = operand;
10080 parts[1] = adjust_address (operand, SImode, 4);
10081 if (size == 3)
10082 parts[2] = adjust_address (operand, SImode, 8);
10083 }
10084 else if (GET_CODE (operand) == CONST_DOUBLE)
10085 {
10086 REAL_VALUE_TYPE r;
10087 long l[4];
10088
10089 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10090 switch (mode)
10091 {
10092 case XFmode:
10093 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
10094 parts[2] = gen_int_mode (l[2], SImode);
10095 break;
10096 case DFmode:
10097 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
10098 break;
10099 default:
10100 abort ();
10101 }
10102 parts[1] = gen_int_mode (l[1], SImode);
10103 parts[0] = gen_int_mode (l[0], SImode);
10104 }
10105 else
10106 abort ();
10107 }
10108 }
10109 else
10110 {
10111 if (mode == TImode)
10112 split_ti (&operand, 1, &parts[0], &parts[1]);
10113 if (mode == XFmode || mode == TFmode)
10114 {
10115 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
10116 if (REG_P (operand))
10117 {
10118 if (!reload_completed)
10119 abort ();
10120 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
10121 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
10122 }
10123 else if (offsettable_memref_p (operand))
10124 {
10125 operand = adjust_address (operand, DImode, 0);
10126 parts[0] = operand;
10127 parts[1] = adjust_address (operand, upper_mode, 8);
10128 }
10129 else if (GET_CODE (operand) == CONST_DOUBLE)
10130 {
10131 REAL_VALUE_TYPE r;
10132 long l[4];
10133
10134 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10135 real_to_target (l, &r, mode);
10136
10137 /* Do not use shift by 32 to avoid warning on 32bit systems. */
10138 if (HOST_BITS_PER_WIDE_INT >= 64)
10139 parts[0]
10140 = gen_int_mode
10141 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
10142 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
10143 DImode);
10144 else
10145 parts[0] = immed_double_const (l[0], l[1], DImode);
10146
10147 if (upper_mode == SImode)
10148 parts[1] = gen_int_mode (l[2], SImode);
10149 else if (HOST_BITS_PER_WIDE_INT >= 64)
10150 parts[1]
10151 = gen_int_mode
10152 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
10153 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
10154 DImode);
10155 else
10156 parts[1] = immed_double_const (l[2], l[3], DImode);
10157 }
10158 else
10159 abort ();
10160 }
10161 }
10162
10163 return size;
10164 }
10165
10166 /* Emit insns to perform a move or push of DI, DF, and XF values.
10167 Return false when normal moves are needed; true when all required
10168 insns have been emitted. Operands 2-4 contain the input values
10169 int the correct order; operands 5-7 contain the output values. */
10170
10171 void
10172 ix86_split_long_move (rtx operands[])
10173 {
10174 rtx part[2][3];
10175 int nparts;
10176 int push = 0;
10177 int collisions = 0;
10178 enum machine_mode mode = GET_MODE (operands[0]);
10179
10180 /* The DFmode expanders may ask us to move double.
10181 For 64bit target this is single move. By hiding the fact
10182 here we simplify i386.md splitters. */
10183 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
10184 {
10185 /* Optimize constant pool reference to immediates. This is used by
10186 fp moves, that force all constants to memory to allow combining. */
10187
10188 if (GET_CODE (operands[1]) == MEM
10189 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10190 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
10191 operands[1] = get_pool_constant (XEXP (operands[1], 0));
10192 if (push_operand (operands[0], VOIDmode))
10193 {
10194 operands[0] = copy_rtx (operands[0]);
10195 PUT_MODE (operands[0], Pmode);
10196 }
10197 else
10198 operands[0] = gen_lowpart (DImode, operands[0]);
10199 operands[1] = gen_lowpart (DImode, operands[1]);
10200 emit_move_insn (operands[0], operands[1]);
10201 return;
10202 }
10203
10204 /* The only non-offsettable memory we handle is push. */
10205 if (push_operand (operands[0], VOIDmode))
10206 push = 1;
10207 else if (GET_CODE (operands[0]) == MEM
10208 && ! offsettable_memref_p (operands[0]))
10209 abort ();
10210
10211 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
10212 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
10213
10214 /* When emitting push, take care for source operands on the stack. */
10215 if (push && GET_CODE (operands[1]) == MEM
10216 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
10217 {
10218 if (nparts == 3)
10219 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
10220 XEXP (part[1][2], 0));
10221 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
10222 XEXP (part[1][1], 0));
10223 }
10224
10225 /* We need to do copy in the right order in case an address register
10226 of the source overlaps the destination. */
10227 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
10228 {
10229 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
10230 collisions++;
10231 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10232 collisions++;
10233 if (nparts == 3
10234 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
10235 collisions++;
10236
10237 /* Collision in the middle part can be handled by reordering. */
10238 if (collisions == 1 && nparts == 3
10239 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10240 {
10241 rtx tmp;
10242 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
10243 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
10244 }
10245
10246 /* If there are more collisions, we can't handle it by reordering.
10247 Do an lea to the last part and use only one colliding move. */
10248 else if (collisions > 1)
10249 {
10250 rtx base;
10251
10252 collisions = 1;
10253
10254 base = part[0][nparts - 1];
10255
10256 /* Handle the case when the last part isn't valid for lea.
10257 Happens in 64-bit mode storing the 12-byte XFmode. */
10258 if (GET_MODE (base) != Pmode)
10259 base = gen_rtx_REG (Pmode, REGNO (base));
10260
10261 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
10262 part[1][0] = replace_equiv_address (part[1][0], base);
10263 part[1][1] = replace_equiv_address (part[1][1],
10264 plus_constant (base, UNITS_PER_WORD));
10265 if (nparts == 3)
10266 part[1][2] = replace_equiv_address (part[1][2],
10267 plus_constant (base, 8));
10268 }
10269 }
10270
10271 if (push)
10272 {
10273 if (!TARGET_64BIT)
10274 {
10275 if (nparts == 3)
10276 {
10277 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
10278 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
10279 emit_move_insn (part[0][2], part[1][2]);
10280 }
10281 }
10282 else
10283 {
10284 /* In 64bit mode we don't have 32bit push available. In case this is
10285 register, it is OK - we will just use larger counterpart. We also
10286 retype memory - these comes from attempt to avoid REX prefix on
10287 moving of second half of TFmode value. */
10288 if (GET_MODE (part[1][1]) == SImode)
10289 {
10290 if (GET_CODE (part[1][1]) == MEM)
10291 part[1][1] = adjust_address (part[1][1], DImode, 0);
10292 else if (REG_P (part[1][1]))
10293 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
10294 else
10295 abort ();
10296 if (GET_MODE (part[1][0]) == SImode)
10297 part[1][0] = part[1][1];
10298 }
10299 }
10300 emit_move_insn (part[0][1], part[1][1]);
10301 emit_move_insn (part[0][0], part[1][0]);
10302 return;
10303 }
10304
10305 /* Choose correct order to not overwrite the source before it is copied. */
10306 if ((REG_P (part[0][0])
10307 && REG_P (part[1][1])
10308 && (REGNO (part[0][0]) == REGNO (part[1][1])
10309 || (nparts == 3
10310 && REGNO (part[0][0]) == REGNO (part[1][2]))))
10311 || (collisions > 0
10312 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
10313 {
10314 if (nparts == 3)
10315 {
10316 operands[2] = part[0][2];
10317 operands[3] = part[0][1];
10318 operands[4] = part[0][0];
10319 operands[5] = part[1][2];
10320 operands[6] = part[1][1];
10321 operands[7] = part[1][0];
10322 }
10323 else
10324 {
10325 operands[2] = part[0][1];
10326 operands[3] = part[0][0];
10327 operands[5] = part[1][1];
10328 operands[6] = part[1][0];
10329 }
10330 }
10331 else
10332 {
10333 if (nparts == 3)
10334 {
10335 operands[2] = part[0][0];
10336 operands[3] = part[0][1];
10337 operands[4] = part[0][2];
10338 operands[5] = part[1][0];
10339 operands[6] = part[1][1];
10340 operands[7] = part[1][2];
10341 }
10342 else
10343 {
10344 operands[2] = part[0][0];
10345 operands[3] = part[0][1];
10346 operands[5] = part[1][0];
10347 operands[6] = part[1][1];
10348 }
10349 }
10350
10351 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
10352 if (optimize_size)
10353 {
10354 if (GET_CODE (operands[5]) == CONST_INT
10355 && operands[5] != const0_rtx
10356 && REG_P (operands[2]))
10357 {
10358 if (GET_CODE (operands[6]) == CONST_INT
10359 && INTVAL (operands[6]) == INTVAL (operands[5]))
10360 operands[6] = operands[2];
10361
10362 if (nparts == 3
10363 && GET_CODE (operands[7]) == CONST_INT
10364 && INTVAL (operands[7]) == INTVAL (operands[5]))
10365 operands[7] = operands[2];
10366 }
10367
10368 if (nparts == 3
10369 && GET_CODE (operands[6]) == CONST_INT
10370 && operands[6] != const0_rtx
10371 && REG_P (operands[3])
10372 && GET_CODE (operands[7]) == CONST_INT
10373 && INTVAL (operands[7]) == INTVAL (operands[6]))
10374 operands[7] = operands[3];
10375 }
10376
10377 emit_move_insn (operands[2], operands[5]);
10378 emit_move_insn (operands[3], operands[6]);
10379 if (nparts == 3)
10380 emit_move_insn (operands[4], operands[7]);
10381
10382 return;
10383 }
10384
10385 /* Helper function of ix86_split_ashldi used to generate an SImode
10386 left shift by a constant, either using a single shift or
10387 a sequence of add instructions. */
10388
10389 static void
10390 ix86_expand_ashlsi3_const (rtx operand, int count)
10391 {
10392 if (count == 1)
10393 emit_insn (gen_addsi3 (operand, operand, operand));
10394 else if (!optimize_size
10395 && count * ix86_cost->add <= ix86_cost->shift_const)
10396 {
10397 int i;
10398 for (i=0; i<count; i++)
10399 emit_insn (gen_addsi3 (operand, operand, operand));
10400 }
10401 else
10402 emit_insn (gen_ashlsi3 (operand, operand, GEN_INT (count)));
10403 }
10404
10405 void
10406 ix86_split_ashldi (rtx *operands, rtx scratch)
10407 {
10408 rtx low[2], high[2];
10409 int count;
10410
10411 if (GET_CODE (operands[2]) == CONST_INT)
10412 {
10413 split_di (operands, 2, low, high);
10414 count = INTVAL (operands[2]) & 63;
10415
10416 if (count >= 32)
10417 {
10418 emit_move_insn (high[0], low[1]);
10419 emit_move_insn (low[0], const0_rtx);
10420
10421 if (count > 32)
10422 ix86_expand_ashlsi3_const (high[0], count - 32);
10423 }
10424 else
10425 {
10426 if (!rtx_equal_p (operands[0], operands[1]))
10427 emit_move_insn (operands[0], operands[1]);
10428 emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
10429 ix86_expand_ashlsi3_const (low[0], count);
10430 }
10431 return;
10432 }
10433
10434 split_di (operands, 1, low, high);
10435
10436 if (operands[1] == const1_rtx)
10437 {
10438 /* Assuming we've chosen a QImode capable registers, then 1LL << N
10439 can be done with two 32-bit shifts, no branches, no cmoves. */
10440 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
10441 {
10442 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
10443
10444 ix86_expand_clear (low[0]);
10445 ix86_expand_clear (high[0]);
10446 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
10447
10448 d = gen_lowpart (QImode, low[0]);
10449 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10450 s = gen_rtx_EQ (QImode, flags, const0_rtx);
10451 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10452
10453 d = gen_lowpart (QImode, high[0]);
10454 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10455 s = gen_rtx_NE (QImode, flags, const0_rtx);
10456 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10457 }
10458
10459 /* Otherwise, we can get the same results by manually performing
10460 a bit extract operation on bit 5, and then performing the two
10461 shifts. The two methods of getting 0/1 into low/high are exactly
10462 the same size. Avoiding the shift in the bit extract case helps
10463 pentium4 a bit; no one else seems to care much either way. */
10464 else
10465 {
10466 rtx x;
10467
10468 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
10469 x = gen_rtx_ZERO_EXTEND (SImode, operands[2]);
10470 else
10471 x = gen_lowpart (SImode, operands[2]);
10472 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
10473
10474 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (5)));
10475 emit_insn (gen_andsi3 (high[0], high[0], GEN_INT (1)));
10476 emit_move_insn (low[0], high[0]);
10477 emit_insn (gen_xorsi3 (low[0], low[0], GEN_INT (1)));
10478 }
10479
10480 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10481 emit_insn (gen_ashlsi3 (high[0], high[0], operands[2]));
10482 return;
10483 }
10484
10485 if (operands[1] == constm1_rtx)
10486 {
10487 /* For -1LL << N, we can avoid the shld instruction, because we
10488 know that we're shifting 0...31 ones into a -1. */
10489 emit_move_insn (low[0], constm1_rtx);
10490 if (optimize_size)
10491 emit_move_insn (high[0], low[0]);
10492 else
10493 emit_move_insn (high[0], constm1_rtx);
10494 }
10495 else
10496 {
10497 if (!rtx_equal_p (operands[0], operands[1]))
10498 emit_move_insn (operands[0], operands[1]);
10499
10500 split_di (operands, 1, low, high);
10501 emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
10502 }
10503
10504 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10505
10506 if (TARGET_CMOVE && scratch)
10507 {
10508 ix86_expand_clear (scratch);
10509 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
10510 }
10511 else
10512 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
10513 }
10514
10515 void
10516 ix86_split_ashrdi (rtx *operands, rtx scratch)
10517 {
10518 rtx low[2], high[2];
10519 int count;
10520
10521 if (GET_CODE (operands[2]) == CONST_INT)
10522 {
10523 split_di (operands, 2, low, high);
10524 count = INTVAL (operands[2]) & 63;
10525
10526 if (count == 63)
10527 {
10528 emit_move_insn (high[0], high[1]);
10529 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10530 emit_move_insn (low[0], high[0]);
10531
10532 }
10533 else if (count >= 32)
10534 {
10535 emit_move_insn (low[0], high[1]);
10536 emit_move_insn (high[0], low[0]);
10537 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10538 if (count > 32)
10539 emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
10540 }
10541 else
10542 {
10543 if (!rtx_equal_p (operands[0], operands[1]))
10544 emit_move_insn (operands[0], operands[1]);
10545 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10546 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
10547 }
10548 }
10549 else
10550 {
10551 if (!rtx_equal_p (operands[0], operands[1]))
10552 emit_move_insn (operands[0], operands[1]);
10553
10554 split_di (operands, 1, low, high);
10555
10556 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10557 emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
10558
10559 if (TARGET_CMOVE && scratch)
10560 {
10561 emit_move_insn (scratch, high[0]);
10562 emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
10563 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10564 scratch));
10565 }
10566 else
10567 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
10568 }
10569 }
10570
10571 void
10572 ix86_split_lshrdi (rtx *operands, rtx scratch)
10573 {
10574 rtx low[2], high[2];
10575 int count;
10576
10577 if (GET_CODE (operands[2]) == CONST_INT)
10578 {
10579 split_di (operands, 2, low, high);
10580 count = INTVAL (operands[2]) & 63;
10581
10582 if (count >= 32)
10583 {
10584 emit_move_insn (low[0], high[1]);
10585 ix86_expand_clear (high[0]);
10586
10587 if (count > 32)
10588 emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
10589 }
10590 else
10591 {
10592 if (!rtx_equal_p (operands[0], operands[1]))
10593 emit_move_insn (operands[0], operands[1]);
10594 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10595 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
10596 }
10597 }
10598 else
10599 {
10600 if (!rtx_equal_p (operands[0], operands[1]))
10601 emit_move_insn (operands[0], operands[1]);
10602
10603 split_di (operands, 1, low, high);
10604
10605 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10606 emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
10607
10608 /* Heh. By reversing the arguments, we can reuse this pattern. */
10609 if (TARGET_CMOVE && scratch)
10610 {
10611 ix86_expand_clear (scratch);
10612 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10613 scratch));
10614 }
10615 else
10616 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
10617 }
10618 }
10619
10620 /* Helper function for the string operations below. Dest VARIABLE whether
10621 it is aligned to VALUE bytes. If true, jump to the label. */
10622 static rtx
10623 ix86_expand_aligntest (rtx variable, int value)
10624 {
10625 rtx label = gen_label_rtx ();
10626 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
10627 if (GET_MODE (variable) == DImode)
10628 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
10629 else
10630 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
10631 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
10632 1, label);
10633 return label;
10634 }
10635
10636 /* Adjust COUNTER by the VALUE. */
10637 static void
10638 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
10639 {
10640 if (GET_MODE (countreg) == DImode)
10641 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
10642 else
10643 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
10644 }
10645
10646 /* Zero extend possibly SImode EXP to Pmode register. */
10647 rtx
10648 ix86_zero_extend_to_Pmode (rtx exp)
10649 {
10650 rtx r;
10651 if (GET_MODE (exp) == VOIDmode)
10652 return force_reg (Pmode, exp);
10653 if (GET_MODE (exp) == Pmode)
10654 return copy_to_mode_reg (Pmode, exp);
10655 r = gen_reg_rtx (Pmode);
10656 emit_insn (gen_zero_extendsidi2 (r, exp));
10657 return r;
10658 }
10659
10660 /* Expand string move (memcpy) operation. Use i386 string operations when
10661 profitable. expand_clrmem contains similar code. */
10662 int
10663 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
10664 {
10665 rtx srcreg, destreg, countreg, srcexp, destexp;
10666 enum machine_mode counter_mode;
10667 HOST_WIDE_INT align = 0;
10668 unsigned HOST_WIDE_INT count = 0;
10669
10670 if (GET_CODE (align_exp) == CONST_INT)
10671 align = INTVAL (align_exp);
10672
10673 /* Can't use any of this if the user has appropriated esi or edi. */
10674 if (global_regs[4] || global_regs[5])
10675 return 0;
10676
10677 /* This simple hack avoids all inlining code and simplifies code below. */
10678 if (!TARGET_ALIGN_STRINGOPS)
10679 align = 64;
10680
10681 if (GET_CODE (count_exp) == CONST_INT)
10682 {
10683 count = INTVAL (count_exp);
10684 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10685 return 0;
10686 }
10687
10688 /* Figure out proper mode for counter. For 32bits it is always SImode,
10689 for 64bits use SImode when possible, otherwise DImode.
10690 Set count to number of bytes copied when known at compile time. */
10691 if (!TARGET_64BIT
10692 || GET_MODE (count_exp) == SImode
10693 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10694 counter_mode = SImode;
10695 else
10696 counter_mode = DImode;
10697
10698 if (counter_mode != SImode && counter_mode != DImode)
10699 abort ();
10700
10701 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10702 if (destreg != XEXP (dst, 0))
10703 dst = replace_equiv_address_nv (dst, destreg);
10704 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
10705 if (srcreg != XEXP (src, 0))
10706 src = replace_equiv_address_nv (src, srcreg);
10707
10708 /* When optimizing for size emit simple rep ; movsb instruction for
10709 counts not divisible by 4. */
10710
10711 if ((!optimize || optimize_size) && (count == 0 || (count & 0x03)))
10712 {
10713 emit_insn (gen_cld ());
10714 countreg = ix86_zero_extend_to_Pmode (count_exp);
10715 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10716 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
10717 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
10718 destexp, srcexp));
10719 }
10720
10721 /* For constant aligned (or small unaligned) copies use rep movsl
10722 followed by code copying the rest. For PentiumPro ensure 8 byte
10723 alignment to allow rep movsl acceleration. */
10724
10725 else if (count != 0
10726 && (align >= 8
10727 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
10728 || optimize_size || count < (unsigned int) 64))
10729 {
10730 unsigned HOST_WIDE_INT offset = 0;
10731 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
10732 rtx srcmem, dstmem;
10733
10734 emit_insn (gen_cld ());
10735 if (count & ~(size - 1))
10736 {
10737 countreg = copy_to_mode_reg (counter_mode,
10738 GEN_INT ((count >> (size == 4 ? 2 : 3))
10739 & (TARGET_64BIT ? -1 : 0x3fffffff)));
10740 countreg = ix86_zero_extend_to_Pmode (countreg);
10741
10742 destexp = gen_rtx_ASHIFT (Pmode, countreg,
10743 GEN_INT (size == 4 ? 2 : 3));
10744 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10745 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10746
10747 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10748 countreg, destexp, srcexp));
10749 offset = count & ~(size - 1);
10750 }
10751 if (size == 8 && (count & 0x04))
10752 {
10753 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
10754 offset);
10755 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
10756 offset);
10757 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10758 offset += 4;
10759 }
10760 if (count & 0x02)
10761 {
10762 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
10763 offset);
10764 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
10765 offset);
10766 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10767 offset += 2;
10768 }
10769 if (count & 0x01)
10770 {
10771 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
10772 offset);
10773 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
10774 offset);
10775 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10776 }
10777 }
10778 /* The generic code based on the glibc implementation:
10779 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
10780 allowing accelerated copying there)
10781 - copy the data using rep movsl
10782 - copy the rest. */
10783 else
10784 {
10785 rtx countreg2;
10786 rtx label = NULL;
10787 rtx srcmem, dstmem;
10788 int desired_alignment = (TARGET_PENTIUMPRO
10789 && (count == 0 || count >= (unsigned int) 260)
10790 ? 8 : UNITS_PER_WORD);
10791 /* Get rid of MEM_OFFSETs, they won't be accurate. */
10792 dst = change_address (dst, BLKmode, destreg);
10793 src = change_address (src, BLKmode, srcreg);
10794
10795 /* In case we don't know anything about the alignment, default to
10796 library version, since it is usually equally fast and result in
10797 shorter code.
10798
10799 Also emit call when we know that the count is large and call overhead
10800 will not be important. */
10801 if (!TARGET_INLINE_ALL_STRINGOPS
10802 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
10803 return 0;
10804
10805 if (TARGET_SINGLE_STRINGOP)
10806 emit_insn (gen_cld ());
10807
10808 countreg2 = gen_reg_rtx (Pmode);
10809 countreg = copy_to_mode_reg (counter_mode, count_exp);
10810
10811 /* We don't use loops to align destination and to copy parts smaller
10812 than 4 bytes, because gcc is able to optimize such code better (in
10813 the case the destination or the count really is aligned, gcc is often
10814 able to predict the branches) and also it is friendlier to the
10815 hardware branch prediction.
10816
10817 Using loops is beneficial for generic case, because we can
10818 handle small counts using the loops. Many CPUs (such as Athlon)
10819 have large REP prefix setup costs.
10820
10821 This is quite costly. Maybe we can revisit this decision later or
10822 add some customizability to this code. */
10823
10824 if (count == 0 && align < desired_alignment)
10825 {
10826 label = gen_label_rtx ();
10827 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
10828 LEU, 0, counter_mode, 1, label);
10829 }
10830 if (align <= 1)
10831 {
10832 rtx label = ix86_expand_aligntest (destreg, 1);
10833 srcmem = change_address (src, QImode, srcreg);
10834 dstmem = change_address (dst, QImode, destreg);
10835 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10836 ix86_adjust_counter (countreg, 1);
10837 emit_label (label);
10838 LABEL_NUSES (label) = 1;
10839 }
10840 if (align <= 2)
10841 {
10842 rtx label = ix86_expand_aligntest (destreg, 2);
10843 srcmem = change_address (src, HImode, srcreg);
10844 dstmem = change_address (dst, HImode, destreg);
10845 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10846 ix86_adjust_counter (countreg, 2);
10847 emit_label (label);
10848 LABEL_NUSES (label) = 1;
10849 }
10850 if (align <= 4 && desired_alignment > 4)
10851 {
10852 rtx label = ix86_expand_aligntest (destreg, 4);
10853 srcmem = change_address (src, SImode, srcreg);
10854 dstmem = change_address (dst, SImode, destreg);
10855 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10856 ix86_adjust_counter (countreg, 4);
10857 emit_label (label);
10858 LABEL_NUSES (label) = 1;
10859 }
10860
10861 if (label && desired_alignment > 4 && !TARGET_64BIT)
10862 {
10863 emit_label (label);
10864 LABEL_NUSES (label) = 1;
10865 label = NULL_RTX;
10866 }
10867 if (!TARGET_SINGLE_STRINGOP)
10868 emit_insn (gen_cld ());
10869 if (TARGET_64BIT)
10870 {
10871 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
10872 GEN_INT (3)));
10873 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
10874 }
10875 else
10876 {
10877 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
10878 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
10879 }
10880 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10881 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10882 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10883 countreg2, destexp, srcexp));
10884
10885 if (label)
10886 {
10887 emit_label (label);
10888 LABEL_NUSES (label) = 1;
10889 }
10890 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
10891 {
10892 srcmem = change_address (src, SImode, srcreg);
10893 dstmem = change_address (dst, SImode, destreg);
10894 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10895 }
10896 if ((align <= 4 || count == 0) && TARGET_64BIT)
10897 {
10898 rtx label = ix86_expand_aligntest (countreg, 4);
10899 srcmem = change_address (src, SImode, srcreg);
10900 dstmem = change_address (dst, SImode, destreg);
10901 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10902 emit_label (label);
10903 LABEL_NUSES (label) = 1;
10904 }
10905 if (align > 2 && count != 0 && (count & 2))
10906 {
10907 srcmem = change_address (src, HImode, srcreg);
10908 dstmem = change_address (dst, HImode, destreg);
10909 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10910 }
10911 if (align <= 2 || count == 0)
10912 {
10913 rtx label = ix86_expand_aligntest (countreg, 2);
10914 srcmem = change_address (src, HImode, srcreg);
10915 dstmem = change_address (dst, HImode, destreg);
10916 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10917 emit_label (label);
10918 LABEL_NUSES (label) = 1;
10919 }
10920 if (align > 1 && count != 0 && (count & 1))
10921 {
10922 srcmem = change_address (src, QImode, srcreg);
10923 dstmem = change_address (dst, QImode, destreg);
10924 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10925 }
10926 if (align <= 1 || count == 0)
10927 {
10928 rtx label = ix86_expand_aligntest (countreg, 1);
10929 srcmem = change_address (src, QImode, srcreg);
10930 dstmem = change_address (dst, QImode, destreg);
10931 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10932 emit_label (label);
10933 LABEL_NUSES (label) = 1;
10934 }
10935 }
10936
10937 return 1;
10938 }
10939
10940 /* Expand string clear operation (bzero). Use i386 string operations when
10941 profitable. expand_movmem contains similar code. */
10942 int
10943 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
10944 {
10945 rtx destreg, zeroreg, countreg, destexp;
10946 enum machine_mode counter_mode;
10947 HOST_WIDE_INT align = 0;
10948 unsigned HOST_WIDE_INT count = 0;
10949
10950 if (GET_CODE (align_exp) == CONST_INT)
10951 align = INTVAL (align_exp);
10952
10953 /* Can't use any of this if the user has appropriated esi. */
10954 if (global_regs[4])
10955 return 0;
10956
10957 /* This simple hack avoids all inlining code and simplifies code below. */
10958 if (!TARGET_ALIGN_STRINGOPS)
10959 align = 32;
10960
10961 if (GET_CODE (count_exp) == CONST_INT)
10962 {
10963 count = INTVAL (count_exp);
10964 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10965 return 0;
10966 }
10967 /* Figure out proper mode for counter. For 32bits it is always SImode,
10968 for 64bits use SImode when possible, otherwise DImode.
10969 Set count to number of bytes copied when known at compile time. */
10970 if (!TARGET_64BIT
10971 || GET_MODE (count_exp) == SImode
10972 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10973 counter_mode = SImode;
10974 else
10975 counter_mode = DImode;
10976
10977 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10978 if (destreg != XEXP (dst, 0))
10979 dst = replace_equiv_address_nv (dst, destreg);
10980
10981
10982 /* When optimizing for size emit simple rep ; movsb instruction for
10983 counts not divisible by 4. The movl $N, %ecx; rep; stosb
10984 sequence is 7 bytes long, so if optimizing for size and count is
10985 small enough that some stosl, stosw and stosb instructions without
10986 rep are shorter, fall back into the next if. */
10987
10988 if ((!optimize || optimize_size)
10989 && (count == 0
10990 || ((count & 0x03)
10991 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
10992 {
10993 emit_insn (gen_cld ());
10994
10995 countreg = ix86_zero_extend_to_Pmode (count_exp);
10996 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
10997 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10998 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
10999 }
11000 else if (count != 0
11001 && (align >= 8
11002 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11003 || optimize_size || count < (unsigned int) 64))
11004 {
11005 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11006 unsigned HOST_WIDE_INT offset = 0;
11007
11008 emit_insn (gen_cld ());
11009
11010 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
11011 if (count & ~(size - 1))
11012 {
11013 unsigned HOST_WIDE_INT repcount;
11014 unsigned int max_nonrep;
11015
11016 repcount = count >> (size == 4 ? 2 : 3);
11017 if (!TARGET_64BIT)
11018 repcount &= 0x3fffffff;
11019
11020 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
11021 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
11022 bytes. In both cases the latter seems to be faster for small
11023 values of N. */
11024 max_nonrep = size == 4 ? 7 : 4;
11025 if (!optimize_size)
11026 switch (ix86_tune)
11027 {
11028 case PROCESSOR_PENTIUM4:
11029 case PROCESSOR_NOCONA:
11030 max_nonrep = 3;
11031 break;
11032 default:
11033 break;
11034 }
11035
11036 if (repcount <= max_nonrep)
11037 while (repcount-- > 0)
11038 {
11039 rtx mem = adjust_automodify_address_nv (dst,
11040 GET_MODE (zeroreg),
11041 destreg, offset);
11042 emit_insn (gen_strset (destreg, mem, zeroreg));
11043 offset += size;
11044 }
11045 else
11046 {
11047 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
11048 countreg = ix86_zero_extend_to_Pmode (countreg);
11049 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11050 GEN_INT (size == 4 ? 2 : 3));
11051 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11052 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
11053 destexp));
11054 offset = count & ~(size - 1);
11055 }
11056 }
11057 if (size == 8 && (count & 0x04))
11058 {
11059 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
11060 offset);
11061 emit_insn (gen_strset (destreg, mem,
11062 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11063 offset += 4;
11064 }
11065 if (count & 0x02)
11066 {
11067 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
11068 offset);
11069 emit_insn (gen_strset (destreg, mem,
11070 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11071 offset += 2;
11072 }
11073 if (count & 0x01)
11074 {
11075 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
11076 offset);
11077 emit_insn (gen_strset (destreg, mem,
11078 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11079 }
11080 }
11081 else
11082 {
11083 rtx countreg2;
11084 rtx label = NULL;
11085 /* Compute desired alignment of the string operation. */
11086 int desired_alignment = (TARGET_PENTIUMPRO
11087 && (count == 0 || count >= (unsigned int) 260)
11088 ? 8 : UNITS_PER_WORD);
11089
11090 /* In case we don't know anything about the alignment, default to
11091 library version, since it is usually equally fast and result in
11092 shorter code.
11093
11094 Also emit call when we know that the count is large and call overhead
11095 will not be important. */
11096 if (!TARGET_INLINE_ALL_STRINGOPS
11097 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11098 return 0;
11099
11100 if (TARGET_SINGLE_STRINGOP)
11101 emit_insn (gen_cld ());
11102
11103 countreg2 = gen_reg_rtx (Pmode);
11104 countreg = copy_to_mode_reg (counter_mode, count_exp);
11105 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
11106 /* Get rid of MEM_OFFSET, it won't be accurate. */
11107 dst = change_address (dst, BLKmode, destreg);
11108
11109 if (count == 0 && align < desired_alignment)
11110 {
11111 label = gen_label_rtx ();
11112 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11113 LEU, 0, counter_mode, 1, label);
11114 }
11115 if (align <= 1)
11116 {
11117 rtx label = ix86_expand_aligntest (destreg, 1);
11118 emit_insn (gen_strset (destreg, dst,
11119 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11120 ix86_adjust_counter (countreg, 1);
11121 emit_label (label);
11122 LABEL_NUSES (label) = 1;
11123 }
11124 if (align <= 2)
11125 {
11126 rtx label = ix86_expand_aligntest (destreg, 2);
11127 emit_insn (gen_strset (destreg, dst,
11128 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11129 ix86_adjust_counter (countreg, 2);
11130 emit_label (label);
11131 LABEL_NUSES (label) = 1;
11132 }
11133 if (align <= 4 && desired_alignment > 4)
11134 {
11135 rtx label = ix86_expand_aligntest (destreg, 4);
11136 emit_insn (gen_strset (destreg, dst,
11137 (TARGET_64BIT
11138 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
11139 : zeroreg)));
11140 ix86_adjust_counter (countreg, 4);
11141 emit_label (label);
11142 LABEL_NUSES (label) = 1;
11143 }
11144
11145 if (label && desired_alignment > 4 && !TARGET_64BIT)
11146 {
11147 emit_label (label);
11148 LABEL_NUSES (label) = 1;
11149 label = NULL_RTX;
11150 }
11151
11152 if (!TARGET_SINGLE_STRINGOP)
11153 emit_insn (gen_cld ());
11154 if (TARGET_64BIT)
11155 {
11156 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11157 GEN_INT (3)));
11158 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
11159 }
11160 else
11161 {
11162 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
11163 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
11164 }
11165 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11166 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
11167
11168 if (label)
11169 {
11170 emit_label (label);
11171 LABEL_NUSES (label) = 1;
11172 }
11173
11174 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11175 emit_insn (gen_strset (destreg, dst,
11176 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11177 if (TARGET_64BIT && (align <= 4 || count == 0))
11178 {
11179 rtx label = ix86_expand_aligntest (countreg, 4);
11180 emit_insn (gen_strset (destreg, dst,
11181 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11182 emit_label (label);
11183 LABEL_NUSES (label) = 1;
11184 }
11185 if (align > 2 && count != 0 && (count & 2))
11186 emit_insn (gen_strset (destreg, dst,
11187 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11188 if (align <= 2 || count == 0)
11189 {
11190 rtx label = ix86_expand_aligntest (countreg, 2);
11191 emit_insn (gen_strset (destreg, dst,
11192 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11193 emit_label (label);
11194 LABEL_NUSES (label) = 1;
11195 }
11196 if (align > 1 && count != 0 && (count & 1))
11197 emit_insn (gen_strset (destreg, dst,
11198 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11199 if (align <= 1 || count == 0)
11200 {
11201 rtx label = ix86_expand_aligntest (countreg, 1);
11202 emit_insn (gen_strset (destreg, dst,
11203 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11204 emit_label (label);
11205 LABEL_NUSES (label) = 1;
11206 }
11207 }
11208 return 1;
11209 }
11210
11211 /* Expand strlen. */
11212 int
11213 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
11214 {
11215 rtx addr, scratch1, scratch2, scratch3, scratch4;
11216
11217 /* The generic case of strlen expander is long. Avoid it's
11218 expanding unless TARGET_INLINE_ALL_STRINGOPS. */
11219
11220 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11221 && !TARGET_INLINE_ALL_STRINGOPS
11222 && !optimize_size
11223 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
11224 return 0;
11225
11226 addr = force_reg (Pmode, XEXP (src, 0));
11227 scratch1 = gen_reg_rtx (Pmode);
11228
11229 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11230 && !optimize_size)
11231 {
11232 /* Well it seems that some optimizer does not combine a call like
11233 foo(strlen(bar), strlen(bar));
11234 when the move and the subtraction is done here. It does calculate
11235 the length just once when these instructions are done inside of
11236 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
11237 often used and I use one fewer register for the lifetime of
11238 output_strlen_unroll() this is better. */
11239
11240 emit_move_insn (out, addr);
11241
11242 ix86_expand_strlensi_unroll_1 (out, src, align);
11243
11244 /* strlensi_unroll_1 returns the address of the zero at the end of
11245 the string, like memchr(), so compute the length by subtracting
11246 the start address. */
11247 if (TARGET_64BIT)
11248 emit_insn (gen_subdi3 (out, out, addr));
11249 else
11250 emit_insn (gen_subsi3 (out, out, addr));
11251 }
11252 else
11253 {
11254 rtx unspec;
11255 scratch2 = gen_reg_rtx (Pmode);
11256 scratch3 = gen_reg_rtx (Pmode);
11257 scratch4 = force_reg (Pmode, constm1_rtx);
11258
11259 emit_move_insn (scratch3, addr);
11260 eoschar = force_reg (QImode, eoschar);
11261
11262 emit_insn (gen_cld ());
11263 src = replace_equiv_address_nv (src, scratch3);
11264
11265 /* If .md starts supporting :P, this can be done in .md. */
11266 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
11267 scratch4), UNSPEC_SCAS);
11268 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
11269 if (TARGET_64BIT)
11270 {
11271 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
11272 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
11273 }
11274 else
11275 {
11276 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
11277 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
11278 }
11279 }
11280 return 1;
11281 }
11282
11283 /* Expand the appropriate insns for doing strlen if not just doing
11284 repnz; scasb
11285
11286 out = result, initialized with the start address
11287 align_rtx = alignment of the address.
11288 scratch = scratch register, initialized with the startaddress when
11289 not aligned, otherwise undefined
11290
11291 This is just the body. It needs the initializations mentioned above and
11292 some address computing at the end. These things are done in i386.md. */
11293
11294 static void
11295 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
11296 {
11297 int align;
11298 rtx tmp;
11299 rtx align_2_label = NULL_RTX;
11300 rtx align_3_label = NULL_RTX;
11301 rtx align_4_label = gen_label_rtx ();
11302 rtx end_0_label = gen_label_rtx ();
11303 rtx mem;
11304 rtx tmpreg = gen_reg_rtx (SImode);
11305 rtx scratch = gen_reg_rtx (SImode);
11306 rtx cmp;
11307
11308 align = 0;
11309 if (GET_CODE (align_rtx) == CONST_INT)
11310 align = INTVAL (align_rtx);
11311
11312 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
11313
11314 /* Is there a known alignment and is it less than 4? */
11315 if (align < 4)
11316 {
11317 rtx scratch1 = gen_reg_rtx (Pmode);
11318 emit_move_insn (scratch1, out);
11319 /* Is there a known alignment and is it not 2? */
11320 if (align != 2)
11321 {
11322 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
11323 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
11324
11325 /* Leave just the 3 lower bits. */
11326 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
11327 NULL_RTX, 0, OPTAB_WIDEN);
11328
11329 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
11330 Pmode, 1, align_4_label);
11331 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
11332 Pmode, 1, align_2_label);
11333 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
11334 Pmode, 1, align_3_label);
11335 }
11336 else
11337 {
11338 /* Since the alignment is 2, we have to check 2 or 0 bytes;
11339 check if is aligned to 4 - byte. */
11340
11341 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
11342 NULL_RTX, 0, OPTAB_WIDEN);
11343
11344 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
11345 Pmode, 1, align_4_label);
11346 }
11347
11348 mem = change_address (src, QImode, out);
11349
11350 /* Now compare the bytes. */
11351
11352 /* Compare the first n unaligned byte on a byte per byte basis. */
11353 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
11354 QImode, 1, end_0_label);
11355
11356 /* Increment the address. */
11357 if (TARGET_64BIT)
11358 emit_insn (gen_adddi3 (out, out, const1_rtx));
11359 else
11360 emit_insn (gen_addsi3 (out, out, const1_rtx));
11361
11362 /* Not needed with an alignment of 2 */
11363 if (align != 2)
11364 {
11365 emit_label (align_2_label);
11366
11367 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11368 end_0_label);
11369
11370 if (TARGET_64BIT)
11371 emit_insn (gen_adddi3 (out, out, const1_rtx));
11372 else
11373 emit_insn (gen_addsi3 (out, out, const1_rtx));
11374
11375 emit_label (align_3_label);
11376 }
11377
11378 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11379 end_0_label);
11380
11381 if (TARGET_64BIT)
11382 emit_insn (gen_adddi3 (out, out, const1_rtx));
11383 else
11384 emit_insn (gen_addsi3 (out, out, const1_rtx));
11385 }
11386
11387 /* Generate loop to check 4 bytes at a time. It is not a good idea to
11388 align this loop. It gives only huge programs, but does not help to
11389 speed up. */
11390 emit_label (align_4_label);
11391
11392 mem = change_address (src, SImode, out);
11393 emit_move_insn (scratch, mem);
11394 if (TARGET_64BIT)
11395 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
11396 else
11397 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
11398
11399 /* This formula yields a nonzero result iff one of the bytes is zero.
11400 This saves three branches inside loop and many cycles. */
11401
11402 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
11403 emit_insn (gen_one_cmplsi2 (scratch, scratch));
11404 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
11405 emit_insn (gen_andsi3 (tmpreg, tmpreg,
11406 gen_int_mode (0x80808080, SImode)));
11407 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
11408 align_4_label);
11409
11410 if (TARGET_CMOVE)
11411 {
11412 rtx reg = gen_reg_rtx (SImode);
11413 rtx reg2 = gen_reg_rtx (Pmode);
11414 emit_move_insn (reg, tmpreg);
11415 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
11416
11417 /* If zero is not in the first two bytes, move two bytes forward. */
11418 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11419 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11420 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11421 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
11422 gen_rtx_IF_THEN_ELSE (SImode, tmp,
11423 reg,
11424 tmpreg)));
11425 /* Emit lea manually to avoid clobbering of flags. */
11426 emit_insn (gen_rtx_SET (SImode, reg2,
11427 gen_rtx_PLUS (Pmode, out, const2_rtx)));
11428
11429 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11430 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11431 emit_insn (gen_rtx_SET (VOIDmode, out,
11432 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
11433 reg2,
11434 out)));
11435
11436 }
11437 else
11438 {
11439 rtx end_2_label = gen_label_rtx ();
11440 /* Is zero in the first two bytes? */
11441
11442 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11443 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11444 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
11445 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
11446 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
11447 pc_rtx);
11448 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
11449 JUMP_LABEL (tmp) = end_2_label;
11450
11451 /* Not in the first two. Move two bytes forward. */
11452 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
11453 if (TARGET_64BIT)
11454 emit_insn (gen_adddi3 (out, out, const2_rtx));
11455 else
11456 emit_insn (gen_addsi3 (out, out, const2_rtx));
11457
11458 emit_label (end_2_label);
11459
11460 }
11461
11462 /* Avoid branch in fixing the byte. */
11463 tmpreg = gen_lowpart (QImode, tmpreg);
11464 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
11465 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
11466 if (TARGET_64BIT)
11467 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
11468 else
11469 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
11470
11471 emit_label (end_0_label);
11472 }
11473
11474 void
11475 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
11476 rtx callarg2 ATTRIBUTE_UNUSED,
11477 rtx pop, int sibcall)
11478 {
11479 rtx use = NULL, call;
11480
11481 if (pop == const0_rtx)
11482 pop = NULL;
11483 if (TARGET_64BIT && pop)
11484 abort ();
11485
11486 #if TARGET_MACHO
11487 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
11488 fnaddr = machopic_indirect_call_target (fnaddr);
11489 #else
11490 /* Static functions and indirect calls don't need the pic register. */
11491 if (! TARGET_64BIT && flag_pic
11492 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
11493 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
11494 use_reg (&use, pic_offset_table_rtx);
11495
11496 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
11497 {
11498 rtx al = gen_rtx_REG (QImode, 0);
11499 emit_move_insn (al, callarg2);
11500 use_reg (&use, al);
11501 }
11502 #endif /* TARGET_MACHO */
11503
11504 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
11505 {
11506 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11507 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11508 }
11509 if (sibcall && TARGET_64BIT
11510 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
11511 {
11512 rtx addr;
11513 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11514 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
11515 emit_move_insn (fnaddr, addr);
11516 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11517 }
11518
11519 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
11520 if (retval)
11521 call = gen_rtx_SET (VOIDmode, retval, call);
11522 if (pop)
11523 {
11524 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
11525 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
11526 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
11527 }
11528
11529 call = emit_call_insn (call);
11530 if (use)
11531 CALL_INSN_FUNCTION_USAGE (call) = use;
11532 }
11533
11534 \f
11535 /* Clear stack slot assignments remembered from previous functions.
11536 This is called from INIT_EXPANDERS once before RTL is emitted for each
11537 function. */
11538
11539 static struct machine_function *
11540 ix86_init_machine_status (void)
11541 {
11542 struct machine_function *f;
11543
11544 f = ggc_alloc_cleared (sizeof (struct machine_function));
11545 f->use_fast_prologue_epilogue_nregs = -1;
11546
11547 return f;
11548 }
11549
11550 /* Return a MEM corresponding to a stack slot with mode MODE.
11551 Allocate a new slot if necessary.
11552
11553 The RTL for a function can have several slots available: N is
11554 which slot to use. */
11555
11556 rtx
11557 assign_386_stack_local (enum machine_mode mode, int n)
11558 {
11559 struct stack_local_entry *s;
11560
11561 if (n < 0 || n >= MAX_386_STACK_LOCALS)
11562 abort ();
11563
11564 for (s = ix86_stack_locals; s; s = s->next)
11565 if (s->mode == mode && s->n == n)
11566 return s->rtl;
11567
11568 s = (struct stack_local_entry *)
11569 ggc_alloc (sizeof (struct stack_local_entry));
11570 s->n = n;
11571 s->mode = mode;
11572 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
11573
11574 s->next = ix86_stack_locals;
11575 ix86_stack_locals = s;
11576 return s->rtl;
11577 }
11578
11579 /* Construct the SYMBOL_REF for the tls_get_addr function. */
11580
11581 static GTY(()) rtx ix86_tls_symbol;
11582 rtx
11583 ix86_tls_get_addr (void)
11584 {
11585
11586 if (!ix86_tls_symbol)
11587 {
11588 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
11589 (TARGET_GNU_TLS && !TARGET_64BIT)
11590 ? "___tls_get_addr"
11591 : "__tls_get_addr");
11592 }
11593
11594 return ix86_tls_symbol;
11595 }
11596 \f
11597 /* Calculate the length of the memory address in the instruction
11598 encoding. Does not include the one-byte modrm, opcode, or prefix. */
11599
11600 int
11601 memory_address_length (rtx addr)
11602 {
11603 struct ix86_address parts;
11604 rtx base, index, disp;
11605 int len;
11606
11607 if (GET_CODE (addr) == PRE_DEC
11608 || GET_CODE (addr) == POST_INC
11609 || GET_CODE (addr) == PRE_MODIFY
11610 || GET_CODE (addr) == POST_MODIFY)
11611 return 0;
11612
11613 if (! ix86_decompose_address (addr, &parts))
11614 abort ();
11615
11616 base = parts.base;
11617 index = parts.index;
11618 disp = parts.disp;
11619 len = 0;
11620
11621 /* Rule of thumb:
11622 - esp as the base always wants an index,
11623 - ebp as the base always wants a displacement. */
11624
11625 /* Register Indirect. */
11626 if (base && !index && !disp)
11627 {
11628 /* esp (for its index) and ebp (for its displacement) need
11629 the two-byte modrm form. */
11630 if (addr == stack_pointer_rtx
11631 || addr == arg_pointer_rtx
11632 || addr == frame_pointer_rtx
11633 || addr == hard_frame_pointer_rtx)
11634 len = 1;
11635 }
11636
11637 /* Direct Addressing. */
11638 else if (disp && !base && !index)
11639 len = 4;
11640
11641 else
11642 {
11643 /* Find the length of the displacement constant. */
11644 if (disp)
11645 {
11646 if (GET_CODE (disp) == CONST_INT
11647 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
11648 && base)
11649 len = 1;
11650 else
11651 len = 4;
11652 }
11653 /* ebp always wants a displacement. */
11654 else if (base == hard_frame_pointer_rtx)
11655 len = 1;
11656
11657 /* An index requires the two-byte modrm form.... */
11658 if (index
11659 /* ...like esp, which always wants an index. */
11660 || base == stack_pointer_rtx
11661 || base == arg_pointer_rtx
11662 || base == frame_pointer_rtx)
11663 len += 1;
11664 }
11665
11666 return len;
11667 }
11668
11669 /* Compute default value for "length_immediate" attribute. When SHORTFORM
11670 is set, expect that insn have 8bit immediate alternative. */
11671 int
11672 ix86_attr_length_immediate_default (rtx insn, int shortform)
11673 {
11674 int len = 0;
11675 int i;
11676 extract_insn_cached (insn);
11677 for (i = recog_data.n_operands - 1; i >= 0; --i)
11678 if (CONSTANT_P (recog_data.operand[i]))
11679 {
11680 if (len)
11681 abort ();
11682 if (shortform
11683 && GET_CODE (recog_data.operand[i]) == CONST_INT
11684 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
11685 len = 1;
11686 else
11687 {
11688 switch (get_attr_mode (insn))
11689 {
11690 case MODE_QI:
11691 len+=1;
11692 break;
11693 case MODE_HI:
11694 len+=2;
11695 break;
11696 case MODE_SI:
11697 len+=4;
11698 break;
11699 /* Immediates for DImode instructions are encoded as 32bit sign extended values. */
11700 case MODE_DI:
11701 len+=4;
11702 break;
11703 default:
11704 fatal_insn ("unknown insn mode", insn);
11705 }
11706 }
11707 }
11708 return len;
11709 }
11710 /* Compute default value for "length_address" attribute. */
11711 int
11712 ix86_attr_length_address_default (rtx insn)
11713 {
11714 int i;
11715
11716 if (get_attr_type (insn) == TYPE_LEA)
11717 {
11718 rtx set = PATTERN (insn);
11719 if (GET_CODE (set) == SET)
11720 ;
11721 else if (GET_CODE (set) == PARALLEL
11722 && GET_CODE (XVECEXP (set, 0, 0)) == SET)
11723 set = XVECEXP (set, 0, 0);
11724 else
11725 {
11726 #ifdef ENABLE_CHECKING
11727 abort ();
11728 #endif
11729 return 0;
11730 }
11731
11732 return memory_address_length (SET_SRC (set));
11733 }
11734
11735 extract_insn_cached (insn);
11736 for (i = recog_data.n_operands - 1; i >= 0; --i)
11737 if (GET_CODE (recog_data.operand[i]) == MEM)
11738 {
11739 return memory_address_length (XEXP (recog_data.operand[i], 0));
11740 break;
11741 }
11742 return 0;
11743 }
11744 \f
11745 /* Return the maximum number of instructions a cpu can issue. */
11746
11747 static int
11748 ix86_issue_rate (void)
11749 {
11750 switch (ix86_tune)
11751 {
11752 case PROCESSOR_PENTIUM:
11753 case PROCESSOR_K6:
11754 return 2;
11755
11756 case PROCESSOR_PENTIUMPRO:
11757 case PROCESSOR_PENTIUM4:
11758 case PROCESSOR_ATHLON:
11759 case PROCESSOR_K8:
11760 case PROCESSOR_NOCONA:
11761 return 3;
11762
11763 default:
11764 return 1;
11765 }
11766 }
11767
11768 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
11769 by DEP_INSN and nothing set by DEP_INSN. */
11770
11771 static int
11772 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11773 {
11774 rtx set, set2;
11775
11776 /* Simplify the test for uninteresting insns. */
11777 if (insn_type != TYPE_SETCC
11778 && insn_type != TYPE_ICMOV
11779 && insn_type != TYPE_FCMOV
11780 && insn_type != TYPE_IBR)
11781 return 0;
11782
11783 if ((set = single_set (dep_insn)) != 0)
11784 {
11785 set = SET_DEST (set);
11786 set2 = NULL_RTX;
11787 }
11788 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
11789 && XVECLEN (PATTERN (dep_insn), 0) == 2
11790 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
11791 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
11792 {
11793 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
11794 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
11795 }
11796 else
11797 return 0;
11798
11799 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
11800 return 0;
11801
11802 /* This test is true if the dependent insn reads the flags but
11803 not any other potentially set register. */
11804 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
11805 return 0;
11806
11807 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
11808 return 0;
11809
11810 return 1;
11811 }
11812
11813 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
11814 address with operands set by DEP_INSN. */
11815
11816 static int
11817 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11818 {
11819 rtx addr;
11820
11821 if (insn_type == TYPE_LEA
11822 && TARGET_PENTIUM)
11823 {
11824 addr = PATTERN (insn);
11825 if (GET_CODE (addr) == SET)
11826 ;
11827 else if (GET_CODE (addr) == PARALLEL
11828 && GET_CODE (XVECEXP (addr, 0, 0)) == SET)
11829 addr = XVECEXP (addr, 0, 0);
11830 else
11831 abort ();
11832 addr = SET_SRC (addr);
11833 }
11834 else
11835 {
11836 int i;
11837 extract_insn_cached (insn);
11838 for (i = recog_data.n_operands - 1; i >= 0; --i)
11839 if (GET_CODE (recog_data.operand[i]) == MEM)
11840 {
11841 addr = XEXP (recog_data.operand[i], 0);
11842 goto found;
11843 }
11844 return 0;
11845 found:;
11846 }
11847
11848 return modified_in_p (addr, dep_insn);
11849 }
11850
11851 static int
11852 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
11853 {
11854 enum attr_type insn_type, dep_insn_type;
11855 enum attr_memory memory;
11856 rtx set, set2;
11857 int dep_insn_code_number;
11858
11859 /* Anti and output dependencies have zero cost on all CPUs. */
11860 if (REG_NOTE_KIND (link) != 0)
11861 return 0;
11862
11863 dep_insn_code_number = recog_memoized (dep_insn);
11864
11865 /* If we can't recognize the insns, we can't really do anything. */
11866 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
11867 return cost;
11868
11869 insn_type = get_attr_type (insn);
11870 dep_insn_type = get_attr_type (dep_insn);
11871
11872 switch (ix86_tune)
11873 {
11874 case PROCESSOR_PENTIUM:
11875 /* Address Generation Interlock adds a cycle of latency. */
11876 if (ix86_agi_dependant (insn, dep_insn, insn_type))
11877 cost += 1;
11878
11879 /* ??? Compares pair with jump/setcc. */
11880 if (ix86_flags_dependant (insn, dep_insn, insn_type))
11881 cost = 0;
11882
11883 /* Floating point stores require value to be ready one cycle earlier. */
11884 if (insn_type == TYPE_FMOV
11885 && get_attr_memory (insn) == MEMORY_STORE
11886 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11887 cost += 1;
11888 break;
11889
11890 case PROCESSOR_PENTIUMPRO:
11891 memory = get_attr_memory (insn);
11892
11893 /* INT->FP conversion is expensive. */
11894 if (get_attr_fp_int_src (dep_insn))
11895 cost += 5;
11896
11897 /* There is one cycle extra latency between an FP op and a store. */
11898 if (insn_type == TYPE_FMOV
11899 && (set = single_set (dep_insn)) != NULL_RTX
11900 && (set2 = single_set (insn)) != NULL_RTX
11901 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
11902 && GET_CODE (SET_DEST (set2)) == MEM)
11903 cost += 1;
11904
11905 /* Show ability of reorder buffer to hide latency of load by executing
11906 in parallel with previous instruction in case
11907 previous instruction is not needed to compute the address. */
11908 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11909 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11910 {
11911 /* Claim moves to take one cycle, as core can issue one load
11912 at time and the next load can start cycle later. */
11913 if (dep_insn_type == TYPE_IMOV
11914 || dep_insn_type == TYPE_FMOV)
11915 cost = 1;
11916 else if (cost > 1)
11917 cost--;
11918 }
11919 break;
11920
11921 case PROCESSOR_K6:
11922 memory = get_attr_memory (insn);
11923
11924 /* The esp dependency is resolved before the instruction is really
11925 finished. */
11926 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
11927 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
11928 return 1;
11929
11930 /* INT->FP conversion is expensive. */
11931 if (get_attr_fp_int_src (dep_insn))
11932 cost += 5;
11933
11934 /* Show ability of reorder buffer to hide latency of load by executing
11935 in parallel with previous instruction in case
11936 previous instruction is not needed to compute the address. */
11937 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11938 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11939 {
11940 /* Claim moves to take one cycle, as core can issue one load
11941 at time and the next load can start cycle later. */
11942 if (dep_insn_type == TYPE_IMOV
11943 || dep_insn_type == TYPE_FMOV)
11944 cost = 1;
11945 else if (cost > 2)
11946 cost -= 2;
11947 else
11948 cost = 1;
11949 }
11950 break;
11951
11952 case PROCESSOR_ATHLON:
11953 case PROCESSOR_K8:
11954 memory = get_attr_memory (insn);
11955
11956 /* Show ability of reorder buffer to hide latency of load by executing
11957 in parallel with previous instruction in case
11958 previous instruction is not needed to compute the address. */
11959 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11960 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11961 {
11962 enum attr_unit unit = get_attr_unit (insn);
11963 int loadcost = 3;
11964
11965 /* Because of the difference between the length of integer and
11966 floating unit pipeline preparation stages, the memory operands
11967 for floating point are cheaper.
11968
11969 ??? For Athlon it the difference is most probably 2. */
11970 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
11971 loadcost = 3;
11972 else
11973 loadcost = TARGET_ATHLON ? 2 : 0;
11974
11975 if (cost >= loadcost)
11976 cost -= loadcost;
11977 else
11978 cost = 0;
11979 }
11980
11981 default:
11982 break;
11983 }
11984
11985 return cost;
11986 }
11987
11988 /* How many alternative schedules to try. This should be as wide as the
11989 scheduling freedom in the DFA, but no wider. Making this value too
11990 large results extra work for the scheduler. */
11991
11992 static int
11993 ia32_multipass_dfa_lookahead (void)
11994 {
11995 if (ix86_tune == PROCESSOR_PENTIUM)
11996 return 2;
11997
11998 if (ix86_tune == PROCESSOR_PENTIUMPRO
11999 || ix86_tune == PROCESSOR_K6)
12000 return 1;
12001
12002 else
12003 return 0;
12004 }
12005
12006 \f
12007 /* Compute the alignment given to a constant that is being placed in memory.
12008 EXP is the constant and ALIGN is the alignment that the object would
12009 ordinarily have.
12010 The value of this function is used instead of that alignment to align
12011 the object. */
12012
12013 int
12014 ix86_constant_alignment (tree exp, int align)
12015 {
12016 if (TREE_CODE (exp) == REAL_CST)
12017 {
12018 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
12019 return 64;
12020 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
12021 return 128;
12022 }
12023 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
12024 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
12025 return BITS_PER_WORD;
12026
12027 return align;
12028 }
12029
12030 /* Compute the alignment for a static variable.
12031 TYPE is the data type, and ALIGN is the alignment that
12032 the object would ordinarily have. The value of this function is used
12033 instead of that alignment to align the object. */
12034
12035 int
12036 ix86_data_alignment (tree type, int align)
12037 {
12038 if (AGGREGATE_TYPE_P (type)
12039 && TYPE_SIZE (type)
12040 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12041 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
12042 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
12043 return 256;
12044
12045 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
12046 to 16byte boundary. */
12047 if (TARGET_64BIT)
12048 {
12049 if (AGGREGATE_TYPE_P (type)
12050 && TYPE_SIZE (type)
12051 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12052 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
12053 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12054 return 128;
12055 }
12056
12057 if (TREE_CODE (type) == ARRAY_TYPE)
12058 {
12059 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12060 return 64;
12061 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12062 return 128;
12063 }
12064 else if (TREE_CODE (type) == COMPLEX_TYPE)
12065 {
12066
12067 if (TYPE_MODE (type) == DCmode && align < 64)
12068 return 64;
12069 if (TYPE_MODE (type) == XCmode && align < 128)
12070 return 128;
12071 }
12072 else if ((TREE_CODE (type) == RECORD_TYPE
12073 || TREE_CODE (type) == UNION_TYPE
12074 || TREE_CODE (type) == QUAL_UNION_TYPE)
12075 && TYPE_FIELDS (type))
12076 {
12077 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12078 return 64;
12079 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12080 return 128;
12081 }
12082 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12083 || TREE_CODE (type) == INTEGER_TYPE)
12084 {
12085 if (TYPE_MODE (type) == DFmode && align < 64)
12086 return 64;
12087 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12088 return 128;
12089 }
12090
12091 return align;
12092 }
12093
12094 /* Compute the alignment for a local variable.
12095 TYPE is the data type, and ALIGN is the alignment that
12096 the object would ordinarily have. The value of this macro is used
12097 instead of that alignment to align the object. */
12098
12099 int
12100 ix86_local_alignment (tree type, int align)
12101 {
12102 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
12103 to 16byte boundary. */
12104 if (TARGET_64BIT)
12105 {
12106 if (AGGREGATE_TYPE_P (type)
12107 && TYPE_SIZE (type)
12108 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12109 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
12110 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12111 return 128;
12112 }
12113 if (TREE_CODE (type) == ARRAY_TYPE)
12114 {
12115 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12116 return 64;
12117 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12118 return 128;
12119 }
12120 else if (TREE_CODE (type) == COMPLEX_TYPE)
12121 {
12122 if (TYPE_MODE (type) == DCmode && align < 64)
12123 return 64;
12124 if (TYPE_MODE (type) == XCmode && align < 128)
12125 return 128;
12126 }
12127 else if ((TREE_CODE (type) == RECORD_TYPE
12128 || TREE_CODE (type) == UNION_TYPE
12129 || TREE_CODE (type) == QUAL_UNION_TYPE)
12130 && TYPE_FIELDS (type))
12131 {
12132 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12133 return 64;
12134 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12135 return 128;
12136 }
12137 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12138 || TREE_CODE (type) == INTEGER_TYPE)
12139 {
12140
12141 if (TYPE_MODE (type) == DFmode && align < 64)
12142 return 64;
12143 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12144 return 128;
12145 }
12146 return align;
12147 }
12148 \f
12149 /* Emit RTL insns to initialize the variable parts of a trampoline.
12150 FNADDR is an RTX for the address of the function's pure code.
12151 CXT is an RTX for the static chain value for the function. */
12152 void
12153 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
12154 {
12155 if (!TARGET_64BIT)
12156 {
12157 /* Compute offset from the end of the jmp to the target function. */
12158 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
12159 plus_constant (tramp, 10),
12160 NULL_RTX, 1, OPTAB_DIRECT);
12161 emit_move_insn (gen_rtx_MEM (QImode, tramp),
12162 gen_int_mode (0xb9, QImode));
12163 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
12164 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
12165 gen_int_mode (0xe9, QImode));
12166 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
12167 }
12168 else
12169 {
12170 int offset = 0;
12171 /* Try to load address using shorter movl instead of movabs.
12172 We may want to support movq for kernel mode, but kernel does not use
12173 trampolines at the moment. */
12174 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
12175 {
12176 fnaddr = copy_to_mode_reg (DImode, fnaddr);
12177 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12178 gen_int_mode (0xbb41, HImode));
12179 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
12180 gen_lowpart (SImode, fnaddr));
12181 offset += 6;
12182 }
12183 else
12184 {
12185 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12186 gen_int_mode (0xbb49, HImode));
12187 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12188 fnaddr);
12189 offset += 10;
12190 }
12191 /* Load static chain using movabs to r10. */
12192 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12193 gen_int_mode (0xba49, HImode));
12194 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12195 cxt);
12196 offset += 10;
12197 /* Jump to the r11 */
12198 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12199 gen_int_mode (0xff49, HImode));
12200 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
12201 gen_int_mode (0xe3, QImode));
12202 offset += 3;
12203 if (offset > TRAMPOLINE_SIZE)
12204 abort ();
12205 }
12206
12207 #ifdef ENABLE_EXECUTE_STACK
12208 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
12209 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
12210 #endif
12211 }
12212 \f
12213 /* Codes for all the SSE/MMX builtins. */
12214 enum ix86_builtins
12215 {
12216 IX86_BUILTIN_ADDPS,
12217 IX86_BUILTIN_ADDSS,
12218 IX86_BUILTIN_DIVPS,
12219 IX86_BUILTIN_DIVSS,
12220 IX86_BUILTIN_MULPS,
12221 IX86_BUILTIN_MULSS,
12222 IX86_BUILTIN_SUBPS,
12223 IX86_BUILTIN_SUBSS,
12224
12225 IX86_BUILTIN_CMPEQPS,
12226 IX86_BUILTIN_CMPLTPS,
12227 IX86_BUILTIN_CMPLEPS,
12228 IX86_BUILTIN_CMPGTPS,
12229 IX86_BUILTIN_CMPGEPS,
12230 IX86_BUILTIN_CMPNEQPS,
12231 IX86_BUILTIN_CMPNLTPS,
12232 IX86_BUILTIN_CMPNLEPS,
12233 IX86_BUILTIN_CMPNGTPS,
12234 IX86_BUILTIN_CMPNGEPS,
12235 IX86_BUILTIN_CMPORDPS,
12236 IX86_BUILTIN_CMPUNORDPS,
12237 IX86_BUILTIN_CMPNEPS,
12238 IX86_BUILTIN_CMPEQSS,
12239 IX86_BUILTIN_CMPLTSS,
12240 IX86_BUILTIN_CMPLESS,
12241 IX86_BUILTIN_CMPNEQSS,
12242 IX86_BUILTIN_CMPNLTSS,
12243 IX86_BUILTIN_CMPNLESS,
12244 IX86_BUILTIN_CMPNGTSS,
12245 IX86_BUILTIN_CMPNGESS,
12246 IX86_BUILTIN_CMPORDSS,
12247 IX86_BUILTIN_CMPUNORDSS,
12248 IX86_BUILTIN_CMPNESS,
12249
12250 IX86_BUILTIN_COMIEQSS,
12251 IX86_BUILTIN_COMILTSS,
12252 IX86_BUILTIN_COMILESS,
12253 IX86_BUILTIN_COMIGTSS,
12254 IX86_BUILTIN_COMIGESS,
12255 IX86_BUILTIN_COMINEQSS,
12256 IX86_BUILTIN_UCOMIEQSS,
12257 IX86_BUILTIN_UCOMILTSS,
12258 IX86_BUILTIN_UCOMILESS,
12259 IX86_BUILTIN_UCOMIGTSS,
12260 IX86_BUILTIN_UCOMIGESS,
12261 IX86_BUILTIN_UCOMINEQSS,
12262
12263 IX86_BUILTIN_CVTPI2PS,
12264 IX86_BUILTIN_CVTPS2PI,
12265 IX86_BUILTIN_CVTSI2SS,
12266 IX86_BUILTIN_CVTSI642SS,
12267 IX86_BUILTIN_CVTSS2SI,
12268 IX86_BUILTIN_CVTSS2SI64,
12269 IX86_BUILTIN_CVTTPS2PI,
12270 IX86_BUILTIN_CVTTSS2SI,
12271 IX86_BUILTIN_CVTTSS2SI64,
12272
12273 IX86_BUILTIN_MAXPS,
12274 IX86_BUILTIN_MAXSS,
12275 IX86_BUILTIN_MINPS,
12276 IX86_BUILTIN_MINSS,
12277
12278 IX86_BUILTIN_LOADUPS,
12279 IX86_BUILTIN_STOREUPS,
12280 IX86_BUILTIN_MOVSS,
12281
12282 IX86_BUILTIN_MOVHLPS,
12283 IX86_BUILTIN_MOVLHPS,
12284 IX86_BUILTIN_LOADHPS,
12285 IX86_BUILTIN_LOADLPS,
12286 IX86_BUILTIN_STOREHPS,
12287 IX86_BUILTIN_STORELPS,
12288
12289 IX86_BUILTIN_MASKMOVQ,
12290 IX86_BUILTIN_MOVMSKPS,
12291 IX86_BUILTIN_PMOVMSKB,
12292
12293 IX86_BUILTIN_MOVNTPS,
12294 IX86_BUILTIN_MOVNTQ,
12295
12296 IX86_BUILTIN_LOADDQU,
12297 IX86_BUILTIN_STOREDQU,
12298
12299 IX86_BUILTIN_PACKSSWB,
12300 IX86_BUILTIN_PACKSSDW,
12301 IX86_BUILTIN_PACKUSWB,
12302
12303 IX86_BUILTIN_PADDB,
12304 IX86_BUILTIN_PADDW,
12305 IX86_BUILTIN_PADDD,
12306 IX86_BUILTIN_PADDQ,
12307 IX86_BUILTIN_PADDSB,
12308 IX86_BUILTIN_PADDSW,
12309 IX86_BUILTIN_PADDUSB,
12310 IX86_BUILTIN_PADDUSW,
12311 IX86_BUILTIN_PSUBB,
12312 IX86_BUILTIN_PSUBW,
12313 IX86_BUILTIN_PSUBD,
12314 IX86_BUILTIN_PSUBQ,
12315 IX86_BUILTIN_PSUBSB,
12316 IX86_BUILTIN_PSUBSW,
12317 IX86_BUILTIN_PSUBUSB,
12318 IX86_BUILTIN_PSUBUSW,
12319
12320 IX86_BUILTIN_PAND,
12321 IX86_BUILTIN_PANDN,
12322 IX86_BUILTIN_POR,
12323 IX86_BUILTIN_PXOR,
12324
12325 IX86_BUILTIN_PAVGB,
12326 IX86_BUILTIN_PAVGW,
12327
12328 IX86_BUILTIN_PCMPEQB,
12329 IX86_BUILTIN_PCMPEQW,
12330 IX86_BUILTIN_PCMPEQD,
12331 IX86_BUILTIN_PCMPGTB,
12332 IX86_BUILTIN_PCMPGTW,
12333 IX86_BUILTIN_PCMPGTD,
12334
12335 IX86_BUILTIN_PMADDWD,
12336
12337 IX86_BUILTIN_PMAXSW,
12338 IX86_BUILTIN_PMAXUB,
12339 IX86_BUILTIN_PMINSW,
12340 IX86_BUILTIN_PMINUB,
12341
12342 IX86_BUILTIN_PMULHUW,
12343 IX86_BUILTIN_PMULHW,
12344 IX86_BUILTIN_PMULLW,
12345
12346 IX86_BUILTIN_PSADBW,
12347 IX86_BUILTIN_PSHUFW,
12348
12349 IX86_BUILTIN_PSLLW,
12350 IX86_BUILTIN_PSLLD,
12351 IX86_BUILTIN_PSLLQ,
12352 IX86_BUILTIN_PSRAW,
12353 IX86_BUILTIN_PSRAD,
12354 IX86_BUILTIN_PSRLW,
12355 IX86_BUILTIN_PSRLD,
12356 IX86_BUILTIN_PSRLQ,
12357 IX86_BUILTIN_PSLLWI,
12358 IX86_BUILTIN_PSLLDI,
12359 IX86_BUILTIN_PSLLQI,
12360 IX86_BUILTIN_PSRAWI,
12361 IX86_BUILTIN_PSRADI,
12362 IX86_BUILTIN_PSRLWI,
12363 IX86_BUILTIN_PSRLDI,
12364 IX86_BUILTIN_PSRLQI,
12365
12366 IX86_BUILTIN_PUNPCKHBW,
12367 IX86_BUILTIN_PUNPCKHWD,
12368 IX86_BUILTIN_PUNPCKHDQ,
12369 IX86_BUILTIN_PUNPCKLBW,
12370 IX86_BUILTIN_PUNPCKLWD,
12371 IX86_BUILTIN_PUNPCKLDQ,
12372
12373 IX86_BUILTIN_SHUFPS,
12374
12375 IX86_BUILTIN_RCPPS,
12376 IX86_BUILTIN_RCPSS,
12377 IX86_BUILTIN_RSQRTPS,
12378 IX86_BUILTIN_RSQRTSS,
12379 IX86_BUILTIN_SQRTPS,
12380 IX86_BUILTIN_SQRTSS,
12381
12382 IX86_BUILTIN_UNPCKHPS,
12383 IX86_BUILTIN_UNPCKLPS,
12384
12385 IX86_BUILTIN_ANDPS,
12386 IX86_BUILTIN_ANDNPS,
12387 IX86_BUILTIN_ORPS,
12388 IX86_BUILTIN_XORPS,
12389
12390 IX86_BUILTIN_EMMS,
12391 IX86_BUILTIN_LDMXCSR,
12392 IX86_BUILTIN_STMXCSR,
12393 IX86_BUILTIN_SFENCE,
12394
12395 /* 3DNow! Original */
12396 IX86_BUILTIN_FEMMS,
12397 IX86_BUILTIN_PAVGUSB,
12398 IX86_BUILTIN_PF2ID,
12399 IX86_BUILTIN_PFACC,
12400 IX86_BUILTIN_PFADD,
12401 IX86_BUILTIN_PFCMPEQ,
12402 IX86_BUILTIN_PFCMPGE,
12403 IX86_BUILTIN_PFCMPGT,
12404 IX86_BUILTIN_PFMAX,
12405 IX86_BUILTIN_PFMIN,
12406 IX86_BUILTIN_PFMUL,
12407 IX86_BUILTIN_PFRCP,
12408 IX86_BUILTIN_PFRCPIT1,
12409 IX86_BUILTIN_PFRCPIT2,
12410 IX86_BUILTIN_PFRSQIT1,
12411 IX86_BUILTIN_PFRSQRT,
12412 IX86_BUILTIN_PFSUB,
12413 IX86_BUILTIN_PFSUBR,
12414 IX86_BUILTIN_PI2FD,
12415 IX86_BUILTIN_PMULHRW,
12416
12417 /* 3DNow! Athlon Extensions */
12418 IX86_BUILTIN_PF2IW,
12419 IX86_BUILTIN_PFNACC,
12420 IX86_BUILTIN_PFPNACC,
12421 IX86_BUILTIN_PI2FW,
12422 IX86_BUILTIN_PSWAPDSI,
12423 IX86_BUILTIN_PSWAPDSF,
12424
12425 /* SSE2 */
12426 IX86_BUILTIN_ADDPD,
12427 IX86_BUILTIN_ADDSD,
12428 IX86_BUILTIN_DIVPD,
12429 IX86_BUILTIN_DIVSD,
12430 IX86_BUILTIN_MULPD,
12431 IX86_BUILTIN_MULSD,
12432 IX86_BUILTIN_SUBPD,
12433 IX86_BUILTIN_SUBSD,
12434
12435 IX86_BUILTIN_CMPEQPD,
12436 IX86_BUILTIN_CMPLTPD,
12437 IX86_BUILTIN_CMPLEPD,
12438 IX86_BUILTIN_CMPGTPD,
12439 IX86_BUILTIN_CMPGEPD,
12440 IX86_BUILTIN_CMPNEQPD,
12441 IX86_BUILTIN_CMPNLTPD,
12442 IX86_BUILTIN_CMPNLEPD,
12443 IX86_BUILTIN_CMPNGTPD,
12444 IX86_BUILTIN_CMPNGEPD,
12445 IX86_BUILTIN_CMPORDPD,
12446 IX86_BUILTIN_CMPUNORDPD,
12447 IX86_BUILTIN_CMPNEPD,
12448 IX86_BUILTIN_CMPEQSD,
12449 IX86_BUILTIN_CMPLTSD,
12450 IX86_BUILTIN_CMPLESD,
12451 IX86_BUILTIN_CMPNEQSD,
12452 IX86_BUILTIN_CMPNLTSD,
12453 IX86_BUILTIN_CMPNLESD,
12454 IX86_BUILTIN_CMPORDSD,
12455 IX86_BUILTIN_CMPUNORDSD,
12456 IX86_BUILTIN_CMPNESD,
12457
12458 IX86_BUILTIN_COMIEQSD,
12459 IX86_BUILTIN_COMILTSD,
12460 IX86_BUILTIN_COMILESD,
12461 IX86_BUILTIN_COMIGTSD,
12462 IX86_BUILTIN_COMIGESD,
12463 IX86_BUILTIN_COMINEQSD,
12464 IX86_BUILTIN_UCOMIEQSD,
12465 IX86_BUILTIN_UCOMILTSD,
12466 IX86_BUILTIN_UCOMILESD,
12467 IX86_BUILTIN_UCOMIGTSD,
12468 IX86_BUILTIN_UCOMIGESD,
12469 IX86_BUILTIN_UCOMINEQSD,
12470
12471 IX86_BUILTIN_MAXPD,
12472 IX86_BUILTIN_MAXSD,
12473 IX86_BUILTIN_MINPD,
12474 IX86_BUILTIN_MINSD,
12475
12476 IX86_BUILTIN_ANDPD,
12477 IX86_BUILTIN_ANDNPD,
12478 IX86_BUILTIN_ORPD,
12479 IX86_BUILTIN_XORPD,
12480
12481 IX86_BUILTIN_SQRTPD,
12482 IX86_BUILTIN_SQRTSD,
12483
12484 IX86_BUILTIN_UNPCKHPD,
12485 IX86_BUILTIN_UNPCKLPD,
12486
12487 IX86_BUILTIN_SHUFPD,
12488
12489 IX86_BUILTIN_LOADUPD,
12490 IX86_BUILTIN_STOREUPD,
12491 IX86_BUILTIN_MOVSD,
12492
12493 IX86_BUILTIN_LOADHPD,
12494 IX86_BUILTIN_LOADLPD,
12495
12496 IX86_BUILTIN_CVTDQ2PD,
12497 IX86_BUILTIN_CVTDQ2PS,
12498
12499 IX86_BUILTIN_CVTPD2DQ,
12500 IX86_BUILTIN_CVTPD2PI,
12501 IX86_BUILTIN_CVTPD2PS,
12502 IX86_BUILTIN_CVTTPD2DQ,
12503 IX86_BUILTIN_CVTTPD2PI,
12504
12505 IX86_BUILTIN_CVTPI2PD,
12506 IX86_BUILTIN_CVTSI2SD,
12507 IX86_BUILTIN_CVTSI642SD,
12508
12509 IX86_BUILTIN_CVTSD2SI,
12510 IX86_BUILTIN_CVTSD2SI64,
12511 IX86_BUILTIN_CVTSD2SS,
12512 IX86_BUILTIN_CVTSS2SD,
12513 IX86_BUILTIN_CVTTSD2SI,
12514 IX86_BUILTIN_CVTTSD2SI64,
12515
12516 IX86_BUILTIN_CVTPS2DQ,
12517 IX86_BUILTIN_CVTPS2PD,
12518 IX86_BUILTIN_CVTTPS2DQ,
12519
12520 IX86_BUILTIN_MOVNTI,
12521 IX86_BUILTIN_MOVNTPD,
12522 IX86_BUILTIN_MOVNTDQ,
12523
12524 /* SSE2 MMX */
12525 IX86_BUILTIN_MASKMOVDQU,
12526 IX86_BUILTIN_MOVMSKPD,
12527 IX86_BUILTIN_PMOVMSKB128,
12528
12529 IX86_BUILTIN_PACKSSWB128,
12530 IX86_BUILTIN_PACKSSDW128,
12531 IX86_BUILTIN_PACKUSWB128,
12532
12533 IX86_BUILTIN_PADDB128,
12534 IX86_BUILTIN_PADDW128,
12535 IX86_BUILTIN_PADDD128,
12536 IX86_BUILTIN_PADDQ128,
12537 IX86_BUILTIN_PADDSB128,
12538 IX86_BUILTIN_PADDSW128,
12539 IX86_BUILTIN_PADDUSB128,
12540 IX86_BUILTIN_PADDUSW128,
12541 IX86_BUILTIN_PSUBB128,
12542 IX86_BUILTIN_PSUBW128,
12543 IX86_BUILTIN_PSUBD128,
12544 IX86_BUILTIN_PSUBQ128,
12545 IX86_BUILTIN_PSUBSB128,
12546 IX86_BUILTIN_PSUBSW128,
12547 IX86_BUILTIN_PSUBUSB128,
12548 IX86_BUILTIN_PSUBUSW128,
12549
12550 IX86_BUILTIN_PAND128,
12551 IX86_BUILTIN_PANDN128,
12552 IX86_BUILTIN_POR128,
12553 IX86_BUILTIN_PXOR128,
12554
12555 IX86_BUILTIN_PAVGB128,
12556 IX86_BUILTIN_PAVGW128,
12557
12558 IX86_BUILTIN_PCMPEQB128,
12559 IX86_BUILTIN_PCMPEQW128,
12560 IX86_BUILTIN_PCMPEQD128,
12561 IX86_BUILTIN_PCMPGTB128,
12562 IX86_BUILTIN_PCMPGTW128,
12563 IX86_BUILTIN_PCMPGTD128,
12564
12565 IX86_BUILTIN_PMADDWD128,
12566
12567 IX86_BUILTIN_PMAXSW128,
12568 IX86_BUILTIN_PMAXUB128,
12569 IX86_BUILTIN_PMINSW128,
12570 IX86_BUILTIN_PMINUB128,
12571
12572 IX86_BUILTIN_PMULUDQ,
12573 IX86_BUILTIN_PMULUDQ128,
12574 IX86_BUILTIN_PMULHUW128,
12575 IX86_BUILTIN_PMULHW128,
12576 IX86_BUILTIN_PMULLW128,
12577
12578 IX86_BUILTIN_PSADBW128,
12579 IX86_BUILTIN_PSHUFHW,
12580 IX86_BUILTIN_PSHUFLW,
12581 IX86_BUILTIN_PSHUFD,
12582
12583 IX86_BUILTIN_PSLLW128,
12584 IX86_BUILTIN_PSLLD128,
12585 IX86_BUILTIN_PSLLQ128,
12586 IX86_BUILTIN_PSRAW128,
12587 IX86_BUILTIN_PSRAD128,
12588 IX86_BUILTIN_PSRLW128,
12589 IX86_BUILTIN_PSRLD128,
12590 IX86_BUILTIN_PSRLQ128,
12591 IX86_BUILTIN_PSLLDQI128,
12592 IX86_BUILTIN_PSLLWI128,
12593 IX86_BUILTIN_PSLLDI128,
12594 IX86_BUILTIN_PSLLQI128,
12595 IX86_BUILTIN_PSRAWI128,
12596 IX86_BUILTIN_PSRADI128,
12597 IX86_BUILTIN_PSRLDQI128,
12598 IX86_BUILTIN_PSRLWI128,
12599 IX86_BUILTIN_PSRLDI128,
12600 IX86_BUILTIN_PSRLQI128,
12601
12602 IX86_BUILTIN_PUNPCKHBW128,
12603 IX86_BUILTIN_PUNPCKHWD128,
12604 IX86_BUILTIN_PUNPCKHDQ128,
12605 IX86_BUILTIN_PUNPCKHQDQ128,
12606 IX86_BUILTIN_PUNPCKLBW128,
12607 IX86_BUILTIN_PUNPCKLWD128,
12608 IX86_BUILTIN_PUNPCKLDQ128,
12609 IX86_BUILTIN_PUNPCKLQDQ128,
12610
12611 IX86_BUILTIN_CLFLUSH,
12612 IX86_BUILTIN_MFENCE,
12613 IX86_BUILTIN_LFENCE,
12614
12615 /* Prescott New Instructions. */
12616 IX86_BUILTIN_ADDSUBPS,
12617 IX86_BUILTIN_HADDPS,
12618 IX86_BUILTIN_HSUBPS,
12619 IX86_BUILTIN_MOVSHDUP,
12620 IX86_BUILTIN_MOVSLDUP,
12621 IX86_BUILTIN_ADDSUBPD,
12622 IX86_BUILTIN_HADDPD,
12623 IX86_BUILTIN_HSUBPD,
12624 IX86_BUILTIN_LDDQU,
12625
12626 IX86_BUILTIN_MONITOR,
12627 IX86_BUILTIN_MWAIT,
12628
12629 IX86_BUILTIN_VEC_INIT_V2SI,
12630 IX86_BUILTIN_VEC_INIT_V4HI,
12631 IX86_BUILTIN_VEC_INIT_V8QI,
12632 IX86_BUILTIN_VEC_EXT_V2DF,
12633 IX86_BUILTIN_VEC_EXT_V2DI,
12634 IX86_BUILTIN_VEC_EXT_V4SF,
12635 IX86_BUILTIN_VEC_EXT_V4SI,
12636 IX86_BUILTIN_VEC_EXT_V8HI,
12637 IX86_BUILTIN_VEC_EXT_V4HI,
12638 IX86_BUILTIN_VEC_SET_V8HI,
12639 IX86_BUILTIN_VEC_SET_V4HI,
12640
12641 IX86_BUILTIN_MAX
12642 };
12643
12644 #define def_builtin(MASK, NAME, TYPE, CODE) \
12645 do { \
12646 if ((MASK) & target_flags \
12647 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
12648 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
12649 NULL, NULL_TREE); \
12650 } while (0)
12651
12652 /* Bits for builtin_description.flag. */
12653
12654 /* Set when we don't support the comparison natively, and should
12655 swap_comparison in order to support it. */
12656 #define BUILTIN_DESC_SWAP_OPERANDS 1
12657
12658 struct builtin_description
12659 {
12660 const unsigned int mask;
12661 const enum insn_code icode;
12662 const char *const name;
12663 const enum ix86_builtins code;
12664 const enum rtx_code comparison;
12665 const unsigned int flag;
12666 };
12667
12668 static const struct builtin_description bdesc_comi[] =
12669 {
12670 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
12671 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
12672 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
12673 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
12674 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
12675 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
12676 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
12677 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
12678 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
12679 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
12680 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
12681 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
12682 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
12683 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
12684 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
12685 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
12686 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
12687 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
12688 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
12689 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
12690 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
12691 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
12692 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
12693 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
12694 };
12695
12696 static const struct builtin_description bdesc_2arg[] =
12697 {
12698 /* SSE */
12699 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
12700 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
12701 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
12702 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
12703 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
12704 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
12705 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
12706 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
12707
12708 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
12709 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
12710 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
12711 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
12712 BUILTIN_DESC_SWAP_OPERANDS },
12713 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
12714 BUILTIN_DESC_SWAP_OPERANDS },
12715 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
12716 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
12717 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
12718 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
12719 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
12720 BUILTIN_DESC_SWAP_OPERANDS },
12721 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
12722 BUILTIN_DESC_SWAP_OPERANDS },
12723 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
12724 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
12725 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
12726 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
12727 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
12728 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
12729 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
12730 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
12731 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
12732 BUILTIN_DESC_SWAP_OPERANDS },
12733 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
12734 BUILTIN_DESC_SWAP_OPERANDS },
12735 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
12736
12737 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
12738 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
12739 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
12740 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
12741
12742 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
12743 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
12744 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
12745 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
12746
12747 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
12748 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
12749 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
12750 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
12751 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
12752
12753 /* MMX */
12754 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
12755 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
12756 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
12757 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
12758 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
12759 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
12760 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
12761 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
12762
12763 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
12764 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
12765 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
12766 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
12767 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
12768 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
12769 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
12770 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
12771
12772 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
12773 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
12774 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
12775
12776 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
12777 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
12778 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
12779 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
12780
12781 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
12782 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
12783
12784 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
12785 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
12786 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
12787 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
12788 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
12789 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
12790
12791 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
12792 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
12793 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
12794 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
12795
12796 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
12797 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
12798 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
12799 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
12800 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
12801 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
12802
12803 /* Special. */
12804 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
12805 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
12806 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
12807
12808 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
12809 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
12810 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
12811
12812 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
12813 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
12814 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
12815 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
12816 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
12817 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
12818
12819 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
12820 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
12821 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
12822 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
12823 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
12824 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
12825
12826 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
12827 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
12828 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
12829 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
12830
12831 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
12832 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
12833
12834 /* SSE2 */
12835 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
12836 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
12837 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
12838 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
12839 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
12840 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
12841 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
12842 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
12843
12844 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
12845 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
12846 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
12847 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
12848 BUILTIN_DESC_SWAP_OPERANDS },
12849 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
12850 BUILTIN_DESC_SWAP_OPERANDS },
12851 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
12852 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
12853 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
12854 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
12855 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
12856 BUILTIN_DESC_SWAP_OPERANDS },
12857 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
12858 BUILTIN_DESC_SWAP_OPERANDS },
12859 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
12860 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
12861 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
12862 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
12863 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
12864 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
12865 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
12866 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
12867 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
12868
12869 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
12870 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
12871 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
12872 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
12873
12874 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
12875 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
12876 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
12877 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
12878
12879 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
12880 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
12881 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
12882
12883 /* SSE2 MMX */
12884 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
12885 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
12886 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
12887 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
12888 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
12889 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
12890 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
12891 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
12892
12893 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
12894 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
12895 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
12896 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
12897 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
12898 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
12899 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
12900 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
12901
12902 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
12903 { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
12904
12905 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
12906 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
12907 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
12908 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
12909
12910 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
12911 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
12912
12913 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
12914 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
12915 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
12916 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
12917 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
12918 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
12919
12920 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
12921 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
12922 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
12923 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
12924
12925 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
12926 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
12927 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
12928 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
12929 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
12930 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
12931 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
12932 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
12933
12934 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
12935 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
12936 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
12937
12938 { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
12939 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
12940
12941 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
12942 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
12943
12944 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
12945 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
12946 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
12947
12948 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
12949 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
12950 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
12951
12952 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
12953 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
12954
12955 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
12956
12957 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
12958 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
12959 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
12960 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
12961
12962 /* SSE3 MMX */
12963 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
12964 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
12965 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
12966 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
12967 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
12968 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
12969 };
12970
12971 static const struct builtin_description bdesc_1arg[] =
12972 {
12973 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
12974 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
12975
12976 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
12977 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
12978 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
12979
12980 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
12981 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
12982 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
12983 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
12984 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
12985 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
12986
12987 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
12988 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
12989
12990 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
12991
12992 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
12993 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
12994
12995 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
12996 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
12997 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
12998 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
12999 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
13000
13001 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
13002
13003 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
13004 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
13005 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
13006 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
13007
13008 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
13009 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
13010 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
13011
13012 /* SSE3 */
13013 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
13014 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
13015 };
13016
13017 static void
13018 ix86_init_builtins (void)
13019 {
13020 if (TARGET_MMX)
13021 ix86_init_mmx_sse_builtins ();
13022 }
13023
13024 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
13025 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
13026 builtins. */
13027 static void
13028 ix86_init_mmx_sse_builtins (void)
13029 {
13030 const struct builtin_description * d;
13031 size_t i;
13032
13033 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
13034 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
13035 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
13036 tree V2DI_type_node
13037 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
13038 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
13039 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
13040 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
13041 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
13042 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
13043 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
13044
13045 tree pchar_type_node = build_pointer_type (char_type_node);
13046 tree pcchar_type_node = build_pointer_type (
13047 build_type_variant (char_type_node, 1, 0));
13048 tree pfloat_type_node = build_pointer_type (float_type_node);
13049 tree pcfloat_type_node = build_pointer_type (
13050 build_type_variant (float_type_node, 1, 0));
13051 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
13052 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
13053 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
13054
13055 /* Comparisons. */
13056 tree int_ftype_v4sf_v4sf
13057 = build_function_type_list (integer_type_node,
13058 V4SF_type_node, V4SF_type_node, NULL_TREE);
13059 tree v4si_ftype_v4sf_v4sf
13060 = build_function_type_list (V4SI_type_node,
13061 V4SF_type_node, V4SF_type_node, NULL_TREE);
13062 /* MMX/SSE/integer conversions. */
13063 tree int_ftype_v4sf
13064 = build_function_type_list (integer_type_node,
13065 V4SF_type_node, NULL_TREE);
13066 tree int64_ftype_v4sf
13067 = build_function_type_list (long_long_integer_type_node,
13068 V4SF_type_node, NULL_TREE);
13069 tree int_ftype_v8qi
13070 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
13071 tree v4sf_ftype_v4sf_int
13072 = build_function_type_list (V4SF_type_node,
13073 V4SF_type_node, integer_type_node, NULL_TREE);
13074 tree v4sf_ftype_v4sf_int64
13075 = build_function_type_list (V4SF_type_node,
13076 V4SF_type_node, long_long_integer_type_node,
13077 NULL_TREE);
13078 tree v4sf_ftype_v4sf_v2si
13079 = build_function_type_list (V4SF_type_node,
13080 V4SF_type_node, V2SI_type_node, NULL_TREE);
13081
13082 /* Miscellaneous. */
13083 tree v8qi_ftype_v4hi_v4hi
13084 = build_function_type_list (V8QI_type_node,
13085 V4HI_type_node, V4HI_type_node, NULL_TREE);
13086 tree v4hi_ftype_v2si_v2si
13087 = build_function_type_list (V4HI_type_node,
13088 V2SI_type_node, V2SI_type_node, NULL_TREE);
13089 tree v4sf_ftype_v4sf_v4sf_int
13090 = build_function_type_list (V4SF_type_node,
13091 V4SF_type_node, V4SF_type_node,
13092 integer_type_node, NULL_TREE);
13093 tree v2si_ftype_v4hi_v4hi
13094 = build_function_type_list (V2SI_type_node,
13095 V4HI_type_node, V4HI_type_node, NULL_TREE);
13096 tree v4hi_ftype_v4hi_int
13097 = build_function_type_list (V4HI_type_node,
13098 V4HI_type_node, integer_type_node, NULL_TREE);
13099 tree v4hi_ftype_v4hi_di
13100 = build_function_type_list (V4HI_type_node,
13101 V4HI_type_node, long_long_unsigned_type_node,
13102 NULL_TREE);
13103 tree v2si_ftype_v2si_di
13104 = build_function_type_list (V2SI_type_node,
13105 V2SI_type_node, long_long_unsigned_type_node,
13106 NULL_TREE);
13107 tree void_ftype_void
13108 = build_function_type (void_type_node, void_list_node);
13109 tree void_ftype_unsigned
13110 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
13111 tree void_ftype_unsigned_unsigned
13112 = build_function_type_list (void_type_node, unsigned_type_node,
13113 unsigned_type_node, NULL_TREE);
13114 tree void_ftype_pcvoid_unsigned_unsigned
13115 = build_function_type_list (void_type_node, const_ptr_type_node,
13116 unsigned_type_node, unsigned_type_node,
13117 NULL_TREE);
13118 tree unsigned_ftype_void
13119 = build_function_type (unsigned_type_node, void_list_node);
13120 tree v2si_ftype_v4sf
13121 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
13122 /* Loads/stores. */
13123 tree void_ftype_v8qi_v8qi_pchar
13124 = build_function_type_list (void_type_node,
13125 V8QI_type_node, V8QI_type_node,
13126 pchar_type_node, NULL_TREE);
13127 tree v4sf_ftype_pcfloat
13128 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
13129 /* @@@ the type is bogus */
13130 tree v4sf_ftype_v4sf_pv2si
13131 = build_function_type_list (V4SF_type_node,
13132 V4SF_type_node, pv2si_type_node, NULL_TREE);
13133 tree void_ftype_pv2si_v4sf
13134 = build_function_type_list (void_type_node,
13135 pv2si_type_node, V4SF_type_node, NULL_TREE);
13136 tree void_ftype_pfloat_v4sf
13137 = build_function_type_list (void_type_node,
13138 pfloat_type_node, V4SF_type_node, NULL_TREE);
13139 tree void_ftype_pdi_di
13140 = build_function_type_list (void_type_node,
13141 pdi_type_node, long_long_unsigned_type_node,
13142 NULL_TREE);
13143 tree void_ftype_pv2di_v2di
13144 = build_function_type_list (void_type_node,
13145 pv2di_type_node, V2DI_type_node, NULL_TREE);
13146 /* Normal vector unops. */
13147 tree v4sf_ftype_v4sf
13148 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
13149
13150 /* Normal vector binops. */
13151 tree v4sf_ftype_v4sf_v4sf
13152 = build_function_type_list (V4SF_type_node,
13153 V4SF_type_node, V4SF_type_node, NULL_TREE);
13154 tree v8qi_ftype_v8qi_v8qi
13155 = build_function_type_list (V8QI_type_node,
13156 V8QI_type_node, V8QI_type_node, NULL_TREE);
13157 tree v4hi_ftype_v4hi_v4hi
13158 = build_function_type_list (V4HI_type_node,
13159 V4HI_type_node, V4HI_type_node, NULL_TREE);
13160 tree v2si_ftype_v2si_v2si
13161 = build_function_type_list (V2SI_type_node,
13162 V2SI_type_node, V2SI_type_node, NULL_TREE);
13163 tree di_ftype_di_di
13164 = build_function_type_list (long_long_unsigned_type_node,
13165 long_long_unsigned_type_node,
13166 long_long_unsigned_type_node, NULL_TREE);
13167
13168 tree v2si_ftype_v2sf
13169 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
13170 tree v2sf_ftype_v2si
13171 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
13172 tree v2si_ftype_v2si
13173 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
13174 tree v2sf_ftype_v2sf
13175 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
13176 tree v2sf_ftype_v2sf_v2sf
13177 = build_function_type_list (V2SF_type_node,
13178 V2SF_type_node, V2SF_type_node, NULL_TREE);
13179 tree v2si_ftype_v2sf_v2sf
13180 = build_function_type_list (V2SI_type_node,
13181 V2SF_type_node, V2SF_type_node, NULL_TREE);
13182 tree pint_type_node = build_pointer_type (integer_type_node);
13183 tree pdouble_type_node = build_pointer_type (double_type_node);
13184 tree pcdouble_type_node = build_pointer_type (
13185 build_type_variant (double_type_node, 1, 0));
13186 tree int_ftype_v2df_v2df
13187 = build_function_type_list (integer_type_node,
13188 V2DF_type_node, V2DF_type_node, NULL_TREE);
13189
13190 tree ti_ftype_ti_ti
13191 = build_function_type_list (intTI_type_node,
13192 intTI_type_node, intTI_type_node, NULL_TREE);
13193 tree void_ftype_pcvoid
13194 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
13195 tree v4sf_ftype_v4si
13196 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
13197 tree v4si_ftype_v4sf
13198 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
13199 tree v2df_ftype_v4si
13200 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
13201 tree v4si_ftype_v2df
13202 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
13203 tree v2si_ftype_v2df
13204 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
13205 tree v4sf_ftype_v2df
13206 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
13207 tree v2df_ftype_v2si
13208 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
13209 tree v2df_ftype_v4sf
13210 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
13211 tree int_ftype_v2df
13212 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
13213 tree int64_ftype_v2df
13214 = build_function_type_list (long_long_integer_type_node,
13215 V2DF_type_node, NULL_TREE);
13216 tree v2df_ftype_v2df_int
13217 = build_function_type_list (V2DF_type_node,
13218 V2DF_type_node, integer_type_node, NULL_TREE);
13219 tree v2df_ftype_v2df_int64
13220 = build_function_type_list (V2DF_type_node,
13221 V2DF_type_node, long_long_integer_type_node,
13222 NULL_TREE);
13223 tree v4sf_ftype_v4sf_v2df
13224 = build_function_type_list (V4SF_type_node,
13225 V4SF_type_node, V2DF_type_node, NULL_TREE);
13226 tree v2df_ftype_v2df_v4sf
13227 = build_function_type_list (V2DF_type_node,
13228 V2DF_type_node, V4SF_type_node, NULL_TREE);
13229 tree v2df_ftype_v2df_v2df_int
13230 = build_function_type_list (V2DF_type_node,
13231 V2DF_type_node, V2DF_type_node,
13232 integer_type_node,
13233 NULL_TREE);
13234 tree v2df_ftype_v2df_pcdouble
13235 = build_function_type_list (V2DF_type_node,
13236 V2DF_type_node, pcdouble_type_node, NULL_TREE);
13237 tree void_ftype_pdouble_v2df
13238 = build_function_type_list (void_type_node,
13239 pdouble_type_node, V2DF_type_node, NULL_TREE);
13240 tree void_ftype_pint_int
13241 = build_function_type_list (void_type_node,
13242 pint_type_node, integer_type_node, NULL_TREE);
13243 tree void_ftype_v16qi_v16qi_pchar
13244 = build_function_type_list (void_type_node,
13245 V16QI_type_node, V16QI_type_node,
13246 pchar_type_node, NULL_TREE);
13247 tree v2df_ftype_pcdouble
13248 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
13249 tree v2df_ftype_v2df_v2df
13250 = build_function_type_list (V2DF_type_node,
13251 V2DF_type_node, V2DF_type_node, NULL_TREE);
13252 tree v16qi_ftype_v16qi_v16qi
13253 = build_function_type_list (V16QI_type_node,
13254 V16QI_type_node, V16QI_type_node, NULL_TREE);
13255 tree v8hi_ftype_v8hi_v8hi
13256 = build_function_type_list (V8HI_type_node,
13257 V8HI_type_node, V8HI_type_node, NULL_TREE);
13258 tree v4si_ftype_v4si_v4si
13259 = build_function_type_list (V4SI_type_node,
13260 V4SI_type_node, V4SI_type_node, NULL_TREE);
13261 tree v2di_ftype_v2di_v2di
13262 = build_function_type_list (V2DI_type_node,
13263 V2DI_type_node, V2DI_type_node, NULL_TREE);
13264 tree v2di_ftype_v2df_v2df
13265 = build_function_type_list (V2DI_type_node,
13266 V2DF_type_node, V2DF_type_node, NULL_TREE);
13267 tree v2df_ftype_v2df
13268 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
13269 tree v2di_ftype_v2di_int
13270 = build_function_type_list (V2DI_type_node,
13271 V2DI_type_node, integer_type_node, NULL_TREE);
13272 tree v4si_ftype_v4si_int
13273 = build_function_type_list (V4SI_type_node,
13274 V4SI_type_node, integer_type_node, NULL_TREE);
13275 tree v8hi_ftype_v8hi_int
13276 = build_function_type_list (V8HI_type_node,
13277 V8HI_type_node, integer_type_node, NULL_TREE);
13278 tree v8hi_ftype_v8hi_v2di
13279 = build_function_type_list (V8HI_type_node,
13280 V8HI_type_node, V2DI_type_node, NULL_TREE);
13281 tree v4si_ftype_v4si_v2di
13282 = build_function_type_list (V4SI_type_node,
13283 V4SI_type_node, V2DI_type_node, NULL_TREE);
13284 tree v4si_ftype_v8hi_v8hi
13285 = build_function_type_list (V4SI_type_node,
13286 V8HI_type_node, V8HI_type_node, NULL_TREE);
13287 tree di_ftype_v8qi_v8qi
13288 = build_function_type_list (long_long_unsigned_type_node,
13289 V8QI_type_node, V8QI_type_node, NULL_TREE);
13290 tree di_ftype_v2si_v2si
13291 = build_function_type_list (long_long_unsigned_type_node,
13292 V2SI_type_node, V2SI_type_node, NULL_TREE);
13293 tree v2di_ftype_v16qi_v16qi
13294 = build_function_type_list (V2DI_type_node,
13295 V16QI_type_node, V16QI_type_node, NULL_TREE);
13296 tree v2di_ftype_v4si_v4si
13297 = build_function_type_list (V2DI_type_node,
13298 V4SI_type_node, V4SI_type_node, NULL_TREE);
13299 tree int_ftype_v16qi
13300 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
13301 tree v16qi_ftype_pcchar
13302 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
13303 tree void_ftype_pchar_v16qi
13304 = build_function_type_list (void_type_node,
13305 pchar_type_node, V16QI_type_node, NULL_TREE);
13306
13307 tree float80_type;
13308 tree float128_type;
13309 tree ftype;
13310
13311 /* The __float80 type. */
13312 if (TYPE_MODE (long_double_type_node) == XFmode)
13313 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
13314 "__float80");
13315 else
13316 {
13317 /* The __float80 type. */
13318 float80_type = make_node (REAL_TYPE);
13319 TYPE_PRECISION (float80_type) = 80;
13320 layout_type (float80_type);
13321 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
13322 }
13323
13324 float128_type = make_node (REAL_TYPE);
13325 TYPE_PRECISION (float128_type) = 128;
13326 layout_type (float128_type);
13327 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
13328
13329 /* Add all builtins that are more or less simple operations on two
13330 operands. */
13331 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13332 {
13333 /* Use one of the operands; the target can have a different mode for
13334 mask-generating compares. */
13335 enum machine_mode mode;
13336 tree type;
13337
13338 if (d->name == 0)
13339 continue;
13340 mode = insn_data[d->icode].operand[1].mode;
13341
13342 switch (mode)
13343 {
13344 case V16QImode:
13345 type = v16qi_ftype_v16qi_v16qi;
13346 break;
13347 case V8HImode:
13348 type = v8hi_ftype_v8hi_v8hi;
13349 break;
13350 case V4SImode:
13351 type = v4si_ftype_v4si_v4si;
13352 break;
13353 case V2DImode:
13354 type = v2di_ftype_v2di_v2di;
13355 break;
13356 case V2DFmode:
13357 type = v2df_ftype_v2df_v2df;
13358 break;
13359 case TImode:
13360 type = ti_ftype_ti_ti;
13361 break;
13362 case V4SFmode:
13363 type = v4sf_ftype_v4sf_v4sf;
13364 break;
13365 case V8QImode:
13366 type = v8qi_ftype_v8qi_v8qi;
13367 break;
13368 case V4HImode:
13369 type = v4hi_ftype_v4hi_v4hi;
13370 break;
13371 case V2SImode:
13372 type = v2si_ftype_v2si_v2si;
13373 break;
13374 case DImode:
13375 type = di_ftype_di_di;
13376 break;
13377
13378 default:
13379 abort ();
13380 }
13381
13382 /* Override for comparisons. */
13383 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
13384 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
13385 type = v4si_ftype_v4sf_v4sf;
13386
13387 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
13388 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
13389 type = v2di_ftype_v2df_v2df;
13390
13391 def_builtin (d->mask, d->name, type, d->code);
13392 }
13393
13394 /* Add the remaining MMX insns with somewhat more complicated types. */
13395 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
13396 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
13397 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
13398 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
13399
13400 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
13401 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
13402 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
13403
13404 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
13405 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
13406
13407 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
13408 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
13409
13410 /* comi/ucomi insns. */
13411 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
13412 if (d->mask == MASK_SSE2)
13413 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
13414 else
13415 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
13416
13417 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
13418 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
13419 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
13420
13421 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
13422 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
13423 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
13424 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
13425 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
13426 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
13427 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
13428 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
13429 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
13430 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
13431 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
13432
13433 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
13434
13435 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
13436 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
13437
13438 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
13439 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
13440 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
13441 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
13442
13443 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
13444 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
13445 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
13446 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
13447
13448 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
13449
13450 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
13451
13452 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
13453 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
13454 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
13455 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
13456 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
13457 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
13458
13459 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
13460
13461 /* Original 3DNow! */
13462 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
13463 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
13464 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
13465 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
13466 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
13467 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
13468 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
13469 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
13470 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
13471 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
13472 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
13473 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
13474 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
13475 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
13476 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
13477 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
13478 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
13479 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
13480 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
13481 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
13482
13483 /* 3DNow! extension as used in the Athlon CPU. */
13484 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
13485 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
13486 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
13487 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
13488 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
13489 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
13490
13491 /* SSE2 */
13492 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
13493
13494 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
13495 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
13496
13497 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
13498 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
13499
13500 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
13501 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
13502 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
13503 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
13504 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
13505
13506 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
13507 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
13508 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
13509 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
13510
13511 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
13512 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
13513
13514 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
13515
13516 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
13517 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
13518
13519 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
13520 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
13521 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
13522 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
13523 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
13524
13525 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
13526
13527 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
13528 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
13529 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
13530 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
13531
13532 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
13533 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
13534 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
13535
13536 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
13537 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
13538 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
13539 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
13540
13541 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
13542 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
13543 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
13544
13545 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
13546 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
13547
13548 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
13549 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
13550
13551 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
13552 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
13553 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
13554
13555 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
13556 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
13557 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
13558
13559 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
13560 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
13561
13562 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
13563 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
13564 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
13565 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
13566
13567 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
13568 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
13569 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
13570 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
13571
13572 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
13573 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
13574
13575 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
13576
13577 /* Prescott New Instructions. */
13578 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
13579 void_ftype_pcvoid_unsigned_unsigned,
13580 IX86_BUILTIN_MONITOR);
13581 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
13582 void_ftype_unsigned_unsigned,
13583 IX86_BUILTIN_MWAIT);
13584 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
13585 v4sf_ftype_v4sf,
13586 IX86_BUILTIN_MOVSHDUP);
13587 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
13588 v4sf_ftype_v4sf,
13589 IX86_BUILTIN_MOVSLDUP);
13590 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
13591 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
13592
13593 /* Access to the vec_init patterns. */
13594 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
13595 integer_type_node, NULL_TREE);
13596 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
13597 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
13598
13599 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
13600 short_integer_type_node,
13601 short_integer_type_node,
13602 short_integer_type_node, NULL_TREE);
13603 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
13604 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
13605
13606 ftype = build_function_type_list (V8QI_type_node, char_type_node,
13607 char_type_node, char_type_node,
13608 char_type_node, char_type_node,
13609 char_type_node, char_type_node,
13610 char_type_node, NULL_TREE);
13611 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
13612 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
13613
13614 /* Access to the vec_extract patterns. */
13615 ftype = build_function_type_list (double_type_node, V2DF_type_node,
13616 integer_type_node, NULL_TREE);
13617 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
13618 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
13619
13620 ftype = build_function_type_list (long_long_integer_type_node,
13621 V2DI_type_node, integer_type_node,
13622 NULL_TREE);
13623 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
13624 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
13625
13626 ftype = build_function_type_list (float_type_node, V4SF_type_node,
13627 integer_type_node, NULL_TREE);
13628 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
13629 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
13630
13631 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
13632 integer_type_node, NULL_TREE);
13633 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
13634 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
13635
13636 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
13637 integer_type_node, NULL_TREE);
13638 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
13639 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
13640
13641 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
13642 integer_type_node, NULL_TREE);
13643 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
13644 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
13645
13646 /* Access to the vec_set patterns. */
13647 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
13648 intHI_type_node,
13649 integer_type_node, NULL_TREE);
13650 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
13651 ftype, IX86_BUILTIN_VEC_SET_V8HI);
13652
13653 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
13654 intHI_type_node,
13655 integer_type_node, NULL_TREE);
13656 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
13657 ftype, IX86_BUILTIN_VEC_SET_V4HI);
13658 }
13659
13660 /* Errors in the source file can cause expand_expr to return const0_rtx
13661 where we expect a vector. To avoid crashing, use one of the vector
13662 clear instructions. */
13663 static rtx
13664 safe_vector_operand (rtx x, enum machine_mode mode)
13665 {
13666 if (x == const0_rtx)
13667 x = CONST0_RTX (mode);
13668 return x;
13669 }
13670
13671 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
13672
13673 static rtx
13674 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
13675 {
13676 rtx pat, xops[3];
13677 tree arg0 = TREE_VALUE (arglist);
13678 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13679 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13680 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13681 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13682 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13683 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13684
13685 if (VECTOR_MODE_P (mode0))
13686 op0 = safe_vector_operand (op0, mode0);
13687 if (VECTOR_MODE_P (mode1))
13688 op1 = safe_vector_operand (op1, mode1);
13689
13690 if (optimize || !target
13691 || GET_MODE (target) != tmode
13692 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13693 target = gen_reg_rtx (tmode);
13694
13695 if (GET_MODE (op1) == SImode && mode1 == TImode)
13696 {
13697 rtx x = gen_reg_rtx (V4SImode);
13698 emit_insn (gen_sse2_loadd (x, op1));
13699 op1 = gen_lowpart (TImode, x);
13700 }
13701
13702 /* In case the insn wants input operands in modes different from
13703 the result, abort. */
13704 if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode)
13705 || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode))
13706 abort ();
13707
13708 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13709 op0 = copy_to_mode_reg (mode0, op0);
13710 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13711 op1 = copy_to_mode_reg (mode1, op1);
13712
13713 /* ??? Using ix86_fixup_binary_operands is problematic when
13714 we've got mismatched modes. Fake it. */
13715
13716 xops[0] = target;
13717 xops[1] = op0;
13718 xops[2] = op1;
13719
13720 if (tmode == mode0 && tmode == mode1)
13721 {
13722 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
13723 op0 = xops[1];
13724 op1 = xops[2];
13725 }
13726 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
13727 {
13728 op0 = force_reg (mode0, op0);
13729 op1 = force_reg (mode1, op1);
13730 target = gen_reg_rtx (tmode);
13731 }
13732
13733 pat = GEN_FCN (icode) (target, op0, op1);
13734 if (! pat)
13735 return 0;
13736 emit_insn (pat);
13737 return target;
13738 }
13739
13740 /* Subroutine of ix86_expand_builtin to take care of stores. */
13741
13742 static rtx
13743 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
13744 {
13745 rtx pat;
13746 tree arg0 = TREE_VALUE (arglist);
13747 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13748 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13749 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13750 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
13751 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
13752
13753 if (VECTOR_MODE_P (mode1))
13754 op1 = safe_vector_operand (op1, mode1);
13755
13756 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13757 op1 = copy_to_mode_reg (mode1, op1);
13758
13759 pat = GEN_FCN (icode) (op0, op1);
13760 if (pat)
13761 emit_insn (pat);
13762 return 0;
13763 }
13764
13765 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
13766
13767 static rtx
13768 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
13769 rtx target, int do_load)
13770 {
13771 rtx pat;
13772 tree arg0 = TREE_VALUE (arglist);
13773 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13774 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13775 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13776
13777 if (optimize || !target
13778 || GET_MODE (target) != tmode
13779 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13780 target = gen_reg_rtx (tmode);
13781 if (do_load)
13782 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13783 else
13784 {
13785 if (VECTOR_MODE_P (mode0))
13786 op0 = safe_vector_operand (op0, mode0);
13787
13788 if ((optimize && !register_operand (op0, mode0))
13789 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13790 op0 = copy_to_mode_reg (mode0, op0);
13791 }
13792
13793 pat = GEN_FCN (icode) (target, op0);
13794 if (! pat)
13795 return 0;
13796 emit_insn (pat);
13797 return target;
13798 }
13799
13800 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
13801 sqrtss, rsqrtss, rcpss. */
13802
13803 static rtx
13804 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
13805 {
13806 rtx pat;
13807 tree arg0 = TREE_VALUE (arglist);
13808 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13809 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13810 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13811
13812 if (optimize || !target
13813 || GET_MODE (target) != tmode
13814 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13815 target = gen_reg_rtx (tmode);
13816
13817 if (VECTOR_MODE_P (mode0))
13818 op0 = safe_vector_operand (op0, mode0);
13819
13820 if ((optimize && !register_operand (op0, mode0))
13821 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13822 op0 = copy_to_mode_reg (mode0, op0);
13823
13824 op1 = op0;
13825 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
13826 op1 = copy_to_mode_reg (mode0, op1);
13827
13828 pat = GEN_FCN (icode) (target, op0, op1);
13829 if (! pat)
13830 return 0;
13831 emit_insn (pat);
13832 return target;
13833 }
13834
13835 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
13836
13837 static rtx
13838 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
13839 rtx target)
13840 {
13841 rtx pat;
13842 tree arg0 = TREE_VALUE (arglist);
13843 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13844 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13845 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13846 rtx op2;
13847 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
13848 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
13849 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
13850 enum rtx_code comparison = d->comparison;
13851
13852 if (VECTOR_MODE_P (mode0))
13853 op0 = safe_vector_operand (op0, mode0);
13854 if (VECTOR_MODE_P (mode1))
13855 op1 = safe_vector_operand (op1, mode1);
13856
13857 /* Swap operands if we have a comparison that isn't available in
13858 hardware. */
13859 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
13860 {
13861 rtx tmp = gen_reg_rtx (mode1);
13862 emit_move_insn (tmp, op1);
13863 op1 = op0;
13864 op0 = tmp;
13865 }
13866
13867 if (optimize || !target
13868 || GET_MODE (target) != tmode
13869 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
13870 target = gen_reg_rtx (tmode);
13871
13872 if ((optimize && !register_operand (op0, mode0))
13873 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
13874 op0 = copy_to_mode_reg (mode0, op0);
13875 if ((optimize && !register_operand (op1, mode1))
13876 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
13877 op1 = copy_to_mode_reg (mode1, op1);
13878
13879 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13880 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
13881 if (! pat)
13882 return 0;
13883 emit_insn (pat);
13884 return target;
13885 }
13886
13887 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
13888
13889 static rtx
13890 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
13891 rtx target)
13892 {
13893 rtx pat;
13894 tree arg0 = TREE_VALUE (arglist);
13895 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13896 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13897 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13898 rtx op2;
13899 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
13900 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
13901 enum rtx_code comparison = d->comparison;
13902
13903 if (VECTOR_MODE_P (mode0))
13904 op0 = safe_vector_operand (op0, mode0);
13905 if (VECTOR_MODE_P (mode1))
13906 op1 = safe_vector_operand (op1, mode1);
13907
13908 /* Swap operands if we have a comparison that isn't available in
13909 hardware. */
13910 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
13911 {
13912 rtx tmp = op1;
13913 op1 = op0;
13914 op0 = tmp;
13915 }
13916
13917 target = gen_reg_rtx (SImode);
13918 emit_move_insn (target, const0_rtx);
13919 target = gen_rtx_SUBREG (QImode, target, 0);
13920
13921 if ((optimize && !register_operand (op0, mode0))
13922 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
13923 op0 = copy_to_mode_reg (mode0, op0);
13924 if ((optimize && !register_operand (op1, mode1))
13925 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
13926 op1 = copy_to_mode_reg (mode1, op1);
13927
13928 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13929 pat = GEN_FCN (d->icode) (op0, op1);
13930 if (! pat)
13931 return 0;
13932 emit_insn (pat);
13933 emit_insn (gen_rtx_SET (VOIDmode,
13934 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
13935 gen_rtx_fmt_ee (comparison, QImode,
13936 SET_DEST (pat),
13937 const0_rtx)));
13938
13939 return SUBREG_REG (target);
13940 }
13941
13942 /* Return the integer constant in ARG. Constrain it to be in the range
13943 of the subparts of VEC_TYPE; issue an error if not. */
13944
13945 static int
13946 get_element_number (tree vec_type, tree arg)
13947 {
13948 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
13949
13950 if (!host_integerp (arg, 1)
13951 || (elt = tree_low_cst (arg, 1), elt > max))
13952 {
13953 error ("selector must be an integer constant in the range 0..%i", max);
13954 return 0;
13955 }
13956
13957 return elt;
13958 }
13959
13960 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
13961 ix86_expand_vector_init. We DO have language-level syntax for this, in
13962 the form of (type){ init-list }. Except that since we can't place emms
13963 instructions from inside the compiler, we can't allow the use of MMX
13964 registers unless the user explicitly asks for it. So we do *not* define
13965 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
13966 we have builtins invoked by mmintrin.h that gives us license to emit
13967 these sorts of instructions. */
13968
13969 static rtx
13970 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
13971 {
13972 enum machine_mode tmode = TYPE_MODE (type);
13973 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
13974 int i, n_elt = GET_MODE_NUNITS (tmode);
13975 rtvec v = rtvec_alloc (n_elt);
13976
13977 gcc_assert (VECTOR_MODE_P (tmode));
13978
13979 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
13980 {
13981 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
13982 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
13983 }
13984
13985 gcc_assert (arglist == NULL);
13986
13987 if (!target || !register_operand (target, tmode))
13988 target = gen_reg_rtx (tmode);
13989
13990 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
13991 return target;
13992 }
13993
13994 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
13995 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
13996 had a language-level syntax for referencing vector elements. */
13997
13998 static rtx
13999 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
14000 {
14001 enum machine_mode tmode, mode0;
14002 tree arg0, arg1;
14003 int elt;
14004 rtx op0;
14005
14006 arg0 = TREE_VALUE (arglist);
14007 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14008
14009 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14010 elt = get_element_number (TREE_TYPE (arg0), arg1);
14011
14012 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14013 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14014 gcc_assert (VECTOR_MODE_P (mode0));
14015
14016 op0 = force_reg (mode0, op0);
14017
14018 if (optimize || !target || !register_operand (target, tmode))
14019 target = gen_reg_rtx (tmode);
14020
14021 ix86_expand_vector_extract (true, target, op0, elt);
14022
14023 return target;
14024 }
14025
14026 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14027 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
14028 a language-level syntax for referencing vector elements. */
14029
14030 static rtx
14031 ix86_expand_vec_set_builtin (tree arglist)
14032 {
14033 enum machine_mode tmode, mode1;
14034 tree arg0, arg1, arg2;
14035 int elt;
14036 rtx op0, op1;
14037
14038 arg0 = TREE_VALUE (arglist);
14039 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14040 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14041
14042 tmode = TYPE_MODE (TREE_TYPE (arg0));
14043 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14044 gcc_assert (VECTOR_MODE_P (tmode));
14045
14046 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
14047 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
14048 elt = get_element_number (TREE_TYPE (arg0), arg2);
14049
14050 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14051 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14052
14053 op0 = force_reg (tmode, op0);
14054 op1 = force_reg (mode1, op1);
14055
14056 ix86_expand_vector_set (true, op0, op1, elt);
14057
14058 return op0;
14059 }
14060
14061 /* Expand an expression EXP that calls a built-in function,
14062 with result going to TARGET if that's convenient
14063 (and in mode MODE if that's convenient).
14064 SUBTARGET may be used as the target for computing one of EXP's operands.
14065 IGNORE is nonzero if the value is to be ignored. */
14066
14067 static rtx
14068 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
14069 enum machine_mode mode ATTRIBUTE_UNUSED,
14070 int ignore ATTRIBUTE_UNUSED)
14071 {
14072 const struct builtin_description *d;
14073 size_t i;
14074 enum insn_code icode;
14075 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
14076 tree arglist = TREE_OPERAND (exp, 1);
14077 tree arg0, arg1, arg2;
14078 rtx op0, op1, op2, pat;
14079 enum machine_mode tmode, mode0, mode1, mode2;
14080 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14081
14082 switch (fcode)
14083 {
14084 case IX86_BUILTIN_EMMS:
14085 emit_insn (gen_mmx_emms ());
14086 return 0;
14087
14088 case IX86_BUILTIN_SFENCE:
14089 emit_insn (gen_sse_sfence ());
14090 return 0;
14091
14092 case IX86_BUILTIN_MASKMOVQ:
14093 case IX86_BUILTIN_MASKMOVDQU:
14094 icode = (fcode == IX86_BUILTIN_MASKMOVQ
14095 ? CODE_FOR_mmx_maskmovq
14096 : CODE_FOR_sse2_maskmovdqu);
14097 /* Note the arg order is different from the operand order. */
14098 arg1 = TREE_VALUE (arglist);
14099 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
14100 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14101 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14102 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14103 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14104 mode0 = insn_data[icode].operand[0].mode;
14105 mode1 = insn_data[icode].operand[1].mode;
14106 mode2 = insn_data[icode].operand[2].mode;
14107
14108 op0 = force_reg (Pmode, op0);
14109 op0 = gen_rtx_MEM (mode1, op0);
14110
14111 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14112 op0 = copy_to_mode_reg (mode0, op0);
14113 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14114 op1 = copy_to_mode_reg (mode1, op1);
14115 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
14116 op2 = copy_to_mode_reg (mode2, op2);
14117 pat = GEN_FCN (icode) (op0, op1, op2);
14118 if (! pat)
14119 return 0;
14120 emit_insn (pat);
14121 return 0;
14122
14123 case IX86_BUILTIN_SQRTSS:
14124 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
14125 case IX86_BUILTIN_RSQRTSS:
14126 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
14127 case IX86_BUILTIN_RCPSS:
14128 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
14129
14130 case IX86_BUILTIN_LOADUPS:
14131 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
14132
14133 case IX86_BUILTIN_STOREUPS:
14134 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
14135
14136 case IX86_BUILTIN_LOADHPS:
14137 case IX86_BUILTIN_LOADLPS:
14138 case IX86_BUILTIN_LOADHPD:
14139 case IX86_BUILTIN_LOADLPD:
14140 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
14141 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
14142 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
14143 : CODE_FOR_sse2_loadlpd);
14144 arg0 = TREE_VALUE (arglist);
14145 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14146 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14147 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14148 tmode = insn_data[icode].operand[0].mode;
14149 mode0 = insn_data[icode].operand[1].mode;
14150 mode1 = insn_data[icode].operand[2].mode;
14151
14152 op0 = force_reg (mode0, op0);
14153 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
14154 if (optimize || target == 0
14155 || GET_MODE (target) != tmode
14156 || !register_operand (target, tmode))
14157 target = gen_reg_rtx (tmode);
14158 pat = GEN_FCN (icode) (target, op0, op1);
14159 if (! pat)
14160 return 0;
14161 emit_insn (pat);
14162 return target;
14163
14164 case IX86_BUILTIN_STOREHPS:
14165 case IX86_BUILTIN_STORELPS:
14166 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
14167 : CODE_FOR_sse_storelps);
14168 arg0 = TREE_VALUE (arglist);
14169 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14170 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14171 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14172 mode0 = insn_data[icode].operand[0].mode;
14173 mode1 = insn_data[icode].operand[1].mode;
14174
14175 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14176 op1 = force_reg (mode1, op1);
14177
14178 pat = GEN_FCN (icode) (op0, op1);
14179 if (! pat)
14180 return 0;
14181 emit_insn (pat);
14182 return const0_rtx;
14183
14184 case IX86_BUILTIN_MOVNTPS:
14185 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
14186 case IX86_BUILTIN_MOVNTQ:
14187 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
14188
14189 case IX86_BUILTIN_LDMXCSR:
14190 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
14191 target = assign_386_stack_local (SImode, 0);
14192 emit_move_insn (target, op0);
14193 emit_insn (gen_sse_ldmxcsr (target));
14194 return 0;
14195
14196 case IX86_BUILTIN_STMXCSR:
14197 target = assign_386_stack_local (SImode, 0);
14198 emit_insn (gen_sse_stmxcsr (target));
14199 return copy_to_mode_reg (SImode, target);
14200
14201 case IX86_BUILTIN_SHUFPS:
14202 case IX86_BUILTIN_SHUFPD:
14203 icode = (fcode == IX86_BUILTIN_SHUFPS
14204 ? CODE_FOR_sse_shufps
14205 : CODE_FOR_sse2_shufpd);
14206 arg0 = TREE_VALUE (arglist);
14207 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14208 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14209 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14210 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14211 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14212 tmode = insn_data[icode].operand[0].mode;
14213 mode0 = insn_data[icode].operand[1].mode;
14214 mode1 = insn_data[icode].operand[2].mode;
14215 mode2 = insn_data[icode].operand[3].mode;
14216
14217 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14218 op0 = copy_to_mode_reg (mode0, op0);
14219 if ((optimize && !register_operand (op1, mode1))
14220 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
14221 op1 = copy_to_mode_reg (mode1, op1);
14222 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14223 {
14224 /* @@@ better error message */
14225 error ("mask must be an immediate");
14226 return gen_reg_rtx (tmode);
14227 }
14228 if (optimize || target == 0
14229 || GET_MODE (target) != tmode
14230 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14231 target = gen_reg_rtx (tmode);
14232 pat = GEN_FCN (icode) (target, op0, op1, op2);
14233 if (! pat)
14234 return 0;
14235 emit_insn (pat);
14236 return target;
14237
14238 case IX86_BUILTIN_PSHUFW:
14239 case IX86_BUILTIN_PSHUFD:
14240 case IX86_BUILTIN_PSHUFHW:
14241 case IX86_BUILTIN_PSHUFLW:
14242 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
14243 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
14244 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
14245 : CODE_FOR_mmx_pshufw);
14246 arg0 = TREE_VALUE (arglist);
14247 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14248 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14249 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14250 tmode = insn_data[icode].operand[0].mode;
14251 mode1 = insn_data[icode].operand[1].mode;
14252 mode2 = insn_data[icode].operand[2].mode;
14253
14254 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14255 op0 = copy_to_mode_reg (mode1, op0);
14256 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14257 {
14258 /* @@@ better error message */
14259 error ("mask must be an immediate");
14260 return const0_rtx;
14261 }
14262 if (target == 0
14263 || GET_MODE (target) != tmode
14264 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14265 target = gen_reg_rtx (tmode);
14266 pat = GEN_FCN (icode) (target, op0, op1);
14267 if (! pat)
14268 return 0;
14269 emit_insn (pat);
14270 return target;
14271
14272 case IX86_BUILTIN_PSLLDQI128:
14273 case IX86_BUILTIN_PSRLDQI128:
14274 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
14275 : CODE_FOR_sse2_lshrti3);
14276 arg0 = TREE_VALUE (arglist);
14277 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14278 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14279 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14280 tmode = insn_data[icode].operand[0].mode;
14281 mode1 = insn_data[icode].operand[1].mode;
14282 mode2 = insn_data[icode].operand[2].mode;
14283
14284 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14285 {
14286 op0 = copy_to_reg (op0);
14287 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
14288 }
14289 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14290 {
14291 error ("shift must be an immediate");
14292 return const0_rtx;
14293 }
14294 target = gen_reg_rtx (V2DImode);
14295 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
14296 if (! pat)
14297 return 0;
14298 emit_insn (pat);
14299 return target;
14300
14301 case IX86_BUILTIN_FEMMS:
14302 emit_insn (gen_mmx_femms ());
14303 return NULL_RTX;
14304
14305 case IX86_BUILTIN_PAVGUSB:
14306 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
14307
14308 case IX86_BUILTIN_PF2ID:
14309 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
14310
14311 case IX86_BUILTIN_PFACC:
14312 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
14313
14314 case IX86_BUILTIN_PFADD:
14315 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
14316
14317 case IX86_BUILTIN_PFCMPEQ:
14318 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
14319
14320 case IX86_BUILTIN_PFCMPGE:
14321 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
14322
14323 case IX86_BUILTIN_PFCMPGT:
14324 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
14325
14326 case IX86_BUILTIN_PFMAX:
14327 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
14328
14329 case IX86_BUILTIN_PFMIN:
14330 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
14331
14332 case IX86_BUILTIN_PFMUL:
14333 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
14334
14335 case IX86_BUILTIN_PFRCP:
14336 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
14337
14338 case IX86_BUILTIN_PFRCPIT1:
14339 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
14340
14341 case IX86_BUILTIN_PFRCPIT2:
14342 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
14343
14344 case IX86_BUILTIN_PFRSQIT1:
14345 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
14346
14347 case IX86_BUILTIN_PFRSQRT:
14348 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
14349
14350 case IX86_BUILTIN_PFSUB:
14351 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
14352
14353 case IX86_BUILTIN_PFSUBR:
14354 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
14355
14356 case IX86_BUILTIN_PI2FD:
14357 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
14358
14359 case IX86_BUILTIN_PMULHRW:
14360 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
14361
14362 case IX86_BUILTIN_PF2IW:
14363 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
14364
14365 case IX86_BUILTIN_PFNACC:
14366 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
14367
14368 case IX86_BUILTIN_PFPNACC:
14369 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
14370
14371 case IX86_BUILTIN_PI2FW:
14372 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
14373
14374 case IX86_BUILTIN_PSWAPDSI:
14375 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
14376
14377 case IX86_BUILTIN_PSWAPDSF:
14378 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
14379
14380 case IX86_BUILTIN_SQRTSD:
14381 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
14382 case IX86_BUILTIN_LOADUPD:
14383 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
14384 case IX86_BUILTIN_STOREUPD:
14385 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
14386
14387 case IX86_BUILTIN_MFENCE:
14388 emit_insn (gen_sse2_mfence ());
14389 return 0;
14390 case IX86_BUILTIN_LFENCE:
14391 emit_insn (gen_sse2_lfence ());
14392 return 0;
14393
14394 case IX86_BUILTIN_CLFLUSH:
14395 arg0 = TREE_VALUE (arglist);
14396 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14397 icode = CODE_FOR_sse2_clflush;
14398 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
14399 op0 = copy_to_mode_reg (Pmode, op0);
14400
14401 emit_insn (gen_sse2_clflush (op0));
14402 return 0;
14403
14404 case IX86_BUILTIN_MOVNTPD:
14405 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
14406 case IX86_BUILTIN_MOVNTDQ:
14407 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
14408 case IX86_BUILTIN_MOVNTI:
14409 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
14410
14411 case IX86_BUILTIN_LOADDQU:
14412 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
14413 case IX86_BUILTIN_STOREDQU:
14414 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
14415
14416 case IX86_BUILTIN_MONITOR:
14417 arg0 = TREE_VALUE (arglist);
14418 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14419 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14420 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14421 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14422 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14423 if (!REG_P (op0))
14424 op0 = copy_to_mode_reg (SImode, op0);
14425 if (!REG_P (op1))
14426 op1 = copy_to_mode_reg (SImode, op1);
14427 if (!REG_P (op2))
14428 op2 = copy_to_mode_reg (SImode, op2);
14429 emit_insn (gen_sse3_monitor (op0, op1, op2));
14430 return 0;
14431
14432 case IX86_BUILTIN_MWAIT:
14433 arg0 = TREE_VALUE (arglist);
14434 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14435 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14436 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14437 if (!REG_P (op0))
14438 op0 = copy_to_mode_reg (SImode, op0);
14439 if (!REG_P (op1))
14440 op1 = copy_to_mode_reg (SImode, op1);
14441 emit_insn (gen_sse3_mwait (op0, op1));
14442 return 0;
14443
14444 case IX86_BUILTIN_LDDQU:
14445 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
14446 target, 1);
14447
14448 case IX86_BUILTIN_VEC_INIT_V2SI:
14449 case IX86_BUILTIN_VEC_INIT_V4HI:
14450 case IX86_BUILTIN_VEC_INIT_V8QI:
14451 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
14452
14453 case IX86_BUILTIN_VEC_EXT_V2DF:
14454 case IX86_BUILTIN_VEC_EXT_V2DI:
14455 case IX86_BUILTIN_VEC_EXT_V4SF:
14456 case IX86_BUILTIN_VEC_EXT_V4SI:
14457 case IX86_BUILTIN_VEC_EXT_V8HI:
14458 case IX86_BUILTIN_VEC_EXT_V4HI:
14459 return ix86_expand_vec_ext_builtin (arglist, target);
14460
14461 case IX86_BUILTIN_VEC_SET_V8HI:
14462 case IX86_BUILTIN_VEC_SET_V4HI:
14463 return ix86_expand_vec_set_builtin (arglist);
14464
14465 default:
14466 break;
14467 }
14468
14469 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14470 if (d->code == fcode)
14471 {
14472 /* Compares are treated specially. */
14473 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14474 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
14475 || d->icode == CODE_FOR_sse2_maskcmpv2df3
14476 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14477 return ix86_expand_sse_compare (d, arglist, target);
14478
14479 return ix86_expand_binop_builtin (d->icode, arglist, target);
14480 }
14481
14482 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
14483 if (d->code == fcode)
14484 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
14485
14486 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14487 if (d->code == fcode)
14488 return ix86_expand_sse_comi (d, arglist, target);
14489
14490 gcc_unreachable ();
14491 }
14492
14493 /* Store OPERAND to the memory after reload is completed. This means
14494 that we can't easily use assign_stack_local. */
14495 rtx
14496 ix86_force_to_memory (enum machine_mode mode, rtx operand)
14497 {
14498 rtx result;
14499 if (!reload_completed)
14500 abort ();
14501 if (TARGET_RED_ZONE)
14502 {
14503 result = gen_rtx_MEM (mode,
14504 gen_rtx_PLUS (Pmode,
14505 stack_pointer_rtx,
14506 GEN_INT (-RED_ZONE_SIZE)));
14507 emit_move_insn (result, operand);
14508 }
14509 else if (!TARGET_RED_ZONE && TARGET_64BIT)
14510 {
14511 switch (mode)
14512 {
14513 case HImode:
14514 case SImode:
14515 operand = gen_lowpart (DImode, operand);
14516 /* FALLTHRU */
14517 case DImode:
14518 emit_insn (
14519 gen_rtx_SET (VOIDmode,
14520 gen_rtx_MEM (DImode,
14521 gen_rtx_PRE_DEC (DImode,
14522 stack_pointer_rtx)),
14523 operand));
14524 break;
14525 default:
14526 abort ();
14527 }
14528 result = gen_rtx_MEM (mode, stack_pointer_rtx);
14529 }
14530 else
14531 {
14532 switch (mode)
14533 {
14534 case DImode:
14535 {
14536 rtx operands[2];
14537 split_di (&operand, 1, operands, operands + 1);
14538 emit_insn (
14539 gen_rtx_SET (VOIDmode,
14540 gen_rtx_MEM (SImode,
14541 gen_rtx_PRE_DEC (Pmode,
14542 stack_pointer_rtx)),
14543 operands[1]));
14544 emit_insn (
14545 gen_rtx_SET (VOIDmode,
14546 gen_rtx_MEM (SImode,
14547 gen_rtx_PRE_DEC (Pmode,
14548 stack_pointer_rtx)),
14549 operands[0]));
14550 }
14551 break;
14552 case HImode:
14553 /* It is better to store HImodes as SImodes. */
14554 if (!TARGET_PARTIAL_REG_STALL)
14555 operand = gen_lowpart (SImode, operand);
14556 /* FALLTHRU */
14557 case SImode:
14558 emit_insn (
14559 gen_rtx_SET (VOIDmode,
14560 gen_rtx_MEM (GET_MODE (operand),
14561 gen_rtx_PRE_DEC (SImode,
14562 stack_pointer_rtx)),
14563 operand));
14564 break;
14565 default:
14566 abort ();
14567 }
14568 result = gen_rtx_MEM (mode, stack_pointer_rtx);
14569 }
14570 return result;
14571 }
14572
14573 /* Free operand from the memory. */
14574 void
14575 ix86_free_from_memory (enum machine_mode mode)
14576 {
14577 if (!TARGET_RED_ZONE)
14578 {
14579 int size;
14580
14581 if (mode == DImode || TARGET_64BIT)
14582 size = 8;
14583 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
14584 size = 2;
14585 else
14586 size = 4;
14587 /* Use LEA to deallocate stack space. In peephole2 it will be converted
14588 to pop or add instruction if registers are available. */
14589 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14590 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
14591 GEN_INT (size))));
14592 }
14593 }
14594
14595 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
14596 QImode must go into class Q_REGS.
14597 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
14598 movdf to do mem-to-mem moves through integer regs. */
14599 enum reg_class
14600 ix86_preferred_reload_class (rtx x, enum reg_class class)
14601 {
14602 if (class == NO_REGS)
14603 return NO_REGS;
14604 if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x)))
14605 return NO_REGS;
14606 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
14607 {
14608 /* SSE can't load any constant directly yet. */
14609 if (SSE_CLASS_P (class))
14610 return NO_REGS;
14611 /* Floats can load 0 and 1. */
14612 if (MAYBE_FLOAT_CLASS_P (class) && standard_80387_constant_p (x))
14613 {
14614 /* Limit class to non-SSE. Use GENERAL_REGS if possible. */
14615 if (MAYBE_SSE_CLASS_P (class))
14616 return (reg_class_subset_p (class, GENERAL_REGS)
14617 ? GENERAL_REGS : FLOAT_REGS);
14618 else
14619 return class;
14620 }
14621 /* General regs can load everything. */
14622 if (reg_class_subset_p (class, GENERAL_REGS))
14623 return class;
14624 /* In case we haven't resolved FLOAT or SSE yet, give up. */
14625 if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class))
14626 return NO_REGS;
14627 }
14628 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
14629 return NO_REGS;
14630 if (GET_MODE (x) == QImode && ! reg_class_subset_p (class, Q_REGS))
14631 return Q_REGS;
14632 return class;
14633 }
14634
14635 /* If we are copying between general and FP registers, we need a memory
14636 location. The same is true for SSE and MMX registers.
14637
14638 The macro can't work reliably when one of the CLASSES is class containing
14639 registers from multiple units (SSE, MMX, integer). We avoid this by never
14640 combining those units in single alternative in the machine description.
14641 Ensure that this constraint holds to avoid unexpected surprises.
14642
14643 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
14644 enforce these sanity checks. */
14645
14646 int
14647 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
14648 enum machine_mode mode, int strict)
14649 {
14650 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
14651 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
14652 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
14653 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
14654 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
14655 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
14656 {
14657 if (strict)
14658 abort ();
14659 return true;
14660 }
14661
14662 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
14663 return true;
14664
14665 /* ??? This is a lie. We do have moves between mmx/general, and for
14666 mmx/sse2. But by saying we need secondary memory we discourage the
14667 register allocator from using the mmx registers unless needed. */
14668 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
14669 return true;
14670
14671 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
14672 {
14673 /* SSE1 doesn't have any direct moves from other classes. */
14674 if (!TARGET_SSE2)
14675 return true;
14676
14677 /* If the target says that inter-unit moves are more expensive
14678 than moving through memory, then don't generate them. */
14679 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
14680 return true;
14681
14682 /* Between SSE and general, we have moves no larger than word size. */
14683 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
14684 return true;
14685
14686 /* ??? For the cost of one register reformat penalty, we could use
14687 the same instructions to move SFmode and DFmode data, but the
14688 relevant move patterns don't support those alternatives. */
14689 if (mode == SFmode || mode == DFmode)
14690 return true;
14691 }
14692
14693 return false;
14694 }
14695
14696 /* Return the cost of moving data from a register in class CLASS1 to
14697 one in class CLASS2.
14698
14699 It is not required that the cost always equal 2 when FROM is the same as TO;
14700 on some machines it is expensive to move between registers if they are not
14701 general registers. */
14702
14703 int
14704 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
14705 enum reg_class class2)
14706 {
14707 /* In case we require secondary memory, compute cost of the store followed
14708 by load. In order to avoid bad register allocation choices, we need
14709 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
14710
14711 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
14712 {
14713 int cost = 1;
14714
14715 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
14716 MEMORY_MOVE_COST (mode, class1, 1));
14717 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
14718 MEMORY_MOVE_COST (mode, class2, 1));
14719
14720 /* In case of copying from general_purpose_register we may emit multiple
14721 stores followed by single load causing memory size mismatch stall.
14722 Count this as arbitrarily high cost of 20. */
14723 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
14724 cost += 20;
14725
14726 /* In the case of FP/MMX moves, the registers actually overlap, and we
14727 have to switch modes in order to treat them differently. */
14728 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
14729 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
14730 cost += 20;
14731
14732 return cost;
14733 }
14734
14735 /* Moves between SSE/MMX and integer unit are expensive. */
14736 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
14737 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
14738 return ix86_cost->mmxsse_to_integer;
14739 if (MAYBE_FLOAT_CLASS_P (class1))
14740 return ix86_cost->fp_move;
14741 if (MAYBE_SSE_CLASS_P (class1))
14742 return ix86_cost->sse_move;
14743 if (MAYBE_MMX_CLASS_P (class1))
14744 return ix86_cost->mmx_move;
14745 return 2;
14746 }
14747
14748 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
14749 int
14750 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
14751 {
14752 /* Flags and only flags can only hold CCmode values. */
14753 if (CC_REGNO_P (regno))
14754 return GET_MODE_CLASS (mode) == MODE_CC;
14755 if (GET_MODE_CLASS (mode) == MODE_CC
14756 || GET_MODE_CLASS (mode) == MODE_RANDOM
14757 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
14758 return 0;
14759 if (FP_REGNO_P (regno))
14760 return VALID_FP_MODE_P (mode);
14761 if (SSE_REGNO_P (regno))
14762 {
14763 /* We implement the move patterns for all vector modes into and
14764 out of SSE registers, even when no operation instructions
14765 are available. */
14766 return (VALID_SSE_REG_MODE (mode)
14767 || VALID_SSE2_REG_MODE (mode)
14768 || VALID_MMX_REG_MODE (mode)
14769 || VALID_MMX_REG_MODE_3DNOW (mode));
14770 }
14771 if (MMX_REGNO_P (regno))
14772 {
14773 /* We implement the move patterns for 3DNOW modes even in MMX mode,
14774 so if the register is available at all, then we can move data of
14775 the given mode into or out of it. */
14776 return (VALID_MMX_REG_MODE (mode)
14777 || VALID_MMX_REG_MODE_3DNOW (mode));
14778 }
14779 /* We handle both integer and floats in the general purpose registers.
14780 In future we should be able to handle vector modes as well. */
14781 if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
14782 return 0;
14783 /* Take care for QImode values - they can be in non-QI regs, but then
14784 they do cause partial register stalls. */
14785 if (regno < 4 || mode != QImode || TARGET_64BIT)
14786 return 1;
14787 return reload_in_progress || reload_completed || !TARGET_PARTIAL_REG_STALL;
14788 }
14789
14790 /* Return the cost of moving data of mode M between a
14791 register and memory. A value of 2 is the default; this cost is
14792 relative to those in `REGISTER_MOVE_COST'.
14793
14794 If moving between registers and memory is more expensive than
14795 between two registers, you should define this macro to express the
14796 relative cost.
14797
14798 Model also increased moving costs of QImode registers in non
14799 Q_REGS classes.
14800 */
14801 int
14802 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
14803 {
14804 if (FLOAT_CLASS_P (class))
14805 {
14806 int index;
14807 switch (mode)
14808 {
14809 case SFmode:
14810 index = 0;
14811 break;
14812 case DFmode:
14813 index = 1;
14814 break;
14815 case XFmode:
14816 index = 2;
14817 break;
14818 default:
14819 return 100;
14820 }
14821 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
14822 }
14823 if (SSE_CLASS_P (class))
14824 {
14825 int index;
14826 switch (GET_MODE_SIZE (mode))
14827 {
14828 case 4:
14829 index = 0;
14830 break;
14831 case 8:
14832 index = 1;
14833 break;
14834 case 16:
14835 index = 2;
14836 break;
14837 default:
14838 return 100;
14839 }
14840 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
14841 }
14842 if (MMX_CLASS_P (class))
14843 {
14844 int index;
14845 switch (GET_MODE_SIZE (mode))
14846 {
14847 case 4:
14848 index = 0;
14849 break;
14850 case 8:
14851 index = 1;
14852 break;
14853 default:
14854 return 100;
14855 }
14856 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
14857 }
14858 switch (GET_MODE_SIZE (mode))
14859 {
14860 case 1:
14861 if (in)
14862 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
14863 : ix86_cost->movzbl_load);
14864 else
14865 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
14866 : ix86_cost->int_store[0] + 4);
14867 break;
14868 case 2:
14869 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
14870 default:
14871 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
14872 if (mode == TFmode)
14873 mode = XFmode;
14874 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
14875 * (((int) GET_MODE_SIZE (mode)
14876 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
14877 }
14878 }
14879
14880 /* Compute a (partial) cost for rtx X. Return true if the complete
14881 cost has been computed, and false if subexpressions should be
14882 scanned. In either case, *TOTAL contains the cost result. */
14883
14884 static bool
14885 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
14886 {
14887 enum machine_mode mode = GET_MODE (x);
14888
14889 switch (code)
14890 {
14891 case CONST_INT:
14892 case CONST:
14893 case LABEL_REF:
14894 case SYMBOL_REF:
14895 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
14896 *total = 3;
14897 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
14898 *total = 2;
14899 else if (flag_pic && SYMBOLIC_CONST (x)
14900 && (!TARGET_64BIT
14901 || (!GET_CODE (x) != LABEL_REF
14902 && (GET_CODE (x) != SYMBOL_REF
14903 || !SYMBOL_REF_LOCAL_P (x)))))
14904 *total = 1;
14905 else
14906 *total = 0;
14907 return true;
14908
14909 case CONST_DOUBLE:
14910 if (mode == VOIDmode)
14911 *total = 0;
14912 else
14913 switch (standard_80387_constant_p (x))
14914 {
14915 case 1: /* 0.0 */
14916 *total = 1;
14917 break;
14918 default: /* Other constants */
14919 *total = 2;
14920 break;
14921 case 0:
14922 case -1:
14923 /* Start with (MEM (SYMBOL_REF)), since that's where
14924 it'll probably end up. Add a penalty for size. */
14925 *total = (COSTS_N_INSNS (1)
14926 + (flag_pic != 0 && !TARGET_64BIT)
14927 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
14928 break;
14929 }
14930 return true;
14931
14932 case ZERO_EXTEND:
14933 /* The zero extensions is often completely free on x86_64, so make
14934 it as cheap as possible. */
14935 if (TARGET_64BIT && mode == DImode
14936 && GET_MODE (XEXP (x, 0)) == SImode)
14937 *total = 1;
14938 else if (TARGET_ZERO_EXTEND_WITH_AND)
14939 *total = COSTS_N_INSNS (ix86_cost->add);
14940 else
14941 *total = COSTS_N_INSNS (ix86_cost->movzx);
14942 return false;
14943
14944 case SIGN_EXTEND:
14945 *total = COSTS_N_INSNS (ix86_cost->movsx);
14946 return false;
14947
14948 case ASHIFT:
14949 if (GET_CODE (XEXP (x, 1)) == CONST_INT
14950 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
14951 {
14952 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14953 if (value == 1)
14954 {
14955 *total = COSTS_N_INSNS (ix86_cost->add);
14956 return false;
14957 }
14958 if ((value == 2 || value == 3)
14959 && ix86_cost->lea <= ix86_cost->shift_const)
14960 {
14961 *total = COSTS_N_INSNS (ix86_cost->lea);
14962 return false;
14963 }
14964 }
14965 /* FALLTHRU */
14966
14967 case ROTATE:
14968 case ASHIFTRT:
14969 case LSHIFTRT:
14970 case ROTATERT:
14971 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
14972 {
14973 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14974 {
14975 if (INTVAL (XEXP (x, 1)) > 32)
14976 *total = COSTS_N_INSNS(ix86_cost->shift_const + 2);
14977 else
14978 *total = COSTS_N_INSNS(ix86_cost->shift_const * 2);
14979 }
14980 else
14981 {
14982 if (GET_CODE (XEXP (x, 1)) == AND)
14983 *total = COSTS_N_INSNS(ix86_cost->shift_var * 2);
14984 else
14985 *total = COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2);
14986 }
14987 }
14988 else
14989 {
14990 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14991 *total = COSTS_N_INSNS (ix86_cost->shift_const);
14992 else
14993 *total = COSTS_N_INSNS (ix86_cost->shift_var);
14994 }
14995 return false;
14996
14997 case MULT:
14998 if (FLOAT_MODE_P (mode))
14999 {
15000 *total = COSTS_N_INSNS (ix86_cost->fmul);
15001 return false;
15002 }
15003 else
15004 {
15005 rtx op0 = XEXP (x, 0);
15006 rtx op1 = XEXP (x, 1);
15007 int nbits;
15008 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
15009 {
15010 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
15011 for (nbits = 0; value != 0; value &= value - 1)
15012 nbits++;
15013 }
15014 else
15015 /* This is arbitrary. */
15016 nbits = 7;
15017
15018 /* Compute costs correctly for widening multiplication. */
15019 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op1) == ZERO_EXTEND)
15020 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
15021 == GET_MODE_SIZE (mode))
15022 {
15023 int is_mulwiden = 0;
15024 enum machine_mode inner_mode = GET_MODE (op0);
15025
15026 if (GET_CODE (op0) == GET_CODE (op1))
15027 is_mulwiden = 1, op1 = XEXP (op1, 0);
15028 else if (GET_CODE (op1) == CONST_INT)
15029 {
15030 if (GET_CODE (op0) == SIGN_EXTEND)
15031 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
15032 == INTVAL (op1);
15033 else
15034 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
15035 }
15036
15037 if (is_mulwiden)
15038 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
15039 }
15040
15041 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
15042 + nbits * ix86_cost->mult_bit)
15043 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
15044
15045 return true;
15046 }
15047
15048 case DIV:
15049 case UDIV:
15050 case MOD:
15051 case UMOD:
15052 if (FLOAT_MODE_P (mode))
15053 *total = COSTS_N_INSNS (ix86_cost->fdiv);
15054 else
15055 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
15056 return false;
15057
15058 case PLUS:
15059 if (FLOAT_MODE_P (mode))
15060 *total = COSTS_N_INSNS (ix86_cost->fadd);
15061 else if (GET_MODE_CLASS (mode) == MODE_INT
15062 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
15063 {
15064 if (GET_CODE (XEXP (x, 0)) == PLUS
15065 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
15066 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
15067 && CONSTANT_P (XEXP (x, 1)))
15068 {
15069 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
15070 if (val == 2 || val == 4 || val == 8)
15071 {
15072 *total = COSTS_N_INSNS (ix86_cost->lea);
15073 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15074 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
15075 outer_code);
15076 *total += rtx_cost (XEXP (x, 1), outer_code);
15077 return true;
15078 }
15079 }
15080 else if (GET_CODE (XEXP (x, 0)) == MULT
15081 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
15082 {
15083 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
15084 if (val == 2 || val == 4 || val == 8)
15085 {
15086 *total = COSTS_N_INSNS (ix86_cost->lea);
15087 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15088 *total += rtx_cost (XEXP (x, 1), outer_code);
15089 return true;
15090 }
15091 }
15092 else if (GET_CODE (XEXP (x, 0)) == PLUS)
15093 {
15094 *total = COSTS_N_INSNS (ix86_cost->lea);
15095 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15096 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15097 *total += rtx_cost (XEXP (x, 1), outer_code);
15098 return true;
15099 }
15100 }
15101 /* FALLTHRU */
15102
15103 case MINUS:
15104 if (FLOAT_MODE_P (mode))
15105 {
15106 *total = COSTS_N_INSNS (ix86_cost->fadd);
15107 return false;
15108 }
15109 /* FALLTHRU */
15110
15111 case AND:
15112 case IOR:
15113 case XOR:
15114 if (!TARGET_64BIT && mode == DImode)
15115 {
15116 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
15117 + (rtx_cost (XEXP (x, 0), outer_code)
15118 << (GET_MODE (XEXP (x, 0)) != DImode))
15119 + (rtx_cost (XEXP (x, 1), outer_code)
15120 << (GET_MODE (XEXP (x, 1)) != DImode)));
15121 return true;
15122 }
15123 /* FALLTHRU */
15124
15125 case NEG:
15126 if (FLOAT_MODE_P (mode))
15127 {
15128 *total = COSTS_N_INSNS (ix86_cost->fchs);
15129 return false;
15130 }
15131 /* FALLTHRU */
15132
15133 case NOT:
15134 if (!TARGET_64BIT && mode == DImode)
15135 *total = COSTS_N_INSNS (ix86_cost->add * 2);
15136 else
15137 *total = COSTS_N_INSNS (ix86_cost->add);
15138 return false;
15139
15140 case COMPARE:
15141 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
15142 && XEXP (XEXP (x, 0), 1) == const1_rtx
15143 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
15144 && XEXP (x, 1) == const0_rtx)
15145 {
15146 /* This kind of construct is implemented using test[bwl].
15147 Treat it as if we had an AND. */
15148 *total = (COSTS_N_INSNS (ix86_cost->add)
15149 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
15150 + rtx_cost (const1_rtx, outer_code));
15151 return true;
15152 }
15153 return false;
15154
15155 case FLOAT_EXTEND:
15156 if (!TARGET_SSE_MATH
15157 || mode == XFmode
15158 || (mode == DFmode && !TARGET_SSE2))
15159 *total = 0;
15160 return false;
15161
15162 case ABS:
15163 if (FLOAT_MODE_P (mode))
15164 *total = COSTS_N_INSNS (ix86_cost->fabs);
15165 return false;
15166
15167 case SQRT:
15168 if (FLOAT_MODE_P (mode))
15169 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
15170 return false;
15171
15172 case UNSPEC:
15173 if (XINT (x, 1) == UNSPEC_TP)
15174 *total = 0;
15175 return false;
15176
15177 default:
15178 return false;
15179 }
15180 }
15181
15182 #if TARGET_MACHO
15183
15184 static int current_machopic_label_num;
15185
15186 /* Given a symbol name and its associated stub, write out the
15187 definition of the stub. */
15188
15189 void
15190 machopic_output_stub (FILE *file, const char *symb, const char *stub)
15191 {
15192 unsigned int length;
15193 char *binder_name, *symbol_name, lazy_ptr_name[32];
15194 int label = ++current_machopic_label_num;
15195
15196 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
15197 symb = (*targetm.strip_name_encoding) (symb);
15198
15199 length = strlen (stub);
15200 binder_name = alloca (length + 32);
15201 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
15202
15203 length = strlen (symb);
15204 symbol_name = alloca (length + 32);
15205 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
15206
15207 sprintf (lazy_ptr_name, "L%d$lz", label);
15208
15209 if (MACHOPIC_PURE)
15210 machopic_picsymbol_stub_section ();
15211 else
15212 machopic_symbol_stub_section ();
15213
15214 fprintf (file, "%s:\n", stub);
15215 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
15216
15217 if (MACHOPIC_PURE)
15218 {
15219 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
15220 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
15221 fprintf (file, "\tjmp %%edx\n");
15222 }
15223 else
15224 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
15225
15226 fprintf (file, "%s:\n", binder_name);
15227
15228 if (MACHOPIC_PURE)
15229 {
15230 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
15231 fprintf (file, "\tpushl %%eax\n");
15232 }
15233 else
15234 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
15235
15236 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
15237
15238 machopic_lazy_symbol_ptr_section ();
15239 fprintf (file, "%s:\n", lazy_ptr_name);
15240 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
15241 fprintf (file, "\t.long %s\n", binder_name);
15242 }
15243 #endif /* TARGET_MACHO */
15244
15245 /* Order the registers for register allocator. */
15246
15247 void
15248 x86_order_regs_for_local_alloc (void)
15249 {
15250 int pos = 0;
15251 int i;
15252
15253 /* First allocate the local general purpose registers. */
15254 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15255 if (GENERAL_REGNO_P (i) && call_used_regs[i])
15256 reg_alloc_order [pos++] = i;
15257
15258 /* Global general purpose registers. */
15259 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15260 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
15261 reg_alloc_order [pos++] = i;
15262
15263 /* x87 registers come first in case we are doing FP math
15264 using them. */
15265 if (!TARGET_SSE_MATH)
15266 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
15267 reg_alloc_order [pos++] = i;
15268
15269 /* SSE registers. */
15270 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
15271 reg_alloc_order [pos++] = i;
15272 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
15273 reg_alloc_order [pos++] = i;
15274
15275 /* x87 registers. */
15276 if (TARGET_SSE_MATH)
15277 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
15278 reg_alloc_order [pos++] = i;
15279
15280 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
15281 reg_alloc_order [pos++] = i;
15282
15283 /* Initialize the rest of array as we do not allocate some registers
15284 at all. */
15285 while (pos < FIRST_PSEUDO_REGISTER)
15286 reg_alloc_order [pos++] = 0;
15287 }
15288
15289 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
15290 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
15291 #endif
15292
15293 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
15294 struct attribute_spec.handler. */
15295 static tree
15296 ix86_handle_struct_attribute (tree *node, tree name,
15297 tree args ATTRIBUTE_UNUSED,
15298 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
15299 {
15300 tree *type = NULL;
15301 if (DECL_P (*node))
15302 {
15303 if (TREE_CODE (*node) == TYPE_DECL)
15304 type = &TREE_TYPE (*node);
15305 }
15306 else
15307 type = node;
15308
15309 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
15310 || TREE_CODE (*type) == UNION_TYPE)))
15311 {
15312 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
15313 *no_add_attrs = true;
15314 }
15315
15316 else if ((is_attribute_p ("ms_struct", name)
15317 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
15318 || ((is_attribute_p ("gcc_struct", name)
15319 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
15320 {
15321 warning ("%qs incompatible attribute ignored",
15322 IDENTIFIER_POINTER (name));
15323 *no_add_attrs = true;
15324 }
15325
15326 return NULL_TREE;
15327 }
15328
15329 static bool
15330 ix86_ms_bitfield_layout_p (tree record_type)
15331 {
15332 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
15333 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
15334 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
15335 }
15336
15337 /* Returns an expression indicating where the this parameter is
15338 located on entry to the FUNCTION. */
15339
15340 static rtx
15341 x86_this_parameter (tree function)
15342 {
15343 tree type = TREE_TYPE (function);
15344
15345 if (TARGET_64BIT)
15346 {
15347 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
15348 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
15349 }
15350
15351 if (ix86_function_regparm (type, function) > 0)
15352 {
15353 tree parm;
15354
15355 parm = TYPE_ARG_TYPES (type);
15356 /* Figure out whether or not the function has a variable number of
15357 arguments. */
15358 for (; parm; parm = TREE_CHAIN (parm))
15359 if (TREE_VALUE (parm) == void_type_node)
15360 break;
15361 /* If not, the this parameter is in the first argument. */
15362 if (parm)
15363 {
15364 int regno = 0;
15365 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
15366 regno = 2;
15367 return gen_rtx_REG (SImode, regno);
15368 }
15369 }
15370
15371 if (aggregate_value_p (TREE_TYPE (type), type))
15372 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
15373 else
15374 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
15375 }
15376
15377 /* Determine whether x86_output_mi_thunk can succeed. */
15378
15379 static bool
15380 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
15381 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
15382 HOST_WIDE_INT vcall_offset, tree function)
15383 {
15384 /* 64-bit can handle anything. */
15385 if (TARGET_64BIT)
15386 return true;
15387
15388 /* For 32-bit, everything's fine if we have one free register. */
15389 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
15390 return true;
15391
15392 /* Need a free register for vcall_offset. */
15393 if (vcall_offset)
15394 return false;
15395
15396 /* Need a free register for GOT references. */
15397 if (flag_pic && !(*targetm.binds_local_p) (function))
15398 return false;
15399
15400 /* Otherwise ok. */
15401 return true;
15402 }
15403
15404 /* Output the assembler code for a thunk function. THUNK_DECL is the
15405 declaration for the thunk function itself, FUNCTION is the decl for
15406 the target function. DELTA is an immediate constant offset to be
15407 added to THIS. If VCALL_OFFSET is nonzero, the word at
15408 *(*this + vcall_offset) should be added to THIS. */
15409
15410 static void
15411 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
15412 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
15413 HOST_WIDE_INT vcall_offset, tree function)
15414 {
15415 rtx xops[3];
15416 rtx this = x86_this_parameter (function);
15417 rtx this_reg, tmp;
15418
15419 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
15420 pull it in now and let DELTA benefit. */
15421 if (REG_P (this))
15422 this_reg = this;
15423 else if (vcall_offset)
15424 {
15425 /* Put the this parameter into %eax. */
15426 xops[0] = this;
15427 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
15428 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15429 }
15430 else
15431 this_reg = NULL_RTX;
15432
15433 /* Adjust the this parameter by a fixed constant. */
15434 if (delta)
15435 {
15436 xops[0] = GEN_INT (delta);
15437 xops[1] = this_reg ? this_reg : this;
15438 if (TARGET_64BIT)
15439 {
15440 if (!x86_64_general_operand (xops[0], DImode))
15441 {
15442 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
15443 xops[1] = tmp;
15444 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
15445 xops[0] = tmp;
15446 xops[1] = this;
15447 }
15448 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
15449 }
15450 else
15451 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
15452 }
15453
15454 /* Adjust the this parameter by a value stored in the vtable. */
15455 if (vcall_offset)
15456 {
15457 if (TARGET_64BIT)
15458 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
15459 else
15460 {
15461 int tmp_regno = 2 /* ECX */;
15462 if (lookup_attribute ("fastcall",
15463 TYPE_ATTRIBUTES (TREE_TYPE (function))))
15464 tmp_regno = 0 /* EAX */;
15465 tmp = gen_rtx_REG (SImode, tmp_regno);
15466 }
15467
15468 xops[0] = gen_rtx_MEM (Pmode, this_reg);
15469 xops[1] = tmp;
15470 if (TARGET_64BIT)
15471 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
15472 else
15473 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15474
15475 /* Adjust the this parameter. */
15476 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
15477 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
15478 {
15479 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
15480 xops[0] = GEN_INT (vcall_offset);
15481 xops[1] = tmp2;
15482 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
15483 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
15484 }
15485 xops[1] = this_reg;
15486 if (TARGET_64BIT)
15487 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
15488 else
15489 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
15490 }
15491
15492 /* If necessary, drop THIS back to its stack slot. */
15493 if (this_reg && this_reg != this)
15494 {
15495 xops[0] = this_reg;
15496 xops[1] = this;
15497 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15498 }
15499
15500 xops[0] = XEXP (DECL_RTL (function), 0);
15501 if (TARGET_64BIT)
15502 {
15503 if (!flag_pic || (*targetm.binds_local_p) (function))
15504 output_asm_insn ("jmp\t%P0", xops);
15505 else
15506 {
15507 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
15508 tmp = gen_rtx_CONST (Pmode, tmp);
15509 tmp = gen_rtx_MEM (QImode, tmp);
15510 xops[0] = tmp;
15511 output_asm_insn ("jmp\t%A0", xops);
15512 }
15513 }
15514 else
15515 {
15516 if (!flag_pic || (*targetm.binds_local_p) (function))
15517 output_asm_insn ("jmp\t%P0", xops);
15518 else
15519 #if TARGET_MACHO
15520 if (TARGET_MACHO)
15521 {
15522 rtx sym_ref = XEXP (DECL_RTL (function), 0);
15523 tmp = (gen_rtx_SYMBOL_REF
15524 (Pmode,
15525 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
15526 tmp = gen_rtx_MEM (QImode, tmp);
15527 xops[0] = tmp;
15528 output_asm_insn ("jmp\t%0", xops);
15529 }
15530 else
15531 #endif /* TARGET_MACHO */
15532 {
15533 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
15534 output_set_got (tmp);
15535
15536 xops[1] = tmp;
15537 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
15538 output_asm_insn ("jmp\t{*}%1", xops);
15539 }
15540 }
15541 }
15542
15543 static void
15544 x86_file_start (void)
15545 {
15546 default_file_start ();
15547 if (X86_FILE_START_VERSION_DIRECTIVE)
15548 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
15549 if (X86_FILE_START_FLTUSED)
15550 fputs ("\t.global\t__fltused\n", asm_out_file);
15551 if (ix86_asm_dialect == ASM_INTEL)
15552 fputs ("\t.intel_syntax\n", asm_out_file);
15553 }
15554
15555 int
15556 x86_field_alignment (tree field, int computed)
15557 {
15558 enum machine_mode mode;
15559 tree type = TREE_TYPE (field);
15560
15561 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
15562 return computed;
15563 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
15564 ? get_inner_array_type (type) : type);
15565 if (mode == DFmode || mode == DCmode
15566 || GET_MODE_CLASS (mode) == MODE_INT
15567 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
15568 return MIN (32, computed);
15569 return computed;
15570 }
15571
15572 /* Output assembler code to FILE to increment profiler label # LABELNO
15573 for profiling a function entry. */
15574 void
15575 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
15576 {
15577 if (TARGET_64BIT)
15578 if (flag_pic)
15579 {
15580 #ifndef NO_PROFILE_COUNTERS
15581 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
15582 #endif
15583 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
15584 }
15585 else
15586 {
15587 #ifndef NO_PROFILE_COUNTERS
15588 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
15589 #endif
15590 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
15591 }
15592 else if (flag_pic)
15593 {
15594 #ifndef NO_PROFILE_COUNTERS
15595 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
15596 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
15597 #endif
15598 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
15599 }
15600 else
15601 {
15602 #ifndef NO_PROFILE_COUNTERS
15603 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
15604 PROFILE_COUNT_REGISTER);
15605 #endif
15606 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
15607 }
15608 }
15609
15610 /* We don't have exact information about the insn sizes, but we may assume
15611 quite safely that we are informed about all 1 byte insns and memory
15612 address sizes. This is enough to eliminate unnecessary padding in
15613 99% of cases. */
15614
15615 static int
15616 min_insn_size (rtx insn)
15617 {
15618 int l = 0;
15619
15620 if (!INSN_P (insn) || !active_insn_p (insn))
15621 return 0;
15622
15623 /* Discard alignments we've emit and jump instructions. */
15624 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
15625 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
15626 return 0;
15627 if (GET_CODE (insn) == JUMP_INSN
15628 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
15629 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
15630 return 0;
15631
15632 /* Important case - calls are always 5 bytes.
15633 It is common to have many calls in the row. */
15634 if (GET_CODE (insn) == CALL_INSN
15635 && symbolic_reference_mentioned_p (PATTERN (insn))
15636 && !SIBLING_CALL_P (insn))
15637 return 5;
15638 if (get_attr_length (insn) <= 1)
15639 return 1;
15640
15641 /* For normal instructions we may rely on the sizes of addresses
15642 and the presence of symbol to require 4 bytes of encoding.
15643 This is not the case for jumps where references are PC relative. */
15644 if (GET_CODE (insn) != JUMP_INSN)
15645 {
15646 l = get_attr_length_address (insn);
15647 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
15648 l = 4;
15649 }
15650 if (l)
15651 return 1+l;
15652 else
15653 return 2;
15654 }
15655
15656 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
15657 window. */
15658
15659 static void
15660 ix86_avoid_jump_misspredicts (void)
15661 {
15662 rtx insn, start = get_insns ();
15663 int nbytes = 0, njumps = 0;
15664 int isjump = 0;
15665
15666 /* Look for all minimal intervals of instructions containing 4 jumps.
15667 The intervals are bounded by START and INSN. NBYTES is the total
15668 size of instructions in the interval including INSN and not including
15669 START. When the NBYTES is smaller than 16 bytes, it is possible
15670 that the end of START and INSN ends up in the same 16byte page.
15671
15672 The smallest offset in the page INSN can start is the case where START
15673 ends on the offset 0. Offset of INSN is then NBYTES - sizeof (INSN).
15674 We add p2align to 16byte window with maxskip 17 - NBYTES + sizeof (INSN).
15675 */
15676 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
15677 {
15678
15679 nbytes += min_insn_size (insn);
15680 if (dump_file)
15681 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
15682 INSN_UID (insn), min_insn_size (insn));
15683 if ((GET_CODE (insn) == JUMP_INSN
15684 && GET_CODE (PATTERN (insn)) != ADDR_VEC
15685 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
15686 || GET_CODE (insn) == CALL_INSN)
15687 njumps++;
15688 else
15689 continue;
15690
15691 while (njumps > 3)
15692 {
15693 start = NEXT_INSN (start);
15694 if ((GET_CODE (start) == JUMP_INSN
15695 && GET_CODE (PATTERN (start)) != ADDR_VEC
15696 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
15697 || GET_CODE (start) == CALL_INSN)
15698 njumps--, isjump = 1;
15699 else
15700 isjump = 0;
15701 nbytes -= min_insn_size (start);
15702 }
15703 if (njumps < 0)
15704 abort ();
15705 if (dump_file)
15706 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
15707 INSN_UID (start), INSN_UID (insn), nbytes);
15708
15709 if (njumps == 3 && isjump && nbytes < 16)
15710 {
15711 int padsize = 15 - nbytes + min_insn_size (insn);
15712
15713 if (dump_file)
15714 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
15715 INSN_UID (insn), padsize);
15716 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
15717 }
15718 }
15719 }
15720
15721 /* AMD Athlon works faster
15722 when RET is not destination of conditional jump or directly preceded
15723 by other jump instruction. We avoid the penalty by inserting NOP just
15724 before the RET instructions in such cases. */
15725 static void
15726 ix86_pad_returns (void)
15727 {
15728 edge e;
15729 edge_iterator ei;
15730
15731 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
15732 {
15733 basic_block bb = e->src;
15734 rtx ret = BB_END (bb);
15735 rtx prev;
15736 bool replace = false;
15737
15738 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
15739 || !maybe_hot_bb_p (bb))
15740 continue;
15741 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
15742 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
15743 break;
15744 if (prev && GET_CODE (prev) == CODE_LABEL)
15745 {
15746 edge e;
15747 edge_iterator ei;
15748
15749 FOR_EACH_EDGE (e, ei, bb->preds)
15750 if (EDGE_FREQUENCY (e) && e->src->index >= 0
15751 && !(e->flags & EDGE_FALLTHRU))
15752 replace = true;
15753 }
15754 if (!replace)
15755 {
15756 prev = prev_active_insn (ret);
15757 if (prev
15758 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
15759 || GET_CODE (prev) == CALL_INSN))
15760 replace = true;
15761 /* Empty functions get branch mispredict even when the jump destination
15762 is not visible to us. */
15763 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
15764 replace = true;
15765 }
15766 if (replace)
15767 {
15768 emit_insn_before (gen_return_internal_long (), ret);
15769 delete_insn (ret);
15770 }
15771 }
15772 }
15773
15774 /* Implement machine specific optimizations. We implement padding of returns
15775 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
15776 static void
15777 ix86_reorg (void)
15778 {
15779 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
15780 ix86_pad_returns ();
15781 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
15782 ix86_avoid_jump_misspredicts ();
15783 }
15784
15785 /* Return nonzero when QImode register that must be represented via REX prefix
15786 is used. */
15787 bool
15788 x86_extended_QIreg_mentioned_p (rtx insn)
15789 {
15790 int i;
15791 extract_insn_cached (insn);
15792 for (i = 0; i < recog_data.n_operands; i++)
15793 if (REG_P (recog_data.operand[i])
15794 && REGNO (recog_data.operand[i]) >= 4)
15795 return true;
15796 return false;
15797 }
15798
15799 /* Return nonzero when P points to register encoded via REX prefix.
15800 Called via for_each_rtx. */
15801 static int
15802 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
15803 {
15804 unsigned int regno;
15805 if (!REG_P (*p))
15806 return 0;
15807 regno = REGNO (*p);
15808 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
15809 }
15810
15811 /* Return true when INSN mentions register that must be encoded using REX
15812 prefix. */
15813 bool
15814 x86_extended_reg_mentioned_p (rtx insn)
15815 {
15816 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
15817 }
15818
15819 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
15820 optabs would emit if we didn't have TFmode patterns. */
15821
15822 void
15823 x86_emit_floatuns (rtx operands[2])
15824 {
15825 rtx neglab, donelab, i0, i1, f0, in, out;
15826 enum machine_mode mode, inmode;
15827
15828 inmode = GET_MODE (operands[1]);
15829 if (inmode != SImode
15830 && inmode != DImode)
15831 abort ();
15832
15833 out = operands[0];
15834 in = force_reg (inmode, operands[1]);
15835 mode = GET_MODE (out);
15836 neglab = gen_label_rtx ();
15837 donelab = gen_label_rtx ();
15838 i1 = gen_reg_rtx (Pmode);
15839 f0 = gen_reg_rtx (mode);
15840
15841 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
15842
15843 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
15844 emit_jump_insn (gen_jump (donelab));
15845 emit_barrier ();
15846
15847 emit_label (neglab);
15848
15849 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15850 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15851 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
15852 expand_float (f0, i0, 0);
15853 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
15854
15855 emit_label (donelab);
15856 }
15857 \f
15858 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15859 with all elements equal to VAR. Return true if successful. */
15860
15861 static bool
15862 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
15863 rtx target, rtx val)
15864 {
15865 enum machine_mode smode, wsmode, wvmode;
15866 rtx x;
15867
15868 switch (mode)
15869 {
15870 case V2SImode:
15871 case V2SFmode:
15872 if (!mmx_ok && !TARGET_SSE)
15873 return false;
15874 /* FALLTHRU */
15875
15876 case V2DFmode:
15877 case V2DImode:
15878 case V4SFmode:
15879 case V4SImode:
15880 val = force_reg (GET_MODE_INNER (mode), val);
15881 x = gen_rtx_VEC_DUPLICATE (mode, val);
15882 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15883 return true;
15884
15885 case V4HImode:
15886 if (!mmx_ok)
15887 return false;
15888 val = gen_lowpart (SImode, val);
15889 x = gen_rtx_TRUNCATE (HImode, val);
15890 x = gen_rtx_VEC_DUPLICATE (mode, x);
15891 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15892 return true;
15893
15894 case V8QImode:
15895 if (!mmx_ok)
15896 return false;
15897 smode = QImode;
15898 wsmode = HImode;
15899 wvmode = V4HImode;
15900 goto widen;
15901 case V8HImode:
15902 smode = HImode;
15903 wsmode = SImode;
15904 wvmode = V4SImode;
15905 goto widen;
15906 case V16QImode:
15907 smode = QImode;
15908 wsmode = HImode;
15909 wvmode = V8HImode;
15910 goto widen;
15911 widen:
15912 /* Replicate the value once into the next wider mode and recurse. */
15913 val = convert_modes (wsmode, smode, val, true);
15914 x = expand_simple_binop (wsmode, ASHIFT, val,
15915 GEN_INT (GET_MODE_BITSIZE (smode)),
15916 NULL_RTX, 1, OPTAB_LIB_WIDEN);
15917 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
15918
15919 x = gen_reg_rtx (wvmode);
15920 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
15921 gcc_unreachable ();
15922 emit_move_insn (target, gen_lowpart (mode, x));
15923 return true;
15924
15925 default:
15926 return false;
15927 }
15928 }
15929
15930 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15931 whose low element is VAR, and other elements are zero. Return true
15932 if successful. */
15933
15934 static bool
15935 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
15936 rtx target, rtx var)
15937 {
15938 enum machine_mode vsimode;
15939 rtx x;
15940
15941 switch (mode)
15942 {
15943 case V2SFmode:
15944 case V2SImode:
15945 if (!mmx_ok && !TARGET_SSE)
15946 return false;
15947 /* FALLTHRU */
15948
15949 case V2DFmode:
15950 case V2DImode:
15951 var = force_reg (GET_MODE_INNER (mode), var);
15952 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
15953 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15954 return true;
15955
15956 case V4SFmode:
15957 case V4SImode:
15958 var = force_reg (GET_MODE_INNER (mode), var);
15959 x = gen_rtx_VEC_DUPLICATE (mode, var);
15960 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
15961 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15962 return true;
15963
15964 case V8HImode:
15965 case V16QImode:
15966 vsimode = V4SImode;
15967 goto widen;
15968 case V4HImode:
15969 case V8QImode:
15970 if (!mmx_ok)
15971 return false;
15972 vsimode = V2SImode;
15973 goto widen;
15974 widen:
15975 /* Zero extend the variable element to SImode and recurse. */
15976 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
15977
15978 x = gen_reg_rtx (vsimode);
15979 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
15980 gcc_unreachable ();
15981
15982 emit_move_insn (target, gen_lowpart (mode, x));
15983 return true;
15984
15985 default:
15986 return false;
15987 }
15988 }
15989
15990 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15991 consisting of the values in VALS. It is known that all elements
15992 except ONE_VAR are constants. Return true if successful. */
15993
15994 static bool
15995 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
15996 rtx target, rtx vals, int one_var)
15997 {
15998 rtx var = XVECEXP (vals, 0, one_var);
15999 enum machine_mode wmode;
16000 rtx const_vec, x;
16001
16002 XVECEXP (vals, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
16003 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
16004
16005 switch (mode)
16006 {
16007 case V2DFmode:
16008 case V2DImode:
16009 case V2SFmode:
16010 case V2SImode:
16011 /* For the two element vectors, it's just as easy to use
16012 the general case. */
16013 return false;
16014
16015 case V4SFmode:
16016 case V4SImode:
16017 case V8HImode:
16018 case V4HImode:
16019 break;
16020
16021 case V16QImode:
16022 wmode = V8HImode;
16023 goto widen;
16024 case V8QImode:
16025 wmode = V4HImode;
16026 goto widen;
16027 widen:
16028 /* There's no way to set one QImode entry easily. Combine
16029 the variable value with its adjacent constant value, and
16030 promote to an HImode set. */
16031 x = XVECEXP (vals, 0, one_var ^ 1);
16032 if (one_var & 1)
16033 {
16034 var = convert_modes (HImode, QImode, var, true);
16035 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
16036 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16037 x = GEN_INT (INTVAL (x) & 0xff);
16038 }
16039 else
16040 {
16041 var = convert_modes (HImode, QImode, var, true);
16042 x = gen_int_mode (INTVAL (x) << 8, HImode);
16043 }
16044 if (x != const0_rtx)
16045 var = expand_simple_binop (HImode, IOR, var, x, var,
16046 1, OPTAB_LIB_WIDEN);
16047
16048 x = gen_reg_rtx (wmode);
16049 emit_move_insn (x, gen_lowpart (wmode, const_vec));
16050 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
16051
16052 emit_move_insn (target, gen_lowpart (mode, x));
16053 return true;
16054
16055 default:
16056 return false;
16057 }
16058
16059 emit_move_insn (target, const_vec);
16060 ix86_expand_vector_set (mmx_ok, target, var, one_var);
16061 return true;
16062 }
16063
16064 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
16065 all values variable, and none identical. */
16066
16067 static void
16068 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
16069 rtx target, rtx vals)
16070 {
16071 enum machine_mode half_mode = GET_MODE_INNER (mode);
16072 rtx op0 = NULL, op1 = NULL;
16073 bool use_vec_concat = false;
16074
16075 switch (mode)
16076 {
16077 case V2SFmode:
16078 case V2SImode:
16079 if (!mmx_ok && !TARGET_SSE)
16080 break;
16081 /* FALLTHRU */
16082
16083 case V2DFmode:
16084 case V2DImode:
16085 /* For the two element vectors, we always implement VEC_CONCAT. */
16086 op0 = XVECEXP (vals, 0, 0);
16087 op1 = XVECEXP (vals, 0, 1);
16088 use_vec_concat = true;
16089 break;
16090
16091 case V4SFmode:
16092 half_mode = V2SFmode;
16093 goto half;
16094 case V4SImode:
16095 half_mode = V2SImode;
16096 goto half;
16097 half:
16098 {
16099 rtvec v;
16100
16101 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
16102 Recurse to load the two halves. */
16103
16104 op0 = gen_reg_rtx (half_mode);
16105 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
16106 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
16107
16108 op1 = gen_reg_rtx (half_mode);
16109 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
16110 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
16111
16112 use_vec_concat = true;
16113 }
16114 break;
16115
16116 case V8HImode:
16117 case V16QImode:
16118 case V4HImode:
16119 case V8QImode:
16120 break;
16121
16122 default:
16123 gcc_unreachable ();
16124 }
16125
16126 if (use_vec_concat)
16127 {
16128 if (!register_operand (op0, half_mode))
16129 op0 = force_reg (half_mode, op0);
16130 if (!register_operand (op1, half_mode))
16131 op1 = force_reg (half_mode, op1);
16132
16133 emit_insn (gen_rtx_SET (VOIDmode, target,
16134 gen_rtx_VEC_CONCAT (mode, op0, op1)));
16135 }
16136 else
16137 {
16138 int i, j, n_elts, n_words, n_elt_per_word;
16139 enum machine_mode inner_mode;
16140 rtx words[4], shift;
16141
16142 inner_mode = GET_MODE_INNER (mode);
16143 n_elts = GET_MODE_NUNITS (mode);
16144 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
16145 n_elt_per_word = n_elts / n_words;
16146 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
16147
16148 for (i = 0; i < n_words; ++i)
16149 {
16150 rtx word = NULL_RTX;
16151
16152 for (j = 0; j < n_elt_per_word; ++j)
16153 {
16154 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
16155 elt = convert_modes (word_mode, inner_mode, elt, true);
16156
16157 if (j == 0)
16158 word = elt;
16159 else
16160 {
16161 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
16162 word, 1, OPTAB_LIB_WIDEN);
16163 word = expand_simple_binop (word_mode, IOR, word, elt,
16164 word, 1, OPTAB_LIB_WIDEN);
16165 }
16166 }
16167
16168 words[i] = word;
16169 }
16170
16171 if (n_words == 1)
16172 emit_move_insn (target, gen_lowpart (mode, words[0]));
16173 else if (n_words == 2)
16174 {
16175 rtx tmp = gen_reg_rtx (mode);
16176 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
16177 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
16178 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
16179 emit_move_insn (target, tmp);
16180 }
16181 else if (n_words == 4)
16182 {
16183 rtx tmp = gen_reg_rtx (V4SImode);
16184 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
16185 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
16186 emit_move_insn (target, gen_lowpart (mode, tmp));
16187 }
16188 else
16189 gcc_unreachable ();
16190 }
16191 }
16192
16193 /* Initialize vector TARGET via VALS. Suppress the use of MMX
16194 instructions unless MMX_OK is true. */
16195
16196 void
16197 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
16198 {
16199 enum machine_mode mode = GET_MODE (target);
16200 enum machine_mode inner_mode = GET_MODE_INNER (mode);
16201 int n_elts = GET_MODE_NUNITS (mode);
16202 int n_var = 0, one_var = -1;
16203 bool all_same = true, all_const_zero = true;
16204 int i;
16205 rtx x;
16206
16207 for (i = 0; i < n_elts; ++i)
16208 {
16209 x = XVECEXP (vals, 0, i);
16210 if (!CONSTANT_P (x))
16211 n_var++, one_var = i;
16212 else if (x != CONST0_RTX (inner_mode))
16213 all_const_zero = false;
16214 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
16215 all_same = false;
16216 }
16217
16218 /* Constants are best loaded from the constant pool. */
16219 if (n_var == 0)
16220 {
16221 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
16222 return;
16223 }
16224
16225 /* If all values are identical, broadcast the value. */
16226 if (all_same
16227 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
16228 XVECEXP (vals, 0, 0)))
16229 return;
16230
16231 /* Values where only one field is non-constant are best loaded from
16232 the pool and overwritten via move later. */
16233 if (n_var == 1)
16234 {
16235 if (all_const_zero && one_var == 0
16236 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
16237 XVECEXP (vals, 0, 0)))
16238 return;
16239
16240 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
16241 return;
16242 }
16243
16244 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
16245 }
16246
16247 void
16248 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
16249 {
16250 enum machine_mode mode = GET_MODE (target);
16251 enum machine_mode inner_mode = GET_MODE_INNER (mode);
16252 bool use_vec_merge = false;
16253 rtx tmp;
16254
16255 switch (mode)
16256 {
16257 case V2SFmode:
16258 case V2SImode:
16259 if (!mmx_ok)
16260 break;
16261 /* FALLTHRU */
16262
16263 case V2DFmode:
16264 case V2DImode:
16265 {
16266 rtx op0, op1;
16267
16268 /* For the two element vectors, we implement a VEC_CONCAT with
16269 the extraction of the other element. */
16270
16271 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
16272 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
16273
16274 if (elt == 0)
16275 op0 = val, op1 = tmp;
16276 else
16277 op0 = tmp, op1 = val;
16278
16279 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
16280 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
16281 }
16282 return;
16283
16284 case V4SFmode:
16285 switch (elt)
16286 {
16287 case 0:
16288 use_vec_merge = true;
16289 break;
16290
16291 case 1:
16292 /* tmp = op0 = A B C D */
16293 tmp = copy_to_reg (target);
16294
16295 /* op0 = C C D D */
16296 emit_insn (gen_sse_unpcklps (target, target, target));
16297
16298 /* op0 = C C D X */
16299 ix86_expand_vector_set (false, target, val, 0);
16300
16301 /* op0 = A B X D */
16302 emit_insn (gen_sse_shufps_1 (target, target, tmp,
16303 GEN_INT (1), GEN_INT (0),
16304 GEN_INT (2+4), GEN_INT (3+4)));
16305 return;
16306
16307 case 2:
16308 tmp = copy_to_reg (target);
16309 ix86_expand_vector_set (false, target, val, 0);
16310 emit_insn (gen_sse_shufps_1 (target, target, tmp,
16311 GEN_INT (0), GEN_INT (1),
16312 GEN_INT (0+4), GEN_INT (3+4)));
16313 return;
16314
16315 case 3:
16316 tmp = copy_to_reg (target);
16317 ix86_expand_vector_set (false, target, val, 0);
16318 emit_insn (gen_sse_shufps_1 (target, target, tmp,
16319 GEN_INT (0), GEN_INT (1),
16320 GEN_INT (2+4), GEN_INT (0+4)));
16321 return;
16322
16323 default:
16324 gcc_unreachable ();
16325 }
16326 break;
16327
16328 case V4SImode:
16329 /* Element 0 handled by vec_merge below. */
16330 if (elt == 0)
16331 {
16332 use_vec_merge = true;
16333 break;
16334 }
16335
16336 if (TARGET_SSE2)
16337 {
16338 /* With SSE2, use integer shuffles to swap element 0 and ELT,
16339 store into element 0, then shuffle them back. */
16340
16341 rtx order[4];
16342
16343 order[0] = GEN_INT (elt);
16344 order[1] = const1_rtx;
16345 order[2] = const2_rtx;
16346 order[3] = GEN_INT (3);
16347 order[elt] = const0_rtx;
16348
16349 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
16350 order[1], order[2], order[3]));
16351
16352 ix86_expand_vector_set (false, target, val, 0);
16353
16354 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
16355 order[1], order[2], order[3]));
16356 }
16357 else
16358 {
16359 /* For SSE1, we have to reuse the V4SF code. */
16360 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
16361 gen_lowpart (SFmode, val), elt);
16362 }
16363 return;
16364
16365 case V8HImode:
16366 use_vec_merge = TARGET_SSE2;
16367 break;
16368 case V4HImode:
16369 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
16370 break;
16371
16372 case V16QImode:
16373 case V8QImode:
16374 default:
16375 break;
16376 }
16377
16378 if (use_vec_merge)
16379 {
16380 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
16381 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
16382 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
16383 }
16384 else
16385 {
16386 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
16387
16388 emit_move_insn (mem, target);
16389
16390 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
16391 emit_move_insn (tmp, val);
16392
16393 emit_move_insn (target, mem);
16394 }
16395 }
16396
16397 void
16398 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
16399 {
16400 enum machine_mode mode = GET_MODE (vec);
16401 enum machine_mode inner_mode = GET_MODE_INNER (mode);
16402 bool use_vec_extr = false;
16403 rtx tmp;
16404
16405 switch (mode)
16406 {
16407 case V2SImode:
16408 case V2SFmode:
16409 if (!mmx_ok)
16410 break;
16411 /* FALLTHRU */
16412
16413 case V2DFmode:
16414 case V2DImode:
16415 use_vec_extr = true;
16416 break;
16417
16418 case V4SFmode:
16419 switch (elt)
16420 {
16421 case 0:
16422 tmp = vec;
16423 break;
16424
16425 case 1:
16426 case 3:
16427 tmp = gen_reg_rtx (mode);
16428 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
16429 GEN_INT (elt), GEN_INT (elt),
16430 GEN_INT (elt+4), GEN_INT (elt+4)));
16431 break;
16432
16433 case 2:
16434 tmp = gen_reg_rtx (mode);
16435 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
16436 break;
16437
16438 default:
16439 gcc_unreachable ();
16440 }
16441 vec = tmp;
16442 use_vec_extr = true;
16443 elt = 0;
16444 break;
16445
16446 case V4SImode:
16447 if (TARGET_SSE2)
16448 {
16449 switch (elt)
16450 {
16451 case 0:
16452 tmp = vec;
16453 break;
16454
16455 case 1:
16456 case 3:
16457 tmp = gen_reg_rtx (mode);
16458 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
16459 GEN_INT (elt), GEN_INT (elt),
16460 GEN_INT (elt), GEN_INT (elt)));
16461 break;
16462
16463 case 2:
16464 tmp = gen_reg_rtx (mode);
16465 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
16466 break;
16467
16468 default:
16469 gcc_unreachable ();
16470 }
16471 vec = tmp;
16472 use_vec_extr = true;
16473 elt = 0;
16474 }
16475 else
16476 {
16477 /* For SSE1, we have to reuse the V4SF code. */
16478 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
16479 gen_lowpart (V4SFmode, vec), elt);
16480 return;
16481 }
16482 break;
16483
16484 case V8HImode:
16485 use_vec_extr = TARGET_SSE2;
16486 break;
16487 case V4HImode:
16488 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
16489 break;
16490
16491 case V16QImode:
16492 case V8QImode:
16493 /* ??? Could extract the appropriate HImode element and shift. */
16494 default:
16495 break;
16496 }
16497
16498 if (use_vec_extr)
16499 {
16500 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
16501 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
16502
16503 /* Let the rtl optimizers know about the zero extension performed. */
16504 if (inner_mode == HImode)
16505 {
16506 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
16507 target = gen_lowpart (SImode, target);
16508 }
16509
16510 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
16511 }
16512 else
16513 {
16514 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
16515
16516 emit_move_insn (mem, vec);
16517
16518 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
16519 emit_move_insn (target, tmp);
16520 }
16521 }
16522 \f
16523 /* Implements target hook vector_mode_supported_p. */
16524 static bool
16525 ix86_vector_mode_supported_p (enum machine_mode mode)
16526 {
16527 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
16528 return true;
16529 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
16530 return true;
16531 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
16532 return true;
16533 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
16534 return true;
16535 return false;
16536 }
16537
16538 /* Worker function for TARGET_MD_ASM_CLOBBERS.
16539
16540 We do this in the new i386 backend to maintain source compatibility
16541 with the old cc0-based compiler. */
16542
16543 static tree
16544 ix86_md_asm_clobbers (tree clobbers)
16545 {
16546 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
16547 clobbers);
16548 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
16549 clobbers);
16550 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
16551 clobbers);
16552 return clobbers;
16553 }
16554
16555 /* Worker function for REVERSE_CONDITION. */
16556
16557 enum rtx_code
16558 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
16559 {
16560 return (mode != CCFPmode && mode != CCFPUmode
16561 ? reverse_condition (code)
16562 : reverse_condition_maybe_unordered (code));
16563 }
16564
16565 /* Output code to perform an x87 FP register move, from OPERANDS[1]
16566 to OPERANDS[0]. */
16567
16568 const char *
16569 output_387_reg_move (rtx insn, rtx *operands)
16570 {
16571 if (REG_P (operands[1])
16572 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
16573 {
16574 if (REGNO (operands[0]) == FIRST_STACK_REG
16575 && TARGET_USE_FFREEP)
16576 return "ffreep\t%y0";
16577 return "fstp\t%y0";
16578 }
16579 if (STACK_TOP_P (operands[0]))
16580 return "fld%z1\t%y1";
16581 return "fst\t%y0";
16582 }
16583
16584 /* Output code to perform a conditional jump to LABEL, if C2 flag in
16585 FP status register is set. */
16586
16587 void
16588 ix86_emit_fp_unordered_jump (rtx label)
16589 {
16590 rtx reg = gen_reg_rtx (HImode);
16591 rtx temp;
16592
16593 emit_insn (gen_x86_fnstsw_1 (reg));
16594
16595 if (TARGET_USE_SAHF)
16596 {
16597 emit_insn (gen_x86_sahf_1 (reg));
16598
16599 temp = gen_rtx_REG (CCmode, FLAGS_REG);
16600 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
16601 }
16602 else
16603 {
16604 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
16605
16606 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
16607 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
16608 }
16609
16610 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
16611 gen_rtx_LABEL_REF (VOIDmode, label),
16612 pc_rtx);
16613 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
16614 emit_jump_insn (temp);
16615 }
16616
16617 /* Output code to perform a log1p XFmode calculation. */
16618
16619 void ix86_emit_i387_log1p (rtx op0, rtx op1)
16620 {
16621 rtx label1 = gen_label_rtx ();
16622 rtx label2 = gen_label_rtx ();
16623
16624 rtx tmp = gen_reg_rtx (XFmode);
16625 rtx tmp2 = gen_reg_rtx (XFmode);
16626
16627 emit_insn (gen_absxf2 (tmp, op1));
16628 emit_insn (gen_cmpxf (tmp,
16629 CONST_DOUBLE_FROM_REAL_VALUE (
16630 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
16631 XFmode)));
16632 emit_jump_insn (gen_bge (label1));
16633
16634 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
16635 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
16636 emit_jump (label2);
16637
16638 emit_label (label1);
16639 emit_move_insn (tmp, CONST1_RTX (XFmode));
16640 emit_insn (gen_addxf3 (tmp, op1, tmp));
16641 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
16642 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
16643
16644 emit_label (label2);
16645 }
16646
16647 /* Solaris named-section hook. Parameters are as for
16648 named_section_real. */
16649
16650 static void
16651 i386_solaris_elf_named_section (const char *name, unsigned int flags,
16652 tree decl)
16653 {
16654 /* With Binutils 2.15, the "@unwind" marker must be specified on
16655 every occurrence of the ".eh_frame" section, not just the first
16656 one. */
16657 if (TARGET_64BIT
16658 && strcmp (name, ".eh_frame") == 0)
16659 {
16660 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
16661 flags & SECTION_WRITE ? "aw" : "a");
16662 return;
16663 }
16664 default_elf_asm_named_section (name, flags, decl);
16665 }
16666
16667 #include "gt-i386.h"
This page took 0.779066 seconds and 6 git commands to generate.