gcc.gnu.org Git - gcc.git/blob - gcc/config/i386/i386.c
re PR target/19009 (Loading of FP constants into FP reg via SSE reg)
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51
52 #ifndef CHECK_STACK_LIMIT
53 #define CHECK_STACK_LIMIT (-1)
54 #endif
55
56 /* Return index of given mode in mult and division cost tables. */
57 #define MODE_INDEX(mode) \
58 ((mode) == QImode ? 0 \
59 : (mode) == HImode ? 1 \
60 : (mode) == SImode ? 2 \
61 : (mode) == DImode ? 3 \
62 : 4)
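/* For illustration: the five-entry cost arrays below (the multiply and
   divide/mod start-up costs) are indexed through this macro, roughly as
   ix86_cost->mult_init[MODE_INDEX (mode)], with index 4 covering anything
   wider than DImode.  */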
63
64 /* Processor costs (relative to an add) */
65 static const
66 struct processor_costs size_cost = { /* costs for tuning for size */
67 2, /* cost of an add instruction */
68 3, /* cost of a lea instruction */
69 2, /* variable shift costs */
70 3, /* constant shift costs */
71 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
72 0, /* cost of multiply per each bit set */
73 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
74 3, /* cost of movsx */
75 3, /* cost of movzx */
76 0, /* "large" insn */
77 2, /* MOVE_RATIO */
78 2, /* cost for loading QImode using movzbl */
79 {2, 2, 2}, /* cost of loading integer registers
80 in QImode, HImode and SImode.
81 Relative to reg-reg move (2). */
82 {2, 2, 2}, /* cost of storing integer registers */
83 2, /* cost of reg,reg fld/fst */
84 {2, 2, 2}, /* cost of loading fp registers
85 in SFmode, DFmode and XFmode */
86 {2, 2, 2}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
87 3, /* cost of moving MMX register */
88 {3, 3}, /* cost of loading MMX registers
89 in SImode and DImode */
90 {3, 3}, /* cost of storing MMX registers
91 in SImode and DImode */
92 3, /* cost of moving SSE register */
93 {3, 3, 3}, /* cost of loading SSE registers
94 in SImode, DImode and TImode */
95 {3, 3, 3}, /* cost of storing SSE registers
96 in SImode, DImode and TImode */
97 3, /* MMX or SSE register to integer */
98 0, /* size of prefetch block */
99 0, /* number of parallel prefetches */
100 1, /* Branch cost */
101 2, /* cost of FADD and FSUB insns. */
102 2, /* cost of FMUL instruction. */
103 2, /* cost of FDIV instruction. */
104 2, /* cost of FABS instruction. */
105 2, /* cost of FCHS instruction. */
106 2, /* cost of FSQRT instruction. */
107 };
108
109 /* Processor costs (relative to an add) */
110 static const
111 struct processor_costs i386_cost = { /* 386 specific costs */
112 1, /* cost of an add instruction */
113 1, /* cost of a lea instruction */
114 3, /* variable shift costs */
115 2, /* constant shift costs */
116 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
117 1, /* cost of multiply per each bit set */
118 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
119 3, /* cost of movsx */
120 2, /* cost of movzx */
121 15, /* "large" insn */
122 3, /* MOVE_RATIO */
123 4, /* cost for loading QImode using movzbl */
124 {2, 4, 2}, /* cost of loading integer registers
125 in QImode, HImode and SImode.
126 Relative to reg-reg move (2). */
127 {2, 4, 2}, /* cost of storing integer registers */
128 2, /* cost of reg,reg fld/fst */
129 {8, 8, 8}, /* cost of loading fp registers
130 in SFmode, DFmode and XFmode */
131 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
132 2, /* cost of moving MMX register */
133 {4, 8}, /* cost of loading MMX registers
134 in SImode and DImode */
135 {4, 8}, /* cost of storing MMX registers
136 in SImode and DImode */
137 2, /* cost of moving SSE register */
138 {4, 8, 16}, /* cost of loading SSE registers
139 in SImode, DImode and TImode */
140 {4, 8, 16}, /* cost of storing SSE registers
141 in SImode, DImode and TImode */
142 3, /* MMX or SSE register to integer */
143 0, /* size of prefetch block */
144 0, /* number of parallel prefetches */
145 1, /* Branch cost */
146 23, /* cost of FADD and FSUB insns. */
147 27, /* cost of FMUL instruction. */
148 88, /* cost of FDIV instruction. */
149 22, /* cost of FABS instruction. */
150 24, /* cost of FCHS instruction. */
151 122, /* cost of FSQRT instruction. */
152 };
153
154 static const
155 struct processor_costs i486_cost = { /* 486 specific costs */
156 1, /* cost of an add instruction */
157 1, /* cost of a lea instruction */
158 3, /* variable shift costs */
159 2, /* constant shift costs */
160 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
161 1, /* cost of multiply per each bit set */
162 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
163 3, /* cost of movsx */
164 2, /* cost of movzx */
165 15, /* "large" insn */
166 3, /* MOVE_RATIO */
167 4, /* cost for loading QImode using movzbl */
168 {2, 4, 2}, /* cost of loading integer registers
169 in QImode, HImode and SImode.
170 Relative to reg-reg move (2). */
171 {2, 4, 2}, /* cost of storing integer registers */
172 2, /* cost of reg,reg fld/fst */
173 {8, 8, 8}, /* cost of loading fp registers
174 in SFmode, DFmode and XFmode */
175 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
176 2, /* cost of moving MMX register */
177 {4, 8}, /* cost of loading MMX registers
178 in SImode and DImode */
179 {4, 8}, /* cost of storing MMX registers
180 in SImode and DImode */
181 2, /* cost of moving SSE register */
182 {4, 8, 16}, /* cost of loading SSE registers
183 in SImode, DImode and TImode */
184 {4, 8, 16}, /* cost of storing SSE registers
185 in SImode, DImode and TImode */
186 3, /* MMX or SSE register to integer */
187 0, /* size of prefetch block */
188 0, /* number of parallel prefetches */
189 1, /* Branch cost */
190 8, /* cost of FADD and FSUB insns. */
191 16, /* cost of FMUL instruction. */
192 73, /* cost of FDIV instruction. */
193 3, /* cost of FABS instruction. */
194 3, /* cost of FCHS instruction. */
195 83, /* cost of FSQRT instruction. */
196 };
197
198 static const
199 struct processor_costs pentium_cost = {
200 1, /* cost of an add instruction */
201 1, /* cost of a lea instruction */
202 4, /* variable shift costs */
203 1, /* constant shift costs */
204 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
205 0, /* cost of multiply per each bit set */
206 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
207 3, /* cost of movsx */
208 2, /* cost of movzx */
209 8, /* "large" insn */
210 6, /* MOVE_RATIO */
211 6, /* cost for loading QImode using movzbl */
212 {2, 4, 2}, /* cost of loading integer registers
213 in QImode, HImode and SImode.
214 Relative to reg-reg move (2). */
215 {2, 4, 2}, /* cost of storing integer registers */
216 2, /* cost of reg,reg fld/fst */
217 {2, 2, 6}, /* cost of loading fp registers
218 in SFmode, DFmode and XFmode */
219 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
220 8, /* cost of moving MMX register */
221 {8, 8}, /* cost of loading MMX registers
222 in SImode and DImode */
223 {8, 8}, /* cost of storing MMX registers
224 in SImode and DImode */
225 2, /* cost of moving SSE register */
226 {4, 8, 16}, /* cost of loading SSE registers
227 in SImode, DImode and TImode */
228 {4, 8, 16}, /* cost of storing SSE registers
229 in SImode, DImode and TImode */
230 3, /* MMX or SSE register to integer */
231 0, /* size of prefetch block */
232 0, /* number of parallel prefetches */
233 2, /* Branch cost */
234 3, /* cost of FADD and FSUB insns. */
235 3, /* cost of FMUL instruction. */
236 39, /* cost of FDIV instruction. */
237 1, /* cost of FABS instruction. */
238 1, /* cost of FCHS instruction. */
239 70, /* cost of FSQRT instruction. */
240 };
241
242 static const
243 struct processor_costs pentiumpro_cost = {
244 1, /* cost of an add instruction */
245 1, /* cost of a lea instruction */
246 1, /* variable shift costs */
247 1, /* constant shift costs */
248 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
249 0, /* cost of multiply per each bit set */
250 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
251 1, /* cost of movsx */
252 1, /* cost of movzx */
253 8, /* "large" insn */
254 6, /* MOVE_RATIO */
255 2, /* cost for loading QImode using movzbl */
256 {4, 4, 4}, /* cost of loading integer registers
257 in QImode, HImode and SImode.
258 Relative to reg-reg move (2). */
259 {2, 2, 2}, /* cost of storing integer registers */
260 2, /* cost of reg,reg fld/fst */
261 {2, 2, 6}, /* cost of loading fp registers
262 in SFmode, DFmode and XFmode */
263 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
264 2, /* cost of moving MMX register */
265 {2, 2}, /* cost of loading MMX registers
266 in SImode and DImode */
267 {2, 2}, /* cost of storing MMX registers
268 in SImode and DImode */
269 2, /* cost of moving SSE register */
270 {2, 2, 8}, /* cost of loading SSE registers
271 in SImode, DImode and TImode */
272 {2, 2, 8}, /* cost of storing SSE registers
273 in SImode, DImode and TImode */
274 3, /* MMX or SSE register to integer */
275 32, /* size of prefetch block */
276 6, /* number of parallel prefetches */
277 2, /* Branch cost */
278 3, /* cost of FADD and FSUB insns. */
279 5, /* cost of FMUL instruction. */
280 56, /* cost of FDIV instruction. */
281 2, /* cost of FABS instruction. */
282 2, /* cost of FCHS instruction. */
283 56, /* cost of FSQRT instruction. */
284 };
285
286 static const
287 struct processor_costs k6_cost = {
288 1, /* cost of an add instruction */
289 2, /* cost of a lea instruction */
290 1, /* variable shift costs */
291 1, /* constant shift costs */
292 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
293 0, /* cost of multiply per each bit set */
294 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
295 2, /* cost of movsx */
296 2, /* cost of movzx */
297 8, /* "large" insn */
298 4, /* MOVE_RATIO */
299 3, /* cost for loading QImode using movzbl */
300 {4, 5, 4}, /* cost of loading integer registers
301 in QImode, HImode and SImode.
302 Relative to reg-reg move (2). */
303 {2, 3, 2}, /* cost of storing integer registers */
304 4, /* cost of reg,reg fld/fst */
305 {6, 6, 6}, /* cost of loading fp registers
306 in SFmode, DFmode and XFmode */
307 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
308 2, /* cost of moving MMX register */
309 {2, 2}, /* cost of loading MMX registers
310 in SImode and DImode */
311 {2, 2}, /* cost of storing MMX registers
312 in SImode and DImode */
313 2, /* cost of moving SSE register */
314 {2, 2, 8}, /* cost of loading SSE registers
315 in SImode, DImode and TImode */
316 {2, 2, 8}, /* cost of storing SSE registers
317 in SImode, DImode and TImode */
318 6, /* MMX or SSE register to integer */
319 32, /* size of prefetch block */
320 1, /* number of parallel prefetches */
321 1, /* Branch cost */
322 2, /* cost of FADD and FSUB insns. */
323 2, /* cost of FMUL instruction. */
324 56, /* cost of FDIV instruction. */
325 2, /* cost of FABS instruction. */
326 2, /* cost of FCHS instruction. */
327 56, /* cost of FSQRT instruction. */
328 };
329
330 static const
331 struct processor_costs athlon_cost = {
332 1, /* cost of an add instruction */
333 2, /* cost of a lea instruction */
334 1, /* variable shift costs */
335 1, /* constant shift costs */
336 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
337 0, /* cost of multiply per each bit set */
338 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
339 1, /* cost of movsx */
340 1, /* cost of movzx */
341 8, /* "large" insn */
342 9, /* MOVE_RATIO */
343 4, /* cost for loading QImode using movzbl */
344 {3, 4, 3}, /* cost of loading integer registers
345 in QImode, HImode and SImode.
346 Relative to reg-reg move (2). */
347 {3, 4, 3}, /* cost of storing integer registers */
348 4, /* cost of reg,reg fld/fst */
349 {4, 4, 12}, /* cost of loading fp registers
350 in SFmode, DFmode and XFmode */
351 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
352 2, /* cost of moving MMX register */
353 {4, 4}, /* cost of loading MMX registers
354 in SImode and DImode */
355 {4, 4}, /* cost of storing MMX registers
356 in SImode and DImode */
357 2, /* cost of moving SSE register */
358 {4, 4, 6}, /* cost of loading SSE registers
359 in SImode, DImode and TImode */
360 {4, 4, 5}, /* cost of storing SSE registers
361 in SImode, DImode and TImode */
362 5, /* MMX or SSE register to integer */
363 64, /* size of prefetch block */
364 6, /* number of parallel prefetches */
365 5, /* Branch cost */
366 4, /* cost of FADD and FSUB insns. */
367 4, /* cost of FMUL instruction. */
368 24, /* cost of FDIV instruction. */
369 2, /* cost of FABS instruction. */
370 2, /* cost of FCHS instruction. */
371 35, /* cost of FSQRT instruction. */
372 };
373
374 static const
375 struct processor_costs k8_cost = {
376 1, /* cost of an add instruction */
377 2, /* cost of a lea instruction */
378 1, /* variable shift costs */
379 1, /* constant shift costs */
380 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
381 0, /* cost of multiply per each bit set */
382 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
383 1, /* cost of movsx */
384 1, /* cost of movzx */
385 8, /* "large" insn */
386 9, /* MOVE_RATIO */
387 4, /* cost for loading QImode using movzbl */
388 {3, 4, 3}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {3, 4, 3}, /* cost of storing integer registers */
392 4, /* cost of reg,reg fld/fst */
393 {4, 4, 12}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
396 2, /* cost of moving MMX register */
397 {3, 3}, /* cost of loading MMX registers
398 in SImode and DImode */
399 {4, 4}, /* cost of storing MMX registers
400 in SImode and DImode */
401 2, /* cost of moving SSE register */
402 {4, 3, 6}, /* cost of loading SSE registers
403 in SImode, DImode and TImode */
404 {4, 4, 5}, /* cost of storing SSE registers
405 in SImode, DImode and TImode */
406 5, /* MMX or SSE register to integer */
407 64, /* size of prefetch block */
408 6, /* number of parallel prefetches */
409 5, /* Branch cost */
410 4, /* cost of FADD and FSUB insns. */
411 4, /* cost of FMUL instruction. */
412 19, /* cost of FDIV instruction. */
413 2, /* cost of FABS instruction. */
414 2, /* cost of FCHS instruction. */
415 35, /* cost of FSQRT instruction. */
416 };
417
418 static const
419 struct processor_costs pentium4_cost = {
420 1, /* cost of an add instruction */
421 3, /* cost of a lea instruction */
422 4, /* variable shift costs */
423 4, /* constant shift costs */
424 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
425 0, /* cost of multiply per each bit set */
426 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
427 1, /* cost of movsx */
428 1, /* cost of movzx */
429 16, /* "large" insn */
430 6, /* MOVE_RATIO */
431 2, /* cost for loading QImode using movzbl */
432 {4, 5, 4}, /* cost of loading integer registers
433 in QImode, HImode and SImode.
434 Relative to reg-reg move (2). */
435 {2, 3, 2}, /* cost of storing integer registers */
436 2, /* cost of reg,reg fld/fst */
437 {2, 2, 6}, /* cost of loading fp registers
438 in SFmode, DFmode and XFmode */
439 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
440 2, /* cost of moving MMX register */
441 {2, 2}, /* cost of loading MMX registers
442 in SImode and DImode */
443 {2, 2}, /* cost of storing MMX registers
444 in SImode and DImode */
445 12, /* cost of moving SSE register */
446 {12, 12, 12}, /* cost of loading SSE registers
447 in SImode, DImode and TImode */
448 {2, 2, 8}, /* cost of storing SSE registers
449 in SImode, DImode and TImode */
450 10, /* MMX or SSE register to integer */
451 64, /* size of prefetch block */
452 6, /* number of parallel prefetches */
453 2, /* Branch cost */
454 5, /* cost of FADD and FSUB insns. */
455 7, /* cost of FMUL instruction. */
456 43, /* cost of FDIV instruction. */
457 2, /* cost of FABS instruction. */
458 2, /* cost of FCHS instruction. */
459 43, /* cost of FSQRT instruction. */
460 };
461
462 static const
463 struct processor_costs nocona_cost = {
464 1, /* cost of an add instruction */
465 1, /* cost of a lea instruction */
466 1, /* variable shift costs */
467 1, /* constant shift costs */
468 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
469 0, /* cost of multiply per each bit set */
470 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
471 1, /* cost of movsx */
472 1, /* cost of movzx */
473 16, /* "large" insn */
474 9, /* MOVE_RATIO */
475 4, /* cost for loading QImode using movzbl */
476 {4, 4, 4}, /* cost of loading integer registers
477 in QImode, HImode and SImode.
478 Relative to reg-reg move (2). */
479 {4, 4, 4}, /* cost of storing integer registers */
480 3, /* cost of reg,reg fld/fst */
481 {12, 12, 12}, /* cost of loading fp registers
482 in SFmode, DFmode and XFmode */
483 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
484 6, /* cost of moving MMX register */
485 {12, 12}, /* cost of loading MMX registers
486 in SImode and DImode */
487 {12, 12}, /* cost of storing MMX registers
488 in SImode and DImode */
489 6, /* cost of moving SSE register */
490 {12, 12, 12}, /* cost of loading SSE registers
491 in SImode, DImode and TImode */
492 {12, 12, 12}, /* cost of storing SSE registers
493 in SImode, DImode and TImode */
494 8, /* MMX or SSE register to integer */
495 128, /* size of prefetch block */
496 8, /* number of parallel prefetches */
497 1, /* Branch cost */
498 6, /* cost of FADD and FSUB insns. */
499 8, /* cost of FMUL instruction. */
500 40, /* cost of FDIV instruction. */
501 3, /* cost of FABS instruction. */
502 3, /* cost of FCHS instruction. */
503 44, /* cost of FSQRT instruction. */
504 };
505
506 const struct processor_costs *ix86_cost = &pentium_cost;
507
508 /* Processor feature/optimization bitmasks. */
509 #define m_386 (1<<PROCESSOR_I386)
510 #define m_486 (1<<PROCESSOR_I486)
511 #define m_PENT (1<<PROCESSOR_PENTIUM)
512 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
513 #define m_K6 (1<<PROCESSOR_K6)
514 #define m_ATHLON (1<<PROCESSOR_ATHLON)
515 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
516 #define m_K8 (1<<PROCESSOR_K8)
517 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
518 #define m_NOCONA (1<<PROCESSOR_NOCONA)
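/* Each "const int x86_*" flag below is a set of processors for which the
   corresponding optimization applies.  For reference, i386.h tests these
   masks against the selected tuning along the lines of
     #define TUNEMASK (1 << ix86_tune)
     #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)
   so a bit set here enables the feature when tuning for that processor.  */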
519
520 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
521 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
522 const int x86_zero_extend_with_and = m_486 | m_PENT;
523 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
524 const int x86_double_with_add = ~m_386;
525 const int x86_use_bit_test = m_386;
526 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
527 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
528 const int x86_3dnow_a = m_ATHLON_K8;
529 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
530 /* Branch hints were put in P4 based on simulation results, but
531 after P4 was released, no performance benefit was observed from
532 branch hints, and they increase code size.  As a result,
533 icc never generates branch hints. */
534 const int x86_branch_hints = 0;
535 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
536 const int x86_partial_reg_stall = m_PPRO;
537 const int x86_use_loop = m_K6;
538 const int x86_use_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
539 const int x86_use_mov0 = m_K6;
540 const int x86_use_cltd = ~(m_PENT | m_K6);
541 const int x86_read_modify_write = ~m_PENT;
542 const int x86_read_modify = ~(m_PENT | m_PPRO);
543 const int x86_split_long_moves = m_PPRO;
544 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
545 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
546 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
547 const int x86_qimode_math = ~(0);
548 const int x86_promote_qi_regs = 0;
549 const int x86_himode_math = ~(m_PPRO);
550 const int x86_promote_hi_regs = m_PPRO;
551 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
552 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
553 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
554 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
555 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
556 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
557 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
558 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
559 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
560 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
561 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
562 const int x86_shift1 = ~m_486;
563 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
564 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
565 /* Set for machines where the type and dependencies are resolved on SSE
566 register parts instead of whole registers, so we may maintain just the
567 lower part of scalar values in the proper format, leaving the upper part
568 undefined. */
569 const int x86_sse_split_regs = m_ATHLON_K8;
570 const int x86_sse_typeless_stores = m_ATHLON_K8;
571 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
572 const int x86_use_ffreep = m_ATHLON_K8;
573 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
574 const int x86_inter_unit_moves = ~(m_ATHLON_K8);
575 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
576 /* Some CPU cores are not able to predict more than 4 branch instructions in
577 the 16 byte window. */
578 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
579 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
580 const int x86_use_bt = m_ATHLON_K8;
581
582 /* If the average insn count for a single function invocation is
583 lower than this constant, emit fast (but longer) prologue and
584 epilogue code. */
585 #define FAST_PROLOGUE_INSN_COUNT 20
586
587 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
588 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
589 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
590 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
591
592 /* Array of the smallest class containing reg number REGNO, indexed by
593 REGNO. Used by REGNO_REG_CLASS in i386.h. */
594
595 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
596 {
597 /* ax, dx, cx, bx */
598 AREG, DREG, CREG, BREG,
599 /* si, di, bp, sp */
600 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
601 /* FP registers */
602 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
603 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
604 /* arg pointer */
605 NON_Q_REGS,
606 /* flags, fpsr, dirflag, frame */
607 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
608 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
609 SSE_REGS, SSE_REGS,
610 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
611 MMX_REGS, MMX_REGS,
612 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
613 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
614 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
615 SSE_REGS, SSE_REGS,
616 };
617
618 /* The "default" register map used in 32bit mode. */
619
620 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
621 {
622 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
623 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
624 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
625 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
626 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
627 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
628 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
629 };
630
631 static int const x86_64_int_parameter_registers[6] =
632 {
633 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
634 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
635 };
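/* In gcc's register numbering (ax=0, dx=1, cx=2, bx=3, si=4, di=5, see
   regclass_map above), this is %rdi, %rsi, %rdx, %rcx, %r8, %r9 -- the
   x86-64 integer argument-passing order.  */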
636
637 static int const x86_64_int_return_registers[4] =
638 {
639 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
640 };
641
642 /* The "default" register map used in 64bit mode. */
643 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
644 {
645 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
646 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
647 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
648 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
649 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
650 8,9,10,11,12,13,14,15, /* extended integer registers */
651 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
652 };
653
654 /* Define the register numbers to be used in Dwarf debugging information.
655 The SVR4 reference port C compiler uses the following register numbers
656 in its Dwarf output code:
657 0 for %eax (gcc regno = 0)
658 1 for %ecx (gcc regno = 2)
659 2 for %edx (gcc regno = 1)
660 3 for %ebx (gcc regno = 3)
661 4 for %esp (gcc regno = 7)
662 5 for %ebp (gcc regno = 6)
663 6 for %esi (gcc regno = 4)
664 7 for %edi (gcc regno = 5)
665 The following three DWARF register numbers are never generated by
666 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
667 believes these numbers have these meanings.
668 8 for %eip (no gcc equivalent)
669 9 for %eflags (gcc regno = 17)
670 10 for %trapno (no gcc equivalent)
671 It is not at all clear how we should number the FP stack registers
672 for the x86 architecture. If the version of SDB on x86/svr4 were
673 a bit less brain dead with respect to floating-point then we would
674 have a precedent to follow with respect to DWARF register numbers
675 for x86 FP registers, but the SDB on x86/svr4 is so completely
676 broken with respect to FP registers that it is hardly worth thinking
677 of it as something to strive for compatibility with.
678 The version of x86/svr4 SDB I have at the moment does (partially)
679 seem to believe that DWARF register number 11 is associated with
680 the x86 register %st(0), but that's about all. Higher DWARF
681 register numbers don't seem to be associated with anything in
682 particular, and even for DWARF regno 11, SDB only seems to under-
683 stand that it should say that a variable lives in %st(0) (when
684 asked via an `=' command) if we said it was in DWARF regno 11,
685 but SDB still prints garbage when asked for the value of the
686 variable in question (via a `/' command).
687 (Also note that the labels SDB prints for various FP stack regs
688 when doing an `x' command are all wrong.)
689 Note that these problems generally don't affect the native SVR4
690 C compiler because it doesn't allow the use of -O with -g and
691 because when it is *not* optimizing, it allocates a memory
692 location for each floating-point variable, and the memory
693 location is what gets described in the DWARF AT_location
694 attribute for the variable in question.
695 Regardless of the severe mental illness of the x86/svr4 SDB, we
696 do something sensible here and we use the following DWARF
697 register numbers. Note that these are all stack-top-relative
698 numbers.
699 11 for %st(0) (gcc regno = 8)
700 12 for %st(1) (gcc regno = 9)
701 13 for %st(2) (gcc regno = 10)
702 14 for %st(3) (gcc regno = 11)
703 15 for %st(4) (gcc regno = 12)
704 16 for %st(5) (gcc regno = 13)
705 17 for %st(6) (gcc regno = 14)
706 18 for %st(7) (gcc regno = 15)
707 */
708 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
709 {
710 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
711 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
712 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
713 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
714 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
715 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
716 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
717 };
718
719 /* Test and compare insns in i386.md store the information needed to
720 generate branch and scc insns here. */
721
722 rtx ix86_compare_op0 = NULL_RTX;
723 rtx ix86_compare_op1 = NULL_RTX;
724
725 #define MAX_386_STACK_LOCALS 3
726 /* Size of the register save area. */
727 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
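/* Assuming REGPARM_MAX is 6 and SSE_REGPARM_MAX is 8 in 64-bit mode, this
   works out to 6*8 + 8*16 = 176 bytes: the register save area that the
   varargs prologue dumps the argument registers into.  */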
728
729 /* Define the structure for the machine field in struct function. */
730
731 struct stack_local_entry GTY(())
732 {
733 unsigned short mode;
734 unsigned short n;
735 rtx rtl;
736 struct stack_local_entry *next;
737 };
738
739 /* Structure describing stack frame layout.
740 Stack grows downward:
741
742 [arguments]
743 <- ARG_POINTER
744 saved pc
745
746 saved frame pointer if frame_pointer_needed
747 <- HARD_FRAME_POINTER
748 [saved regs]
749
750 [padding1] \
751 )
752 [va_arg registers] (
753 > to_allocate <- FRAME_POINTER
754 [frame] (
755 )
756 [padding2] /
757 */
758 struct ix86_frame
759 {
760 int nregs;
761 int padding1;
762 int va_arg_size;
763 HOST_WIDE_INT frame;
764 int padding2;
765 int outgoing_arguments_size;
766 int red_zone_size;
767
768 HOST_WIDE_INT to_allocate;
769 /* The offsets relative to ARG_POINTER. */
770 HOST_WIDE_INT frame_pointer_offset;
771 HOST_WIDE_INT hard_frame_pointer_offset;
772 HOST_WIDE_INT stack_pointer_offset;
773
774 /* When save_regs_using_mov is set, emit prologue using
775 move instead of push instructions. */
776 bool save_regs_using_mov;
777 };
778
779 /* Used to enable/disable debugging features. */
780 const char *ix86_debug_arg_string, *ix86_debug_addr_string;
781 /* Code model option as passed by user. */
782 const char *ix86_cmodel_string;
783 /* Parsed value. */
784 enum cmodel ix86_cmodel;
785 /* Asm dialect. */
786 const char *ix86_asm_string;
787 enum asm_dialect ix86_asm_dialect = ASM_ATT;
788 /* TLS dialect. */
789 const char *ix86_tls_dialect_string;
790 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
791
792 /* Which unit we are generating floating point math for. */
793 enum fpmath_unit ix86_fpmath;
794
795 /* Which cpu are we scheduling for. */
796 enum processor_type ix86_tune;
797 /* Which instruction set architecture to use. */
798 enum processor_type ix86_arch;
799
800 /* Strings to hold which cpu and instruction set architecture to use. */
801 const char *ix86_tune_string; /* for -mtune=<xxx> */
802 const char *ix86_arch_string; /* for -march=<xxx> */
803 const char *ix86_fpmath_string; /* for -mfpmath=<xxx> */
804
805 /* # of registers to use to pass arguments. */
806 const char *ix86_regparm_string;
807
808 /* true if sse prefetch instruction is not NOOP. */
809 int x86_prefetch_sse;
810
811 /* ix86_regparm_string as a number */
812 int ix86_regparm;
813
814 /* Alignment to use for loops and jumps: */
815
816 /* Power of two alignment for loops. */
817 const char *ix86_align_loops_string;
818
819 /* Power of two alignment for non-loop jumps. */
820 const char *ix86_align_jumps_string;
821
822 /* Power of two alignment for stack boundary in bytes. */
823 const char *ix86_preferred_stack_boundary_string;
824
825 /* Preferred alignment for stack boundary in bits. */
826 unsigned int ix86_preferred_stack_boundary;
827
828 /* Values 1-5: see jump.c */
829 int ix86_branch_cost;
830 const char *ix86_branch_cost_string;
831
832 /* Power of two alignment for functions. */
833 const char *ix86_align_funcs_string;
834
835 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
836 char internal_label_prefix[16];
837 int internal_label_prefix_len;
838 \f
839 static void output_pic_addr_const (FILE *, rtx, int);
840 static void put_condition_code (enum rtx_code, enum machine_mode,
841 int, int, FILE *);
842 static const char *get_some_local_dynamic_name (void);
843 static int get_some_local_dynamic_name_1 (rtx *, void *);
844 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
845 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
846 rtx *);
847 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
848 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
849 enum machine_mode);
850 static rtx get_thread_pointer (int);
851 static rtx legitimize_tls_address (rtx, enum tls_model, int);
852 static void get_pc_thunk_name (char [32], unsigned int);
853 static rtx gen_push (rtx);
854 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
855 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
856 static struct machine_function * ix86_init_machine_status (void);
857 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
858 static int ix86_nsaved_regs (void);
859 static void ix86_emit_save_regs (void);
860 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
861 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
862 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
863 static HOST_WIDE_INT ix86_GOT_alias_set (void);
864 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
865 static rtx ix86_expand_aligntest (rtx, int);
866 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
867 static int ix86_issue_rate (void);
868 static int ix86_adjust_cost (rtx, rtx, rtx, int);
869 static int ia32_multipass_dfa_lookahead (void);
870 static void ix86_init_mmx_sse_builtins (void);
871 static rtx x86_this_parameter (tree);
872 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
873 HOST_WIDE_INT, tree);
874 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
875 static void x86_file_start (void);
876 static void ix86_reorg (void);
877 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
878 static tree ix86_build_builtin_va_list (void);
879 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
880 tree, int *, int);
881 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
882 static bool ix86_vector_mode_supported_p (enum machine_mode);
883
884 static int ix86_address_cost (rtx);
885 static bool ix86_cannot_force_const_mem (rtx);
886 static rtx ix86_delegitimize_address (rtx);
887
888 struct builtin_description;
889 static rtx ix86_expand_sse_comi (const struct builtin_description *,
890 tree, rtx);
891 static rtx ix86_expand_sse_compare (const struct builtin_description *,
892 tree, rtx);
893 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
894 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
895 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
896 static rtx ix86_expand_store_builtin (enum insn_code, tree);
897 static rtx safe_vector_operand (rtx, enum machine_mode);
898 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
899 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
900 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
901 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
902 static int ix86_fp_comparison_cost (enum rtx_code code);
903 static unsigned int ix86_select_alt_pic_regnum (void);
904 static int ix86_save_reg (unsigned int, int);
905 static void ix86_compute_frame_layout (struct ix86_frame *);
906 static int ix86_comp_type_attributes (tree, tree);
907 static int ix86_function_regparm (tree, tree);
908 const struct attribute_spec ix86_attribute_table[];
909 static bool ix86_function_ok_for_sibcall (tree, tree);
910 static tree ix86_handle_cdecl_attribute (tree *, tree, tree, int, bool *);
911 static tree ix86_handle_regparm_attribute (tree *, tree, tree, int, bool *);
912 static int ix86_value_regno (enum machine_mode);
913 static bool contains_128bit_aligned_vector_p (tree);
914 static rtx ix86_struct_value_rtx (tree, int);
915 static bool ix86_ms_bitfield_layout_p (tree);
916 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
917 static int extended_reg_mentioned_1 (rtx *, void *);
918 static bool ix86_rtx_costs (rtx, int, int, int *);
919 static int min_insn_size (rtx);
920 static tree ix86_md_asm_clobbers (tree clobbers);
921 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
922 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
923 tree, bool);
924 static void ix86_init_builtins (void);
925 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
926
927 /* This function is only used on Solaris. */
928 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
929 ATTRIBUTE_UNUSED;
930
931 /* Register class used for passing a given 64-bit part of the argument.
932 These represent classes as documented by the psABI, with the exception of
933 the SSESF and SSEDF classes, which are basically the SSE class, except that
934 gcc will use an SF or DFmode move instead of DImode to avoid reformatting penalties.
935
936 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
937 whenever possible (the upper half does contain padding).
938 */
939 enum x86_64_reg_class
940 {
941 X86_64_NO_CLASS,
942 X86_64_INTEGER_CLASS,
943 X86_64_INTEGERSI_CLASS,
944 X86_64_SSE_CLASS,
945 X86_64_SSESF_CLASS,
946 X86_64_SSEDF_CLASS,
947 X86_64_SSEUP_CLASS,
948 X86_64_X87_CLASS,
949 X86_64_X87UP_CLASS,
950 X86_64_COMPLEX_X87_CLASS,
951 X86_64_MEMORY_CLASS
952 };
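/* As an illustrative example of this classification, a parameter of type
   struct { double d; int i; } would normally be split into two eightbytes
   classified X86_64_SSEDF_CLASS and X86_64_INTEGERSI_CLASS: the double is
   passed in an SSE register, and the int, whose upper half is padding, is
   moved in SImode through a general register.  */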
953 static const char * const x86_64_reg_class_name[] = {
954 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
955 "sseup", "x87", "x87up", "cplx87", "no"
956 };
957
958 #define MAX_CLASSES 4
959
960 /* Table of constants used by fldpi, fldln2, etc.... */
961 static REAL_VALUE_TYPE ext_80387_constants_table [5];
962 static bool ext_80387_constants_init = 0;
963 static void init_ext_80387_constants (void);
964 \f
965 /* Initialize the GCC target structure. */
966 #undef TARGET_ATTRIBUTE_TABLE
967 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
968 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
969 # undef TARGET_MERGE_DECL_ATTRIBUTES
970 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
971 #endif
972
973 #undef TARGET_COMP_TYPE_ATTRIBUTES
974 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
975
976 #undef TARGET_INIT_BUILTINS
977 #define TARGET_INIT_BUILTINS ix86_init_builtins
978 #undef TARGET_EXPAND_BUILTIN
979 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
980
981 #undef TARGET_ASM_FUNCTION_EPILOGUE
982 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
983
984 #undef TARGET_ASM_OPEN_PAREN
985 #define TARGET_ASM_OPEN_PAREN ""
986 #undef TARGET_ASM_CLOSE_PAREN
987 #define TARGET_ASM_CLOSE_PAREN ""
988
989 #undef TARGET_ASM_ALIGNED_HI_OP
990 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
991 #undef TARGET_ASM_ALIGNED_SI_OP
992 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
993 #ifdef ASM_QUAD
994 #undef TARGET_ASM_ALIGNED_DI_OP
995 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
996 #endif
997
998 #undef TARGET_ASM_UNALIGNED_HI_OP
999 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1000 #undef TARGET_ASM_UNALIGNED_SI_OP
1001 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1002 #undef TARGET_ASM_UNALIGNED_DI_OP
1003 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1004
1005 #undef TARGET_SCHED_ADJUST_COST
1006 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1007 #undef TARGET_SCHED_ISSUE_RATE
1008 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1009 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1010 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1011 ia32_multipass_dfa_lookahead
1012
1013 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1014 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1015
1016 #ifdef HAVE_AS_TLS
1017 #undef TARGET_HAVE_TLS
1018 #define TARGET_HAVE_TLS true
1019 #endif
1020 #undef TARGET_CANNOT_FORCE_CONST_MEM
1021 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1022
1023 #undef TARGET_DELEGITIMIZE_ADDRESS
1024 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1025
1026 #undef TARGET_MS_BITFIELD_LAYOUT_P
1027 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1028
1029 #undef TARGET_ASM_OUTPUT_MI_THUNK
1030 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1031 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1032 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1033
1034 #undef TARGET_ASM_FILE_START
1035 #define TARGET_ASM_FILE_START x86_file_start
1036
1037 #undef TARGET_RTX_COSTS
1038 #define TARGET_RTX_COSTS ix86_rtx_costs
1039 #undef TARGET_ADDRESS_COST
1040 #define TARGET_ADDRESS_COST ix86_address_cost
1041
1042 #undef TARGET_FIXED_CONDITION_CODE_REGS
1043 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1044 #undef TARGET_CC_MODES_COMPATIBLE
1045 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1046
1047 #undef TARGET_MACHINE_DEPENDENT_REORG
1048 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1049
1050 #undef TARGET_BUILD_BUILTIN_VA_LIST
1051 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1052
1053 #undef TARGET_MD_ASM_CLOBBERS
1054 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1055
1056 #undef TARGET_PROMOTE_PROTOTYPES
1057 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1058 #undef TARGET_STRUCT_VALUE_RTX
1059 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1060 #undef TARGET_SETUP_INCOMING_VARARGS
1061 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1062 #undef TARGET_MUST_PASS_IN_STACK
1063 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1064 #undef TARGET_PASS_BY_REFERENCE
1065 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1066
1067 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1068 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1069
1070 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1071 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1072
1073 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1074 #undef TARGET_INSERT_ATTRIBUTES
1075 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1076 #endif
1077
1078 struct gcc_target targetm = TARGET_INITIALIZER;
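/* The TARGET_* macros redefined above are folded into TARGET_INITIALIZER,
   so targetm holds the i386 implementations of the target hooks; the rest
   of the compiler calls through this vector, e.g. targetm.rtx_costs ends
   up in ix86_rtx_costs.  */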
1079
1080 \f
1081 /* The svr4 ABI for the i386 says that records and unions are returned
1082 in memory. */
1083 #ifndef DEFAULT_PCC_STRUCT_RETURN
1084 #define DEFAULT_PCC_STRUCT_RETURN 1
1085 #endif
1086
1087 /* Sometimes certain combinations of command options do not make
1088 sense on a particular target machine. You can define a macro
1089 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1090 defined, is executed once just after all the command options have
1091 been parsed.
1092
1093 Don't use this macro to turn on various extra optimizations for
1094 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1095
1096 void
1097 override_options (void)
1098 {
1099 int i;
1100 int ix86_tune_defaulted = 0;
1101
1102 /* Comes from final.c -- no real reason to change it. */
1103 #define MAX_CODE_ALIGN 16
1104
1105 static struct ptt
1106 {
1107 const struct processor_costs *cost; /* Processor costs */
1108 const int target_enable; /* Target flags to enable. */
1109 const int target_disable; /* Target flags to disable. */
1110 const int align_loop; /* Default alignments. */
1111 const int align_loop_max_skip;
1112 const int align_jump;
1113 const int align_jump_max_skip;
1114 const int align_func;
1115 }
1116 const processor_target_table[PROCESSOR_max] =
1117 {
1118 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1119 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1120 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1121 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1122 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1123 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1124 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1125 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1126 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1127 };
1128
1129 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1130 static struct pta
1131 {
1132 const char *const name; /* processor name or nickname. */
1133 const enum processor_type processor;
1134 const enum pta_flags
1135 {
1136 PTA_SSE = 1,
1137 PTA_SSE2 = 2,
1138 PTA_SSE3 = 4,
1139 PTA_MMX = 8,
1140 PTA_PREFETCH_SSE = 16,
1141 PTA_3DNOW = 32,
1142 PTA_3DNOW_A = 64,
1143 PTA_64BIT = 128
1144 } flags;
1145 }
1146 const processor_alias_table[] =
1147 {
1148 {"i386", PROCESSOR_I386, 0},
1149 {"i486", PROCESSOR_I486, 0},
1150 {"i586", PROCESSOR_PENTIUM, 0},
1151 {"pentium", PROCESSOR_PENTIUM, 0},
1152 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1153 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1154 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1155 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1156 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1157 {"i686", PROCESSOR_PENTIUMPRO, 0},
1158 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1159 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1160 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1161 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1162 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1163 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1164 | PTA_MMX | PTA_PREFETCH_SSE},
1165 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1166 | PTA_MMX | PTA_PREFETCH_SSE},
1167 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1168 | PTA_MMX | PTA_PREFETCH_SSE},
1169 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1170 | PTA_MMX | PTA_PREFETCH_SSE},
1171 {"k6", PROCESSOR_K6, PTA_MMX},
1172 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1173 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1174 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1175 | PTA_3DNOW_A},
1176 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1177 | PTA_3DNOW | PTA_3DNOW_A},
1178 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1179 | PTA_3DNOW_A | PTA_SSE},
1180 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1181 | PTA_3DNOW_A | PTA_SSE},
1182 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1183 | PTA_3DNOW_A | PTA_SSE},
1184 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1185 | PTA_SSE | PTA_SSE2 },
1186 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1187 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1188 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1189 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1190 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1191 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1192 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1193 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1194 };
1195
1196 int const pta_size = ARRAY_SIZE (processor_alias_table);
1197
1198 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1199 SUBTARGET_OVERRIDE_OPTIONS;
1200 #endif
1201
1202 /* Set the default values for switches whose default depends on TARGET_64BIT
1203 in case they weren't overwritten by command line options. */
1204 if (TARGET_64BIT)
1205 {
1206 if (flag_omit_frame_pointer == 2)
1207 flag_omit_frame_pointer = 1;
1208 if (flag_asynchronous_unwind_tables == 2)
1209 flag_asynchronous_unwind_tables = 1;
1210 if (flag_pcc_struct_return == 2)
1211 flag_pcc_struct_return = 0;
1212 }
1213 else
1214 {
1215 if (flag_omit_frame_pointer == 2)
1216 flag_omit_frame_pointer = 0;
1217 if (flag_asynchronous_unwind_tables == 2)
1218 flag_asynchronous_unwind_tables = 0;
1219 if (flag_pcc_struct_return == 2)
1220 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1221 }
1222
1223 if (!ix86_tune_string && ix86_arch_string)
1224 ix86_tune_string = ix86_arch_string;
1225 if (!ix86_tune_string)
1226 {
1227 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1228 ix86_tune_defaulted = 1;
1229 }
1230 if (!ix86_arch_string)
1231 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1232
1233 if (ix86_cmodel_string != 0)
1234 {
1235 if (!strcmp (ix86_cmodel_string, "small"))
1236 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1237 else if (flag_pic)
1238 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1239 else if (!strcmp (ix86_cmodel_string, "32"))
1240 ix86_cmodel = CM_32;
1241 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1242 ix86_cmodel = CM_KERNEL;
1243 else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
1244 ix86_cmodel = CM_MEDIUM;
1245 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1246 ix86_cmodel = CM_LARGE;
1247 else
1248 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1249 }
1250 else
1251 {
1252 ix86_cmodel = CM_32;
1253 if (TARGET_64BIT)
1254 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1255 }
1256 if (ix86_asm_string != 0)
1257 {
1258 if (!strcmp (ix86_asm_string, "intel"))
1259 ix86_asm_dialect = ASM_INTEL;
1260 else if (!strcmp (ix86_asm_string, "att"))
1261 ix86_asm_dialect = ASM_ATT;
1262 else
1263 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1264 }
1265 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1266 error ("code model %qs not supported in the %s bit mode",
1267 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1268 if (ix86_cmodel == CM_LARGE)
1269 sorry ("code model %<large%> not supported yet");
1270 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1271 sorry ("%i-bit mode not compiled in",
1272 (target_flags & MASK_64BIT) ? 64 : 32);
1273
1274 for (i = 0; i < pta_size; i++)
1275 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1276 {
1277 ix86_arch = processor_alias_table[i].processor;
1278 /* Default cpu tuning to the architecture. */
1279 ix86_tune = ix86_arch;
1280 if (processor_alias_table[i].flags & PTA_MMX
1281 && !(target_flags_explicit & MASK_MMX))
1282 target_flags |= MASK_MMX;
1283 if (processor_alias_table[i].flags & PTA_3DNOW
1284 && !(target_flags_explicit & MASK_3DNOW))
1285 target_flags |= MASK_3DNOW;
1286 if (processor_alias_table[i].flags & PTA_3DNOW_A
1287 && !(target_flags_explicit & MASK_3DNOW_A))
1288 target_flags |= MASK_3DNOW_A;
1289 if (processor_alias_table[i].flags & PTA_SSE
1290 && !(target_flags_explicit & MASK_SSE))
1291 target_flags |= MASK_SSE;
1292 if (processor_alias_table[i].flags & PTA_SSE2
1293 && !(target_flags_explicit & MASK_SSE2))
1294 target_flags |= MASK_SSE2;
1295 if (processor_alias_table[i].flags & PTA_SSE3
1296 && !(target_flags_explicit & MASK_SSE3))
1297 target_flags |= MASK_SSE3;
1298 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1299 x86_prefetch_sse = true;
1300 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1301 error ("CPU you selected does not support x86-64 "
1302 "instruction set");
1303 break;
1304 }
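/* For example, -march=pentium3 matches the alias-table entry above with
   processor = PROCESSOR_PENTIUMPRO and flags PTA_MMX | PTA_SSE |
   PTA_PREFETCH_SSE, enabling MASK_MMX and MASK_SSE unless the user
   explicitly disabled them on the command line.  */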
1305
1306 if (i == pta_size)
1307 error ("bad value (%s) for -march= switch", ix86_arch_string);
1308
1309 for (i = 0; i < pta_size; i++)
1310 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1311 {
1312 ix86_tune = processor_alias_table[i].processor;
1313 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1314 {
1315 if (ix86_tune_defaulted)
1316 {
1317 ix86_tune_string = "x86-64";
1318 for (i = 0; i < pta_size; i++)
1319 if (! strcmp (ix86_tune_string,
1320 processor_alias_table[i].name))
1321 break;
1322 ix86_tune = processor_alias_table[i].processor;
1323 }
1324 else
1325 error ("CPU you selected does not support x86-64 "
1326 "instruction set");
1327 }
1328 /* Intel CPUs have always interpreted SSE prefetch instructions as
1329 NOPs; so, we can enable SSE prefetch instructions even when
1330 -mtune (rather than -march) points us to a processor that has them.
1331 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1332 higher processors. */
1333 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1334 x86_prefetch_sse = true;
1335 break;
1336 }
1337 if (i == pta_size)
1338 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1339
1340 if (optimize_size)
1341 ix86_cost = &size_cost;
1342 else
1343 ix86_cost = processor_target_table[ix86_tune].cost;
1344 target_flags |= processor_target_table[ix86_tune].target_enable;
1345 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1346
1347 /* Arrange to set up i386_stack_locals for all functions. */
1348 init_machine_status = ix86_init_machine_status;
1349
1350 /* Validate -mregparm= value. */
1351 if (ix86_regparm_string)
1352 {
1353 i = atoi (ix86_regparm_string);
1354 if (i < 0 || i > REGPARM_MAX)
1355 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1356 else
1357 ix86_regparm = i;
1358 }
1359 else
1360 if (TARGET_64BIT)
1361 ix86_regparm = REGPARM_MAX;
1362
1363 /* If the user has provided any of the -malign-* options,
1364 warn and use that value only if -falign-* is not set.
1365 Remove this code in GCC 3.2 or later. */
1366 if (ix86_align_loops_string)
1367 {
1368 warning ("-malign-loops is obsolete, use -falign-loops");
1369 if (align_loops == 0)
1370 {
1371 i = atoi (ix86_align_loops_string);
1372 if (i < 0 || i > MAX_CODE_ALIGN)
1373 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1374 else
1375 align_loops = 1 << i;
1376 }
1377 }
1378
1379 if (ix86_align_jumps_string)
1380 {
1381 warning ("-malign-jumps is obsolete, use -falign-jumps");
1382 if (align_jumps == 0)
1383 {
1384 i = atoi (ix86_align_jumps_string);
1385 if (i < 0 || i > MAX_CODE_ALIGN)
1386 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1387 else
1388 align_jumps = 1 << i;
1389 }
1390 }
1391
1392 if (ix86_align_funcs_string)
1393 {
1394 warning ("-malign-functions is obsolete, use -falign-functions");
1395 if (align_functions == 0)
1396 {
1397 i = atoi (ix86_align_funcs_string);
1398 if (i < 0 || i > MAX_CODE_ALIGN)
1399 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1400 else
1401 align_functions = 1 << i;
1402 }
1403 }
1404
1405 /* Default align_* from the processor table. */
1406 if (align_loops == 0)
1407 {
1408 align_loops = processor_target_table[ix86_tune].align_loop;
1409 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1410 }
1411 if (align_jumps == 0)
1412 {
1413 align_jumps = processor_target_table[ix86_tune].align_jump;
1414 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1415 }
1416 if (align_functions == 0)
1417 {
1418 align_functions = processor_target_table[ix86_tune].align_func;
1419 }
1420
1421 /* Validate -mpreferred-stack-boundary= value, or provide default.
1422 The default of 128 bits is for Pentium III's SSE __m128, but we
1423 don't want additional code to keep the stack aligned when
1424 optimizing for code size. */
1425 ix86_preferred_stack_boundary = (optimize_size
1426 ? TARGET_64BIT ? 128 : 32
1427 : 128);
1428 if (ix86_preferred_stack_boundary_string)
1429 {
1430 i = atoi (ix86_preferred_stack_boundary_string);
1431 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1432 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1433 TARGET_64BIT ? 4 : 2);
1434 else
1435 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
1436 }
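/* For example, -mpreferred-stack-boundary=4 yields (1 << 4) * 8 = 128 bits,
   i.e. the 16-byte alignment assumed for SSE __m128 spills.  */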
1437
1438 /* Validate -mbranch-cost= value, or provide default. */
1439 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1440 if (ix86_branch_cost_string)
1441 {
1442 i = atoi (ix86_branch_cost_string);
1443 if (i < 0 || i > 5)
1444 error ("-mbranch-cost=%d is not between 0 and 5", i);
1445 else
1446 ix86_branch_cost = i;
1447 }
1448
1449 if (ix86_tls_dialect_string)
1450 {
1451 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1452 ix86_tls_dialect = TLS_DIALECT_GNU;
1453 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1454 ix86_tls_dialect = TLS_DIALECT_SUN;
1455 else
1456 error ("bad value (%s) for -mtls-dialect= switch",
1457 ix86_tls_dialect_string);
1458 }
1459
1460 /* Keep nonleaf frame pointers. */
1461 if (flag_omit_frame_pointer)
1462 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
1463 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
1464 flag_omit_frame_pointer = 1;
1465
1466 /* If we're doing fast math, we don't care about comparison order
1467 wrt NaNs. This lets us use a shorter comparison sequence. */
1468 if (flag_unsafe_math_optimizations)
1469 target_flags &= ~MASK_IEEE_FP;
1470
1471 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1472 since the insns won't need emulation. */
1473 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1474 target_flags &= ~MASK_NO_FANCY_MATH_387;
1475
1476 /* Likewise, if the target doesn't have a 387, or we've specified
1477 software floating point, don't use 387 inline intrinsics. */
1478 if (!TARGET_80387)
1479 target_flags |= MASK_NO_FANCY_MATH_387;
1480
1481 /* Turn on SSE2 builtins for -msse3. */
1482 if (TARGET_SSE3)
1483 target_flags |= MASK_SSE2;
1484
1485 /* Turn on SSE builtins for -msse2. */
1486 if (TARGET_SSE2)
1487 target_flags |= MASK_SSE;
1488
1489 /* Turn on MMX builtins for -msse. */
1490 if (TARGET_SSE)
1491 {
1492 target_flags |= MASK_MMX & ~target_flags_explicit;
1493 x86_prefetch_sse = true;
1494 }
1495
1496 /* Turn on MMX builtins for 3Dnow. */
1497 if (TARGET_3DNOW)
1498 target_flags |= MASK_MMX;
1499
1500 if (TARGET_64BIT)
1501 {
1502 if (TARGET_ALIGN_DOUBLE)
1503 error ("-malign-double makes no sense in 64bit mode");
1504 if (TARGET_RTD)
1505 error ("-mrtd calling convention not supported in 64bit mode");
1506
1507 /* Enable by default the SSE and MMX builtins. Do allow the user to
1508 explicitly disable any of these. In particular, disabling SSE and
1509 MMX for kernel code is extremely useful. */
1510 target_flags
1511 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
1512 & ~target_flags_explicit);
1513
1514 if (TARGET_SSE)
1515 ix86_fpmath = FPMATH_SSE;
1516 }
1517 else
1518 {
1519 ix86_fpmath = FPMATH_387;
1520 /* The i386 ABI does not specify a red zone. It still makes sense to use it
1521 when the programmer takes care to keep the stack from being destroyed. */
1522 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1523 target_flags |= MASK_NO_RED_ZONE;
1524 }
1525
1526 if (ix86_fpmath_string != 0)
1527 {
1528 if (! strcmp (ix86_fpmath_string, "387"))
1529 ix86_fpmath = FPMATH_387;
1530 else if (! strcmp (ix86_fpmath_string, "sse"))
1531 {
1532 if (!TARGET_SSE)
1533 {
1534 warning ("SSE instruction set disabled, using 387 arithmetics");
1535 ix86_fpmath = FPMATH_387;
1536 }
1537 else
1538 ix86_fpmath = FPMATH_SSE;
1539 }
1540 else if (! strcmp (ix86_fpmath_string, "387,sse")
1541 || ! strcmp (ix86_fpmath_string, "sse,387"))
1542 {
1543 if (!TARGET_SSE)
1544 {
1545 warning ("SSE instruction set disabled, using 387 arithmetics");
1546 ix86_fpmath = FPMATH_387;
1547 }
1548 else if (!TARGET_80387)
1549 {
1550 warning ("387 instruction set disabled, using SSE arithmetics");
1551 ix86_fpmath = FPMATH_SSE;
1552 }
1553 else
1554 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1555 }
1556 else
1557 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1558 }
1559
1560 /* If fpmath doesn't include 387, disable use of x87 intrinsics. */
1561 if (! (ix86_fpmath & FPMATH_387))
1562 target_flags |= MASK_NO_FANCY_MATH_387;
1563
1564 if ((x86_accumulate_outgoing_args & TUNEMASK)
1565 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1566 && !optimize_size)
1567 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1568
1569 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1570 {
1571 char *p;
1572 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1573 p = strchr (internal_label_prefix, 'X');
1574 internal_label_prefix_len = p - internal_label_prefix;
1575 *p = '\0';
1576 }
1577
1578 /* When no scheduling description is available, disable the scheduler pass
1579 so that it won't slow down compilation and make x87 code slower. */
1580 if (!TARGET_SCHEDULE)
1581 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1582 }
1583 \f
1584 void
1585 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1586 {
1587 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1588 make the problem with not enough registers even worse. */
1589 #ifdef INSN_SCHEDULING
1590 if (level > 1)
1591 flag_schedule_insns = 0;
1592 #endif
1593
1594 /* The default values of these switches depend on TARGET_64BIT, which is
1595 not known at this moment. Mark these values with 2 and let the user
1596 override them. In case there is no command line option specifying them,
1597 we will set the defaults in override_options. */
1598 if (optimize >= 1)
1599 flag_omit_frame_pointer = 2;
1600 flag_pcc_struct_return = 2;
1601 flag_asynchronous_unwind_tables = 2;
1602 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
1603 SUBTARGET_OPTIMIZATION_OPTIONS;
1604 #endif
1605 }
1606 \f
1607 /* Table of valid machine attributes. */
1608 const struct attribute_spec ix86_attribute_table[] =
1609 {
1610 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1611 /* Stdcall attribute says callee is responsible for popping arguments
1612 if they are not variable. */
1613 { "stdcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1614 /* Fastcall attribute says callee is responsible for popping arguments
1615 if they are not variable. */
1616 { "fastcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1617 /* Cdecl attribute says the callee is a normal C declaration. */
1618 { "cdecl", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1619 /* Regparm attribute specifies how many integer arguments are to be
1620 passed in registers. */
1621 { "regparm", 1, 1, false, true, true, ix86_handle_regparm_attribute },
1622 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1623 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1624 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1625 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1626 #endif
1627 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1628 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1629 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1630 SUBTARGET_ATTRIBUTE_TABLE,
1631 #endif
1632 { NULL, 0, 0, false, false, false, NULL }
1633 };
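
/* As a rough usage illustration (not part of GCC itself): declaring

     int __attribute__((stdcall)) f (int a, int b);

   makes the callee pop its 8 bytes of arguments on return, while

     int __attribute__((fastcall)) g (int a, int b);

   additionally passes the first two integer arguments in ECX and EDX
   (see ix86_function_regparm and function_arg below).  Both attributes
   are ignored with a warning when compiling for 64-bit.  */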
1634
1635 /* Decide whether we can make a sibling call to a function. DECL is the
1636 declaration of the function being targeted by the call and EXP is the
1637 CALL_EXPR representing the call. */
1638
1639 static bool
1640 ix86_function_ok_for_sibcall (tree decl, tree exp)
1641 {
1642 /* If we are generating position-independent code, we cannot sibcall
1643 optimize any indirect call, or a direct call to a global function,
1644 as the PLT requires %ebx be live. */
1645 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1646 return false;
1647
1648 /* If we are returning floats on the 80387 register stack, we cannot
1649 make a sibcall from a function that doesn't return a float to a
1650 function that does or, conversely, from a function that does return
1651 a float to a function that doesn't; the necessary stack adjustment
1652 would not be executed. */
1653 if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp)))
1654 != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)))))
1655 return false;
1656
1657 /* If this call is indirect, we'll need to be able to use a call-clobbered
1658 register for the address of the target function. Make sure that all
1659 such registers are not used for passing parameters. */
1660 if (!decl && !TARGET_64BIT)
1661 {
1662 tree type;
1663
1664 /* We're looking at the CALL_EXPR, we need the type of the function. */
1665 type = TREE_OPERAND (exp, 0); /* pointer expression */
1666 type = TREE_TYPE (type); /* pointer type */
1667 type = TREE_TYPE (type); /* function type */
1668
1669 if (ix86_function_regparm (type, NULL) >= 3)
1670 {
1671 /* ??? Need to count the actual number of registers to be used,
1672 not the possible number of registers. Fix later. */
1673 return false;
1674 }
1675 }
1676
1677 /* Otherwise okay. That also includes certain types of indirect calls. */
1678 return true;
1679 }
1680
1681 /* Handle a "cdecl", "stdcall", or "fastcall" attribute;
1682 arguments as in struct attribute_spec.handler. */
1683 static tree
1684 ix86_handle_cdecl_attribute (tree *node, tree name,
1685 tree args ATTRIBUTE_UNUSED,
1686 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1687 {
1688 if (TREE_CODE (*node) != FUNCTION_TYPE
1689 && TREE_CODE (*node) != METHOD_TYPE
1690 && TREE_CODE (*node) != FIELD_DECL
1691 && TREE_CODE (*node) != TYPE_DECL)
1692 {
1693 warning ("%qs attribute only applies to functions",
1694 IDENTIFIER_POINTER (name));
1695 *no_add_attrs = true;
1696 }
1697 else
1698 {
1699 if (is_attribute_p ("fastcall", name))
1700 {
1701 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1702 {
1703 error ("fastcall and stdcall attributes are not compatible");
1704 }
1705 else if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
1706 {
1707 error ("fastcall and regparm attributes are not compatible");
1708 }
1709 }
1710 else if (is_attribute_p ("stdcall", name))
1711 {
1712 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1713 {
1714 error ("fastcall and stdcall attributes are not compatible");
1715 }
1716 }
1717 }
1718
1719 if (TARGET_64BIT)
1720 {
1721 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
1722 *no_add_attrs = true;
1723 }
1724
1725 return NULL_TREE;
1726 }
1727
1728 /* Handle a "regparm" attribute;
1729 arguments as in struct attribute_spec.handler. */
1730 static tree
1731 ix86_handle_regparm_attribute (tree *node, tree name, tree args,
1732 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1733 {
1734 if (TREE_CODE (*node) != FUNCTION_TYPE
1735 && TREE_CODE (*node) != METHOD_TYPE
1736 && TREE_CODE (*node) != FIELD_DECL
1737 && TREE_CODE (*node) != TYPE_DECL)
1738 {
1739 warning ("%qs attribute only applies to functions",
1740 IDENTIFIER_POINTER (name));
1741 *no_add_attrs = true;
1742 }
1743 else
1744 {
1745 tree cst;
1746
1747 cst = TREE_VALUE (args);
1748 if (TREE_CODE (cst) != INTEGER_CST)
1749 {
1750 warning ("%qs attribute requires an integer constant argument",
1751 IDENTIFIER_POINTER (name));
1752 *no_add_attrs = true;
1753 }
1754 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
1755 {
1756 warning ("argument to %qs attribute larger than %d",
1757 IDENTIFIER_POINTER (name), REGPARM_MAX);
1758 *no_add_attrs = true;
1759 }
1760
1761 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1762 {
1763 error ("fastcall and regparm attributes are not compatible");
1764 }
1765 }
1766
1767 return NULL_TREE;
1768 }
1769
1770 /* Return 0 if the attributes for two types are incompatible, 1 if they
1771 are compatible, and 2 if they are nearly compatible (which causes a
1772 warning to be generated). */
1773
1774 static int
1775 ix86_comp_type_attributes (tree type1, tree type2)
1776 {
1777 /* Check for mismatch of non-default calling convention. */
1778 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
1779
1780 if (TREE_CODE (type1) != FUNCTION_TYPE)
1781 return 1;
1782
1783 /* Check for mismatched fastcall types */
1784 if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
1785 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
1786 return 0;
1787
1788 /* Check for mismatched return types (cdecl vs stdcall). */
1789 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
1790 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
1791 return 0;
1792 if (ix86_function_regparm (type1, NULL)
1793 != ix86_function_regparm (type2, NULL))
1794 return 0;
1795 return 1;
1796 }
1797 \f
1798 /* Return the regparm value for a function with the indicated TYPE and DECL.
1799 DECL may be NULL when calling function indirectly
1800 or considering a libcall. */
1801
1802 static int
1803 ix86_function_regparm (tree type, tree decl)
1804 {
1805 tree attr;
1806 int regparm = ix86_regparm;
1807 bool user_convention = false;
1808
1809 if (!TARGET_64BIT)
1810 {
1811 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
1812 if (attr)
1813 {
1814 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
1815 user_convention = true;
1816 }
1817
1818 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
1819 {
1820 regparm = 2;
1821 user_convention = true;
1822 }
1823
1824 /* Use register calling convention for local functions when possible. */
1825 if (!TARGET_64BIT && !user_convention && decl
1826 && flag_unit_at_a_time && !profile_flag)
1827 {
1828 struct cgraph_local_info *i = cgraph_local_info (decl);
1829 if (i && i->local)
1830 {
1831 /* We can't use regparm(3) for nested functions as these use
1832 static chain pointer in third argument. */
1833 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
1834 regparm = 2;
1835 else
1836 regparm = 3;
1837 }
1838 }
1839 }
1840 return regparm;
1841 }
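
/* For illustration: with the declaration

     int __attribute__((regparm(3))) f (int a, int b, int c);

   the three integer arguments are passed in registers (EAX, EDX and ECX in
   the usual i386 convention) instead of on the stack.  With
   -funit-at-a-time, a local (file-static, never address-taken) function
   may be given the same treatment automatically by the code above, using
   only two registers when a static chain is needed.  */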
1842
1843 /* Return true if EAX is live at the start of the function. Used by
1844 ix86_expand_prologue to determine if we need special help before
1845 calling allocate_stack_worker. */
1846
1847 static bool
1848 ix86_eax_live_at_start_p (void)
1849 {
1850 /* Cheat. Don't bother working forward from ix86_function_regparm
1851 to the function type to whether an actual argument is located in
1852 eax. Instead just look at cfg info, which is still close enough
1853 to correct at this point. This gives false positives for broken
1854 functions that might use uninitialized data that happens to be
1855 allocated in eax, but who cares? */
1856 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0);
1857 }
1858
1859 /* Value is the number of bytes of arguments automatically
1860 popped when returning from a subroutine call.
1861 FUNDECL is the declaration node of the function (as a tree),
1862 FUNTYPE is the data type of the function (as a tree),
1863 or for a library call it is an identifier node for the subroutine name.
1864 SIZE is the number of bytes of arguments passed on the stack.
1865
1866 On the 80386, the RTD insn may be used to pop them if the number
1867 of args is fixed, but if the number is variable then the caller
1868 must pop them all. RTD can't be used for library calls now
1869 because the library is compiled with the Unix compiler.
1870 Use of RTD is a selectable option, since it is incompatible with
1871 standard Unix calling sequences. If the option is not selected,
1872 the caller must always pop the args.
1873
1874 The attribute stdcall is equivalent to RTD on a per module basis. */
1875
1876 int
1877 ix86_return_pops_args (tree fundecl, tree funtype, int size)
1878 {
1879 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
1880
1881 /* Cdecl functions override -mrtd, and never pop the stack. */
1882 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
1883
1884 /* Stdcall and fastcall functions will pop the stack if not
1885 variable args. */
1886 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
1887 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
1888 rtd = 1;
1889
1890 if (rtd
1891 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
1892 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
1893 == void_type_node)))
1894 return size;
1895 }
1896
1897 /* Lose any fake structure return argument if it is passed on the stack. */
1898 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
1899 && !TARGET_64BIT
1900 && !KEEP_AGGREGATE_RETURN_POINTER)
1901 {
1902 int nregs = ix86_function_regparm (funtype, fundecl);
1903
1904 if (!nregs)
1905 return GET_MODE_SIZE (Pmode);
1906 }
1907
1908 return 0;
1909 }
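
/* Worked example (illustrative only): for a 32-bit call to

     int __attribute__((stdcall)) f (int a, int b);

   the two arguments occupy 8 bytes on the stack, so the function above
   returns 8 and the callee exits with "ret $8"; a cdecl or varargs callee
   gets 0 here, so the caller pops the arguments itself.  */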
1910 \f
1911 /* Argument support functions. */
1912
1913 /* Return true when register may be used to pass function parameters. */
1914 bool
1915 ix86_function_arg_regno_p (int regno)
1916 {
1917 int i;
1918 if (!TARGET_64BIT)
1919 return (regno < REGPARM_MAX
1920 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
1921 if (SSE_REGNO_P (regno) && TARGET_SSE)
1922 return true;
1923 /* RAX is used as hidden argument to va_arg functions. */
1924 if (!regno)
1925 return true;
1926 for (i = 0; i < REGPARM_MAX; i++)
1927 if (regno == x86_64_int_parameter_registers[i])
1928 return true;
1929 return false;
1930 }
1931
1932 /* Return true if we do not know how to pass TYPE solely in registers. */
1933
1934 static bool
1935 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
1936 {
1937 if (must_pass_in_stack_var_size_or_pad (mode, type))
1938 return true;
1939
1940 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
1941 The layout_type routine is crafty and tries to trick us into passing
1942 currently unsupported vector types on the stack by using TImode. */
1943 return (!TARGET_64BIT && mode == TImode
1944 && type && TREE_CODE (type) != VECTOR_TYPE);
1945 }
1946
1947 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1948 for a call to a function whose data type is FNTYPE.
1949 For a library call, FNTYPE is 0. */
1950
1951 void
1952 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
1953 tree fntype, /* tree ptr for function decl */
1954 rtx libname, /* SYMBOL_REF of library name or 0 */
1955 tree fndecl)
1956 {
1957 static CUMULATIVE_ARGS zero_cum;
1958 tree param, next_param;
1959
1960 if (TARGET_DEBUG_ARG)
1961 {
1962 fprintf (stderr, "\ninit_cumulative_args (");
1963 if (fntype)
1964 fprintf (stderr, "fntype code = %s, ret code = %s",
1965 tree_code_name[(int) TREE_CODE (fntype)],
1966 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
1967 else
1968 fprintf (stderr, "no fntype");
1969
1970 if (libname)
1971 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
1972 }
1973
1974 *cum = zero_cum;
1975
1976 /* Set up the number of registers to use for passing arguments. */
1977 if (fntype)
1978 cum->nregs = ix86_function_regparm (fntype, fndecl);
1979 else
1980 cum->nregs = ix86_regparm;
1981 if (TARGET_SSE)
1982 cum->sse_nregs = SSE_REGPARM_MAX;
1983 if (TARGET_MMX)
1984 cum->mmx_nregs = MMX_REGPARM_MAX;
1985 cum->warn_sse = true;
1986 cum->warn_mmx = true;
1987 cum->maybe_vaarg = false;
1988
1989 /* Use ecx and edx registers if the function has the fastcall attribute. */
1990 if (fntype && !TARGET_64BIT)
1991 {
1992 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
1993 {
1994 cum->nregs = 2;
1995 cum->fastcall = 1;
1996 }
1997 }
1998
1999 /* Determine if this function has variable arguments. This is
2000 indicated by the last argument being 'void_type_node' if there
2001 are no variable arguments. If there are variable arguments, then
2002 we won't pass anything in registers in 32-bit mode. */
2003
2004 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2005 {
2006 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2007 param != 0; param = next_param)
2008 {
2009 next_param = TREE_CHAIN (param);
2010 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2011 {
2012 if (!TARGET_64BIT)
2013 {
2014 cum->nregs = 0;
2015 cum->sse_nregs = 0;
2016 cum->mmx_nregs = 0;
2017 cum->warn_sse = 0;
2018 cum->warn_mmx = 0;
2019 cum->fastcall = 0;
2020 }
2021 cum->maybe_vaarg = true;
2022 }
2023 }
2024 }
2025 if ((!fntype && !libname)
2026 || (fntype && !TYPE_ARG_TYPES (fntype)))
2027 cum->maybe_vaarg = 1;
2028
2029 if (TARGET_DEBUG_ARG)
2030 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2031
2032 return;
2033 }
2034
2035 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2036 But in the case of vector types, it is some vector mode.
2037
2038 When we have only some of our vector isa extensions enabled, then there
2039 are some modes for which vector_mode_supported_p is false. For these
2040 modes, the generic vector support in gcc will choose some non-vector mode
2041 in order to implement the type. By computing the natural mode, we'll
2042 select the proper ABI location for the operand and not depend on whatever
2043 the middle-end decides to do with these vector types. */
2044
2045 static enum machine_mode
2046 type_natural_mode (tree type)
2047 {
2048 enum machine_mode mode = TYPE_MODE (type);
2049
2050 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2051 {
2052 HOST_WIDE_INT size = int_size_in_bytes (type);
2053 if ((size == 8 || size == 16)
2054 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2055 && TYPE_VECTOR_SUBPARTS (type) > 1)
2056 {
2057 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2058
2059 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2060 mode = MIN_MODE_VECTOR_FLOAT;
2061 else
2062 mode = MIN_MODE_VECTOR_INT;
2063
2064 /* Get the mode which has this inner mode and number of units. */
2065 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2066 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2067 && GET_MODE_INNER (mode) == innermode)
2068 return mode;
2069
2070 abort ();
2071 }
2072 }
2073
2074 return mode;
2075 }
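
/* For example (using GCC's generic vector extension syntax): a type
   declared as

     typedef int v4si __attribute__ ((vector_size (16)));

   has TYPE_VECTOR_SUBPARTS of 4.  Even when -msse is not enabled and the
   middle end has laid the type out in a non-vector mode, the loop above
   still finds V4SImode, so the argument gets its proper ABI slot.  */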
2076
2077 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2078 this may not agree with the mode that the type system has chosen for the
2079 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2080 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2081
2082 static rtx
2083 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2084 unsigned int regno)
2085 {
2086 rtx tmp;
2087
2088 if (orig_mode != BLKmode)
2089 tmp = gen_rtx_REG (orig_mode, regno);
2090 else
2091 {
2092 tmp = gen_rtx_REG (mode, regno);
2093 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2094 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2095 }
2096
2097 return tmp;
2098 }
2099
2100 /* x86-64 register passing implementation. See the x86-64 ABI for details. The
2101 goal of this code is to classify each eightbyte of an incoming argument by
2102 the register class and assign registers accordingly. */
2103
2104 /* Return the union class of CLASS1 and CLASS2.
2105 See the x86-64 PS ABI for details. */
2106
2107 static enum x86_64_reg_class
2108 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2109 {
2110 /* Rule #1: If both classes are equal, this is the resulting class. */
2111 if (class1 == class2)
2112 return class1;
2113
2114 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2115 the other class. */
2116 if (class1 == X86_64_NO_CLASS)
2117 return class2;
2118 if (class2 == X86_64_NO_CLASS)
2119 return class1;
2120
2121 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2122 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2123 return X86_64_MEMORY_CLASS;
2124
2125 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2126 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2127 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2128 return X86_64_INTEGERSI_CLASS;
2129 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2130 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2131 return X86_64_INTEGER_CLASS;
2132
2133 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
2134 MEMORY is used. */
2135 if (class1 == X86_64_X87_CLASS
2136 || class1 == X86_64_X87UP_CLASS
2137 || class1 == X86_64_COMPLEX_X87_CLASS
2138 || class2 == X86_64_X87_CLASS
2139 || class2 == X86_64_X87UP_CLASS
2140 || class2 == X86_64_COMPLEX_X87_CLASS)
2141 return X86_64_MEMORY_CLASS;
2142
2143 /* Rule #6: Otherwise class SSE is used. */
2144 return X86_64_SSE_CLASS;
2145 }
2146
2147 /* Classify the argument of type TYPE and mode MODE.
2148 CLASSES will be filled by the register class used to pass each word
2149 of the operand. The number of words is returned. In case the parameter
2150 should be passed in memory, 0 is returned. As a special case for zero
2151 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2152
2153 BIT_OFFSET is used internally for handling records and specifies the
2154 offset in bits modulo 256 to avoid overflow cases.
2155
2156 See the x86-64 PS ABI for details.
2157 */
2158
2159 static int
2160 classify_argument (enum machine_mode mode, tree type,
2161 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2162 {
2163 HOST_WIDE_INT bytes =
2164 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2165 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2166
2167 /* Variable sized entities are always passed/returned in memory. */
2168 if (bytes < 0)
2169 return 0;
2170
2171 if (mode != VOIDmode
2172 && targetm.calls.must_pass_in_stack (mode, type))
2173 return 0;
2174
2175 if (type && AGGREGATE_TYPE_P (type))
2176 {
2177 int i;
2178 tree field;
2179 enum x86_64_reg_class subclasses[MAX_CLASSES];
2180
2181 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2182 if (bytes > 16)
2183 return 0;
2184
2185 for (i = 0; i < words; i++)
2186 classes[i] = X86_64_NO_CLASS;
2187
2188 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2189 signal the memory class, so handle it as a special case. */
2190 if (!words)
2191 {
2192 classes[0] = X86_64_NO_CLASS;
2193 return 1;
2194 }
2195
2196 /* Classify each field of record and merge classes. */
2197 if (TREE_CODE (type) == RECORD_TYPE)
2198 {
2199 /* For classes, first merge in the fields of the base classes. */
2200 if (TYPE_BINFO (type))
2201 {
2202 tree binfo, base_binfo;
2203 int basenum;
2204
2205 for (binfo = TYPE_BINFO (type), basenum = 0;
2206 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2207 {
2208 int num;
2209 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2210 tree type = BINFO_TYPE (base_binfo);
2211
2212 num = classify_argument (TYPE_MODE (type),
2213 type, subclasses,
2214 (offset + bit_offset) % 256);
2215 if (!num)
2216 return 0;
2217 for (i = 0; i < num; i++)
2218 {
2219 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2220 classes[i + pos] =
2221 merge_classes (subclasses[i], classes[i + pos]);
2222 }
2223 }
2224 }
2225 /* And now merge the fields of the structure. */
2226 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2227 {
2228 if (TREE_CODE (field) == FIELD_DECL)
2229 {
2230 int num;
2231
2232 /* Bitfields are always classified as integer. Handle them
2233 early, since later code would consider them to be
2234 misaligned integers. */
2235 if (DECL_BIT_FIELD (field))
2236 {
2237 for (i = int_bit_position (field) / 8 / 8;
2238 i < (int_bit_position (field)
2239 + tree_low_cst (DECL_SIZE (field), 0)
2240 + 63) / 8 / 8; i++)
2241 classes[i] =
2242 merge_classes (X86_64_INTEGER_CLASS,
2243 classes[i]);
2244 }
2245 else
2246 {
2247 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2248 TREE_TYPE (field), subclasses,
2249 (int_bit_position (field)
2250 + bit_offset) % 256);
2251 if (!num)
2252 return 0;
2253 for (i = 0; i < num; i++)
2254 {
2255 int pos =
2256 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2257 classes[i + pos] =
2258 merge_classes (subclasses[i], classes[i + pos]);
2259 }
2260 }
2261 }
2262 }
2263 }
2264 /* Arrays are handled as small records. */
2265 else if (TREE_CODE (type) == ARRAY_TYPE)
2266 {
2267 int num;
2268 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2269 TREE_TYPE (type), subclasses, bit_offset);
2270 if (!num)
2271 return 0;
2272
2273 /* The partial classes are now full classes. */
2274 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2275 subclasses[0] = X86_64_SSE_CLASS;
2276 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2277 subclasses[0] = X86_64_INTEGER_CLASS;
2278
2279 for (i = 0; i < words; i++)
2280 classes[i] = subclasses[i % num];
2281 }
2282 /* Unions are similar to RECORD_TYPE but offset is always 0. */
2283 else if (TREE_CODE (type) == UNION_TYPE
2284 || TREE_CODE (type) == QUAL_UNION_TYPE)
2285 {
2286 /* For classes, first merge in the fields of the base classes. */
2287 if (TYPE_BINFO (type))
2288 {
2289 tree binfo, base_binfo;
2290 int basenum;
2291
2292 for (binfo = TYPE_BINFO (type), basenum = 0;
2293 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2294 {
2295 int num;
2296 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2297 tree type = BINFO_TYPE (base_binfo);
2298
2299 num = classify_argument (TYPE_MODE (type),
2300 type, subclasses,
2301 (offset + (bit_offset % 64)) % 256);
2302 if (!num)
2303 return 0;
2304 for (i = 0; i < num; i++)
2305 {
2306 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2307 classes[i + pos] =
2308 merge_classes (subclasses[i], classes[i + pos]);
2309 }
2310 }
2311 }
2312 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2313 {
2314 if (TREE_CODE (field) == FIELD_DECL)
2315 {
2316 int num;
2317 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2318 TREE_TYPE (field), subclasses,
2319 bit_offset);
2320 if (!num)
2321 return 0;
2322 for (i = 0; i < num; i++)
2323 classes[i] = merge_classes (subclasses[i], classes[i]);
2324 }
2325 }
2326 }
2327 else
2328 abort ();
2329
2330 /* Final merger cleanup. */
2331 for (i = 0; i < words; i++)
2332 {
2333 /* If one class is MEMORY, everything should be passed in
2334 memory. */
2335 if (classes[i] == X86_64_MEMORY_CLASS)
2336 return 0;
2337
2338 /* The X86_64_SSEUP_CLASS should always be preceded by
2339 X86_64_SSE_CLASS. */
2340 if (classes[i] == X86_64_SSEUP_CLASS
2341 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2342 classes[i] = X86_64_SSE_CLASS;
2343
2344 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2345 if (classes[i] == X86_64_X87UP_CLASS
2346 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2347 classes[i] = X86_64_SSE_CLASS;
2348 }
2349 return words;
2350 }
2351
2352 /* Compute alignment needed. We align all types to natural boundaries with
2353 the exception of XFmode, which is aligned to 64 bits. */
2354 if (mode != VOIDmode && mode != BLKmode)
2355 {
2356 int mode_alignment = GET_MODE_BITSIZE (mode);
2357
2358 if (mode == XFmode)
2359 mode_alignment = 128;
2360 else if (mode == XCmode)
2361 mode_alignment = 256;
2362 if (COMPLEX_MODE_P (mode))
2363 mode_alignment /= 2;
2364 /* Misaligned fields are always returned in memory. */
2365 if (bit_offset % mode_alignment)
2366 return 0;
2367 }
2368
2369 /* for V1xx modes, just use the base mode */
2370 if (VECTOR_MODE_P (mode)
2371 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2372 mode = GET_MODE_INNER (mode);
2373
2374 /* Classification of atomic types. */
2375 switch (mode)
2376 {
2377 case DImode:
2378 case SImode:
2379 case HImode:
2380 case QImode:
2381 case CSImode:
2382 case CHImode:
2383 case CQImode:
2384 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2385 classes[0] = X86_64_INTEGERSI_CLASS;
2386 else
2387 classes[0] = X86_64_INTEGER_CLASS;
2388 return 1;
2389 case CDImode:
2390 case TImode:
2391 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2392 return 2;
2393 case CTImode:
2394 return 0;
2395 case SFmode:
2396 if (!(bit_offset % 64))
2397 classes[0] = X86_64_SSESF_CLASS;
2398 else
2399 classes[0] = X86_64_SSE_CLASS;
2400 return 1;
2401 case DFmode:
2402 classes[0] = X86_64_SSEDF_CLASS;
2403 return 1;
2404 case XFmode:
2405 classes[0] = X86_64_X87_CLASS;
2406 classes[1] = X86_64_X87UP_CLASS;
2407 return 2;
2408 case TFmode:
2409 classes[0] = X86_64_SSE_CLASS;
2410 classes[1] = X86_64_SSEUP_CLASS;
2411 return 2;
2412 case SCmode:
2413 classes[0] = X86_64_SSE_CLASS;
2414 return 1;
2415 case DCmode:
2416 classes[0] = X86_64_SSEDF_CLASS;
2417 classes[1] = X86_64_SSEDF_CLASS;
2418 return 2;
2419 case XCmode:
2420 classes[0] = X86_64_COMPLEX_X87_CLASS;
2421 return 1;
2422 case TCmode:
2423 /* This mode is larger than 16 bytes. */
2424 return 0;
2425 case V4SFmode:
2426 case V4SImode:
2427 case V16QImode:
2428 case V8HImode:
2429 case V2DFmode:
2430 case V2DImode:
2431 classes[0] = X86_64_SSE_CLASS;
2432 classes[1] = X86_64_SSEUP_CLASS;
2433 return 2;
2434 case V2SFmode:
2435 case V2SImode:
2436 case V4HImode:
2437 case V8QImode:
2438 classes[0] = X86_64_SSE_CLASS;
2439 return 1;
2440 case BLKmode:
2441 case VOIDmode:
2442 return 0;
2443 default:
2444 if (VECTOR_MODE_P (mode))
2445 {
2446 if (bytes > 16)
2447 return 0;
2448 if (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT)
2449 {
2450 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2451 classes[0] = X86_64_INTEGERSI_CLASS;
2452 else
2453 classes[0] = X86_64_INTEGER_CLASS;
2454 classes[1] = X86_64_INTEGER_CLASS;
2455 return 1 + (bytes > 8);
2456 }
2457 }
2458 abort ();
2459 }
2460 }
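
/* A small worked example (illustrative, not exhaustive): on x86-64 a
   parameter of type

     struct { double d; long l; };

   is 16 bytes, i.e. two eightbytes.  The first is classified SSEDF (for
   the double) and the second INTEGER (for the long), so classify_argument
   returns 2 and examine_argument below counts one SSE and one integer
   register for it.  */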
2461
2462 /* Examine the argument and return the number of registers required in each
2463 class. Return 0 iff the parameter should be passed in memory. */
2464 static int
2465 examine_argument (enum machine_mode mode, tree type, int in_return,
2466 int *int_nregs, int *sse_nregs)
2467 {
2468 enum x86_64_reg_class class[MAX_CLASSES];
2469 int n = classify_argument (mode, type, class, 0);
2470
2471 *int_nregs = 0;
2472 *sse_nregs = 0;
2473 if (!n)
2474 return 0;
2475 for (n--; n >= 0; n--)
2476 switch (class[n])
2477 {
2478 case X86_64_INTEGER_CLASS:
2479 case X86_64_INTEGERSI_CLASS:
2480 (*int_nregs)++;
2481 break;
2482 case X86_64_SSE_CLASS:
2483 case X86_64_SSESF_CLASS:
2484 case X86_64_SSEDF_CLASS:
2485 (*sse_nregs)++;
2486 break;
2487 case X86_64_NO_CLASS:
2488 case X86_64_SSEUP_CLASS:
2489 break;
2490 case X86_64_X87_CLASS:
2491 case X86_64_X87UP_CLASS:
2492 if (!in_return)
2493 return 0;
2494 break;
2495 case X86_64_COMPLEX_X87_CLASS:
2496 return in_return ? 2 : 0;
2497 case X86_64_MEMORY_CLASS:
2498 abort ();
2499 }
2500 return 1;
2501 }
2502
2503 /* Construct container for the argument used by GCC interface. See
2504 FUNCTION_ARG for the detailed description. */
2505
2506 static rtx
2507 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2508 tree type, int in_return, int nintregs, int nsseregs,
2509 const int *intreg, int sse_regno)
2510 {
2511 enum machine_mode tmpmode;
2512 int bytes =
2513 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2514 enum x86_64_reg_class class[MAX_CLASSES];
2515 int n;
2516 int i;
2517 int nexps = 0;
2518 int needed_sseregs, needed_intregs;
2519 rtx exp[MAX_CLASSES];
2520 rtx ret;
2521
2522 n = classify_argument (mode, type, class, 0);
2523 if (TARGET_DEBUG_ARG)
2524 {
2525 if (!n)
2526 fprintf (stderr, "Memory class\n");
2527 else
2528 {
2529 fprintf (stderr, "Classes:");
2530 for (i = 0; i < n; i++)
2531 {
2532 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2533 }
2534 fprintf (stderr, "\n");
2535 }
2536 }
2537 if (!n)
2538 return NULL;
2539 if (!examine_argument (mode, type, in_return, &needed_intregs,
2540 &needed_sseregs))
2541 return NULL;
2542 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2543 return NULL;
2544
2545 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2546 some less clueful developer tries to use floating-point anyway. */
2547 if (needed_sseregs && !TARGET_SSE)
2548 {
2549 static bool issued_error;
2550 if (!issued_error)
2551 {
2552 issued_error = true;
2553 if (in_return)
2554 error ("SSE register return with SSE disabled");
2555 else
2556 error ("SSE register argument with SSE disabled");
2557 }
2558 return NULL;
2559 }
2560
2561 /* First construct simple cases. Avoid SCmode, since we want to use
2562 a single register to pass this type. */
2563 if (n == 1 && mode != SCmode)
2564 switch (class[0])
2565 {
2566 case X86_64_INTEGER_CLASS:
2567 case X86_64_INTEGERSI_CLASS:
2568 return gen_rtx_REG (mode, intreg[0]);
2569 case X86_64_SSE_CLASS:
2570 case X86_64_SSESF_CLASS:
2571 case X86_64_SSEDF_CLASS:
2572 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2573 case X86_64_X87_CLASS:
2574 case X86_64_COMPLEX_X87_CLASS:
2575 return gen_rtx_REG (mode, FIRST_STACK_REG);
2576 case X86_64_NO_CLASS:
2577 /* Zero sized array, struct or class. */
2578 return NULL;
2579 default:
2580 abort ();
2581 }
2582 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2583 && mode != BLKmode)
2584 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2585 if (n == 2
2586 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2587 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2588 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2589 && class[1] == X86_64_INTEGER_CLASS
2590 && (mode == CDImode || mode == TImode || mode == TFmode)
2591 && intreg[0] + 1 == intreg[1])
2592 return gen_rtx_REG (mode, intreg[0]);
2593
2594 /* Otherwise figure out the entries of the PARALLEL. */
2595 for (i = 0; i < n; i++)
2596 {
2597 switch (class[i])
2598 {
2599 case X86_64_NO_CLASS:
2600 break;
2601 case X86_64_INTEGER_CLASS:
2602 case X86_64_INTEGERSI_CLASS:
2603 /* Merge TImodes on aligned occasions here too. */
2604 if (i * 8 + 8 > bytes)
2605 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2606 else if (class[i] == X86_64_INTEGERSI_CLASS)
2607 tmpmode = SImode;
2608 else
2609 tmpmode = DImode;
2610 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
2611 if (tmpmode == BLKmode)
2612 tmpmode = DImode;
2613 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2614 gen_rtx_REG (tmpmode, *intreg),
2615 GEN_INT (i*8));
2616 intreg++;
2617 break;
2618 case X86_64_SSESF_CLASS:
2619 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2620 gen_rtx_REG (SFmode,
2621 SSE_REGNO (sse_regno)),
2622 GEN_INT (i*8));
2623 sse_regno++;
2624 break;
2625 case X86_64_SSEDF_CLASS:
2626 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2627 gen_rtx_REG (DFmode,
2628 SSE_REGNO (sse_regno)),
2629 GEN_INT (i*8));
2630 sse_regno++;
2631 break;
2632 case X86_64_SSE_CLASS:
2633 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2634 tmpmode = TImode;
2635 else
2636 tmpmode = DImode;
2637 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2638 gen_rtx_REG (tmpmode,
2639 SSE_REGNO (sse_regno)),
2640 GEN_INT (i*8));
2641 if (tmpmode == TImode)
2642 i++;
2643 sse_regno++;
2644 break;
2645 default:
2646 abort ();
2647 }
2648 }
2649 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2650 for (i = 0; i < nexps; i++)
2651 XVECEXP (ret, 0, i) = exp [i];
2652 return ret;
2653 }
2654
2655 /* Update the data in CUM to advance over an argument
2656 of mode MODE and data type TYPE.
2657 (TYPE is null for libcalls where that information may not be available.) */
2658
2659 void
2660 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2661 tree type, int named)
2662 {
2663 int bytes =
2664 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2665 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2666
2667 if (type)
2668 mode = type_natural_mode (type);
2669
2670 if (TARGET_DEBUG_ARG)
2671 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
2672 "mode=%s, named=%d)\n\n",
2673 words, cum->words, cum->nregs, cum->sse_nregs,
2674 GET_MODE_NAME (mode), named);
2675
2676 if (TARGET_64BIT)
2677 {
2678 int int_nregs, sse_nregs;
2679 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
2680 cum->words += words;
2681 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
2682 {
2683 cum->nregs -= int_nregs;
2684 cum->sse_nregs -= sse_nregs;
2685 cum->regno += int_nregs;
2686 cum->sse_regno += sse_nregs;
2687 }
2688 else
2689 cum->words += words;
2690 }
2691 else
2692 {
2693 switch (mode)
2694 {
2695 default:
2696 break;
2697
2698 case BLKmode:
2699 if (bytes < 0)
2700 break;
2701 /* FALLTHRU */
2702
2703 case DImode:
2704 case SImode:
2705 case HImode:
2706 case QImode:
2707 cum->words += words;
2708 cum->nregs -= words;
2709 cum->regno += words;
2710
2711 if (cum->nregs <= 0)
2712 {
2713 cum->nregs = 0;
2714 cum->regno = 0;
2715 }
2716 break;
2717
2718 case TImode:
2719 case V16QImode:
2720 case V8HImode:
2721 case V4SImode:
2722 case V2DImode:
2723 case V4SFmode:
2724 case V2DFmode:
2725 if (!type || !AGGREGATE_TYPE_P (type))
2726 {
2727 cum->sse_words += words;
2728 cum->sse_nregs -= 1;
2729 cum->sse_regno += 1;
2730 if (cum->sse_nregs <= 0)
2731 {
2732 cum->sse_nregs = 0;
2733 cum->sse_regno = 0;
2734 }
2735 }
2736 break;
2737
2738 case V8QImode:
2739 case V4HImode:
2740 case V2SImode:
2741 case V2SFmode:
2742 if (!type || !AGGREGATE_TYPE_P (type))
2743 {
2744 cum->mmx_words += words;
2745 cum->mmx_nregs -= 1;
2746 cum->mmx_regno += 1;
2747 if (cum->mmx_nregs <= 0)
2748 {
2749 cum->mmx_nregs = 0;
2750 cum->mmx_regno = 0;
2751 }
2752 }
2753 break;
2754 }
2755 }
2756 }
2757
2758 /* Define where to put the arguments to a function.
2759 Value is zero to push the argument on the stack,
2760 or a hard register in which to store the argument.
2761
2762 MODE is the argument's machine mode.
2763 TYPE is the data type of the argument (as a tree).
2764 This is null for libcalls where that information may
2765 not be available.
2766 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2767 the preceding args and about the function being called.
2768 NAMED is nonzero if this argument is a named parameter
2769 (otherwise it is an extra parameter matching an ellipsis). */
2770
2771 rtx
2772 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
2773 tree type, int named)
2774 {
2775 enum machine_mode mode = orig_mode;
2776 rtx ret = NULL_RTX;
2777 int bytes =
2778 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2779 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2780 static bool warnedsse, warnedmmx;
2781
2782 /* To simplify the code below, represent vector types with a vector mode
2783 even if MMX/SSE are not active. */
2784 if (type && TREE_CODE (type) == VECTOR_TYPE)
2785 mode = type_natural_mode (type);
2786
2787 /* Handle a hidden AL argument containing the number of registers for varargs
2788 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
2789 any AL settings. */
2790 if (mode == VOIDmode)
2791 {
2792 if (TARGET_64BIT)
2793 return GEN_INT (cum->maybe_vaarg
2794 ? (cum->sse_nregs < 0
2795 ? SSE_REGPARM_MAX
2796 : cum->sse_regno)
2797 : -1);
2798 else
2799 return constm1_rtx;
2800 }
2801 if (TARGET_64BIT)
2802 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
2803 cum->sse_nregs,
2804 &x86_64_int_parameter_registers [cum->regno],
2805 cum->sse_regno);
2806 else
2807 switch (mode)
2808 {
2809 /* For now, pass fp/complex values on the stack. */
2810 default:
2811 break;
2812
2813 case BLKmode:
2814 if (bytes < 0)
2815 break;
2816 /* FALLTHRU */
2817 case DImode:
2818 case SImode:
2819 case HImode:
2820 case QImode:
2821 if (words <= cum->nregs)
2822 {
2823 int regno = cum->regno;
2824
2825 /* Fastcall allocates the first two DWORD (SImode) or
2826 smaller arguments to ECX and EDX. */
2827 if (cum->fastcall)
2828 {
2829 if (mode == BLKmode || mode == DImode)
2830 break;
2831
2832 /* ECX not EAX is the first allocated register. */
2833 if (regno == 0)
2834 regno = 2;
2835 }
2836 ret = gen_rtx_REG (mode, regno);
2837 }
2838 break;
2839 case TImode:
2840 case V16QImode:
2841 case V8HImode:
2842 case V4SImode:
2843 case V2DImode:
2844 case V4SFmode:
2845 case V2DFmode:
2846 if (!type || !AGGREGATE_TYPE_P (type))
2847 {
2848 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
2849 {
2850 warnedsse = true;
2851 warning ("SSE vector argument without SSE enabled "
2852 "changes the ABI");
2853 }
2854 if (cum->sse_nregs)
2855 ret = gen_reg_or_parallel (mode, orig_mode,
2856 cum->sse_regno + FIRST_SSE_REG);
2857 }
2858 break;
2859 case V8QImode:
2860 case V4HImode:
2861 case V2SImode:
2862 case V2SFmode:
2863 if (!type || !AGGREGATE_TYPE_P (type))
2864 {
2865 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
2866 {
2867 warnedmmx = true;
2868 warning ("MMX vector argument without MMX enabled "
2869 "changes the ABI");
2870 }
2871 if (cum->mmx_nregs)
2872 ret = gen_reg_or_parallel (mode, orig_mode,
2873 cum->mmx_regno + FIRST_MMX_REG);
2874 }
2875 break;
2876 }
2877
2878 if (TARGET_DEBUG_ARG)
2879 {
2880 fprintf (stderr,
2881 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
2882 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
2883
2884 if (ret)
2885 print_simple_rtl (stderr, ret);
2886 else
2887 fprintf (stderr, ", stack");
2888
2889 fprintf (stderr, " )\n");
2890 }
2891
2892 return ret;
2893 }
2894
2895 /* A C expression that indicates when an argument must be passed by
2896 reference. If nonzero for an argument, a copy of that argument is
2897 made in memory and a pointer to the argument is passed instead of
2898 the argument itself. The pointer is passed in whatever way is
2899 appropriate for passing a pointer to that type. */
2900
2901 static bool
2902 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2903 enum machine_mode mode ATTRIBUTE_UNUSED,
2904 tree type, bool named ATTRIBUTE_UNUSED)
2905 {
2906 if (!TARGET_64BIT)
2907 return 0;
2908
2909 if (type && int_size_in_bytes (type) == -1)
2910 {
2911 if (TARGET_DEBUG_ARG)
2912 fprintf (stderr, "function_arg_pass_by_reference\n");
2913 return 1;
2914 }
2915
2916 return 0;
2917 }
2918
2919 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
2920 ABI. Only called if TARGET_SSE. */
2921 static bool
2922 contains_128bit_aligned_vector_p (tree type)
2923 {
2924 enum machine_mode mode = TYPE_MODE (type);
2925 if (SSE_REG_MODE_P (mode)
2926 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
2927 return true;
2928 if (TYPE_ALIGN (type) < 128)
2929 return false;
2930
2931 if (AGGREGATE_TYPE_P (type))
2932 {
2933 /* Walk the aggregates recursively. */
2934 if (TREE_CODE (type) == RECORD_TYPE
2935 || TREE_CODE (type) == UNION_TYPE
2936 || TREE_CODE (type) == QUAL_UNION_TYPE)
2937 {
2938 tree field;
2939
2940 if (TYPE_BINFO (type))
2941 {
2942 tree binfo, base_binfo;
2943 int i;
2944
2945 for (binfo = TYPE_BINFO (type), i = 0;
2946 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2947 if (contains_128bit_aligned_vector_p (BINFO_TYPE (base_binfo)))
2948 return true;
2949 }
2950 /* And now merge the fields of the structure. */
2951 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2952 {
2953 if (TREE_CODE (field) == FIELD_DECL
2954 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
2955 return true;
2956 }
2957 }
2958 /* Just for use if some language passes arrays by value. */
2959 else if (TREE_CODE (type) == ARRAY_TYPE)
2960 {
2961 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
2962 return true;
2963 }
2964 else
2965 abort ();
2966 }
2967 return false;
2968 }
2969
2970 /* Gives the alignment boundary, in bits, of an argument with the
2971 specified mode and type. */
2972
2973 int
2974 ix86_function_arg_boundary (enum machine_mode mode, tree type)
2975 {
2976 int align;
2977 if (type)
2978 align = TYPE_ALIGN (type);
2979 else
2980 align = GET_MODE_ALIGNMENT (mode);
2981 if (align < PARM_BOUNDARY)
2982 align = PARM_BOUNDARY;
2983 if (!TARGET_64BIT)
2984 {
2985 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
2986 make an exception for SSE modes since these require 128bit
2987 alignment.
2988
2989 The handling here differs from field_alignment. ICC aligns MMX
2990 arguments to 4 byte boundaries, while structure fields are aligned
2991 to 8 byte boundaries. */
2992 if (!TARGET_SSE)
2993 align = PARM_BOUNDARY;
2994 else if (!type)
2995 {
2996 if (!SSE_REG_MODE_P (mode))
2997 align = PARM_BOUNDARY;
2998 }
2999 else
3000 {
3001 if (!contains_128bit_aligned_vector_p (type))
3002 align = PARM_BOUNDARY;
3003 }
3004 }
3005 if (align > 128)
3006 align = 128;
3007 return align;
3008 }
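
/* A sketch of the 32-bit case: with SSE enabled, an argument of a 16-byte
   vector type such as __m128 keeps its 128-bit alignment here, while a
   plain double argument falls back to PARM_BOUNDARY (32 bits), matching
   the test above on contains_128bit_aligned_vector_p.  */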
3009
3010 /* Return true if N is a possible register number of function value. */
3011 bool
3012 ix86_function_value_regno_p (int regno)
3013 {
3014 if (!TARGET_64BIT)
3015 {
3016 return ((regno) == 0
3017 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3018 || ((regno) == FIRST_SSE_REG && TARGET_SSE));
3019 }
3020 return ((regno) == 0 || (regno) == FIRST_FLOAT_REG
3021 || ((regno) == FIRST_SSE_REG && TARGET_SSE)
3022 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387));
3023 }
3024
3025 /* Define how to find the value returned by a function.
3026 VALTYPE is the data type of the value (as a tree).
3027 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3028 otherwise, FUNC is 0. */
3029 rtx
3030 ix86_function_value (tree valtype)
3031 {
3032 enum machine_mode natmode = type_natural_mode (valtype);
3033
3034 if (TARGET_64BIT)
3035 {
3036 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3037 1, REGPARM_MAX, SSE_REGPARM_MAX,
3038 x86_64_int_return_registers, 0);
3039 /* For zero sized structures, construct_container returns NULL, but we
3040 need to keep the rest of the compiler happy by returning a meaningful value. */
3041 if (!ret)
3042 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3043 return ret;
3044 }
3045 else
3046 return gen_rtx_REG (TYPE_MODE (valtype), ix86_value_regno (natmode));
3047 }
3048
3049 /* Return nonzero iff TYPE is returned in memory. */
3050 int
3051 ix86_return_in_memory (tree type)
3052 {
3053 int needed_intregs, needed_sseregs, size;
3054 enum machine_mode mode = type_natural_mode (type);
3055
3056 if (TARGET_64BIT)
3057 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3058
3059 if (mode == BLKmode)
3060 return 1;
3061
3062 size = int_size_in_bytes (type);
3063
3064 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3065 return 0;
3066
3067 if (VECTOR_MODE_P (mode) || mode == TImode)
3068 {
3069 /* User-created vectors small enough to fit in EAX. */
3070 if (size < 8)
3071 return 0;
3072
3073 /* MMX/3dNow values are returned on the stack, since we've
3074 got to EMMS/FEMMS before returning. */
3075 if (size == 8)
3076 return 1;
3077
3078 /* SSE values are returned in XMM0, except when it doesn't exist. */
3079 if (size == 16)
3080 return (TARGET_SSE ? 0 : 1);
3081 }
3082
3083 if (mode == XFmode)
3084 return 0;
3085
3086 if (size > 12)
3087 return 1;
3088 return 0;
3089 }
3090
3091 /* When returning SSE vector types, we have a choice of either
3092 (1) being ABI incompatible with a -march switch, or
3093 (2) generating an error.
3094 Given no good solution, I think the safest thing is one warning.
3095 The user won't be able to use -Werror, but....
3096
3097 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3098 called in response to actually generating a caller or callee that
3099 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3100 via aggregate_value_p for general type probing from tree-ssa. */
3101
3102 static rtx
3103 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3104 {
3105 static bool warned;
3106
3107 if (!TARGET_SSE && type && !warned)
3108 {
3109 /* Look at the return type of the function, not the function type. */
3110 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3111
3112 if (mode == TImode
3113 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3114 {
3115 warned = true;
3116 warning ("SSE vector return without SSE enabled changes the ABI");
3117 }
3118 }
3119
3120 return NULL;
3121 }
3122
3123 /* Define how to find the value returned by a library function
3124 assuming the value has mode MODE. */
3125 rtx
3126 ix86_libcall_value (enum machine_mode mode)
3127 {
3128 if (TARGET_64BIT)
3129 {
3130 switch (mode)
3131 {
3132 case SFmode:
3133 case SCmode:
3134 case DFmode:
3135 case DCmode:
3136 case TFmode:
3137 return gen_rtx_REG (mode, FIRST_SSE_REG);
3138 case XFmode:
3139 case XCmode:
3140 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3141 case TCmode:
3142 return NULL;
3143 default:
3144 return gen_rtx_REG (mode, 0);
3145 }
3146 }
3147 else
3148 return gen_rtx_REG (mode, ix86_value_regno (mode));
3149 }
3150
3151 /* Given a mode, return the register to use for a return value. */
3152
3153 static int
3154 ix86_value_regno (enum machine_mode mode)
3155 {
3156 /* Floating point return values in %st(0). */
3157 if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387)
3158 return FIRST_FLOAT_REG;
3159 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3160 we prevent this case when sse is not available. */
3161 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3162 return FIRST_SSE_REG;
3163 /* Everything else in %eax. */
3164 return 0;
3165 }
3166 \f
3167 /* Create the va_list data type. */
3168
3169 static tree
3170 ix86_build_builtin_va_list (void)
3171 {
3172 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3173
3174 /* For i386 we use plain pointer to argument area. */
3175 if (!TARGET_64BIT)
3176 return build_pointer_type (char_type_node);
3177
3178 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3179 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3180
3181 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3182 unsigned_type_node);
3183 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3184 unsigned_type_node);
3185 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3186 ptr_type_node);
3187 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3188 ptr_type_node);
3189
3190 DECL_FIELD_CONTEXT (f_gpr) = record;
3191 DECL_FIELD_CONTEXT (f_fpr) = record;
3192 DECL_FIELD_CONTEXT (f_ovf) = record;
3193 DECL_FIELD_CONTEXT (f_sav) = record;
3194
3195 TREE_CHAIN (record) = type_decl;
3196 TYPE_NAME (record) = type_decl;
3197 TYPE_FIELDS (record) = f_gpr;
3198 TREE_CHAIN (f_gpr) = f_fpr;
3199 TREE_CHAIN (f_fpr) = f_ovf;
3200 TREE_CHAIN (f_ovf) = f_sav;
3201
3202 layout_type (record);
3203
3204 /* The correct type is an array type of one element. */
3205 return build_array_type (record, build_index_type (size_zero_node));
3206 }
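
/* The record built above corresponds roughly to the C declaration

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __builtin_va_list[1];

   i.e. an array of one element, as required by the x86-64 psABI.  */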
3207
3208 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3209
3210 static void
3211 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3212 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3213 int no_rtl)
3214 {
3215 CUMULATIVE_ARGS next_cum;
3216 rtx save_area = NULL_RTX, mem;
3217 rtx label;
3218 rtx label_ref;
3219 rtx tmp_reg;
3220 rtx nsse_reg;
3221 int set;
3222 tree fntype;
3223 int stdarg_p;
3224 int i;
3225
3226 if (!TARGET_64BIT)
3227 return;
3228
3229 /* Indicate that we need to allocate space on the stack for the varargs save area. */
3230 ix86_save_varrargs_registers = 1;
3231
3232 cfun->stack_alignment_needed = 128;
3233
3234 fntype = TREE_TYPE (current_function_decl);
3235 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3236 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3237 != void_type_node));
3238
3239 /* For varargs, we do not want to skip the dummy va_dcl argument.
3240 For stdargs, we do want to skip the last named argument. */
3241 next_cum = *cum;
3242 if (stdarg_p)
3243 function_arg_advance (&next_cum, mode, type, 1);
3244
3245 if (!no_rtl)
3246 save_area = frame_pointer_rtx;
3247
3248 set = get_varargs_alias_set ();
3249
3250 for (i = next_cum.regno; i < ix86_regparm; i++)
3251 {
3252 mem = gen_rtx_MEM (Pmode,
3253 plus_constant (save_area, i * UNITS_PER_WORD));
3254 set_mem_alias_set (mem, set);
3255 emit_move_insn (mem, gen_rtx_REG (Pmode,
3256 x86_64_int_parameter_registers[i]));
3257 }
3258
3259 if (next_cum.sse_nregs)
3260 {
3261 /* Now emit code to save SSE registers. The AX parameter contains the
3262 number of SSE parameter registers used to call this function. We use
3263 the sse_prologue_save insn template that produces a computed jump across
3264 the SSE saves. We need some preparation work to get this working. */
3265
3266 label = gen_label_rtx ();
3267 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3268
3269 /* Compute address to jump to :
3270 label - 5*eax + nnamed_sse_arguments*5 */
3271 tmp_reg = gen_reg_rtx (Pmode);
3272 nsse_reg = gen_reg_rtx (Pmode);
3273 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3274 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3275 gen_rtx_MULT (Pmode, nsse_reg,
3276 GEN_INT (4))));
3277 if (next_cum.sse_regno)
3278 emit_move_insn
3279 (nsse_reg,
3280 gen_rtx_CONST (DImode,
3281 gen_rtx_PLUS (DImode,
3282 label_ref,
3283 GEN_INT (next_cum.sse_regno * 4))));
3284 else
3285 emit_move_insn (nsse_reg, label_ref);
3286 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3287
3288 /* Compute the address of the memory block we save into. We always use a
3289 pointer pointing 127 bytes after the first byte to store - this is needed
3290 to keep the instruction size limited to 4 bytes. */
3291 tmp_reg = gen_reg_rtx (Pmode);
3292 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3293 plus_constant (save_area,
3294 8 * REGPARM_MAX + 127)));
3295 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3296 set_mem_alias_set (mem, set);
3297 set_mem_align (mem, BITS_PER_WORD);
3298
3299 /* And finally do the dirty job! */
3300 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3301 GEN_INT (next_cum.sse_regno), label));
3302 }
3303
3304 }
3305
3306 /* Implement va_start. */
3307
3308 void
3309 ix86_va_start (tree valist, rtx nextarg)
3310 {
3311 HOST_WIDE_INT words, n_gpr, n_fpr;
3312 tree f_gpr, f_fpr, f_ovf, f_sav;
3313 tree gpr, fpr, ovf, sav, t;
3314
3315 /* Only the 64-bit target needs anything special. */
3316 if (!TARGET_64BIT)
3317 {
3318 std_expand_builtin_va_start (valist, nextarg);
3319 return;
3320 }
3321
3322 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3323 f_fpr = TREE_CHAIN (f_gpr);
3324 f_ovf = TREE_CHAIN (f_fpr);
3325 f_sav = TREE_CHAIN (f_ovf);
3326
3327 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3328 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3329 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3330 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3331 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3332
3333 /* Count number of gp and fp argument registers used. */
3334 words = current_function_args_info.words;
3335 n_gpr = current_function_args_info.regno;
3336 n_fpr = current_function_args_info.sse_regno;
3337
3338 if (TARGET_DEBUG_ARG)
3339 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3340 (int) words, (int) n_gpr, (int) n_fpr);
3341
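/* gpr counts bytes already consumed in the integer register save area
   (8 bytes per register); fpr is the offset of the next free SSE slot,
   which starts after the 8 * REGPARM_MAX bytes of integer slots and
   uses 16 bytes per register.  */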
3342 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3343 build_int_cst (NULL_TREE, n_gpr * 8));
3344 TREE_SIDE_EFFECTS (t) = 1;
3345 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3346
3347 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3348 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3349 TREE_SIDE_EFFECTS (t) = 1;
3350 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3351
3352 /* Find the overflow area. */
3353 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3354 if (words != 0)
3355 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3356 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3357 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3358 TREE_SIDE_EFFECTS (t) = 1;
3359 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3360
3361 /* Find the register save area.
3362 The function prologue saves it right above the stack frame. */
3363 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3364 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3365 TREE_SIDE_EFFECTS (t) = 1;
3366 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3367 }
3368
3369 /* Implement va_arg. */
3370
3371 tree
3372 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3373 {
3374 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3375 tree f_gpr, f_fpr, f_ovf, f_sav;
3376 tree gpr, fpr, ovf, sav, t;
3377 int size, rsize;
3378 tree lab_false, lab_over = NULL_TREE;
3379 tree addr, t2;
3380 rtx container;
3381 int indirect_p = 0;
3382 tree ptrtype;
3383 enum machine_mode nat_mode;
3384
3385 /* Only the 64-bit target needs anything special. */
3386 if (!TARGET_64BIT)
3387 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3388
3389 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3390 f_fpr = TREE_CHAIN (f_gpr);
3391 f_ovf = TREE_CHAIN (f_fpr);
3392 f_sav = TREE_CHAIN (f_ovf);
3393
3394 valist = build_va_arg_indirect_ref (valist);
3395 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3396 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3397 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3398 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3399
3400 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3401 if (indirect_p)
3402 type = build_pointer_type (type);
3403 size = int_size_in_bytes (type);
3404 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3405
3406 nat_mode = type_natural_mode (type);
3407 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3408 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3409
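/* CONTAINER describes how the argument would be passed in registers: a
   single REG, or a PARALLEL of (register, offset) pairs; it is NULL when
   the argument is passed entirely in memory.  */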
3410 /* Pull the value out of the saved registers. */
3411
3412 addr = create_tmp_var (ptr_type_node, "addr");
3413 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3414
3415 if (container)
3416 {
3417 int needed_intregs, needed_sseregs;
3418 bool need_temp;
3419 tree int_addr, sse_addr;
3420
3421 lab_false = create_artificial_label ();
3422 lab_over = create_artificial_label ();
3423
3424 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3425
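/* A temporary is needed when the value cannot be read directly from the
   save area: the integer slots guarantee only 64-bit alignment and the
   whole area only 128-bit alignment, so over-aligned types must be copied
   out first.  */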
3426 need_temp = (!REG_P (container)
3427 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3428 || TYPE_ALIGN (type) > 128));
3429
3430 /* In case we are passing a structure, verify that it is a consecutive block
3431 in the register save area. If not, we need to do moves. */
3432 if (!need_temp && !REG_P (container))
3433 {
3434 /* Verify that all registers are strictly consecutive */
3435 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3436 {
3437 int i;
3438
3439 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3440 {
3441 rtx slot = XVECEXP (container, 0, i);
3442 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3443 || INTVAL (XEXP (slot, 1)) != i * 16)
3444 need_temp = 1;
3445 }
3446 }
3447 else
3448 {
3449 int i;
3450
3451 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3452 {
3453 rtx slot = XVECEXP (container, 0, i);
3454 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3455 || INTVAL (XEXP (slot, 1)) != i * 8)
3456 need_temp = 1;
3457 }
3458 }
3459 }
3460 if (!need_temp)
3461 {
3462 int_addr = addr;
3463 sse_addr = addr;
3464 }
3465 else
3466 {
3467 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3468 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3469 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3470 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3471 }
3472
3473 /* First ensure that we fit completely in registers. */
3474 if (needed_intregs)
3475 {
3476 t = build_int_cst (TREE_TYPE (gpr),
3477 (REGPARM_MAX - needed_intregs + 1) * 8);
3478 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3479 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3480 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3481 gimplify_and_add (t, pre_p);
3482 }
3483 if (needed_sseregs)
3484 {
3485 t = build_int_cst (TREE_TYPE (fpr),
3486 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3487 + REGPARM_MAX * 8);
3488 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3489 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3490 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3491 gimplify_and_add (t, pre_p);
3492 }
3493
3494 /* Compute index to start of area used for integer regs. */
3495 if (needed_intregs)
3496 {
3497 /* int_addr = gpr + sav; */
3498 t = fold_convert (ptr_type_node, gpr);
3499 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3500 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3501 gimplify_and_add (t, pre_p);
3502 }
3503 if (needed_sseregs)
3504 {
3505 /* sse_addr = fpr + sav; */
3506 t = fold_convert (ptr_type_node, fpr);
3507 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3508 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3509 gimplify_and_add (t, pre_p);
3510 }
3511 if (need_temp)
3512 {
3513 int i;
3514 tree temp = create_tmp_var (type, "va_arg_tmp");
3515
3516 /* addr = &temp; */
3517 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3518 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3519 gimplify_and_add (t, pre_p);
3520
3521 for (i = 0; i < XVECLEN (container, 0); i++)
3522 {
3523 rtx slot = XVECEXP (container, 0, i);
3524 rtx reg = XEXP (slot, 0);
3525 enum machine_mode mode = GET_MODE (reg);
3526 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3527 tree addr_type = build_pointer_type (piece_type);
3528 tree src_addr, src;
3529 int src_offset;
3530 tree dest_addr, dest;
3531
3532 if (SSE_REGNO_P (REGNO (reg)))
3533 {
3534 src_addr = sse_addr;
3535 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3536 }
3537 else
3538 {
3539 src_addr = int_addr;
3540 src_offset = REGNO (reg) * 8;
3541 }
3542 src_addr = fold_convert (addr_type, src_addr);
3543 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
3544 size_int (src_offset)));
3545 src = build_va_arg_indirect_ref (src_addr);
3546
3547 dest_addr = fold_convert (addr_type, addr);
3548 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
3549 size_int (INTVAL (XEXP (slot, 1)))));
3550 dest = build_va_arg_indirect_ref (dest_addr);
3551
3552 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
3553 gimplify_and_add (t, pre_p);
3554 }
3555 }
3556
3557 if (needed_intregs)
3558 {
3559 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3560 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
3561 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3562 gimplify_and_add (t, pre_p);
3563 }
3564 if (needed_sseregs)
3565 {
3566 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3567 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
3568 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3569 gimplify_and_add (t, pre_p);
3570 }
3571
3572 t = build1 (GOTO_EXPR, void_type_node, lab_over);
3573 gimplify_and_add (t, pre_p);
3574
3575 t = build1 (LABEL_EXPR, void_type_node, lab_false);
3576 append_to_statement_list (t, pre_p);
3577 }
3578
3579 /* ... otherwise out of the overflow area. */
3580
3581 /* Care for on-stack alignment if needed. */
3582 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
3583 t = ovf;
3584 else
3585 {
3586 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
3587 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
3588 build_int_cst (TREE_TYPE (ovf), align - 1));
3589 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3590 build_int_cst (TREE_TYPE (t), -align));
3591 }
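/* For example, a type with a 128-bit boundary yields align == 16 and rounds
   the overflow pointer up via t = (ovf + 15) & -16.  */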
3592 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
3593
3594 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
3595 gimplify_and_add (t2, pre_p);
3596
3597 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
3598 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
3599 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3600 gimplify_and_add (t, pre_p);
3601
3602 if (container)
3603 {
3604 t = build1 (LABEL_EXPR, void_type_node, lab_over);
3605 append_to_statement_list (t, pre_p);
3606 }
3607
3608 ptrtype = build_pointer_type (type);
3609 addr = fold_convert (ptrtype, addr);
3610
3611 if (indirect_p)
3612 addr = build_va_arg_indirect_ref (addr);
3613 return build_va_arg_indirect_ref (addr);
3614 }
3615 \f
3616 /* Return nonzero if OPNUM's MEM should be matched
3617 in movabs* patterns. */
3618
3619 int
3620 ix86_check_movabs (rtx insn, int opnum)
3621 {
3622 rtx set, mem;
3623
3624 set = PATTERN (insn);
3625 if (GET_CODE (set) == PARALLEL)
3626 set = XVECEXP (set, 0, 0);
3627 if (GET_CODE (set) != SET)
3628 abort ();
3629 mem = XEXP (set, opnum);
3630 while (GET_CODE (mem) == SUBREG)
3631 mem = SUBREG_REG (mem);
3632 if (GET_CODE (mem) != MEM)
3633 abort ();
3634 return (volatile_ok || !MEM_VOLATILE_P (mem));
3635 }
3636 \f
3637 /* Initialize the table of extra 80387 mathematical constants. */
3638
3639 static void
3640 init_ext_80387_constants (void)
3641 {
3642 static const char * cst[5] =
3643 {
3644 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
3645 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
3646 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
3647 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
3648 "3.1415926535897932385128089594061862044", /* 4: fldpi */
3649 };
3650 int i;
3651
3652 for (i = 0; i < 5; i++)
3653 {
3654 real_from_string (&ext_80387_constants_table[i], cst[i]);
3655 /* Ensure each constant is rounded to XFmode precision. */
3656 real_convert (&ext_80387_constants_table[i],
3657 XFmode, &ext_80387_constants_table[i]);
3658 }
3659
3660 ext_80387_constants_init = 1;
3661 }
3662
3663 /* Return a nonzero index if constant X can be loaded with a special 80387
3664 instruction: 1 fldz, 2 fld1, 3..7 the table above; 0 if none, -1 if not FP. */
3665
3666 int
3667 standard_80387_constant_p (rtx x)
3668 {
3669 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
3670 return -1;
3671
3672 if (x == CONST0_RTX (GET_MODE (x)))
3673 return 1;
3674 if (x == CONST1_RTX (GET_MODE (x)))
3675 return 2;
3676
3677 /* For XFmode constants, try to find a special 80387 instruction when
3678 optimizing for size or on those CPUs that benefit from them. */
3679 if (GET_MODE (x) == XFmode
3680 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
3681 {
3682 REAL_VALUE_TYPE r;
3683 int i;
3684
3685 if (! ext_80387_constants_init)
3686 init_ext_80387_constants ();
3687
3688 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3689 for (i = 0; i < 5; i++)
3690 if (real_identical (&r, &ext_80387_constants_table[i]))
3691 return i + 3;
3692 }
3693
3694 return 0;
3695 }
3696
3697 /* Return the opcode of the special instruction to be used to load
3698 the constant X. */
3699
3700 const char *
3701 standard_80387_constant_opcode (rtx x)
3702 {
3703 switch (standard_80387_constant_p (x))
3704 {
3705 case 1:
3706 return "fldz";
3707 case 2:
3708 return "fld1";
3709 case 3:
3710 return "fldlg2";
3711 case 4:
3712 return "fldln2";
3713 case 5:
3714 return "fldl2e";
3715 case 6:
3716 return "fldl2t";
3717 case 7:
3718 return "fldpi";
3719 }
3720 abort ();
3721 }
3722
3723 /* Return the CONST_DOUBLE representing the 80387 constant that is
3724 loaded by the specified special instruction. The argument IDX
3725 matches the return value from standard_80387_constant_p. */
3726
3727 rtx
3728 standard_80387_constant_rtx (int idx)
3729 {
3730 int i;
3731
3732 if (! ext_80387_constants_init)
3733 init_ext_80387_constants ();
3734
3735 switch (idx)
3736 {
3737 case 3:
3738 case 4:
3739 case 5:
3740 case 6:
3741 case 7:
3742 i = idx - 3;
3743 break;
3744
3745 default:
3746 abort ();
3747 }
3748
3749 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
3750 XFmode);
3751 }
3752
3753 /* Return 1 if X is an FP constant that we can load into an SSE register
3754 without using memory. */
3755 int
3756 standard_sse_constant_p (rtx x)
3757 {
3758 if (x == const0_rtx)
3759 return 1;
3760 return (x == CONST0_RTX (GET_MODE (x)));
3761 }
3762
3763 /* Return 1 if OP contains a symbol reference. */
3764
3765 int
3766 symbolic_reference_mentioned_p (rtx op)
3767 {
3768 const char *fmt;
3769 int i;
3770
3771 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3772 return 1;
3773
3774 fmt = GET_RTX_FORMAT (GET_CODE (op));
3775 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3776 {
3777 if (fmt[i] == 'E')
3778 {
3779 int j;
3780
3781 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3782 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3783 return 1;
3784 }
3785
3786 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3787 return 1;
3788 }
3789
3790 return 0;
3791 }
3792
3793 /* Return 1 if it is appropriate to emit `ret' instructions in the
3794 body of a function. Do this only if the epilogue is simple, needing a
3795 couple of insns. Prior to reloading, we can't tell how many registers
3796 must be saved, so return 0 then. Return 0 if there is no frame
3797 marker to de-allocate. */
3798
3799 int
3800 ix86_can_use_return_insn_p (void)
3801 {
3802 struct ix86_frame frame;
3803
3804 if (! reload_completed || frame_pointer_needed)
3805 return 0;
3806
3807 /* Don't allow more than 32K bytes to be popped, since that's all we can do
3808 with one instruction. */
3809 if (current_function_pops_args
3810 && current_function_args_size >= 32768)
3811 return 0;
3812
3813 ix86_compute_frame_layout (&frame);
3814 return frame.to_allocate == 0 && frame.nregs == 0;
3815 }
3816 \f
3817 /* Value should be nonzero if functions must have frame pointers.
3818 Zero means the frame pointer need not be set up (and parms may
3819 be accessed via the stack pointer) in functions that seem suitable. */
3820
3821 int
3822 ix86_frame_pointer_required (void)
3823 {
3824 /* If we accessed previous frames, then the generated code expects
3825 to be able to access the saved ebp value in our frame. */
3826 if (cfun->machine->accesses_prev_frame)
3827 return 1;
3828
3829 /* Several x86 OSes need a frame pointer for other reasons,
3830 usually pertaining to setjmp. */
3831 if (SUBTARGET_FRAME_POINTER_REQUIRED)
3832 return 1;
3833
3834 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
3835 the frame pointer by default. Turn it back on now if we've not
3836 got a leaf function. */
3837 if (TARGET_OMIT_LEAF_FRAME_POINTER
3838 && (!current_function_is_leaf))
3839 return 1;
3840
3841 if (current_function_profile)
3842 return 1;
3843
3844 return 0;
3845 }
3846
3847 /* Record that the current function accesses previous call frames. */
3848
3849 void
3850 ix86_setup_frame_addresses (void)
3851 {
3852 cfun->machine->accesses_prev_frame = 1;
3853 }
3854 \f
3855 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
3856 # define USE_HIDDEN_LINKONCE 1
3857 #else
3858 # define USE_HIDDEN_LINKONCE 0
3859 #endif
3860
3861 static int pic_labels_used;
3862
3863 /* Fills in the label name that should be used for a pc thunk for
3864 the given register. */
3865
3866 static void
3867 get_pc_thunk_name (char name[32], unsigned int regno)
3868 {
3869 if (USE_HIDDEN_LINKONCE)
3870 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
3871 else
3872 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
3873 }
3874
3875
3876 /* This function emits the pc thunks used by -fpic code: each thunk loads
3877 its register with the return address of the caller and then returns. */
3878
3879 void
3880 ix86_file_end (void)
3881 {
3882 rtx xops[2];
3883 int regno;
3884
3885 for (regno = 0; regno < 8; ++regno)
3886 {
3887 char name[32];
3888
3889 if (! ((pic_labels_used >> regno) & 1))
3890 continue;
3891
3892 get_pc_thunk_name (name, regno);
3893
3894 if (USE_HIDDEN_LINKONCE)
3895 {
3896 tree decl;
3897
3898 decl = build_decl (FUNCTION_DECL, get_identifier (name),
3899 error_mark_node);
3900 TREE_PUBLIC (decl) = 1;
3901 TREE_STATIC (decl) = 1;
3902 DECL_ONE_ONLY (decl) = 1;
3903
3904 (*targetm.asm_out.unique_section) (decl, 0);
3905 named_section (decl, NULL, 0);
3906
3907 (*targetm.asm_out.globalize_label) (asm_out_file, name);
3908 fputs ("\t.hidden\t", asm_out_file);
3909 assemble_name (asm_out_file, name);
3910 fputc ('\n', asm_out_file);
3911 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
3912 }
3913 else
3914 {
3915 text_section ();
3916 ASM_OUTPUT_LABEL (asm_out_file, name);
3917 }
3918
3919 xops[0] = gen_rtx_REG (SImode, regno);
3920 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
3921 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
3922 output_asm_insn ("ret", xops);
3923 }
3924
3925 if (NEED_INDICATE_EXEC_STACK)
3926 file_end_indicate_exec_stack ();
3927 }
3928
3929 /* Emit code for the SET_GOT patterns. */
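/* When deep branch prediction is enabled and we are generating PIC code, the
   PC is obtained by calling one of the per-register pc thunks emitted by
   ix86_file_end; otherwise it is materialized inline, either with a mov of a
   local label (non-PIC) or a call to that label followed by a pop (PIC).  */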
3930
3931 const char *
3932 output_set_got (rtx dest)
3933 {
3934 rtx xops[3];
3935
3936 xops[0] = dest;
3937 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
3938
3939 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
3940 {
3941 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
3942
3943 if (!flag_pic)
3944 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
3945 else
3946 output_asm_insn ("call\t%a2", xops);
3947
3948 #if TARGET_MACHO
3949 /* Output the "canonical" label name ("Lxx$pb") here too. This
3950 is what will be referred to by the Mach-O PIC subsystem. */
3951 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
3952 #endif
3953 (*targetm.asm_out.internal_label) (asm_out_file, "L",
3954 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
3955
3956 if (flag_pic)
3957 output_asm_insn ("pop{l}\t%0", xops);
3958 }
3959 else
3960 {
3961 char name[32];
3962 get_pc_thunk_name (name, REGNO (dest));
3963 pic_labels_used |= 1 << REGNO (dest);
3964
3965 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3966 xops[2] = gen_rtx_MEM (QImode, xops[2]);
3967 output_asm_insn ("call\t%X2", xops);
3968 }
3969
3970 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
3971 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
3972 else if (!TARGET_MACHO)
3973 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
3974
3975 return "";
3976 }
3977
3978 /* Generate a "push" pattern for input ARG. */
3979
3980 static rtx
3981 gen_push (rtx arg)
3982 {
3983 return gen_rtx_SET (VOIDmode,
3984 gen_rtx_MEM (Pmode,
3985 gen_rtx_PRE_DEC (Pmode,
3986 stack_pointer_rtx)),
3987 arg);
3988 }
3989
3990 /* Return the number of an unused call-clobbered register available for the
3991 entire function, or INVALID_REGNUM if there is none. */
3992
3993 static unsigned int
3994 ix86_select_alt_pic_regnum (void)
3995 {
3996 if (current_function_is_leaf && !current_function_profile)
3997 {
3998 int i;
3999 for (i = 2; i >= 0; --i)
4000 if (!regs_ever_live[i])
4001 return i;
4002 }
4003
4004 return INVALID_REGNUM;
4005 }
4006
4007 /* Return 1 if we need to save REGNO. */
4008 static int
4009 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4010 {
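/* The PIC register needs a stack slot whenever the function really uses it
   (directly, via profiling, eh_return or the constant pool), unless an
   otherwise unused call-clobbered register can serve as the PIC register
   instead.  */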
4011 if (pic_offset_table_rtx
4012 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4013 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4014 || current_function_profile
4015 || current_function_calls_eh_return
4016 || current_function_uses_const_pool))
4017 {
4018 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4019 return 0;
4020 return 1;
4021 }
4022
4023 if (current_function_calls_eh_return && maybe_eh_return)
4024 {
4025 unsigned i;
4026 for (i = 0; ; i++)
4027 {
4028 unsigned test = EH_RETURN_DATA_REGNO (i);
4029 if (test == INVALID_REGNUM)
4030 break;
4031 if (test == regno)
4032 return 1;
4033 }
4034 }
4035
4036 return (regs_ever_live[regno]
4037 && !call_used_regs[regno]
4038 && !fixed_regs[regno]
4039 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4040 }
4041
4042 /* Return number of registers to be saved on the stack. */
4043
4044 static int
4045 ix86_nsaved_regs (void)
4046 {
4047 int nregs = 0;
4048 int regno;
4049
4050 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4051 if (ix86_save_reg (regno, true))
4052 nregs++;
4053 return nregs;
4054 }
4055
4056 /* Return the offset between two registers, one to be eliminated, and the other
4057 its replacement, at the start of a routine. */
4058
4059 HOST_WIDE_INT
4060 ix86_initial_elimination_offset (int from, int to)
4061 {
4062 struct ix86_frame frame;
4063 ix86_compute_frame_layout (&frame);
4064
4065 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4066 return frame.hard_frame_pointer_offset;
4067 else if (from == FRAME_POINTER_REGNUM
4068 && to == HARD_FRAME_POINTER_REGNUM)
4069 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4070 else
4071 {
4072 if (to != STACK_POINTER_REGNUM)
4073 abort ();
4074 else if (from == ARG_POINTER_REGNUM)
4075 return frame.stack_pointer_offset;
4076 else if (from != FRAME_POINTER_REGNUM)
4077 abort ();
4078 else
4079 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4080 }
4081 }
4082
4083 /* Fill the ix86_frame structure describing the frame of the currently compiled function. */
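/* The frame laid out below grows downwards from the incoming stack pointer:

     <return address>
     [saved frame pointer]            (if frame_pointer_needed)
     saved registers                  (frame->nregs words)
     va-arg register save area        (if ix86_save_varrargs_registers)
     padding1                         (aligns the local frame)
     local variables                  (get_frame_size () bytes)
     outgoing arguments               (if ACCUMULATE_OUTGOING_ARGS)
     padding2                         (aligns to the stack boundary)

   frame->to_allocate covers everything below the saved registers, minus any
   red zone that the leaf function may use without adjusting the stack.  */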
4084
4085 static void
4086 ix86_compute_frame_layout (struct ix86_frame *frame)
4087 {
4088 HOST_WIDE_INT total_size;
4089 unsigned int stack_alignment_needed;
4090 HOST_WIDE_INT offset;
4091 unsigned int preferred_alignment;
4092 HOST_WIDE_INT size = get_frame_size ();
4093
4094 frame->nregs = ix86_nsaved_regs ();
4095 total_size = size;
4096
4097 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4098 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4099
4100 /* During reload iteration the number of registers saved can change.
4101 Recompute the value as needed. Do not recompute when the number of
4102 registers didn't change, as reload makes multiple calls to this function
4103 and does not expect the decision to change within a single iteration. */
4104 if (!optimize_size
4105 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4106 {
4107 int count = frame->nregs;
4108
4109 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4110 /* The fast prologue uses move instead of push to save registers. This
4111 is significantly longer, but also executes faster as modern hardware
4112 can execute the moves in parallel, but can't do that for push/pop.
4113
4114 Be careful about choosing which prologue to emit: when the function takes
4115 many instructions to execute, we may use the slow version, and likewise
4116 when the function is known to be outside a hot spot (this is known with
4117 feedback only). Weight the size of the function by the number of registers
4118 to save, as it is cheap to use one or two push instructions but very
4119 slow to use many of them. */
4120 if (count)
4121 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4122 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4123 || (flag_branch_probabilities
4124 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4125 cfun->machine->use_fast_prologue_epilogue = false;
4126 else
4127 cfun->machine->use_fast_prologue_epilogue
4128 = !expensive_function_p (count);
4129 }
4130 if (TARGET_PROLOGUE_USING_MOVE
4131 && cfun->machine->use_fast_prologue_epilogue)
4132 frame->save_regs_using_mov = true;
4133 else
4134 frame->save_regs_using_mov = false;
4135
4136
4137 /* Skip return address and saved base pointer. */
4138 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4139
4140 frame->hard_frame_pointer_offset = offset;
4141
4142 /* Do some sanity checking of stack_alignment_needed and
4143 preferred_alignment, since the i386 port is the only one using these
4144 features, and they may break easily. */
4145
4146 if (size && !stack_alignment_needed)
4147 abort ();
4148 if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
4149 abort ();
4150 if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4151 abort ();
4152 if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4153 abort ();
4154
4155 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4156 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4157
4158 /* Register save area */
4159 offset += frame->nregs * UNITS_PER_WORD;
4160
4161 /* Va-arg area */
4162 if (ix86_save_varrargs_registers)
4163 {
4164 offset += X86_64_VARARGS_SIZE;
4165 frame->va_arg_size = X86_64_VARARGS_SIZE;
4166 }
4167 else
4168 frame->va_arg_size = 0;
4169
4170 /* Align start of frame for local function. */
4171 frame->padding1 = ((offset + stack_alignment_needed - 1)
4172 & -stack_alignment_needed) - offset;
4173
4174 offset += frame->padding1;
4175
4176 /* Frame pointer points here. */
4177 frame->frame_pointer_offset = offset;
4178
4179 offset += size;
4180
4181 /* Add the outgoing arguments area. It can be skipped if we eliminated
4182 all the function calls as dead code.
4183 Skipping is however impossible when the function calls alloca: the alloca
4184 expander assumes that the last current_function_outgoing_args_size bytes
4185 of the stack frame are unused. */
4186 if (ACCUMULATE_OUTGOING_ARGS
4187 && (!current_function_is_leaf || current_function_calls_alloca))
4188 {
4189 offset += current_function_outgoing_args_size;
4190 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4191 }
4192 else
4193 frame->outgoing_arguments_size = 0;
4194
4195 /* Align stack boundary. Only needed if we're calling another function
4196 or using alloca. */
4197 if (!current_function_is_leaf || current_function_calls_alloca)
4198 frame->padding2 = ((offset + preferred_alignment - 1)
4199 & -preferred_alignment) - offset;
4200 else
4201 frame->padding2 = 0;
4202
4203 offset += frame->padding2;
4204
4205 /* We've reached end of stack frame. */
4206 frame->stack_pointer_offset = offset;
4207
4208 /* Size prologue needs to allocate. */
4209 frame->to_allocate =
4210 (size + frame->padding1 + frame->padding2
4211 + frame->outgoing_arguments_size + frame->va_arg_size);
4212
4213 if ((!frame->to_allocate && frame->nregs <= 1)
4214 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4215 frame->save_regs_using_mov = false;
4216
4217 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4218 && current_function_is_leaf)
4219 {
4220 frame->red_zone_size = frame->to_allocate;
4221 if (frame->save_regs_using_mov)
4222 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4223 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4224 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4225 }
4226 else
4227 frame->red_zone_size = 0;
4228 frame->to_allocate -= frame->red_zone_size;
4229 frame->stack_pointer_offset -= frame->red_zone_size;
4230 #if 0
4231 fprintf (stderr, "nregs: %i\n", frame->nregs);
4232 fprintf (stderr, "size: %i\n", size);
4233 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4234 fprintf (stderr, "padding1: %i\n", frame->padding1);
4235 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4236 fprintf (stderr, "padding2: %i\n", frame->padding2);
4237 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4238 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4239 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4240 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4241 frame->hard_frame_pointer_offset);
4242 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4243 #endif
4244 }
4245
4246 /* Emit code to save registers in the prologue. */
4247
4248 static void
4249 ix86_emit_save_regs (void)
4250 {
4251 int regno;
4252 rtx insn;
4253
4254 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4255 if (ix86_save_reg (regno, true))
4256 {
4257 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4258 RTX_FRAME_RELATED_P (insn) = 1;
4259 }
4260 }
4261
4262 /* Emit code to save registers using MOV insns. The first register
4263 is stored at POINTER + OFFSET. */
4264 static void
4265 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4266 {
4267 int regno;
4268 rtx insn;
4269
4270 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4271 if (ix86_save_reg (regno, true))
4272 {
4273 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4274 Pmode, offset),
4275 gen_rtx_REG (Pmode, regno));
4276 RTX_FRAME_RELATED_P (insn) = 1;
4277 offset += UNITS_PER_WORD;
4278 }
4279 }
4280
4281 /* Expand prologue or epilogue stack adjustment.
4282 The pattern exists to put a dependency on all ebp-based memory accesses.
4283 STYLE should be negative if instructions should be marked as frame related,
4284 zero if the %r11 register is live and cannot be freely used, and positive
4285 otherwise. */
4286
4287 static void
4288 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4289 {
4290 rtx insn;
4291
4292 if (! TARGET_64BIT)
4293 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4294 else if (x86_64_immediate_operand (offset, DImode))
4295 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4296 else
4297 {
4298 rtx r11;
4299 /* r11 is used by indirect sibcall return as well, set before the
4300 epilogue and used after the epilogue. ATM indirect sibcall
4301 shouldn't be used together with huge frame sizes in one
4302 function because of the frame_size check in sibcall.c. */
4303 if (style == 0)
4304 abort ();
4305 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4306 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4307 if (style < 0)
4308 RTX_FRAME_RELATED_P (insn) = 1;
4309 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4310 offset));
4311 }
4312 if (style < 0)
4313 RTX_FRAME_RELATED_P (insn) = 1;
4314 }
4315
4316 /* Expand the prologue into a bunch of separate insns. */
4317
4318 void
4319 ix86_expand_prologue (void)
4320 {
4321 rtx insn;
4322 bool pic_reg_used;
4323 struct ix86_frame frame;
4324 HOST_WIDE_INT allocate;
4325
4326 ix86_compute_frame_layout (&frame);
4327
4328 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4329 slower on all targets. Also sdb doesn't like it. */
4330
4331 if (frame_pointer_needed)
4332 {
4333 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4334 RTX_FRAME_RELATED_P (insn) = 1;
4335
4336 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4337 RTX_FRAME_RELATED_P (insn) = 1;
4338 }
4339
4340 allocate = frame.to_allocate;
4341
4342 if (!frame.save_regs_using_mov)
4343 ix86_emit_save_regs ();
4344 else
4345 allocate += frame.nregs * UNITS_PER_WORD;
4346
4347 /* When using the red zone we may start register saving before allocating
4348 the stack frame, saving one cycle of the prologue. */
4349 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4350 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4351 : stack_pointer_rtx,
4352 -frame.nregs * UNITS_PER_WORD);
4353
4354 if (allocate == 0)
4355 ;
4356 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4357 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4358 GEN_INT (-allocate), -1);
4359 else
4360 {
4361 /* Only valid for Win32. */
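/* The allocation is done via the allocate_stack_worker pattern, which expects
   the byte count in %eax; if %eax is live carrying an incoming argument it is
   preserved with a push here and reloaded from the frame afterwards.  */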
4362 rtx eax = gen_rtx_REG (SImode, 0);
4363 bool eax_live = ix86_eax_live_at_start_p ();
4364 rtx t;
4365
4366 if (TARGET_64BIT)
4367 abort ();
4368
4369 if (eax_live)
4370 {
4371 emit_insn (gen_push (eax));
4372 allocate -= 4;
4373 }
4374
4375 emit_move_insn (eax, GEN_INT (allocate));
4376
4377 insn = emit_insn (gen_allocate_stack_worker (eax));
4378 RTX_FRAME_RELATED_P (insn) = 1;
4379 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4380 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4381 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4382 t, REG_NOTES (insn));
4383
4384 if (eax_live)
4385 {
4386 if (frame_pointer_needed)
4387 t = plus_constant (hard_frame_pointer_rtx,
4388 allocate
4389 - frame.to_allocate
4390 - frame.nregs * UNITS_PER_WORD);
4391 else
4392 t = plus_constant (stack_pointer_rtx, allocate);
4393 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4394 }
4395 }
4396
4397 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4398 {
4399 if (!frame_pointer_needed || !frame.to_allocate)
4400 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4401 else
4402 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4403 -frame.nregs * UNITS_PER_WORD);
4404 }
4405
4406 pic_reg_used = false;
4407 if (pic_offset_table_rtx
4408 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4409 || current_function_profile))
4410 {
4411 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4412
4413 if (alt_pic_reg_used != INVALID_REGNUM)
4414 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4415
4416 pic_reg_used = true;
4417 }
4418
4419 if (pic_reg_used)
4420 {
4421 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4422
4423 /* Even with accurate pre-reload life analysis, we can wind up
4424 deleting all references to the pic register after reload.
4425 Consider if cross-jumping unifies two sides of a branch
4426 controlled by a comparison vs the only read from a global.
4427 In which case, allow the set_got to be deleted, though we're
4428 too late to do anything about the ebx save in the prologue. */
4429 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
4430 }
4431
4432 /* Prevent function calls from being scheduled before the call to mcount.
4433 In the pic_reg_used case, make sure that the got load isn't deleted. */
4434 if (current_function_profile)
4435 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
4436 }
4437
4438 /* Emit code to restore saved registers using MOV insns. First register
4439 is restored from POINTER + OFFSET. */
4440 static void
4441 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4442 int maybe_eh_return)
4443 {
4444 int regno;
4445 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4446
4447 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4448 if (ix86_save_reg (regno, maybe_eh_return))
4449 {
4450 /* Ensure that adjust_address won't be forced to produce a pointer
4451 out of the range allowed by the x86-64 instruction set. */
4452 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4453 {
4454 rtx r11;
4455
4456 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4457 emit_move_insn (r11, GEN_INT (offset));
4458 emit_insn (gen_adddi3 (r11, r11, pointer));
4459 base_address = gen_rtx_MEM (Pmode, r11);
4460 offset = 0;
4461 }
4462 emit_move_insn (gen_rtx_REG (Pmode, regno),
4463 adjust_address (base_address, Pmode, offset));
4464 offset += UNITS_PER_WORD;
4465 }
4466 }
4467
4468 /* Restore function stack, frame, and registers. */
4469
4470 void
4471 ix86_expand_epilogue (int style)
4472 {
4473 int regno;
4474 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4475 struct ix86_frame frame;
4476 HOST_WIDE_INT offset;
4477
4478 ix86_compute_frame_layout (&frame);
4479
4480 /* Calculate start of saved registers relative to ebp. Special care
4481 must be taken for the normal return case of a function using
4482 eh_return: the eax and edx registers are marked as saved, but not
4483 restored along this path. */
4484 offset = frame.nregs;
4485 if (current_function_calls_eh_return && style != 2)
4486 offset -= 2;
4487 offset *= -UNITS_PER_WORD;
4488
4489 /* If we're only restoring one register and sp is not valid then
4490 use a move instruction to restore the register, since it's
4491 less work than reloading sp and popping the register.
4492
4493 The default code results in a stack adjustment using an add/lea instruction,
4494 while this code results in a LEAVE instruction (or discrete equivalent),
4495 so it is profitable in some other cases as well, especially when there
4496 are no registers to restore. We also use this code when TARGET_USE_LEAVE
4497 and there is exactly one register to pop. This heuristic may need some
4498 tuning in the future. */
4499 if ((!sp_valid && frame.nregs <= 1)
4500 || (TARGET_EPILOGUE_USING_MOVE
4501 && cfun->machine->use_fast_prologue_epilogue
4502 && (frame.nregs > 1 || frame.to_allocate))
4503 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4504 || (frame_pointer_needed && TARGET_USE_LEAVE
4505 && cfun->machine->use_fast_prologue_epilogue
4506 && frame.nregs == 1)
4507 || current_function_calls_eh_return)
4508 {
4509 /* Restore registers. We can use ebp or esp to address the memory
4510 locations. If both are available, default to ebp, since offsets
4511 are known to be small. The only exception is esp pointing directly to
4512 the end of the block of saved registers, where we may simplify the
4513 addressing mode. */
4514
4515 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4516 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4517 frame.to_allocate, style == 2);
4518 else
4519 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4520 offset, style == 2);
4521
4522 /* eh_return epilogues need %ecx added to the stack pointer. */
4523 if (style == 2)
4524 {
4525 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4526
4527 if (frame_pointer_needed)
4528 {
4529 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4530 tmp = plus_constant (tmp, UNITS_PER_WORD);
4531 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4532
4533 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4534 emit_move_insn (hard_frame_pointer_rtx, tmp);
4535
4536 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4537 const0_rtx, style);
4538 }
4539 else
4540 {
4541 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4542 tmp = plus_constant (tmp, (frame.to_allocate
4543 + frame.nregs * UNITS_PER_WORD));
4544 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
4545 }
4546 }
4547 else if (!frame_pointer_needed)
4548 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4549 GEN_INT (frame.to_allocate
4550 + frame.nregs * UNITS_PER_WORD),
4551 style);
4552 /* If not an i386, mov & pop is faster than "leave". */
4553 else if (TARGET_USE_LEAVE || optimize_size
4554 || !cfun->machine->use_fast_prologue_epilogue)
4555 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4556 else
4557 {
4558 pro_epilogue_adjust_stack (stack_pointer_rtx,
4559 hard_frame_pointer_rtx,
4560 const0_rtx, style);
4561 if (TARGET_64BIT)
4562 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4563 else
4564 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4565 }
4566 }
4567 else
4568 {
4569 /* First step is to deallocate the stack frame so that we can
4570 pop the registers. */
4571 if (!sp_valid)
4572 {
4573 if (!frame_pointer_needed)
4574 abort ();
4575 pro_epilogue_adjust_stack (stack_pointer_rtx,
4576 hard_frame_pointer_rtx,
4577 GEN_INT (offset), style);
4578 }
4579 else if (frame.to_allocate)
4580 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4581 GEN_INT (frame.to_allocate), style);
4582
4583 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4584 if (ix86_save_reg (regno, false))
4585 {
4586 if (TARGET_64BIT)
4587 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
4588 else
4589 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
4590 }
4591 if (frame_pointer_needed)
4592 {
4593 /* The leave insn results in shorter dependency chains on CPUs that are
4594 able to grok it fast. */
4595 if (TARGET_USE_LEAVE)
4596 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4597 else if (TARGET_64BIT)
4598 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4599 else
4600 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4601 }
4602 }
4603
4604 /* Sibcall epilogues don't want a return instruction. */
4605 if (style == 0)
4606 return;
4607
4608 if (current_function_pops_args && current_function_args_size)
4609 {
4610 rtx popc = GEN_INT (current_function_pops_args);
4611
4612 /* i386 can only pop 64K bytes. If asked to pop more, pop
4613 return address, do explicit add, and jump indirectly to the
4614 caller. */
4615
4616 if (current_function_pops_args >= 65536)
4617 {
4618 rtx ecx = gen_rtx_REG (SImode, 2);
4619
4620 /* There is no "pascal" calling convention in 64bit ABI. */
4621 if (TARGET_64BIT)
4622 abort ();
4623
4624 emit_insn (gen_popsi1 (ecx));
4625 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
4626 emit_jump_insn (gen_return_indirect_internal (ecx));
4627 }
4628 else
4629 emit_jump_insn (gen_return_pop_internal (popc));
4630 }
4631 else
4632 emit_jump_insn (gen_return_internal ());
4633 }
4634
4635 /* Undo the function's potential modifications; at present this just restores the pic register's hard register number. */
4636
4637 static void
4638 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4639 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4640 {
4641 if (pic_offset_table_rtx)
4642 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
4643 }
4644 \f
4645 /* Extract the parts of an RTL expression that is a valid memory address
4646 for an instruction. Return 0 if the structure of the address is
4647 grossly off. Return -1 if the address contains an ASHIFT, so it is not
4648 strictly valid but is still used for computing the length of a lea instruction. */
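/* The decomposition targets the x86 effective-address form
   base + index * scale + displacement, with an optional segment override
   recorded in OUT->seg.  */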
4649
4650 int
4651 ix86_decompose_address (rtx addr, struct ix86_address *out)
4652 {
4653 rtx base = NULL_RTX;
4654 rtx index = NULL_RTX;
4655 rtx disp = NULL_RTX;
4656 HOST_WIDE_INT scale = 1;
4657 rtx scale_rtx = NULL_RTX;
4658 int retval = 1;
4659 enum ix86_address_seg seg = SEG_DEFAULT;
4660
4661 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
4662 base = addr;
4663 else if (GET_CODE (addr) == PLUS)
4664 {
4665 rtx addends[4], op;
4666 int n = 0, i;
4667
4668 op = addr;
4669 do
4670 {
4671 if (n >= 4)
4672 return 0;
4673 addends[n++] = XEXP (op, 1);
4674 op = XEXP (op, 0);
4675 }
4676 while (GET_CODE (op) == PLUS);
4677 if (n >= 4)
4678 return 0;
4679 addends[n] = op;
4680
4681 for (i = n; i >= 0; --i)
4682 {
4683 op = addends[i];
4684 switch (GET_CODE (op))
4685 {
4686 case MULT:
4687 if (index)
4688 return 0;
4689 index = XEXP (op, 0);
4690 scale_rtx = XEXP (op, 1);
4691 break;
4692
4693 case UNSPEC:
4694 if (XINT (op, 1) == UNSPEC_TP
4695 && TARGET_TLS_DIRECT_SEG_REFS
4696 && seg == SEG_DEFAULT)
4697 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
4698 else
4699 return 0;
4700 break;
4701
4702 case REG:
4703 case SUBREG:
4704 if (!base)
4705 base = op;
4706 else if (!index)
4707 index = op;
4708 else
4709 return 0;
4710 break;
4711
4712 case CONST:
4713 case CONST_INT:
4714 case SYMBOL_REF:
4715 case LABEL_REF:
4716 if (disp)
4717 return 0;
4718 disp = op;
4719 break;
4720
4721 default:
4722 return 0;
4723 }
4724 }
4725 }
4726 else if (GET_CODE (addr) == MULT)
4727 {
4728 index = XEXP (addr, 0); /* index*scale */
4729 scale_rtx = XEXP (addr, 1);
4730 }
4731 else if (GET_CODE (addr) == ASHIFT)
4732 {
4733 rtx tmp;
4734
4735 /* We're called for lea too, which implements ashift on occasion. */
4736 index = XEXP (addr, 0);
4737 tmp = XEXP (addr, 1);
4738 if (GET_CODE (tmp) != CONST_INT)
4739 return 0;
4740 scale = INTVAL (tmp);
4741 if ((unsigned HOST_WIDE_INT) scale > 3)
4742 return 0;
4743 scale = 1 << scale;
4744 retval = -1;
4745 }
4746 else
4747 disp = addr; /* displacement */
4748
4749 /* Extract the integral value of scale. */
4750 if (scale_rtx)
4751 {
4752 if (GET_CODE (scale_rtx) != CONST_INT)
4753 return 0;
4754 scale = INTVAL (scale_rtx);
4755 }
4756
4757 /* Allow arg pointer and stack pointer as index if there is no scaling. */
4758 if (base && index && scale == 1
4759 && (index == arg_pointer_rtx
4760 || index == frame_pointer_rtx
4761 || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM)))
4762 {
4763 rtx tmp = base;
4764 base = index;
4765 index = tmp;
4766 }
4767
4768 /* Special case: %ebp cannot be encoded as a base without a displacement. */
4769 if ((base == hard_frame_pointer_rtx
4770 || base == frame_pointer_rtx
4771 || base == arg_pointer_rtx) && !disp)
4772 disp = const0_rtx;
4773
4774 /* Special case: on K6, [%esi] makes the instruction vector decoded.
4775 Avoid this by transforming to [%esi+0]. */
4776 if (ix86_tune == PROCESSOR_K6 && !optimize_size
4777 && base && !index && !disp
4778 && REG_P (base)
4779 && REGNO_REG_CLASS (REGNO (base)) == SIREG)
4780 disp = const0_rtx;
4781
4782 /* Special case: encode reg+reg instead of reg*2. */
4783 if (!base && index && scale && scale == 2)
4784 base = index, scale = 1;
4785
4786 /* Special case: scaling cannot be encoded without base or displacement. */
4787 if (!base && !disp && index && scale != 1)
4788 disp = const0_rtx;
4789
4790 out->base = base;
4791 out->index = index;
4792 out->disp = disp;
4793 out->scale = scale;
4794 out->seg = seg;
4795
4796 return retval;
4797 }
4798 \f
4799 /* Return the cost of the memory address X.
4800 For i386, it is better to use a complex address than let gcc copy
4801 the address into a reg and make a new pseudo. But not if the address
4802 requires two regs - that would mean more pseudos with longer
4803 lifetimes. */
4804 static int
4805 ix86_address_cost (rtx x)
4806 {
4807 struct ix86_address parts;
4808 int cost = 1;
4809
4810 if (!ix86_decompose_address (x, &parts))
4811 abort ();
4812
4813 /* More complex memory references are better. */
4814 if (parts.disp && parts.disp != const0_rtx)
4815 cost--;
4816 if (parts.seg != SEG_DEFAULT)
4817 cost--;
4818
4819 /* Attempt to minimize number of registers in the address. */
4820 if ((parts.base
4821 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
4822 || (parts.index
4823 && (!REG_P (parts.index)
4824 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
4825 cost++;
4826
4827 if (parts.base
4828 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
4829 && parts.index
4830 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
4831 && parts.base != parts.index)
4832 cost++;
4833
4834 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
4835 since its predecode logic can't detect the length of such instructions
4836 and decoding degenerates to the vector decoder. Increase the cost of such
4837 addresses here. The penalty is at least 2 cycles. It may be worthwhile
4838 to split such addresses or even to refuse such addresses at all.
4839
4840 The following addressing modes are affected:
4841 [base+scale*index]
4842 [scale*index+disp]
4843 [base+index]
4844
4845 The first and last case may be avoidable by explicitly coding the zero in
4846 the memory address, but I don't have an AMD-K6 machine handy to check this
4847 theory. */
4848
4849 if (TARGET_K6
4850 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
4851 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
4852 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
4853 cost += 10;
4854
4855 return cost;
4856 }
4857 \f
4858 /* If X is a machine specific address (i.e. a symbol or label being
4859 referenced as a displacement from the GOT implemented using an
4860 UNSPEC), then return the base term. Otherwise return X. */
4861
4862 rtx
4863 ix86_find_base_term (rtx x)
4864 {
4865 rtx term;
4866
4867 if (TARGET_64BIT)
4868 {
4869 if (GET_CODE (x) != CONST)
4870 return x;
4871 term = XEXP (x, 0);
4872 if (GET_CODE (term) == PLUS
4873 && (GET_CODE (XEXP (term, 1)) == CONST_INT
4874 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
4875 term = XEXP (term, 0);
4876 if (GET_CODE (term) != UNSPEC
4877 || XINT (term, 1) != UNSPEC_GOTPCREL)
4878 return x;
4879
4880 term = XVECEXP (term, 0, 0);
4881
4882 if (GET_CODE (term) != SYMBOL_REF
4883 && GET_CODE (term) != LABEL_REF)
4884 return x;
4885
4886 return term;
4887 }
4888
4889 term = ix86_delegitimize_address (x);
4890
4891 if (GET_CODE (term) != SYMBOL_REF
4892 && GET_CODE (term) != LABEL_REF)
4893 return x;
4894
4895 return term;
4896 }
4897
4898 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
4899 this is used to form addresses to local data when -fPIC is in
4900 use. */
4901
4902 static bool
4903 darwin_local_data_pic (rtx disp)
4904 {
4905 if (GET_CODE (disp) == MINUS)
4906 {
4907 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
4908 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
4909 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
4910 {
4911 const char *sym_name = XSTR (XEXP (disp, 1), 0);
4912 if (! strcmp (sym_name, "<pic base>"))
4913 return true;
4914 }
4915 }
4916
4917 return false;
4918 }
4919 \f
4920 /* Determine if a given RTX is a valid constant. We already know this
4921 satisfies CONSTANT_P. */
4922
4923 bool
4924 legitimate_constant_p (rtx x)
4925 {
4926 switch (GET_CODE (x))
4927 {
4928 case CONST:
4929 x = XEXP (x, 0);
4930
4931 if (GET_CODE (x) == PLUS)
4932 {
4933 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4934 return false;
4935 x = XEXP (x, 0);
4936 }
4937
4938 if (TARGET_MACHO && darwin_local_data_pic (x))
4939 return true;
4940
4941 /* Only some unspecs are valid as "constants". */
4942 if (GET_CODE (x) == UNSPEC)
4943 switch (XINT (x, 1))
4944 {
4945 case UNSPEC_TPOFF:
4946 case UNSPEC_NTPOFF:
4947 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4948 case UNSPEC_DTPOFF:
4949 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4950 default:
4951 return false;
4952 }
4953
4954 /* We must have drilled down to a symbol. */
4955 if (!symbolic_operand (x, Pmode))
4956 return false;
4957 /* FALLTHRU */
4958
4959 case SYMBOL_REF:
4960 /* TLS symbols are never valid. */
4961 if (tls_symbolic_operand (x, Pmode))
4962 return false;
4963 break;
4964
4965 default:
4966 break;
4967 }
4968
4969 /* Otherwise we handle everything else in the move patterns. */
4970 return true;
4971 }
4972
4973 /* Determine if it's legal to put X into the constant pool. This
4974 is not possible for the address of thread-local symbols, which
4975 is checked above. */
4976
4977 static bool
4978 ix86_cannot_force_const_mem (rtx x)
4979 {
4980 return !legitimate_constant_p (x);
4981 }
4982
4983 /* Determine if a given RTX is a valid constant address. */
4984
4985 bool
4986 constant_address_p (rtx x)
4987 {
4988 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
4989 }
4990
4991 /* Nonzero if the constant value X is a legitimate general operand
4992 when generating PIC code. It is given that flag_pic is on and
4993 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
4994
4995 bool
4996 legitimate_pic_operand_p (rtx x)
4997 {
4998 rtx inner;
4999
5000 switch (GET_CODE (x))
5001 {
5002 case CONST:
5003 inner = XEXP (x, 0);
5004
5005 /* Only some unspecs are valid as "constants". */
5006 if (GET_CODE (inner) == UNSPEC)
5007 switch (XINT (inner, 1))
5008 {
5009 case UNSPEC_TPOFF:
5010 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5011 default:
5012 return false;
5013 }
5014 /* FALLTHRU */
5015
5016 case SYMBOL_REF:
5017 case LABEL_REF:
5018 return legitimate_pic_address_disp_p (x);
5019
5020 default:
5021 return true;
5022 }
5023 }
5024
5025 /* Determine if a given CONST RTX is a valid memory displacement
5026 in PIC mode. */
5027
5028 int
5029 legitimate_pic_address_disp_p (rtx disp)
5030 {
5031 bool saw_plus;
5032
5033 /* In 64bit mode we can allow direct addresses of symbols and labels
5034 when they are not dynamic symbols. */
5035 if (TARGET_64BIT)
5036 {
5037 /* TLS references should always be enclosed in UNSPEC. */
5038 if (tls_symbolic_operand (disp, GET_MODE (disp)))
5039 return 0;
5040 if (GET_CODE (disp) == SYMBOL_REF
5041 && ix86_cmodel == CM_SMALL_PIC
5042 && SYMBOL_REF_LOCAL_P (disp))
5043 return 1;
5044 if (GET_CODE (disp) == LABEL_REF)
5045 return 1;
5046 if (GET_CODE (disp) == CONST
5047 && GET_CODE (XEXP (disp, 0)) == PLUS)
5048 {
5049 rtx op0 = XEXP (XEXP (disp, 0), 0);
5050 rtx op1 = XEXP (XEXP (disp, 0), 1);
5051
5052 /* TLS references should always be enclosed in UNSPEC. */
5053 if (tls_symbolic_operand (op0, GET_MODE (op0)))
5054 return 0;
5055 if (((GET_CODE (op0) == SYMBOL_REF
5056 && ix86_cmodel == CM_SMALL_PIC
5057 && SYMBOL_REF_LOCAL_P (op0))
5058 || GET_CODE (op0) == LABEL_REF)
5059 && GET_CODE (op1) == CONST_INT
5060 && INTVAL (op1) < 16*1024*1024
5061 && INTVAL (op1) >= -16*1024*1024)
5062 return 1;
5063 }
5064 }
5065 if (GET_CODE (disp) != CONST)
5066 return 0;
5067 disp = XEXP (disp, 0);
5068
5069 if (TARGET_64BIT)
5070 {
5071 /* It is unsafe to allow PLUS expressions; this would limit the allowed
5072 distance of GOT tables. We should not need these anyway. */
5073 if (GET_CODE (disp) != UNSPEC
5074 || XINT (disp, 1) != UNSPEC_GOTPCREL)
5075 return 0;
5076
5077 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5078 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5079 return 0;
5080 return 1;
5081 }
5082
5083 saw_plus = false;
5084 if (GET_CODE (disp) == PLUS)
5085 {
5086 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5087 return 0;
5088 disp = XEXP (disp, 0);
5089 saw_plus = true;
5090 }
5091
5092 if (TARGET_MACHO && darwin_local_data_pic (disp))
5093 return 1;
5094
5095 if (GET_CODE (disp) != UNSPEC)
5096 return 0;
5097
5098 switch (XINT (disp, 1))
5099 {
5100 case UNSPEC_GOT:
5101 if (saw_plus)
5102 return false;
5103 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5104 case UNSPEC_GOTOFF:
5105 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5106 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5107 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5108 return false;
5109 case UNSPEC_GOTTPOFF:
5110 case UNSPEC_GOTNTPOFF:
5111 case UNSPEC_INDNTPOFF:
5112 if (saw_plus)
5113 return false;
5114 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5115 case UNSPEC_NTPOFF:
5116 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5117 case UNSPEC_DTPOFF:
5118 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5119 }
5120
5121 return 0;
5122 }
5123
5124 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5125 memory address for an instruction. The MODE argument is the machine mode
5126 for the MEM expression that wants to use this address.
5127
5128 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5129 convert common non-canonical forms to canonical form so that they will
5130 be recognized. */
5131
5132 int
5133 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5134 {
5135 struct ix86_address parts;
5136 rtx base, index, disp;
5137 HOST_WIDE_INT scale;
5138 const char *reason = NULL;
5139 rtx reason_rtx = NULL_RTX;
5140
5141 if (TARGET_DEBUG_ADDR)
5142 {
5143 fprintf (stderr,
5144 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5145 GET_MODE_NAME (mode), strict);
5146 debug_rtx (addr);
5147 }
5148
5149 if (ix86_decompose_address (addr, &parts) <= 0)
5150 {
5151 reason = "decomposition failed";
5152 goto report_error;
5153 }
5154
5155 base = parts.base;
5156 index = parts.index;
5157 disp = parts.disp;
5158 scale = parts.scale;
5159
5160 /* Validate base register.
5161
5162 Don't allow SUBREGs here; it can lead to spill failures when the base
5163 is one word out of a two-word structure, which is represented internally
5164 as a DImode int. */
5165
5166 if (base)
5167 {
5168 reason_rtx = base;
5169
5170 if (GET_CODE (base) != REG)
5171 {
5172 reason = "base is not a register";
5173 goto report_error;
5174 }
5175
5176 if (GET_MODE (base) != Pmode)
5177 {
5178 reason = "base is not in Pmode";
5179 goto report_error;
5180 }
5181
5182 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
5183 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
5184 {
5185 reason = "base is not valid";
5186 goto report_error;
5187 }
5188 }
5189
5190 /* Validate index register.
5191
5192 Don't allow SUBREGs here; they can lead to spill failures when the index
5193 is one word out of a two-word structure, which is represented internally
5194 as a DImode int.
5195
5196 if (index)
5197 {
5198 reason_rtx = index;
5199
5200 if (GET_CODE (index) != REG)
5201 {
5202 reason = "index is not a register";
5203 goto report_error;
5204 }
5205
5206 if (GET_MODE (index) != Pmode)
5207 {
5208 reason = "index is not in Pmode";
5209 goto report_error;
5210 }
5211
5212 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index))
5213 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index)))
5214 {
5215 reason = "index is not valid";
5216 goto report_error;
5217 }
5218 }
5219
5220 /* Validate scale factor. */
5221 if (scale != 1)
5222 {
5223 reason_rtx = GEN_INT (scale);
5224 if (!index)
5225 {
5226 reason = "scale without index";
5227 goto report_error;
5228 }
5229
5230 if (scale != 2 && scale != 4 && scale != 8)
5231 {
5232 reason = "scale is not a valid multiplier";
5233 goto report_error;
5234 }
5235 }
5236
5237 /* Validate displacement. */
5238 if (disp)
5239 {
5240 reason_rtx = disp;
5241
5242 if (GET_CODE (disp) == CONST
5243 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5244 switch (XINT (XEXP (disp, 0), 1))
5245 {
5246 case UNSPEC_GOT:
5247 case UNSPEC_GOTOFF:
5248 case UNSPEC_GOTPCREL:
5249 if (!flag_pic)
5250 abort ();
5251 goto is_legitimate_pic;
5252
5253 case UNSPEC_GOTTPOFF:
5254 case UNSPEC_GOTNTPOFF:
5255 case UNSPEC_INDNTPOFF:
5256 case UNSPEC_NTPOFF:
5257 case UNSPEC_DTPOFF:
5258 break;
5259
5260 default:
5261 reason = "invalid address unspec";
5262 goto report_error;
5263 }
5264
5265 else if (flag_pic && (SYMBOLIC_CONST (disp)
5266 #if TARGET_MACHO
5267 && !machopic_operand_p (disp)
5268 #endif
5269 ))
5270 {
5271 is_legitimate_pic:
5272 if (TARGET_64BIT && (index || base))
5273 {
5274 /* foo@dtpoff(%rX) is ok. */
5275 if (GET_CODE (disp) != CONST
5276 || GET_CODE (XEXP (disp, 0)) != PLUS
5277 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5278 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5279 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5280 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5281 {
5282 reason = "non-constant pic memory reference";
5283 goto report_error;
5284 }
5285 }
5286 else if (! legitimate_pic_address_disp_p (disp))
5287 {
5288 reason = "displacement is an invalid pic construct";
5289 goto report_error;
5290 }
5291
5292 /* This code used to verify that a symbolic pic displacement
5293 includes the pic_offset_table_rtx register.
5294
5295 While this is a good idea, unfortunately these constructs may
5296 be created by the "adds using lea" optimization for incorrect
5297 code like:
5298
5299 int a;
5300 int foo(int i)
5301 {
5302 return *(&a+i);
5303 }
5304
5305 This code is nonsensical, but it results in addressing the
5306 GOT table with a pic_offset_table_rtx base. We can't
5307 just reject it easily, since it gets matched by the
5308 "addsi3" pattern, which later gets split to an lea when the
5309 output register differs from the input. While this
5310 could be handled by a separate addsi pattern for this case
5311 that never results in an lea, disabling this test seems to be
5312 the easier and correct fix for the crash. */
5313 }
5314 else if (GET_CODE (disp) != LABEL_REF
5315 && GET_CODE (disp) != CONST_INT
5316 && (GET_CODE (disp) != CONST
5317 || !legitimate_constant_p (disp))
5318 && (GET_CODE (disp) != SYMBOL_REF
5319 || !legitimate_constant_p (disp)))
5320 {
5321 reason = "displacement is not constant";
5322 goto report_error;
5323 }
5324 else if (TARGET_64BIT
5325 && !x86_64_immediate_operand (disp, VOIDmode))
5326 {
5327 reason = "displacement is out of range";
5328 goto report_error;
5329 }
5330 }
5331
5332 /* Everything looks valid. */
5333 if (TARGET_DEBUG_ADDR)
5334 fprintf (stderr, "Success.\n");
5335 return TRUE;
5336
5337 report_error:
5338 if (TARGET_DEBUG_ADDR)
5339 {
5340 fprintf (stderr, "Error: %s\n", reason);
5341 debug_rtx (reason_rtx);
5342 }
5343 return FALSE;
5344 }
5345 \f
5346 /* Return a unique alias set for the GOT. */
5347
5348 static HOST_WIDE_INT
5349 ix86_GOT_alias_set (void)
5350 {
5351 static HOST_WIDE_INT set = -1;
5352 if (set == -1)
5353 set = new_alias_set ();
5354 return set;
5355 }
5356
5357 /* Return a legitimate reference for ORIG (an address) using the
5358 register REG. If REG is 0, a new pseudo is generated.
5359
5360 There are two types of references that must be handled:
5361
5362 1. Global data references must load the address from the GOT, via
5363 the PIC reg. An insn is emitted to do this load, and the reg is
5364 returned.
5365
5366 2. Static data references, constant pool addresses, and code labels
5367 compute the address as an offset from the GOT, whose base is in
5368 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5369 differentiate them from global data objects. The returned
5370 address is the PIC reg + an unspec constant.
5371
5372 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5373 reg also appears in the address. */
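/* An illustrative sketch (32-bit ELF, assuming %ebx holds the GOT base):
   a global symbol is loaded through the GOT, e.g.
       movl  sym@GOT(%ebx), %reg
   while a local/static symbol or label is addressed as an offset from the
   GOT base, e.g.
       leal  sym@GOTOFF(%ebx), %reg  */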
5374
5375 static rtx
5376 legitimize_pic_address (rtx orig, rtx reg)
5377 {
5378 rtx addr = orig;
5379 rtx new = orig;
5380 rtx base;
5381
5382 #if TARGET_MACHO
5383 if (reg == 0)
5384 reg = gen_reg_rtx (Pmode);
5385 /* Use the generic Mach-O PIC machinery. */
5386 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5387 #endif
5388
5389 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5390 new = addr;
5391 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5392 {
5393 /* This symbol may be referenced via a displacement from the PIC
5394 base address (@GOTOFF). */
5395
5396 if (reload_in_progress)
5397 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5398 if (GET_CODE (addr) == CONST)
5399 addr = XEXP (addr, 0);
5400 if (GET_CODE (addr) == PLUS)
5401 {
5402 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5403 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5404 }
5405 else
5406 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5407 new = gen_rtx_CONST (Pmode, new);
5408 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5409
5410 if (reg != 0)
5411 {
5412 emit_move_insn (reg, new);
5413 new = reg;
5414 }
5415 }
5416 else if (GET_CODE (addr) == SYMBOL_REF)
5417 {
5418 if (TARGET_64BIT)
5419 {
5420 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5421 new = gen_rtx_CONST (Pmode, new);
5422 new = gen_const_mem (Pmode, new);
5423 set_mem_alias_set (new, ix86_GOT_alias_set ());
5424
5425 if (reg == 0)
5426 reg = gen_reg_rtx (Pmode);
5427 /* Use gen_movsi directly; otherwise the address is loaded
5428 into a register for CSE. We don't want to CSE these addresses;
5429 instead we CSE the addresses loaded from the GOT table, so skip this. */
5430 emit_insn (gen_movsi (reg, new));
5431 new = reg;
5432 }
5433 else
5434 {
5435 /* This symbol must be referenced via a load from the
5436 Global Offset Table (@GOT). */
5437
5438 if (reload_in_progress)
5439 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5440 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5441 new = gen_rtx_CONST (Pmode, new);
5442 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5443 new = gen_const_mem (Pmode, new);
5444 set_mem_alias_set (new, ix86_GOT_alias_set ());
5445
5446 if (reg == 0)
5447 reg = gen_reg_rtx (Pmode);
5448 emit_move_insn (reg, new);
5449 new = reg;
5450 }
5451 }
5452 else
5453 {
5454 if (GET_CODE (addr) == CONST)
5455 {
5456 addr = XEXP (addr, 0);
5457
5458 /* We must match stuff we generated before. Assume the only
5459 unspecs that can get here are ours. Not that we could do
5460 anything with them anyway.... */
5461 if (GET_CODE (addr) == UNSPEC
5462 || (GET_CODE (addr) == PLUS
5463 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5464 return orig;
5465 if (GET_CODE (addr) != PLUS)
5466 abort ();
5467 }
5468 if (GET_CODE (addr) == PLUS)
5469 {
5470 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5471
5472 /* Check first to see if this is a constant offset from a @GOTOFF
5473 symbol reference. */
5474 if (local_symbolic_operand (op0, Pmode)
5475 && GET_CODE (op1) == CONST_INT)
5476 {
5477 if (!TARGET_64BIT)
5478 {
5479 if (reload_in_progress)
5480 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5481 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
5482 UNSPEC_GOTOFF);
5483 new = gen_rtx_PLUS (Pmode, new, op1);
5484 new = gen_rtx_CONST (Pmode, new);
5485 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5486
5487 if (reg != 0)
5488 {
5489 emit_move_insn (reg, new);
5490 new = reg;
5491 }
5492 }
5493 else
5494 {
5495 if (INTVAL (op1) < -16*1024*1024
5496 || INTVAL (op1) >= 16*1024*1024)
5497 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
5498 }
5499 }
5500 else
5501 {
5502 base = legitimize_pic_address (XEXP (addr, 0), reg);
5503 new = legitimize_pic_address (XEXP (addr, 1),
5504 base == reg ? NULL_RTX : reg);
5505
5506 if (GET_CODE (new) == CONST_INT)
5507 new = plus_constant (base, INTVAL (new));
5508 else
5509 {
5510 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
5511 {
5512 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
5513 new = XEXP (new, 1);
5514 }
5515 new = gen_rtx_PLUS (Pmode, base, new);
5516 }
5517 }
5518 }
5519 }
5520 return new;
5521 }
5522 \f
5523 /* Load the thread pointer. If TO_REG is true, force it into a register. */
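/* Informal note: on GNU/Linux the thread pointer lives behind a segment
   register (%gs in 32-bit mode, %fs in 64-bit mode), so the UNSPEC_TP
   below typically ends up as a load such as "movl %gs:0, %reg". */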
5524
5525 static rtx
5526 get_thread_pointer (int to_reg)
5527 {
5528 rtx tp, reg, insn;
5529
5530 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
5531 if (!to_reg)
5532 return tp;
5533
5534 reg = gen_reg_rtx (Pmode);
5535 insn = gen_rtx_SET (VOIDmode, reg, tp);
5536 insn = emit_insn (insn);
5537
5538 return reg;
5539 }
5540
5541 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
5542 false if we expect this to be used for a memory address and true if
5543 we expect to load the address into a register. */
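/* Rough reminder of the ELF TLS models handled below (a sketch, not a
   specification): global- and local-dynamic call the tls_get_addr helper,
   initial-exec loads the thread-pointer offset from the GOT (@gottpoff
   and friends) and adds the thread pointer, and local-exec folds the
   offset in as a link-time constant (@tpoff/@ntpoff). */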
5544
5545 static rtx
5546 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
5547 {
5548 rtx dest, base, off, pic;
5549 int type;
5550
5551 switch (model)
5552 {
5553 case TLS_MODEL_GLOBAL_DYNAMIC:
5554 dest = gen_reg_rtx (Pmode);
5555 if (TARGET_64BIT)
5556 {
5557 rtx rax = gen_rtx_REG (Pmode, 0), insns;
5558
5559 start_sequence ();
5560 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
5561 insns = get_insns ();
5562 end_sequence ();
5563
5564 emit_libcall_block (insns, dest, rax, x);
5565 }
5566 else
5567 emit_insn (gen_tls_global_dynamic_32 (dest, x));
5568 break;
5569
5570 case TLS_MODEL_LOCAL_DYNAMIC:
5571 base = gen_reg_rtx (Pmode);
5572 if (TARGET_64BIT)
5573 {
5574 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
5575
5576 start_sequence ();
5577 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
5578 insns = get_insns ();
5579 end_sequence ();
5580
5581 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
5582 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
5583 emit_libcall_block (insns, base, rax, note);
5584 }
5585 else
5586 emit_insn (gen_tls_local_dynamic_base_32 (base));
5587
5588 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
5589 off = gen_rtx_CONST (Pmode, off);
5590
5591 return gen_rtx_PLUS (Pmode, base, off);
5592
5593 case TLS_MODEL_INITIAL_EXEC:
5594 if (TARGET_64BIT)
5595 {
5596 pic = NULL;
5597 type = UNSPEC_GOTNTPOFF;
5598 }
5599 else if (flag_pic)
5600 {
5601 if (reload_in_progress)
5602 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5603 pic = pic_offset_table_rtx;
5604 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
5605 }
5606 else if (!TARGET_GNU_TLS)
5607 {
5608 pic = gen_reg_rtx (Pmode);
5609 emit_insn (gen_set_got (pic));
5610 type = UNSPEC_GOTTPOFF;
5611 }
5612 else
5613 {
5614 pic = NULL;
5615 type = UNSPEC_INDNTPOFF;
5616 }
5617
5618 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
5619 off = gen_rtx_CONST (Pmode, off);
5620 if (pic)
5621 off = gen_rtx_PLUS (Pmode, pic, off);
5622 off = gen_const_mem (Pmode, off);
5623 set_mem_alias_set (off, ix86_GOT_alias_set ());
5624
5625 if (TARGET_64BIT || TARGET_GNU_TLS)
5626 {
5627 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5628 off = force_reg (Pmode, off);
5629 return gen_rtx_PLUS (Pmode, base, off);
5630 }
5631 else
5632 {
5633 base = get_thread_pointer (true);
5634 dest = gen_reg_rtx (Pmode);
5635 emit_insn (gen_subsi3 (dest, base, off));
5636 }
5637 break;
5638
5639 case TLS_MODEL_LOCAL_EXEC:
5640 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
5641 (TARGET_64BIT || TARGET_GNU_TLS)
5642 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
5643 off = gen_rtx_CONST (Pmode, off);
5644
5645 if (TARGET_64BIT || TARGET_GNU_TLS)
5646 {
5647 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5648 return gen_rtx_PLUS (Pmode, base, off);
5649 }
5650 else
5651 {
5652 base = get_thread_pointer (true);
5653 dest = gen_reg_rtx (Pmode);
5654 emit_insn (gen_subsi3 (dest, base, off));
5655 }
5656 break;
5657
5658 default:
5659 abort ();
5660 }
5661
5662 return dest;
5663 }
5664
5665 /* Try machine-dependent ways of modifying an illegitimate address
5666 to be legitimate. If we find one, return the new, valid address.
5667 This macro is used in only one place: `memory_address' in explow.c.
5668
5669 OLDX is the address as it was before break_out_memory_refs was called.
5670 In some cases it is useful to look at this to decide what needs to be done.
5671
5672 MODE and WIN are passed so that this macro can use
5673 GO_IF_LEGITIMATE_ADDRESS.
5674
5675 It is always safe for this macro to do nothing. It exists to recognize
5676 opportunities to optimize the output.
5677
5678 For the 80386, we handle X+REG by loading X into a register R and
5679 using R+REG. R will go in a general reg and indexing will be used.
5680 However, if REG is a broken-out memory address or multiplication,
5681 nothing needs to be done because REG can certainly go in a general reg.
5682
5683 When -fpic is used, special handling is needed for symbolic references.
5684 See comments by legitimize_pic_address in i386.c for details. */
5685
5686 rtx
5687 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
5688 {
5689 int changed = 0;
5690 unsigned log;
5691
5692 if (TARGET_DEBUG_ADDR)
5693 {
5694 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
5695 GET_MODE_NAME (mode));
5696 debug_rtx (x);
5697 }
5698
5699 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
5700 if (log)
5701 return legitimize_tls_address (x, log, false);
5702 if (GET_CODE (x) == CONST
5703 && GET_CODE (XEXP (x, 0)) == PLUS
5704 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5705 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
5706 {
5707 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
5708 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
5709 }
5710
5711 if (flag_pic && SYMBOLIC_CONST (x))
5712 return legitimize_pic_address (x, 0);
5713
5714 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
5715 if (GET_CODE (x) == ASHIFT
5716 && GET_CODE (XEXP (x, 1)) == CONST_INT
5717 && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
5718 {
5719 changed = 1;
5720 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
5721 GEN_INT (1 << log));
5722 }
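/* For example, (ashift (reg) (const_int 2)) is rewritten above as
   (mult (reg) (const_int 4)), matching the scaled-index form that
   ix86_decompose_address expects. */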
5723
5724 if (GET_CODE (x) == PLUS)
5725 {
5726 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
5727
5728 if (GET_CODE (XEXP (x, 0)) == ASHIFT
5729 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5730 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
5731 {
5732 changed = 1;
5733 XEXP (x, 0) = gen_rtx_MULT (Pmode,
5734 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
5735 GEN_INT (1 << log));
5736 }
5737
5738 if (GET_CODE (XEXP (x, 1)) == ASHIFT
5739 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5740 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)
5741 {
5742 changed = 1;
5743 XEXP (x, 1) = gen_rtx_MULT (Pmode,
5744 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
5745 GEN_INT (1 << log));
5746 }
5747
5748 /* Put multiply first if it isn't already. */
5749 if (GET_CODE (XEXP (x, 1)) == MULT)
5750 {
5751 rtx tmp = XEXP (x, 0);
5752 XEXP (x, 0) = XEXP (x, 1);
5753 XEXP (x, 1) = tmp;
5754 changed = 1;
5755 }
5756
5757 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
5758 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
5759 created by virtual register instantiation, register elimination, and
5760 similar optimizations. */
5761 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
5762 {
5763 changed = 1;
5764 x = gen_rtx_PLUS (Pmode,
5765 gen_rtx_PLUS (Pmode, XEXP (x, 0),
5766 XEXP (XEXP (x, 1), 0)),
5767 XEXP (XEXP (x, 1), 1));
5768 }
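/* E.g. (plus (mult (reg A) (const_int 4)) (plus (reg B) (const_int 8)))
   becomes (plus (plus (mult (reg A) (const_int 4)) (reg B)) (const_int 8)),
   where A and B stand for arbitrary registers. */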
5769
5770 /* Canonicalize
5771 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
5772 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
5773 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
5774 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5775 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
5776 && CONSTANT_P (XEXP (x, 1)))
5777 {
5778 rtx constant;
5779 rtx other = NULL_RTX;
5780
5781 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5782 {
5783 constant = XEXP (x, 1);
5784 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
5785 }
5786 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
5787 {
5788 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
5789 other = XEXP (x, 1);
5790 }
5791 else
5792 constant = 0;
5793
5794 if (constant)
5795 {
5796 changed = 1;
5797 x = gen_rtx_PLUS (Pmode,
5798 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
5799 XEXP (XEXP (XEXP (x, 0), 1), 0)),
5800 plus_constant (other, INTVAL (constant)));
5801 }
5802 }
5803
5804 if (changed && legitimate_address_p (mode, x, FALSE))
5805 return x;
5806
5807 if (GET_CODE (XEXP (x, 0)) == MULT)
5808 {
5809 changed = 1;
5810 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
5811 }
5812
5813 if (GET_CODE (XEXP (x, 1)) == MULT)
5814 {
5815 changed = 1;
5816 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
5817 }
5818
5819 if (changed
5820 && GET_CODE (XEXP (x, 1)) == REG
5821 && GET_CODE (XEXP (x, 0)) == REG)
5822 return x;
5823
5824 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
5825 {
5826 changed = 1;
5827 x = legitimize_pic_address (x, 0);
5828 }
5829
5830 if (changed && legitimate_address_p (mode, x, FALSE))
5831 return x;
5832
5833 if (GET_CODE (XEXP (x, 0)) == REG)
5834 {
5835 rtx temp = gen_reg_rtx (Pmode);
5836 rtx val = force_operand (XEXP (x, 1), temp);
5837 if (val != temp)
5838 emit_move_insn (temp, val);
5839
5840 XEXP (x, 1) = temp;
5841 return x;
5842 }
5843
5844 else if (GET_CODE (XEXP (x, 1)) == REG)
5845 {
5846 rtx temp = gen_reg_rtx (Pmode);
5847 rtx val = force_operand (XEXP (x, 0), temp);
5848 if (val != temp)
5849 emit_move_insn (temp, val);
5850
5851 XEXP (x, 0) = temp;
5852 return x;
5853 }
5854 }
5855
5856 return x;
5857 }
5858 \f
5859 /* Print an integer constant expression in assembler syntax. Addition
5860 and subtraction are the only arithmetic that may appear in these
5861 expressions. FILE is the stdio stream to write to, X is the rtx, and
5862 CODE is the operand print code from the output string. */
5863
5864 static void
5865 output_pic_addr_const (FILE *file, rtx x, int code)
5866 {
5867 char buf[256];
5868
5869 switch (GET_CODE (x))
5870 {
5871 case PC:
5872 if (flag_pic)
5873 putc ('.', file);
5874 else
5875 abort ();
5876 break;
5877
5878 case SYMBOL_REF:
5879 /* Mark the decl as referenced so that cgraph will output the function. */
5880 if (SYMBOL_REF_DECL (x))
5881 mark_decl_referenced (SYMBOL_REF_DECL (x));
5882
5883 assemble_name (file, XSTR (x, 0));
5884 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
5885 fputs ("@PLT", file);
5886 break;
5887
5888 case LABEL_REF:
5889 x = XEXP (x, 0);
5890 /* FALLTHRU */
5891 case CODE_LABEL:
5892 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
5893 assemble_name (asm_out_file, buf);
5894 break;
5895
5896 case CONST_INT:
5897 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5898 break;
5899
5900 case CONST:
5901 /* This used to output parentheses around the expression,
5902 but that does not work on the 386 (either ATT or BSD assembler). */
5903 output_pic_addr_const (file, XEXP (x, 0), code);
5904 break;
5905
5906 case CONST_DOUBLE:
5907 if (GET_MODE (x) == VOIDmode)
5908 {
5909 /* We can use %d if the number is <32 bits and positive. */
5910 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
5911 fprintf (file, "0x%lx%08lx",
5912 (unsigned long) CONST_DOUBLE_HIGH (x),
5913 (unsigned long) CONST_DOUBLE_LOW (x));
5914 else
5915 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
5916 }
5917 else
5918 /* We can't handle floating point constants;
5919 PRINT_OPERAND must handle them. */
5920 output_operand_lossage ("floating constant misused");
5921 break;
5922
5923 case PLUS:
5924 /* Some assemblers need integer constants to appear first. */
5925 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5926 {
5927 output_pic_addr_const (file, XEXP (x, 0), code);
5928 putc ('+', file);
5929 output_pic_addr_const (file, XEXP (x, 1), code);
5930 }
5931 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5932 {
5933 output_pic_addr_const (file, XEXP (x, 1), code);
5934 putc ('+', file);
5935 output_pic_addr_const (file, XEXP (x, 0), code);
5936 }
5937 else
5938 abort ();
5939 break;
5940
5941 case MINUS:
5942 if (!TARGET_MACHO)
5943 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
5944 output_pic_addr_const (file, XEXP (x, 0), code);
5945 putc ('-', file);
5946 output_pic_addr_const (file, XEXP (x, 1), code);
5947 if (!TARGET_MACHO)
5948 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
5949 break;
5950
5951 case UNSPEC:
5952 if (XVECLEN (x, 0) != 1)
5953 abort ();
5954 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
5955 switch (XINT (x, 1))
5956 {
5957 case UNSPEC_GOT:
5958 fputs ("@GOT", file);
5959 break;
5960 case UNSPEC_GOTOFF:
5961 fputs ("@GOTOFF", file);
5962 break;
5963 case UNSPEC_GOTPCREL:
5964 fputs ("@GOTPCREL(%rip)", file);
5965 break;
5966 case UNSPEC_GOTTPOFF:
5967 /* FIXME: This might be @TPOFF in Sun ld too. */
5968 fputs ("@GOTTPOFF", file);
5969 break;
5970 case UNSPEC_TPOFF:
5971 fputs ("@TPOFF", file);
5972 break;
5973 case UNSPEC_NTPOFF:
5974 if (TARGET_64BIT)
5975 fputs ("@TPOFF", file);
5976 else
5977 fputs ("@NTPOFF", file);
5978 break;
5979 case UNSPEC_DTPOFF:
5980 fputs ("@DTPOFF", file);
5981 break;
5982 case UNSPEC_GOTNTPOFF:
5983 if (TARGET_64BIT)
5984 fputs ("@GOTTPOFF(%rip)", file);
5985 else
5986 fputs ("@GOTNTPOFF", file);
5987 break;
5988 case UNSPEC_INDNTPOFF:
5989 fputs ("@INDNTPOFF", file);
5990 break;
5991 default:
5992 output_operand_lossage ("invalid UNSPEC as operand");
5993 break;
5994 }
5995 break;
5996
5997 default:
5998 output_operand_lossage ("invalid expression as operand");
5999 }
6000 }
6001
6002 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
6003 We need to emit DTP-relative relocations. */
6004
6005 void
6006 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6007 {
6008 fputs (ASM_LONG, file);
6009 output_addr_const (file, x);
6010 fputs ("@DTPOFF", file);
6011 switch (size)
6012 {
6013 case 4:
6014 break;
6015 case 8:
6016 fputs (", 0", file);
6017 break;
6018 default:
6019 abort ();
6020 }
6021 }
6022
6023 /* In the name of slightly smaller debug output, and to cater to
6024 general assembler lossage, recognize PIC+GOTOFF and turn it back
6025 into a direct symbol reference. */
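/* A sketch of the 32-bit case handled below: an address such as
   (plus (reg ebx) (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)))
   is turned back into plain (symbol_ref "foo") for debug output. */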
6026
6027 static rtx
6028 ix86_delegitimize_address (rtx orig_x)
6029 {
6030 rtx x = orig_x, y;
6031
6032 if (GET_CODE (x) == MEM)
6033 x = XEXP (x, 0);
6034
6035 if (TARGET_64BIT)
6036 {
6037 if (GET_CODE (x) != CONST
6038 || GET_CODE (XEXP (x, 0)) != UNSPEC
6039 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6040 || GET_CODE (orig_x) != MEM)
6041 return orig_x;
6042 return XVECEXP (XEXP (x, 0), 0, 0);
6043 }
6044
6045 if (GET_CODE (x) != PLUS
6046 || GET_CODE (XEXP (x, 1)) != CONST)
6047 return orig_x;
6048
6049 if (GET_CODE (XEXP (x, 0)) == REG
6050 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6051 /* %ebx + GOT/GOTOFF */
6052 y = NULL;
6053 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6054 {
6055 /* %ebx + %reg * scale + GOT/GOTOFF */
6056 y = XEXP (x, 0);
6057 if (GET_CODE (XEXP (y, 0)) == REG
6058 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6059 y = XEXP (y, 1);
6060 else if (GET_CODE (XEXP (y, 1)) == REG
6061 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6062 y = XEXP (y, 0);
6063 else
6064 return orig_x;
6065 if (GET_CODE (y) != REG
6066 && GET_CODE (y) != MULT
6067 && GET_CODE (y) != ASHIFT)
6068 return orig_x;
6069 }
6070 else
6071 return orig_x;
6072
6073 x = XEXP (XEXP (x, 1), 0);
6074 if (GET_CODE (x) == UNSPEC
6075 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6076 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6077 {
6078 if (y)
6079 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6080 return XVECEXP (x, 0, 0);
6081 }
6082
6083 if (GET_CODE (x) == PLUS
6084 && GET_CODE (XEXP (x, 0)) == UNSPEC
6085 && GET_CODE (XEXP (x, 1)) == CONST_INT
6086 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6087 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6088 && GET_CODE (orig_x) != MEM)))
6089 {
6090 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6091 if (y)
6092 return gen_rtx_PLUS (Pmode, y, x);
6093 return x;
6094 }
6095
6096 return orig_x;
6097 }
6098 \f
6099 static void
6100 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6101 int fp, FILE *file)
6102 {
6103 const char *suffix;
6104
6105 if (mode == CCFPmode || mode == CCFPUmode)
6106 {
6107 enum rtx_code second_code, bypass_code;
6108 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6109 if (bypass_code != UNKNOWN || second_code != UNKNOWN)
6110 abort ();
6111 code = ix86_fp_compare_code_to_integer (code);
6112 mode = CCmode;
6113 }
6114 if (reverse)
6115 code = reverse_condition (code);
6116
6117 switch (code)
6118 {
6119 case EQ:
6120 suffix = "e";
6121 break;
6122 case NE:
6123 suffix = "ne";
6124 break;
6125 case GT:
6126 if (mode != CCmode && mode != CCNOmode && mode != CCGCmode)
6127 abort ();
6128 suffix = "g";
6129 break;
6130 case GTU:
6131 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6132 Those same assemblers have the same but opposite lossage on cmov. */
6133 if (mode != CCmode)
6134 abort ();
6135 suffix = fp ? "nbe" : "a";
6136 break;
6137 case LT:
6138 if (mode == CCNOmode || mode == CCGOCmode)
6139 suffix = "s";
6140 else if (mode == CCmode || mode == CCGCmode)
6141 suffix = "l";
6142 else
6143 abort ();
6144 break;
6145 case LTU:
6146 if (mode != CCmode)
6147 abort ();
6148 suffix = "b";
6149 break;
6150 case GE:
6151 if (mode == CCNOmode || mode == CCGOCmode)
6152 suffix = "ns";
6153 else if (mode == CCmode || mode == CCGCmode)
6154 suffix = "ge";
6155 else
6156 abort ();
6157 break;
6158 case GEU:
6159 /* ??? As above. */
6160 if (mode != CCmode)
6161 abort ();
6162 suffix = fp ? "nb" : "ae";
6163 break;
6164 case LE:
6165 if (mode != CCmode && mode != CCGCmode && mode != CCNOmode)
6166 abort ();
6167 suffix = "le";
6168 break;
6169 case LEU:
6170 if (mode != CCmode)
6171 abort ();
6172 suffix = "be";
6173 break;
6174 case UNORDERED:
6175 suffix = fp ? "u" : "p";
6176 break;
6177 case ORDERED:
6178 suffix = fp ? "nu" : "np";
6179 break;
6180 default:
6181 abort ();
6182 }
6183 fputs (suffix, file);
6184 }
6185
6186 /* Print the name of register X to FILE based on its machine mode and number.
6187 If CODE is 'w', pretend the mode is HImode.
6188 If CODE is 'b', pretend the mode is QImode.
6189 If CODE is 'k', pretend the mode is SImode.
6190 If CODE is 'q', pretend the mode is DImode.
6191 If CODE is 'h', pretend the reg is the `high' byte register.
6192 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
6193
6194 void
6195 print_reg (rtx x, int code, FILE *file)
6196 {
6197 if (REGNO (x) == ARG_POINTER_REGNUM
6198 || REGNO (x) == FRAME_POINTER_REGNUM
6199 || REGNO (x) == FLAGS_REG
6200 || REGNO (x) == FPSR_REG)
6201 abort ();
6202
6203 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6204 putc ('%', file);
6205
6206 if (code == 'w' || MMX_REG_P (x))
6207 code = 2;
6208 else if (code == 'b')
6209 code = 1;
6210 else if (code == 'k')
6211 code = 4;
6212 else if (code == 'q')
6213 code = 8;
6214 else if (code == 'y')
6215 code = 3;
6216 else if (code == 'h')
6217 code = 0;
6218 else
6219 code = GET_MODE_SIZE (GET_MODE (x));
6220
6221 /* Irritatingly, the AMD extended registers use a different naming convention
6222 from the normal registers. */
6223 if (REX_INT_REG_P (x))
6224 {
6225 if (!TARGET_64BIT)
6226 abort ();
6227 switch (code)
6228 {
6229 case 0:
6230 error ("extended registers have no high halves");
6231 break;
6232 case 1:
6233 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6234 break;
6235 case 2:
6236 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6237 break;
6238 case 4:
6239 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6240 break;
6241 case 8:
6242 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6243 break;
6244 default:
6245 error ("unsupported operand size for extended register");
6246 break;
6247 }
6248 return;
6249 }
6250 switch (code)
6251 {
6252 case 3:
6253 if (STACK_TOP_P (x))
6254 {
6255 fputs ("st(0)", file);
6256 break;
6257 }
6258 /* FALLTHRU */
6259 case 8:
6260 case 4:
6261 case 12:
6262 if (! ANY_FP_REG_P (x))
6263 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6264 /* FALLTHRU */
6265 case 16:
6266 case 2:
6267 normal:
6268 fputs (hi_reg_name[REGNO (x)], file);
6269 break;
6270 case 1:
6271 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6272 goto normal;
6273 fputs (qi_reg_name[REGNO (x)], file);
6274 break;
6275 case 0:
6276 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6277 goto normal;
6278 fputs (qi_high_reg_name[REGNO (x)], file);
6279 break;
6280 default:
6281 abort ();
6282 }
6283 }
6284
6285 /* Locate some local-dynamic symbol still in use by this function
6286 so that we can print its name in some tls_local_dynamic_base
6287 pattern. */
6288
6289 static const char *
6290 get_some_local_dynamic_name (void)
6291 {
6292 rtx insn;
6293
6294 if (cfun->machine->some_ld_name)
6295 return cfun->machine->some_ld_name;
6296
6297 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6298 if (INSN_P (insn)
6299 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6300 return cfun->machine->some_ld_name;
6301
6302 abort ();
6303 }
6304
6305 static int
6306 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6307 {
6308 rtx x = *px;
6309
6310 if (GET_CODE (x) == SYMBOL_REF
6311 && local_dynamic_symbolic_operand (x, Pmode))
6312 {
6313 cfun->machine->some_ld_name = XSTR (x, 0);
6314 return 1;
6315 }
6316
6317 return 0;
6318 }
6319
6320 /* Meaning of CODE:
6321 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
6322 C -- print opcode suffix for set/cmov insn.
6323 c -- like C, but print reversed condition
6324 F,f -- likewise, but for floating-point.
6325 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
6326 otherwise nothing
6327 R -- print the prefix for register names.
6328 z -- print the opcode suffix for the size of the current operand.
6329 * -- print a star (in certain assembler syntax)
6330 A -- print an absolute memory reference.
6331 w -- print the operand as if it's a "word" (HImode) even if it isn't.
6332 s -- print a shift double count, followed by the assembler's argument
6333 delimiter.
6334 b -- print the QImode name of the register for the indicated operand.
6335 %b0 would print %al if operands[0] is reg 0.
6336 w -- likewise, print the HImode name of the register.
6337 k -- likewise, print the SImode name of the register.
6338 q -- likewise, print the DImode name of the register.
6339 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
6340 y -- print "st(0)" instead of "st" as a register.
6341 D -- print condition for SSE cmp instruction.
6342 P -- if PIC, print an @PLT suffix.
6343 X -- don't print any sort of PIC '@' suffix for a symbol.
6344 & -- print some in-use local-dynamic symbol name.
6345 H -- print a memory address offset by 8; used for sse high-parts
6346 */
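/* For illustration only: if operands[0] is the AX register in SImode,
   "%k0" prints "%eax", "%w0" prints "%ax", "%b0" prints "%al" and
   "%h0" prints "%ah" (AT&T syntax). */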
6347
6348 void
6349 print_operand (FILE *file, rtx x, int code)
6350 {
6351 if (code)
6352 {
6353 switch (code)
6354 {
6355 case '*':
6356 if (ASSEMBLER_DIALECT == ASM_ATT)
6357 putc ('*', file);
6358 return;
6359
6360 case '&':
6361 assemble_name (file, get_some_local_dynamic_name ());
6362 return;
6363
6364 case 'A':
6365 if (ASSEMBLER_DIALECT == ASM_ATT)
6366 putc ('*', file);
6367 else if (ASSEMBLER_DIALECT == ASM_INTEL)
6368 {
6369 /* Intel syntax. For absolute addresses, registers should not
6370 be surrounded by brackets. */
6371 if (GET_CODE (x) != REG)
6372 {
6373 putc ('[', file);
6374 PRINT_OPERAND (file, x, 0);
6375 putc (']', file);
6376 return;
6377 }
6378 }
6379 else
6380 abort ();
6381
6382 PRINT_OPERAND (file, x, 0);
6383 return;
6384
6385
6386 case 'L':
6387 if (ASSEMBLER_DIALECT == ASM_ATT)
6388 putc ('l', file);
6389 return;
6390
6391 case 'W':
6392 if (ASSEMBLER_DIALECT == ASM_ATT)
6393 putc ('w', file);
6394 return;
6395
6396 case 'B':
6397 if (ASSEMBLER_DIALECT == ASM_ATT)
6398 putc ('b', file);
6399 return;
6400
6401 case 'Q':
6402 if (ASSEMBLER_DIALECT == ASM_ATT)
6403 putc ('l', file);
6404 return;
6405
6406 case 'S':
6407 if (ASSEMBLER_DIALECT == ASM_ATT)
6408 putc ('s', file);
6409 return;
6410
6411 case 'T':
6412 if (ASSEMBLER_DIALECT == ASM_ATT)
6413 putc ('t', file);
6414 return;
6415
6416 case 'z':
6417 /* 387 opcodes don't get size suffixes if the operands are
6418 registers. */
6419 if (STACK_REG_P (x))
6420 return;
6421
6422 /* Likewise if using Intel opcodes. */
6423 if (ASSEMBLER_DIALECT == ASM_INTEL)
6424 return;
6425
6426 /* Derive the opcode suffix from the size of the operand. */
6427 switch (GET_MODE_SIZE (GET_MODE (x)))
6428 {
6429 case 2:
6430 #ifdef HAVE_GAS_FILDS_FISTS
6431 putc ('s', file);
6432 #endif
6433 return;
6434
6435 case 4:
6436 if (GET_MODE (x) == SFmode)
6437 {
6438 putc ('s', file);
6439 return;
6440 }
6441 else
6442 putc ('l', file);
6443 return;
6444
6445 case 12:
6446 case 16:
6447 putc ('t', file);
6448 return;
6449
6450 case 8:
6451 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
6452 {
6453 #ifdef GAS_MNEMONICS
6454 putc ('q', file);
6455 #else
6456 putc ('l', file);
6457 putc ('l', file);
6458 #endif
6459 }
6460 else
6461 putc ('l', file);
6462 return;
6463
6464 default:
6465 abort ();
6466 }
6467
6468 case 'b':
6469 case 'w':
6470 case 'k':
6471 case 'q':
6472 case 'h':
6473 case 'y':
6474 case 'X':
6475 case 'P':
6476 break;
6477
6478 case 's':
6479 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
6480 {
6481 PRINT_OPERAND (file, x, 0);
6482 putc (',', file);
6483 }
6484 return;
6485
6486 case 'D':
6487 /* A little bit of brain damage here. The SSE compare instructions
6488 use completely different names for the comparisons than the
6489 fp conditional moves do. */
6490 switch (GET_CODE (x))
6491 {
6492 case EQ:
6493 case UNEQ:
6494 fputs ("eq", file);
6495 break;
6496 case LT:
6497 case UNLT:
6498 fputs ("lt", file);
6499 break;
6500 case LE:
6501 case UNLE:
6502 fputs ("le", file);
6503 break;
6504 case UNORDERED:
6505 fputs ("unord", file);
6506 break;
6507 case NE:
6508 case LTGT:
6509 fputs ("neq", file);
6510 break;
6511 case UNGE:
6512 case GE:
6513 fputs ("nlt", file);
6514 break;
6515 case UNGT:
6516 case GT:
6517 fputs ("nle", file);
6518 break;
6519 case ORDERED:
6520 fputs ("ord", file);
6521 break;
6522 default:
6523 abort ();
6524 break;
6525 }
6526 return;
6527 case 'O':
6528 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6529 if (ASSEMBLER_DIALECT == ASM_ATT)
6530 {
6531 switch (GET_MODE (x))
6532 {
6533 case HImode: putc ('w', file); break;
6534 case SImode:
6535 case SFmode: putc ('l', file); break;
6536 case DImode:
6537 case DFmode: putc ('q', file); break;
6538 default: abort ();
6539 }
6540 putc ('.', file);
6541 }
6542 #endif
6543 return;
6544 case 'C':
6545 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
6546 return;
6547 case 'F':
6548 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6549 if (ASSEMBLER_DIALECT == ASM_ATT)
6550 putc ('.', file);
6551 #endif
6552 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
6553 return;
6554
6555 /* Like above, but reverse condition */
6556 case 'c':
6557 /* Check to see if argument to %c is really a constant
6558 and not a condition code which needs to be reversed. */
6559 if (!COMPARISON_P (x))
6560 {
6561 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
6562 return;
6563 }
6564 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
6565 return;
6566 case 'f':
6567 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6568 if (ASSEMBLER_DIALECT == ASM_ATT)
6569 putc ('.', file);
6570 #endif
6571 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
6572 return;
6573
6574 case 'H':
6575 /* It doesn't actually matter what mode we use here, as we're
6576 only going to use this for printing. */
6577 x = adjust_address_nv (x, DImode, 8);
6578 break;
6579
6580 case '+':
6581 {
6582 rtx x;
6583
6584 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
6585 return;
6586
6587 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
6588 if (x)
6589 {
6590 int pred_val = INTVAL (XEXP (x, 0));
6591
6592 if (pred_val < REG_BR_PROB_BASE * 45 / 100
6593 || pred_val > REG_BR_PROB_BASE * 55 / 100)
6594 {
6595 int taken = pred_val > REG_BR_PROB_BASE / 2;
6596 int cputaken = final_forward_branch_p (current_output_insn) == 0;
6597
6598 /* Emit hints only in the case where the default branch prediction
6599 heuristics would fail. */
6600 if (taken != cputaken)
6601 {
6602 /* We use 3e (DS) prefix for taken branches and
6603 2e (CS) prefix for not taken branches. */
6604 if (taken)
6605 fputs ("ds ; ", file);
6606 else
6607 fputs ("cs ; ", file);
6608 }
6609 }
6610 }
6611 return;
6612 }
6613 default:
6614 output_operand_lossage ("invalid operand code '%c'", code);
6615 }
6616 }
6617
6618 if (GET_CODE (x) == REG)
6619 print_reg (x, code, file);
6620
6621 else if (GET_CODE (x) == MEM)
6622 {
6623 /* No `byte ptr' prefix for call instructions. */
6624 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
6625 {
6626 const char * size;
6627 switch (GET_MODE_SIZE (GET_MODE (x)))
6628 {
6629 case 1: size = "BYTE"; break;
6630 case 2: size = "WORD"; break;
6631 case 4: size = "DWORD"; break;
6632 case 8: size = "QWORD"; break;
6633 case 12: size = "XWORD"; break;
6634 case 16: size = "XMMWORD"; break;
6635 default:
6636 abort ();
6637 }
6638
6639 /* Check for explicit size override (codes 'b', 'w' and 'k') */
6640 if (code == 'b')
6641 size = "BYTE";
6642 else if (code == 'w')
6643 size = "WORD";
6644 else if (code == 'k')
6645 size = "DWORD";
6646
6647 fputs (size, file);
6648 fputs (" PTR ", file);
6649 }
6650
6651 x = XEXP (x, 0);
6652 /* Avoid (%rip) for call operands. */
6653 if (CONSTANT_ADDRESS_P (x) && code == 'P'
6654 && GET_CODE (x) != CONST_INT)
6655 output_addr_const (file, x);
6656 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
6657 output_operand_lossage ("invalid constraints for operand");
6658 else
6659 output_address (x);
6660 }
6661
6662 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
6663 {
6664 REAL_VALUE_TYPE r;
6665 long l;
6666
6667 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6668 REAL_VALUE_TO_TARGET_SINGLE (r, l);
6669
6670 if (ASSEMBLER_DIALECT == ASM_ATT)
6671 putc ('$', file);
6672 fprintf (file, "0x%08lx", l);
6673 }
6674
6675 /* These float cases don't actually occur as immediate operands. */
6676 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
6677 {
6678 char dstr[30];
6679
6680 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6681 fprintf (file, "%s", dstr);
6682 }
6683
6684 else if (GET_CODE (x) == CONST_DOUBLE
6685 && GET_MODE (x) == XFmode)
6686 {
6687 char dstr[30];
6688
6689 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6690 fprintf (file, "%s", dstr);
6691 }
6692
6693 else
6694 {
6695 if (code != 'P')
6696 {
6697 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
6698 {
6699 if (ASSEMBLER_DIALECT == ASM_ATT)
6700 putc ('$', file);
6701 }
6702 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
6703 || GET_CODE (x) == LABEL_REF)
6704 {
6705 if (ASSEMBLER_DIALECT == ASM_ATT)
6706 putc ('$', file);
6707 else
6708 fputs ("OFFSET FLAT:", file);
6709 }
6710 }
6711 if (GET_CODE (x) == CONST_INT)
6712 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6713 else if (flag_pic)
6714 output_pic_addr_const (file, x, code);
6715 else
6716 output_addr_const (file, x);
6717 }
6718 }
6719 \f
6720 /* Print a memory operand whose address is ADDR. */
6721
6722 void
6723 print_operand_address (FILE *file, rtx addr)
6724 {
6725 struct ix86_address parts;
6726 rtx base, index, disp;
6727 int scale;
6728
6729 if (! ix86_decompose_address (addr, &parts))
6730 abort ();
6731
6732 base = parts.base;
6733 index = parts.index;
6734 disp = parts.disp;
6735 scale = parts.scale;
6736
6737 switch (parts.seg)
6738 {
6739 case SEG_DEFAULT:
6740 break;
6741 case SEG_FS:
6742 case SEG_GS:
6743 if (USER_LABEL_PREFIX[0] == 0)
6744 putc ('%', file);
6745 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
6746 break;
6747 default:
6748 abort ();
6749 }
6750
6751 if (!base && !index)
6752 {
6753 /* Displacement-only addresses require special attention. */
6754
6755 if (GET_CODE (disp) == CONST_INT)
6756 {
6757 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
6758 {
6759 if (USER_LABEL_PREFIX[0] == 0)
6760 putc ('%', file);
6761 fputs ("ds:", file);
6762 }
6763 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
6764 }
6765 else if (flag_pic)
6766 output_pic_addr_const (file, disp, 0);
6767 else
6768 output_addr_const (file, disp);
6769
6770 /* Use the one byte shorter RIP-relative addressing for 64-bit mode. */
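/* (In 64-bit mode an absolute disp32 needs an extra SIB byte, whereas
   "sym(%rip)" does not; an informal note, see the instruction encoding
   manuals for details.) */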
6771 if (TARGET_64BIT
6772 && ((GET_CODE (disp) == SYMBOL_REF
6773 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
6774 || GET_CODE (disp) == LABEL_REF
6775 || (GET_CODE (disp) == CONST
6776 && GET_CODE (XEXP (disp, 0)) == PLUS
6777 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
6778 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
6779 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
6780 fputs ("(%rip)", file);
6781 }
6782 else
6783 {
6784 if (ASSEMBLER_DIALECT == ASM_ATT)
6785 {
6786 if (disp)
6787 {
6788 if (flag_pic)
6789 output_pic_addr_const (file, disp, 0);
6790 else if (GET_CODE (disp) == LABEL_REF)
6791 output_asm_label (disp);
6792 else
6793 output_addr_const (file, disp);
6794 }
6795
6796 putc ('(', file);
6797 if (base)
6798 print_reg (base, 0, file);
6799 if (index)
6800 {
6801 putc (',', file);
6802 print_reg (index, 0, file);
6803 if (scale != 1)
6804 fprintf (file, ",%d", scale);
6805 }
6806 putc (')', file);
6807 }
6808 else
6809 {
6810 rtx offset = NULL_RTX;
6811
6812 if (disp)
6813 {
6814 /* Pull out the offset of a symbol; print any symbol itself. */
6815 if (GET_CODE (disp) == CONST
6816 && GET_CODE (XEXP (disp, 0)) == PLUS
6817 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
6818 {
6819 offset = XEXP (XEXP (disp, 0), 1);
6820 disp = gen_rtx_CONST (VOIDmode,
6821 XEXP (XEXP (disp, 0), 0));
6822 }
6823
6824 if (flag_pic)
6825 output_pic_addr_const (file, disp, 0);
6826 else if (GET_CODE (disp) == LABEL_REF)
6827 output_asm_label (disp);
6828 else if (GET_CODE (disp) == CONST_INT)
6829 offset = disp;
6830 else
6831 output_addr_const (file, disp);
6832 }
6833
6834 putc ('[', file);
6835 if (base)
6836 {
6837 print_reg (base, 0, file);
6838 if (offset)
6839 {
6840 if (INTVAL (offset) >= 0)
6841 putc ('+', file);
6842 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6843 }
6844 }
6845 else if (offset)
6846 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6847 else
6848 putc ('0', file);
6849
6850 if (index)
6851 {
6852 putc ('+', file);
6853 print_reg (index, 0, file);
6854 if (scale != 1)
6855 fprintf (file, "*%d", scale);
6856 }
6857 putc (']', file);
6858 }
6859 }
6860 }
6861
6862 bool
6863 output_addr_const_extra (FILE *file, rtx x)
6864 {
6865 rtx op;
6866
6867 if (GET_CODE (x) != UNSPEC)
6868 return false;
6869
6870 op = XVECEXP (x, 0, 0);
6871 switch (XINT (x, 1))
6872 {
6873 case UNSPEC_GOTTPOFF:
6874 output_addr_const (file, op);
6875 /* FIXME: This might be @TPOFF in Sun ld. */
6876 fputs ("@GOTTPOFF", file);
6877 break;
6878 case UNSPEC_TPOFF:
6879 output_addr_const (file, op);
6880 fputs ("@TPOFF", file);
6881 break;
6882 case UNSPEC_NTPOFF:
6883 output_addr_const (file, op);
6884 if (TARGET_64BIT)
6885 fputs ("@TPOFF", file);
6886 else
6887 fputs ("@NTPOFF", file);
6888 break;
6889 case UNSPEC_DTPOFF:
6890 output_addr_const (file, op);
6891 fputs ("@DTPOFF", file);
6892 break;
6893 case UNSPEC_GOTNTPOFF:
6894 output_addr_const (file, op);
6895 if (TARGET_64BIT)
6896 fputs ("@GOTTPOFF(%rip)", file);
6897 else
6898 fputs ("@GOTNTPOFF", file);
6899 break;
6900 case UNSPEC_INDNTPOFF:
6901 output_addr_const (file, op);
6902 fputs ("@INDNTPOFF", file);
6903 break;
6904
6905 default:
6906 return false;
6907 }
6908
6909 return true;
6910 }
6911 \f
6912 /* Split one or more DImode RTL references into pairs of SImode
6913 references. The RTL can be REG, offsettable MEM, integer constant, or
6914 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
6915 split and "num" is its length. lo_half and hi_half are output arrays
6916 that parallel "operands". */
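/* A small sketch: a DImode pseudo such as (reg:DI 60) is split into
   (subreg:SI (reg:DI 60) 0) and (subreg:SI (reg:DI 60) 4), while an
   offsettable MEM is split into the same MEM at offsets 0 and 4. */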
6917
6918 void
6919 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6920 {
6921 while (num--)
6922 {
6923 rtx op = operands[num];
6924
6925 /* simplify_subreg refuses to split volatile memory addresses,
6926 but we still have to handle them. */
6927 if (GET_CODE (op) == MEM)
6928 {
6929 lo_half[num] = adjust_address (op, SImode, 0);
6930 hi_half[num] = adjust_address (op, SImode, 4);
6931 }
6932 else
6933 {
6934 lo_half[num] = simplify_gen_subreg (SImode, op,
6935 GET_MODE (op) == VOIDmode
6936 ? DImode : GET_MODE (op), 0);
6937 hi_half[num] = simplify_gen_subreg (SImode, op,
6938 GET_MODE (op) == VOIDmode
6939 ? DImode : GET_MODE (op), 4);
6940 }
6941 }
6942 }
6943 /* Split one or more TImode RTL references into pairs of DImode
6944 references. The RTL can be REG, offsettable MEM, integer constant, or
6945 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
6946 split and "num" is its length. lo_half and hi_half are output arrays
6947 that parallel "operands". */
6948
6949 void
6950 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6951 {
6952 while (num--)
6953 {
6954 rtx op = operands[num];
6955
6956 /* simplify_subreg refuses to split volatile memory addresses, but we
6957 still have to handle them. */
6958 if (GET_CODE (op) == MEM)
6959 {
6960 lo_half[num] = adjust_address (op, DImode, 0);
6961 hi_half[num] = adjust_address (op, DImode, 8);
6962 }
6963 else
6964 {
6965 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
6966 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
6967 }
6968 }
6969 }
6970 \f
6971 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
6972 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
6973 is the expression of the binary operation. The output may either be
6974 emitted here, or returned to the caller, like all output_* functions.
6975
6976 There is no guarantee that the operands are the same mode, as they
6977 might be within FLOAT or FLOAT_EXTEND expressions. */
6978
6979 #ifndef SYSV386_COMPAT
6980 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
6981 wants to fix the assemblers because that causes incompatibility
6982 with gcc. No-one wants to fix gcc because that causes
6983 incompatibility with assemblers... You can use the option of
6984 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
6985 #define SYSV386_COMPAT 1
6986 #endif
6987
6988 const char *
6989 output_387_binary_op (rtx insn, rtx *operands)
6990 {
6991 static char buf[30];
6992 const char *p;
6993 const char *ssep;
6994 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
6995
6996 #ifdef ENABLE_CHECKING
6997 /* Even if we do not want to check the inputs, this documents the input
6998 constraints, which helps in understanding the following code. */
6999 if (STACK_REG_P (operands[0])
7000 && ((REG_P (operands[1])
7001 && REGNO (operands[0]) == REGNO (operands[1])
7002 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7003 || (REG_P (operands[2])
7004 && REGNO (operands[0]) == REGNO (operands[2])
7005 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7006 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7007 ; /* ok */
7008 else if (!is_sse)
7009 abort ();
7010 #endif
7011
7012 switch (GET_CODE (operands[3]))
7013 {
7014 case PLUS:
7015 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7016 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7017 p = "fiadd";
7018 else
7019 p = "fadd";
7020 ssep = "add";
7021 break;
7022
7023 case MINUS:
7024 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7025 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7026 p = "fisub";
7027 else
7028 p = "fsub";
7029 ssep = "sub";
7030 break;
7031
7032 case MULT:
7033 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7034 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7035 p = "fimul";
7036 else
7037 p = "fmul";
7038 ssep = "mul";
7039 break;
7040
7041 case DIV:
7042 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7043 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7044 p = "fidiv";
7045 else
7046 p = "fdiv";
7047 ssep = "div";
7048 break;
7049
7050 default:
7051 abort ();
7052 }
7053
7054 if (is_sse)
7055 {
7056 strcpy (buf, ssep);
7057 if (GET_MODE (operands[0]) == SFmode)
7058 strcat (buf, "ss\t{%2, %0|%0, %2}");
7059 else
7060 strcat (buf, "sd\t{%2, %0|%0, %2}");
7061 return buf;
7062 }
7063 strcpy (buf, p);
7064
7065 switch (GET_CODE (operands[3]))
7066 {
7067 case MULT:
7068 case PLUS:
7069 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7070 {
7071 rtx temp = operands[2];
7072 operands[2] = operands[1];
7073 operands[1] = temp;
7074 }
7075
7076 /* We know operands[0] == operands[1]. */
7077
7078 if (GET_CODE (operands[2]) == MEM)
7079 {
7080 p = "%z2\t%2";
7081 break;
7082 }
7083
7084 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7085 {
7086 if (STACK_TOP_P (operands[0]))
7087 /* How is it that we are storing to a dead operand[2]?
7088 Well, presumably operands[1] is dead too. We can't
7089 store the result to st(0) as st(0) gets popped on this
7090 instruction. Instead store to operands[2] (which I
7091 think has to be st(1)). st(1) will be popped later.
7092 gcc <= 2.8.1 didn't have this check and generated
7093 assembly code that the Unixware assembler rejected. */
7094 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7095 else
7096 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7097 break;
7098 }
7099
7100 if (STACK_TOP_P (operands[0]))
7101 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7102 else
7103 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7104 break;
7105
7106 case MINUS:
7107 case DIV:
7108 if (GET_CODE (operands[1]) == MEM)
7109 {
7110 p = "r%z1\t%1";
7111 break;
7112 }
7113
7114 if (GET_CODE (operands[2]) == MEM)
7115 {
7116 p = "%z2\t%2";
7117 break;
7118 }
7119
7120 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7121 {
7122 #if SYSV386_COMPAT
7123 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7124 derived assemblers, confusingly reverse the direction of
7125 the operation for fsub{r} and fdiv{r} when the
7126 destination register is not st(0). The Intel assembler
7127 doesn't have this brain damage. Read !SYSV386_COMPAT to
7128 figure out what the hardware really does. */
7129 if (STACK_TOP_P (operands[0]))
7130 p = "{p\t%0, %2|rp\t%2, %0}";
7131 else
7132 p = "{rp\t%2, %0|p\t%0, %2}";
7133 #else
7134 if (STACK_TOP_P (operands[0]))
7135 /* As above for fmul/fadd, we can't store to st(0). */
7136 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7137 else
7138 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7139 #endif
7140 break;
7141 }
7142
7143 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7144 {
7145 #if SYSV386_COMPAT
7146 if (STACK_TOP_P (operands[0]))
7147 p = "{rp\t%0, %1|p\t%1, %0}";
7148 else
7149 p = "{p\t%1, %0|rp\t%0, %1}";
7150 #else
7151 if (STACK_TOP_P (operands[0]))
7152 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7153 else
7154 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7155 #endif
7156 break;
7157 }
7158
7159 if (STACK_TOP_P (operands[0]))
7160 {
7161 if (STACK_TOP_P (operands[1]))
7162 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7163 else
7164 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7165 break;
7166 }
7167 else if (STACK_TOP_P (operands[1]))
7168 {
7169 #if SYSV386_COMPAT
7170 p = "{\t%1, %0|r\t%0, %1}";
7171 #else
7172 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7173 #endif
7174 }
7175 else
7176 {
7177 #if SYSV386_COMPAT
7178 p = "{r\t%2, %0|\t%0, %2}";
7179 #else
7180 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7181 #endif
7182 }
7183 break;
7184
7185 default:
7186 abort ();
7187 }
7188
7189 strcat (buf, p);
7190 return buf;
7191 }
7192
7193 /* Output code to initialize the control word copies used by trunc?f?i and
7194 rounding patterns. CURRENT_MODE is set to the current control word,
7195 while NEW_MODE is set to the new control word. */
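/* Reminder (taken from the x87 architecture, not from this file): the
   values or-ed in below select the rounding-control field, bits 10-11 of
   the control word; 0x0400 rounds toward -inf, 0x0800 toward +inf and
   0x0c00 truncates, while 0x0020 masks the precision exception. */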
7196
7197 void
7198 emit_i387_cw_initialization (rtx current_mode, rtx new_mode, int mode)
7199 {
7200 rtx reg = gen_reg_rtx (HImode);
7201
7202 emit_insn (gen_x86_fnstcw_1 (current_mode));
7203 emit_move_insn (reg, current_mode);
7204
7205 if (!TARGET_PARTIAL_REG_STALL && !optimize_size
7206 && !TARGET_64BIT)
7207 {
7208 switch (mode)
7209 {
7210 case I387_CW_FLOOR:
7211 /* round down toward -oo */
7212 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
7213 break;
7214
7215 case I387_CW_CEIL:
7216 /* round up toward +oo */
7217 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
7218 break;
7219
7220 case I387_CW_TRUNC:
7221 /* round toward zero (truncate) */
7222 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
7223 break;
7224
7225 case I387_CW_MASK_PM:
7226 /* mask precision exception for nearbyint() */
7227 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7228 break;
7229
7230 default:
7231 abort();
7232 }
7233 }
7234 else
7235 {
7236 switch (mode)
7237 {
7238 case I387_CW_FLOOR:
7239 /* round down toward -oo */
7240 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7241 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7242 break;
7243
7244 case I387_CW_CEIL:
7245 /* round up toward +oo */
7246 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7247 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7248 break;
7249
7250 case I387_CW_TRUNC:
7251 /* round toward zero (truncate) */
7252 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7253 break;
7254
7255 case I387_CW_MASK_PM:
7256 /* mask precision exception for nearbyint() */
7257 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7258 break;
7259
7260 default:
7261 abort();
7262 }
7263 }
7264
7265 emit_move_insn (new_mode, reg);
7266 }
7267
7268 /* Output code for INSN to convert a float to a signed int. OPERANDS
7269 are the insn operands. The output may be [HSD]Imode and the input
7270 operand may be [SDX]Fmode. */
7271
7272 const char *
7273 output_fix_trunc (rtx insn, rtx *operands)
7274 {
7275 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7276 int dimode_p = GET_MODE (operands[0]) == DImode;
7277
7278 /* Jump through a hoop or two for DImode, since the hardware has no
7279 non-popping instruction. We used to do this a different way, but
7280 that was somewhat fragile and broke with post-reload splitters. */
7281 if (dimode_p && !stack_top_dies)
7282 output_asm_insn ("fld\t%y1", operands);
7283
7284 if (!STACK_TOP_P (operands[1]))
7285 abort ();
7286
7287 if (GET_CODE (operands[0]) != MEM)
7288 abort ();
7289
7290 output_asm_insn ("fldcw\t%3", operands);
7291 if (stack_top_dies || dimode_p)
7292 output_asm_insn ("fistp%z0\t%0", operands);
7293 else
7294 output_asm_insn ("fist%z0\t%0", operands);
7295 output_asm_insn ("fldcw\t%2", operands);
7296
7297 return "";
7298 }
7299
7300 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
7301 should be used. UNORDERED_P is true when fucom should be used. */
7302
7303 const char *
7304 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
7305 {
7306 int stack_top_dies;
7307 rtx cmp_op0, cmp_op1;
7308 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
7309
7310 if (eflags_p)
7311 {
7312 cmp_op0 = operands[0];
7313 cmp_op1 = operands[1];
7314 }
7315 else
7316 {
7317 cmp_op0 = operands[1];
7318 cmp_op1 = operands[2];
7319 }
7320
7321 if (is_sse)
7322 {
7323 if (GET_MODE (operands[0]) == SFmode)
7324 if (unordered_p)
7325 return "ucomiss\t{%1, %0|%0, %1}";
7326 else
7327 return "comiss\t{%1, %0|%0, %1}";
7328 else
7329 if (unordered_p)
7330 return "ucomisd\t{%1, %0|%0, %1}";
7331 else
7332 return "comisd\t{%1, %0|%0, %1}";
7333 }
7334
7335 if (! STACK_TOP_P (cmp_op0))
7336 abort ();
7337
7338 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7339
7340 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
7341 {
7342 if (stack_top_dies)
7343 {
7344 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
7345 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
7346 }
7347 else
7348 return "ftst\n\tfnstsw\t%0";
7349 }
7350
7351 if (STACK_REG_P (cmp_op1)
7352 && stack_top_dies
7353 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
7354 && REGNO (cmp_op1) != FIRST_STACK_REG)
7355 {
7356 /* If the top of the 387 stack dies, and the other operand is also
7357 a stack register that dies, then this must be a
7358 `fcompp' float compare.  */
7359
7360 if (eflags_p)
7361 {
7362 /* There is no double popping fcomi variant. Fortunately,
7363 eflags is immune from the fstp's cc clobbering. */
7364 if (unordered_p)
7365 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
7366 else
7367 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
7368 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
7369 }
7370 else
7371 {
7372 if (unordered_p)
7373 return "fucompp\n\tfnstsw\t%0";
7374 else
7375 return "fcompp\n\tfnstsw\t%0";
7376 }
7377 }
7378 else
7379 {
7380 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
7381
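/* The index is built below as: bit 3 = eflags_p (fcomi family), bit 2 =
   integer operand (ficom family), bit 1 = unordered (fucom family),
   bit 0 = stack top dies (popping form).  The NULL entries correspond to
   combinations the 387 cannot encode.  */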
7382 static const char * const alt[16] =
7383 {
7384 "fcom%z2\t%y2\n\tfnstsw\t%0",
7385 "fcomp%z2\t%y2\n\tfnstsw\t%0",
7386 "fucom%z2\t%y2\n\tfnstsw\t%0",
7387 "fucomp%z2\t%y2\n\tfnstsw\t%0",
7388
7389 "ficom%z2\t%y2\n\tfnstsw\t%0",
7390 "ficomp%z2\t%y2\n\tfnstsw\t%0",
7391 NULL,
7392 NULL,
7393
7394 "fcomi\t{%y1, %0|%0, %y1}",
7395 "fcomip\t{%y1, %0|%0, %y1}",
7396 "fucomi\t{%y1, %0|%0, %y1}",
7397 "fucomip\t{%y1, %0|%0, %y1}",
7398
7399 NULL,
7400 NULL,
7401 NULL,
7402 NULL
7403 };
7404
7405 int mask;
7406 const char *ret;
7407
7408 mask = eflags_p << 3;
7409 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
7410 mask |= unordered_p << 1;
7411 mask |= stack_top_dies;
7412
7413 if (mask >= 16)
7414 abort ();
7415 ret = alt[mask];
7416 if (ret == NULL)
7417 abort ();
7418
7419 return ret;
7420 }
7421 }
7422
7423 void
7424 ix86_output_addr_vec_elt (FILE *file, int value)
7425 {
7426 const char *directive = ASM_LONG;
7427
7428 if (TARGET_64BIT)
7429 {
7430 #ifdef ASM_QUAD
7431 directive = ASM_QUAD;
7432 #else
7433 abort ();
7434 #endif
7435 }
7436
7437 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
7438 }
7439
7440 void
7441 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
7442 {
7443 if (TARGET_64BIT)
7444 fprintf (file, "%s%s%d-%s%d\n",
7445 ASM_LONG, LPREFIX, value, LPREFIX, rel);
7446 else if (HAVE_AS_GOTOFF_IN_DATA)
7447 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
7448 #if TARGET_MACHO
7449 else if (TARGET_MACHO)
7450 {
7451 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
7452 machopic_output_function_base_name (file);
7453 fprintf(file, "\n");
7454 }
7455 #endif
7456 else
7457 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
7458 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
7459 }
7460 \f
7461 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
7462 for the target. */
7463
7464 void
7465 ix86_expand_clear (rtx dest)
7466 {
7467 rtx tmp;
7468
7469 /* We play register width games, which are only valid after reload. */
7470 if (!reload_completed)
7471 abort ();
7472
7473 /* Avoid HImode and its attendant prefix byte. */
7474 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
7475 dest = gen_rtx_REG (SImode, REGNO (dest));
7476
7477 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
7478
7479 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
7480 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
7481 {
7482 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
7483 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
7484 }
7485
7486 emit_insn (tmp);
7487 }
7488
7489 /* X is an unchanging MEM. If it is a constant pool reference, return
7490 the constant pool rtx, else NULL. */
7491
7492 rtx
7493 maybe_get_pool_constant (rtx x)
7494 {
7495 x = ix86_delegitimize_address (XEXP (x, 0));
7496
7497 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
7498 return get_pool_constant (x);
7499
7500 return NULL_RTX;
7501 }
7502
7503 void
7504 ix86_expand_move (enum machine_mode mode, rtx operands[])
7505 {
7506 int strict = (reload_in_progress || reload_completed);
7507 rtx op0, op1;
7508 enum tls_model model;
7509
7510 op0 = operands[0];
7511 op1 = operands[1];
7512
7513 if (GET_CODE (op1) == SYMBOL_REF)
7514 {
7515 model = SYMBOL_REF_TLS_MODEL (op1);
7516 if (model)
7517 {
7518 op1 = legitimize_tls_address (op1, model, true);
7519 op1 = force_operand (op1, op0);
7520 if (op1 == op0)
7521 return;
7522 }
7523 }
7524 else if (GET_CODE (op1) == CONST
7525 && GET_CODE (XEXP (op1, 0)) == PLUS
7526 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
7527 {
7528 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
7529 if (model)
7530 {
7531 rtx addend = XEXP (XEXP (op1, 0), 1);
7532 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
7533 op1 = force_operand (op1, NULL);
7534 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
7535 op0, 1, OPTAB_DIRECT);
7536 if (op1 == op0)
7537 return;
7538 }
7539 }
7540
7541 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
7542 {
7543 #if TARGET_MACHO
7544 if (MACHOPIC_PURE)
7545 {
7546 rtx temp = ((reload_in_progress
7547 || ((op0 && GET_CODE (op0) == REG)
7548 && mode == Pmode))
7549 ? op0 : gen_reg_rtx (Pmode));
7550 op1 = machopic_indirect_data_reference (op1, temp);
7551 op1 = machopic_legitimize_pic_address (op1, mode,
7552 temp == op1 ? 0 : temp);
7553 }
7554 else if (MACHOPIC_INDIRECT)
7555 op1 = machopic_indirect_data_reference (op1, 0);
7556 if (op0 == op1)
7557 return;
7558 #else
7559 if (GET_CODE (op0) == MEM)
7560 op1 = force_reg (Pmode, op1);
7561 else
7562 op1 = legitimize_address (op1, op1, Pmode);
7563 #endif /* TARGET_MACHO */
7564 }
7565 else
7566 {
7567 if (GET_CODE (op0) == MEM
7568 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
7569 || !push_operand (op0, mode))
7570 && GET_CODE (op1) == MEM)
7571 op1 = force_reg (mode, op1);
7572
7573 if (push_operand (op0, mode)
7574 && ! general_no_elim_operand (op1, mode))
7575 op1 = copy_to_mode_reg (mode, op1);
7576
7577 /* Force large constants in 64-bit compilation into a register
7578 so that they get CSEd. */
7579 if (TARGET_64BIT && mode == DImode
7580 && immediate_operand (op1, mode)
7581 && !x86_64_zext_immediate_operand (op1, VOIDmode)
7582 && !register_operand (op0, mode)
7583 && optimize && !reload_completed && !reload_in_progress)
7584 op1 = copy_to_mode_reg (mode, op1);
7585
7586 if (FLOAT_MODE_P (mode))
7587 {
7588 /* If we are loading a floating point constant to a register,
7589 force the value to memory now, since we'll get better code
7590 out the back end. */
7591
7592 if (strict)
7593 ;
7594 else if (GET_CODE (op1) == CONST_DOUBLE)
7595 {
7596 op1 = validize_mem (force_const_mem (mode, op1));
7597 if (!register_operand (op0, mode))
7598 {
7599 rtx temp = gen_reg_rtx (mode);
7600 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
7601 emit_move_insn (op0, temp);
7602 return;
7603 }
7604 }
7605 }
7606 }
7607
7608 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7609 }
7610
7611 void
7612 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
7613 {
7614 rtx op0 = operands[0], op1 = operands[1];
7615
7616 /* Force constants other than zero into memory. We do not know how
7617 the instructions used to build constants modify the upper 64 bits
7618 of the register; once we have that information we may be able
7619 to handle some of them more efficiently. */
7620 if ((reload_in_progress | reload_completed) == 0
7621 && register_operand (op0, mode)
7622 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
7623 op1 = validize_mem (force_const_mem (mode, op1));
7624
7625 /* Make operand1 a register if it isn't already. */
7626 if (!no_new_pseudos
7627 && !register_operand (op0, mode)
7628 && !register_operand (op1, mode))
7629 {
7630 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
7631 return;
7632 }
7633
7634 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7635 }
7636
7637 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
7638 straight to ix86_expand_vector_move. */
7639
7640 void
7641 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
7642 {
7643 rtx op0, op1, m;
7644
7645 op0 = operands[0];
7646 op1 = operands[1];
7647
7648 if (MEM_P (op1))
7649 {
7650 /* If we're optimizing for size, movups is the smallest. */
7651 if (optimize_size)
7652 {
7653 op0 = gen_lowpart (V4SFmode, op0);
7654 op1 = gen_lowpart (V4SFmode, op1);
7655 emit_insn (gen_sse_movups (op0, op1));
7656 return;
7657 }
7658
7659 /* ??? If we have typed data, then it would appear that using
7660 movdqu is the only way to get unaligned data loaded with
7661 integer type. */
7662 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7663 {
7664 op0 = gen_lowpart (V16QImode, op0);
7665 op1 = gen_lowpart (V16QImode, op1);
7666 emit_insn (gen_sse2_movdqu (op0, op1));
7667 return;
7668 }
7669
7670 if (TARGET_SSE2 && mode == V2DFmode)
7671 {
7672 rtx zero;
7673
7674 /* When SSE registers are split into halves, we can avoid
7675 writing to the top half twice. */
7676 if (TARGET_SSE_SPLIT_REGS)
7677 {
7678 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
7679 zero = op0;
7680 }
7681 else
7682 {
7683 /* ??? Not sure about the best option for the Intel chips.
7684 The following would seem to satisfy; the register is
7685 entirely cleared, breaking the dependency chain. We
7686 then store to the upper half, with a dependency depth
7687 of one. A rumor has it that Intel recommends two movsd
7688 followed by an unpacklpd, but this is unconfirmed. And
7689 given that the dependency depth of the unpacklpd would
7690 still be one, I'm not sure why this would be better. */
7691 zero = CONST0_RTX (V2DFmode);
7692 }
7693
7694 m = adjust_address (op1, DFmode, 0);
7695 emit_insn (gen_sse2_loadlpd (op0, zero, m));
7696 m = adjust_address (op1, DFmode, 8);
7697 emit_insn (gen_sse2_loadhpd (op0, op0, m));
7698 }
7699 else
7700 {
7701 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
7702 emit_move_insn (op0, CONST0_RTX (mode));
7703 else
7704 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
7705
7706 m = adjust_address (op1, V2SFmode, 0);
7707 emit_insn (gen_sse_loadlps (op0, op0, m));
7708 m = adjust_address (op1, V2SFmode, 8);
7709 emit_insn (gen_sse_loadhps (op0, op0, m));
7710 }
7711 }
7712 else if (MEM_P (op0))
7713 {
7714 /* If we're optimizing for size, movups is the smallest. */
7715 if (optimize_size)
7716 {
7717 op0 = gen_lowpart (V4SFmode, op0);
7718 op1 = gen_lowpart (V4SFmode, op1);
7719 emit_insn (gen_sse_movups (op0, op1));
7720 return;
7721 }
7722
7723 /* ??? Similar to above, only less clear because of quote
7724 typeless stores unquote. */
7725 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
7726 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7727 {
7728 op0 = gen_lowpart (V16QImode, op0);
7729 op1 = gen_lowpart (V16QImode, op1);
7730 emit_insn (gen_sse2_movdqu (op0, op1));
7731 return;
7732 }
7733
7734 if (TARGET_SSE2 && mode == V2DFmode)
7735 {
7736 m = adjust_address (op0, DFmode, 0);
7737 emit_insn (gen_sse2_storelpd (m, op1));
7738 m = adjust_address (op0, DFmode, 8);
7739 emit_insn (gen_sse2_storehpd (m, op1));
7740 }
7741 else
7742 {
7743 if (mode != V4SFmode)
7744 op1 = gen_lowpart (V4SFmode, op1);
7745 m = adjust_address (op0, V2SFmode, 0);
7746 emit_insn (gen_sse_storelps (m, op1));
7747 m = adjust_address (op0, V2SFmode, 8);
7748 emit_insn (gen_sse_storehps (m, op1));
7749 }
7750 }
7751 else
7752 gcc_unreachable ();
7753 }
7754
7755
7756 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
7757 destination to use for the operation. If different from the true
7758 destination in operands[0], a copy operation will be required. */
7759
7760 rtx
7761 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
7762 rtx operands[])
7763 {
7764 int matching_memory;
7765 rtx src1, src2, dst;
7766
7767 dst = operands[0];
7768 src1 = operands[1];
7769 src2 = operands[2];
7770
7771 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
7772 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7773 && (rtx_equal_p (dst, src2)
7774 || immediate_operand (src1, mode)))
7775 {
7776 rtx temp = src1;
7777 src1 = src2;
7778 src2 = temp;
7779 }
7780
7781 /* If the destination is memory, and we do not have matching source
7782 operands, do things in registers. */
7783 matching_memory = 0;
7784 if (GET_CODE (dst) == MEM)
7785 {
7786 if (rtx_equal_p (dst, src1))
7787 matching_memory = 1;
7788 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7789 && rtx_equal_p (dst, src2))
7790 matching_memory = 2;
7791 else
7792 dst = gen_reg_rtx (mode);
7793 }
7794
7795 /* Both source operands cannot be in memory. */
7796 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
7797 {
7798 if (matching_memory != 2)
7799 src2 = force_reg (mode, src2);
7800 else
7801 src1 = force_reg (mode, src1);
7802 }
7803
7804 /* If the operation is not commutative, source 1 cannot be a constant
7805 or non-matching memory. */
7806 if ((CONSTANT_P (src1)
7807 || (!matching_memory && GET_CODE (src1) == MEM))
7808 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7809 src1 = force_reg (mode, src1);
7810
7811 /* If optimizing, copy to regs to improve CSE */
7812 if (optimize && ! no_new_pseudos)
7813 {
7814 if (GET_CODE (dst) == MEM)
7815 dst = gen_reg_rtx (mode);
7816 if (GET_CODE (src1) == MEM)
7817 src1 = force_reg (mode, src1);
7818 if (GET_CODE (src2) == MEM)
7819 src2 = force_reg (mode, src2);
7820 }
7821
7822 src1 = operands[1] = src1;
7823 src2 = operands[2] = src2;
7824 return dst;
7825 }
7826
7827 /* Similarly, but assume that the destination has already been
7828 set up properly. */
7829
7830 void
7831 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
7832 enum machine_mode mode, rtx operands[])
7833 {
7834 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
7835 gcc_assert (dst == operands[0]);
7836 }
7837
7838 /* Attempt to expand a binary operator. Make the expansion closer to the
7839 actual machine than just general_operand, which would allow 3 separate
7840 memory references (one output, two input) in a single insn. */
7841
7842 void
7843 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
7844 rtx operands[])
7845 {
7846 rtx src1, src2, dst, op, clob;
7847
7848 dst = ix86_fixup_binary_operands (code, mode, operands);
7849 src1 = operands[1];
7850 src2 = operands[2];
7851
7852 /* Emit the instruction. */
7853
7854 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
7855 if (reload_in_progress)
7856 {
7857 /* Reload doesn't know about the flags register, and doesn't know that
7858 it doesn't want to clobber it. We can only do this with PLUS. */
7859 if (code != PLUS)
7860 abort ();
7861 emit_insn (op);
7862 }
7863 else
7864 {
7865 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7866 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7867 }
7868
7869 /* Fix up the destination if needed. */
7870 if (dst != operands[0])
7871 emit_move_insn (operands[0], dst);
7872 }
7873
7874 /* Return TRUE or FALSE depending on whether the binary operator meets the
7875 appropriate constraints. */
7876
7877 int
7878 ix86_binary_operator_ok (enum rtx_code code,
7879 enum machine_mode mode ATTRIBUTE_UNUSED,
7880 rtx operands[3])
7881 {
7882 /* Both source operands cannot be in memory. */
7883 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
7884 return 0;
7885 /* If the operation is not commutative, source 1 cannot be a constant. */
7886 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7887 return 0;
7888 /* If the destination is memory, we must have a matching source operand. */
7889 if (GET_CODE (operands[0]) == MEM
7890 && ! (rtx_equal_p (operands[0], operands[1])
7891 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7892 && rtx_equal_p (operands[0], operands[2]))))
7893 return 0;
7894 /* If the operation is not commutative and source 1 is memory, we must
7895 have a matching destination. */
7896 if (GET_CODE (operands[1]) == MEM
7897 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
7898 && ! rtx_equal_p (operands[0], operands[1]))
7899 return 0;
7900 return 1;
7901 }
7902
7903 /* Attempt to expand a unary operator. Make the expansion closer to the
7904 actual machine than just general_operand, which would allow 2 separate
7905 memory references (one output, one input) in a single insn. */
7906
7907 void
7908 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
7909 rtx operands[])
7910 {
7911 int matching_memory;
7912 rtx src, dst, op, clob;
7913
7914 dst = operands[0];
7915 src = operands[1];
7916
7917 /* If the destination is memory, and we do not have matching source
7918 operands, do things in registers. */
7919 matching_memory = 0;
7920 if (MEM_P (dst))
7921 {
7922 if (rtx_equal_p (dst, src))
7923 matching_memory = 1;
7924 else
7925 dst = gen_reg_rtx (mode);
7926 }
7927
7928 /* When the source operand is memory, the destination must match. */
7929 if (MEM_P (src) && !matching_memory)
7930 src = force_reg (mode, src);
7931
7932 /* If optimizing, copy to regs to improve CSE. */
7933 if (optimize && ! no_new_pseudos)
7934 {
7935 if (GET_CODE (dst) == MEM)
7936 dst = gen_reg_rtx (mode);
7937 if (GET_CODE (src) == MEM)
7938 src = force_reg (mode, src);
7939 }
7940
7941 /* Emit the instruction. */
7942
7943 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
7944 if (reload_in_progress || code == NOT)
7945 {
7946 /* Reload doesn't know about the flags register, and doesn't know that
7947 it doesn't want to clobber it. */
7948 if (code != NOT)
7949 abort ();
7950 emit_insn (op);
7951 }
7952 else
7953 {
7954 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7955 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7956 }
7957
7958 /* Fix up the destination if needed. */
7959 if (dst != operands[0])
7960 emit_move_insn (operands[0], dst);
7961 }
7962
7963 /* Return TRUE or FALSE depending on whether the unary operator meets the
7964 appropriate constraints. */
7965
7966 int
7967 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
7968 enum machine_mode mode ATTRIBUTE_UNUSED,
7969 rtx operands[2] ATTRIBUTE_UNUSED)
7970 {
7971 /* If one of operands is memory, source and destination must match. */
7972 if ((GET_CODE (operands[0]) == MEM
7973 || GET_CODE (operands[1]) == MEM)
7974 && ! rtx_equal_p (operands[0], operands[1]))
7975 return FALSE;
7976 return TRUE;
7977 }
7978
7979 /* Generate code for floating point ABS or NEG. */
7980
7981 void
7982 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
7983 rtx operands[])
7984 {
7985 rtx mask, set, use, clob, dst, src;
7986 bool matching_memory;
7987 bool use_sse = false;
7988 bool vector_mode = VECTOR_MODE_P (mode);
7989 enum machine_mode elt_mode = mode;
7990 enum machine_mode vec_mode = VOIDmode;
7991
7992 if (vector_mode)
7993 {
7994 elt_mode = GET_MODE_INNER (mode);
7995 vec_mode = mode;
7996 use_sse = true;
7997 }
7998 if (TARGET_SSE_MATH)
7999 {
8000 if (mode == SFmode)
8001 {
8002 use_sse = true;
8003 vec_mode = V4SFmode;
8004 }
8005 else if (mode == DFmode && TARGET_SSE2)
8006 {
8007 use_sse = true;
8008 vec_mode = V2DFmode;
8009 }
8010 }
8011
8012 /* NEG and ABS performed with SSE use bitwise mask operations.
8013 Create the appropriate mask now. */
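/* NEG is done by XORing with a vector whose low element has only the sign
   bit set; ABS is done by ANDing with the complement of that vector.  */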
8014 if (use_sse)
8015 {
8016 HOST_WIDE_INT hi, lo;
8017 int shift = 63;
8018 rtvec v;
8019
8020 /* Find the sign bit, sign extended to 2*HWI. */
8021 if (elt_mode == SFmode)
8022 lo = 0x80000000, hi = lo < 0;
8023 else if (HOST_BITS_PER_WIDE_INT >= 64)
8024 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8025 else
8026 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8027
8028 /* If we're looking for the absolute value, then we want
8029 the complement. */
8030 if (code == ABS)
8031 lo = ~lo, hi = ~hi;
8032
8033 /* Force this value into the low part of a fp vector constant. */
8034 mask = immed_double_const (lo, hi, elt_mode == SFmode ? SImode : DImode);
8035 mask = gen_lowpart (elt_mode, mask);
8036
8037 switch (mode)
8038 {
8039 case SFmode:
8040 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8041 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8042 break;
8043
8044 case DFmode:
8045 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8046 break;
8047
8048 case V4SFmode:
8049 v = gen_rtvec (4, mask, mask, mask, mask);
8050 break;
8051
8052 case V2DFmode:
8053 v = gen_rtvec (2, mask, mask);
8054 break;
8055
8056 default:
8057 gcc_unreachable ();
8058 }
8059
8060 mask = gen_rtx_CONST_VECTOR (vec_mode, v);
8061 mask = force_reg (vec_mode, mask);
8062 }
8063 else
8064 {
8065 /* When not using SSE, we don't use the mask, but prefer to keep the
8066 same general form of the insn pattern to reduce duplication when
8067 it comes time to split. */
8068 mask = const0_rtx;
8069 }
8070
8071 dst = operands[0];
8072 src = operands[1];
8073
8074 /* If the destination is memory, and we don't have matching source
8075 operands, do things in registers. */
8076 matching_memory = false;
8077 if (MEM_P (dst))
8078 {
8079 if (rtx_equal_p (dst, src) && (!optimize || no_new_pseudos))
8080 matching_memory = true;
8081 else
8082 dst = gen_reg_rtx (mode);
8083 }
8084 if (MEM_P (src) && !matching_memory)
8085 src = force_reg (mode, src);
8086
8087 if (vector_mode)
8088 {
8089 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
8090 set = gen_rtx_SET (VOIDmode, dst, set);
8091 emit_insn (set);
8092 }
8093 else
8094 {
8095 set = gen_rtx_fmt_e (code, mode, src);
8096 set = gen_rtx_SET (VOIDmode, dst, set);
8097 use = gen_rtx_USE (VOIDmode, mask);
8098 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8099 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
8100 }
8101
8102 if (dst != operands[0])
8103 emit_move_insn (operands[0], dst);
8104 }
8105
8106 /* Return TRUE or FALSE depending on whether the first SET in INSN
8107 has source and destination with matching CC modes, and whether the
8108 CC mode is at least as constrained as REQ_MODE. */
8109
8110 int
8111 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
8112 {
8113 rtx set;
8114 enum machine_mode set_mode;
8115
8116 set = PATTERN (insn);
8117 if (GET_CODE (set) == PARALLEL)
8118 set = XVECEXP (set, 0, 0);
8119 if (GET_CODE (set) != SET)
8120 abort ();
8121 if (GET_CODE (SET_SRC (set)) != COMPARE)
8122 abort ();
8123
8124 set_mode = GET_MODE (SET_DEST (set));
8125 switch (set_mode)
8126 {
8127 case CCNOmode:
8128 if (req_mode != CCNOmode
8129 && (req_mode != CCmode
8130 || XEXP (SET_SRC (set), 1) != const0_rtx))
8131 return 0;
8132 break;
8133 case CCmode:
8134 if (req_mode == CCGCmode)
8135 return 0;
8136 /* FALLTHRU */
8137 case CCGCmode:
8138 if (req_mode == CCGOCmode || req_mode == CCNOmode)
8139 return 0;
8140 /* FALLTHRU */
8141 case CCGOCmode:
8142 if (req_mode == CCZmode)
8143 return 0;
8144 /* FALLTHRU */
8145 case CCZmode:
8146 break;
8147
8148 default:
8149 abort ();
8150 }
8151
8152 return (GET_MODE (SET_SRC (set)) == set_mode);
8153 }
8154
8155 /* Generate insn patterns to do an integer compare of OPERANDS. */
8156
8157 static rtx
8158 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
8159 {
8160 enum machine_mode cmpmode;
8161 rtx tmp, flags;
8162
8163 cmpmode = SELECT_CC_MODE (code, op0, op1);
8164 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
8165
8166 /* This is very simple, but making the interface the same as in the
8167 FP case makes the rest of the code easier. */
8168 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
8169 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
8170
8171 /* Return the test that should be put into the flags user, i.e.
8172 the bcc, scc, or cmov instruction. */
8173 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
8174 }
8175
8176 /* Figure out whether to use ordered or unordered fp comparisons.
8177 Return the appropriate mode to use. */
8178
8179 enum machine_mode
8180 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
8181 {
8182 /* ??? In order to make all comparisons reversible, we do all comparisons
8183 non-trapping when compiling for IEEE. Once gcc is able to distinguish
8184 between trapping and nontrapping forms of comparisons, we can make
8185 inequality comparisons trapping again, since that results in better code
8186 when using FCOM based compares. */
8187 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
8188 }
8189
8190 enum machine_mode
8191 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
8192 {
8193 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8194 return ix86_fp_compare_mode (code);
8195 switch (code)
8196 {
8197 /* Only zero flag is needed. */
8198 case EQ: /* ZF=0 */
8199 case NE: /* ZF!=0 */
8200 return CCZmode;
8201 /* Codes needing carry flag. */
8202 case GEU: /* CF=0 */
8203 case GTU: /* CF=0 & ZF=0 */
8204 case LTU: /* CF=1 */
8205 case LEU: /* CF=1 | ZF=1 */
8206 return CCmode;
8207 /* Codes possibly doable only with sign flag when
8208 comparing against zero. */
8209 case GE: /* SF=OF or SF=0 */
8210 case LT: /* SF<>OF or SF=1 */
8211 if (op1 == const0_rtx)
8212 return CCGOCmode;
8213 else
8214 /* For the other cases the carry flag is not required. */
8215 return CCGCmode;
8216 /* Codes doable only with the sign flag when comparing
8217 against zero, but for which no jump instruction exists,
8218 so we need to use relational tests against overflow,
8219 which thus needs to be zero. */
8220 case GT: /* ZF=0 & SF=OF */
8221 case LE: /* ZF=1 | SF<>OF */
8222 if (op1 == const0_rtx)
8223 return CCNOmode;
8224 else
8225 return CCGCmode;
8226 /* The strcmp pattern does (use flags), and combine may ask us for the
8227 proper mode. */
8228 case USE:
8229 return CCmode;
8230 default:
8231 abort ();
8232 }
8233 }
8234
8235 /* Return the fixed registers used for condition codes. */
8236
8237 static bool
8238 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
8239 {
8240 *p1 = FLAGS_REG;
8241 *p2 = FPSR_REG;
8242 return true;
8243 }
8244
8245 /* If two condition code modes are compatible, return a condition code
8246 mode which is compatible with both. Otherwise, return
8247 VOIDmode. */
8248
8249 static enum machine_mode
8250 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
8251 {
8252 if (m1 == m2)
8253 return m1;
8254
8255 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
8256 return VOIDmode;
8257
8258 if ((m1 == CCGCmode && m2 == CCGOCmode)
8259 || (m1 == CCGOCmode && m2 == CCGCmode))
8260 return CCGCmode;
8261
8262 switch (m1)
8263 {
8264 default:
8265 abort ();
8266
8267 case CCmode:
8268 case CCGCmode:
8269 case CCGOCmode:
8270 case CCNOmode:
8271 case CCZmode:
8272 switch (m2)
8273 {
8274 default:
8275 return VOIDmode;
8276
8277 case CCmode:
8278 case CCGCmode:
8279 case CCGOCmode:
8280 case CCNOmode:
8281 case CCZmode:
8282 return CCmode;
8283 }
8284
8285 case CCFPmode:
8286 case CCFPUmode:
8287 /* These are only compatible with themselves, which we already
8288 checked above. */
8289 return VOIDmode;
8290 }
8291 }
8292
8293 /* Return true if we should use an FCOMI instruction for this fp comparison. */
8294
8295 int
8296 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
8297 {
8298 enum rtx_code swapped_code = swap_condition (code);
8299 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
8300 || (ix86_fp_comparison_cost (swapped_code)
8301 == ix86_fp_comparison_fcomi_cost (swapped_code)));
8302 }
8303
8304 /* Swap, force into registers, or otherwise massage the two operands
8305 to a fp comparison. The operands are updated in place; the new
8306 comparison code is returned. */
8307
8308 static enum rtx_code
8309 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
8310 {
8311 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
8312 rtx op0 = *pop0, op1 = *pop1;
8313 enum machine_mode op_mode = GET_MODE (op0);
8314 int is_sse = SSE_REG_P (op0) || SSE_REG_P (op1);
8315
8316 /* All of the unordered compare instructions only work on registers.
8317 The same is true of the fcomi compare instructions. The same is
8318 true of the XFmode compare instructions if not comparing with
8319 zero (ftst insn is used in this case). */
8320
8321 if (!is_sse
8322 && (fpcmp_mode == CCFPUmode
8323 || (op_mode == XFmode
8324 && ! (standard_80387_constant_p (op0) == 1
8325 || standard_80387_constant_p (op1) == 1))
8326 || ix86_use_fcomi_compare (code)))
8327 {
8328 op0 = force_reg (op_mode, op0);
8329 op1 = force_reg (op_mode, op1);
8330 }
8331 else
8332 {
8333 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
8334 things around if they appear profitable, otherwise force op0
8335 into a register. */
8336
8337 if (standard_80387_constant_p (op0) == 0
8338 || (GET_CODE (op0) == MEM
8339 && ! (standard_80387_constant_p (op1) == 0
8340 || GET_CODE (op1) == MEM)))
8341 {
8342 rtx tmp;
8343 tmp = op0, op0 = op1, op1 = tmp;
8344 code = swap_condition (code);
8345 }
8346
8347 if (GET_CODE (op0) != REG)
8348 op0 = force_reg (op_mode, op0);
8349
8350 if (CONSTANT_P (op1))
8351 {
8352 int tmp = standard_80387_constant_p (op1);
8353 if (tmp == 0)
8354 op1 = validize_mem (force_const_mem (op_mode, op1));
8355 else if (tmp == 1)
8356 {
8357 if (TARGET_CMOVE)
8358 op1 = force_reg (op_mode, op1);
8359 }
8360 else
8361 op1 = force_reg (op_mode, op1);
8362 }
8363 }
8364
8365 /* Try to rearrange the comparison to make it cheaper. */
8366 if (ix86_fp_comparison_cost (code)
8367 > ix86_fp_comparison_cost (swap_condition (code))
8368 && (GET_CODE (op1) == REG || !no_new_pseudos))
8369 {
8370 rtx tmp;
8371 tmp = op0, op0 = op1, op1 = tmp;
8372 code = swap_condition (code);
8373 if (GET_CODE (op0) != REG)
8374 op0 = force_reg (op_mode, op0);
8375 }
8376
8377 *pop0 = op0;
8378 *pop1 = op1;
8379 return code;
8380 }
8381
8382 /* Convert the comparison codes we use to represent FP comparisons to the
8383 integer code that will result in a proper branch. Return UNKNOWN if no
8384 such code is available. */
8385
8386 enum rtx_code
8387 ix86_fp_compare_code_to_integer (enum rtx_code code)
8388 {
8389 switch (code)
8390 {
8391 case GT:
8392 return GTU;
8393 case GE:
8394 return GEU;
8395 case ORDERED:
8396 case UNORDERED:
8397 return code;
8398 break;
8399 case UNEQ:
8400 return EQ;
8401 break;
8402 case UNLT:
8403 return LTU;
8404 break;
8405 case UNLE:
8406 return LEU;
8407 break;
8408 case LTGT:
8409 return NE;
8410 break;
8411 default:
8412 return UNKNOWN;
8413 }
8414 }
8415
8416 /* Split comparison code CODE into comparisons we can do using branch
8417 instructions. BYPASS_CODE is the comparison code for the branch that
8418 will branch around FIRST_CODE and SECOND_CODE. If one of the branches
8419 is not required, its value is set to UNKNOWN.
8420 We never require more than two branches. */
8421
8422 void
8423 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
8424 enum rtx_code *first_code,
8425 enum rtx_code *second_code)
8426 {
8427 *first_code = code;
8428 *bypass_code = UNKNOWN;
8429 *second_code = UNKNOWN;
8430
8431 /* The fcomi comparison sets flags as follows:
8432
8433 cmp ZF PF CF
8434 > 0 0 0
8435 < 0 0 1
8436 = 1 0 0
8437 un 1 1 1 */
8438
8439 switch (code)
8440 {
8441 case GT: /* GTU - CF=0 & ZF=0 */
8442 case GE: /* GEU - CF=0 */
8443 case ORDERED: /* PF=0 */
8444 case UNORDERED: /* PF=1 */
8445 case UNEQ: /* EQ - ZF=1 */
8446 case UNLT: /* LTU - CF=1 */
8447 case UNLE: /* LEU - CF=1 | ZF=1 */
8448 case LTGT: /* EQ - ZF=0 */
8449 break;
8450 case LT: /* LTU - CF=1 - fails on unordered */
8451 *first_code = UNLT;
8452 *bypass_code = UNORDERED;
8453 break;
8454 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
8455 *first_code = UNLE;
8456 *bypass_code = UNORDERED;
8457 break;
8458 case EQ: /* EQ - ZF=1 - fails on unordered */
8459 *first_code = UNEQ;
8460 *bypass_code = UNORDERED;
8461 break;
8462 case NE: /* NE - ZF=0 - fails on unordered */
8463 *first_code = LTGT;
8464 *second_code = UNORDERED;
8465 break;
8466 case UNGE: /* GEU - CF=0 - fails on unordered */
8467 *first_code = GE;
8468 *second_code = UNORDERED;
8469 break;
8470 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
8471 *first_code = GT;
8472 *second_code = UNORDERED;
8473 break;
8474 default:
8475 abort ();
8476 }
8477 if (!TARGET_IEEE_FP)
8478 {
8479 *second_code = UNKNOWN;
8480 *bypass_code = UNKNOWN;
8481 }
8482 }
8483
8484 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
8485 All of the following functions use the number of instructions as the cost metric.
8486 In the future this should be tweaked to compute bytes for optimize_size and to
8487 take into account the performance of various instructions on various CPUs. */
8488 static int
8489 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
8490 {
8491 if (!TARGET_IEEE_FP)
8492 return 4;
8493 /* The cost of code output by ix86_expand_fp_compare. */
8494 switch (code)
8495 {
8496 case UNLE:
8497 case UNLT:
8498 case LTGT:
8499 case GT:
8500 case GE:
8501 case UNORDERED:
8502 case ORDERED:
8503 case UNEQ:
8504 return 4;
8505 break;
8506 case LT:
8507 case NE:
8508 case EQ:
8509 case UNGE:
8510 return 5;
8511 break;
8512 case LE:
8513 case UNGT:
8514 return 6;
8515 break;
8516 default:
8517 abort ();
8518 }
8519 }
8520
8521 /* Return cost of comparison done using fcomi operation.
8522 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8523 static int
8524 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
8525 {
8526 enum rtx_code bypass_code, first_code, second_code;
8527 /* Return an arbitrarily high cost when the instruction is not supported;
8528 this prevents gcc from using it. */
8529 if (!TARGET_CMOVE)
8530 return 1024;
8531 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8532 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
8533 }
8534
8535 /* Return cost of comparison done using sahf operation.
8536 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8537 static int
8538 ix86_fp_comparison_sahf_cost (enum rtx_code code)
8539 {
8540 enum rtx_code bypass_code, first_code, second_code;
8541 /* Return an arbitrarily high cost when the instruction is not preferred;
8542 this keeps gcc from using it. */
8543 if (!TARGET_USE_SAHF && !optimize_size)
8544 return 1024;
8545 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8546 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
8547 }
8548
8549 /* Compute cost of the comparison done using any method.
8550 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8551 static int
8552 ix86_fp_comparison_cost (enum rtx_code code)
8553 {
8554 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
8555 int min;
8556
8557 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
8558 sahf_cost = ix86_fp_comparison_sahf_cost (code);
8559
8560 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
8561 if (min > sahf_cost)
8562 min = sahf_cost;
8563 if (min > fcomi_cost)
8564 min = fcomi_cost;
8565 return min;
8566 }
8567
8568 /* Generate insn patterns to do a floating point compare of OPERANDS. */
8569
8570 static rtx
8571 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
8572 rtx *second_test, rtx *bypass_test)
8573 {
8574 enum machine_mode fpcmp_mode, intcmp_mode;
8575 rtx tmp, tmp2;
8576 int cost = ix86_fp_comparison_cost (code);
8577 enum rtx_code bypass_code, first_code, second_code;
8578
8579 fpcmp_mode = ix86_fp_compare_mode (code);
8580 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
8581
8582 if (second_test)
8583 *second_test = NULL_RTX;
8584 if (bypass_test)
8585 *bypass_test = NULL_RTX;
8586
8587 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8588
8589 /* Do fcomi/sahf based test when profitable. */
8590 if ((bypass_code == UNKNOWN || bypass_test)
8591 && (second_code == UNKNOWN || second_test)
8592 && ix86_fp_comparison_arithmetics_cost (code) > cost)
8593 {
8594 if (TARGET_CMOVE)
8595 {
8596 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8597 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
8598 tmp);
8599 emit_insn (tmp);
8600 }
8601 else
8602 {
8603 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8604 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8605 if (!scratch)
8606 scratch = gen_reg_rtx (HImode);
8607 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8608 emit_insn (gen_x86_sahf_1 (scratch));
8609 }
8610
8611 /* The FP codes work out to act like unsigned. */
8612 intcmp_mode = fpcmp_mode;
8613 code = first_code;
8614 if (bypass_code != UNKNOWN)
8615 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
8616 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8617 const0_rtx);
8618 if (second_code != UNKNOWN)
8619 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
8620 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8621 const0_rtx);
8622 }
8623 else
8624 {
8625 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
8626 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8627 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8628 if (!scratch)
8629 scratch = gen_reg_rtx (HImode);
8630 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8631
8632 /* In the unordered case, we have to check C2 for NaN's, which
8633 doesn't happen to work out to anything nice combination-wise.
8634 So do some bit twiddling on the value we've got in AH to come
8635 up with an appropriate set of condition codes. */
8636
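/* The tests below examine the upper byte of the scratch word, where the
   FPU condition bits land after fnstsw: C0 = 0x01, C2 = 0x04, C3 = 0x40.
   Hence 0x45 masks C3|C2|C0, 0x44 masks C3|C2 and 0x05 masks C2|C0.  */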
8637 intcmp_mode = CCNOmode;
8638 switch (code)
8639 {
8640 case GT:
8641 case UNGT:
8642 if (code == GT || !TARGET_IEEE_FP)
8643 {
8644 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8645 code = EQ;
8646 }
8647 else
8648 {
8649 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8650 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8651 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
8652 intcmp_mode = CCmode;
8653 code = GEU;
8654 }
8655 break;
8656 case LT:
8657 case UNLT:
8658 if (code == LT && TARGET_IEEE_FP)
8659 {
8660 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8661 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
8662 intcmp_mode = CCmode;
8663 code = EQ;
8664 }
8665 else
8666 {
8667 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
8668 code = NE;
8669 }
8670 break;
8671 case GE:
8672 case UNGE:
8673 if (code == GE || !TARGET_IEEE_FP)
8674 {
8675 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
8676 code = EQ;
8677 }
8678 else
8679 {
8680 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8681 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8682 GEN_INT (0x01)));
8683 code = NE;
8684 }
8685 break;
8686 case LE:
8687 case UNLE:
8688 if (code == LE && TARGET_IEEE_FP)
8689 {
8690 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8691 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8692 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8693 intcmp_mode = CCmode;
8694 code = LTU;
8695 }
8696 else
8697 {
8698 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8699 code = NE;
8700 }
8701 break;
8702 case EQ:
8703 case UNEQ:
8704 if (code == EQ && TARGET_IEEE_FP)
8705 {
8706 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8707 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8708 intcmp_mode = CCmode;
8709 code = EQ;
8710 }
8711 else
8712 {
8713 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8714 code = NE;
8715 break;
8716 }
8717 break;
8718 case NE:
8719 case LTGT:
8720 if (code == NE && TARGET_IEEE_FP)
8721 {
8722 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8723 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8724 GEN_INT (0x40)));
8725 code = NE;
8726 }
8727 else
8728 {
8729 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8730 code = EQ;
8731 }
8732 break;
8733
8734 case UNORDERED:
8735 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8736 code = NE;
8737 break;
8738 case ORDERED:
8739 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8740 code = EQ;
8741 break;
8742
8743 default:
8744 abort ();
8745 }
8746 }
8747
8748 /* Return the test that should be put into the flags user, i.e.
8749 the bcc, scc, or cmov instruction. */
8750 return gen_rtx_fmt_ee (code, VOIDmode,
8751 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8752 const0_rtx);
8753 }
8754
8755 rtx
8756 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
8757 {
8758 rtx op0, op1, ret;
8759 op0 = ix86_compare_op0;
8760 op1 = ix86_compare_op1;
8761
8762 if (second_test)
8763 *second_test = NULL_RTX;
8764 if (bypass_test)
8765 *bypass_test = NULL_RTX;
8766
8767 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8768 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
8769 second_test, bypass_test);
8770 else
8771 ret = ix86_expand_int_compare (code, op0, op1);
8772
8773 return ret;
8774 }
8775
8776 /* Return true if CODE will result in a nontrivial jump sequence. */
8777 bool
8778 ix86_fp_jump_nontrivial_p (enum rtx_code code)
8779 {
8780 enum rtx_code bypass_code, first_code, second_code;
8781 if (!TARGET_CMOVE)
8782 return true;
8783 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8784 return bypass_code != UNKNOWN || second_code != UNKNOWN;
8785 }
8786
8787 void
8788 ix86_expand_branch (enum rtx_code code, rtx label)
8789 {
8790 rtx tmp;
8791
8792 switch (GET_MODE (ix86_compare_op0))
8793 {
8794 case QImode:
8795 case HImode:
8796 case SImode:
8797 simple:
8798 tmp = ix86_expand_compare (code, NULL, NULL);
8799 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8800 gen_rtx_LABEL_REF (VOIDmode, label),
8801 pc_rtx);
8802 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
8803 return;
8804
8805 case SFmode:
8806 case DFmode:
8807 case XFmode:
8808 {
8809 rtvec vec;
8810 int use_fcomi;
8811 enum rtx_code bypass_code, first_code, second_code;
8812
8813 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
8814 &ix86_compare_op1);
8815
8816 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8817
8818 /* Check whether we will use the natural sequence with one jump. If
8819 so, we can expand the jump early. Otherwise delay expansion by
8820 creating a compound insn so as not to confuse the optimizers. */
8821 if (bypass_code == UNKNOWN && second_code == UNKNOWN
8822 && TARGET_CMOVE)
8823 {
8824 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
8825 gen_rtx_LABEL_REF (VOIDmode, label),
8826 pc_rtx, NULL_RTX, NULL_RTX);
8827 }
8828 else
8829 {
8830 tmp = gen_rtx_fmt_ee (code, VOIDmode,
8831 ix86_compare_op0, ix86_compare_op1);
8832 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8833 gen_rtx_LABEL_REF (VOIDmode, label),
8834 pc_rtx);
8835 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
8836
8837 use_fcomi = ix86_use_fcomi_compare (code);
8838 vec = rtvec_alloc (3 + !use_fcomi);
8839 RTVEC_ELT (vec, 0) = tmp;
8840 RTVEC_ELT (vec, 1)
8841 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
8842 RTVEC_ELT (vec, 2)
8843 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
8844 if (! use_fcomi)
8845 RTVEC_ELT (vec, 3)
8846 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
8847
8848 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
8849 }
8850 return;
8851 }
8852
8853 case DImode:
8854 if (TARGET_64BIT)
8855 goto simple;
8856 /* Expand DImode branch into multiple compare+branch. */
8857 {
8858 rtx lo[2], hi[2], label2;
8859 enum rtx_code code1, code2, code3;
8860
8861 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
8862 {
8863 tmp = ix86_compare_op0;
8864 ix86_compare_op0 = ix86_compare_op1;
8865 ix86_compare_op1 = tmp;
8866 code = swap_condition (code);
8867 }
8868 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
8869 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
8870
8871 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
8872 avoid two branches. This costs one extra insn, so disable when
8873 optimizing for size. */
8874
8875 if ((code == EQ || code == NE)
8876 && (!optimize_size
8877 || hi[1] == const0_rtx || lo[1] == const0_rtx))
8878 {
8879 rtx xor0, xor1;
8880
8881 xor1 = hi[0];
8882 if (hi[1] != const0_rtx)
8883 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
8884 NULL_RTX, 0, OPTAB_WIDEN);
8885
8886 xor0 = lo[0];
8887 if (lo[1] != const0_rtx)
8888 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
8889 NULL_RTX, 0, OPTAB_WIDEN);
8890
8891 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
8892 NULL_RTX, 0, OPTAB_WIDEN);
8893
8894 ix86_compare_op0 = tmp;
8895 ix86_compare_op1 = const0_rtx;
8896 ix86_expand_branch (code, label);
8897 return;
8898 }
8899
8900 /* Otherwise, if we are doing a less-than or greater-than-or-equal
8901 comparison, op1 is a constant and the low word is zero, then we can
8902 just examine the high word. */
8903
8904 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
8905 switch (code)
8906 {
8907 case LT: case LTU: case GE: case GEU:
8908 ix86_compare_op0 = hi[0];
8909 ix86_compare_op1 = hi[1];
8910 ix86_expand_branch (code, label);
8911 return;
8912 default:
8913 break;
8914 }
8915
8916 /* Otherwise, we need two or three jumps. */
8917
8918 label2 = gen_label_rtx ();
8919
8920 code1 = code;
8921 code2 = swap_condition (code);
8922 code3 = unsigned_condition (code);
8923
8924 switch (code)
8925 {
8926 case LT: case GT: case LTU: case GTU:
8927 break;
8928
8929 case LE: code1 = LT; code2 = GT; break;
8930 case GE: code1 = GT; code2 = LT; break;
8931 case LEU: code1 = LTU; code2 = GTU; break;
8932 case GEU: code1 = GTU; code2 = LTU; break;
8933
8934 case EQ: code1 = UNKNOWN; code2 = NE; break;
8935 case NE: code2 = UNKNOWN; break;
8936
8937 default:
8938 abort ();
8939 }
8940
8941 /*
8942 * a < b =>
8943 * if (hi(a) < hi(b)) goto true;
8944 * if (hi(a) > hi(b)) goto false;
8945 * if (lo(a) < lo(b)) goto true;
8946 * false:
8947 */
8948
8949 ix86_compare_op0 = hi[0];
8950 ix86_compare_op1 = hi[1];
8951
8952 if (code1 != UNKNOWN)
8953 ix86_expand_branch (code1, label);
8954 if (code2 != UNKNOWN)
8955 ix86_expand_branch (code2, label2);
8956
8957 ix86_compare_op0 = lo[0];
8958 ix86_compare_op1 = lo[1];
8959 ix86_expand_branch (code3, label);
8960
8961 if (code2 != UNKNOWN)
8962 emit_label (label2);
8963 return;
8964 }
8965
8966 default:
8967 abort ();
8968 }
8969 }
8970
8971 /* Split branch based on floating point condition. */
8972 void
8973 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
8974 rtx target1, rtx target2, rtx tmp, rtx pushed)
8975 {
8976 rtx second, bypass;
8977 rtx label = NULL_RTX;
8978 rtx condition;
8979 int bypass_probability = -1, second_probability = -1, probability = -1;
8980 rtx i;
8981
8982 if (target2 != pc_rtx)
8983 {
8984 rtx tmp = target2;
8985 code = reverse_condition_maybe_unordered (code);
8986 target2 = target1;
8987 target1 = tmp;
8988 }
8989
8990 condition = ix86_expand_fp_compare (code, op1, op2,
8991 tmp, &second, &bypass);
8992
8993 /* Remove pushed operand from stack. */
8994 if (pushed)
8995 ix86_free_from_memory (GET_MODE (pushed));
8996
8997 if (split_branch_probability >= 0)
8998 {
8999 /* Distribute the probabilities across the jumps.
9000 Assume that BYPASS and SECOND always test
9001 for UNORDERED. */
9002 probability = split_branch_probability;
9003
9004 /* A value of 1 is low enough that the probability does not need
9005 to be updated. Later we may run some experiments and see
9006 whether unordered values are more frequent in practice. */
9007 if (bypass)
9008 bypass_probability = 1;
9009 if (second)
9010 second_probability = 1;
9011 }
9012 if (bypass != NULL_RTX)
9013 {
9014 label = gen_label_rtx ();
9015 i = emit_jump_insn (gen_rtx_SET
9016 (VOIDmode, pc_rtx,
9017 gen_rtx_IF_THEN_ELSE (VOIDmode,
9018 bypass,
9019 gen_rtx_LABEL_REF (VOIDmode,
9020 label),
9021 pc_rtx)));
9022 if (bypass_probability >= 0)
9023 REG_NOTES (i)
9024 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9025 GEN_INT (bypass_probability),
9026 REG_NOTES (i));
9027 }
9028 i = emit_jump_insn (gen_rtx_SET
9029 (VOIDmode, pc_rtx,
9030 gen_rtx_IF_THEN_ELSE (VOIDmode,
9031 condition, target1, target2)));
9032 if (probability >= 0)
9033 REG_NOTES (i)
9034 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9035 GEN_INT (probability),
9036 REG_NOTES (i));
9037 if (second != NULL_RTX)
9038 {
9039 i = emit_jump_insn (gen_rtx_SET
9040 (VOIDmode, pc_rtx,
9041 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9042 target2)));
9043 if (second_probability >= 0)
9044 REG_NOTES (i)
9045 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9046 GEN_INT (second_probability),
9047 REG_NOTES (i));
9048 }
9049 if (label != NULL_RTX)
9050 emit_label (label);
9051 }
9052
9053 int
9054 ix86_expand_setcc (enum rtx_code code, rtx dest)
9055 {
9056 rtx ret, tmp, tmpreg, equiv;
9057 rtx second_test, bypass_test;
9058
9059 if (GET_MODE (ix86_compare_op0) == DImode
9060 && !TARGET_64BIT)
9061 return 0; /* FAIL */
9062
9063 if (GET_MODE (dest) != QImode)
9064 abort ();
9065
9066 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9067 PUT_MODE (ret, QImode);
9068
9069 tmp = dest;
9070 tmpreg = dest;
9071
9072 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
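/* If the FP comparison needed an extra jump condition (see
   ix86_fp_comparison_codes), compute it into a second QImode value and
   combine the two: a bypass test is reversed and ANDed in, while a
   second test is ORed in.  */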
9073 if (bypass_test || second_test)
9074 {
9075 rtx test = second_test;
9076 int bypass = 0;
9077 rtx tmp2 = gen_reg_rtx (QImode);
9078 if (bypass_test)
9079 {
9080 if (second_test)
9081 abort ();
9082 test = bypass_test;
9083 bypass = 1;
9084 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
9085 }
9086 PUT_MODE (test, QImode);
9087 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
9088
9089 if (bypass)
9090 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
9091 else
9092 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
9093 }
9094
9095 /* Attach a REG_EQUAL note describing the comparison result. */
9096 equiv = simplify_gen_relational (code, QImode,
9097 GET_MODE (ix86_compare_op0),
9098 ix86_compare_op0, ix86_compare_op1);
9099 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
9100
9101 return 1; /* DONE */
9102 }
9103
9104 /* Expand a comparison setting or clearing the carry flag. Return true
9105 when successful and set *POP for the operation. */
9106 static bool
9107 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
9108 {
9109 enum machine_mode mode =
9110 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
9111
9112 /* Do not handle DImode compares, which go through a special path. Also we
9113 can't deal with FP compares yet; it would be possible to add that. */
9114 if ((mode == DImode && !TARGET_64BIT))
9115 return false;
9116 if (FLOAT_MODE_P (mode))
9117 {
9118 rtx second_test = NULL, bypass_test = NULL;
9119 rtx compare_op, compare_seq;
9120
9121 /* Shortcut: the following common codes never translate into carry-flag compares. */
9122 if (code == EQ || code == NE || code == UNEQ || code == LTGT
9123 || code == ORDERED || code == UNORDERED)
9124 return false;
9125
9126 /* These comparisons require the zero flag; swap the operands so they no longer do. */
9127 if ((code == GT || code == UNLE || code == LE || code == UNGT)
9128 && !TARGET_IEEE_FP)
9129 {
9130 rtx tmp = op0;
9131 op0 = op1;
9132 op1 = tmp;
9133 code = swap_condition (code);
9134 }
9135
9136 /* Try to expand the comparison and verify that we end up with a carry-flag
9137 based comparison. This fails to be true only when we decide to expand the
9138 comparison using arithmetic, which is not a common scenario. */
9139 start_sequence ();
9140 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9141 &second_test, &bypass_test);
9142 compare_seq = get_insns ();
9143 end_sequence ();
9144
9145 if (second_test || bypass_test)
9146 return false;
9147 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9148 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9149 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
9150 else
9151 code = GET_CODE (compare_op);
9152 if (code != LTU && code != GEU)
9153 return false;
9154 emit_insn (compare_seq);
9155 *pop = compare_op;
9156 return true;
9157 }
9158 if (!INTEGRAL_MODE_P (mode))
9159 return false;
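/* Only LTU and GEU read the carry flag directly.  The cases below rewrite
   the remaining codes into an equivalent unsigned comparison, adjusting
   the constant operand where necessary, so that the result can still be
   consumed through the carry flag (e.g. by sbb).  */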
9160 switch (code)
9161 {
9162 case LTU:
9163 case GEU:
9164 break;
9165
9166 /* Convert a==0 into (unsigned)a<1. */
9167 case EQ:
9168 case NE:
9169 if (op1 != const0_rtx)
9170 return false;
9171 op1 = const1_rtx;
9172 code = (code == EQ ? LTU : GEU);
9173 break;
9174
9175 /* Convert a>b into b<a or a>=b-1. */
9176 case GTU:
9177 case LEU:
9178 if (GET_CODE (op1) == CONST_INT)
9179 {
9180 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
9181 /* Bail out on overflow. We could still swap the operands, but that
9182 would force loading the constant into a register. */
9183 if (op1 == const0_rtx
9184 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
9185 return false;
9186 code = (code == GTU ? GEU : LTU);
9187 }
9188 else
9189 {
9190 rtx tmp = op1;
9191 op1 = op0;
9192 op0 = tmp;
9193 code = (code == GTU ? LTU : GEU);
9194 }
9195 break;
9196
9197 /* Convert a>=0 into (unsigned)a<0x80000000. */
9198 case LT:
9199 case GE:
9200 if (mode == DImode || op1 != const0_rtx)
9201 return false;
9202 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9203 code = (code == LT ? GEU : LTU);
9204 break;
9205 case LE:
9206 case GT:
9207 if (mode == DImode || op1 != constm1_rtx)
9208 return false;
9209 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9210 code = (code == LE ? GEU : LTU);
9211 break;
9212
9213 default:
9214 return false;
9215 }
9216 /* Swapping the operands may cause a constant to appear as the first operand. */
9217 if (!nonimmediate_operand (op0, VOIDmode))
9218 {
9219 if (no_new_pseudos)
9220 return false;
9221 op0 = force_reg (mode, op0);
9222 }
9223 ix86_compare_op0 = op0;
9224 ix86_compare_op1 = op1;
9225 *pop = ix86_expand_compare (code, NULL, NULL);
9226 if (GET_CODE (*pop) != LTU && GET_CODE (*pop) != GEU)
9227 abort ();
9228 return true;
9229 }
9230
9231 int
9232 ix86_expand_int_movcc (rtx operands[])
9233 {
9234 enum rtx_code code = GET_CODE (operands[1]), compare_code;
9235 rtx compare_seq, compare_op;
9236 rtx second_test, bypass_test;
9237 enum machine_mode mode = GET_MODE (operands[0]);
9238 bool sign_bit_compare_p = false;
9239
9240 start_sequence ();
9241 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9242 compare_seq = get_insns ();
9243 end_sequence ();
9244
9245 compare_code = GET_CODE (compare_op);
9246
9247 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
9248 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
9249 sign_bit_compare_p = true;
9250
9251 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
9252 HImode insns, we'd be swallowed in word prefix ops. */
9253
9254 if ((mode != HImode || TARGET_FAST_PREFIX)
9255 && (mode != DImode || TARGET_64BIT)
9256 && GET_CODE (operands[2]) == CONST_INT
9257 && GET_CODE (operands[3]) == CONST_INT)
9258 {
9259 rtx out = operands[0];
9260 HOST_WIDE_INT ct = INTVAL (operands[2]);
9261 HOST_WIDE_INT cf = INTVAL (operands[3]);
9262 HOST_WIDE_INT diff;
9263
9264 diff = ct - cf;
9265 /* Sign bit compares are better done using shifts than by using
9266 sbb. */
9267 if (sign_bit_compare_p
9268 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9269 ix86_compare_op1, &compare_op))
9270 {
9271 /* Detect overlap between destination and compare sources. */
9272 rtx tmp = out;
9273
9274 if (!sign_bit_compare_p)
9275 {
9276 bool fpcmp = false;
9277
9278 compare_code = GET_CODE (compare_op);
9279
9280 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9281 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9282 {
9283 fpcmp = true;
9284 compare_code = ix86_fp_compare_code_to_integer (compare_code);
9285 }
9286
9287 /* To simplify the rest of the code, restrict to the GEU case. */
9288 if (compare_code == LTU)
9289 {
9290 HOST_WIDE_INT tmp = ct;
9291 ct = cf;
9292 cf = tmp;
9293 compare_code = reverse_condition (compare_code);
9294 code = reverse_condition (code);
9295 }
9296 else
9297 {
9298 if (fpcmp)
9299 PUT_CODE (compare_op,
9300 reverse_condition_maybe_unordered
9301 (GET_CODE (compare_op)));
9302 else
9303 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9304 }
9305 diff = ct - cf;
9306
9307 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
9308 || reg_overlap_mentioned_p (out, ix86_compare_op1))
9309 tmp = gen_reg_rtx (mode);
9310
9311 if (mode == DImode)
9312 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
9313 else
9314 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
9315 }
9316 else
9317 {
9318 if (code == GT || code == GE)
9319 code = reverse_condition (code);
9320 else
9321 {
9322 HOST_WIDE_INT tmp = ct;
9323 ct = cf;
9324 cf = tmp;
9325 diff = ct - cf;
9326 }
9327 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
9328 ix86_compare_op1, VOIDmode, 0, -1);
9329 }
9330
9331 if (diff == 1)
9332 {
9333 /*
9334 * cmpl op0,op1
9335 * sbbl dest,dest
9336 * [addl dest, ct]
9337 *
9338 * Size 5 - 8.
9339 */
9340 if (ct)
9341 tmp = expand_simple_binop (mode, PLUS,
9342 tmp, GEN_INT (ct),
9343 copy_rtx (tmp), 1, OPTAB_DIRECT);
9344 }
9345 else if (cf == -1)
9346 {
9347 /*
9348 * cmpl op0,op1
9349 * sbbl dest,dest
9350 * orl $ct, dest
9351 *
9352 * Size 8.
9353 */
9354 tmp = expand_simple_binop (mode, IOR,
9355 tmp, GEN_INT (ct),
9356 copy_rtx (tmp), 1, OPTAB_DIRECT);
9357 }
9358 else if (diff == -1 && ct)
9359 {
9360 /*
9361 * cmpl op0,op1
9362 * sbbl dest,dest
9363 * notl dest
9364 * [addl dest, cf]
9365 *
9366 * Size 8 - 11.
9367 */
9368 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9369 if (cf)
9370 tmp = expand_simple_binop (mode, PLUS,
9371 copy_rtx (tmp), GEN_INT (cf),
9372 copy_rtx (tmp), 1, OPTAB_DIRECT);
9373 }
9374 else
9375 {
9376 /*
9377 * cmpl op0,op1
9378 * sbbl dest,dest
9379 * [notl dest]
9380 * andl cf - ct, dest
9381 * [addl dest, ct]
9382 *
9383 * Size 8 - 11.
9384 */
9385
9386 if (cf == 0)
9387 {
9388 cf = ct;
9389 ct = 0;
9390 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9391 }
9392
9393 tmp = expand_simple_binop (mode, AND,
9394 copy_rtx (tmp),
9395 gen_int_mode (cf - ct, mode),
9396 copy_rtx (tmp), 1, OPTAB_DIRECT);
9397 if (ct)
9398 tmp = expand_simple_binop (mode, PLUS,
9399 copy_rtx (tmp), GEN_INT (ct),
9400 copy_rtx (tmp), 1, OPTAB_DIRECT);
9401 }
9402
9403 if (!rtx_equal_p (tmp, out))
9404 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
9405
9406 return 1; /* DONE */
9407 }
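/* Illustration of the sbb idiom used above (a sketch in C-like pseudocode,
   not part of the generated sequence):
     t  = cond ? -1 : 0;          (cmpl ; sbbl t,t)
     t &= cf - ct;                (the all-ones mask keeps cf - ct)
     t += ct;                     (result: cond ? cf : ct)
   The diff == 1, cf == -1 and diff == -1 branches handled above are the
   special cases in which the and and/or the add can be dropped or
   replaced by or/not.  */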
9408
9409 if (diff < 0)
9410 {
9411 HOST_WIDE_INT tmp;
9412 tmp = ct, ct = cf, cf = tmp;
9413 diff = -diff;
9414 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9415 {
9416 /* We may be reversing an unordered compare to a normal compare, which
9417 is not valid in general (we may convert a non-trapping condition
9418 to a trapping one); however, on i386 we currently emit all
9419 comparisons unordered. */
9420 compare_code = reverse_condition_maybe_unordered (compare_code);
9421 code = reverse_condition_maybe_unordered (code);
9422 }
9423 else
9424 {
9425 compare_code = reverse_condition (compare_code);
9426 code = reverse_condition (code);
9427 }
9428 }
9429
9430 compare_code = UNKNOWN;
9431 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
9432 && GET_CODE (ix86_compare_op1) == CONST_INT)
9433 {
9434 if (ix86_compare_op1 == const0_rtx
9435 && (code == LT || code == GE))
9436 compare_code = code;
9437 else if (ix86_compare_op1 == constm1_rtx)
9438 {
9439 if (code == LE)
9440 compare_code = LT;
9441 else if (code == GT)
9442 compare_code = GE;
9443 }
9444 }
9445
9446 /* Optimize dest = (op0 < 0) ? -1 : cf. */
9447 if (compare_code != UNKNOWN
9448 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
9449 && (cf == -1 || ct == -1))
9450 {
9451 /* If lea code below could be used, only optimize
9452 if it results in a 2 insn sequence. */
9453
9454 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
9455 || diff == 3 || diff == 5 || diff == 9)
9456 || (compare_code == LT && ct == -1)
9457 || (compare_code == GE && cf == -1))
9458 {
9459 /*
9460 * notl op1 (if necessary)
9461 * sarl $31, op1
9462 * orl cf, op1
9463 */
9464 if (ct != -1)
9465 {
9466 cf = ct;
9467 ct = -1;
9468 code = reverse_condition (code);
9469 }
9470
9471 out = emit_store_flag (out, code, ix86_compare_op0,
9472 ix86_compare_op1, VOIDmode, 0, -1);
9473
9474 out = expand_simple_binop (mode, IOR,
9475 out, GEN_INT (cf),
9476 out, 1, OPTAB_DIRECT);
9477 if (out != operands[0])
9478 emit_move_insn (operands[0], out);
9479
9480 return 1; /* DONE */
9481 }
9482 }
9483
9484
9485 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
9486 || diff == 3 || diff == 5 || diff == 9)
9487 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
9488 && (mode != DImode
9489 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
9490 {
9491 /*
9492 * xorl dest,dest
9493 * cmpl op1,op2
9494 * setcc dest
9495 * lea cf(dest*(ct-cf)),dest
9496 *
9497 * Size 14.
9498 *
9499 * This also catches the degenerate setcc-only case.
9500 */
9501
9502 rtx tmp;
9503 int nops;
9504
9505 out = emit_store_flag (out, code, ix86_compare_op0,
9506 ix86_compare_op1, VOIDmode, 0, 1);
9507
9508 nops = 0;
9509 /* On x86_64 the lea instruction operates on Pmode, so we need
9510 to do the arithmetic in the proper mode to match. */
9511 if (diff == 1)
9512 tmp = copy_rtx (out);
9513 else
9514 {
9515 rtx out1;
9516 out1 = copy_rtx (out);
9517 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
9518 nops++;
9519 if (diff & 1)
9520 {
9521 tmp = gen_rtx_PLUS (mode, tmp, out1);
9522 nops++;
9523 }
9524 }
9525 if (cf != 0)
9526 {
9527 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
9528 nops++;
9529 }
9530 if (!rtx_equal_p (tmp, out))
9531 {
9532 if (nops == 1)
9533 out = force_operand (tmp, copy_rtx (out));
9534 else
9535 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
9536 }
9537 if (!rtx_equal_p (out, operands[0]))
9538 emit_move_insn (operands[0], copy_rtx (out));
9539
9540 return 1; /* DONE */
9541 }
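/* Illustration of the lea form above (a sketch; diff = ct - cf is assumed
   to be 1, 2, 3, 4, 5, 8 or 9 so a single lea can scale it):
     out = cond;                  (setcc: 0 or 1)
     out = out * diff + cf;       (lea: cf when false, ct when true)  */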
9542
9543 /*
9544 * General case: Jumpful:
9545 * xorl dest,dest cmpl op1, op2
9546 * cmpl op1, op2 movl ct, dest
9547 * setcc dest jcc 1f
9548 * decl dest movl cf, dest
9549 * andl (cf-ct),dest 1:
9550 * addl ct,dest
9551 *
9552 * Size 20. Size 14.
9553 *
9554 * This is reasonably steep, but branch mispredict costs are
9555 * high on modern cpus, so consider failing only if optimizing
9556 * for space.
9557 */
9558
9559 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9560 && BRANCH_COST >= 2)
9561 {
9562 if (cf == 0)
9563 {
9564 cf = ct;
9565 ct = 0;
9566 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9567 /* We may be reversing an unordered compare to a normal compare,
9568 which is not valid in general (we may convert a non-trapping
9569 condition to a trapping one); however, on i386 we currently
9570 emit all comparisons unordered. */
9571 code = reverse_condition_maybe_unordered (code);
9572 else
9573 {
9574 code = reverse_condition (code);
9575 if (compare_code != UNKNOWN)
9576 compare_code = reverse_condition (compare_code);
9577 }
9578 }
9579
9580 if (compare_code != UNKNOWN)
9581 {
9582 /* notl op1 (if needed)
9583 sarl $31, op1
9584 andl (cf-ct), op1
9585 addl ct, op1
9586
9587 For x < 0 (resp. x <= -1) there will be no notl,
9588 so if possible swap the constants to get rid of the
9589 complement.
9590 True/false will be -1/0 while code below (store flag
9591 followed by decrement) is 0/-1, so the constants need
9592 to be exchanged once more. */
9593
9594 if (compare_code == GE || !cf)
9595 {
9596 code = reverse_condition (code);
9597 compare_code = LT;
9598 }
9599 else
9600 {
9601 HOST_WIDE_INT tmp = cf;
9602 cf = ct;
9603 ct = tmp;
9604 }
9605
9606 out = emit_store_flag (out, code, ix86_compare_op0,
9607 ix86_compare_op1, VOIDmode, 0, -1);
9608 }
9609 else
9610 {
9611 out = emit_store_flag (out, code, ix86_compare_op0,
9612 ix86_compare_op1, VOIDmode, 0, 1);
9613
9614 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
9615 copy_rtx (out), 1, OPTAB_DIRECT);
9616 }
9617
9618 out = expand_simple_binop (mode, AND, copy_rtx (out),
9619 gen_int_mode (cf - ct, mode),
9620 copy_rtx (out), 1, OPTAB_DIRECT);
9621 if (ct)
9622 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
9623 copy_rtx (out), 1, OPTAB_DIRECT);
9624 if (!rtx_equal_p (out, operands[0]))
9625 emit_move_insn (operands[0], copy_rtx (out));
9626
9627 return 1; /* DONE */
9628 }
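/* Illustration of the branchless path above (a sketch):
     out  = cond;                 (setcc: 1 or 0)
     out -= 1;                    (now 0 or -1)
     out &= cf - ct;
     out += ct;                   (result: cond ? ct : cf)
   The sign-bit variant stores the flag as 0/-1 directly and skips the
   decrement.  */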
9629 }
9630
9631 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9632 {
9633 /* Try a few things more with specific constants and a variable. */
9634
9635 optab op;
9636 rtx var, orig_out, out, tmp;
9637
9638 if (BRANCH_COST <= 2)
9639 return 0; /* FAIL */
9640
9641 /* If one of the two operands is an interesting constant, load a
9642 constant with the above and mask it in with a logical operation. */
9643
9644 if (GET_CODE (operands[2]) == CONST_INT)
9645 {
9646 var = operands[3];
9647 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
9648 operands[3] = constm1_rtx, op = and_optab;
9649 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
9650 operands[3] = const0_rtx, op = ior_optab;
9651 else
9652 return 0; /* FAIL */
9653 }
9654 else if (GET_CODE (operands[3]) == CONST_INT)
9655 {
9656 var = operands[2];
9657 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
9658 operands[2] = constm1_rtx, op = and_optab;
9659 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
9660 operands[2] = const0_rtx, op = ior_optab;
9661 else
9662 return 0; /* FAIL */
9663 }
9664 else
9665 return 0; /* FAIL */
9666
9667 orig_out = operands[0];
9668 tmp = gen_reg_rtx (mode);
9669 operands[0] = tmp;
9670
9671 /* Recurse to get the constant loaded. */
9672 if (ix86_expand_int_movcc (operands) == 0)
9673 return 0; /* FAIL */
9674
9675 /* Mask in the interesting variable. */
9676 out = expand_binop (mode, op, var, tmp, orig_out, 0,
9677 OPTAB_WIDEN);
9678 if (!rtx_equal_p (out, orig_out))
9679 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
9680
9681 return 1; /* DONE */
9682 }
9683
9684 /*
9685 * For comparison with above,
9686 *
9687 * movl cf,dest
9688 * movl ct,tmp
9689 * cmpl op1,op2
9690 * cmovcc tmp,dest
9691 *
9692 * Size 15.
9693 */
9694
9695 if (! nonimmediate_operand (operands[2], mode))
9696 operands[2] = force_reg (mode, operands[2]);
9697 if (! nonimmediate_operand (operands[3], mode))
9698 operands[3] = force_reg (mode, operands[3]);
9699
9700 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9701 {
9702 rtx tmp = gen_reg_rtx (mode);
9703 emit_move_insn (tmp, operands[3]);
9704 operands[3] = tmp;
9705 }
9706 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9707 {
9708 rtx tmp = gen_reg_rtx (mode);
9709 emit_move_insn (tmp, operands[2]);
9710 operands[2] = tmp;
9711 }
9712
9713 if (! register_operand (operands[2], VOIDmode)
9714 && (mode == QImode
9715 || ! register_operand (operands[3], VOIDmode)))
9716 operands[2] = force_reg (mode, operands[2]);
9717
9718 if (mode == QImode
9719 && ! register_operand (operands[3], VOIDmode))
9720 operands[3] = force_reg (mode, operands[3]);
9721
9722 emit_insn (compare_seq);
9723 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9724 gen_rtx_IF_THEN_ELSE (mode,
9725 compare_op, operands[2],
9726 operands[3])));
9727 if (bypass_test)
9728 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9729 gen_rtx_IF_THEN_ELSE (mode,
9730 bypass_test,
9731 copy_rtx (operands[3]),
9732 copy_rtx (operands[0]))));
9733 if (second_test)
9734 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9735 gen_rtx_IF_THEN_ELSE (mode,
9736 second_test,
9737 copy_rtx (operands[2]),
9738 copy_rtx (operands[0]))));
9739
9740 return 1; /* DONE */
9741 }
9742
9743 int
9744 ix86_expand_fp_movcc (rtx operands[])
9745 {
9746 enum machine_mode mode = GET_MODE (operands[0]);
9747 enum rtx_code code = GET_CODE (operands[1]);
9748 rtx tmp, compare_op, second_test, bypass_test;
9749
9750 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
9751 {
9752 rtx cmp_op0, cmp_op1, if_true, if_false;
9753 rtx clob;
9754 enum machine_mode vmode, cmode;
9755 bool is_minmax = false;
9756
9757 cmp_op0 = ix86_compare_op0;
9758 cmp_op1 = ix86_compare_op1;
9759 if_true = operands[2];
9760 if_false = operands[3];
9761
9762 /* Since we've no cmove for sse registers, don't force bad register
9763 allocation just to gain access to it. Deny movcc when the
9764 comparison mode doesn't match the move mode. */
9765 cmode = GET_MODE (cmp_op0);
9766 if (cmode == VOIDmode)
9767 cmode = GET_MODE (cmp_op1);
9768 if (cmode != mode)
9769 return 0;
9770
9771 /* Massage condition to satisfy sse_comparison_operator. In case we
9772 are in non-ieee mode, try to canonicalize the destination operand
9773 to be first in the comparison - this helps reload to avoid extra
9774 moves. */
9775 if (!sse_comparison_operator (operands[1], VOIDmode)
9776 || ((COMMUTATIVE_P (operands[1]) || !TARGET_IEEE_FP)
9777 && rtx_equal_p (operands[0], cmp_op1)))
9778 {
9779 tmp = cmp_op0;
9780 cmp_op0 = cmp_op1;
9781 cmp_op1 = tmp;
9782 code = swap_condition (code);
9783 }
9784
9785 /* Detect conditional moves that exactly match min/max operational
9786 semantics. Note that this is IEEE safe, as long as we don't
9787 interchange the operands, which is why we keep this in the form
9788 of an IF_THEN_ELSE instead of reducing to SMIN/SMAX. */
9789 if ((code == LT || code == UNGE) && REG_P (cmp_op0) && REG_P (cmp_op1))
9790 {
9791 if (((cmp_op0 == if_true && cmp_op1 == if_false)
9792 || (cmp_op0 == if_false && cmp_op1 == if_true)))
9793 {
9794 is_minmax = true;
9795 if (code == UNGE)
9796 {
9797 code = LT;
9798 tmp = if_true;
9799 if_true = if_false;
9800 if_false = tmp;
9801 }
9802 }
9803 }
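/* Illustration (sketch): with code == LT and the arms equal to the
   compared registers, the IF_THEN_ELSE built below is exactly
     a < b ? a : b
   which is the semantics of minss/minsd; the swapped arms give the max
   form.  Keeping the operand order unchanged is what makes this IEEE
   safe for NaNs and signed zeros.  */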
9804
9805 if (mode == SFmode)
9806 vmode = V4SFmode;
9807 else if (mode == DFmode)
9808 vmode = V2DFmode;
9809 else
9810 gcc_unreachable ();
9811
9812 cmp_op0 = force_reg (mode, cmp_op0);
9813 if (!nonimmediate_operand (cmp_op1, mode))
9814 cmp_op1 = force_reg (mode, cmp_op1);
9815
9816 tmp = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
9817 gcc_assert (sse_comparison_operator (tmp, VOIDmode));
9818
9819 tmp = gen_rtx_IF_THEN_ELSE (mode, tmp, if_true, if_false);
9820 tmp = gen_rtx_SET (VOIDmode, operands[0], tmp);
9821
9822 if (!is_minmax)
9823 {
9824 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (vmode));
9825 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
9826 }
9827
9828 emit_insn (tmp);
9829 return 1;
9830 }
9831
9835 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9836
9837 /* The floating point conditional move instructions don't directly
9838 support signed integer comparisons. */
9839
9840 if (!fcmov_comparison_operator (compare_op, VOIDmode))
9841 {
9842 if (second_test != NULL || bypass_test != NULL)
9843 abort ();
9844 tmp = gen_reg_rtx (QImode);
9845 ix86_expand_setcc (code, tmp);
9846 code = NE;
9847 ix86_compare_op0 = tmp;
9848 ix86_compare_op1 = const0_rtx;
9849 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9850 }
9851 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9852 {
9853 tmp = gen_reg_rtx (mode);
9854 emit_move_insn (tmp, operands[3]);
9855 operands[3] = tmp;
9856 }
9857 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9858 {
9859 tmp = gen_reg_rtx (mode);
9860 emit_move_insn (tmp, operands[2]);
9861 operands[2] = tmp;
9862 }
9863
9864 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9865 gen_rtx_IF_THEN_ELSE (mode, compare_op,
9866 operands[2], operands[3])));
9867 if (bypass_test)
9868 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9869 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
9870 operands[3], operands[0])));
9871 if (second_test)
9872 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9873 gen_rtx_IF_THEN_ELSE (mode, second_test,
9874 operands[2], operands[0])));
9875
9876 return 1;
9877 }
9878
9879 void
9880 ix86_split_sse_movcc (rtx operands[])
9881 {
9882 rtx dest, scratch, cmp, op_true, op_false, x;
9883 enum machine_mode mode, vmode;
9884
9885 /* Note that the operator CMP has been set up with matching constraints
9886 such that dest is valid for the comparison. Unless one of the true
9887 or false operands is zero, the true operand has already been placed
9888 in SCRATCH. */
9889 dest = operands[0];
9890 scratch = operands[1];
9891 op_true = operands[2];
9892 op_false = operands[3];
9893 cmp = operands[4];
9894
9895 mode = GET_MODE (dest);
9896 vmode = GET_MODE (scratch);
9897
9898 emit_insn (gen_rtx_SET (VOIDmode, dest, cmp));
9899
9900 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9901
9902 if (op_false == CONST0_RTX (mode))
9903 {
9904 op_true = simplify_gen_subreg (vmode, op_true, mode, 0);
9905 x = gen_rtx_AND (vmode, dest, op_true);
9906 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9907 }
9908 else
9909 {
9910 op_false = simplify_gen_subreg (vmode, op_false, mode, 0);
9911
9912 if (op_true == CONST0_RTX (mode))
9913 {
9914 x = gen_rtx_NOT (vmode, dest);
9915 x = gen_rtx_AND (vmode, x, op_false);
9916 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9917 }
9918 else
9919 {
9920 x = gen_rtx_AND (vmode, scratch, dest);
9921 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9922
9923 x = gen_rtx_NOT (vmode, dest);
9924 x = gen_rtx_AND (vmode, x, op_false);
9925 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9926
9927 x = gen_rtx_IOR (vmode, dest, scratch);
9928 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9929 }
9930 }
9931 }
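/* Illustration of the mask combine above (a sketch, per element):
     mask = cmp (a, b);                        (all ones or all zeros)
     dest = (mask & op_true) | (~mask & op_false);
   When op_false or op_true is zero the corresponding half of the
   expression is dropped, which is what the two simpler branches do.  */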
9932
9933 /* Expand conditional increment or decrement using adc/sbb instructions.
9934 The default case using setcc followed by the conditional move can be
9935 done by generic code. */
9936 int
9937 ix86_expand_int_addcc (rtx operands[])
9938 {
9939 enum rtx_code code = GET_CODE (operands[1]);
9940 rtx compare_op;
9941 rtx val = const0_rtx;
9942 bool fpcmp = false;
9943 enum machine_mode mode = GET_MODE (operands[0]);
9944
9945 if (operands[3] != const1_rtx
9946 && operands[3] != constm1_rtx)
9947 return 0;
9948 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9949 ix86_compare_op1, &compare_op))
9950 return 0;
9951 code = GET_CODE (compare_op);
9952
9953 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9954 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9955 {
9956 fpcmp = true;
9957 code = ix86_fp_compare_code_to_integer (code);
9958 }
9959
9960 if (code != LTU)
9961 {
9962 val = constm1_rtx;
9963 if (fpcmp)
9964 PUT_CODE (compare_op,
9965 reverse_condition_maybe_unordered
9966 (GET_CODE (compare_op)));
9967 else
9968 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9969 }
9970 PUT_MODE (compare_op, mode);
9971
9972 /* Construct either adc or sbb insn. */
9973 if ((code == LTU) == (operands[3] == constm1_rtx))
9974 {
9975 switch (GET_MODE (operands[0]))
9976 {
9977 case QImode:
9978 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
9979 break;
9980 case HImode:
9981 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
9982 break;
9983 case SImode:
9984 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
9985 break;
9986 case DImode:
9987 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
9988 break;
9989 default:
9990 abort ();
9991 }
9992 }
9993 else
9994 {
9995 switch (GET_MODE (operands[0]))
9996 {
9997 case QImode:
9998 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
9999 break;
10000 case HImode:
10001 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
10002 break;
10003 case SImode:
10004 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
10005 break;
10006 case DImode:
10007 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10008 break;
10009 default:
10010 abort ();
10011 }
10012 }
10013 return 1; /* DONE */
10014 }
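/* Illustration (sketch): once ix86_expand_carry_flag_compare has reduced
   the condition to the carry flag, a conditional +/-1 is just
     x += cond;      roughly  cmp ... ; adc x, 0
     x -= cond;      roughly  cmp ... ; sbb x, 0
   and the val/constm1 juggling above selects between adding 0 or -1 with
   adc or sbb so that both increments and decrements are covered.  */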
10015
10016
10017 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
10018 works for floating point parameters and non-offsettable memories.
10019 For pushes, it returns just stack offsets; the values will be saved
10020 in the right order. Maximally three parts are generated. */
10021
10022 static int
10023 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
10024 {
10025 int size;
10026
10027 if (!TARGET_64BIT)
10028 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
10029 else
10030 size = (GET_MODE_SIZE (mode) + 4) / 8;
10031
10032 if (GET_CODE (operand) == REG && MMX_REGNO_P (REGNO (operand)))
10033 abort ();
10034 if (size < 2 || size > 3)
10035 abort ();
10036
10037 /* Optimize constant pool reference to immediates. This is used by fp
10038 moves, that force all constants to memory to allow combining. */
10039 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
10040 {
10041 rtx tmp = maybe_get_pool_constant (operand);
10042 if (tmp)
10043 operand = tmp;
10044 }
10045
10046 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
10047 {
10048 /* The only non-offsettable memories we handle are pushes. */
10049 if (! push_operand (operand, VOIDmode))
10050 abort ();
10051
10052 operand = copy_rtx (operand);
10053 PUT_MODE (operand, Pmode);
10054 parts[0] = parts[1] = parts[2] = operand;
10055 }
10056 else if (!TARGET_64BIT)
10057 {
10058 if (mode == DImode)
10059 split_di (&operand, 1, &parts[0], &parts[1]);
10060 else
10061 {
10062 if (REG_P (operand))
10063 {
10064 if (!reload_completed)
10065 abort ();
10066 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
10067 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
10068 if (size == 3)
10069 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
10070 }
10071 else if (offsettable_memref_p (operand))
10072 {
10073 operand = adjust_address (operand, SImode, 0);
10074 parts[0] = operand;
10075 parts[1] = adjust_address (operand, SImode, 4);
10076 if (size == 3)
10077 parts[2] = adjust_address (operand, SImode, 8);
10078 }
10079 else if (GET_CODE (operand) == CONST_DOUBLE)
10080 {
10081 REAL_VALUE_TYPE r;
10082 long l[4];
10083
10084 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10085 switch (mode)
10086 {
10087 case XFmode:
10088 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
10089 parts[2] = gen_int_mode (l[2], SImode);
10090 break;
10091 case DFmode:
10092 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
10093 break;
10094 default:
10095 abort ();
10096 }
10097 parts[1] = gen_int_mode (l[1], SImode);
10098 parts[0] = gen_int_mode (l[0], SImode);
10099 }
10100 else
10101 abort ();
10102 }
10103 }
10104 else
10105 {
10106 if (mode == TImode)
10107 split_ti (&operand, 1, &parts[0], &parts[1]);
10108 if (mode == XFmode || mode == TFmode)
10109 {
10110 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
10111 if (REG_P (operand))
10112 {
10113 if (!reload_completed)
10114 abort ();
10115 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
10116 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
10117 }
10118 else if (offsettable_memref_p (operand))
10119 {
10120 operand = adjust_address (operand, DImode, 0);
10121 parts[0] = operand;
10122 parts[1] = adjust_address (operand, upper_mode, 8);
10123 }
10124 else if (GET_CODE (operand) == CONST_DOUBLE)
10125 {
10126 REAL_VALUE_TYPE r;
10127 long l[4];
10128
10129 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10130 real_to_target (l, &r, mode);
10131
10132 /* Do not use shift by 32 to avoid warning on 32bit systems. */
10133 if (HOST_BITS_PER_WIDE_INT >= 64)
10134 parts[0]
10135 = gen_int_mode
10136 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
10137 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
10138 DImode);
10139 else
10140 parts[0] = immed_double_const (l[0], l[1], DImode);
10141
10142 if (upper_mode == SImode)
10143 parts[1] = gen_int_mode (l[2], SImode);
10144 else if (HOST_BITS_PER_WIDE_INT >= 64)
10145 parts[1]
10146 = gen_int_mode
10147 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
10148 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
10149 DImode);
10150 else
10151 parts[1] = immed_double_const (l[2], l[3], DImode);
10152 }
10153 else
10154 abort ();
10155 }
10156 }
10157
10158 return size;
10159 }
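/* Illustration (sketch): on a 32-bit target a DFmode operand yields two
   SImode parts (low word first) and an XFmode operand three; e.g. the
   constant 1.0 (bit pattern 0x3ff0000000000000) would split roughly into
   parts[0] = 0x00000000 and parts[1] = 0x3ff00000.  */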
10160
10161 /* Emit insns to perform a move or push of DI, DF, and XF values.
10162 Return false when normal moves are needed; true when all required
10163 insns have been emitted. Operands 2-4 contain the input values
10164 in the correct order; operands 5-7 contain the output values. */
10165
10166 void
10167 ix86_split_long_move (rtx operands[])
10168 {
10169 rtx part[2][3];
10170 int nparts;
10171 int push = 0;
10172 int collisions = 0;
10173 enum machine_mode mode = GET_MODE (operands[0]);
10174
10175 /* The DFmode expanders may ask us to move a double.
10176 For a 64bit target this is a single move. By hiding the fact
10177 here we simplify the i386.md splitters. */
10178 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
10179 {
10180 /* Optimize constant pool reference to immediates. This is used by
10181 fp moves, that force all constants to memory to allow combining. */
10182
10183 if (GET_CODE (operands[1]) == MEM
10184 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10185 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
10186 operands[1] = get_pool_constant (XEXP (operands[1], 0));
10187 if (push_operand (operands[0], VOIDmode))
10188 {
10189 operands[0] = copy_rtx (operands[0]);
10190 PUT_MODE (operands[0], Pmode);
10191 }
10192 else
10193 operands[0] = gen_lowpart (DImode, operands[0]);
10194 operands[1] = gen_lowpart (DImode, operands[1]);
10195 emit_move_insn (operands[0], operands[1]);
10196 return;
10197 }
10198
10199 /* The only non-offsettable memory we handle is push. */
10200 if (push_operand (operands[0], VOIDmode))
10201 push = 1;
10202 else if (GET_CODE (operands[0]) == MEM
10203 && ! offsettable_memref_p (operands[0]))
10204 abort ();
10205
10206 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
10207 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
10208
10209 /* When emitting push, take care for source operands on the stack. */
10210 if (push && GET_CODE (operands[1]) == MEM
10211 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
10212 {
10213 if (nparts == 3)
10214 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
10215 XEXP (part[1][2], 0));
10216 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
10217 XEXP (part[1][1], 0));
10218 }
10219
10220 /* We need to do the copy in the right order in case an address register
10221 of the source overlaps the destination. */
10222 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
10223 {
10224 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
10225 collisions++;
10226 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10227 collisions++;
10228 if (nparts == 3
10229 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
10230 collisions++;
10231
10232 /* Collision in the middle part can be handled by reordering. */
10233 if (collisions == 1 && nparts == 3
10234 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10235 {
10236 rtx tmp;
10237 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
10238 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
10239 }
10240
10241 /* If there are more collisions, we can't handle it by reordering.
10242 Do an lea to the last part and use only one colliding move. */
10243 else if (collisions > 1)
10244 {
10245 rtx base;
10246
10247 collisions = 1;
10248
10249 base = part[0][nparts - 1];
10250
10251 /* Handle the case when the last part isn't valid for lea.
10252 Happens in 64-bit mode storing the 12-byte XFmode. */
10253 if (GET_MODE (base) != Pmode)
10254 base = gen_rtx_REG (Pmode, REGNO (base));
10255
10256 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
10257 part[1][0] = replace_equiv_address (part[1][0], base);
10258 part[1][1] = replace_equiv_address (part[1][1],
10259 plus_constant (base, UNITS_PER_WORD));
10260 if (nparts == 3)
10261 part[1][2] = replace_equiv_address (part[1][2],
10262 plus_constant (base, 8));
10263 }
10264 }
10265
10266 if (push)
10267 {
10268 if (!TARGET_64BIT)
10269 {
10270 if (nparts == 3)
10271 {
10272 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
10273 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
10274 emit_move_insn (part[0][2], part[1][2]);
10275 }
10276 }
10277 else
10278 {
10279 /* In 64bit mode we don't have a 32bit push available. If this is a
10280 register, that is OK - we will just use the larger counterpart. We also
10281 retype memory - these come from an attempt to avoid a REX prefix on
10282 moving of the second half of a TFmode value. */
10283 if (GET_MODE (part[1][1]) == SImode)
10284 {
10285 if (GET_CODE (part[1][1]) == MEM)
10286 part[1][1] = adjust_address (part[1][1], DImode, 0);
10287 else if (REG_P (part[1][1]))
10288 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
10289 else
10290 abort ();
10291 if (GET_MODE (part[1][0]) == SImode)
10292 part[1][0] = part[1][1];
10293 }
10294 }
10295 emit_move_insn (part[0][1], part[1][1]);
10296 emit_move_insn (part[0][0], part[1][0]);
10297 return;
10298 }
10299
10300 /* Choose correct order to not overwrite the source before it is copied. */
10301 if ((REG_P (part[0][0])
10302 && REG_P (part[1][1])
10303 && (REGNO (part[0][0]) == REGNO (part[1][1])
10304 || (nparts == 3
10305 && REGNO (part[0][0]) == REGNO (part[1][2]))))
10306 || (collisions > 0
10307 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
10308 {
10309 if (nparts == 3)
10310 {
10311 operands[2] = part[0][2];
10312 operands[3] = part[0][1];
10313 operands[4] = part[0][0];
10314 operands[5] = part[1][2];
10315 operands[6] = part[1][1];
10316 operands[7] = part[1][0];
10317 }
10318 else
10319 {
10320 operands[2] = part[0][1];
10321 operands[3] = part[0][0];
10322 operands[5] = part[1][1];
10323 operands[6] = part[1][0];
10324 }
10325 }
10326 else
10327 {
10328 if (nparts == 3)
10329 {
10330 operands[2] = part[0][0];
10331 operands[3] = part[0][1];
10332 operands[4] = part[0][2];
10333 operands[5] = part[1][0];
10334 operands[6] = part[1][1];
10335 operands[7] = part[1][2];
10336 }
10337 else
10338 {
10339 operands[2] = part[0][0];
10340 operands[3] = part[0][1];
10341 operands[5] = part[1][0];
10342 operands[6] = part[1][1];
10343 }
10344 }
10345
10346 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
10347 if (optimize_size)
10348 {
10349 if (GET_CODE (operands[5]) == CONST_INT
10350 && operands[5] != const0_rtx
10351 && REG_P (operands[2]))
10352 {
10353 if (GET_CODE (operands[6]) == CONST_INT
10354 && INTVAL (operands[6]) == INTVAL (operands[5]))
10355 operands[6] = operands[2];
10356
10357 if (nparts == 3
10358 && GET_CODE (operands[7]) == CONST_INT
10359 && INTVAL (operands[7]) == INTVAL (operands[5]))
10360 operands[7] = operands[2];
10361 }
10362
10363 if (nparts == 3
10364 && GET_CODE (operands[6]) == CONST_INT
10365 && operands[6] != const0_rtx
10366 && REG_P (operands[3])
10367 && GET_CODE (operands[7]) == CONST_INT
10368 && INTVAL (operands[7]) == INTVAL (operands[6]))
10369 operands[7] = operands[3];
10370 }
10371
10372 emit_move_insn (operands[2], operands[5]);
10373 emit_move_insn (operands[3], operands[6]);
10374 if (nparts == 3)
10375 emit_move_insn (operands[4], operands[7]);
10376
10377 return;
10378 }
10379
10380 /* Helper function of ix86_split_ashldi used to generate an SImode
10381 left shift by a constant, either using a single shift or
10382 a sequence of add instructions. */
10383
10384 static void
10385 ix86_expand_ashlsi3_const (rtx operand, int count)
10386 {
10387 if (count == 1)
10388 emit_insn (gen_addsi3 (operand, operand, operand));
10389 else if (!optimize_size
10390 && count * ix86_cost->add <= ix86_cost->shift_const)
10391 {
10392 int i;
10393 for (i = 0; i < count; i++)
10394 emit_insn (gen_addsi3 (operand, operand, operand));
10395 }
10396 else
10397 emit_insn (gen_ashlsi3 (operand, operand, GEN_INT (count)));
10398 }
10399
10400 void
10401 ix86_split_ashldi (rtx *operands, rtx scratch)
10402 {
10403 rtx low[2], high[2];
10404 int count;
10405
10406 if (GET_CODE (operands[2]) == CONST_INT)
10407 {
10408 split_di (operands, 2, low, high);
10409 count = INTVAL (operands[2]) & 63;
10410
10411 if (count >= 32)
10412 {
10413 emit_move_insn (high[0], low[1]);
10414 emit_move_insn (low[0], const0_rtx);
10415
10416 if (count > 32)
10417 ix86_expand_ashlsi3_const (high[0], count - 32);
10418 }
10419 else
10420 {
10421 if (!rtx_equal_p (operands[0], operands[1]))
10422 emit_move_insn (operands[0], operands[1]);
10423 emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
10424 ix86_expand_ashlsi3_const (low[0], count);
10425 }
10426 return;
10427 }
10428
10429 split_di (operands, 1, low, high);
10430
10431 if (operands[1] == const1_rtx)
10432 {
10433 /* Assuming we've chosen QImode-capable registers, 1LL << N
10434 can be done with two 32-bit shifts, no branches, no cmoves. */
10435 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
10436 {
10437 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
10438
10439 ix86_expand_clear (low[0]);
10440 ix86_expand_clear (high[0]);
10441 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
10442
10443 d = gen_lowpart (QImode, low[0]);
10444 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10445 s = gen_rtx_EQ (QImode, flags, const0_rtx);
10446 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10447
10448 d = gen_lowpart (QImode, high[0]);
10449 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10450 s = gen_rtx_NE (QImode, flags, const0_rtx);
10451 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10452 }
10453
10454 /* Otherwise, we can get the same results by manually performing
10455 a bit extract operation on bit 5, and then performing the two
10456 shifts. The two methods of getting 0/1 into low/high are exactly
10457 the same size. Avoiding the shift in the bit extract case helps
10458 pentium4 a bit; no one else seems to care much either way. */
10459 else
10460 {
10461 rtx x;
10462
10463 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
10464 x = gen_rtx_ZERO_EXTEND (SImode, operands[2]);
10465 else
10466 x = gen_lowpart (SImode, operands[2]);
10467 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
10468
10469 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (5)));
10470 emit_insn (gen_andsi3 (high[0], high[0], GEN_INT (1)));
10471 emit_move_insn (low[0], high[0]);
10472 emit_insn (gen_xorsi3 (low[0], low[0], GEN_INT (1)));
10473 }
10474
10475 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10476 emit_insn (gen_ashlsi3 (high[0], high[0], operands[2]));
10477 return;
10478 }
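/* Illustration of the 1LL << n trick above (a sketch; n in 0..63, and x86
   32-bit shifts only look at the low five bits of the count):
     lo = (n & 32) == 0;          (the single 1 bit lands in the low word)
     hi = (n & 32) != 0;          (or in the high word)
     lo <<= n;
     hi <<= n;  */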
10479
10480 if (operands[1] == constm1_rtx)
10481 {
10482 /* For -1LL << N, we can avoid the shld instruction, because we
10483 know that we're shifting 0...31 ones into a -1. */
10484 emit_move_insn (low[0], constm1_rtx);
10485 if (optimize_size)
10486 emit_move_insn (high[0], low[0]);
10487 else
10488 emit_move_insn (high[0], constm1_rtx);
10489 }
10490 else
10491 {
10492 if (!rtx_equal_p (operands[0], operands[1]))
10493 emit_move_insn (operands[0], operands[1]);
10494
10495 split_di (operands, 1, low, high);
10496 emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
10497 }
10498
10499 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10500
10501 if (TARGET_CMOVE && scratch)
10502 {
10503 ix86_expand_clear (scratch);
10504 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
10505 }
10506 else
10507 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
10508 }
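/* Illustration of the variable-count path above (a sketch; the hardware
   takes shift counts mod 32):
     high = (high << n) | (low >> (32 - n));    done by shld
     low  =  low << n;
   Because both shifts ignore bit 5 of the count, a fixup follows: when
   n & 32 is set, high becomes low and low becomes 0, done either with
   cmov (x86_shift_adj_1) or a short branch (x86_shift_adj_2).  */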
10509
10510 void
10511 ix86_split_ashrdi (rtx *operands, rtx scratch)
10512 {
10513 rtx low[2], high[2];
10514 int count;
10515
10516 if (GET_CODE (operands[2]) == CONST_INT)
10517 {
10518 split_di (operands, 2, low, high);
10519 count = INTVAL (operands[2]) & 63;
10520
10521 if (count == 63)
10522 {
10523 emit_move_insn (high[0], high[1]);
10524 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10525 emit_move_insn (low[0], high[0]);
10526
10527 }
10528 else if (count >= 32)
10529 {
10530 emit_move_insn (low[0], high[1]);
10531 emit_move_insn (high[0], low[0]);
10532 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10533 if (count > 32)
10534 emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
10535 }
10536 else
10537 {
10538 if (!rtx_equal_p (operands[0], operands[1]))
10539 emit_move_insn (operands[0], operands[1]);
10540 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10541 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
10542 }
10543 }
10544 else
10545 {
10546 if (!rtx_equal_p (operands[0], operands[1]))
10547 emit_move_insn (operands[0], operands[1]);
10548
10549 split_di (operands, 1, low, high);
10550
10551 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10552 emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
10553
10554 if (TARGET_CMOVE && scratch)
10555 {
10556 emit_move_insn (scratch, high[0]);
10557 emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
10558 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10559 scratch));
10560 }
10561 else
10562 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
10563 }
10564 }
10565
10566 void
10567 ix86_split_lshrdi (rtx *operands, rtx scratch)
10568 {
10569 rtx low[2], high[2];
10570 int count;
10571
10572 if (GET_CODE (operands[2]) == CONST_INT)
10573 {
10574 split_di (operands, 2, low, high);
10575 count = INTVAL (operands[2]) & 63;
10576
10577 if (count >= 32)
10578 {
10579 emit_move_insn (low[0], high[1]);
10580 ix86_expand_clear (high[0]);
10581
10582 if (count > 32)
10583 emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
10584 }
10585 else
10586 {
10587 if (!rtx_equal_p (operands[0], operands[1]))
10588 emit_move_insn (operands[0], operands[1]);
10589 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10590 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
10591 }
10592 }
10593 else
10594 {
10595 if (!rtx_equal_p (operands[0], operands[1]))
10596 emit_move_insn (operands[0], operands[1]);
10597
10598 split_di (operands, 1, low, high);
10599
10600 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10601 emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
10602
10603 /* Heh. By reversing the arguments, we can reuse this pattern. */
10604 if (TARGET_CMOVE && scratch)
10605 {
10606 ix86_expand_clear (scratch);
10607 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10608 scratch));
10609 }
10610 else
10611 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
10612 }
10613 }
10614
10615 /* Helper function for the string operations below. Test whether VARIABLE
10616 is aligned to VALUE bytes. If so, jump to the returned label. */
10617 static rtx
10618 ix86_expand_aligntest (rtx variable, int value)
10619 {
10620 rtx label = gen_label_rtx ();
10621 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
10622 if (GET_MODE (variable) == DImode)
10623 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
10624 else
10625 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
10626 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
10627 1, label);
10628 return label;
10629 }
10630
10631 /* Adjust COUNTER by the VALUE. */
10632 static void
10633 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
10634 {
10635 if (GET_MODE (countreg) == DImode)
10636 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
10637 else
10638 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
10639 }
10640
10641 /* Zero extend possibly SImode EXP to Pmode register. */
10642 rtx
10643 ix86_zero_extend_to_Pmode (rtx exp)
10644 {
10645 rtx r;
10646 if (GET_MODE (exp) == VOIDmode)
10647 return force_reg (Pmode, exp);
10648 if (GET_MODE (exp) == Pmode)
10649 return copy_to_mode_reg (Pmode, exp);
10650 r = gen_reg_rtx (Pmode);
10651 emit_insn (gen_zero_extendsidi2 (r, exp));
10652 return r;
10653 }
10654
10655 /* Expand string move (memcpy) operation. Use i386 string operations when
10656 profitable. expand_clrmem contains similar code. */
10657 int
10658 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
10659 {
10660 rtx srcreg, destreg, countreg, srcexp, destexp;
10661 enum machine_mode counter_mode;
10662 HOST_WIDE_INT align = 0;
10663 unsigned HOST_WIDE_INT count = 0;
10664
10665 if (GET_CODE (align_exp) == CONST_INT)
10666 align = INTVAL (align_exp);
10667
10668 /* Can't use any of this if the user has appropriated esi or edi. */
10669 if (global_regs[4] || global_regs[5])
10670 return 0;
10671
10672 /* This simple hack avoids all inlining code and simplifies code below. */
10673 if (!TARGET_ALIGN_STRINGOPS)
10674 align = 64;
10675
10676 if (GET_CODE (count_exp) == CONST_INT)
10677 {
10678 count = INTVAL (count_exp);
10679 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10680 return 0;
10681 }
10682
10683 /* Figure out proper mode for counter. For 32bits it is always SImode,
10684 for 64bits use SImode when possible, otherwise DImode.
10685 Set count to number of bytes copied when known at compile time. */
10686 if (!TARGET_64BIT
10687 || GET_MODE (count_exp) == SImode
10688 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10689 counter_mode = SImode;
10690 else
10691 counter_mode = DImode;
10692
10693 if (counter_mode != SImode && counter_mode != DImode)
10694 abort ();
10695
10696 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10697 if (destreg != XEXP (dst, 0))
10698 dst = replace_equiv_address_nv (dst, destreg);
10699 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
10700 if (srcreg != XEXP (src, 0))
10701 src = replace_equiv_address_nv (src, srcreg);
10702
10703 /* When optimizing for size emit simple rep ; movsb instruction for
10704 counts not divisible by 4. */
10705
10706 if ((!optimize || optimize_size) && (count == 0 || (count & 0x03)))
10707 {
10708 emit_insn (gen_cld ());
10709 countreg = ix86_zero_extend_to_Pmode (count_exp);
10710 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10711 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
10712 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
10713 destexp, srcexp));
10714 }
10715
10716 /* For constant aligned (or small unaligned) copies use rep movsl
10717 followed by code copying the rest. For PentiumPro ensure 8 byte
10718 alignment to allow rep movsl acceleration. */
10719
10720 else if (count != 0
10721 && (align >= 8
10722 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
10723 || optimize_size || count < (unsigned int) 64))
10724 {
10725 unsigned HOST_WIDE_INT offset = 0;
10726 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
10727 rtx srcmem, dstmem;
10728
10729 emit_insn (gen_cld ());
10730 if (count & ~(size - 1))
10731 {
10732 countreg = copy_to_mode_reg (counter_mode,
10733 GEN_INT ((count >> (size == 4 ? 2 : 3))
10734 & (TARGET_64BIT ? -1 : 0x3fffffff)));
10735 countreg = ix86_zero_extend_to_Pmode (countreg);
10736
10737 destexp = gen_rtx_ASHIFT (Pmode, countreg,
10738 GEN_INT (size == 4 ? 2 : 3));
10739 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10740 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10741
10742 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10743 countreg, destexp, srcexp));
10744 offset = count & ~(size - 1);
10745 }
10746 if (size == 8 && (count & 0x04))
10747 {
10748 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
10749 offset);
10750 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
10751 offset);
10752 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10753 offset += 4;
10754 }
10755 if (count & 0x02)
10756 {
10757 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
10758 offset);
10759 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
10760 offset);
10761 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10762 offset += 2;
10763 }
10764 if (count & 0x01)
10765 {
10766 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
10767 offset);
10768 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
10769 offset);
10770 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10771 }
10772 }
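/* Illustration of the constant-count branch above (a sketch, assuming the
   4-byte chunk size, i.e. size == 4):
     rep movsl copies count >> 2 dwords,
     one 2-byte move handles the count & 2 tail,
     one 1-byte move handles the count & 1 tail;
   with size == 8 an extra 4-byte move first covers the count & 4 tail.  */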
10773 /* The generic code based on the glibc implementation:
10774 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
10775 allowing accelerated copying there)
10776 - copy the data using rep movsl
10777 - copy the rest. */
10778 else
10779 {
10780 rtx countreg2;
10781 rtx label = NULL;
10782 rtx srcmem, dstmem;
10783 int desired_alignment = (TARGET_PENTIUMPRO
10784 && (count == 0 || count >= (unsigned int) 260)
10785 ? 8 : UNITS_PER_WORD);
10786 /* Get rid of MEM_OFFSETs, they won't be accurate. */
10787 dst = change_address (dst, BLKmode, destreg);
10788 src = change_address (src, BLKmode, srcreg);
10789
10790 /* In case we don't know anything about the alignment, default to
10791 library version, since it is usually equally fast and results in
10792 shorter code.
10793
10794 Also emit a call when we know that the count is large and call overhead
10795 will not be important. */
10796 if (!TARGET_INLINE_ALL_STRINGOPS
10797 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
10798 return 0;
10799
10800 if (TARGET_SINGLE_STRINGOP)
10801 emit_insn (gen_cld ());
10802
10803 countreg2 = gen_reg_rtx (Pmode);
10804 countreg = copy_to_mode_reg (counter_mode, count_exp);
10805
10806 /* We don't use loops to align destination and to copy parts smaller
10807 than 4 bytes, because gcc is able to optimize such code better (in
10808 the case the destination or the count really is aligned, gcc is often
10809 able to predict the branches) and also it is friendlier to the
10810 hardware branch prediction.
10811
10812 Using loops is beneficial for the generic case, because we can
10813 handle small counts using the loops. Many CPUs (such as Athlon)
10814 have large REP prefix setup costs.
10815
10816 This is quite costly. Maybe we can revisit this decision later or
10817 add some customizability to this code. */
10818
10819 if (count == 0 && align < desired_alignment)
10820 {
10821 label = gen_label_rtx ();
10822 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
10823 LEU, 0, counter_mode, 1, label);
10824 }
10825 if (align <= 1)
10826 {
10827 rtx label = ix86_expand_aligntest (destreg, 1);
10828 srcmem = change_address (src, QImode, srcreg);
10829 dstmem = change_address (dst, QImode, destreg);
10830 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10831 ix86_adjust_counter (countreg, 1);
10832 emit_label (label);
10833 LABEL_NUSES (label) = 1;
10834 }
10835 if (align <= 2)
10836 {
10837 rtx label = ix86_expand_aligntest (destreg, 2);
10838 srcmem = change_address (src, HImode, srcreg);
10839 dstmem = change_address (dst, HImode, destreg);
10840 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10841 ix86_adjust_counter (countreg, 2);
10842 emit_label (label);
10843 LABEL_NUSES (label) = 1;
10844 }
10845 if (align <= 4 && desired_alignment > 4)
10846 {
10847 rtx label = ix86_expand_aligntest (destreg, 4);
10848 srcmem = change_address (src, SImode, srcreg);
10849 dstmem = change_address (dst, SImode, destreg);
10850 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10851 ix86_adjust_counter (countreg, 4);
10852 emit_label (label);
10853 LABEL_NUSES (label) = 1;
10854 }
10855
10856 if (label && desired_alignment > 4 && !TARGET_64BIT)
10857 {
10858 emit_label (label);
10859 LABEL_NUSES (label) = 1;
10860 label = NULL_RTX;
10861 }
10862 if (!TARGET_SINGLE_STRINGOP)
10863 emit_insn (gen_cld ());
10864 if (TARGET_64BIT)
10865 {
10866 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
10867 GEN_INT (3)));
10868 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
10869 }
10870 else
10871 {
10872 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
10873 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
10874 }
10875 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10876 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10877 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10878 countreg2, destexp, srcexp));
10879
10880 if (label)
10881 {
10882 emit_label (label);
10883 LABEL_NUSES (label) = 1;
10884 }
10885 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
10886 {
10887 srcmem = change_address (src, SImode, srcreg);
10888 dstmem = change_address (dst, SImode, destreg);
10889 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10890 }
10891 if ((align <= 4 || count == 0) && TARGET_64BIT)
10892 {
10893 rtx label = ix86_expand_aligntest (countreg, 4);
10894 srcmem = change_address (src, SImode, srcreg);
10895 dstmem = change_address (dst, SImode, destreg);
10896 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10897 emit_label (label);
10898 LABEL_NUSES (label) = 1;
10899 }
10900 if (align > 2 && count != 0 && (count & 2))
10901 {
10902 srcmem = change_address (src, HImode, srcreg);
10903 dstmem = change_address (dst, HImode, destreg);
10904 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10905 }
10906 if (align <= 2 || count == 0)
10907 {
10908 rtx label = ix86_expand_aligntest (countreg, 2);
10909 srcmem = change_address (src, HImode, srcreg);
10910 dstmem = change_address (dst, HImode, destreg);
10911 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10912 emit_label (label);
10913 LABEL_NUSES (label) = 1;
10914 }
10915 if (align > 1 && count != 0 && (count & 1))
10916 {
10917 srcmem = change_address (src, QImode, srcreg);
10918 dstmem = change_address (dst, QImode, destreg);
10919 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10920 }
10921 if (align <= 1 || count == 0)
10922 {
10923 rtx label = ix86_expand_aligntest (countreg, 1);
10924 srcmem = change_address (src, QImode, srcreg);
10925 dstmem = change_address (dst, QImode, destreg);
10926 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10927 emit_label (label);
10928 LABEL_NUSES (label) = 1;
10929 }
10930 }
10931
10932 return 1;
10933 }
10934
10935 /* Expand string clear operation (bzero). Use i386 string operations when
10936 profitable. expand_movmem contains similar code. */
10937 int
10938 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
10939 {
10940 rtx destreg, zeroreg, countreg, destexp;
10941 enum machine_mode counter_mode;
10942 HOST_WIDE_INT align = 0;
10943 unsigned HOST_WIDE_INT count = 0;
10944
10945 if (GET_CODE (align_exp) == CONST_INT)
10946 align = INTVAL (align_exp);
10947
10948 /* Can't use any of this if the user has appropriated esi. */
10949 if (global_regs[4])
10950 return 0;
10951
10952 /* This simple hack avoids all inlining code and simplifies code below. */
10953 if (!TARGET_ALIGN_STRINGOPS)
10954 align = 32;
10955
10956 if (GET_CODE (count_exp) == CONST_INT)
10957 {
10958 count = INTVAL (count_exp);
10959 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10960 return 0;
10961 }
10962 /* Figure out proper mode for counter. For 32bits it is always SImode,
10963 for 64bits use SImode when possible, otherwise DImode.
10964 Set count to number of bytes copied when known at compile time. */
10965 if (!TARGET_64BIT
10966 || GET_MODE (count_exp) == SImode
10967 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10968 counter_mode = SImode;
10969 else
10970 counter_mode = DImode;
10971
10972 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10973 if (destreg != XEXP (dst, 0))
10974 dst = replace_equiv_address_nv (dst, destreg);
10975
10976
10977 /* When optimizing for size emit simple rep ; stosb instruction for
10978 counts not divisible by 4. The movl $N, %ecx; rep; stosb
10979 sequence is 7 bytes long, so if optimizing for size and count is
10980 small enough that some stosl, stosw and stosb instructions without
10981 rep are shorter, fall back into the next if. */
10982
10983 if ((!optimize || optimize_size)
10984 && (count == 0
10985 || ((count & 0x03)
10986 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
10987 {
10988 emit_insn (gen_cld ());
10989
10990 countreg = ix86_zero_extend_to_Pmode (count_exp);
10991 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
10992 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10993 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
10994 }
10995 else if (count != 0
10996 && (align >= 8
10997 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
10998 || optimize_size || count < (unsigned int) 64))
10999 {
11000 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11001 unsigned HOST_WIDE_INT offset = 0;
11002
11003 emit_insn (gen_cld ());
11004
11005 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
11006 if (count & ~(size - 1))
11007 {
11008 unsigned HOST_WIDE_INT repcount;
11009 unsigned int max_nonrep;
11010
11011 repcount = count >> (size == 4 ? 2 : 3);
11012 if (!TARGET_64BIT)
11013 repcount &= 0x3fffffff;
11014
11015 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
11016 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
11017 bytes. In both cases the latter seems to be faster for small
11018 values of N. */
11019 max_nonrep = size == 4 ? 7 : 4;
11020 if (!optimize_size)
11021 switch (ix86_tune)
11022 {
11023 case PROCESSOR_PENTIUM4:
11024 case PROCESSOR_NOCONA:
11025 max_nonrep = 3;
11026 break;
11027 default:
11028 break;
11029 }
11030
11031 if (repcount <= max_nonrep)
11032 while (repcount-- > 0)
11033 {
11034 rtx mem = adjust_automodify_address_nv (dst,
11035 GET_MODE (zeroreg),
11036 destreg, offset);
11037 emit_insn (gen_strset (destreg, mem, zeroreg));
11038 offset += size;
11039 }
11040 else
11041 {
11042 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
11043 countreg = ix86_zero_extend_to_Pmode (countreg);
11044 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11045 GEN_INT (size == 4 ? 2 : 3));
11046 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11047 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
11048 destexp));
11049 offset = count & ~(size - 1);
11050 }
11051 }
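/* Illustration of the threshold arithmetic above (a sketch): clearing 12
   aligned bytes takes 3 one-byte stosl instructions (3 bytes of code)
   versus 7 bytes for movl $3, %ecx ; rep ; stosl, so the unrolled form is
   preferred for small repeat counts.  */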
11052 if (size == 8 && (count & 0x04))
11053 {
11054 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
11055 offset);
11056 emit_insn (gen_strset (destreg, mem,
11057 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11058 offset += 4;
11059 }
11060 if (count & 0x02)
11061 {
11062 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
11063 offset);
11064 emit_insn (gen_strset (destreg, mem,
11065 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11066 offset += 2;
11067 }
11068 if (count & 0x01)
11069 {
11070 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
11071 offset);
11072 emit_insn (gen_strset (destreg, mem,
11073 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11074 }
11075 }
11076 else
11077 {
11078 rtx countreg2;
11079 rtx label = NULL;
11080 /* Compute desired alignment of the string operation. */
11081 int desired_alignment = (TARGET_PENTIUMPRO
11082 && (count == 0 || count >= (unsigned int) 260)
11083 ? 8 : UNITS_PER_WORD);
11084
11085 /* In case we don't know anything about the alignment, default to
11086 library version, since it is usually equally fast and results in
11087 shorter code.
11088
11089 Also emit a call when we know that the count is large and call overhead
11090 will not be important. */
11091 if (!TARGET_INLINE_ALL_STRINGOPS
11092 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11093 return 0;
11094
11095 if (TARGET_SINGLE_STRINGOP)
11096 emit_insn (gen_cld ());
11097
11098 countreg2 = gen_reg_rtx (Pmode);
11099 countreg = copy_to_mode_reg (counter_mode, count_exp);
11100 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
11101 /* Get rid of MEM_OFFSET, it won't be accurate. */
11102 dst = change_address (dst, BLKmode, destreg);
11103
11104 if (count == 0 && align < desired_alignment)
11105 {
11106 label = gen_label_rtx ();
11107 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11108 LEU, 0, counter_mode, 1, label);
11109 }
11110 if (align <= 1)
11111 {
11112 rtx label = ix86_expand_aligntest (destreg, 1);
11113 emit_insn (gen_strset (destreg, dst,
11114 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11115 ix86_adjust_counter (countreg, 1);
11116 emit_label (label);
11117 LABEL_NUSES (label) = 1;
11118 }
11119 if (align <= 2)
11120 {
11121 rtx label = ix86_expand_aligntest (destreg, 2);
11122 emit_insn (gen_strset (destreg, dst,
11123 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11124 ix86_adjust_counter (countreg, 2);
11125 emit_label (label);
11126 LABEL_NUSES (label) = 1;
11127 }
11128 if (align <= 4 && desired_alignment > 4)
11129 {
11130 rtx label = ix86_expand_aligntest (destreg, 4);
11131 emit_insn (gen_strset (destreg, dst,
11132 (TARGET_64BIT
11133 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
11134 : zeroreg)));
11135 ix86_adjust_counter (countreg, 4);
11136 emit_label (label);
11137 LABEL_NUSES (label) = 1;
11138 }
11139
11140 if (label && desired_alignment > 4 && !TARGET_64BIT)
11141 {
11142 emit_label (label);
11143 LABEL_NUSES (label) = 1;
11144 label = NULL_RTX;
11145 }
11146
11147 if (!TARGET_SINGLE_STRINGOP)
11148 emit_insn (gen_cld ());
11149 if (TARGET_64BIT)
11150 {
11151 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11152 GEN_INT (3)));
11153 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
11154 }
11155 else
11156 {
11157 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
11158 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
11159 }
11160 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11161 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
11162
11163 if (label)
11164 {
11165 emit_label (label);
11166 LABEL_NUSES (label) = 1;
11167 }
11168
11169 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11170 emit_insn (gen_strset (destreg, dst,
11171 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11172 if (TARGET_64BIT && (align <= 4 || count == 0))
11173 {
11174 rtx label = ix86_expand_aligntest (countreg, 4);
11175 emit_insn (gen_strset (destreg, dst,
11176 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11177 emit_label (label);
11178 LABEL_NUSES (label) = 1;
11179 }
11180 if (align > 2 && count != 0 && (count & 2))
11181 emit_insn (gen_strset (destreg, dst,
11182 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11183 if (align <= 2 || count == 0)
11184 {
11185 rtx label = ix86_expand_aligntest (countreg, 2);
11186 emit_insn (gen_strset (destreg, dst,
11187 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11188 emit_label (label);
11189 LABEL_NUSES (label) = 1;
11190 }
11191 if (align > 1 && count != 0 && (count & 1))
11192 emit_insn (gen_strset (destreg, dst,
11193 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11194 if (align <= 1 || count == 0)
11195 {
11196 rtx label = ix86_expand_aligntest (countreg, 1);
11197 emit_insn (gen_strset (destreg, dst,
11198 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11199 emit_label (label);
11200 LABEL_NUSES (label) = 1;
11201 }
11202 }
11203 return 1;
11204 }
11205
11206 /* Expand strlen. */
11207 int
11208 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
11209 {
11210 rtx addr, scratch1, scratch2, scratch3, scratch4;
11211
11212 /* The generic case of the strlen expander is long. Avoid its
11213 expansion unless TARGET_INLINE_ALL_STRINGOPS. */
11214
11215 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11216 && !TARGET_INLINE_ALL_STRINGOPS
11217 && !optimize_size
11218 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
11219 return 0;
11220
11221 addr = force_reg (Pmode, XEXP (src, 0));
11222 scratch1 = gen_reg_rtx (Pmode);
11223
11224 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11225 && !optimize_size)
11226 {
11227 /* Well, it seems that some optimizer does not combine a call like
11228 foo(strlen(bar), strlen(bar));
11229 when the move and the subtraction are done here. It does calculate
11230 the length just once when these instructions are done inside of
11231 output_strlen_unroll(). But since &bar[strlen(bar)] is often used
11232 and this uses one fewer register for the lifetime of
11233 output_strlen_unroll(), this is better. */
11234
11235 emit_move_insn (out, addr);
11236
11237 ix86_expand_strlensi_unroll_1 (out, src, align);
11238
11239 /* strlensi_unroll_1 returns the address of the zero at the end of
11240 the string, like memchr(), so compute the length by subtracting
11241 the start address. */
11242 if (TARGET_64BIT)
11243 emit_insn (gen_subdi3 (out, out, addr));
11244 else
11245 emit_insn (gen_subsi3 (out, out, addr));
11246 }
11247 else
11248 {
11249 rtx unspec;
11250 scratch2 = gen_reg_rtx (Pmode);
11251 scratch3 = gen_reg_rtx (Pmode);
11252 scratch4 = force_reg (Pmode, constm1_rtx);
11253
11254 emit_move_insn (scratch3, addr);
11255 eoschar = force_reg (QImode, eoschar);
11256
11257 emit_insn (gen_cld ());
11258 src = replace_equiv_address_nv (src, scratch3);
11259
11260 /* If .md starts supporting :P, this can be done in .md. */
11261 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
11262 scratch4), UNSPEC_SCAS);
11263 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
11264 if (TARGET_64BIT)
11265 {
11266 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
11267 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
11268 }
11269 else
11270 {
11271 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
11272 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
11273 }
11274 }
11275 return 1;
11276 }
11277
11278 /* Expand the appropriate insns for doing strlen if not just doing
11279 repnz; scasb
11280
11281 out = result, initialized with the start address
11282 align_rtx = alignment of the address.
11283 scratch = scratch register, initialized with the start address when
11284 not aligned, otherwise undefined
11285
11286 This is just the body. It needs the initializations mentioned above and
11287 some address computing at the end. These things are done in i386.md. */
11288
11289 static void
11290 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
11291 {
11292 int align;
11293 rtx tmp;
11294 rtx align_2_label = NULL_RTX;
11295 rtx align_3_label = NULL_RTX;
11296 rtx align_4_label = gen_label_rtx ();
11297 rtx end_0_label = gen_label_rtx ();
11298 rtx mem;
11299 rtx tmpreg = gen_reg_rtx (SImode);
11300 rtx scratch = gen_reg_rtx (SImode);
11301 rtx cmp;
11302
11303 align = 0;
11304 if (GET_CODE (align_rtx) == CONST_INT)
11305 align = INTVAL (align_rtx);
11306
11307 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
11308
11309 /* Is there a known alignment and is it less than 4? */
11310 if (align < 4)
11311 {
11312 rtx scratch1 = gen_reg_rtx (Pmode);
11313 emit_move_insn (scratch1, out);
11314 /* Is there a known alignment and is it not 2? */
11315 if (align != 2)
11316 {
11317 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
11318 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
11319
11320 /* Leave just the two lower bits. */
11321 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
11322 NULL_RTX, 0, OPTAB_WIDEN);
11323
11324 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
11325 Pmode, 1, align_4_label);
11326 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
11327 Pmode, 1, align_2_label);
11328 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
11329 Pmode, 1, align_3_label);
11330 }
11331 else
11332 {
11333 /* Since the alignment is 2, we have to check 2 or 0 bytes;
11334 check whether it is aligned to a 4-byte boundary. */
11335
11336 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
11337 NULL_RTX, 0, OPTAB_WIDEN);
11338
11339 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
11340 Pmode, 1, align_4_label);
11341 }
11342
11343 mem = change_address (src, QImode, out);
11344
11345 /* Now compare the bytes. */
11346
11347 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
11348 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
11349 QImode, 1, end_0_label);
11350
11351 /* Increment the address. */
11352 if (TARGET_64BIT)
11353 emit_insn (gen_adddi3 (out, out, const1_rtx));
11354 else
11355 emit_insn (gen_addsi3 (out, out, const1_rtx));
11356
11357 /* Not needed with an alignment of 2 */
11358 if (align != 2)
11359 {
11360 emit_label (align_2_label);
11361
11362 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11363 end_0_label);
11364
11365 if (TARGET_64BIT)
11366 emit_insn (gen_adddi3 (out, out, const1_rtx));
11367 else
11368 emit_insn (gen_addsi3 (out, out, const1_rtx));
11369
11370 emit_label (align_3_label);
11371 }
11372
11373 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11374 end_0_label);
11375
11376 if (TARGET_64BIT)
11377 emit_insn (gen_adddi3 (out, out, const1_rtx));
11378 else
11379 emit_insn (gen_addsi3 (out, out, const1_rtx));
11380 }
11381
11382 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
11383 align this loop: doing so only makes the program larger and does not
11384 speed it up. */
11385 emit_label (align_4_label);
11386
11387 mem = change_address (src, SImode, out);
11388 emit_move_insn (scratch, mem);
11389 if (TARGET_64BIT)
11390 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
11391 else
11392 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
11393
11394 /* This formula yields a nonzero result iff one of the bytes is zero.
11395 This saves three branches inside the loop and many cycles. */
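/* Concretely, the four insns below compute
     tmpreg = (scratch - 0x01010101) & ~scratch & 0x80808080.
   For example, scratch = 0x11220033 gives
     0x1020ff32 & 0xeeddffcc & 0x80808080 = 0x00008000,
   flagging the zero byte, while scratch = 0x01010101 (no zero byte)
   gives 0.  */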
11396
11397 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
11398 emit_insn (gen_one_cmplsi2 (scratch, scratch));
11399 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
11400 emit_insn (gen_andsi3 (tmpreg, tmpreg,
11401 gen_int_mode (0x80808080, SImode)));
11402 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
11403 align_4_label);
11404
11405 if (TARGET_CMOVE)
11406 {
11407 rtx reg = gen_reg_rtx (SImode);
11408 rtx reg2 = gen_reg_rtx (Pmode);
11409 emit_move_insn (reg, tmpreg);
11410 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
11411
11412 /* If zero is not in the first two bytes, move two bytes forward. */
11413 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11414 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11415 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11416 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
11417 gen_rtx_IF_THEN_ELSE (SImode, tmp,
11418 reg,
11419 tmpreg)));
11420 /* Emit lea manually to avoid clobbering of flags. */
11421 emit_insn (gen_rtx_SET (SImode, reg2,
11422 gen_rtx_PLUS (Pmode, out, const2_rtx)));
11423
11424 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11425 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11426 emit_insn (gen_rtx_SET (VOIDmode, out,
11427 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
11428 reg2,
11429 out)));
11430
11431 }
11432 else
11433 {
11434 rtx end_2_label = gen_label_rtx ();
11435 /* Is zero in the first two bytes? */
11436
11437 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11438 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11439 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
11440 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
11441 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
11442 pc_rtx);
11443 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
11444 JUMP_LABEL (tmp) = end_2_label;
11445
11446 /* Not in the first two. Move two bytes forward. */
11447 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
11448 if (TARGET_64BIT)
11449 emit_insn (gen_adddi3 (out, out, const2_rtx));
11450 else
11451 emit_insn (gen_addsi3 (out, out, const2_rtx));
11452
11453 emit_label (end_2_label);
11454
11455 }
11456
11457 /* Avoid a branch when fixing up the final byte. */
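/* At this point OUT is either 4 or 6 past the start of the word that
   contains the zero byte, and the low byte of TMPREG has bit 7 set iff
   the first of the two remaining candidate bytes is the zero.  Adding
   that byte to itself copies bit 7 into the carry flag, so the
   subtract-with-borrow below backs OUT up by 4 or 3 and leaves it
   pointing at the zero byte.  */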
11458 tmpreg = gen_lowpart (QImode, tmpreg);
11459 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
11460 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
11461 if (TARGET_64BIT)
11462 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
11463 else
11464 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
11465
11466 emit_label (end_0_label);
11467 }
11468
11469 void
11470 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
11471 rtx callarg2 ATTRIBUTE_UNUSED,
11472 rtx pop, int sibcall)
11473 {
11474 rtx use = NULL, call;
11475
11476 if (pop == const0_rtx)
11477 pop = NULL;
11478 if (TARGET_64BIT && pop)
11479 abort ();
11480
11481 #if TARGET_MACHO
11482 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
11483 fnaddr = machopic_indirect_call_target (fnaddr);
11484 #else
11485 /* Static functions and indirect calls don't need the pic register. */
11486 if (! TARGET_64BIT && flag_pic
11487 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
11488 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
11489 use_reg (&use, pic_offset_table_rtx);
11490
11491 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
11492 {
11493 rtx al = gen_rtx_REG (QImode, 0);
11494 emit_move_insn (al, callarg2);
11495 use_reg (&use, al);
11496 }
11497 #endif /* TARGET_MACHO */
11498
11499 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
11500 {
11501 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11502 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11503 }
11504 if (sibcall && TARGET_64BIT
11505 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
11506 {
11507 rtx addr;
11508 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11509 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
11510 emit_move_insn (fnaddr, addr);
11511 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11512 }
11513
11514 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
11515 if (retval)
11516 call = gen_rtx_SET (VOIDmode, retval, call);
11517 if (pop)
11518 {
11519 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
11520 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
11521 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
11522 }
11523
11524 call = emit_call_insn (call);
11525 if (use)
11526 CALL_INSN_FUNCTION_USAGE (call) = use;
11527 }
11528
11529 \f
11530 /* Clear stack slot assignments remembered from previous functions.
11531 This is called from INIT_EXPANDERS once before RTL is emitted for each
11532 function. */
11533
11534 static struct machine_function *
11535 ix86_init_machine_status (void)
11536 {
11537 struct machine_function *f;
11538
11539 f = ggc_alloc_cleared (sizeof (struct machine_function));
11540 f->use_fast_prologue_epilogue_nregs = -1;
11541
11542 return f;
11543 }
11544
11545 /* Return a MEM corresponding to a stack slot with mode MODE.
11546 Allocate a new slot if necessary.
11547
11548 The RTL for a function can have several slots available: N is
11549 which slot to use. */
11550
11551 rtx
11552 assign_386_stack_local (enum machine_mode mode, int n)
11553 {
11554 struct stack_local_entry *s;
11555
11556 if (n < 0 || n >= MAX_386_STACK_LOCALS)
11557 abort ();
11558
11559 for (s = ix86_stack_locals; s; s = s->next)
11560 if (s->mode == mode && s->n == n)
11561 return s->rtl;
11562
11563 s = (struct stack_local_entry *)
11564 ggc_alloc (sizeof (struct stack_local_entry));
11565 s->n = n;
11566 s->mode = mode;
11567 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
11568
11569 s->next = ix86_stack_locals;
11570 ix86_stack_locals = s;
11571 return s->rtl;
11572 }
11573
11574 /* Construct the SYMBOL_REF for the tls_get_addr function. */
11575
11576 static GTY(()) rtx ix86_tls_symbol;
11577 rtx
11578 ix86_tls_get_addr (void)
11579 {
11580
11581 if (!ix86_tls_symbol)
11582 {
11583 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
11584 (TARGET_GNU_TLS && !TARGET_64BIT)
11585 ? "___tls_get_addr"
11586 : "__tls_get_addr");
11587 }
11588
11589 return ix86_tls_symbol;
11590 }
11591 \f
11592 /* Calculate the length of the memory address in the instruction
11593 encoding. Does not include the one-byte modrm, opcode, or prefix. */
11594
11595 int
11596 memory_address_length (rtx addr)
11597 {
11598 struct ix86_address parts;
11599 rtx base, index, disp;
11600 int len;
11601
11602 if (GET_CODE (addr) == PRE_DEC
11603 || GET_CODE (addr) == POST_INC
11604 || GET_CODE (addr) == PRE_MODIFY
11605 || GET_CODE (addr) == POST_MODIFY)
11606 return 0;
11607
11608 if (! ix86_decompose_address (addr, &parts))
11609 abort ();
11610
11611 base = parts.base;
11612 index = parts.index;
11613 disp = parts.disp;
11614 len = 0;
11615
11616 /* Rule of thumb:
11617 - esp as the base always wants an index,
11618 - ebp as the base always wants a displacement. */
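/* For example (counting only bytes beyond the opcode, modrm and prefixes):
   (%eax) costs 0, (%esp) costs 1 (SIB byte), 4(%eax) costs 1 (disp8),
   a bare symbol costs 4 (disp32), and 4(%esp) costs 2 (SIB plus disp8).  */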
11619
11620 /* Register Indirect. */
11621 if (base && !index && !disp)
11622 {
11623 /* esp (for its index) and ebp (for its displacement) need
11624 the two-byte modrm form. */
11625 if (addr == stack_pointer_rtx
11626 || addr == arg_pointer_rtx
11627 || addr == frame_pointer_rtx
11628 || addr == hard_frame_pointer_rtx)
11629 len = 1;
11630 }
11631
11632 /* Direct Addressing. */
11633 else if (disp && !base && !index)
11634 len = 4;
11635
11636 else
11637 {
11638 /* Find the length of the displacement constant. */
11639 if (disp)
11640 {
11641 if (GET_CODE (disp) == CONST_INT
11642 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
11643 && base)
11644 len = 1;
11645 else
11646 len = 4;
11647 }
11648 /* ebp always wants a displacement. */
11649 else if (base == hard_frame_pointer_rtx)
11650 len = 1;
11651
11652 /* An index requires the two-byte modrm form.... */
11653 if (index
11654 /* ...like esp, which always wants an index. */
11655 || base == stack_pointer_rtx
11656 || base == arg_pointer_rtx
11657 || base == frame_pointer_rtx)
11658 len += 1;
11659 }
11660
11661 return len;
11662 }
11663
11664 /* Compute default value for "length_immediate" attribute. When SHORTFORM
11665 is set, expect that the insn has an 8-bit immediate alternative. */
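/* For example, "addl $4, %eax" has a sign-extended 8-bit immediate form
   (opcode 0x83), so with SHORTFORM its immediate counts as 1 byte instead
   of the 4 bytes needed by the general 32-bit form (opcode 0x81).  */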
11666 int
11667 ix86_attr_length_immediate_default (rtx insn, int shortform)
11668 {
11669 int len = 0;
11670 int i;
11671 extract_insn_cached (insn);
11672 for (i = recog_data.n_operands - 1; i >= 0; --i)
11673 if (CONSTANT_P (recog_data.operand[i]))
11674 {
11675 if (len)
11676 abort ();
11677 if (shortform
11678 && GET_CODE (recog_data.operand[i]) == CONST_INT
11679 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
11680 len = 1;
11681 else
11682 {
11683 switch (get_attr_mode (insn))
11684 {
11685 case MODE_QI:
11686 len+=1;
11687 break;
11688 case MODE_HI:
11689 len+=2;
11690 break;
11691 case MODE_SI:
11692 len+=4;
11693 break;
11694 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
11695 case MODE_DI:
11696 len+=4;
11697 break;
11698 default:
11699 fatal_insn ("unknown insn mode", insn);
11700 }
11701 }
11702 }
11703 return len;
11704 }
11705 /* Compute default value for "length_address" attribute. */
11706 int
11707 ix86_attr_length_address_default (rtx insn)
11708 {
11709 int i;
11710
11711 if (get_attr_type (insn) == TYPE_LEA)
11712 {
11713 rtx set = PATTERN (insn);
11714 if (GET_CODE (set) == SET)
11715 ;
11716 else if (GET_CODE (set) == PARALLEL
11717 && GET_CODE (XVECEXP (set, 0, 0)) == SET)
11718 set = XVECEXP (set, 0, 0);
11719 else
11720 {
11721 #ifdef ENABLE_CHECKING
11722 abort ();
11723 #endif
11724 return 0;
11725 }
11726
11727 return memory_address_length (SET_SRC (set));
11728 }
11729
11730 extract_insn_cached (insn);
11731 for (i = recog_data.n_operands - 1; i >= 0; --i)
11732 if (GET_CODE (recog_data.operand[i]) == MEM)
11733 {
11734 return memory_address_length (XEXP (recog_data.operand[i], 0));
11735 break;
11736 }
11737 return 0;
11738 }
11739 \f
11740 /* Return the maximum number of instructions a cpu can issue. */
11741
11742 static int
11743 ix86_issue_rate (void)
11744 {
11745 switch (ix86_tune)
11746 {
11747 case PROCESSOR_PENTIUM:
11748 case PROCESSOR_K6:
11749 return 2;
11750
11751 case PROCESSOR_PENTIUMPRO:
11752 case PROCESSOR_PENTIUM4:
11753 case PROCESSOR_ATHLON:
11754 case PROCESSOR_K8:
11755 case PROCESSOR_NOCONA:
11756 return 3;
11757
11758 default:
11759 return 1;
11760 }
11761 }
11762
11763 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
11764 set by DEP_INSN and nothing else that DEP_INSN sets. */
11765
11766 static int
11767 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11768 {
11769 rtx set, set2;
11770
11771 /* Simplify the test for uninteresting insns. */
11772 if (insn_type != TYPE_SETCC
11773 && insn_type != TYPE_ICMOV
11774 && insn_type != TYPE_FCMOV
11775 && insn_type != TYPE_IBR)
11776 return 0;
11777
11778 if ((set = single_set (dep_insn)) != 0)
11779 {
11780 set = SET_DEST (set);
11781 set2 = NULL_RTX;
11782 }
11783 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
11784 && XVECLEN (PATTERN (dep_insn), 0) == 2
11785 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
11786 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
11787 {
11788 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
11789 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
11790 }
11791 else
11792 return 0;
11793
11794 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
11795 return 0;
11796
11797 /* This test is true if the dependent insn reads the flags but
11798 not any other potentially set register. */
11799 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
11800 return 0;
11801
11802 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
11803 return 0;
11804
11805 return 1;
11806 }
11807
11808 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
11809 address with operands set by DEP_INSN. */
11810
11811 static int
11812 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11813 {
11814 rtx addr;
11815
11816 if (insn_type == TYPE_LEA
11817 && TARGET_PENTIUM)
11818 {
11819 addr = PATTERN (insn);
11820 if (GET_CODE (addr) == SET)
11821 ;
11822 else if (GET_CODE (addr) == PARALLEL
11823 && GET_CODE (XVECEXP (addr, 0, 0)) == SET)
11824 addr = XVECEXP (addr, 0, 0);
11825 else
11826 abort ();
11827 addr = SET_SRC (addr);
11828 }
11829 else
11830 {
11831 int i;
11832 extract_insn_cached (insn);
11833 for (i = recog_data.n_operands - 1; i >= 0; --i)
11834 if (GET_CODE (recog_data.operand[i]) == MEM)
11835 {
11836 addr = XEXP (recog_data.operand[i], 0);
11837 goto found;
11838 }
11839 return 0;
11840 found:;
11841 }
11842
11843 return modified_in_p (addr, dep_insn);
11844 }
11845
11846 static int
11847 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
11848 {
11849 enum attr_type insn_type, dep_insn_type;
11850 enum attr_memory memory;
11851 rtx set, set2;
11852 int dep_insn_code_number;
11853
11854 /* Anti and output dependencies have zero cost on all CPUs. */
11855 if (REG_NOTE_KIND (link) != 0)
11856 return 0;
11857
11858 dep_insn_code_number = recog_memoized (dep_insn);
11859
11860 /* If we can't recognize the insns, we can't really do anything. */
11861 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
11862 return cost;
11863
11864 insn_type = get_attr_type (insn);
11865 dep_insn_type = get_attr_type (dep_insn);
11866
11867 switch (ix86_tune)
11868 {
11869 case PROCESSOR_PENTIUM:
11870 /* Address Generation Interlock adds a cycle of latency. */
11871 if (ix86_agi_dependant (insn, dep_insn, insn_type))
11872 cost += 1;
11873
11874 /* ??? Compares pair with jump/setcc. */
11875 if (ix86_flags_dependant (insn, dep_insn, insn_type))
11876 cost = 0;
11877
11878 /* Floating point stores require the value to be ready one cycle earlier. */
11879 if (insn_type == TYPE_FMOV
11880 && get_attr_memory (insn) == MEMORY_STORE
11881 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11882 cost += 1;
11883 break;
11884
11885 case PROCESSOR_PENTIUMPRO:
11886 memory = get_attr_memory (insn);
11887
11888 /* INT->FP conversion is expensive. */
11889 if (get_attr_fp_int_src (dep_insn))
11890 cost += 5;
11891
11892 /* There is one cycle extra latency between an FP op and a store. */
11893 if (insn_type == TYPE_FMOV
11894 && (set = single_set (dep_insn)) != NULL_RTX
11895 && (set2 = single_set (insn)) != NULL_RTX
11896 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
11897 && GET_CODE (SET_DEST (set2)) == MEM)
11898 cost += 1;
11899
11900 /* Show the ability of the reorder buffer to hide the latency of a load
11901 by executing it in parallel with the previous instruction when the
11902 previous instruction is not needed to compute the address. */
11903 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11904 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11905 {
11906 /* Claim moves to take one cycle, as the core can issue one load
11907 at a time and the next load can start a cycle later. */
11908 if (dep_insn_type == TYPE_IMOV
11909 || dep_insn_type == TYPE_FMOV)
11910 cost = 1;
11911 else if (cost > 1)
11912 cost--;
11913 }
11914 break;
11915
11916 case PROCESSOR_K6:
11917 memory = get_attr_memory (insn);
11918
11919 /* The esp dependency is resolved before the instruction is really
11920 finished. */
11921 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
11922 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
11923 return 1;
11924
11925 /* INT->FP conversion is expensive. */
11926 if (get_attr_fp_int_src (dep_insn))
11927 cost += 5;
11928
11929 /* Show the ability of the reorder buffer to hide the latency of a load
11930 by executing it in parallel with the previous instruction when the
11931 previous instruction is not needed to compute the address. */
11932 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11933 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11934 {
11935 /* Claim moves to take one cycle, as the core can issue one load
11936 at a time and the next load can start a cycle later. */
11937 if (dep_insn_type == TYPE_IMOV
11938 || dep_insn_type == TYPE_FMOV)
11939 cost = 1;
11940 else if (cost > 2)
11941 cost -= 2;
11942 else
11943 cost = 1;
11944 }
11945 break;
11946
11947 case PROCESSOR_ATHLON:
11948 case PROCESSOR_K8:
11949 memory = get_attr_memory (insn);
11950
11951 /* Show the ability of the reorder buffer to hide the latency of a load
11952 by executing it in parallel with the previous instruction when the
11953 previous instruction is not needed to compute the address. */
11954 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11955 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11956 {
11957 enum attr_unit unit = get_attr_unit (insn);
11958 int loadcost = 3;
11959
11960 /* Because of the difference between the length of integer and
11961 floating unit pipeline preparation stages, the memory operands
11962 for floating point are cheaper.
11963
11964 ??? For Athlon the difference is most probably 2. */
11965 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
11966 loadcost = 3;
11967 else
11968 loadcost = TARGET_ATHLON ? 2 : 0;
11969
11970 if (cost >= loadcost)
11971 cost -= loadcost;
11972 else
11973 cost = 0;
11974 }
11975
11976 default:
11977 break;
11978 }
11979
11980 return cost;
11981 }
11982
11983 /* How many alternative schedules to try. This should be as wide as the
11984 scheduling freedom in the DFA, but no wider. Making this value too
11985 large results in extra work for the scheduler. */
11986
11987 static int
11988 ia32_multipass_dfa_lookahead (void)
11989 {
11990 if (ix86_tune == PROCESSOR_PENTIUM)
11991 return 2;
11992
11993 if (ix86_tune == PROCESSOR_PENTIUMPRO
11994 || ix86_tune == PROCESSOR_K6)
11995 return 1;
11996
11997 else
11998 return 0;
11999 }
12000
12001 \f
12002 /* Compute the alignment given to a constant that is being placed in memory.
12003 EXP is the constant and ALIGN is the alignment that the object would
12004 ordinarily have.
12005 The value of this function is used instead of that alignment to align
12006 the object. */
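/* For example, a DFmode (double) constant whose default alignment would be
   only 32 bits is given 64-bit alignment here, and string constants of 31
   or more characters are aligned to a word boundary unless optimizing for
   size.  */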
12007
12008 int
12009 ix86_constant_alignment (tree exp, int align)
12010 {
12011 if (TREE_CODE (exp) == REAL_CST)
12012 {
12013 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
12014 return 64;
12015 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
12016 return 128;
12017 }
12018 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
12019 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
12020 return BITS_PER_WORD;
12021
12022 return align;
12023 }
12024
12025 /* Compute the alignment for a static variable.
12026 TYPE is the data type, and ALIGN is the alignment that
12027 the object would ordinarily have. The value of this function is used
12028 instead of that alignment to align the object. */
12029
12030 int
12031 ix86_data_alignment (tree type, int align)
12032 {
12033 if (AGGREGATE_TYPE_P (type)
12034 && TYPE_SIZE (type)
12035 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12036 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
12037 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
12038 return 256;
12039
12040 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
12041 to a 16-byte boundary. */
12042 if (TARGET_64BIT)
12043 {
12044 if (AGGREGATE_TYPE_P (type)
12045 && TYPE_SIZE (type)
12046 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12047 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
12048 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12049 return 128;
12050 }
12051
12052 if (TREE_CODE (type) == ARRAY_TYPE)
12053 {
12054 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12055 return 64;
12056 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12057 return 128;
12058 }
12059 else if (TREE_CODE (type) == COMPLEX_TYPE)
12060 {
12061
12062 if (TYPE_MODE (type) == DCmode && align < 64)
12063 return 64;
12064 if (TYPE_MODE (type) == XCmode && align < 128)
12065 return 128;
12066 }
12067 else if ((TREE_CODE (type) == RECORD_TYPE
12068 || TREE_CODE (type) == UNION_TYPE
12069 || TREE_CODE (type) == QUAL_UNION_TYPE)
12070 && TYPE_FIELDS (type))
12071 {
12072 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12073 return 64;
12074 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12075 return 128;
12076 }
12077 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12078 || TREE_CODE (type) == INTEGER_TYPE)
12079 {
12080 if (TYPE_MODE (type) == DFmode && align < 64)
12081 return 64;
12082 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12083 return 128;
12084 }
12085
12086 return align;
12087 }
12088
12089 /* Compute the alignment for a local variable.
12090 TYPE is the data type, and ALIGN is the alignment that
12091 the object would ordinarily have. The value of this macro is used
12092 instead of that alignment to align the object. */
12093
12094 int
12095 ix86_local_alignment (tree type, int align)
12096 {
12097 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
12098 to a 16-byte boundary. */
12099 if (TARGET_64BIT)
12100 {
12101 if (AGGREGATE_TYPE_P (type)
12102 && TYPE_SIZE (type)
12103 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12104 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
12105 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12106 return 128;
12107 }
12108 if (TREE_CODE (type) == ARRAY_TYPE)
12109 {
12110 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12111 return 64;
12112 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12113 return 128;
12114 }
12115 else if (TREE_CODE (type) == COMPLEX_TYPE)
12116 {
12117 if (TYPE_MODE (type) == DCmode && align < 64)
12118 return 64;
12119 if (TYPE_MODE (type) == XCmode && align < 128)
12120 return 128;
12121 }
12122 else if ((TREE_CODE (type) == RECORD_TYPE
12123 || TREE_CODE (type) == UNION_TYPE
12124 || TREE_CODE (type) == QUAL_UNION_TYPE)
12125 && TYPE_FIELDS (type))
12126 {
12127 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12128 return 64;
12129 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12130 return 128;
12131 }
12132 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12133 || TREE_CODE (type) == INTEGER_TYPE)
12134 {
12135
12136 if (TYPE_MODE (type) == DFmode && align < 64)
12137 return 64;
12138 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12139 return 128;
12140 }
12141 return align;
12142 }
12143 \f
12144 /* Emit RTL insns to initialize the variable parts of a trampoline.
12145 FNADDR is an RTX for the address of the function's pure code.
12146 CXT is an RTX for the static chain value for the function. */
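/* The trampoline emitted below is, for !TARGET_64BIT,
       movl   $CXT, %ecx		0xb9 + imm32
       jmp    FNADDR			0xe9 + rel32
   and for TARGET_64BIT (using the shorter movl $FNADDR, %r11d, encoded as
   0x41 0xbb + imm32, when the address fits in 32 bits),
       movabs $FNADDR, %r11		0x49 0xbb + imm64
       movabs $CXT, %r10		0x49 0xba + imm64
       jmp    *%r11			0x49 0xff 0xe3
   written out byte by byte by the stores below.  */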
12147 void
12148 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
12149 {
12150 if (!TARGET_64BIT)
12151 {
12152 /* Compute offset from the end of the jmp to the target function. */
12153 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
12154 plus_constant (tramp, 10),
12155 NULL_RTX, 1, OPTAB_DIRECT);
12156 emit_move_insn (gen_rtx_MEM (QImode, tramp),
12157 gen_int_mode (0xb9, QImode));
12158 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
12159 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
12160 gen_int_mode (0xe9, QImode));
12161 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
12162 }
12163 else
12164 {
12165 int offset = 0;
12166 /* Try to load the address using the shorter movl instead of movabs.
12167 We may want to support movq for kernel mode, but the kernel does not
12168 use trampolines at the moment. */
12169 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
12170 {
12171 fnaddr = copy_to_mode_reg (DImode, fnaddr);
12172 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12173 gen_int_mode (0xbb41, HImode));
12174 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
12175 gen_lowpart (SImode, fnaddr));
12176 offset += 6;
12177 }
12178 else
12179 {
12180 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12181 gen_int_mode (0xbb49, HImode));
12182 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12183 fnaddr);
12184 offset += 10;
12185 }
12186 /* Load static chain using movabs to r10. */
12187 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12188 gen_int_mode (0xba49, HImode));
12189 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12190 cxt);
12191 offset += 10;
12192 /* Jump to r11. */
12193 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12194 gen_int_mode (0xff49, HImode));
12195 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
12196 gen_int_mode (0xe3, QImode));
12197 offset += 3;
12198 if (offset > TRAMPOLINE_SIZE)
12199 abort ();
12200 }
12201
12202 #ifdef ENABLE_EXECUTE_STACK
12203 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
12204 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
12205 #endif
12206 }
12207 \f
12208 /* Codes for all the SSE/MMX builtins. */
12209 enum ix86_builtins
12210 {
12211 IX86_BUILTIN_ADDPS,
12212 IX86_BUILTIN_ADDSS,
12213 IX86_BUILTIN_DIVPS,
12214 IX86_BUILTIN_DIVSS,
12215 IX86_BUILTIN_MULPS,
12216 IX86_BUILTIN_MULSS,
12217 IX86_BUILTIN_SUBPS,
12218 IX86_BUILTIN_SUBSS,
12219
12220 IX86_BUILTIN_CMPEQPS,
12221 IX86_BUILTIN_CMPLTPS,
12222 IX86_BUILTIN_CMPLEPS,
12223 IX86_BUILTIN_CMPGTPS,
12224 IX86_BUILTIN_CMPGEPS,
12225 IX86_BUILTIN_CMPNEQPS,
12226 IX86_BUILTIN_CMPNLTPS,
12227 IX86_BUILTIN_CMPNLEPS,
12228 IX86_BUILTIN_CMPNGTPS,
12229 IX86_BUILTIN_CMPNGEPS,
12230 IX86_BUILTIN_CMPORDPS,
12231 IX86_BUILTIN_CMPUNORDPS,
12232 IX86_BUILTIN_CMPNEPS,
12233 IX86_BUILTIN_CMPEQSS,
12234 IX86_BUILTIN_CMPLTSS,
12235 IX86_BUILTIN_CMPLESS,
12236 IX86_BUILTIN_CMPNEQSS,
12237 IX86_BUILTIN_CMPNLTSS,
12238 IX86_BUILTIN_CMPNLESS,
12239 IX86_BUILTIN_CMPNGTSS,
12240 IX86_BUILTIN_CMPNGESS,
12241 IX86_BUILTIN_CMPORDSS,
12242 IX86_BUILTIN_CMPUNORDSS,
12243 IX86_BUILTIN_CMPNESS,
12244
12245 IX86_BUILTIN_COMIEQSS,
12246 IX86_BUILTIN_COMILTSS,
12247 IX86_BUILTIN_COMILESS,
12248 IX86_BUILTIN_COMIGTSS,
12249 IX86_BUILTIN_COMIGESS,
12250 IX86_BUILTIN_COMINEQSS,
12251 IX86_BUILTIN_UCOMIEQSS,
12252 IX86_BUILTIN_UCOMILTSS,
12253 IX86_BUILTIN_UCOMILESS,
12254 IX86_BUILTIN_UCOMIGTSS,
12255 IX86_BUILTIN_UCOMIGESS,
12256 IX86_BUILTIN_UCOMINEQSS,
12257
12258 IX86_BUILTIN_CVTPI2PS,
12259 IX86_BUILTIN_CVTPS2PI,
12260 IX86_BUILTIN_CVTSI2SS,
12261 IX86_BUILTIN_CVTSI642SS,
12262 IX86_BUILTIN_CVTSS2SI,
12263 IX86_BUILTIN_CVTSS2SI64,
12264 IX86_BUILTIN_CVTTPS2PI,
12265 IX86_BUILTIN_CVTTSS2SI,
12266 IX86_BUILTIN_CVTTSS2SI64,
12267
12268 IX86_BUILTIN_MAXPS,
12269 IX86_BUILTIN_MAXSS,
12270 IX86_BUILTIN_MINPS,
12271 IX86_BUILTIN_MINSS,
12272
12273 IX86_BUILTIN_LOADUPS,
12274 IX86_BUILTIN_STOREUPS,
12275 IX86_BUILTIN_MOVSS,
12276
12277 IX86_BUILTIN_MOVHLPS,
12278 IX86_BUILTIN_MOVLHPS,
12279 IX86_BUILTIN_LOADHPS,
12280 IX86_BUILTIN_LOADLPS,
12281 IX86_BUILTIN_STOREHPS,
12282 IX86_BUILTIN_STORELPS,
12283
12284 IX86_BUILTIN_MASKMOVQ,
12285 IX86_BUILTIN_MOVMSKPS,
12286 IX86_BUILTIN_PMOVMSKB,
12287
12288 IX86_BUILTIN_MOVNTPS,
12289 IX86_BUILTIN_MOVNTQ,
12290
12291 IX86_BUILTIN_LOADDQU,
12292 IX86_BUILTIN_STOREDQU,
12293
12294 IX86_BUILTIN_PACKSSWB,
12295 IX86_BUILTIN_PACKSSDW,
12296 IX86_BUILTIN_PACKUSWB,
12297
12298 IX86_BUILTIN_PADDB,
12299 IX86_BUILTIN_PADDW,
12300 IX86_BUILTIN_PADDD,
12301 IX86_BUILTIN_PADDQ,
12302 IX86_BUILTIN_PADDSB,
12303 IX86_BUILTIN_PADDSW,
12304 IX86_BUILTIN_PADDUSB,
12305 IX86_BUILTIN_PADDUSW,
12306 IX86_BUILTIN_PSUBB,
12307 IX86_BUILTIN_PSUBW,
12308 IX86_BUILTIN_PSUBD,
12309 IX86_BUILTIN_PSUBQ,
12310 IX86_BUILTIN_PSUBSB,
12311 IX86_BUILTIN_PSUBSW,
12312 IX86_BUILTIN_PSUBUSB,
12313 IX86_BUILTIN_PSUBUSW,
12314
12315 IX86_BUILTIN_PAND,
12316 IX86_BUILTIN_PANDN,
12317 IX86_BUILTIN_POR,
12318 IX86_BUILTIN_PXOR,
12319
12320 IX86_BUILTIN_PAVGB,
12321 IX86_BUILTIN_PAVGW,
12322
12323 IX86_BUILTIN_PCMPEQB,
12324 IX86_BUILTIN_PCMPEQW,
12325 IX86_BUILTIN_PCMPEQD,
12326 IX86_BUILTIN_PCMPGTB,
12327 IX86_BUILTIN_PCMPGTW,
12328 IX86_BUILTIN_PCMPGTD,
12329
12330 IX86_BUILTIN_PMADDWD,
12331
12332 IX86_BUILTIN_PMAXSW,
12333 IX86_BUILTIN_PMAXUB,
12334 IX86_BUILTIN_PMINSW,
12335 IX86_BUILTIN_PMINUB,
12336
12337 IX86_BUILTIN_PMULHUW,
12338 IX86_BUILTIN_PMULHW,
12339 IX86_BUILTIN_PMULLW,
12340
12341 IX86_BUILTIN_PSADBW,
12342 IX86_BUILTIN_PSHUFW,
12343
12344 IX86_BUILTIN_PSLLW,
12345 IX86_BUILTIN_PSLLD,
12346 IX86_BUILTIN_PSLLQ,
12347 IX86_BUILTIN_PSRAW,
12348 IX86_BUILTIN_PSRAD,
12349 IX86_BUILTIN_PSRLW,
12350 IX86_BUILTIN_PSRLD,
12351 IX86_BUILTIN_PSRLQ,
12352 IX86_BUILTIN_PSLLWI,
12353 IX86_BUILTIN_PSLLDI,
12354 IX86_BUILTIN_PSLLQI,
12355 IX86_BUILTIN_PSRAWI,
12356 IX86_BUILTIN_PSRADI,
12357 IX86_BUILTIN_PSRLWI,
12358 IX86_BUILTIN_PSRLDI,
12359 IX86_BUILTIN_PSRLQI,
12360
12361 IX86_BUILTIN_PUNPCKHBW,
12362 IX86_BUILTIN_PUNPCKHWD,
12363 IX86_BUILTIN_PUNPCKHDQ,
12364 IX86_BUILTIN_PUNPCKLBW,
12365 IX86_BUILTIN_PUNPCKLWD,
12366 IX86_BUILTIN_PUNPCKLDQ,
12367
12368 IX86_BUILTIN_SHUFPS,
12369
12370 IX86_BUILTIN_RCPPS,
12371 IX86_BUILTIN_RCPSS,
12372 IX86_BUILTIN_RSQRTPS,
12373 IX86_BUILTIN_RSQRTSS,
12374 IX86_BUILTIN_SQRTPS,
12375 IX86_BUILTIN_SQRTSS,
12376
12377 IX86_BUILTIN_UNPCKHPS,
12378 IX86_BUILTIN_UNPCKLPS,
12379
12380 IX86_BUILTIN_ANDPS,
12381 IX86_BUILTIN_ANDNPS,
12382 IX86_BUILTIN_ORPS,
12383 IX86_BUILTIN_XORPS,
12384
12385 IX86_BUILTIN_EMMS,
12386 IX86_BUILTIN_LDMXCSR,
12387 IX86_BUILTIN_STMXCSR,
12388 IX86_BUILTIN_SFENCE,
12389
12390 /* 3DNow! Original */
12391 IX86_BUILTIN_FEMMS,
12392 IX86_BUILTIN_PAVGUSB,
12393 IX86_BUILTIN_PF2ID,
12394 IX86_BUILTIN_PFACC,
12395 IX86_BUILTIN_PFADD,
12396 IX86_BUILTIN_PFCMPEQ,
12397 IX86_BUILTIN_PFCMPGE,
12398 IX86_BUILTIN_PFCMPGT,
12399 IX86_BUILTIN_PFMAX,
12400 IX86_BUILTIN_PFMIN,
12401 IX86_BUILTIN_PFMUL,
12402 IX86_BUILTIN_PFRCP,
12403 IX86_BUILTIN_PFRCPIT1,
12404 IX86_BUILTIN_PFRCPIT2,
12405 IX86_BUILTIN_PFRSQIT1,
12406 IX86_BUILTIN_PFRSQRT,
12407 IX86_BUILTIN_PFSUB,
12408 IX86_BUILTIN_PFSUBR,
12409 IX86_BUILTIN_PI2FD,
12410 IX86_BUILTIN_PMULHRW,
12411
12412 /* 3DNow! Athlon Extensions */
12413 IX86_BUILTIN_PF2IW,
12414 IX86_BUILTIN_PFNACC,
12415 IX86_BUILTIN_PFPNACC,
12416 IX86_BUILTIN_PI2FW,
12417 IX86_BUILTIN_PSWAPDSI,
12418 IX86_BUILTIN_PSWAPDSF,
12419
12420 /* SSE2 */
12421 IX86_BUILTIN_ADDPD,
12422 IX86_BUILTIN_ADDSD,
12423 IX86_BUILTIN_DIVPD,
12424 IX86_BUILTIN_DIVSD,
12425 IX86_BUILTIN_MULPD,
12426 IX86_BUILTIN_MULSD,
12427 IX86_BUILTIN_SUBPD,
12428 IX86_BUILTIN_SUBSD,
12429
12430 IX86_BUILTIN_CMPEQPD,
12431 IX86_BUILTIN_CMPLTPD,
12432 IX86_BUILTIN_CMPLEPD,
12433 IX86_BUILTIN_CMPGTPD,
12434 IX86_BUILTIN_CMPGEPD,
12435 IX86_BUILTIN_CMPNEQPD,
12436 IX86_BUILTIN_CMPNLTPD,
12437 IX86_BUILTIN_CMPNLEPD,
12438 IX86_BUILTIN_CMPNGTPD,
12439 IX86_BUILTIN_CMPNGEPD,
12440 IX86_BUILTIN_CMPORDPD,
12441 IX86_BUILTIN_CMPUNORDPD,
12442 IX86_BUILTIN_CMPNEPD,
12443 IX86_BUILTIN_CMPEQSD,
12444 IX86_BUILTIN_CMPLTSD,
12445 IX86_BUILTIN_CMPLESD,
12446 IX86_BUILTIN_CMPNEQSD,
12447 IX86_BUILTIN_CMPNLTSD,
12448 IX86_BUILTIN_CMPNLESD,
12449 IX86_BUILTIN_CMPORDSD,
12450 IX86_BUILTIN_CMPUNORDSD,
12451 IX86_BUILTIN_CMPNESD,
12452
12453 IX86_BUILTIN_COMIEQSD,
12454 IX86_BUILTIN_COMILTSD,
12455 IX86_BUILTIN_COMILESD,
12456 IX86_BUILTIN_COMIGTSD,
12457 IX86_BUILTIN_COMIGESD,
12458 IX86_BUILTIN_COMINEQSD,
12459 IX86_BUILTIN_UCOMIEQSD,
12460 IX86_BUILTIN_UCOMILTSD,
12461 IX86_BUILTIN_UCOMILESD,
12462 IX86_BUILTIN_UCOMIGTSD,
12463 IX86_BUILTIN_UCOMIGESD,
12464 IX86_BUILTIN_UCOMINEQSD,
12465
12466 IX86_BUILTIN_MAXPD,
12467 IX86_BUILTIN_MAXSD,
12468 IX86_BUILTIN_MINPD,
12469 IX86_BUILTIN_MINSD,
12470
12471 IX86_BUILTIN_ANDPD,
12472 IX86_BUILTIN_ANDNPD,
12473 IX86_BUILTIN_ORPD,
12474 IX86_BUILTIN_XORPD,
12475
12476 IX86_BUILTIN_SQRTPD,
12477 IX86_BUILTIN_SQRTSD,
12478
12479 IX86_BUILTIN_UNPCKHPD,
12480 IX86_BUILTIN_UNPCKLPD,
12481
12482 IX86_BUILTIN_SHUFPD,
12483
12484 IX86_BUILTIN_LOADUPD,
12485 IX86_BUILTIN_STOREUPD,
12486 IX86_BUILTIN_MOVSD,
12487
12488 IX86_BUILTIN_LOADHPD,
12489 IX86_BUILTIN_LOADLPD,
12490
12491 IX86_BUILTIN_CVTDQ2PD,
12492 IX86_BUILTIN_CVTDQ2PS,
12493
12494 IX86_BUILTIN_CVTPD2DQ,
12495 IX86_BUILTIN_CVTPD2PI,
12496 IX86_BUILTIN_CVTPD2PS,
12497 IX86_BUILTIN_CVTTPD2DQ,
12498 IX86_BUILTIN_CVTTPD2PI,
12499
12500 IX86_BUILTIN_CVTPI2PD,
12501 IX86_BUILTIN_CVTSI2SD,
12502 IX86_BUILTIN_CVTSI642SD,
12503
12504 IX86_BUILTIN_CVTSD2SI,
12505 IX86_BUILTIN_CVTSD2SI64,
12506 IX86_BUILTIN_CVTSD2SS,
12507 IX86_BUILTIN_CVTSS2SD,
12508 IX86_BUILTIN_CVTTSD2SI,
12509 IX86_BUILTIN_CVTTSD2SI64,
12510
12511 IX86_BUILTIN_CVTPS2DQ,
12512 IX86_BUILTIN_CVTPS2PD,
12513 IX86_BUILTIN_CVTTPS2DQ,
12514
12515 IX86_BUILTIN_MOVNTI,
12516 IX86_BUILTIN_MOVNTPD,
12517 IX86_BUILTIN_MOVNTDQ,
12518
12519 /* SSE2 MMX */
12520 IX86_BUILTIN_MASKMOVDQU,
12521 IX86_BUILTIN_MOVMSKPD,
12522 IX86_BUILTIN_PMOVMSKB128,
12523
12524 IX86_BUILTIN_PACKSSWB128,
12525 IX86_BUILTIN_PACKSSDW128,
12526 IX86_BUILTIN_PACKUSWB128,
12527
12528 IX86_BUILTIN_PADDB128,
12529 IX86_BUILTIN_PADDW128,
12530 IX86_BUILTIN_PADDD128,
12531 IX86_BUILTIN_PADDQ128,
12532 IX86_BUILTIN_PADDSB128,
12533 IX86_BUILTIN_PADDSW128,
12534 IX86_BUILTIN_PADDUSB128,
12535 IX86_BUILTIN_PADDUSW128,
12536 IX86_BUILTIN_PSUBB128,
12537 IX86_BUILTIN_PSUBW128,
12538 IX86_BUILTIN_PSUBD128,
12539 IX86_BUILTIN_PSUBQ128,
12540 IX86_BUILTIN_PSUBSB128,
12541 IX86_BUILTIN_PSUBSW128,
12542 IX86_BUILTIN_PSUBUSB128,
12543 IX86_BUILTIN_PSUBUSW128,
12544
12545 IX86_BUILTIN_PAND128,
12546 IX86_BUILTIN_PANDN128,
12547 IX86_BUILTIN_POR128,
12548 IX86_BUILTIN_PXOR128,
12549
12550 IX86_BUILTIN_PAVGB128,
12551 IX86_BUILTIN_PAVGW128,
12552
12553 IX86_BUILTIN_PCMPEQB128,
12554 IX86_BUILTIN_PCMPEQW128,
12555 IX86_BUILTIN_PCMPEQD128,
12556 IX86_BUILTIN_PCMPGTB128,
12557 IX86_BUILTIN_PCMPGTW128,
12558 IX86_BUILTIN_PCMPGTD128,
12559
12560 IX86_BUILTIN_PMADDWD128,
12561
12562 IX86_BUILTIN_PMAXSW128,
12563 IX86_BUILTIN_PMAXUB128,
12564 IX86_BUILTIN_PMINSW128,
12565 IX86_BUILTIN_PMINUB128,
12566
12567 IX86_BUILTIN_PMULUDQ,
12568 IX86_BUILTIN_PMULUDQ128,
12569 IX86_BUILTIN_PMULHUW128,
12570 IX86_BUILTIN_PMULHW128,
12571 IX86_BUILTIN_PMULLW128,
12572
12573 IX86_BUILTIN_PSADBW128,
12574 IX86_BUILTIN_PSHUFHW,
12575 IX86_BUILTIN_PSHUFLW,
12576 IX86_BUILTIN_PSHUFD,
12577
12578 IX86_BUILTIN_PSLLW128,
12579 IX86_BUILTIN_PSLLD128,
12580 IX86_BUILTIN_PSLLQ128,
12581 IX86_BUILTIN_PSRAW128,
12582 IX86_BUILTIN_PSRAD128,
12583 IX86_BUILTIN_PSRLW128,
12584 IX86_BUILTIN_PSRLD128,
12585 IX86_BUILTIN_PSRLQ128,
12586 IX86_BUILTIN_PSLLDQI128,
12587 IX86_BUILTIN_PSLLWI128,
12588 IX86_BUILTIN_PSLLDI128,
12589 IX86_BUILTIN_PSLLQI128,
12590 IX86_BUILTIN_PSRAWI128,
12591 IX86_BUILTIN_PSRADI128,
12592 IX86_BUILTIN_PSRLDQI128,
12593 IX86_BUILTIN_PSRLWI128,
12594 IX86_BUILTIN_PSRLDI128,
12595 IX86_BUILTIN_PSRLQI128,
12596
12597 IX86_BUILTIN_PUNPCKHBW128,
12598 IX86_BUILTIN_PUNPCKHWD128,
12599 IX86_BUILTIN_PUNPCKHDQ128,
12600 IX86_BUILTIN_PUNPCKHQDQ128,
12601 IX86_BUILTIN_PUNPCKLBW128,
12602 IX86_BUILTIN_PUNPCKLWD128,
12603 IX86_BUILTIN_PUNPCKLDQ128,
12604 IX86_BUILTIN_PUNPCKLQDQ128,
12605
12606 IX86_BUILTIN_CLFLUSH,
12607 IX86_BUILTIN_MFENCE,
12608 IX86_BUILTIN_LFENCE,
12609
12610 /* Prescott New Instructions. */
12611 IX86_BUILTIN_ADDSUBPS,
12612 IX86_BUILTIN_HADDPS,
12613 IX86_BUILTIN_HSUBPS,
12614 IX86_BUILTIN_MOVSHDUP,
12615 IX86_BUILTIN_MOVSLDUP,
12616 IX86_BUILTIN_ADDSUBPD,
12617 IX86_BUILTIN_HADDPD,
12618 IX86_BUILTIN_HSUBPD,
12619 IX86_BUILTIN_LDDQU,
12620
12621 IX86_BUILTIN_MONITOR,
12622 IX86_BUILTIN_MWAIT,
12623
12624 IX86_BUILTIN_VEC_INIT_V2SI,
12625 IX86_BUILTIN_VEC_INIT_V4HI,
12626 IX86_BUILTIN_VEC_INIT_V8QI,
12627 IX86_BUILTIN_VEC_EXT_V2DF,
12628 IX86_BUILTIN_VEC_EXT_V2DI,
12629 IX86_BUILTIN_VEC_EXT_V4SF,
12630 IX86_BUILTIN_VEC_EXT_V4SI,
12631 IX86_BUILTIN_VEC_EXT_V8HI,
12632 IX86_BUILTIN_VEC_EXT_V4HI,
12633 IX86_BUILTIN_VEC_SET_V8HI,
12634 IX86_BUILTIN_VEC_SET_V4HI,
12635
12636 IX86_BUILTIN_MAX
12637 };
12638
12639 #define def_builtin(MASK, NAME, TYPE, CODE) \
12640 do { \
12641 if ((MASK) & target_flags \
12642 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
12643 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
12644 NULL, NULL_TREE); \
12645 } while (0)
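/* For instance, a use along the lines of
     def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr",
		  void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
   (the FUNCTION_TYPE node name is illustrative) registers the builtin only
   when SSE is enabled, and the MASK_64BIT check above further suppresses
   64-bit-only builtins on 32-bit targets.  */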
12646
12647 /* Bits for builtin_description.flag. */
12648
12649 /* Set when we don't support the comparison natively, and should
12650 swap_comparison in order to support it. */
12651 #define BUILTIN_DESC_SWAP_OPERANDS 1
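/* For example, __builtin_ia32_cmpgtps in bdesc_2arg below is described as
   an LT comparison with BUILTIN_DESC_SWAP_OPERANDS set, since only the LT
   form is available natively.  */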
12652
12653 struct builtin_description
12654 {
12655 const unsigned int mask;
12656 const enum insn_code icode;
12657 const char *const name;
12658 const enum ix86_builtins code;
12659 const enum rtx_code comparison;
12660 const unsigned int flag;
12661 };
12662
12663 static const struct builtin_description bdesc_comi[] =
12664 {
12665 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
12666 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
12667 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
12668 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
12669 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
12670 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
12671 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
12672 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
12673 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
12674 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
12675 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
12676 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
12677 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
12678 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
12679 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
12680 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
12681 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
12682 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
12683 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
12684 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
12685 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
12686 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
12687 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
12688 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
12689 };
12690
12691 static const struct builtin_description bdesc_2arg[] =
12692 {
12693 /* SSE */
12694 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
12695 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
12696 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
12697 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
12698 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
12699 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
12700 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
12701 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
12702
12703 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
12704 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
12705 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
12706 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
12707 BUILTIN_DESC_SWAP_OPERANDS },
12708 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
12709 BUILTIN_DESC_SWAP_OPERANDS },
12710 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
12711 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
12712 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
12713 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
12714 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
12715 BUILTIN_DESC_SWAP_OPERANDS },
12716 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
12717 BUILTIN_DESC_SWAP_OPERANDS },
12718 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
12719 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
12720 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
12721 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
12722 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
12723 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
12724 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
12725 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
12726 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
12727 BUILTIN_DESC_SWAP_OPERANDS },
12728 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
12729 BUILTIN_DESC_SWAP_OPERANDS },
12730 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
12731
12732 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
12733 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
12734 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
12735 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
12736
12737 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
12738 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
12739 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
12740 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
12741
12742 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
12743 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
12744 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
12745 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
12746 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
12747
12748 /* MMX */
12749 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
12750 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
12751 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
12752 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
12753 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
12754 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
12755 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
12756 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
12757
12758 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
12759 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
12760 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
12761 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
12762 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
12763 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
12764 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
12765 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
12766
12767 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
12768 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
12769 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
12770
12771 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
12772 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
12773 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
12774 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
12775
12776 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
12777 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
12778
12779 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
12780 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
12781 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
12782 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
12783 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
12784 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
12785
12786 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
12787 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
12788 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
12789 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
12790
12791 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
12792 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
12793 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
12794 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
12795 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
12796 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
12797
12798 /* Special. */
12799 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
12800 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
12801 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
12802
12803 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
12804 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
12805 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
12806
12807 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
12808 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
12809 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
12810 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
12811 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
12812 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
12813
12814 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
12815 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
12816 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
12817 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
12818 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
12819 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
12820
12821 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
12822 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
12823 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
12824 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
12825
12826 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
12827 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
12828
12829 /* SSE2 */
12830 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
12831 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
12832 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
12833 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
12834 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
12835 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
12836 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
12837 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
12838
12839 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
12840 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
12841 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
12842 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
12843 BUILTIN_DESC_SWAP_OPERANDS },
12844 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
12845 BUILTIN_DESC_SWAP_OPERANDS },
12846 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
12847 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
12848 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
12849 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
12850 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
12851 BUILTIN_DESC_SWAP_OPERANDS },
12852 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
12853 BUILTIN_DESC_SWAP_OPERANDS },
12854 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
12855 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
12856 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
12857 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
12858 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
12859 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
12860 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
12861 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
12862 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
12863
12864 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
12865 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
12866 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
12867 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
12868
12869 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
12870 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
12871 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
12872 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
12873
12874 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
12875 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
12876 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
12877
12878 /* SSE2 MMX */
12879 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
12880 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
12881 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
12882 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
12883 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
12884 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
12885 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
12886 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
12887
12888 { MASK_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
12889 { MASK_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
12890 { MASK_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
12891 { MASK_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
12892 { MASK_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
12893 { MASK_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
12894 { MASK_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
12895 { MASK_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
12896
12897 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
12898 { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
12899
12900 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
12901 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
12902 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
12903 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
12904
12905 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
12906 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
12907
12908 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
12909 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
12910 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
12911 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
12912 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
12913 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
12914
12915 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
12916 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
12917 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
12918 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
12919
12920 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
12921 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
12922 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
12923 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
12924 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
12925 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
12926 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
12927 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
12928
12929 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
12930 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
12931 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
12932
12933 { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
12934 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
12935
12936 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
12937 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
12938
12939 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
12940 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
12941 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
12942
12943 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
12944 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
12945 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
12946
12947 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
12948 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
12949
12950 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
12951
12952 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
12953 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
12954 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
12955 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
12956
12957 /* SSE3 */
12958 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
12959 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
12960 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
12961 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
12962 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
12963 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
12964 };
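/* Illustrative note (editorial, not part of the original table): each
   bdesc_2arg entry ties a user-visible builtin name to an insn pattern and
   is expanded later by ix86_expand_binop_builtin.  A call such as

     __v2df sum = __builtin_ia32_addpd (a, b);

   goes through the IX86_BUILTIN_ADDPD entry above and is emitted via
   CODE_FOR_addv2df3.  Entries whose comparison field is nonzero are the
   mask-generating compares; BUILTIN_DESC_SWAP_OPERANDS marks the ones
   where only the reversed condition exists in hardware.  */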
12965
12966 static const struct builtin_description bdesc_1arg[] =
12967 {
12968 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
12969 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
12970
12971 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
12972 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
12973 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
12974
12975 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
12976 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
12977 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
12978 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
12979 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
12980 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
12981
12982 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
12983 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
12984
12985 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
12986
12987 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
12988 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
12989
12990 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
12991 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
12992 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
12993 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
12994 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
12995
12996 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
12997
12998 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
12999 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
13000 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
13001 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
13002
13003 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
13004 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
13005 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
13006
13007 /* SSE3 */
13008 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
13009 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
13010 };
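/* Illustrative note: bdesc_1arg entries are expanded through
   ix86_expand_unop_builtin; e.g. IX86_BUILTIN_SQRTPS uses
   CODE_FOR_sqrtv4sf2 and takes a single V4SF operand.  Entries with a
   null name are registered by an explicit def_builtin call below.  */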
13011
13012 static void
13013 ix86_init_builtins (void)
13014 {
13015 if (TARGET_MMX)
13016 ix86_init_mmx_sse_builtins ();
13017 }
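/* Editorial note: def_builtin only registers a builtin when its mask
   matches the current target flags, so the names below appear or
   disappear with the -m options.  Illustrative compile lines:

     gcc -msse2 ...   makes __builtin_ia32_addpd available
     gcc -mmmx  ...   does not                                  */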
13018
13019 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
13020 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
13021 builtins. */
13022 static void
13023 ix86_init_mmx_sse_builtins (void)
13024 {
13025 const struct builtin_description * d;
13026 size_t i;
13027
13028 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
13029 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
13030 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
13031 tree V2DI_type_node
13032 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
13033 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
13034 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
13035 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
13036 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
13037 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
13038 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
13039
13040 tree pchar_type_node = build_pointer_type (char_type_node);
13041 tree pcchar_type_node = build_pointer_type (
13042 build_type_variant (char_type_node, 1, 0));
13043 tree pfloat_type_node = build_pointer_type (float_type_node);
13044 tree pcfloat_type_node = build_pointer_type (
13045 build_type_variant (float_type_node, 1, 0));
13046 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
13047 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
13048 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
13049
13050 /* Comparisons. */
13051 tree int_ftype_v4sf_v4sf
13052 = build_function_type_list (integer_type_node,
13053 V4SF_type_node, V4SF_type_node, NULL_TREE);
13054 tree v4si_ftype_v4sf_v4sf
13055 = build_function_type_list (V4SI_type_node,
13056 V4SF_type_node, V4SF_type_node, NULL_TREE);
13057 /* MMX/SSE/integer conversions. */
13058 tree int_ftype_v4sf
13059 = build_function_type_list (integer_type_node,
13060 V4SF_type_node, NULL_TREE);
13061 tree int64_ftype_v4sf
13062 = build_function_type_list (long_long_integer_type_node,
13063 V4SF_type_node, NULL_TREE);
13064 tree int_ftype_v8qi
13065 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
13066 tree v4sf_ftype_v4sf_int
13067 = build_function_type_list (V4SF_type_node,
13068 V4SF_type_node, integer_type_node, NULL_TREE);
13069 tree v4sf_ftype_v4sf_int64
13070 = build_function_type_list (V4SF_type_node,
13071 V4SF_type_node, long_long_integer_type_node,
13072 NULL_TREE);
13073 tree v4sf_ftype_v4sf_v2si
13074 = build_function_type_list (V4SF_type_node,
13075 V4SF_type_node, V2SI_type_node, NULL_TREE);
13076
13077 /* Miscellaneous. */
13078 tree v8qi_ftype_v4hi_v4hi
13079 = build_function_type_list (V8QI_type_node,
13080 V4HI_type_node, V4HI_type_node, NULL_TREE);
13081 tree v4hi_ftype_v2si_v2si
13082 = build_function_type_list (V4HI_type_node,
13083 V2SI_type_node, V2SI_type_node, NULL_TREE);
13084 tree v4sf_ftype_v4sf_v4sf_int
13085 = build_function_type_list (V4SF_type_node,
13086 V4SF_type_node, V4SF_type_node,
13087 integer_type_node, NULL_TREE);
13088 tree v2si_ftype_v4hi_v4hi
13089 = build_function_type_list (V2SI_type_node,
13090 V4HI_type_node, V4HI_type_node, NULL_TREE);
13091 tree v4hi_ftype_v4hi_int
13092 = build_function_type_list (V4HI_type_node,
13093 V4HI_type_node, integer_type_node, NULL_TREE);
13094 tree v4hi_ftype_v4hi_di
13095 = build_function_type_list (V4HI_type_node,
13096 V4HI_type_node, long_long_unsigned_type_node,
13097 NULL_TREE);
13098 tree v2si_ftype_v2si_di
13099 = build_function_type_list (V2SI_type_node,
13100 V2SI_type_node, long_long_unsigned_type_node,
13101 NULL_TREE);
13102 tree void_ftype_void
13103 = build_function_type (void_type_node, void_list_node);
13104 tree void_ftype_unsigned
13105 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
13106 tree void_ftype_unsigned_unsigned
13107 = build_function_type_list (void_type_node, unsigned_type_node,
13108 unsigned_type_node, NULL_TREE);
13109 tree void_ftype_pcvoid_unsigned_unsigned
13110 = build_function_type_list (void_type_node, const_ptr_type_node,
13111 unsigned_type_node, unsigned_type_node,
13112 NULL_TREE);
13113 tree unsigned_ftype_void
13114 = build_function_type (unsigned_type_node, void_list_node);
13115 tree v2si_ftype_v4sf
13116 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
13117 /* Loads/stores. */
13118 tree void_ftype_v8qi_v8qi_pchar
13119 = build_function_type_list (void_type_node,
13120 V8QI_type_node, V8QI_type_node,
13121 pchar_type_node, NULL_TREE);
13122 tree v4sf_ftype_pcfloat
13123 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
13124 /* @@@ the type is bogus */
13125 tree v4sf_ftype_v4sf_pv2si
13126 = build_function_type_list (V4SF_type_node,
13127 V4SF_type_node, pv2si_type_node, NULL_TREE);
13128 tree void_ftype_pv2si_v4sf
13129 = build_function_type_list (void_type_node,
13130 pv2si_type_node, V4SF_type_node, NULL_TREE);
13131 tree void_ftype_pfloat_v4sf
13132 = build_function_type_list (void_type_node,
13133 pfloat_type_node, V4SF_type_node, NULL_TREE);
13134 tree void_ftype_pdi_di
13135 = build_function_type_list (void_type_node,
13136 pdi_type_node, long_long_unsigned_type_node,
13137 NULL_TREE);
13138 tree void_ftype_pv2di_v2di
13139 = build_function_type_list (void_type_node,
13140 pv2di_type_node, V2DI_type_node, NULL_TREE);
13141 /* Normal vector unops. */
13142 tree v4sf_ftype_v4sf
13143 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
13144
13145 /* Normal vector binops. */
13146 tree v4sf_ftype_v4sf_v4sf
13147 = build_function_type_list (V4SF_type_node,
13148 V4SF_type_node, V4SF_type_node, NULL_TREE);
13149 tree v8qi_ftype_v8qi_v8qi
13150 = build_function_type_list (V8QI_type_node,
13151 V8QI_type_node, V8QI_type_node, NULL_TREE);
13152 tree v4hi_ftype_v4hi_v4hi
13153 = build_function_type_list (V4HI_type_node,
13154 V4HI_type_node, V4HI_type_node, NULL_TREE);
13155 tree v2si_ftype_v2si_v2si
13156 = build_function_type_list (V2SI_type_node,
13157 V2SI_type_node, V2SI_type_node, NULL_TREE);
13158 tree di_ftype_di_di
13159 = build_function_type_list (long_long_unsigned_type_node,
13160 long_long_unsigned_type_node,
13161 long_long_unsigned_type_node, NULL_TREE);
13162
13163 tree v2si_ftype_v2sf
13164 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
13165 tree v2sf_ftype_v2si
13166 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
13167 tree v2si_ftype_v2si
13168 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
13169 tree v2sf_ftype_v2sf
13170 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
13171 tree v2sf_ftype_v2sf_v2sf
13172 = build_function_type_list (V2SF_type_node,
13173 V2SF_type_node, V2SF_type_node, NULL_TREE);
13174 tree v2si_ftype_v2sf_v2sf
13175 = build_function_type_list (V2SI_type_node,
13176 V2SF_type_node, V2SF_type_node, NULL_TREE);
13177 tree pint_type_node = build_pointer_type (integer_type_node);
13178 tree pdouble_type_node = build_pointer_type (double_type_node);
13179 tree pcdouble_type_node = build_pointer_type (
13180 build_type_variant (double_type_node, 1, 0));
13181 tree int_ftype_v2df_v2df
13182 = build_function_type_list (integer_type_node,
13183 V2DF_type_node, V2DF_type_node, NULL_TREE);
13184
13185 tree ti_ftype_ti_ti
13186 = build_function_type_list (intTI_type_node,
13187 intTI_type_node, intTI_type_node, NULL_TREE);
13188 tree void_ftype_pcvoid
13189 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
13190 tree v4sf_ftype_v4si
13191 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
13192 tree v4si_ftype_v4sf
13193 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
13194 tree v2df_ftype_v4si
13195 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
13196 tree v4si_ftype_v2df
13197 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
13198 tree v2si_ftype_v2df
13199 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
13200 tree v4sf_ftype_v2df
13201 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
13202 tree v2df_ftype_v2si
13203 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
13204 tree v2df_ftype_v4sf
13205 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
13206 tree int_ftype_v2df
13207 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
13208 tree int64_ftype_v2df
13209 = build_function_type_list (long_long_integer_type_node,
13210 V2DF_type_node, NULL_TREE);
13211 tree v2df_ftype_v2df_int
13212 = build_function_type_list (V2DF_type_node,
13213 V2DF_type_node, integer_type_node, NULL_TREE);
13214 tree v2df_ftype_v2df_int64
13215 = build_function_type_list (V2DF_type_node,
13216 V2DF_type_node, long_long_integer_type_node,
13217 NULL_TREE);
13218 tree v4sf_ftype_v4sf_v2df
13219 = build_function_type_list (V4SF_type_node,
13220 V4SF_type_node, V2DF_type_node, NULL_TREE);
13221 tree v2df_ftype_v2df_v4sf
13222 = build_function_type_list (V2DF_type_node,
13223 V2DF_type_node, V4SF_type_node, NULL_TREE);
13224 tree v2df_ftype_v2df_v2df_int
13225 = build_function_type_list (V2DF_type_node,
13226 V2DF_type_node, V2DF_type_node,
13227 integer_type_node,
13228 NULL_TREE);
13229 tree v2df_ftype_v2df_pcdouble
13230 = build_function_type_list (V2DF_type_node,
13231 V2DF_type_node, pcdouble_type_node, NULL_TREE);
13232 tree void_ftype_pdouble_v2df
13233 = build_function_type_list (void_type_node,
13234 pdouble_type_node, V2DF_type_node, NULL_TREE);
13235 tree void_ftype_pint_int
13236 = build_function_type_list (void_type_node,
13237 pint_type_node, integer_type_node, NULL_TREE);
13238 tree void_ftype_v16qi_v16qi_pchar
13239 = build_function_type_list (void_type_node,
13240 V16QI_type_node, V16QI_type_node,
13241 pchar_type_node, NULL_TREE);
13242 tree v2df_ftype_pcdouble
13243 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
13244 tree v2df_ftype_v2df_v2df
13245 = build_function_type_list (V2DF_type_node,
13246 V2DF_type_node, V2DF_type_node, NULL_TREE);
13247 tree v16qi_ftype_v16qi_v16qi
13248 = build_function_type_list (V16QI_type_node,
13249 V16QI_type_node, V16QI_type_node, NULL_TREE);
13250 tree v8hi_ftype_v8hi_v8hi
13251 = build_function_type_list (V8HI_type_node,
13252 V8HI_type_node, V8HI_type_node, NULL_TREE);
13253 tree v4si_ftype_v4si_v4si
13254 = build_function_type_list (V4SI_type_node,
13255 V4SI_type_node, V4SI_type_node, NULL_TREE);
13256 tree v2di_ftype_v2di_v2di
13257 = build_function_type_list (V2DI_type_node,
13258 V2DI_type_node, V2DI_type_node, NULL_TREE);
13259 tree v2di_ftype_v2df_v2df
13260 = build_function_type_list (V2DI_type_node,
13261 V2DF_type_node, V2DF_type_node, NULL_TREE);
13262 tree v2df_ftype_v2df
13263 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
13264 tree v2di_ftype_v2di_int
13265 = build_function_type_list (V2DI_type_node,
13266 V2DI_type_node, integer_type_node, NULL_TREE);
13267 tree v4si_ftype_v4si_int
13268 = build_function_type_list (V4SI_type_node,
13269 V4SI_type_node, integer_type_node, NULL_TREE);
13270 tree v8hi_ftype_v8hi_int
13271 = build_function_type_list (V8HI_type_node,
13272 V8HI_type_node, integer_type_node, NULL_TREE);
13273 tree v8hi_ftype_v8hi_v2di
13274 = build_function_type_list (V8HI_type_node,
13275 V8HI_type_node, V2DI_type_node, NULL_TREE);
13276 tree v4si_ftype_v4si_v2di
13277 = build_function_type_list (V4SI_type_node,
13278 V4SI_type_node, V2DI_type_node, NULL_TREE);
13279 tree v4si_ftype_v8hi_v8hi
13280 = build_function_type_list (V4SI_type_node,
13281 V8HI_type_node, V8HI_type_node, NULL_TREE);
13282 tree di_ftype_v8qi_v8qi
13283 = build_function_type_list (long_long_unsigned_type_node,
13284 V8QI_type_node, V8QI_type_node, NULL_TREE);
13285 tree di_ftype_v2si_v2si
13286 = build_function_type_list (long_long_unsigned_type_node,
13287 V2SI_type_node, V2SI_type_node, NULL_TREE);
13288 tree v2di_ftype_v16qi_v16qi
13289 = build_function_type_list (V2DI_type_node,
13290 V16QI_type_node, V16QI_type_node, NULL_TREE);
13291 tree v2di_ftype_v4si_v4si
13292 = build_function_type_list (V2DI_type_node,
13293 V4SI_type_node, V4SI_type_node, NULL_TREE);
13294 tree int_ftype_v16qi
13295 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
13296 tree v16qi_ftype_pcchar
13297 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
13298 tree void_ftype_pchar_v16qi
13299 = build_function_type_list (void_type_node,
13300 pchar_type_node, V16QI_type_node, NULL_TREE);
13301
13302 tree float80_type;
13303 tree float128_type;
13304 tree ftype;
13305
13306 /* The __float80 type. */
13307 if (TYPE_MODE (long_double_type_node) == XFmode)
13308 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
13309 "__float80");
13310 else
13311 {
13312 /* The __float80 type. */
13313 float80_type = make_node (REAL_TYPE);
13314 TYPE_PRECISION (float80_type) = 80;
13315 layout_type (float80_type);
13316 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
13317 }
13318
13319 float128_type = make_node (REAL_TYPE);
13320 TYPE_PRECISION (float128_type) = 128;
13321 layout_type (float128_type);
13322 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
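/* Illustrative note: once registered, the names can be used as ordinary
   type specifiers in user code, e.g.

     __float80  e;
     __float128 q;

   (a minimal sketch; how much arithmetic support exists for __float128 is
   a separate question not settled here).  */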
13323
13324 /* Add all builtins that are more or less simple operations on two
13325 operands. */
13326 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13327 {
13328 /* Use one of the operands; the target can have a different mode for
13329 mask-generating compares. */
13330 enum machine_mode mode;
13331 tree type;
13332
13333 if (d->name == 0)
13334 continue;
13335 mode = insn_data[d->icode].operand[1].mode;
13336
13337 switch (mode)
13338 {
13339 case V16QImode:
13340 type = v16qi_ftype_v16qi_v16qi;
13341 break;
13342 case V8HImode:
13343 type = v8hi_ftype_v8hi_v8hi;
13344 break;
13345 case V4SImode:
13346 type = v4si_ftype_v4si_v4si;
13347 break;
13348 case V2DImode:
13349 type = v2di_ftype_v2di_v2di;
13350 break;
13351 case V2DFmode:
13352 type = v2df_ftype_v2df_v2df;
13353 break;
13354 case TImode:
13355 type = ti_ftype_ti_ti;
13356 break;
13357 case V4SFmode:
13358 type = v4sf_ftype_v4sf_v4sf;
13359 break;
13360 case V8QImode:
13361 type = v8qi_ftype_v8qi_v8qi;
13362 break;
13363 case V4HImode:
13364 type = v4hi_ftype_v4hi_v4hi;
13365 break;
13366 case V2SImode:
13367 type = v2si_ftype_v2si_v2si;
13368 break;
13369 case DImode:
13370 type = di_ftype_di_di;
13371 break;
13372
13373 default:
13374 abort ();
13375 }
13376
13377 /* Override for comparisons. */
13378 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
13379 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
13380 type = v4si_ftype_v4sf_v4sf;
13381
13382 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
13383 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
13384 type = v2di_ftype_v2df_v2df;
13385
13386 def_builtin (d->mask, d->name, type, d->code);
13387 }
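/* Editorial summary of the loop above: the prototype is picked purely
   from the mode of operand 1 (V2DFmode entries get v2df_ftype_v2df_v2df,
   V8HImode entries v8hi_ftype_v8hi_v8hi, and so on), and the two
   mask-compare icodes are then overridden so that they return an integer
   vector (v4si or v2di) instead of the float vector type.  */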
13388
13389 /* Add the remaining MMX insns with somewhat more complicated types. */
13390 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
13391 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
13392 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
13393 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
13394
13395 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
13396 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
13397 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
13398
13399 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
13400 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
13401
13402 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
13403 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
13404
13405 /* comi/ucomi insns. */
13406 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
13407 if (d->mask == MASK_SSE2)
13408 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
13409 else
13410 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
13411
13412 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
13413 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
13414 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
13415
13416 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
13417 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
13418 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
13419 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
13420 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
13421 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
13422 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
13423 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
13424 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
13425 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
13426 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
13427
13428 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
13429
13430 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
13431 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
13432
13433 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
13434 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
13435 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
13436 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
13437
13438 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
13439 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
13440 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
13441 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
13442
13443 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
13444
13445 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
13446
13447 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
13448 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
13449 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
13450 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
13451 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
13452 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
13453
13454 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
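/* Illustrative note: the trailing integer argument of
   __builtin_ia32_shufps is the immediate selector, as used by the
   _mm_shuffle_ps wrapper in xmmintrin.h, e.g.

     __v4sf r = __builtin_ia32_shufps (a, b, 0x1b);

   (user-level sketch only).  */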
13455
13456 /* Original 3DNow! */
13457 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
13458 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
13459 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
13460 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
13461 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
13462 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
13463 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
13464 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
13465 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
13466 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
13467 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
13468 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
13469 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
13470 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
13471 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
13472 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
13473 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
13474 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
13475 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
13476 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
13477
13478 /* 3DNow! extension as used in the Athlon CPU. */
13479 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
13480 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
13481 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
13482 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
13483 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
13484 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
13485
13486 /* SSE2 */
13487 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
13488
13489 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
13490 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
13491
13492 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
13493 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
13494
13495 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
13496 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
13497 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
13498 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
13499 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
13500
13501 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
13502 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
13503 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
13504 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
13505
13506 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
13507 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
13508
13509 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
13510
13511 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
13512 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
13513
13514 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
13515 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
13516 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
13517 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
13518 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
13519
13520 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
13521
13522 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
13523 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
13524 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
13525 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
13526
13527 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
13528 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
13529 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
13530
13531 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
13532 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
13533 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
13534 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
13535
13536 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
13537 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
13538 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
13539
13540 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
13541 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
13542
13543 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
13544 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
13545
13546 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
13547 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
13548 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
13549
13550 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
13551 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
13552 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
13553
13554 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
13555 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
13556
13557 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
13558 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
13559 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
13560 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
13561
13562 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
13563 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
13564 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
13565 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
13566
13567 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
13568 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
13569
13570 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
13571
13572 /* Prescott New Instructions. */
13573 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
13574 void_ftype_pcvoid_unsigned_unsigned,
13575 IX86_BUILTIN_MONITOR);
13576 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
13577 void_ftype_unsigned_unsigned,
13578 IX86_BUILTIN_MWAIT);
13579 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
13580 v4sf_ftype_v4sf,
13581 IX86_BUILTIN_MOVSHDUP);
13582 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
13583 v4sf_ftype_v4sf,
13584 IX86_BUILTIN_MOVSLDUP);
13585 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
13586 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
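/* Illustrative note: these SSE3 builtins are the ones pmmintrin.h wraps,
   e.g. _mm_lddqu_si128 is built on __builtin_ia32_lddqu and
   _mm_monitor / _mm_mwait on the monitor and mwait builtins.  */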
13587
13588 /* Access to the vec_init patterns. */
13589 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
13590 integer_type_node, NULL_TREE);
13591 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
13592 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
13593
13594 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
13595 short_integer_type_node,
13596 short_integer_type_node,
13597 short_integer_type_node, NULL_TREE);
13598 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
13599 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
13600
13601 ftype = build_function_type_list (V8QI_type_node, char_type_node,
13602 char_type_node, char_type_node,
13603 char_type_node, char_type_node,
13604 char_type_node, char_type_node,
13605 char_type_node, NULL_TREE);
13606 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
13607 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
13608
13609 /* Access to the vec_extract patterns. */
13610 ftype = build_function_type_list (double_type_node, V2DF_type_node,
13611 integer_type_node, NULL_TREE);
13612 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
13613 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
13614
13615 ftype = build_function_type_list (long_long_integer_type_node,
13616 V2DI_type_node, integer_type_node,
13617 NULL_TREE);
13618 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
13619 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
13620
13621 ftype = build_function_type_list (float_type_node, V4SF_type_node,
13622 integer_type_node, NULL_TREE);
13623 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
13624 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
13625
13626 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
13627 integer_type_node, NULL_TREE);
13628 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
13629 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
13630
13631 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
13632 integer_type_node, NULL_TREE);
13633 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
13634 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
13635
13636 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
13637 integer_type_node, NULL_TREE);
13638 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
13639 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
13640
13641 /* Access to the vec_set patterns. */
13642 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
13643 intHI_type_node,
13644 integer_type_node, NULL_TREE);
13645 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
13646 ftype, IX86_BUILTIN_VEC_SET_V8HI);
13647
13648 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
13649 intHI_type_node,
13650 integer_type_node, NULL_TREE);
13651 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
13652 ftype, IX86_BUILTIN_VEC_SET_V4HI);
13653 }
13654
13655 /* Errors in the source file can cause expand_expr to return const0_rtx
13656 where we expect a vector. To avoid crashing, use one of the vector
13657 clear instructions. */
13658 static rtx
13659 safe_vector_operand (rtx x, enum machine_mode mode)
13660 {
13661 if (x == const0_rtx)
13662 x = CONST0_RTX (mode);
13663 return x;
13664 }
13665
13666 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
13667
13668 static rtx
13669 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
13670 {
13671 rtx pat, xops[3];
13672 tree arg0 = TREE_VALUE (arglist);
13673 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13674 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13675 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13676 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13677 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13678 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13679
13680 if (VECTOR_MODE_P (mode0))
13681 op0 = safe_vector_operand (op0, mode0);
13682 if (VECTOR_MODE_P (mode1))
13683 op1 = safe_vector_operand (op1, mode1);
13684
13685 if (optimize || !target
13686 || GET_MODE (target) != tmode
13687 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13688 target = gen_reg_rtx (tmode);
13689
13690 if (GET_MODE (op1) == SImode && mode1 == TImode)
13691 {
13692 rtx x = gen_reg_rtx (V4SImode);
13693 emit_insn (gen_sse2_loadd (x, op1));
13694 op1 = gen_lowpart (TImode, x);
13695 }
13696
13697 /* In case the insn wants input operands in modes different from
13698 the result, abort. */
13699 if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode)
13700 || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode))
13701 abort ();
13702
13703 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13704 op0 = copy_to_mode_reg (mode0, op0);
13705 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13706 op1 = copy_to_mode_reg (mode1, op1);
13707
13708 /* ??? Using ix86_fixup_binary_operands is problematic when
13709 we've got mismatched modes. Fake it. */
13710
13711 xops[0] = target;
13712 xops[1] = op0;
13713 xops[2] = op1;
13714
13715 if (tmode == mode0 && tmode == mode1)
13716 {
13717 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
13718 op0 = xops[1];
13719 op1 = xops[2];
13720 }
13721 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
13722 {
13723 op0 = force_reg (mode0, op0);
13724 op1 = force_reg (mode1, op1);
13725 target = gen_reg_rtx (tmode);
13726 }
13727
13728 pat = GEN_FCN (icode) (target, op0, op1);
13729 if (! pat)
13730 return 0;
13731 emit_insn (pat);
13732 return target;
13733 }
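/* Editorial walk-through of the routine above (illustrative): for a call
   like __builtin_ia32_pmullw (a, b) both operands are expanded, copied
   into fresh registers whenever the operand predicates reject them, and
   the single insn from GEN_FCN (CODE_FOR_mmx_mulv4hi3) is emitted.  The
   SImode/TImode special case widens a 32-bit argument into an XMM-sized
   value for patterns whose second input operand is TImode.  */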
13734
13735 /* Subroutine of ix86_expand_builtin to take care of stores. */
13736
13737 static rtx
13738 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
13739 {
13740 rtx pat;
13741 tree arg0 = TREE_VALUE (arglist);
13742 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13743 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13744 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13745 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
13746 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
13747
13748 if (VECTOR_MODE_P (mode1))
13749 op1 = safe_vector_operand (op1, mode1);
13750
13751 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13752 op1 = copy_to_mode_reg (mode1, op1);
13753
13754 pat = GEN_FCN (icode) (op0, op1);
13755 if (pat)
13756 emit_insn (pat);
13757 return 0;
13758 }
13759
13760 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
13761
13762 static rtx
13763 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
13764 rtx target, int do_load)
13765 {
13766 rtx pat;
13767 tree arg0 = TREE_VALUE (arglist);
13768 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13769 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13770 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13771
13772 if (optimize || !target
13773 || GET_MODE (target) != tmode
13774 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13775 target = gen_reg_rtx (tmode);
13776 if (do_load)
13777 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13778 else
13779 {
13780 if (VECTOR_MODE_P (mode0))
13781 op0 = safe_vector_operand (op0, mode0);
13782
13783 if ((optimize && !register_operand (op0, mode0))
13784 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13785 op0 = copy_to_mode_reg (mode0, op0);
13786 }
13787
13788 pat = GEN_FCN (icode) (target, op0);
13789 if (! pat)
13790 return 0;
13791 emit_insn (pat);
13792 return target;
13793 }
13794
13795 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
13796 sqrtss, rsqrtss, rcpss. */
13797
13798 static rtx
13799 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
13800 {
13801 rtx pat;
13802 tree arg0 = TREE_VALUE (arglist);
13803 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13804 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13805 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13806
13807 if (optimize || !target
13808 || GET_MODE (target) != tmode
13809 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13810 target = gen_reg_rtx (tmode);
13811
13812 if (VECTOR_MODE_P (mode0))
13813 op0 = safe_vector_operand (op0, mode0);
13814
13815 if ((optimize && !register_operand (op0, mode0))
13816 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13817 op0 = copy_to_mode_reg (mode0, op0);
13818
13819 op1 = op0;
13820 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
13821 op1 = copy_to_mode_reg (mode0, op1);
13822
13823 pat = GEN_FCN (icode) (target, op0, op1);
13824 if (! pat)
13825 return 0;
13826 emit_insn (pat);
13827 return target;
13828 }
13829
13830 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
13831
13832 static rtx
13833 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
13834 rtx target)
13835 {
13836 rtx pat;
13837 tree arg0 = TREE_VALUE (arglist);
13838 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13839 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13840 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13841 rtx op2;
13842 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
13843 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
13844 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
13845 enum rtx_code comparison = d->comparison;
13846
13847 if (VECTOR_MODE_P (mode0))
13848 op0 = safe_vector_operand (op0, mode0);
13849 if (VECTOR_MODE_P (mode1))
13850 op1 = safe_vector_operand (op1, mode1);
13851
13852 /* Swap operands if we have a comparison that isn't available in
13853 hardware. */
13854 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
13855 {
13856 rtx tmp = gen_reg_rtx (mode1);
13857 emit_move_insn (tmp, op1);
13858 op1 = op0;
13859 op0 = tmp;
13860 }
13861
13862 if (optimize || !target
13863 || GET_MODE (target) != tmode
13864 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
13865 target = gen_reg_rtx (tmode);
13866
13867 if ((optimize && !register_operand (op0, mode0))
13868 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
13869 op0 = copy_to_mode_reg (mode0, op0);
13870 if ((optimize && !register_operand (op1, mode1))
13871 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
13872 op1 = copy_to_mode_reg (mode1, op1);
13873
13874 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13875 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
13876 if (! pat)
13877 return 0;
13878 emit_insn (pat);
13879 return target;
13880 }
13881
13882 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
13883
13884 static rtx
13885 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
13886 rtx target)
13887 {
13888 rtx pat;
13889 tree arg0 = TREE_VALUE (arglist);
13890 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13891 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13892 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13893 rtx op2;
13894 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
13895 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
13896 enum rtx_code comparison = d->comparison;
13897
13898 if (VECTOR_MODE_P (mode0))
13899 op0 = safe_vector_operand (op0, mode0);
13900 if (VECTOR_MODE_P (mode1))
13901 op1 = safe_vector_operand (op1, mode1);
13902
13903 /* Swap operands if we have a comparison that isn't available in
13904 hardware. */
13905 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
13906 {
13907 rtx tmp = op1;
13908 op1 = op0;
13909 op0 = tmp;
13910 }
13911
13912 target = gen_reg_rtx (SImode);
13913 emit_move_insn (target, const0_rtx);
13914 target = gen_rtx_SUBREG (QImode, target, 0);
13915
13916 if ((optimize && !register_operand (op0, mode0))
13917 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
13918 op0 = copy_to_mode_reg (mode0, op0);
13919 if ((optimize && !register_operand (op1, mode1))
13920 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
13921 op1 = copy_to_mode_reg (mode1, op1);
13922
13923 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13924 pat = GEN_FCN (d->icode) (op0, op1);
13925 if (! pat)
13926 return 0;
13927 emit_insn (pat);
13928 emit_insn (gen_rtx_SET (VOIDmode,
13929 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
13930 gen_rtx_fmt_ee (comparison, QImode,
13931 SET_DEST (pat),
13932 const0_rtx)));
13933
13934 return SUBREG_REG (target);
13935 }
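
/* A usage sketch (illustrative only): the scalar COMI/UCOMI intrinsics
   from xmmintrin.h reach this routine, e.g.

	#include <xmmintrin.h>

	int
	scalar_eq (__m128 a, __m128 b)
	{
	  return _mm_comieq_ss (a, b);
	}

   The comi pattern only sets the flags; the boolean result is built by
   clearing an SImode register and writing its low byte through the
   STRICT_LOW_PART store emitted above.  */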
13936
13937 /* Return the integer constant in ARG. Constrain it to be in the range
13938 of the subparts of VEC_TYPE; issue an error if not. */
13939
13940 static int
13941 get_element_number (tree vec_type, tree arg)
13942 {
13943 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
13944
13945 if (!host_integerp (arg, 1)
13946 || (elt = tree_low_cst (arg, 1), elt > max))
13947 {
13948 error ("selector must be an integer constant in the range 0..%i", (int) max);
13949 return 0;
13950 }
13951
13952 return elt;
13953 }
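
/* For instance, for a vector type with four subparts MAX is 3, so any
   selector outside 0..3 is diagnosed with the error above and element 0
   is used as a safe fallback.  */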
13954
13955 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
13956 ix86_expand_vector_init. We DO have language-level syntax for this, in
13957 the form of (type){ init-list }. Except that since we can't place emms
13958 instructions from inside the compiler, we can't allow the use of MMX
13959 registers unless the user explicitly asks for it. So we do *not* define
13960 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
13961 we have builtins invoked by mmintrin.h that give us license to emit
13962 these sorts of instructions. */
13963
13964 static rtx
13965 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
13966 {
13967 enum machine_mode tmode = TYPE_MODE (type);
13968 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
13969 int i, n_elt = GET_MODE_NUNITS (tmode);
13970 rtvec v = rtvec_alloc (n_elt);
13971
13972 gcc_assert (VECTOR_MODE_P (tmode));
13973
13974 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
13975 {
13976 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
13977 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
13978 }
13979
13980 gcc_assert (arglist == NULL);
13981
13982 if (!target || !register_operand (target, tmode))
13983 target = gen_reg_rtx (tmode);
13984
13985 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
13986 return target;
13987 }
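
/* A usage sketch (illustrative only): _mm_set_pi16 from the intrinsic
   headers funnels into IX86_BUILTIN_VEC_INIT_V4HI, so

	#include <mmintrin.h>

	__m64
	make_v4hi (short a, short b, short c, short d)
	{
	  return _mm_set_pi16 (a, b, c, d);
	}

   ends up here, collecting the four elements into a PARALLEL and handing
   it to ix86_expand_vector_init.  */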
13988
13989 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
13990 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
13991 had a language-level syntax for referencing vector elements. */
13992
13993 static rtx
13994 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
13995 {
13996 enum machine_mode tmode, mode0;
13997 tree arg0, arg1;
13998 int elt;
13999 rtx op0;
14000
14001 arg0 = TREE_VALUE (arglist);
14002 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14003
14004 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14005 elt = get_element_number (TREE_TYPE (arg0), arg1);
14006
14007 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14008 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14009 gcc_assert (VECTOR_MODE_P (mode0));
14010
14011 op0 = force_reg (mode0, op0);
14012
14013 if (optimize || !target || !register_operand (target, tmode))
14014 target = gen_reg_rtx (tmode);
14015
14016 ix86_expand_vector_extract (true, target, op0, elt);
14017
14018 return target;
14019 }
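
/* Illustrative example: _mm_extract_pi16 expands through
   __builtin_ia32_vec_ext_v4hi and therefore through this routine, which
   forces the vector into a register and lets ix86_expand_vector_extract
   emit the actual extraction.  */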
14020
14021 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14022 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
14023 a language-level syntax for referencing vector elements. */
14024
14025 static rtx
14026 ix86_expand_vec_set_builtin (tree arglist)
14027 {
14028 enum machine_mode tmode, mode1;
14029 tree arg0, arg1, arg2;
14030 int elt;
14031 rtx op0, op1;
14032
14033 arg0 = TREE_VALUE (arglist);
14034 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14035 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14036
14037 tmode = TYPE_MODE (TREE_TYPE (arg0));
14038 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14039 gcc_assert (VECTOR_MODE_P (tmode));
14040
14041 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
14042 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
14043 elt = get_element_number (TREE_TYPE (arg0), arg2);
14044
14045 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14046 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14047
14048 op0 = force_reg (tmode, op0);
14049 op1 = force_reg (mode1, op1);
14050
14051 ix86_expand_vector_set (true, op0, op1, elt);
14052
14053 return op0;
14054 }
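
/* Illustrative example: _mm_insert_pi16 expands through
   __builtin_ia32_vec_set_v4hi; the vector and the new element are forced
   into registers and ix86_expand_vector_set rewrites the selected
   element in place.  */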
14055
14056 /* Expand an expression EXP that calls a built-in function,
14057 with result going to TARGET if that's convenient
14058 (and in mode MODE if that's convenient).
14059 SUBTARGET may be used as the target for computing one of EXP's operands.
14060 IGNORE is nonzero if the value is to be ignored. */
14061
14062 static rtx
14063 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
14064 enum machine_mode mode ATTRIBUTE_UNUSED,
14065 int ignore ATTRIBUTE_UNUSED)
14066 {
14067 const struct builtin_description *d;
14068 size_t i;
14069 enum insn_code icode;
14070 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
14071 tree arglist = TREE_OPERAND (exp, 1);
14072 tree arg0, arg1, arg2;
14073 rtx op0, op1, op2, pat;
14074 enum machine_mode tmode, mode0, mode1, mode2;
14075 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14076
14077 switch (fcode)
14078 {
14079 case IX86_BUILTIN_EMMS:
14080 emit_insn (gen_mmx_emms ());
14081 return 0;
14082
14083 case IX86_BUILTIN_SFENCE:
14084 emit_insn (gen_sse_sfence ());
14085 return 0;
14086
14087 case IX86_BUILTIN_MASKMOVQ:
14088 case IX86_BUILTIN_MASKMOVDQU:
14089 icode = (fcode == IX86_BUILTIN_MASKMOVQ
14090 ? CODE_FOR_mmx_maskmovq
14091 : CODE_FOR_sse2_maskmovdqu);
14092 /* Note the arg order is different from the operand order. */
14093 arg1 = TREE_VALUE (arglist);
14094 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
14095 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14096 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14097 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14098 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14099 mode0 = insn_data[icode].operand[0].mode;
14100 mode1 = insn_data[icode].operand[1].mode;
14101 mode2 = insn_data[icode].operand[2].mode;
14102
14103 op0 = force_reg (Pmode, op0);
14104 op0 = gen_rtx_MEM (mode1, op0);
14105
14106 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14107 op0 = copy_to_mode_reg (mode0, op0);
14108 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14109 op1 = copy_to_mode_reg (mode1, op1);
14110 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
14111 op2 = copy_to_mode_reg (mode2, op2);
14112 pat = GEN_FCN (icode) (op0, op1, op2);
14113 if (! pat)
14114 return 0;
14115 emit_insn (pat);
14116 return 0;
14117
14118 case IX86_BUILTIN_SQRTSS:
14119 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
14120 case IX86_BUILTIN_RSQRTSS:
14121 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
14122 case IX86_BUILTIN_RCPSS:
14123 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
14124
14125 case IX86_BUILTIN_LOADUPS:
14126 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
14127
14128 case IX86_BUILTIN_STOREUPS:
14129 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
14130
14131 case IX86_BUILTIN_LOADHPS:
14132 case IX86_BUILTIN_LOADLPS:
14133 case IX86_BUILTIN_LOADHPD:
14134 case IX86_BUILTIN_LOADLPD:
14135 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
14136 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
14137 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
14138 : CODE_FOR_sse2_loadlpd);
14139 arg0 = TREE_VALUE (arglist);
14140 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14141 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14142 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14143 tmode = insn_data[icode].operand[0].mode;
14144 mode0 = insn_data[icode].operand[1].mode;
14145 mode1 = insn_data[icode].operand[2].mode;
14146
14147 op0 = force_reg (mode0, op0);
14148 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
14149 if (optimize || target == 0
14150 || GET_MODE (target) != tmode
14151 || !register_operand (target, tmode))
14152 target = gen_reg_rtx (tmode);
14153 pat = GEN_FCN (icode) (target, op0, op1);
14154 if (! pat)
14155 return 0;
14156 emit_insn (pat);
14157 return target;
14158
14159 case IX86_BUILTIN_STOREHPS:
14160 case IX86_BUILTIN_STORELPS:
14161 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
14162 : CODE_FOR_sse_storelps);
14163 arg0 = TREE_VALUE (arglist);
14164 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14165 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14166 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14167 mode0 = insn_data[icode].operand[0].mode;
14168 mode1 = insn_data[icode].operand[1].mode;
14169
14170 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14171 op1 = force_reg (mode1, op1);
14172
14173 pat = GEN_FCN (icode) (op0, op1);
14174 if (! pat)
14175 return 0;
14176 emit_insn (pat);
14177 return const0_rtx;
14178
14179 case IX86_BUILTIN_MOVNTPS:
14180 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
14181 case IX86_BUILTIN_MOVNTQ:
14182 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
14183
14184 case IX86_BUILTIN_LDMXCSR:
14185 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
14186 target = assign_386_stack_local (SImode, 0);
14187 emit_move_insn (target, op0);
14188 emit_insn (gen_sse_ldmxcsr (target));
14189 return 0;
14190
14191 case IX86_BUILTIN_STMXCSR:
14192 target = assign_386_stack_local (SImode, 0);
14193 emit_insn (gen_sse_stmxcsr (target));
14194 return copy_to_mode_reg (SImode, target);
14195
14196 case IX86_BUILTIN_SHUFPS:
14197 case IX86_BUILTIN_SHUFPD:
14198 icode = (fcode == IX86_BUILTIN_SHUFPS
14199 ? CODE_FOR_sse_shufps
14200 : CODE_FOR_sse2_shufpd);
14201 arg0 = TREE_VALUE (arglist);
14202 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14203 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14204 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14205 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14206 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14207 tmode = insn_data[icode].operand[0].mode;
14208 mode0 = insn_data[icode].operand[1].mode;
14209 mode1 = insn_data[icode].operand[2].mode;
14210 mode2 = insn_data[icode].operand[3].mode;
14211
14212 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14213 op0 = copy_to_mode_reg (mode0, op0);
14214 if ((optimize && !register_operand (op1, mode1))
14215 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
14216 op1 = copy_to_mode_reg (mode1, op1);
14217 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14218 {
14219 /* @@@ better error message */
14220 error ("mask must be an immediate");
14221 return gen_reg_rtx (tmode);
14222 }
14223 if (optimize || target == 0
14224 || GET_MODE (target) != tmode
14225 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14226 target = gen_reg_rtx (tmode);
14227 pat = GEN_FCN (icode) (target, op0, op1, op2);
14228 if (! pat)
14229 return 0;
14230 emit_insn (pat);
14231 return target;
14232
14233 case IX86_BUILTIN_PSHUFW:
14234 case IX86_BUILTIN_PSHUFD:
14235 case IX86_BUILTIN_PSHUFHW:
14236 case IX86_BUILTIN_PSHUFLW:
14237 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
14238 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
14239 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
14240 : CODE_FOR_mmx_pshufw);
14241 arg0 = TREE_VALUE (arglist);
14242 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14243 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14244 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14245 tmode = insn_data[icode].operand[0].mode;
14246 mode1 = insn_data[icode].operand[1].mode;
14247 mode2 = insn_data[icode].operand[2].mode;
14248
14249 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14250 op0 = copy_to_mode_reg (mode1, op0);
14251 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14252 {
14253 /* @@@ better error message */
14254 error ("mask must be an immediate");
14255 return const0_rtx;
14256 }
14257 if (target == 0
14258 || GET_MODE (target) != tmode
14259 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14260 target = gen_reg_rtx (tmode);
14261 pat = GEN_FCN (icode) (target, op0, op1);
14262 if (! pat)
14263 return 0;
14264 emit_insn (pat);
14265 return target;
14266
14267 case IX86_BUILTIN_PSLLDQI128:
14268 case IX86_BUILTIN_PSRLDQI128:
14269 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
14270 : CODE_FOR_sse2_lshrti3);
14271 arg0 = TREE_VALUE (arglist);
14272 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14273 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14274 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14275 tmode = insn_data[icode].operand[0].mode;
14276 mode1 = insn_data[icode].operand[1].mode;
14277 mode2 = insn_data[icode].operand[2].mode;
14278
14279 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14280 {
14281 op0 = copy_to_reg (op0);
14282 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
14283 }
14284 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14285 {
14286 error ("shift must be an immediate");
14287 return const0_rtx;
14288 }
14289 target = gen_reg_rtx (V2DImode);
14290 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
14291 if (! pat)
14292 return 0;
14293 emit_insn (pat);
14294 return target;
14295
14296 case IX86_BUILTIN_FEMMS:
14297 emit_insn (gen_mmx_femms ());
14298 return NULL_RTX;
14299
14300 case IX86_BUILTIN_PAVGUSB:
14301 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
14302
14303 case IX86_BUILTIN_PF2ID:
14304 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
14305
14306 case IX86_BUILTIN_PFACC:
14307 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
14308
14309 case IX86_BUILTIN_PFADD:
14310 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
14311
14312 case IX86_BUILTIN_PFCMPEQ:
14313 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
14314
14315 case IX86_BUILTIN_PFCMPGE:
14316 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
14317
14318 case IX86_BUILTIN_PFCMPGT:
14319 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
14320
14321 case IX86_BUILTIN_PFMAX:
14322 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
14323
14324 case IX86_BUILTIN_PFMIN:
14325 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
14326
14327 case IX86_BUILTIN_PFMUL:
14328 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
14329
14330 case IX86_BUILTIN_PFRCP:
14331 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
14332
14333 case IX86_BUILTIN_PFRCPIT1:
14334 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
14335
14336 case IX86_BUILTIN_PFRCPIT2:
14337 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
14338
14339 case IX86_BUILTIN_PFRSQIT1:
14340 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
14341
14342 case IX86_BUILTIN_PFRSQRT:
14343 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
14344
14345 case IX86_BUILTIN_PFSUB:
14346 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
14347
14348 case IX86_BUILTIN_PFSUBR:
14349 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
14350
14351 case IX86_BUILTIN_PI2FD:
14352 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
14353
14354 case IX86_BUILTIN_PMULHRW:
14355 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
14356
14357 case IX86_BUILTIN_PF2IW:
14358 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
14359
14360 case IX86_BUILTIN_PFNACC:
14361 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
14362
14363 case IX86_BUILTIN_PFPNACC:
14364 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
14365
14366 case IX86_BUILTIN_PI2FW:
14367 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
14368
14369 case IX86_BUILTIN_PSWAPDSI:
14370 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
14371
14372 case IX86_BUILTIN_PSWAPDSF:
14373 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
14374
14375 case IX86_BUILTIN_SQRTSD:
14376 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
14377 case IX86_BUILTIN_LOADUPD:
14378 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
14379 case IX86_BUILTIN_STOREUPD:
14380 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
14381
14382 case IX86_BUILTIN_MFENCE:
14383 emit_insn (gen_sse2_mfence ());
14384 return 0;
14385 case IX86_BUILTIN_LFENCE:
14386 emit_insn (gen_sse2_lfence ());
14387 return 0;
14388
14389 case IX86_BUILTIN_CLFLUSH:
14390 arg0 = TREE_VALUE (arglist);
14391 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14392 icode = CODE_FOR_sse2_clflush;
14393 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
14394 op0 = copy_to_mode_reg (Pmode, op0);
14395
14396 emit_insn (gen_sse2_clflush (op0));
14397 return 0;
14398
14399 case IX86_BUILTIN_MOVNTPD:
14400 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
14401 case IX86_BUILTIN_MOVNTDQ:
14402 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
14403 case IX86_BUILTIN_MOVNTI:
14404 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
14405
14406 case IX86_BUILTIN_LOADDQU:
14407 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
14408 case IX86_BUILTIN_STOREDQU:
14409 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
14410
14411 case IX86_BUILTIN_MONITOR:
14412 arg0 = TREE_VALUE (arglist);
14413 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14414 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14415 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14416 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14417 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14418 if (!REG_P (op0))
14419 op0 = copy_to_mode_reg (SImode, op0);
14420 if (!REG_P (op1))
14421 op1 = copy_to_mode_reg (SImode, op1);
14422 if (!REG_P (op2))
14423 op2 = copy_to_mode_reg (SImode, op2);
14424 emit_insn (gen_sse3_monitor (op0, op1, op2));
14425 return 0;
14426
14427 case IX86_BUILTIN_MWAIT:
14428 arg0 = TREE_VALUE (arglist);
14429 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14430 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14431 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14432 if (!REG_P (op0))
14433 op0 = copy_to_mode_reg (SImode, op0);
14434 if (!REG_P (op1))
14435 op1 = copy_to_mode_reg (SImode, op1);
14436 emit_insn (gen_sse3_mwait (op0, op1));
14437 return 0;
14438
14439 case IX86_BUILTIN_LDDQU:
14440 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
14441 target, 1);
14442
14443 case IX86_BUILTIN_VEC_INIT_V2SI:
14444 case IX86_BUILTIN_VEC_INIT_V4HI:
14445 case IX86_BUILTIN_VEC_INIT_V8QI:
14446 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
14447
14448 case IX86_BUILTIN_VEC_EXT_V2DF:
14449 case IX86_BUILTIN_VEC_EXT_V2DI:
14450 case IX86_BUILTIN_VEC_EXT_V4SF:
14451 case IX86_BUILTIN_VEC_EXT_V4SI:
14452 case IX86_BUILTIN_VEC_EXT_V8HI:
14453 case IX86_BUILTIN_VEC_EXT_V4HI:
14454 return ix86_expand_vec_ext_builtin (arglist, target);
14455
14456 case IX86_BUILTIN_VEC_SET_V8HI:
14457 case IX86_BUILTIN_VEC_SET_V4HI:
14458 return ix86_expand_vec_set_builtin (arglist);
14459
14460 default:
14461 break;
14462 }
14463
14464 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14465 if (d->code == fcode)
14466 {
14467 /* Compares are treated specially. */
14468 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14469 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
14470 || d->icode == CODE_FOR_sse2_maskcmpv2df3
14471 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14472 return ix86_expand_sse_compare (d, arglist, target);
14473
14474 return ix86_expand_binop_builtin (d->icode, arglist, target);
14475 }
14476
14477 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
14478 if (d->code == fcode)
14479 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
14480
14481 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14482 if (d->code == fcode)
14483 return ix86_expand_sse_comi (d, arglist, target);
14484
14485 gcc_unreachable ();
14486 }
14487
14488 /* Store OPERAND to memory after reload is completed. This means
14489 that we can't easily use assign_stack_local. */
14490 rtx
14491 ix86_force_to_memory (enum machine_mode mode, rtx operand)
14492 {
14493 rtx result;
14494 if (!reload_completed)
14495 abort ();
14496 if (TARGET_RED_ZONE)
14497 {
14498 result = gen_rtx_MEM (mode,
14499 gen_rtx_PLUS (Pmode,
14500 stack_pointer_rtx,
14501 GEN_INT (-RED_ZONE_SIZE)));
14502 emit_move_insn (result, operand);
14503 }
14504 else if (!TARGET_RED_ZONE && TARGET_64BIT)
14505 {
14506 switch (mode)
14507 {
14508 case HImode:
14509 case SImode:
14510 operand = gen_lowpart (DImode, operand);
14511 /* FALLTHRU */
14512 case DImode:
14513 emit_insn (
14514 gen_rtx_SET (VOIDmode,
14515 gen_rtx_MEM (DImode,
14516 gen_rtx_PRE_DEC (DImode,
14517 stack_pointer_rtx)),
14518 operand));
14519 break;
14520 default:
14521 abort ();
14522 }
14523 result = gen_rtx_MEM (mode, stack_pointer_rtx);
14524 }
14525 else
14526 {
14527 switch (mode)
14528 {
14529 case DImode:
14530 {
14531 rtx operands[2];
14532 split_di (&operand, 1, operands, operands + 1);
14533 emit_insn (
14534 gen_rtx_SET (VOIDmode,
14535 gen_rtx_MEM (SImode,
14536 gen_rtx_PRE_DEC (Pmode,
14537 stack_pointer_rtx)),
14538 operands[1]));
14539 emit_insn (
14540 gen_rtx_SET (VOIDmode,
14541 gen_rtx_MEM (SImode,
14542 gen_rtx_PRE_DEC (Pmode,
14543 stack_pointer_rtx)),
14544 operands[0]));
14545 }
14546 break;
14547 case HImode:
14548 /* It is better to store HImodes as SImodes. */
14549 if (!TARGET_PARTIAL_REG_STALL)
14550 operand = gen_lowpart (SImode, operand);
14551 /* FALLTHRU */
14552 case SImode:
14553 emit_insn (
14554 gen_rtx_SET (VOIDmode,
14555 gen_rtx_MEM (GET_MODE (operand),
14556 gen_rtx_PRE_DEC (SImode,
14557 stack_pointer_rtx)),
14558 operand));
14559 break;
14560 default:
14561 abort ();
14562 }
14563 result = gen_rtx_MEM (mode, stack_pointer_rtx);
14564 }
14565 return result;
14566 }
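
/* Illustrative effect: on x86_64 with the red zone available this is a
   plain store RED_ZONE_SIZE bytes below the stack pointer; otherwise the
   operand is pushed and the result is a MEM at the new stack pointer,
   to be released later by ix86_free_from_memory.  */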
14567
14568 /* Free the operand from memory. */
14569 void
14570 ix86_free_from_memory (enum machine_mode mode)
14571 {
14572 if (!TARGET_RED_ZONE)
14573 {
14574 int size;
14575
14576 if (mode == DImode || TARGET_64BIT)
14577 size = 8;
14578 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
14579 size = 2;
14580 else
14581 size = 4;
14582 /* Use LEA to deallocate stack space. In peephole2 it will be converted
14583 to a pop or add instruction if registers are available. */
14584 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14585 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
14586 GEN_INT (size))));
14587 }
14588 }
14589
14590 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
14591 QImode must go into class Q_REGS.
14592 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
14593 movdf to do mem-to-mem moves through integer regs. */
14594 enum reg_class
14595 ix86_preferred_reload_class (rtx x, enum reg_class class)
14596 {
14597 if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x)))
14598 return NO_REGS;
14599 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
14600 {
14601 /* SSE can't load any constant directly yet. */
14602 if (SSE_CLASS_P (class))
14603 return NO_REGS;
14604 /* Floats can load 0 and 1. */
14605 if (MAYBE_FLOAT_CLASS_P (class) && standard_80387_constant_p (x))
14606 {
14607 /* Limit class to non-SSE. Use GENERAL_REGS if possible. */
14608 if (MAYBE_SSE_CLASS_P (class))
14609 return (reg_class_subset_p (class, GENERAL_REGS)
14610 ? GENERAL_REGS : FLOAT_REGS);
14611 else
14612 return class;
14613 }
14614 /* General regs can load everything. */
14615 if (reg_class_subset_p (class, GENERAL_REGS))
14616 return GENERAL_REGS;
14617 /* In case we haven't resolved FLOAT or SSE yet, give up. */
14618 if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class))
14619 return NO_REGS;
14620 }
14621 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
14622 return NO_REGS;
14623 if (GET_MODE (x) == QImode && ! reg_class_subset_p (class, Q_REGS))
14624 return Q_REGS;
14625 return class;
14626 }
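
/* Illustrative: asked to reload a CONST_DOUBLE such as 1.0 into an SSE
   class this returns NO_REGS, so reload leaves the constant in the
   constant pool instead of trying to load it through an SSE register;
   for the x87 classes the standard constants 0.0 and 1.0 remain
   loadable directly.  */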
14627
14628 /* If we are copying between general and FP registers, we need a memory
14629 location. The same is true for SSE and MMX registers.
14630
14631 The macro can't work reliably when one of the CLASSES is a class containing
14632 registers from multiple units (SSE, MMX, integer). We avoid this by never
14633 combining those units in a single alternative in the machine description.
14634 Ensure that this constraint holds to avoid unexpected surprises.
14635
14636 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
14637 enforce these sanity checks. */
14638 int
14639 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
14640 enum machine_mode mode, int strict)
14641 {
14642 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
14643 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
14644 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
14645 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
14646 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
14647 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
14648 {
14649 if (strict)
14650 abort ();
14651 else
14652 return 1;
14653 }
14654 return (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2)
14655 || ((SSE_CLASS_P (class1) != SSE_CLASS_P (class2)
14656 || MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
14657 && ((mode != SImode && (mode != DImode || !TARGET_64BIT))
14658 || (!TARGET_INTER_UNIT_MOVES && !optimize_size))));
14659 }
14660 /* Return the cost of moving data from a register in class CLASS1 to
14661 one in class CLASS2.
14662
14663 It is not required that the cost always equal 2 when FROM is the same as TO;
14664 on some machines it is expensive to move between registers if they are not
14665 general registers. */
14666 int
14667 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
14668 enum reg_class class2)
14669 {
14670 /* In case we require secondary memory, compute the cost of the store followed
14671 by a load. In order to avoid bad register allocation choices, we need
14672 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
14673
14674 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
14675 {
14676 int cost = 1;
14677
14678 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
14679 MEMORY_MOVE_COST (mode, class1, 1));
14680 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
14681 MEMORY_MOVE_COST (mode, class2, 1));
14682
14683 /* When copying from a general purpose register we may emit multiple
14684 stores followed by a single load, causing a memory size mismatch stall.
14685 Count this as an arbitrarily high cost of 20. */
14686 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
14687 cost += 20;
14688
14689 /* In the case of FP/MMX moves, the registers actually overlap, and we
14690 have to switch modes in order to treat them differently. */
14691 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
14692 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
14693 cost += 20;
14694
14695 return cost;
14696 }
14697
14698 /* Moves between SSE/MMX and integer unit are expensive. */
14699 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
14700 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
14701 return ix86_cost->mmxsse_to_integer;
14702 if (MAYBE_FLOAT_CLASS_P (class1))
14703 return ix86_cost->fp_move;
14704 if (MAYBE_SSE_CLASS_P (class1))
14705 return ix86_cost->sse_move;
14706 if (MAYBE_MMX_CLASS_P (class1))
14707 return ix86_cost->mmx_move;
14708 return 2;
14709 }
14710
14711 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
14712 int
14713 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
14714 {
14715 /* Flags and only flags can only hold CCmode values. */
14716 if (CC_REGNO_P (regno))
14717 return GET_MODE_CLASS (mode) == MODE_CC;
14718 if (GET_MODE_CLASS (mode) == MODE_CC
14719 || GET_MODE_CLASS (mode) == MODE_RANDOM
14720 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
14721 return 0;
14722 if (FP_REGNO_P (regno))
14723 return VALID_FP_MODE_P (mode);
14724 if (SSE_REGNO_P (regno))
14725 {
14726 /* We implement the move patterns for all vector modes into and
14727 out of SSE registers, even when no operation instructions
14728 are available. */
14729 return (VALID_SSE_REG_MODE (mode)
14730 || VALID_SSE2_REG_MODE (mode)
14731 || VALID_MMX_REG_MODE (mode)
14732 || VALID_MMX_REG_MODE_3DNOW (mode));
14733 }
14734 if (MMX_REGNO_P (regno))
14735 {
14736 /* We implement the move patterns for 3DNOW modes even in MMX mode,
14737 so if the register is available at all, then we can move data of
14738 the given mode into or out of it. */
14739 return (VALID_MMX_REG_MODE (mode)
14740 || VALID_MMX_REG_MODE_3DNOW (mode));
14741 }
14742 /* We handle both integers and floats in the general purpose registers.
14743 In the future we should be able to handle vector modes as well. */
14744 if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
14745 return 0;
14746 /* Take care with QImode values - they can be in non-QI regs, but then
14747 they do cause partial register stalls. */
14748 if (regno < 4 || mode != QImode || TARGET_64BIT)
14749 return 1;
14750 return reload_in_progress || reload_completed || !TARGET_PARTIAL_REG_STALL;
14751 }
14752
14753 /* Return the cost of moving data of mode M between a
14754 register and memory. A value of 2 is the default; this cost is
14755 relative to those in `REGISTER_MOVE_COST'.
14756
14757 If moving between registers and memory is more expensive than
14758 between two registers, you should define this macro to express the
14759 relative cost.
14760
14761 Also model the increased cost of moving QImode registers in non-Q_REGS
14762 classes.
14763 */
14764 int
14765 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
14766 {
14767 if (FLOAT_CLASS_P (class))
14768 {
14769 int index;
14770 switch (mode)
14771 {
14772 case SFmode:
14773 index = 0;
14774 break;
14775 case DFmode:
14776 index = 1;
14777 break;
14778 case XFmode:
14779 index = 2;
14780 break;
14781 default:
14782 return 100;
14783 }
14784 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
14785 }
14786 if (SSE_CLASS_P (class))
14787 {
14788 int index;
14789 switch (GET_MODE_SIZE (mode))
14790 {
14791 case 4:
14792 index = 0;
14793 break;
14794 case 8:
14795 index = 1;
14796 break;
14797 case 16:
14798 index = 2;
14799 break;
14800 default:
14801 return 100;
14802 }
14803 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
14804 }
14805 if (MMX_CLASS_P (class))
14806 {
14807 int index;
14808 switch (GET_MODE_SIZE (mode))
14809 {
14810 case 4:
14811 index = 0;
14812 break;
14813 case 8:
14814 index = 1;
14815 break;
14816 default:
14817 return 100;
14818 }
14819 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
14820 }
14821 switch (GET_MODE_SIZE (mode))
14822 {
14823 case 1:
14824 if (in)
14825 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
14826 : ix86_cost->movzbl_load);
14827 else
14828 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
14829 : ix86_cost->int_store[0] + 4);
14830 break;
14831 case 2:
14832 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
14833 default:
14834 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
14835 if (mode == TFmode)
14836 mode = XFmode;
14837 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
14838 * (((int) GET_MODE_SIZE (mode)
14839 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
14840 }
14841 }
14842
14843 /* Compute a (partial) cost for rtx X. Return true if the complete
14844 cost has been computed, and false if subexpressions should be
14845 scanned. In either case, *TOTAL contains the cost result. */
14846
14847 static bool
14848 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
14849 {
14850 enum machine_mode mode = GET_MODE (x);
14851
14852 switch (code)
14853 {
14854 case CONST_INT:
14855 case CONST:
14856 case LABEL_REF:
14857 case SYMBOL_REF:
14858 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
14859 *total = 3;
14860 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
14861 *total = 2;
14862 else if (flag_pic && SYMBOLIC_CONST (x)
14863 && (!TARGET_64BIT
14864 || (GET_CODE (x) != LABEL_REF
14865 && (GET_CODE (x) != SYMBOL_REF
14866 || !SYMBOL_REF_LOCAL_P (x)))))
14867 *total = 1;
14868 else
14869 *total = 0;
14870 return true;
14871
14872 case CONST_DOUBLE:
14873 if (mode == VOIDmode)
14874 *total = 0;
14875 else
14876 switch (standard_80387_constant_p (x))
14877 {
14878 case 1: /* 0.0 */
14879 *total = 1;
14880 break;
14881 default: /* Other constants */
14882 *total = 2;
14883 break;
14884 case 0:
14885 case -1:
14886 /* Start with (MEM (SYMBOL_REF)), since that's where
14887 it'll probably end up. Add a penalty for size. */
14888 *total = (COSTS_N_INSNS (1)
14889 + (flag_pic != 0 && !TARGET_64BIT)
14890 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
14891 break;
14892 }
14893 return true;
14894
14895 case ZERO_EXTEND:
14896 /* The zero extension is often completely free on x86_64, so make
14897 it as cheap as possible. */
14898 if (TARGET_64BIT && mode == DImode
14899 && GET_MODE (XEXP (x, 0)) == SImode)
14900 *total = 1;
14901 else if (TARGET_ZERO_EXTEND_WITH_AND)
14902 *total = COSTS_N_INSNS (ix86_cost->add);
14903 else
14904 *total = COSTS_N_INSNS (ix86_cost->movzx);
14905 return false;
14906
14907 case SIGN_EXTEND:
14908 *total = COSTS_N_INSNS (ix86_cost->movsx);
14909 return false;
14910
14911 case ASHIFT:
14912 if (GET_CODE (XEXP (x, 1)) == CONST_INT
14913 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
14914 {
14915 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14916 if (value == 1)
14917 {
14918 *total = COSTS_N_INSNS (ix86_cost->add);
14919 return false;
14920 }
14921 if ((value == 2 || value == 3)
14922 && ix86_cost->lea <= ix86_cost->shift_const)
14923 {
14924 *total = COSTS_N_INSNS (ix86_cost->lea);
14925 return false;
14926 }
14927 }
14928 /* FALLTHRU */
14929
14930 case ROTATE:
14931 case ASHIFTRT:
14932 case LSHIFTRT:
14933 case ROTATERT:
14934 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
14935 {
14936 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14937 {
14938 if (INTVAL (XEXP (x, 1)) > 32)
14939 *total = COSTS_N_INSNS (ix86_cost->shift_const + 2);
14940 else
14941 *total = COSTS_N_INSNS (ix86_cost->shift_const * 2);
14942 }
14943 else
14944 {
14945 if (GET_CODE (XEXP (x, 1)) == AND)
14946 *total = COSTS_N_INSNS (ix86_cost->shift_var * 2);
14947 else
14948 *total = COSTS_N_INSNS (ix86_cost->shift_var * 6 + 2);
14949 }
14950 }
14951 else
14952 {
14953 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14954 *total = COSTS_N_INSNS (ix86_cost->shift_const);
14955 else
14956 *total = COSTS_N_INSNS (ix86_cost->shift_var);
14957 }
14958 return false;
14959
14960 case MULT:
14961 if (FLOAT_MODE_P (mode))
14962 {
14963 *total = COSTS_N_INSNS (ix86_cost->fmul);
14964 return false;
14965 }
14966 else
14967 {
14968 rtx op0 = XEXP (x, 0);
14969 rtx op1 = XEXP (x, 1);
14970 int nbits;
14971 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14972 {
14973 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14974 for (nbits = 0; value != 0; value &= value - 1)
14975 nbits++;
14976 }
14977 else
14978 /* This is arbitrary. */
14979 nbits = 7;
14980
14981 /* Compute costs correctly for widening multiplication. */
14982 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op1) == ZERO_EXTEND)
14983 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
14984 == GET_MODE_SIZE (mode))
14985 {
14986 int is_mulwiden = 0;
14987 enum machine_mode inner_mode = GET_MODE (op0);
14988
14989 if (GET_CODE (op0) == GET_CODE (op1))
14990 is_mulwiden = 1, op1 = XEXP (op1, 0);
14991 else if (GET_CODE (op1) == CONST_INT)
14992 {
14993 if (GET_CODE (op0) == SIGN_EXTEND)
14994 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
14995 == INTVAL (op1);
14996 else
14997 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
14998 }
14999
15000 if (is_mulwiden)
15001 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
15002 }
15003
15004 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
15005 + nbits * ix86_cost->mult_bit)
15006 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
15007
15008 return true;
15009 }
15010
15011 case DIV:
15012 case UDIV:
15013 case MOD:
15014 case UMOD:
15015 if (FLOAT_MODE_P (mode))
15016 *total = COSTS_N_INSNS (ix86_cost->fdiv);
15017 else
15018 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
15019 return false;
15020
15021 case PLUS:
15022 if (FLOAT_MODE_P (mode))
15023 *total = COSTS_N_INSNS (ix86_cost->fadd);
15024 else if (GET_MODE_CLASS (mode) == MODE_INT
15025 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
15026 {
15027 if (GET_CODE (XEXP (x, 0)) == PLUS
15028 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
15029 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
15030 && CONSTANT_P (XEXP (x, 1)))
15031 {
15032 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
15033 if (val == 2 || val == 4 || val == 8)
15034 {
15035 *total = COSTS_N_INSNS (ix86_cost->lea);
15036 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15037 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
15038 outer_code);
15039 *total += rtx_cost (XEXP (x, 1), outer_code);
15040 return true;
15041 }
15042 }
15043 else if (GET_CODE (XEXP (x, 0)) == MULT
15044 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
15045 {
15046 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
15047 if (val == 2 || val == 4 || val == 8)
15048 {
15049 *total = COSTS_N_INSNS (ix86_cost->lea);
15050 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15051 *total += rtx_cost (XEXP (x, 1), outer_code);
15052 return true;
15053 }
15054 }
15055 else if (GET_CODE (XEXP (x, 0)) == PLUS)
15056 {
15057 *total = COSTS_N_INSNS (ix86_cost->lea);
15058 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15059 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15060 *total += rtx_cost (XEXP (x, 1), outer_code);
15061 return true;
15062 }
15063 }
15064 /* FALLTHRU */
15065
15066 case MINUS:
15067 if (FLOAT_MODE_P (mode))
15068 {
15069 *total = COSTS_N_INSNS (ix86_cost->fadd);
15070 return false;
15071 }
15072 /* FALLTHRU */
15073
15074 case AND:
15075 case IOR:
15076 case XOR:
15077 if (!TARGET_64BIT && mode == DImode)
15078 {
15079 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
15080 + (rtx_cost (XEXP (x, 0), outer_code)
15081 << (GET_MODE (XEXP (x, 0)) != DImode))
15082 + (rtx_cost (XEXP (x, 1), outer_code)
15083 << (GET_MODE (XEXP (x, 1)) != DImode)));
15084 return true;
15085 }
15086 /* FALLTHRU */
15087
15088 case NEG:
15089 if (FLOAT_MODE_P (mode))
15090 {
15091 *total = COSTS_N_INSNS (ix86_cost->fchs);
15092 return false;
15093 }
15094 /* FALLTHRU */
15095
15096 case NOT:
15097 if (!TARGET_64BIT && mode == DImode)
15098 *total = COSTS_N_INSNS (ix86_cost->add * 2);
15099 else
15100 *total = COSTS_N_INSNS (ix86_cost->add);
15101 return false;
15102
15103 case COMPARE:
15104 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
15105 && XEXP (XEXP (x, 0), 1) == const1_rtx
15106 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
15107 && XEXP (x, 1) == const0_rtx)
15108 {
15109 /* This kind of construct is implemented using test[bwl].
15110 Treat it as if we had an AND. */
15111 *total = (COSTS_N_INSNS (ix86_cost->add)
15112 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
15113 + rtx_cost (const1_rtx, outer_code));
15114 return true;
15115 }
15116 return false;
15117
15118 case FLOAT_EXTEND:
15119 if (!TARGET_SSE_MATH
15120 || mode == XFmode
15121 || (mode == DFmode && !TARGET_SSE2))
15122 *total = 0;
15123 return false;
15124
15125 case ABS:
15126 if (FLOAT_MODE_P (mode))
15127 *total = COSTS_N_INSNS (ix86_cost->fabs);
15128 return false;
15129
15130 case SQRT:
15131 if (FLOAT_MODE_P (mode))
15132 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
15133 return false;
15134
15135 case UNSPEC:
15136 if (XINT (x, 1) == UNSPEC_TP)
15137 *total = 0;
15138 return false;
15139
15140 default:
15141 return false;
15142 }
15143 }
15144
15145 #if TARGET_MACHO
15146
15147 static int current_machopic_label_num;
15148
15149 /* Given a symbol name and its associated stub, write out the
15150 definition of the stub. */
15151
15152 void
15153 machopic_output_stub (FILE *file, const char *symb, const char *stub)
15154 {
15155 unsigned int length;
15156 char *binder_name, *symbol_name, lazy_ptr_name[32];
15157 int label = ++current_machopic_label_num;
15158
15159 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
15160 symb = (*targetm.strip_name_encoding) (symb);
15161
15162 length = strlen (stub);
15163 binder_name = alloca (length + 32);
15164 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
15165
15166 length = strlen (symb);
15167 symbol_name = alloca (length + 32);
15168 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
15169
15170 sprintf (lazy_ptr_name, "L%d$lz", label);
15171
15172 if (MACHOPIC_PURE)
15173 machopic_picsymbol_stub_section ();
15174 else
15175 machopic_symbol_stub_section ();
15176
15177 fprintf (file, "%s:\n", stub);
15178 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
15179
15180 if (MACHOPIC_PURE)
15181 {
15182 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
15183 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
15184 fprintf (file, "\tjmp %%edx\n");
15185 }
15186 else
15187 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
15188
15189 fprintf (file, "%s:\n", binder_name);
15190
15191 if (MACHOPIC_PURE)
15192 {
15193 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
15194 fprintf (file, "\tpushl %%eax\n");
15195 }
15196 else
15197 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
15198
15199 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
15200
15201 machopic_lazy_symbol_ptr_section ();
15202 fprintf (file, "%s:\n", lazy_ptr_name);
15203 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
15204 fprintf (file, "\t.long %s\n", binder_name);
15205 }
15206 #endif /* TARGET_MACHO */
15207
15208 /* Order the registers for register allocator. */
15209
15210 void
15211 x86_order_regs_for_local_alloc (void)
15212 {
15213 int pos = 0;
15214 int i;
15215
15216 /* First allocate the local general purpose registers. */
15217 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15218 if (GENERAL_REGNO_P (i) && call_used_regs[i])
15219 reg_alloc_order [pos++] = i;
15220
15221 /* Global general purpose registers. */
15222 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15223 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
15224 reg_alloc_order [pos++] = i;
15225
15226 /* x87 registers come first in case we are doing FP math
15227 using them. */
15228 if (!TARGET_SSE_MATH)
15229 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
15230 reg_alloc_order [pos++] = i;
15231
15232 /* SSE registers. */
15233 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
15234 reg_alloc_order [pos++] = i;
15235 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
15236 reg_alloc_order [pos++] = i;
15237
15238 /* x87 registers. */
15239 if (TARGET_SSE_MATH)
15240 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
15241 reg_alloc_order [pos++] = i;
15242
15243 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
15244 reg_alloc_order [pos++] = i;
15245
15246 /* Initialize the rest of the array as we do not allocate some registers
15247 at all. */
15248 while (pos < FIRST_PSEUDO_REGISTER)
15249 reg_alloc_order [pos++] = 0;
15250 }
15251
15252 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
15253 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
15254 #endif
15255
15256 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
15257 struct attribute_spec.handler. */
15258 static tree
15259 ix86_handle_struct_attribute (tree *node, tree name,
15260 tree args ATTRIBUTE_UNUSED,
15261 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
15262 {
15263 tree *type = NULL;
15264 if (DECL_P (*node))
15265 {
15266 if (TREE_CODE (*node) == TYPE_DECL)
15267 type = &TREE_TYPE (*node);
15268 }
15269 else
15270 type = node;
15271
15272 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
15273 || TREE_CODE (*type) == UNION_TYPE)))
15274 {
15275 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
15276 *no_add_attrs = true;
15277 }
15278
15279 else if ((is_attribute_p ("ms_struct", name)
15280 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
15281 || ((is_attribute_p ("gcc_struct", name)
15282 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
15283 {
15284 warning ("%qs incompatible attribute ignored",
15285 IDENTIFIER_POINTER (name));
15286 *no_add_attrs = true;
15287 }
15288
15289 return NULL_TREE;
15290 }
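
/* Typical use (illustrative):

	struct __attribute__ ((ms_struct)) S { char c; int i : 5; };

   Attaching both ms_struct and gcc_struct to the same type triggers the
   "incompatible attribute" warning above.  */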
15291
15292 static bool
15293 ix86_ms_bitfield_layout_p (tree record_type)
15294 {
15295 return ((TARGET_USE_MS_BITFIELD_LAYOUT
15296 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
15297 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
15298 }
15299
15300 /* Returns an expression indicating where the this parameter is
15301 located on entry to the FUNCTION. */
15302
15303 static rtx
15304 x86_this_parameter (tree function)
15305 {
15306 tree type = TREE_TYPE (function);
15307
15308 if (TARGET_64BIT)
15309 {
15310 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
15311 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
15312 }
15313
15314 if (ix86_function_regparm (type, function) > 0)
15315 {
15316 tree parm;
15317
15318 parm = TYPE_ARG_TYPES (type);
15319 /* Figure out whether or not the function has a variable number of
15320 arguments. */
15321 for (; parm; parm = TREE_CHAIN (parm))
15322 if (TREE_VALUE (parm) == void_type_node)
15323 break;
15324 /* If not, the this parameter is in the first argument. */
15325 if (parm)
15326 {
15327 int regno = 0;
15328 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
15329 regno = 2;
15330 return gen_rtx_REG (SImode, regno);
15331 }
15332 }
15333
15334 if (aggregate_value_p (TREE_TYPE (type), type))
15335 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
15336 else
15337 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
15338 }
15339
15340 /* Determine whether x86_output_mi_thunk can succeed. */
15341
15342 static bool
15343 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
15344 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
15345 HOST_WIDE_INT vcall_offset, tree function)
15346 {
15347 /* 64-bit can handle anything. */
15348 if (TARGET_64BIT)
15349 return true;
15350
15351 /* For 32-bit, everything's fine if we have one free register. */
15352 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
15353 return true;
15354
15355 /* Need a free register for vcall_offset. */
15356 if (vcall_offset)
15357 return false;
15358
15359 /* Need a free register for GOT references. */
15360 if (flag_pic && !(*targetm.binds_local_p) (function))
15361 return false;
15362
15363 /* Otherwise ok. */
15364 return true;
15365 }
15366
15367 /* Output the assembler code for a thunk function. THUNK_DECL is the
15368 declaration for the thunk function itself, FUNCTION is the decl for
15369 the target function. DELTA is an immediate constant offset to be
15370 added to THIS. If VCALL_OFFSET is nonzero, the word at
15371 *(*this + vcall_offset) should be added to THIS. */
15372
15373 static void
15374 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
15375 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
15376 HOST_WIDE_INT vcall_offset, tree function)
15377 {
15378 rtx xops[3];
15379 rtx this = x86_this_parameter (function);
15380 rtx this_reg, tmp;
15381
15382 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
15383 pull it in now and let DELTA benefit. */
15384 if (REG_P (this))
15385 this_reg = this;
15386 else if (vcall_offset)
15387 {
15388 /* Put the this parameter into %eax. */
15389 xops[0] = this;
15390 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
15391 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15392 }
15393 else
15394 this_reg = NULL_RTX;
15395
15396 /* Adjust the this parameter by a fixed constant. */
15397 if (delta)
15398 {
15399 xops[0] = GEN_INT (delta);
15400 xops[1] = this_reg ? this_reg : this;
15401 if (TARGET_64BIT)
15402 {
15403 if (!x86_64_general_operand (xops[0], DImode))
15404 {
15405 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
15406 xops[1] = tmp;
15407 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
15408 xops[0] = tmp;
15409 xops[1] = this;
15410 }
15411 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
15412 }
15413 else
15414 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
15415 }
15416
15417 /* Adjust the this parameter by a value stored in the vtable. */
15418 if (vcall_offset)
15419 {
15420 if (TARGET_64BIT)
15421 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
15422 else
15423 {
15424 int tmp_regno = 2 /* ECX */;
15425 if (lookup_attribute ("fastcall",
15426 TYPE_ATTRIBUTES (TREE_TYPE (function))))
15427 tmp_regno = 0 /* EAX */;
15428 tmp = gen_rtx_REG (SImode, tmp_regno);
15429 }
15430
15431 xops[0] = gen_rtx_MEM (Pmode, this_reg);
15432 xops[1] = tmp;
15433 if (TARGET_64BIT)
15434 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
15435 else
15436 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15437
15438 /* Adjust the this parameter. */
15439 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
15440 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
15441 {
15442 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
15443 xops[0] = GEN_INT (vcall_offset);
15444 xops[1] = tmp2;
15445 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
15446 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
15447 }
15448 xops[1] = this_reg;
15449 if (TARGET_64BIT)
15450 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
15451 else
15452 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
15453 }
15454
15455 /* If necessary, drop THIS back to its stack slot. */
15456 if (this_reg && this_reg != this)
15457 {
15458 xops[0] = this_reg;
15459 xops[1] = this;
15460 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15461 }
15462
15463 xops[0] = XEXP (DECL_RTL (function), 0);
15464 if (TARGET_64BIT)
15465 {
15466 if (!flag_pic || (*targetm.binds_local_p) (function))
15467 output_asm_insn ("jmp\t%P0", xops);
15468 else
15469 {
15470 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
15471 tmp = gen_rtx_CONST (Pmode, tmp);
15472 tmp = gen_rtx_MEM (QImode, tmp);
15473 xops[0] = tmp;
15474 output_asm_insn ("jmp\t%A0", xops);
15475 }
15476 }
15477 else
15478 {
15479 if (!flag_pic || (*targetm.binds_local_p) (function))
15480 output_asm_insn ("jmp\t%P0", xops);
15481 else
15482 #if TARGET_MACHO
15483 if (TARGET_MACHO)
15484 {
15485 rtx sym_ref = XEXP (DECL_RTL (function), 0);
15486 tmp = (gen_rtx_SYMBOL_REF
15487 (Pmode,
15488 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
15489 tmp = gen_rtx_MEM (QImode, tmp);
15490 xops[0] = tmp;
15491 output_asm_insn ("jmp\t%0", xops);
15492 }
15493 else
15494 #endif /* TARGET_MACHO */
15495 {
15496 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
15497 output_set_got (tmp);
15498
15499 xops[1] = tmp;
15500 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
15501 output_asm_insn ("jmp\t{*}%1", xops);
15502 }
15503 }
15504 }
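
/* For a simple 32-bit thunk with only a DELTA adjustment and a locally
   bound target, the output is roughly (illustrative):

	addl	$DELTA, 4(%esp)
	jmp	target_function

   i.e. the this pointer is adjusted in its stack slot and control is
   transferred with a direct jump.  */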
15505
15506 static void
15507 x86_file_start (void)
15508 {
15509 default_file_start ();
15510 if (X86_FILE_START_VERSION_DIRECTIVE)
15511 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
15512 if (X86_FILE_START_FLTUSED)
15513 fputs ("\t.global\t__fltused\n", asm_out_file);
15514 if (ix86_asm_dialect == ASM_INTEL)
15515 fputs ("\t.intel_syntax\n", asm_out_file);
15516 }
15517
15518 int
15519 x86_field_alignment (tree field, int computed)
15520 {
15521 enum machine_mode mode;
15522 tree type = TREE_TYPE (field);
15523
15524 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
15525 return computed;
15526 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
15527 ? get_inner_array_type (type) : type);
15528 if (mode == DFmode || mode == DCmode
15529 || GET_MODE_CLASS (mode) == MODE_INT
15530 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
15531 return MIN (32, computed);
15532 return computed;
15533 }
15534
15535 /* Output assembler code to FILE to increment profiler label # LABELNO
15536 for profiling a function entry. */
15537 void
15538 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
15539 {
15540 if (TARGET_64BIT)
15541 if (flag_pic)
15542 {
15543 #ifndef NO_PROFILE_COUNTERS
15544 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
15545 #endif
15546 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
15547 }
15548 else
15549 {
15550 #ifndef NO_PROFILE_COUNTERS
15551 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
15552 #endif
15553 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
15554 }
15555 else if (flag_pic)
15556 {
15557 #ifndef NO_PROFILE_COUNTERS
15558 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
15559 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
15560 #endif
15561 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
15562 }
15563 else
15564 {
15565 #ifndef NO_PROFILE_COUNTERS
15566 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
15567 PROFILE_COUNT_REGISTER);
15568 #endif
15569 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
15570 }
15571 }
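
/* Illustrative 32-bit non-PIC output, assuming the usual definitions of
   MCOUNT_NAME, LPREFIX and PROFILE_COUNT_REGISTER and profile counters
   enabled:

	movl	$.LP0,%edx
	call	mcount
 */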
15572
15573 /* We don't have exact information about the insn sizes, but we may assume
15574 quite safely that we are informed about all 1 byte insns and memory
15575 address sizes. This is enough to eliminate unnecessary padding in
15576 99% of cases. */
15577
15578 static int
15579 min_insn_size (rtx insn)
15580 {
15581 int l = 0;
15582
15583 if (!INSN_P (insn) || !active_insn_p (insn))
15584 return 0;
15585
15586 /* Discard the alignments we've emitted and jump instructions. */
15587 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
15588 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
15589 return 0;
15590 if (GET_CODE (insn) == JUMP_INSN
15591 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
15592 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
15593 return 0;
15594
15595 /* Important case - calls are always 5 bytes.
15596 It is common to have many calls in a row. */
15597 if (GET_CODE (insn) == CALL_INSN
15598 && symbolic_reference_mentioned_p (PATTERN (insn))
15599 && !SIBLING_CALL_P (insn))
15600 return 5;
15601 if (get_attr_length (insn) <= 1)
15602 return 1;
15603
15604 /* For normal instructions we may rely on the sizes of addresses
15605 and the presence of a symbol to require 4 bytes of encoding.
15606 This is not the case for jumps, where references are PC relative. */
15607 if (GET_CODE (insn) != JUMP_INSN)
15608 {
15609 l = get_attr_length_address (insn);
15610 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
15611 l = 4;
15612 }
15613 if (l)
15614 return 1+l;
15615 else
15616 return 2;
15617 }
15618
15619 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
15620 window. */
15621
15622 static void
15623 ix86_avoid_jump_misspredicts (void)
15624 {
15625 rtx insn, start = get_insns ();
15626 int nbytes = 0, njumps = 0;
15627 int isjump = 0;
15628
15629 /* Look for all minimal intervals of instructions containing 4 jumps.
15630 The intervals are bounded by START and INSN. NBYTES is the total
15631 size of instructions in the interval including INSN and not including
15632 START. When NBYTES is smaller than 16 bytes, it is possible
15633 that the end of START and INSN ends up in the same 16-byte page.
15634
15635 The smallest offset in the page at which INSN can start is the case where
15636 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
15637 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN).
15638 */
15639 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
15640 {
15641
15642 nbytes += min_insn_size (insn);
15643 if (dump_file)
15644 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
15645 INSN_UID (insn), min_insn_size (insn));
15646 if ((GET_CODE (insn) == JUMP_INSN
15647 && GET_CODE (PATTERN (insn)) != ADDR_VEC
15648 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
15649 || GET_CODE (insn) == CALL_INSN)
15650 njumps++;
15651 else
15652 continue;
15653
15654 while (njumps > 3)
15655 {
15656 start = NEXT_INSN (start);
15657 if ((GET_CODE (start) == JUMP_INSN
15658 && GET_CODE (PATTERN (start)) != ADDR_VEC
15659 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
15660 || GET_CODE (start) == CALL_INSN)
15661 njumps--, isjump = 1;
15662 else
15663 isjump = 0;
15664 nbytes -= min_insn_size (start);
15665 }
15666 if (njumps < 0)
15667 abort ();
15668 if (dump_file)
15669 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
15670 INSN_UID (start), INSN_UID (insn), nbytes);
15671
15672 if (njumps == 3 && isjump && nbytes < 16)
15673 {
15674 int padsize = 15 - nbytes + min_insn_size (insn);
15675
15676 if (dump_file)
15677 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
15678 INSN_UID (insn), padsize);
15679 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
15680 }
15681 }
15682 }
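/* Illustrative arithmetic for the padding above: suppose START itself is a
   jump (so ISJUMP is set), the interval (START, INSN] holds three more jumps,
   NBYTES is 12 and INSN is estimated at 2 bytes.  Then
   padsize = 15 - 12 + 2 = 5, so a 5-byte align is emitted before INSN and the
   four jumps can no longer all land in one 16-byte window.  */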
15683
15684 /* AMD Athlon works faster
15685 when RET is not the destination of a conditional jump or directly preceded
15686 by another jump instruction. We avoid the penalty by inserting a NOP just
15687 before RET instructions in such cases. */
15688 static void
15689 ix86_pad_returns (void)
15690 {
15691 edge e;
15692 edge_iterator ei;
15693
15694 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
15695 {
15696 basic_block bb = e->src;
15697 rtx ret = BB_END (bb);
15698 rtx prev;
15699 bool replace = false;
15700
15701 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
15702 || !maybe_hot_bb_p (bb))
15703 continue;
15704 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
15705 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
15706 break;
15707 if (prev && GET_CODE (prev) == CODE_LABEL)
15708 {
15709 edge e;
15710 edge_iterator ei;
15711
15712 FOR_EACH_EDGE (e, ei, bb->preds)
15713 if (EDGE_FREQUENCY (e) && e->src->index >= 0
15714 && !(e->flags & EDGE_FALLTHRU))
15715 replace = true;
15716 }
15717 if (!replace)
15718 {
15719 prev = prev_active_insn (ret);
15720 if (prev
15721 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
15722 || GET_CODE (prev) == CALL_INSN))
15723 replace = true;
15724 /* Empty functions get a branch mispredict even when the jump destination
15725 is not visible to us. */
15726 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
15727 replace = true;
15728 }
15729 if (replace)
15730 {
15731 emit_insn_before (gen_return_internal_long (), ret);
15732 delete_insn (ret);
15733 }
15734 }
15735 }
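/* Note on the replacement above: RET itself is a 1-byte instruction, so the
   return_internal_long pattern is expected to expand to a longer encoding
   (commonly the 2-byte "rep ret" recommended for K8) so that the return is
   no longer a single-byte branch target.  */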
15736
15737 /* Implement machine-specific optimizations. We implement padding of returns
15738 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
15739 static void
15740 ix86_reorg (void)
15741 {
15742 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
15743 ix86_pad_returns ();
15744 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
15745 ix86_avoid_jump_misspredicts ();
15746 }
15747
15748 /* Return nonzero when a QImode register that must be represented via a REX
15749 prefix is used. */
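/* In 64-bit mode a QImode reference to any hard register other than the four
   legacy a/b/c/d registers (hard register numbers 0-3 in this backend) --
   i.e. %sil, %dil, %bpl, %spl and the r8b-r15b registers -- can only be
   encoded with a REX prefix, hence the REGNO >= 4 test below.  */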
15750 bool
15751 x86_extended_QIreg_mentioned_p (rtx insn)
15752 {
15753 int i;
15754 extract_insn_cached (insn);
15755 for (i = 0; i < recog_data.n_operands; i++)
15756 if (REG_P (recog_data.operand[i])
15757 && REGNO (recog_data.operand[i]) >= 4)
15758 return true;
15759 return false;
15760 }
15761
15762 /* Return nonzero when P points to a register encoded via a REX prefix.
15763 Called via for_each_rtx. */
15764 static int
15765 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
15766 {
15767 unsigned int regno;
15768 if (!REG_P (*p))
15769 return 0;
15770 regno = REGNO (*p);
15771 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
15772 }
15773
15774 /* Return true when INSN mentions a register that must be encoded using a
15775 REX prefix. */
15776 bool
15777 x86_extended_reg_mentioned_p (rtx insn)
15778 {
15779 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
15780 }
15781
15782 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
15783 optabs would emit if we didn't have TFmode patterns. */
15784
15785 void
15786 x86_emit_floatuns (rtx operands[2])
15787 {
15788 rtx neglab, donelab, i0, i1, f0, in, out;
15789 enum machine_mode mode, inmode;
15790
15791 inmode = GET_MODE (operands[1]);
15792 if (inmode != SImode
15793 && inmode != DImode)
15794 abort ();
15795
15796 out = operands[0];
15797 in = force_reg (inmode, operands[1]);
15798 mode = GET_MODE (out);
15799 neglab = gen_label_rtx ();
15800 donelab = gen_label_rtx ();
15801 i1 = gen_reg_rtx (Pmode);
15802 f0 = gen_reg_rtx (mode);
15803
15804 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
15805
15806 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
15807 emit_jump_insn (gen_jump (donelab));
15808 emit_barrier ();
15809
15810 emit_label (neglab);
15811
15812 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15813 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15814 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
15815 expand_float (f0, i0, 0);
15816 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
15817
15818 emit_label (donelab);
15819 }
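/* The sequence above implements the usual unsigned-to-float trick; as a
   C-level sketch (illustrative only, not the literal RTL):

       if ((signed) in >= 0)
         out = (FLOAT) in;                             -- signed convert is exact
       else
         out = 2.0 * (FLOAT) ((in >> 1) | (in & 1));   -- halve with a sticky
                                                       -- low bit, then double

   The OR-ed low bit keeps the final rounding of the halved value correct.  */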
15820 \f
15821 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15822 with all elements equal to VAR. Return true if successful. */
15823
15824 static bool
15825 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
15826 rtx target, rtx val)
15827 {
15828 enum machine_mode smode, wsmode, wvmode;
15829 rtx x;
15830
15831 switch (mode)
15832 {
15833 case V2SImode:
15834 case V2SFmode:
15835 if (!mmx_ok && !TARGET_SSE)
15836 return false;
15837 /* FALLTHRU */
15838
15839 case V2DFmode:
15840 case V2DImode:
15841 case V4SFmode:
15842 case V4SImode:
15843 val = force_reg (GET_MODE_INNER (mode), val);
15844 x = gen_rtx_VEC_DUPLICATE (mode, val);
15845 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15846 return true;
15847
15848 case V4HImode:
15849 if (!mmx_ok)
15850 return false;
15851 val = gen_lowpart (SImode, val);
15852 x = gen_rtx_TRUNCATE (HImode, val);
15853 x = gen_rtx_VEC_DUPLICATE (mode, x);
15854 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15855 return true;
15856
15857 case V8QImode:
15858 if (!mmx_ok)
15859 return false;
15860 smode = QImode;
15861 wsmode = HImode;
15862 wvmode = V4HImode;
15863 goto widen;
15864 case V8HImode:
15865 smode = HImode;
15866 wsmode = SImode;
15867 wvmode = V4SImode;
15868 goto widen;
15869 case V16QImode:
15870 smode = QImode;
15871 wsmode = HImode;
15872 wvmode = V8HImode;
15873 goto widen;
15874 widen:
15875 /* Replicate the value once into the next wider mode and recurse. */
15876 val = convert_modes (wsmode, smode, val, true);
15877 x = expand_simple_binop (wsmode, ASHIFT, val,
15878 GEN_INT (GET_MODE_BITSIZE (smode)),
15879 NULL_RTX, 1, OPTAB_LIB_WIDEN);
15880 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
15881
15882 x = gen_reg_rtx (wvmode);
15883 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
15884 gcc_unreachable ();
15885 emit_move_insn (target, gen_lowpart (mode, x));
15886 return true;
15887
15888 default:
15889 return false;
15890 }
15891 }
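/* Illustrative example of the widening path above: to broadcast a QImode
   value V into a V16QImode register, V is first zero-extended to HImode and
   combined with a copy of itself shifted left by 8, giving the 16-bit value
   (V << 8) | V; that value is then broadcast through the V8HImode case and
   the result is reinterpreted as V16QImode.  */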
15892
15893 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15894 whose low element is VAR, and other elements are zero. Return true
15895 if successful. */
15896
15897 static bool
15898 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
15899 rtx target, rtx var)
15900 {
15901 enum machine_mode vsimode;
15902 rtx x;
15903
15904 switch (mode)
15905 {
15906 case V2SFmode:
15907 case V2SImode:
15908 if (!mmx_ok && !TARGET_SSE)
15909 return false;
15910 /* FALLTHRU */
15911
15912 case V2DFmode:
15913 case V2DImode:
15914 var = force_reg (GET_MODE_INNER (mode), var);
15915 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
15916 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15917 return true;
15918
15919 case V4SFmode:
15920 case V4SImode:
15921 var = force_reg (GET_MODE_INNER (mode), var);
15922 x = gen_rtx_VEC_DUPLICATE (mode, var);
15923 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
15924 emit_insn (gen_rtx_SET (VOIDmode, target, x));
15925 return true;
15926
15927 case V8HImode:
15928 case V16QImode:
15929 vsimode = V4SImode;
15930 goto widen;
15931 case V4HImode:
15932 case V8QImode:
15933 if (!mmx_ok)
15934 return false;
15935 vsimode = V2SImode;
15936 goto widen;
15937 widen:
15938 /* Zero extend the variable element to SImode and recurse. */
15939 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
15940
15941 x = gen_reg_rtx (vsimode);
15942 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
15943 gcc_unreachable ();
15944
15945 emit_move_insn (target, gen_lowpart (mode, x));
15946 return true;
15947
15948 default:
15949 return false;
15950 }
15951 }
15952
15953 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
15954 consisting of the values in VALS. It is known that all elements
15955 except ONE_VAR are constants. Return true if successful. */
15956
15957 static bool
15958 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
15959 rtx target, rtx vals, int one_var)
15960 {
15961 rtx var = XVECEXP (vals, 0, one_var);
15962 enum machine_mode wmode;
15963 rtx const_vec, x;
15964
15965 XVECEXP (vals, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
15966 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
15967
15968 switch (mode)
15969 {
15970 case V2DFmode:
15971 case V2DImode:
15972 case V2SFmode:
15973 case V2SImode:
15974 /* For the two element vectors, it's just as easy to use
15975 the general case. */
15976 return false;
15977
15978 case V4SFmode:
15979 case V4SImode:
15980 case V8HImode:
15981 case V4HImode:
15982 break;
15983
15984 case V16QImode:
15985 wmode = V8HImode;
15986 goto widen;
15987 case V8QImode:
15988 wmode = V4HImode;
15989 goto widen;
15990 widen:
15991 /* There's no way to set one QImode entry easily. Combine
15992 the variable value with its adjacent constant value, and
15993 promote to an HImode set. */
15994 x = XVECEXP (vals, 0, one_var ^ 1);
15995 if (one_var & 1)
15996 {
15997 var = convert_modes (HImode, QImode, var, true);
15998 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
15999 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16000 x = GEN_INT (INTVAL (x) & 0xff);
16001 }
16002 else
16003 {
16004 var = convert_modes (HImode, QImode, var, true);
16005 x = gen_int_mode (INTVAL (x) << 8, HImode);
16006 }
16007 if (x != const0_rtx)
16008 var = expand_simple_binop (HImode, IOR, var, x, var,
16009 1, OPTAB_LIB_WIDEN);
16010
16011 x = gen_reg_rtx (wmode);
16012 emit_move_insn (x, gen_lowpart (wmode, const_vec));
16013 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
16014
16015 emit_move_insn (target, gen_lowpart (mode, x));
16016 return true;
16017
16018 default:
16019 return false;
16020 }
16021
16022 emit_move_insn (target, const_vec);
16023 ix86_expand_vector_set (mmx_ok, target, var, one_var);
16024 return true;
16025 }
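/* Illustrative example of the QImode widening above: for a V16QImode vector
   whose only variable element is element 5, the variable byte is merged with
   the constant byte of element 4 into a single HImode value
   (var << 8) | (c4 & 0xff), the constant vector is copied into a V8HImode
   register, HImode element 2 (= 5 >> 1) of that copy is overwritten with the
   merged value, and the copy is moved into TARGET.  */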
16026
16027 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
16028 all values variable, and none identical. */
16029
16030 static void
16031 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
16032 rtx target, rtx vals)
16033 {
16034 enum machine_mode half_mode = GET_MODE_INNER (mode);
16035 rtx op0 = NULL, op1 = NULL;
16036 bool use_vec_concat = false;
16037
16038 switch (mode)
16039 {
16040 case V2SFmode:
16041 case V2SImode:
16042 if (!mmx_ok && !TARGET_SSE)
16043 break;
16044 /* FALLTHRU */
16045
16046 case V2DFmode:
16047 case V2DImode:
16048 /* For the two element vectors, we always implement VEC_CONCAT. */
16049 op0 = XVECEXP (vals, 0, 0);
16050 op1 = XVECEXP (vals, 0, 1);
16051 use_vec_concat = true;
16052 break;
16053
16054 case V4SFmode:
16055 half_mode = V2SFmode;
16056 goto half;
16057 case V4SImode:
16058 half_mode = V2SImode;
16059 goto half;
16060 half:
16061 {
16062 rtvec v;
16063
16064 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
16065 Recurse to load the two halves. */
16066
16067 op0 = gen_reg_rtx (half_mode);
16068 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
16069 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
16070
16071 op1 = gen_reg_rtx (half_mode);
16072 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
16073 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
16074
16075 use_vec_concat = true;
16076 }
16077 break;
16078
16079 case V8HImode:
16080 case V16QImode:
16081 case V4HImode:
16082 case V8QImode:
16083 break;
16084
16085 default:
16086 gcc_unreachable ();
16087 }
16088
16089 if (use_vec_concat)
16090 {
16091 if (!register_operand (op0, half_mode))
16092 op0 = force_reg (half_mode, op0);
16093 if (!register_operand (op1, half_mode))
16094 op1 = force_reg (half_mode, op1);
16095
16096 emit_insn (gen_rtx_SET (VOIDmode, target,
16097 gen_rtx_VEC_CONCAT (mode, op0, op1)));
16098 }
16099 else
16100 {
16101 int i, j, n_elts, n_words, n_elt_per_word;
16102 enum machine_mode inner_mode;
16103 rtx words[4], shift;
16104
16105 inner_mode = GET_MODE_INNER (mode);
16106 n_elts = GET_MODE_NUNITS (mode);
16107 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
16108 n_elt_per_word = n_elts / n_words;
16109 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
16110
16111 for (i = 0; i < n_words; ++i)
16112 {
16113 rtx word = NULL_RTX;
16114
16115 for (j = 0; j < n_elt_per_word; ++j)
16116 {
16117 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
16118 elt = convert_modes (word_mode, inner_mode, elt, true);
16119
16120 if (j == 0)
16121 word = elt;
16122 else
16123 {
16124 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
16125 word, 1, OPTAB_LIB_WIDEN);
16126 word = expand_simple_binop (word_mode, IOR, word, elt,
16127 word, 1, OPTAB_LIB_WIDEN);
16128 }
16129 }
16130
16131 words[i] = word;
16132 }
16133
16134 if (n_words == 1)
16135 emit_move_insn (target, gen_lowpart (mode, words[0]));
16136 else if (n_words == 2)
16137 {
16138 rtx tmp = gen_reg_rtx (mode);
16139 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
16140 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
16141 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
16142 emit_move_insn (target, tmp);
16143 }
16144 else if (n_words == 4)
16145 {
16146 rtx tmp = gen_reg_rtx (V4SImode);
16147 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
16148 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
16149 emit_move_insn (target, gen_lowpart (mode, tmp));
16150 }
16151 else
16152 gcc_unreachable ();
16153 }
16154 }
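/* A worked example of the word-building loop above (illustrative): for a
   V4HImode vector {a, b, c, d} on a 32-bit target, each SImode word is built
   starting from its higher-indexed element and shifting left before OR-ing in
   the lower one, so words[0] = (b << 16) | a and words[1] = (d << 16) | c;
   the two words are then moved into the low and high halves of the result.  */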
16155
16156 /* Initialize vector TARGET via VALS. Suppress the use of MMX
16157 instructions unless MMX_OK is true. */
16158
16159 void
16160 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
16161 {
16162 enum machine_mode mode = GET_MODE (target);
16163 enum machine_mode inner_mode = GET_MODE_INNER (mode);
16164 int n_elts = GET_MODE_NUNITS (mode);
16165 int n_var = 0, one_var = -1;
16166 bool all_same = true, all_const_zero = true;
16167 int i;
16168 rtx x;
16169
16170 for (i = 0; i < n_elts; ++i)
16171 {
16172 x = XVECEXP (vals, 0, i);
16173 if (!CONSTANT_P (x))
16174 n_var++, one_var = i;
16175 else if (x != CONST0_RTX (inner_mode))
16176 all_const_zero = false;
16177 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
16178 all_same = false;
16179 }
16180
16181 /* Constants are best loaded from the constant pool. */
16182 if (n_var == 0)
16183 {
16184 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
16185 return;
16186 }
16187
16188 /* If all values are identical, broadcast the value. */
16189 if (all_same
16190 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
16191 XVECEXP (vals, 0, 0)))
16192 return;
16193
16194 /* Values where only one field is non-constant are best loaded from
16195 the pool and overwritten via move later. */
16196 if (n_var == 1)
16197 {
16198 if (all_const_zero && one_var == 0
16199 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
16200 XVECEXP (vals, 0, 0)))
16201 return;
16202
16203 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
16204 return;
16205 }
16206
16207 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
16208 }
16209
16210 void
16211 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
16212 {
16213 enum machine_mode mode = GET_MODE (target);
16214 enum machine_mode inner_mode = GET_MODE_INNER (mode);
16215 bool use_vec_merge = false;
16216 rtx tmp;
16217
16218 switch (mode)
16219 {
16220 case V2SFmode:
16221 case V2SImode:
16222 if (!mmx_ok)
16223 break;
16224 /* FALLTHRU */
16225
16226 case V2DFmode:
16227 case V2DImode:
16228 {
16229 rtx op0, op1;
16230
16231 /* For the two element vectors, we implement a VEC_CONCAT with
16232 the extraction of the other element. */
16233
16234 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
16235 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
16236
16237 if (elt == 0)
16238 op0 = val, op1 = tmp;
16239 else
16240 op0 = tmp, op1 = val;
16241
16242 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
16243 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
16244 }
16245 return;
16246
16247 case V4SFmode:
16248 switch (elt)
16249 {
16250 case 0:
16251 use_vec_merge = true;
16252 break;
16253
16254 case 1:
16255 /* tmp = op0 = A B C D */
16256 tmp = copy_to_reg (target);
16257
16258 /* op0 = C C D D */
16259 emit_insn (gen_sse_unpcklps (target, target, target));
16260
16261 /* op0 = C C D X */
16262 ix86_expand_vector_set (false, target, val, 0);
16263
16264 /* op0 = A B X D */
16265 emit_insn (gen_sse_shufps_1 (target, target, tmp,
16266 GEN_INT (1), GEN_INT (0),
16267 GEN_INT (2), GEN_INT (3)));
16268 return;
16269
16270 case 2:
16271 tmp = copy_to_reg (target);
16272 ix86_expand_vector_set (false, target, val, 0);
16273 emit_insn (gen_sse_shufps_1 (target, target, tmp,
16274 GEN_INT (0), GEN_INT (1),
16275 GEN_INT (0), GEN_INT (3)));
16276 return;
16277
16278 case 3:
16279 tmp = copy_to_reg (target);
16280 ix86_expand_vector_set (false, target, val, 0);
16281 emit_insn (gen_sse_shufps_1 (target, target, tmp,
16282 GEN_INT (0), GEN_INT (1),
16283 GEN_INT (2), GEN_INT (0)));
16284 return;
16285
16286 default:
16287 gcc_unreachable ();
16288 }
16289 break;
16290
16291 case V4SImode:
16292 /* Element 0 handled by vec_merge below. */
16293 if (elt == 0)
16294 {
16295 use_vec_merge = true;
16296 break;
16297 }
16298
16299 if (TARGET_SSE2)
16300 {
16301 /* With SSE2, use integer shuffles to swap element 0 and ELT,
16302 store into element 0, then shuffle them back. */
16303
16304 rtx order[4];
16305
16306 order[0] = GEN_INT (elt);
16307 order[1] = const1_rtx;
16308 order[2] = const2_rtx;
16309 order[3] = GEN_INT (3);
16310 order[elt] = const0_rtx;
16311
16312 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
16313 order[1], order[2], order[3]));
16314
16315 ix86_expand_vector_set (false, target, val, 0);
16316
16317 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
16318 order[1], order[2], order[3]));
16319 }
16320 else
16321 {
16322 /* For SSE1, we have to reuse the V4SF code. */
16323 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
16324 gen_lowpart (SFmode, val), elt);
16325 }
16326 return;
16327
16328 case V8HImode:
16329 use_vec_merge = TARGET_SSE2;
16330 break;
16331 case V4HImode:
16332 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
16333 break;
16334
16335 case V16QImode:
16336 case V8QImode:
16337 default:
16338 break;
16339 }
16340
16341 if (use_vec_merge)
16342 {
16343 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
16344 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
16345 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
16346 }
16347 else
16348 {
16349 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
16350
16351 emit_move_insn (mem, target);
16352
16353 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
16354 emit_move_insn (tmp, val);
16355
16356 emit_move_insn (target, mem);
16357 }
16358 }
16359
16360 void
16361 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
16362 {
16363 enum machine_mode mode = GET_MODE (vec);
16364 enum machine_mode inner_mode = GET_MODE_INNER (mode);
16365 bool use_vec_extr = false;
16366 rtx tmp;
16367
16368 switch (mode)
16369 {
16370 case V2SImode:
16371 case V2SFmode:
16372 if (!mmx_ok)
16373 break;
16374 /* FALLTHRU */
16375
16376 case V2DFmode:
16377 case V2DImode:
16378 use_vec_extr = true;
16379 break;
16380
16381 case V4SFmode:
16382 switch (elt)
16383 {
16384 case 0:
16385 tmp = vec;
16386 break;
16387
16388 case 1:
16389 case 3:
16390 tmp = gen_reg_rtx (mode);
16391 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
16392 GEN_INT (elt), GEN_INT (elt),
16393 GEN_INT (elt), GEN_INT (elt)));
16394 break;
16395
16396 case 2:
16397 tmp = gen_reg_rtx (mode);
16398 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
16399 break;
16400
16401 default:
16402 gcc_unreachable ();
16403 }
16404 vec = tmp;
16405 use_vec_extr = true;
16406 elt = 0;
16407 break;
16408
16409 case V4SImode:
16410 if (TARGET_SSE2)
16411 {
16412 switch (elt)
16413 {
16414 case 0:
16415 tmp = vec;
16416 break;
16417
16418 case 1:
16419 case 3:
16420 tmp = gen_reg_rtx (mode);
16421 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
16422 GEN_INT (elt), GEN_INT (elt),
16423 GEN_INT (elt), GEN_INT (elt)));
16424 break;
16425
16426 case 2:
16427 tmp = gen_reg_rtx (mode);
16428 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
16429 break;
16430
16431 default:
16432 gcc_unreachable ();
16433 }
16434 vec = tmp;
16435 use_vec_extr = true;
16436 elt = 0;
16437 }
16438 else
16439 {
16440 /* For SSE1, we have to reuse the V4SF code. */
16441 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
16442 gen_lowpart (V4SFmode, vec), elt);
16443 return;
16444 }
16445 break;
16446
16447 case V8HImode:
16448 use_vec_extr = TARGET_SSE2;
16449 break;
16450 case V4HImode:
16451 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
16452 break;
16453
16454 case V16QImode:
16455 case V8QImode:
16456 /* ??? Could extract the appropriate HImode element and shift. */
16457 default:
16458 break;
16459 }
16460
16461 if (use_vec_extr)
16462 {
16463 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
16464 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
16465
16466 /* Let the rtl optimizers know about the zero extension performed. */
16467 if (inner_mode == HImode)
16468 {
16469 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
16470 target = gen_lowpart (SImode, target);
16471 }
16472
16473 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
16474 }
16475 else
16476 {
16477 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
16478
16479 emit_move_insn (mem, vec);
16480
16481 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
16482 emit_move_insn (target, tmp);
16483 }
16484 }
16485 \f
16486 /* Implements target hook vector_mode_supported_p. */
16487 static bool
16488 ix86_vector_mode_supported_p (enum machine_mode mode)
16489 {
16490 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
16491 return true;
16492 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
16493 return true;
16494 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
16495 return true;
16496 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
16497 return true;
16498 return false;
16499 }
16500
16501 /* Worker function for TARGET_MD_ASM_CLOBBERS.
16502
16503 We do this in the new i386 backend to maintain source compatibility
16504 with the old cc0-based compiler. */
16505
16506 static tree
16507 ix86_md_asm_clobbers (tree clobbers)
16508 {
16509 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
16510 clobbers);
16511 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
16512 clobbers);
16513 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
16514 clobbers);
16515 return clobbers;
16516 }
16517
16518 /* Worker function for REVERSE_CONDITION. */
16519
16520 enum rtx_code
16521 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
16522 {
16523 return (mode != CCFPmode && mode != CCFPUmode
16524 ? reverse_condition (code)
16525 : reverse_condition_maybe_unordered (code));
16526 }
16527
16528 /* Output code to perform an x87 FP register move, from OPERANDS[1]
16529 to OPERANDS[0]. */
16530
16531 const char *
16532 output_387_reg_move (rtx insn, rtx *operands)
16533 {
16534 if (REG_P (operands[1])
16535 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
16536 {
16537 if (REGNO (operands[0]) == FIRST_STACK_REG
16538 && TARGET_USE_FFREEP)
16539 return "ffreep\t%y0";
16540 return "fstp\t%y0";
16541 }
16542 if (STACK_TOP_P (operands[0]))
16543 return "fld%z1\t%y1";
16544 return "fst\t%y0";
16545 }
16546
16547 /* Output code to perform a conditional jump to LABEL if the C2 flag in the
16548 FP status register is set. */
16549
16550 void
16551 ix86_emit_fp_unordered_jump (rtx label)
16552 {
16553 rtx reg = gen_reg_rtx (HImode);
16554 rtx temp;
16555
16556 emit_insn (gen_x86_fnstsw_1 (reg));
16557
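  /* A note on the two paths below: FNSTSW has placed the FPU status word in
     REG; its C2 flag is bit 10, i.e. bit 2 of the high byte.  With SAHF that
     byte is copied into EFLAGS, where C2 lands in PF, so an UNORDERED test is
     used; without SAHF the high byte is tested directly against 0x04.  */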
16558 if (TARGET_USE_SAHF)
16559 {
16560 emit_insn (gen_x86_sahf_1 (reg));
16561
16562 temp = gen_rtx_REG (CCmode, FLAGS_REG);
16563 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
16564 }
16565 else
16566 {
16567 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
16568
16569 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
16570 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
16571 }
16572
16573 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
16574 gen_rtx_LABEL_REF (VOIDmode, label),
16575 pc_rtx);
16576 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
16577 emit_jump_insn (temp);
16578 }
16579
16580 /* Output code to perform a log1p XFmode calculation. */
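/* Mathematical sketch of the expansion below: log1p(x) = ln(2) * log2(1 + x).
   The FYL2XP1 instruction computes ST1 * log2(ST0 + 1) but is only specified
   for |ST0| < 1 - sqrt(2)/2 (about 0.29289), which is the threshold tested
   against below; for larger |x| the code falls back to FYL2X on 1 + x, i.e.
   ln(2) * log2(1 + x), losing the extra precision near zero but staying in
   the instruction's valid domain.  */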
16581
16582 void ix86_emit_i387_log1p (rtx op0, rtx op1)
16583 {
16584 rtx label1 = gen_label_rtx ();
16585 rtx label2 = gen_label_rtx ();
16586
16587 rtx tmp = gen_reg_rtx (XFmode);
16588 rtx tmp2 = gen_reg_rtx (XFmode);
16589
16590 emit_insn (gen_absxf2 (tmp, op1));
16591 emit_insn (gen_cmpxf (tmp,
16592 CONST_DOUBLE_FROM_REAL_VALUE (
16593 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
16594 XFmode)));
16595 emit_jump_insn (gen_bge (label1));
16596
16597 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
16598 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
16599 emit_jump (label2);
16600
16601 emit_label (label1);
16602 emit_move_insn (tmp, CONST1_RTX (XFmode));
16603 emit_insn (gen_addxf3 (tmp, op1, tmp));
16604 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
16605 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
16606
16607 emit_label (label2);
16608 }
16609
16610 /* Solaris named-section hook. Parameters are as for
16611 named_section_real. */
16612
16613 static void
16614 i386_solaris_elf_named_section (const char *name, unsigned int flags,
16615 tree decl)
16616 {
16617 /* With Binutils 2.15, the "@unwind" marker must be specified on
16618 every occurrence of the ".eh_frame" section, not just the first
16619 one. */
16620 if (TARGET_64BIT
16621 && strcmp (name, ".eh_frame") == 0)
16622 {
16623 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
16624 flags & SECTION_WRITE ? "aw" : "a");
16625 return;
16626 }
16627 default_elf_asm_named_section (name, flags, decl);
16628 }
16629
16630 #include "gt-i386.h"