[gcc.git] gcc/config/i386/i386.c (blob a60558448cabf771909a26acb3157a1f6c59caf7)
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51
52 #ifndef CHECK_STACK_LIMIT
53 #define CHECK_STACK_LIMIT (-1)
54 #endif
55
56 /* Return index of given mode in mult and division cost tables. */
57 #define MODE_INDEX(mode) \
58 ((mode) == QImode ? 0 \
59 : (mode) == HImode ? 1 \
60 : (mode) == SImode ? 2 \
61 : (mode) == DImode ? 3 \
62 : 4)
63
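/* Editor's note (not in the original source): the per-mode cost arrays
   in the tables below are indexed via MODE_INDEX, so a query such as

     ix86_cost->mult_init[MODE_INDEX (SImode)]

   picks entry 2, the SImode "cost of starting a multiply".  The field
   name mult_init is assumed from the comments here; the authoritative
   struct processor_costs layout lives in i386.h.  */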
64 /* Processor costs (relative to an add) */
65 static const
 66 struct processor_costs size_cost = { /* costs for tuning for size */
67 2, /* cost of an add instruction */
68 3, /* cost of a lea instruction */
69 2, /* variable shift costs */
70 3, /* constant shift costs */
71 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
72 0, /* cost of multiply per each bit set */
73 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
74 3, /* cost of movsx */
75 3, /* cost of movzx */
76 0, /* "large" insn */
77 2, /* MOVE_RATIO */
78 2, /* cost for loading QImode using movzbl */
79 {2, 2, 2}, /* cost of loading integer registers
80 in QImode, HImode and SImode.
81 Relative to reg-reg move (2). */
82 {2, 2, 2}, /* cost of storing integer registers */
83 2, /* cost of reg,reg fld/fst */
84 {2, 2, 2}, /* cost of loading fp registers
85 in SFmode, DFmode and XFmode */
 86 {2, 2, 2}, /* cost of storing fp registers */
87 3, /* cost of moving MMX register */
88 {3, 3}, /* cost of loading MMX registers
89 in SImode and DImode */
90 {3, 3}, /* cost of storing MMX registers
91 in SImode and DImode */
92 3, /* cost of moving SSE register */
93 {3, 3, 3}, /* cost of loading SSE registers
94 in SImode, DImode and TImode */
95 {3, 3, 3}, /* cost of storing SSE registers
96 in SImode, DImode and TImode */
97 3, /* MMX or SSE register to integer */
98 0, /* size of prefetch block */
99 0, /* number of parallel prefetches */
100 1, /* Branch cost */
101 2, /* cost of FADD and FSUB insns. */
102 2, /* cost of FMUL instruction. */
103 2, /* cost of FDIV instruction. */
104 2, /* cost of FABS instruction. */
105 2, /* cost of FCHS instruction. */
106 2, /* cost of FSQRT instruction. */
107 };
108
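/* Editor's note: when compiling with -Os, override_options below sets
   ix86_cost = &size_cost, so the cost hooks use the size_cost table above
   instead of the per-processor tables that follow and optimize for size
   rather than speed.  */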
109 /* Processor costs (relative to an add) */
110 static const
111 struct processor_costs i386_cost = { /* 386 specific costs */
112 1, /* cost of an add instruction */
113 1, /* cost of a lea instruction */
114 3, /* variable shift costs */
115 2, /* constant shift costs */
116 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
117 1, /* cost of multiply per each bit set */
118 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
119 3, /* cost of movsx */
120 2, /* cost of movzx */
121 15, /* "large" insn */
122 3, /* MOVE_RATIO */
123 4, /* cost for loading QImode using movzbl */
124 {2, 4, 2}, /* cost of loading integer registers
125 in QImode, HImode and SImode.
126 Relative to reg-reg move (2). */
127 {2, 4, 2}, /* cost of storing integer registers */
128 2, /* cost of reg,reg fld/fst */
129 {8, 8, 8}, /* cost of loading fp registers
130 in SFmode, DFmode and XFmode */
 131 {8, 8, 8}, /* cost of storing fp registers */
132 2, /* cost of moving MMX register */
133 {4, 8}, /* cost of loading MMX registers
134 in SImode and DImode */
135 {4, 8}, /* cost of storing MMX registers
136 in SImode and DImode */
137 2, /* cost of moving SSE register */
138 {4, 8, 16}, /* cost of loading SSE registers
139 in SImode, DImode and TImode */
140 {4, 8, 16}, /* cost of storing SSE registers
141 in SImode, DImode and TImode */
142 3, /* MMX or SSE register to integer */
143 0, /* size of prefetch block */
144 0, /* number of parallel prefetches */
145 1, /* Branch cost */
146 23, /* cost of FADD and FSUB insns. */
147 27, /* cost of FMUL instruction. */
148 88, /* cost of FDIV instruction. */
149 22, /* cost of FABS instruction. */
150 24, /* cost of FCHS instruction. */
151 122, /* cost of FSQRT instruction. */
152 };
153
154 static const
155 struct processor_costs i486_cost = { /* 486 specific costs */
156 1, /* cost of an add instruction */
157 1, /* cost of a lea instruction */
158 3, /* variable shift costs */
159 2, /* constant shift costs */
160 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
161 1, /* cost of multiply per each bit set */
162 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
163 3, /* cost of movsx */
164 2, /* cost of movzx */
165 15, /* "large" insn */
166 3, /* MOVE_RATIO */
167 4, /* cost for loading QImode using movzbl */
168 {2, 4, 2}, /* cost of loading integer registers
169 in QImode, HImode and SImode.
170 Relative to reg-reg move (2). */
171 {2, 4, 2}, /* cost of storing integer registers */
172 2, /* cost of reg,reg fld/fst */
173 {8, 8, 8}, /* cost of loading fp registers
174 in SFmode, DFmode and XFmode */
 175 {8, 8, 8}, /* cost of storing fp registers */
176 2, /* cost of moving MMX register */
177 {4, 8}, /* cost of loading MMX registers
178 in SImode and DImode */
179 {4, 8}, /* cost of storing MMX registers
180 in SImode and DImode */
181 2, /* cost of moving SSE register */
182 {4, 8, 16}, /* cost of loading SSE registers
183 in SImode, DImode and TImode */
184 {4, 8, 16}, /* cost of storing SSE registers
185 in SImode, DImode and TImode */
186 3, /* MMX or SSE register to integer */
187 0, /* size of prefetch block */
188 0, /* number of parallel prefetches */
189 1, /* Branch cost */
190 8, /* cost of FADD and FSUB insns. */
191 16, /* cost of FMUL instruction. */
192 73, /* cost of FDIV instruction. */
193 3, /* cost of FABS instruction. */
194 3, /* cost of FCHS instruction. */
195 83, /* cost of FSQRT instruction. */
196 };
197
198 static const
199 struct processor_costs pentium_cost = {
200 1, /* cost of an add instruction */
201 1, /* cost of a lea instruction */
202 4, /* variable shift costs */
203 1, /* constant shift costs */
204 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
205 0, /* cost of multiply per each bit set */
206 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
207 3, /* cost of movsx */
208 2, /* cost of movzx */
209 8, /* "large" insn */
210 6, /* MOVE_RATIO */
211 6, /* cost for loading QImode using movzbl */
212 {2, 4, 2}, /* cost of loading integer registers
213 in QImode, HImode and SImode.
214 Relative to reg-reg move (2). */
215 {2, 4, 2}, /* cost of storing integer registers */
216 2, /* cost of reg,reg fld/fst */
217 {2, 2, 6}, /* cost of loading fp registers
218 in SFmode, DFmode and XFmode */
 219 {4, 4, 6}, /* cost of storing fp registers */
220 8, /* cost of moving MMX register */
221 {8, 8}, /* cost of loading MMX registers
222 in SImode and DImode */
223 {8, 8}, /* cost of storing MMX registers
224 in SImode and DImode */
225 2, /* cost of moving SSE register */
226 {4, 8, 16}, /* cost of loading SSE registers
227 in SImode, DImode and TImode */
228 {4, 8, 16}, /* cost of storing SSE registers
229 in SImode, DImode and TImode */
230 3, /* MMX or SSE register to integer */
231 0, /* size of prefetch block */
232 0, /* number of parallel prefetches */
233 2, /* Branch cost */
234 3, /* cost of FADD and FSUB insns. */
235 3, /* cost of FMUL instruction. */
236 39, /* cost of FDIV instruction. */
237 1, /* cost of FABS instruction. */
238 1, /* cost of FCHS instruction. */
239 70, /* cost of FSQRT instruction. */
240 };
241
242 static const
243 struct processor_costs pentiumpro_cost = {
244 1, /* cost of an add instruction */
245 1, /* cost of a lea instruction */
246 1, /* variable shift costs */
247 1, /* constant shift costs */
248 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
249 0, /* cost of multiply per each bit set */
250 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
251 1, /* cost of movsx */
252 1, /* cost of movzx */
253 8, /* "large" insn */
254 6, /* MOVE_RATIO */
255 2, /* cost for loading QImode using movzbl */
256 {4, 4, 4}, /* cost of loading integer registers
257 in QImode, HImode and SImode.
258 Relative to reg-reg move (2). */
259 {2, 2, 2}, /* cost of storing integer registers */
260 2, /* cost of reg,reg fld/fst */
261 {2, 2, 6}, /* cost of loading fp registers
262 in SFmode, DFmode and XFmode */
 263 {4, 4, 6}, /* cost of storing fp registers */
264 2, /* cost of moving MMX register */
265 {2, 2}, /* cost of loading MMX registers
266 in SImode and DImode */
267 {2, 2}, /* cost of storing MMX registers
268 in SImode and DImode */
269 2, /* cost of moving SSE register */
270 {2, 2, 8}, /* cost of loading SSE registers
271 in SImode, DImode and TImode */
272 {2, 2, 8}, /* cost of storing SSE registers
273 in SImode, DImode and TImode */
274 3, /* MMX or SSE register to integer */
275 32, /* size of prefetch block */
276 6, /* number of parallel prefetches */
277 2, /* Branch cost */
278 3, /* cost of FADD and FSUB insns. */
279 5, /* cost of FMUL instruction. */
280 56, /* cost of FDIV instruction. */
281 2, /* cost of FABS instruction. */
282 2, /* cost of FCHS instruction. */
283 56, /* cost of FSQRT instruction. */
284 };
285
286 static const
287 struct processor_costs k6_cost = {
288 1, /* cost of an add instruction */
289 2, /* cost of a lea instruction */
290 1, /* variable shift costs */
291 1, /* constant shift costs */
292 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
293 0, /* cost of multiply per each bit set */
294 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
295 2, /* cost of movsx */
296 2, /* cost of movzx */
297 8, /* "large" insn */
298 4, /* MOVE_RATIO */
299 3, /* cost for loading QImode using movzbl */
300 {4, 5, 4}, /* cost of loading integer registers
301 in QImode, HImode and SImode.
302 Relative to reg-reg move (2). */
303 {2, 3, 2}, /* cost of storing integer registers */
304 4, /* cost of reg,reg fld/fst */
305 {6, 6, 6}, /* cost of loading fp registers
306 in SFmode, DFmode and XFmode */
 307 {4, 4, 4}, /* cost of storing fp registers */
308 2, /* cost of moving MMX register */
309 {2, 2}, /* cost of loading MMX registers
310 in SImode and DImode */
311 {2, 2}, /* cost of storing MMX registers
312 in SImode and DImode */
313 2, /* cost of moving SSE register */
314 {2, 2, 8}, /* cost of loading SSE registers
315 in SImode, DImode and TImode */
316 {2, 2, 8}, /* cost of storing SSE registers
317 in SImode, DImode and TImode */
318 6, /* MMX or SSE register to integer */
319 32, /* size of prefetch block */
320 1, /* number of parallel prefetches */
321 1, /* Branch cost */
322 2, /* cost of FADD and FSUB insns. */
323 2, /* cost of FMUL instruction. */
324 56, /* cost of FDIV instruction. */
325 2, /* cost of FABS instruction. */
326 2, /* cost of FCHS instruction. */
327 56, /* cost of FSQRT instruction. */
328 };
329
330 static const
331 struct processor_costs athlon_cost = {
332 1, /* cost of an add instruction */
333 2, /* cost of a lea instruction */
334 1, /* variable shift costs */
335 1, /* constant shift costs */
336 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
337 0, /* cost of multiply per each bit set */
338 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
339 1, /* cost of movsx */
340 1, /* cost of movzx */
341 8, /* "large" insn */
342 9, /* MOVE_RATIO */
343 4, /* cost for loading QImode using movzbl */
344 {3, 4, 3}, /* cost of loading integer registers
345 in QImode, HImode and SImode.
346 Relative to reg-reg move (2). */
347 {3, 4, 3}, /* cost of storing integer registers */
348 4, /* cost of reg,reg fld/fst */
349 {4, 4, 12}, /* cost of loading fp registers
350 in SFmode, DFmode and XFmode */
 351 {6, 6, 8}, /* cost of storing fp registers */
352 2, /* cost of moving MMX register */
353 {4, 4}, /* cost of loading MMX registers
354 in SImode and DImode */
355 {4, 4}, /* cost of storing MMX registers
356 in SImode and DImode */
357 2, /* cost of moving SSE register */
358 {4, 4, 6}, /* cost of loading SSE registers
359 in SImode, DImode and TImode */
360 {4, 4, 5}, /* cost of storing SSE registers
361 in SImode, DImode and TImode */
362 5, /* MMX or SSE register to integer */
363 64, /* size of prefetch block */
364 6, /* number of parallel prefetches */
365 5, /* Branch cost */
366 4, /* cost of FADD and FSUB insns. */
367 4, /* cost of FMUL instruction. */
368 24, /* cost of FDIV instruction. */
369 2, /* cost of FABS instruction. */
370 2, /* cost of FCHS instruction. */
371 35, /* cost of FSQRT instruction. */
372 };
373
374 static const
375 struct processor_costs k8_cost = {
376 1, /* cost of an add instruction */
377 2, /* cost of a lea instruction */
378 1, /* variable shift costs */
379 1, /* constant shift costs */
380 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
381 0, /* cost of multiply per each bit set */
382 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
383 1, /* cost of movsx */
384 1, /* cost of movzx */
385 8, /* "large" insn */
386 9, /* MOVE_RATIO */
387 4, /* cost for loading QImode using movzbl */
388 {3, 4, 3}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {3, 4, 3}, /* cost of storing integer registers */
392 4, /* cost of reg,reg fld/fst */
393 {4, 4, 12}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
 395 {6, 6, 8}, /* cost of storing fp registers */
396 2, /* cost of moving MMX register */
397 {3, 3}, /* cost of loading MMX registers
398 in SImode and DImode */
399 {4, 4}, /* cost of storing MMX registers
400 in SImode and DImode */
401 2, /* cost of moving SSE register */
402 {4, 3, 6}, /* cost of loading SSE registers
403 in SImode, DImode and TImode */
404 {4, 4, 5}, /* cost of storing SSE registers
405 in SImode, DImode and TImode */
406 5, /* MMX or SSE register to integer */
407 64, /* size of prefetch block */
408 6, /* number of parallel prefetches */
409 5, /* Branch cost */
410 4, /* cost of FADD and FSUB insns. */
411 4, /* cost of FMUL instruction. */
412 19, /* cost of FDIV instruction. */
413 2, /* cost of FABS instruction. */
414 2, /* cost of FCHS instruction. */
415 35, /* cost of FSQRT instruction. */
416 };
417
418 static const
419 struct processor_costs pentium4_cost = {
420 1, /* cost of an add instruction */
421 3, /* cost of a lea instruction */
422 4, /* variable shift costs */
423 4, /* constant shift costs */
424 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
425 0, /* cost of multiply per each bit set */
426 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
427 1, /* cost of movsx */
428 1, /* cost of movzx */
429 16, /* "large" insn */
430 6, /* MOVE_RATIO */
431 2, /* cost for loading QImode using movzbl */
432 {4, 5, 4}, /* cost of loading integer registers
433 in QImode, HImode and SImode.
434 Relative to reg-reg move (2). */
435 {2, 3, 2}, /* cost of storing integer registers */
436 2, /* cost of reg,reg fld/fst */
437 {2, 2, 6}, /* cost of loading fp registers
438 in SFmode, DFmode and XFmode */
 439 {4, 4, 6}, /* cost of storing fp registers */
440 2, /* cost of moving MMX register */
441 {2, 2}, /* cost of loading MMX registers
442 in SImode and DImode */
443 {2, 2}, /* cost of storing MMX registers
444 in SImode and DImode */
445 12, /* cost of moving SSE register */
446 {12, 12, 12}, /* cost of loading SSE registers
447 in SImode, DImode and TImode */
448 {2, 2, 8}, /* cost of storing SSE registers
449 in SImode, DImode and TImode */
450 10, /* MMX or SSE register to integer */
451 64, /* size of prefetch block */
452 6, /* number of parallel prefetches */
453 2, /* Branch cost */
454 5, /* cost of FADD and FSUB insns. */
455 7, /* cost of FMUL instruction. */
456 43, /* cost of FDIV instruction. */
457 2, /* cost of FABS instruction. */
458 2, /* cost of FCHS instruction. */
459 43, /* cost of FSQRT instruction. */
460 };
461
462 static const
463 struct processor_costs nocona_cost = {
464 1, /* cost of an add instruction */
465 1, /* cost of a lea instruction */
466 1, /* variable shift costs */
467 1, /* constant shift costs */
468 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
469 0, /* cost of multiply per each bit set */
470 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
471 1, /* cost of movsx */
472 1, /* cost of movzx */
473 16, /* "large" insn */
474 9, /* MOVE_RATIO */
475 4, /* cost for loading QImode using movzbl */
476 {4, 4, 4}, /* cost of loading integer registers
477 in QImode, HImode and SImode.
478 Relative to reg-reg move (2). */
479 {4, 4, 4}, /* cost of storing integer registers */
480 3, /* cost of reg,reg fld/fst */
481 {12, 12, 12}, /* cost of loading fp registers
482 in SFmode, DFmode and XFmode */
 483 {4, 4, 4}, /* cost of storing fp registers */
484 6, /* cost of moving MMX register */
485 {12, 12}, /* cost of loading MMX registers
486 in SImode and DImode */
487 {12, 12}, /* cost of storing MMX registers
488 in SImode and DImode */
489 6, /* cost of moving SSE register */
490 {12, 12, 12}, /* cost of loading SSE registers
491 in SImode, DImode and TImode */
492 {12, 12, 12}, /* cost of storing SSE registers
493 in SImode, DImode and TImode */
494 8, /* MMX or SSE register to integer */
495 128, /* size of prefetch block */
496 8, /* number of parallel prefetches */
497 1, /* Branch cost */
498 6, /* cost of FADD and FSUB insns. */
499 8, /* cost of FMUL instruction. */
500 40, /* cost of FDIV instruction. */
501 3, /* cost of FABS instruction. */
502 3, /* cost of FCHS instruction. */
503 44, /* cost of FSQRT instruction. */
504 };
505
506 const struct processor_costs *ix86_cost = &pentium_cost;
507
508 /* Processor feature/optimization bitmasks. */
509 #define m_386 (1<<PROCESSOR_I386)
510 #define m_486 (1<<PROCESSOR_I486)
511 #define m_PENT (1<<PROCESSOR_PENTIUM)
512 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
513 #define m_K6 (1<<PROCESSOR_K6)
514 #define m_ATHLON (1<<PROCESSOR_ATHLON)
515 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
516 #define m_K8 (1<<PROCESSOR_K8)
517 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
518 #define m_NOCONA (1<<PROCESSOR_NOCONA)
519
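/* Editor's note: each x86_* tuning flag below is a bitmask over the
   PROCESSOR_* bits defined above.  A flag is tested by masking it with
   the bit for the active processor, for example

     if (x86_3dnow_a & (1 << ix86_arch))
       ...

   as done in override_options below; -mtune based decisions use the
   analogous TUNEMASK (assumed to be (1 << ix86_tune), from i386.h).  */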
520 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
521 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
522 const int x86_zero_extend_with_and = m_486 | m_PENT;
523 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
524 const int x86_double_with_add = ~m_386;
525 const int x86_use_bit_test = m_386;
526 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
527 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
528 const int x86_3dnow_a = m_ATHLON_K8;
529 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
 530 /* Branch hints were put in P4 based on simulation results.  But
 531 after P4 was made, no performance benefit was observed with
 532 branch hints.  They also increase code size.  As a result,
 533 icc never generates branch hints.  */
534 const int x86_branch_hints = 0;
535 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
536 const int x86_partial_reg_stall = m_PPRO;
537 const int x86_use_loop = m_K6;
538 const int x86_use_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
539 const int x86_use_mov0 = m_K6;
540 const int x86_use_cltd = ~(m_PENT | m_K6);
541 const int x86_read_modify_write = ~m_PENT;
542 const int x86_read_modify = ~(m_PENT | m_PPRO);
543 const int x86_split_long_moves = m_PPRO;
544 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
545 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
546 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
547 const int x86_qimode_math = ~(0);
548 const int x86_promote_qi_regs = 0;
549 const int x86_himode_math = ~(m_PPRO);
550 const int x86_promote_hi_regs = m_PPRO;
551 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
552 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
553 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
554 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
555 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
556 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
557 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
558 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
559 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
560 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
561 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
562 const int x86_shift1 = ~m_486;
563 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
564 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
 565 /* Set for machines where the type and dependencies are resolved on SSE register
 566 parts instead of whole registers, so we may maintain just the lower part of
 567 scalar values in the proper format, leaving the upper part undefined.  */
568 const int x86_sse_partial_regs = m_ATHLON_K8;
569 /* Athlon optimizes partial-register FPS special case, thus avoiding the
570 need for extra instructions beforehand */
571 const int x86_sse_partial_regs_for_cvtsd2ss = 0;
572 const int x86_sse_typeless_stores = m_ATHLON_K8;
573 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
574 const int x86_use_ffreep = m_ATHLON_K8;
575 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
576 const int x86_inter_unit_moves = ~(m_ATHLON_K8);
577 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
578 /* Some CPU cores are not able to predict more than 4 branch instructions in
579 the 16 byte window. */
580 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
581 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
582
 583 /* If the average insn count for a single function invocation is
 584 lower than this constant, emit a fast (but longer) prologue and
 585 epilogue.  */
586 #define FAST_PROLOGUE_INSN_COUNT 20
587
 588 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
589 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
590 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
591 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
592
593 /* Array of the smallest class containing reg number REGNO, indexed by
594 REGNO. Used by REGNO_REG_CLASS in i386.h. */
595
596 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
597 {
598 /* ax, dx, cx, bx */
599 AREG, DREG, CREG, BREG,
600 /* si, di, bp, sp */
601 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
602 /* FP registers */
603 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
604 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
605 /* arg pointer */
606 NON_Q_REGS,
607 /* flags, fpsr, dirflag, frame */
608 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
609 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
610 SSE_REGS, SSE_REGS,
611 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
612 MMX_REGS, MMX_REGS,
613 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
614 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
615 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
616 SSE_REGS, SSE_REGS,
617 };
618
619 /* The "default" register map used in 32bit mode. */
620
621 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
622 {
623 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
624 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
625 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
626 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
627 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
628 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
629 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
630 };
631
632 static int const x86_64_int_parameter_registers[6] =
633 {
634 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
635 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
636 };
637
638 static int const x86_64_int_return_registers[4] =
639 {
 640 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
641 };
642
643 /* The "default" register map used in 64bit mode. */
644 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
645 {
646 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
647 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
648 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
649 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
650 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
651 8,9,10,11,12,13,14,15, /* extended integer registers */
652 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
653 };
654
655 /* Define the register numbers to be used in Dwarf debugging information.
656 The SVR4 reference port C compiler uses the following register numbers
657 in its Dwarf output code:
658 0 for %eax (gcc regno = 0)
659 1 for %ecx (gcc regno = 2)
660 2 for %edx (gcc regno = 1)
661 3 for %ebx (gcc regno = 3)
662 4 for %esp (gcc regno = 7)
663 5 for %ebp (gcc regno = 6)
664 6 for %esi (gcc regno = 4)
665 7 for %edi (gcc regno = 5)
666 The following three DWARF register numbers are never generated by
667 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
668 believes these numbers have these meanings.
669 8 for %eip (no gcc equivalent)
670 9 for %eflags (gcc regno = 17)
671 10 for %trapno (no gcc equivalent)
672 It is not at all clear how we should number the FP stack registers
673 for the x86 architecture. If the version of SDB on x86/svr4 were
674 a bit less brain dead with respect to floating-point then we would
675 have a precedent to follow with respect to DWARF register numbers
676 for x86 FP registers, but the SDB on x86/svr4 is so completely
677 broken with respect to FP registers that it is hardly worth thinking
678 of it as something to strive for compatibility with.
679 The version of x86/svr4 SDB I have at the moment does (partially)
680 seem to believe that DWARF register number 11 is associated with
681 the x86 register %st(0), but that's about all. Higher DWARF
682 register numbers don't seem to be associated with anything in
683 particular, and even for DWARF regno 11, SDB only seems to under-
684 stand that it should say that a variable lives in %st(0) (when
685 asked via an `=' command) if we said it was in DWARF regno 11,
686 but SDB still prints garbage when asked for the value of the
687 variable in question (via a `/' command).
688 (Also note that the labels SDB prints for various FP stack regs
689 when doing an `x' command are all wrong.)
690 Note that these problems generally don't affect the native SVR4
691 C compiler because it doesn't allow the use of -O with -g and
692 because when it is *not* optimizing, it allocates a memory
693 location for each floating-point variable, and the memory
694 location is what gets described in the DWARF AT_location
695 attribute for the variable in question.
696 Regardless of the severe mental illness of the x86/svr4 SDB, we
697 do something sensible here and we use the following DWARF
698 register numbers. Note that these are all stack-top-relative
699 numbers.
700 11 for %st(0) (gcc regno = 8)
701 12 for %st(1) (gcc regno = 9)
702 13 for %st(2) (gcc regno = 10)
703 14 for %st(3) (gcc regno = 11)
704 15 for %st(4) (gcc regno = 12)
705 16 for %st(5) (gcc regno = 13)
706 17 for %st(6) (gcc regno = 14)
707 18 for %st(7) (gcc regno = 15)
708 */
709 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
710 {
711 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
712 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
713 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
714 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
715 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
716 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
717 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
718 };
719
720 /* Test and compare insns in i386.md store the information needed to
721 generate branch and scc insns here. */
722
723 rtx ix86_compare_op0 = NULL_RTX;
724 rtx ix86_compare_op1 = NULL_RTX;
725
726 #define MAX_386_STACK_LOCALS 3
727 /* Size of the register save area. */
728 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
729
730 /* Define the structure for the machine field in struct function. */
731
732 struct stack_local_entry GTY(())
733 {
734 unsigned short mode;
735 unsigned short n;
736 rtx rtl;
737 struct stack_local_entry *next;
738 };
739
740 /* Structure describing stack frame layout.
741 Stack grows downward:
742
743 [arguments]
744 <- ARG_POINTER
745 saved pc
746
747 saved frame pointer if frame_pointer_needed
748 <- HARD_FRAME_POINTER
749 [saved regs]
750
751 [padding1] \
752 )
753 [va_arg registers] (
754 > to_allocate <- FRAME_POINTER
755 [frame] (
756 )
757 [padding2] /
758 */
759 struct ix86_frame
760 {
761 int nregs;
762 int padding1;
763 int va_arg_size;
764 HOST_WIDE_INT frame;
765 int padding2;
766 int outgoing_arguments_size;
767 int red_zone_size;
768
769 HOST_WIDE_INT to_allocate;
770 /* The offsets relative to ARG_POINTER. */
771 HOST_WIDE_INT frame_pointer_offset;
772 HOST_WIDE_INT hard_frame_pointer_offset;
773 HOST_WIDE_INT stack_pointer_offset;
774
775 /* When save_regs_using_mov is set, emit prologue using
776 move instead of push instructions. */
777 bool save_regs_using_mov;
778 };
779
780 /* Used to enable/disable debugging features. */
781 const char *ix86_debug_arg_string, *ix86_debug_addr_string;
782 /* Code model option as passed by user. */
783 const char *ix86_cmodel_string;
784 /* Parsed value. */
785 enum cmodel ix86_cmodel;
786 /* Asm dialect. */
787 const char *ix86_asm_string;
788 enum asm_dialect ix86_asm_dialect = ASM_ATT;
 789 /* TLS dialect. */
790 const char *ix86_tls_dialect_string;
791 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
792
793 /* Which unit we are generating floating point math for. */
794 enum fpmath_unit ix86_fpmath;
795
 796 /* Which cpu we are scheduling for. */
797 enum processor_type ix86_tune;
798 /* Which instruction set architecture to use. */
799 enum processor_type ix86_arch;
800
801 /* Strings to hold which cpu and instruction set architecture to use. */
802 const char *ix86_tune_string; /* for -mtune=<xxx> */
803 const char *ix86_arch_string; /* for -march=<xxx> */
804 const char *ix86_fpmath_string; /* for -mfpmath=<xxx> */
805
806 /* # of registers to use to pass arguments. */
807 const char *ix86_regparm_string;
808
 809 /* True if the sse prefetch instruction is not a NOP. */
810 int x86_prefetch_sse;
811
812 /* ix86_regparm_string as a number */
813 int ix86_regparm;
814
815 /* Alignment to use for loops and jumps: */
816
817 /* Power of two alignment for loops. */
818 const char *ix86_align_loops_string;
819
820 /* Power of two alignment for non-loop jumps. */
821 const char *ix86_align_jumps_string;
822
823 /* Power of two alignment for stack boundary in bytes. */
824 const char *ix86_preferred_stack_boundary_string;
825
826 /* Preferred alignment for stack boundary in bits. */
827 unsigned int ix86_preferred_stack_boundary;
828
829 /* Values 1-5: see jump.c */
830 int ix86_branch_cost;
831 const char *ix86_branch_cost_string;
832
833 /* Power of two alignment for functions. */
834 const char *ix86_align_funcs_string;
835
836 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
837 char internal_label_prefix[16];
838 int internal_label_prefix_len;
839 \f
840 static void output_pic_addr_const (FILE *, rtx, int);
841 static void put_condition_code (enum rtx_code, enum machine_mode,
842 int, int, FILE *);
843 static const char *get_some_local_dynamic_name (void);
844 static int get_some_local_dynamic_name_1 (rtx *, void *);
845 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
846 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
847 rtx *);
848 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
849 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
850 enum machine_mode);
851 static rtx get_thread_pointer (int);
852 static rtx legitimize_tls_address (rtx, enum tls_model, int);
853 static void get_pc_thunk_name (char [32], unsigned int);
854 static rtx gen_push (rtx);
855 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
856 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
857 static struct machine_function * ix86_init_machine_status (void);
858 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
859 static int ix86_nsaved_regs (void);
860 static void ix86_emit_save_regs (void);
861 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
862 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
863 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
864 static HOST_WIDE_INT ix86_GOT_alias_set (void);
865 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
866 static rtx ix86_expand_aligntest (rtx, int);
867 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
868 static int ix86_issue_rate (void);
869 static int ix86_adjust_cost (rtx, rtx, rtx, int);
870 static int ia32_multipass_dfa_lookahead (void);
871 static bool ix86_misaligned_mem_ok (enum machine_mode);
872 static void ix86_init_mmx_sse_builtins (void);
873 static rtx x86_this_parameter (tree);
874 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
875 HOST_WIDE_INT, tree);
876 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
877 static void x86_file_start (void);
878 static void ix86_reorg (void);
879 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
880 static tree ix86_build_builtin_va_list (void);
881 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
882 tree, int *, int);
883 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
884 static bool ix86_vector_mode_supported_p (enum machine_mode);
885
886 static int ix86_address_cost (rtx);
887 static bool ix86_cannot_force_const_mem (rtx);
888 static rtx ix86_delegitimize_address (rtx);
889
890 struct builtin_description;
891 static rtx ix86_expand_sse_comi (const struct builtin_description *,
892 tree, rtx);
893 static rtx ix86_expand_sse_compare (const struct builtin_description *,
894 tree, rtx);
895 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
896 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
897 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
898 static rtx ix86_expand_store_builtin (enum insn_code, tree);
899 static rtx safe_vector_operand (rtx, enum machine_mode);
900 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
901 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
902 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
903 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
904 static int ix86_fp_comparison_cost (enum rtx_code code);
905 static unsigned int ix86_select_alt_pic_regnum (void);
906 static int ix86_save_reg (unsigned int, int);
907 static void ix86_compute_frame_layout (struct ix86_frame *);
908 static int ix86_comp_type_attributes (tree, tree);
909 static int ix86_function_regparm (tree, tree);
910 const struct attribute_spec ix86_attribute_table[];
911 static bool ix86_function_ok_for_sibcall (tree, tree);
912 static tree ix86_handle_cdecl_attribute (tree *, tree, tree, int, bool *);
913 static tree ix86_handle_regparm_attribute (tree *, tree, tree, int, bool *);
914 static int ix86_value_regno (enum machine_mode);
915 static bool contains_128bit_aligned_vector_p (tree);
916 static rtx ix86_struct_value_rtx (tree, int);
917 static bool ix86_ms_bitfield_layout_p (tree);
918 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
919 static int extended_reg_mentioned_1 (rtx *, void *);
920 static bool ix86_rtx_costs (rtx, int, int, int *);
921 static int min_insn_size (rtx);
922 static tree ix86_md_asm_clobbers (tree clobbers);
923 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
924 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
925 tree, bool);
926
927 #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION)
928 static void ix86_svr3_asm_out_constructor (rtx, int);
929 #endif
930 /* This function is only used on Solaris. */
931 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
932 ATTRIBUTE_UNUSED;
933
 934 /* Register class used for passing a given 64-bit part of the argument.
 935 These represent classes as documented by the psABI, with the exception of the
 936 SSESF and SSEDF classes, which are basically the SSE class, except that gcc
 937 will use an SF or DFmode move instead of DImode to avoid reformatting penalties.
 938
 939 Similarly we play games with the INTEGERSI_CLASS to use cheaper SImode moves
 940 whenever possible (the upper half there consists of padding).
 941 */
942 enum x86_64_reg_class
943 {
944 X86_64_NO_CLASS,
945 X86_64_INTEGER_CLASS,
946 X86_64_INTEGERSI_CLASS,
947 X86_64_SSE_CLASS,
948 X86_64_SSESF_CLASS,
949 X86_64_SSEDF_CLASS,
950 X86_64_SSEUP_CLASS,
951 X86_64_X87_CLASS,
952 X86_64_X87UP_CLASS,
953 X86_64_MEMORY_CLASS
954 };
955 static const char * const x86_64_reg_class_name[] =
956 {"no", "integer", "integerSI", "sse", "sseSF", "sseDF", "sseup", "x87", "x87up", "no"};
957
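/* Editor's note, an illustrative sketch rather than a quote from the
   psABI: a value of type

     struct { double d; int i; }

   spans two 64-bit parts and would be classified by classify_argument
   below roughly as { X86_64_SSEDF_CLASS, X86_64_INTEGERSI_CLASS }, so
   construct_container passes the double in an SSE register and the int
   in a general register.  */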
958 #define MAX_CLASSES 4
959 static int classify_argument (enum machine_mode, tree,
960 enum x86_64_reg_class [MAX_CLASSES], int);
961 static int examine_argument (enum machine_mode, tree, int, int *, int *);
962 static rtx construct_container (enum machine_mode, tree, int, int, int,
963 const int *, int);
964 static enum x86_64_reg_class merge_classes (enum x86_64_reg_class,
965 enum x86_64_reg_class);
966
967 /* Table of constants used by fldpi, fldln2, etc.... */
968 static REAL_VALUE_TYPE ext_80387_constants_table [5];
969 static bool ext_80387_constants_init = 0;
970 static void init_ext_80387_constants (void);
971 \f
972 /* Initialize the GCC target structure. */
973 #undef TARGET_ATTRIBUTE_TABLE
974 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
975 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
976 # undef TARGET_MERGE_DECL_ATTRIBUTES
977 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
978 #endif
979
980 #undef TARGET_COMP_TYPE_ATTRIBUTES
981 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
982
983 #undef TARGET_INIT_BUILTINS
984 #define TARGET_INIT_BUILTINS ix86_init_builtins
985
986 #undef TARGET_EXPAND_BUILTIN
987 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
988
989 #undef TARGET_ASM_FUNCTION_EPILOGUE
990 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
991
992 #undef TARGET_ASM_OPEN_PAREN
993 #define TARGET_ASM_OPEN_PAREN ""
994 #undef TARGET_ASM_CLOSE_PAREN
995 #define TARGET_ASM_CLOSE_PAREN ""
996
997 #undef TARGET_ASM_ALIGNED_HI_OP
998 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
999 #undef TARGET_ASM_ALIGNED_SI_OP
1000 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
1001 #ifdef ASM_QUAD
1002 #undef TARGET_ASM_ALIGNED_DI_OP
1003 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1004 #endif
1005
1006 #undef TARGET_ASM_UNALIGNED_HI_OP
1007 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1008 #undef TARGET_ASM_UNALIGNED_SI_OP
1009 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1010 #undef TARGET_ASM_UNALIGNED_DI_OP
1011 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1012
1013 #undef TARGET_SCHED_ADJUST_COST
1014 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1015 #undef TARGET_SCHED_ISSUE_RATE
1016 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1017 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1018 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1019 ia32_multipass_dfa_lookahead
1020
1021 #undef TARGET_VECTORIZE_MISALIGNED_MEM_OK
1022 #define TARGET_VECTORIZE_MISALIGNED_MEM_OK ix86_misaligned_mem_ok
1023
1024 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1025 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1026
1027 #ifdef HAVE_AS_TLS
1028 #undef TARGET_HAVE_TLS
1029 #define TARGET_HAVE_TLS true
1030 #endif
1031 #undef TARGET_CANNOT_FORCE_CONST_MEM
1032 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1033
1034 #undef TARGET_DELEGITIMIZE_ADDRESS
1035 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1036
1037 #undef TARGET_MS_BITFIELD_LAYOUT_P
1038 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1039
1040 #undef TARGET_ASM_OUTPUT_MI_THUNK
1041 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1042 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1043 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1044
1045 #undef TARGET_ASM_FILE_START
1046 #define TARGET_ASM_FILE_START x86_file_start
1047
1048 #undef TARGET_RTX_COSTS
1049 #define TARGET_RTX_COSTS ix86_rtx_costs
1050 #undef TARGET_ADDRESS_COST
1051 #define TARGET_ADDRESS_COST ix86_address_cost
1052
1053 #undef TARGET_FIXED_CONDITION_CODE_REGS
1054 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1055 #undef TARGET_CC_MODES_COMPATIBLE
1056 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1057
1058 #undef TARGET_MACHINE_DEPENDENT_REORG
1059 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1060
1061 #undef TARGET_BUILD_BUILTIN_VA_LIST
1062 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1063
1064 #undef TARGET_MD_ASM_CLOBBERS
1065 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1066
1067 #undef TARGET_PROMOTE_PROTOTYPES
1068 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1069 #undef TARGET_STRUCT_VALUE_RTX
1070 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1071 #undef TARGET_SETUP_INCOMING_VARARGS
1072 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1073 #undef TARGET_MUST_PASS_IN_STACK
1074 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1075 #undef TARGET_PASS_BY_REFERENCE
1076 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1077
1078 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1079 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1080
1081 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1082 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1083
1084 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1085 #undef TARGET_INSERT_ATTRIBUTES
1086 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1087 #endif
1088
1089 struct gcc_target targetm = TARGET_INITIALIZER;
1090
1091 \f
1092 /* The svr4 ABI for the i386 says that records and unions are returned
1093 in memory. */
1094 #ifndef DEFAULT_PCC_STRUCT_RETURN
1095 #define DEFAULT_PCC_STRUCT_RETURN 1
1096 #endif
1097
1098 /* Sometimes certain combinations of command options do not make
1099 sense on a particular target machine. You can define a macro
1100 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1101 defined, is executed once just after all the command options have
1102 been parsed.
1103
1104 Don't use this macro to turn on various extra optimizations for
1105 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1106
1107 void
1108 override_options (void)
1109 {
1110 int i;
1111 int ix86_tune_defaulted = 0;
1112
1113 /* Comes from final.c -- no real reason to change it. */
1114 #define MAX_CODE_ALIGN 16
1115
1116 static struct ptt
1117 {
1118 const struct processor_costs *cost; /* Processor costs */
1119 const int target_enable; /* Target flags to enable. */
1120 const int target_disable; /* Target flags to disable. */
1121 const int align_loop; /* Default alignments. */
1122 const int align_loop_max_skip;
1123 const int align_jump;
1124 const int align_jump_max_skip;
1125 const int align_func;
1126 }
1127 const processor_target_table[PROCESSOR_max] =
1128 {
1129 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1130 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1131 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1132 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1133 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1134 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1135 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1136 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1137 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1138 };
1139
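  /* Editor's note: reading one row of processor_target_table above, the
     K6 entry {&k6_cost, 0, 0, 32, 7, 32, 7, 32} means: use k6_cost, add
     and remove no target flags, align loops and jumps to 32-byte
     boundaries while skipping at most 7 bytes of padding, and align
     function starts to 32 bytes.  These values become the align_*
     defaults further down when the user gives no -falign-* options.  */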
1140 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1141 static struct pta
1142 {
1143 const char *const name; /* processor name or nickname. */
1144 const enum processor_type processor;
1145 const enum pta_flags
1146 {
1147 PTA_SSE = 1,
1148 PTA_SSE2 = 2,
1149 PTA_SSE3 = 4,
1150 PTA_MMX = 8,
1151 PTA_PREFETCH_SSE = 16,
1152 PTA_3DNOW = 32,
1153 PTA_3DNOW_A = 64,
1154 PTA_64BIT = 128
1155 } flags;
1156 }
1157 const processor_alias_table[] =
1158 {
1159 {"i386", PROCESSOR_I386, 0},
1160 {"i486", PROCESSOR_I486, 0},
1161 {"i586", PROCESSOR_PENTIUM, 0},
1162 {"pentium", PROCESSOR_PENTIUM, 0},
1163 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1164 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1165 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1166 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1167 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1168 {"i686", PROCESSOR_PENTIUMPRO, 0},
1169 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1170 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1171 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1172 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1173 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1174 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1175 | PTA_MMX | PTA_PREFETCH_SSE},
1176 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1177 | PTA_MMX | PTA_PREFETCH_SSE},
1178 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1179 | PTA_MMX | PTA_PREFETCH_SSE},
1180 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1181 | PTA_MMX | PTA_PREFETCH_SSE},
1182 {"k6", PROCESSOR_K6, PTA_MMX},
1183 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1184 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1185 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1186 | PTA_3DNOW_A},
1187 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1188 | PTA_3DNOW | PTA_3DNOW_A},
1189 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1190 | PTA_3DNOW_A | PTA_SSE},
1191 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1192 | PTA_3DNOW_A | PTA_SSE},
1193 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1194 | PTA_3DNOW_A | PTA_SSE},
1195 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1196 | PTA_SSE | PTA_SSE2 },
1197 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1198 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1199 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1200 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1201 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1202 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1203 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1204 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1205 };
1206
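  /* Editor's note, for illustration: with -march=k8 the loop below sets
     ix86_arch (and, when no -mtune is given, ix86_tune) to PROCESSOR_K8
     and turns on MASK_MMX, MASK_3DNOW, MASK_3DNOW_A, MASK_SSE and
     MASK_SSE2, skipping any mask the user already set explicitly, and
     marks the SSE prefetch instructions as usable.  */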
1207 int const pta_size = ARRAY_SIZE (processor_alias_table);
1208
1209 /* Set the default values for switches whose default depends on TARGET_64BIT
1210 in case they weren't overwritten by command line options. */
1211 if (TARGET_64BIT)
1212 {
1213 if (flag_omit_frame_pointer == 2)
1214 flag_omit_frame_pointer = 1;
1215 if (flag_asynchronous_unwind_tables == 2)
1216 flag_asynchronous_unwind_tables = 1;
1217 if (flag_pcc_struct_return == 2)
1218 flag_pcc_struct_return = 0;
1219 }
1220 else
1221 {
1222 if (flag_omit_frame_pointer == 2)
1223 flag_omit_frame_pointer = 0;
1224 if (flag_asynchronous_unwind_tables == 2)
1225 flag_asynchronous_unwind_tables = 0;
1226 if (flag_pcc_struct_return == 2)
1227 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1228 }
1229
1230 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1231 SUBTARGET_OVERRIDE_OPTIONS;
1232 #endif
1233
1234 if (!ix86_tune_string && ix86_arch_string)
1235 ix86_tune_string = ix86_arch_string;
1236 if (!ix86_tune_string)
1237 {
1238 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1239 ix86_tune_defaulted = 1;
1240 }
1241 if (!ix86_arch_string)
1242 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1243
1244 if (ix86_cmodel_string != 0)
1245 {
1246 if (!strcmp (ix86_cmodel_string, "small"))
1247 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1248 else if (flag_pic)
1249 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1250 else if (!strcmp (ix86_cmodel_string, "32"))
1251 ix86_cmodel = CM_32;
1252 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1253 ix86_cmodel = CM_KERNEL;
1254 else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
1255 ix86_cmodel = CM_MEDIUM;
1256 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1257 ix86_cmodel = CM_LARGE;
1258 else
1259 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1260 }
1261 else
1262 {
1263 ix86_cmodel = CM_32;
1264 if (TARGET_64BIT)
1265 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1266 }
1267 if (ix86_asm_string != 0)
1268 {
1269 if (!strcmp (ix86_asm_string, "intel"))
1270 ix86_asm_dialect = ASM_INTEL;
1271 else if (!strcmp (ix86_asm_string, "att"))
1272 ix86_asm_dialect = ASM_ATT;
1273 else
1274 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1275 }
1276 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1277 error ("code model %qs not supported in the %s bit mode",
1278 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1279 if (ix86_cmodel == CM_LARGE)
1280 sorry ("code model %<large%> not supported yet");
1281 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1282 sorry ("%i-bit mode not compiled in",
1283 (target_flags & MASK_64BIT) ? 64 : 32);
1284
1285 for (i = 0; i < pta_size; i++)
1286 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1287 {
1288 ix86_arch = processor_alias_table[i].processor;
1289 /* Default cpu tuning to the architecture. */
1290 ix86_tune = ix86_arch;
1291 if (processor_alias_table[i].flags & PTA_MMX
1292 && !(target_flags_explicit & MASK_MMX))
1293 target_flags |= MASK_MMX;
1294 if (processor_alias_table[i].flags & PTA_3DNOW
1295 && !(target_flags_explicit & MASK_3DNOW))
1296 target_flags |= MASK_3DNOW;
1297 if (processor_alias_table[i].flags & PTA_3DNOW_A
1298 && !(target_flags_explicit & MASK_3DNOW_A))
1299 target_flags |= MASK_3DNOW_A;
1300 if (processor_alias_table[i].flags & PTA_SSE
1301 && !(target_flags_explicit & MASK_SSE))
1302 target_flags |= MASK_SSE;
1303 if (processor_alias_table[i].flags & PTA_SSE2
1304 && !(target_flags_explicit & MASK_SSE2))
1305 target_flags |= MASK_SSE2;
1306 if (processor_alias_table[i].flags & PTA_SSE3
1307 && !(target_flags_explicit & MASK_SSE3))
1308 target_flags |= MASK_SSE3;
1309 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1310 x86_prefetch_sse = true;
1311 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1312 error ("CPU you selected does not support x86-64 "
1313 "instruction set");
1314 break;
1315 }
1316
1317 if (i == pta_size)
1318 error ("bad value (%s) for -march= switch", ix86_arch_string);
1319
1320 for (i = 0; i < pta_size; i++)
1321 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1322 {
1323 ix86_tune = processor_alias_table[i].processor;
1324 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1325 {
1326 if (ix86_tune_defaulted)
1327 {
1328 ix86_tune_string = "x86-64";
1329 for (i = 0; i < pta_size; i++)
1330 if (! strcmp (ix86_tune_string,
1331 processor_alias_table[i].name))
1332 break;
1333 ix86_tune = processor_alias_table[i].processor;
1334 }
1335 else
1336 error ("CPU you selected does not support x86-64 "
1337 "instruction set");
1338 }
1339 /* Intel CPUs have always interpreted SSE prefetch instructions as
1340 NOPs; so, we can enable SSE prefetch instructions even when
1341 -mtune (rather than -march) points us to a processor that has them.
1342 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1343 higher processors. */
1344 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1345 x86_prefetch_sse = true;
1346 break;
1347 }
1348 if (i == pta_size)
1349 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1350
1351 if (optimize_size)
1352 ix86_cost = &size_cost;
1353 else
1354 ix86_cost = processor_target_table[ix86_tune].cost;
1355 target_flags |= processor_target_table[ix86_tune].target_enable;
1356 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1357
1358 /* Arrange to set up i386_stack_locals for all functions. */
1359 init_machine_status = ix86_init_machine_status;
1360
1361 /* Validate -mregparm= value. */
1362 if (ix86_regparm_string)
1363 {
1364 i = atoi (ix86_regparm_string);
1365 if (i < 0 || i > REGPARM_MAX)
1366 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1367 else
1368 ix86_regparm = i;
1369 }
1370 else
1371 if (TARGET_64BIT)
1372 ix86_regparm = REGPARM_MAX;
1373
1374 /* If the user has provided any of the -malign-* options,
1375 warn and use that value only if -falign-* is not set.
1376 Remove this code in GCC 3.2 or later. */
1377 if (ix86_align_loops_string)
1378 {
1379 warning ("-malign-loops is obsolete, use -falign-loops");
1380 if (align_loops == 0)
1381 {
1382 i = atoi (ix86_align_loops_string);
1383 if (i < 0 || i > MAX_CODE_ALIGN)
1384 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1385 else
1386 align_loops = 1 << i;
1387 }
1388 }
1389
1390 if (ix86_align_jumps_string)
1391 {
1392 warning ("-malign-jumps is obsolete, use -falign-jumps");
1393 if (align_jumps == 0)
1394 {
1395 i = atoi (ix86_align_jumps_string);
1396 if (i < 0 || i > MAX_CODE_ALIGN)
1397 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1398 else
1399 align_jumps = 1 << i;
1400 }
1401 }
1402
1403 if (ix86_align_funcs_string)
1404 {
1405 warning ("-malign-functions is obsolete, use -falign-functions");
1406 if (align_functions == 0)
1407 {
1408 i = atoi (ix86_align_funcs_string);
1409 if (i < 0 || i > MAX_CODE_ALIGN)
1410 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1411 else
1412 align_functions = 1 << i;
1413 }
1414 }
1415
1416 /* Default align_* from the processor table. */
1417 if (align_loops == 0)
1418 {
1419 align_loops = processor_target_table[ix86_tune].align_loop;
1420 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1421 }
1422 if (align_jumps == 0)
1423 {
1424 align_jumps = processor_target_table[ix86_tune].align_jump;
1425 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1426 }
1427 if (align_functions == 0)
1428 {
1429 align_functions = processor_target_table[ix86_tune].align_func;
1430 }
1431
1432 /* Validate -mpreferred-stack-boundary= value, or provide default.
1433 The default of 128 bits is for Pentium III's SSE __m128, but we
1434 don't want additional code to keep the stack aligned when
1435 optimizing for code size. */
1436 ix86_preferred_stack_boundary = (optimize_size
1437 ? TARGET_64BIT ? 128 : 32
1438 : 128);
1439 if (ix86_preferred_stack_boundary_string)
1440 {
1441 i = atoi (ix86_preferred_stack_boundary_string);
1442 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1443 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1444 TARGET_64BIT ? 4 : 2);
1445 else
1446 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
1447 }
1448
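  /* Editor's note, worked example: -mpreferred-stack-boundary=4 yields
     ix86_preferred_stack_boundary = (1 << 4) * BITS_PER_UNIT = 128 bits,
     i.e. the 16-byte alignment that the 128-bit default above assumes.  */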
1449 /* Validate -mbranch-cost= value, or provide default. */
1450 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1451 if (ix86_branch_cost_string)
1452 {
1453 i = atoi (ix86_branch_cost_string);
1454 if (i < 0 || i > 5)
1455 error ("-mbranch-cost=%d is not between 0 and 5", i);
1456 else
1457 ix86_branch_cost = i;
1458 }
1459
1460 if (ix86_tls_dialect_string)
1461 {
1462 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1463 ix86_tls_dialect = TLS_DIALECT_GNU;
1464 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1465 ix86_tls_dialect = TLS_DIALECT_SUN;
1466 else
1467 error ("bad value (%s) for -mtls-dialect= switch",
1468 ix86_tls_dialect_string);
1469 }
1470
1471 /* Keep nonleaf frame pointers. */
1472 if (TARGET_OMIT_LEAF_FRAME_POINTER)
1473 flag_omit_frame_pointer = 1;
1474
1475 /* If we're doing fast math, we don't care about comparison order
1476 wrt NaNs. This lets us use a shorter comparison sequence. */
1477 if (flag_unsafe_math_optimizations)
1478 target_flags &= ~MASK_IEEE_FP;
1479
1480 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1481 since the insns won't need emulation. */
1482 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1483 target_flags &= ~MASK_NO_FANCY_MATH_387;
1484
1485 /* Turn on SSE2 builtins for -msse3. */
1486 if (TARGET_SSE3)
1487 target_flags |= MASK_SSE2;
1488
1489 /* Turn on SSE builtins for -msse2. */
1490 if (TARGET_SSE2)
1491 target_flags |= MASK_SSE;
1492
1493 if (TARGET_64BIT)
1494 {
1495 if (TARGET_ALIGN_DOUBLE)
1496 error ("-malign-double makes no sense in 64-bit mode");
1497 if (TARGET_RTD)
1498 error ("-mrtd calling convention not supported in 64-bit mode");
1499 /* Enable by default the SSE and MMX builtins. */
1500 target_flags |= (MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE);
1501 ix86_fpmath = FPMATH_SSE;
1502 }
1503 else
1504 {
1505 ix86_fpmath = FPMATH_387;
1506 /* The i386 ABI does not specify a red zone. It still makes sense to use it
1507 when the programmer takes care to keep the stack from being destroyed. */
1508 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1509 target_flags |= MASK_NO_RED_ZONE;
1510 }
1511
1512 if (ix86_fpmath_string != 0)
1513 {
1514 if (! strcmp (ix86_fpmath_string, "387"))
1515 ix86_fpmath = FPMATH_387;
1516 else if (! strcmp (ix86_fpmath_string, "sse"))
1517 {
1518 if (!TARGET_SSE)
1519 {
1520 warning ("SSE instruction set disabled, using 387 arithmetics");
1521 ix86_fpmath = FPMATH_387;
1522 }
1523 else
1524 ix86_fpmath = FPMATH_SSE;
1525 }
1526 else if (! strcmp (ix86_fpmath_string, "387,sse")
1527 || ! strcmp (ix86_fpmath_string, "sse,387"))
1528 {
1529 if (!TARGET_SSE)
1530 {
1531 warning ("SSE instruction set disabled, using 387 arithmetics");
1532 ix86_fpmath = FPMATH_387;
1533 }
1534 else if (!TARGET_80387)
1535 {
1536 warning ("387 instruction set disabled, using SSE arithmetics");
1537 ix86_fpmath = FPMATH_SSE;
1538 }
1539 else
1540 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1541 }
1542 else
1543 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1544 }
1545
1546 /* It makes no sense to ask for just SSE builtins, so MMX is also turned
1547 on by -msse. */
1548 if (TARGET_SSE)
1549 {
1550 target_flags |= MASK_MMX;
1551 x86_prefetch_sse = true;
1552 }
1553
1554 /* If it has 3DNow!, it also has MMX, so MMX is also turned on by -m3dnow. */
1555 if (TARGET_3DNOW)
1556 {
1557 target_flags |= MASK_MMX;
1558 /* If we are targeting the Athlon architecture, enable the 3Dnow/MMX
1559 extensions it adds. */
1560 if (x86_3dnow_a & (1 << ix86_arch))
1561 target_flags |= MASK_3DNOW_A;
1562 }
1563 if ((x86_accumulate_outgoing_args & TUNEMASK)
1564 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1565 && !optimize_size)
1566 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1567
1568 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1569 {
1570 char *p;
1571 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1572 p = strchr (internal_label_prefix, 'X');
1573 internal_label_prefix_len = p - internal_label_prefix;
1574 *p = '\0';
1575 }
1576 /* When the scheduling description is not available, disable the scheduler
1577 pass so it won't slow down compilation and make x87 code slower. */
1578 if (!TARGET_SCHEDULE)
1579 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1580 }
1581 \f
1582 void
1583 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1584 {
1585 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1586 make the problem with not enough registers even worse. */
1587 #ifdef INSN_SCHEDULING
1588 if (level > 1)
1589 flag_schedule_insns = 0;
1590 #endif
1591
1592 /* The default values of these switches depend on TARGET_64BIT,
1593 which is not known at this moment. Mark these values with 2 and
1594 let the user override them. In case there is no command line option
1595 specifying them, we will set the defaults in override_options. */
1596 if (optimize >= 1)
1597 flag_omit_frame_pointer = 2;
1598 flag_pcc_struct_return = 2;
1599 flag_asynchronous_unwind_tables = 2;
1600 }
1601 \f
1602 /* Table of valid machine attributes. */
1603 const struct attribute_spec ix86_attribute_table[] =
1604 {
1605 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1606 /* Stdcall attribute says callee is responsible for popping arguments
1607 if they are not variable. */
1608 { "stdcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1609 /* Fastcall attribute says callee is responsible for popping arguments
1610 if they are not variable. */
1611 { "fastcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1612 /* Cdecl attribute says the callee is a normal C declaration */
1613 { "cdecl", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1614 /* Regparm attribute specifies how many integer arguments are to be
1615 passed in registers. */
1616 { "regparm", 1, 1, false, true, true, ix86_handle_regparm_attribute },
1617 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1618 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1619 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1620 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1621 #endif
1622 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1623 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1624 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1625 SUBTARGET_ATTRIBUTE_TABLE,
1626 #endif
1627 { NULL, 0, 0, false, false, false, NULL }
1628 };
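/* Illustrative usage of the calling-convention attributes handled in the
   table above (a sketch with hypothetical declarations, not code from this
   file):

     int __attribute__((stdcall))     f (int a, int b);   callee pops args
     int __attribute__((fastcall))    g (int a, int b);   a in ECX, b in EDX
     int __attribute__((regparm (3))) h (int a, int b, int c);
                                      up to three integer args in registers

   The handlers below diagnose incompatible combinations such as fastcall
   with stdcall or regparm.  */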
1629
1630 /* Decide whether we can make a sibling call to a function. DECL is the
1631 declaration of the function being targeted by the call and EXP is the
1632 CALL_EXPR representing the call. */
1633
1634 static bool
1635 ix86_function_ok_for_sibcall (tree decl, tree exp)
1636 {
1637 /* If we are generating position-independent code, we cannot sibcall
1638 optimize any indirect call, or a direct call to a global function,
1639 as the PLT requires %ebx be live. */
1640 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1641 return false;
1642
1643 /* If we are returning floats on the 80387 register stack, we cannot
1644 make a sibcall from a function that doesn't return a float to a
1645 function that does or, conversely, from a function that does return
1646 a float to a function that doesn't; the necessary stack adjustment
1647 would not be executed. */
1648 if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp)))
1649 != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)))))
1650 return false;
1651
1652 /* If this call is indirect, we'll need to be able to use a call-clobbered
1653 register for the address of the target function. Make sure that all
1654 such registers are not used for passing parameters. */
1655 if (!decl && !TARGET_64BIT)
1656 {
1657 tree type;
1658
1659 /* We're looking at the CALL_EXPR, we need the type of the function. */
1660 type = TREE_OPERAND (exp, 0); /* pointer expression */
1661 type = TREE_TYPE (type); /* pointer type */
1662 type = TREE_TYPE (type); /* function type */
1663
1664 if (ix86_function_regparm (type, NULL) >= 3)
1665 {
1666 /* ??? Need to count the actual number of registers to be used,
1667 not the possible number of registers. Fix later. */
1668 return false;
1669 }
1670 }
1671
1672 /* Otherwise okay. That also includes certain types of indirect calls. */
1673 return true;
1674 }
1675
1676 /* Handle a "cdecl", "stdcall", or "fastcall" attribute;
1677 arguments as in struct attribute_spec.handler. */
1678 static tree
1679 ix86_handle_cdecl_attribute (tree *node, tree name,
1680 tree args ATTRIBUTE_UNUSED,
1681 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1682 {
1683 if (TREE_CODE (*node) != FUNCTION_TYPE
1684 && TREE_CODE (*node) != METHOD_TYPE
1685 && TREE_CODE (*node) != FIELD_DECL
1686 && TREE_CODE (*node) != TYPE_DECL)
1687 {
1688 warning ("%qs attribute only applies to functions",
1689 IDENTIFIER_POINTER (name));
1690 *no_add_attrs = true;
1691 }
1692 else
1693 {
1694 if (is_attribute_p ("fastcall", name))
1695 {
1696 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1697 {
1698 error ("fastcall and stdcall attributes are not compatible");
1699 }
1700 else if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
1701 {
1702 error ("fastcall and regparm attributes are not compatible");
1703 }
1704 }
1705 else if (is_attribute_p ("stdcall", name))
1706 {
1707 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1708 {
1709 error ("fastcall and stdcall attributes are not compatible");
1710 }
1711 }
1712 }
1713
1714 if (TARGET_64BIT)
1715 {
1716 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
1717 *no_add_attrs = true;
1718 }
1719
1720 return NULL_TREE;
1721 }
1722
1723 /* Handle a "regparm" attribute;
1724 arguments as in struct attribute_spec.handler. */
1725 static tree
1726 ix86_handle_regparm_attribute (tree *node, tree name, tree args,
1727 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1728 {
1729 if (TREE_CODE (*node) != FUNCTION_TYPE
1730 && TREE_CODE (*node) != METHOD_TYPE
1731 && TREE_CODE (*node) != FIELD_DECL
1732 && TREE_CODE (*node) != TYPE_DECL)
1733 {
1734 warning ("%qs attribute only applies to functions",
1735 IDENTIFIER_POINTER (name));
1736 *no_add_attrs = true;
1737 }
1738 else
1739 {
1740 tree cst;
1741
1742 cst = TREE_VALUE (args);
1743 if (TREE_CODE (cst) != INTEGER_CST)
1744 {
1745 warning ("%qs attribute requires an integer constant argument",
1746 IDENTIFIER_POINTER (name));
1747 *no_add_attrs = true;
1748 }
1749 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
1750 {
1751 warning ("argument to %qs attribute larger than %d",
1752 IDENTIFIER_POINTER (name), REGPARM_MAX);
1753 *no_add_attrs = true;
1754 }
1755
1756 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1757 {
1758 error ("fastcall and regparm attributes are not compatible");
1759 }
1760 }
1761
1762 return NULL_TREE;
1763 }
1764
1765 /* Return 0 if the attributes for two types are incompatible, 1 if they
1766 are compatible, and 2 if they are nearly compatible (which causes a
1767 warning to be generated). */
1768
1769 static int
1770 ix86_comp_type_attributes (tree type1, tree type2)
1771 {
1772 /* Check for mismatch of non-default calling convention. */
1773 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
1774
1775 if (TREE_CODE (type1) != FUNCTION_TYPE)
1776 return 1;
1777
1778 /* Check for mismatched fastcall types */
1779 if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
1780 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
1781 return 0;
1782
1783 /* Check for mismatched return types (cdecl vs stdcall). */
1784 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
1785 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
1786 return 0;
1787 if (ix86_function_regparm (type1, NULL)
1788 != ix86_function_regparm (type2, NULL))
1789 return 0;
1790 return 1;
1791 }
1792 \f
1793 /* Return the regparm value for a function with the indicated TYPE and DECL.
1794 DECL may be NULL when calling function indirectly
1795 or considering a libcall. */
1796
1797 static int
1798 ix86_function_regparm (tree type, tree decl)
1799 {
1800 tree attr;
1801 int regparm = ix86_regparm;
1802 bool user_convention = false;
1803
1804 if (!TARGET_64BIT)
1805 {
1806 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
1807 if (attr)
1808 {
1809 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
1810 user_convention = true;
1811 }
1812
1813 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
1814 {
1815 regparm = 2;
1816 user_convention = true;
1817 }
1818
1819 /* Use register calling convention for local functions when possible. */
1820 if (!TARGET_64BIT && !user_convention && decl
1821 && flag_unit_at_a_time && !profile_flag)
1822 {
1823 struct cgraph_local_info *i = cgraph_local_info (decl);
1824 if (i && i->local)
1825 {
1826 /* We can't use regparm(3) for nested functions as these use
1827 static chain pointer in third argument. */
1828 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
1829 regparm = 2;
1830 else
1831 regparm = 3;
1832 }
1833 }
1834 }
1835 return regparm;
1836 }
1837
1838 /* Return true if EAX is live at the start of the function. Used by
1839 ix86_expand_prologue to determine if we need special help before
1840 calling allocate_stack_worker. */
1841
1842 static bool
1843 ix86_eax_live_at_start_p (void)
1844 {
1845 /* Cheat. Don't bother working forward from ix86_function_regparm
1846 to the function type to whether an actual argument is located in
1847 eax. Instead just look at cfg info, which is still close enough
1848 to correct at this point. This gives false positives for broken
1849 functions that might use uninitialized data that happens to be
1850 allocated in eax, but who cares? */
1851 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0);
1852 }
1853
1854 /* Value is the number of bytes of arguments automatically
1855 popped when returning from a subroutine call.
1856 FUNDECL is the declaration node of the function (as a tree),
1857 FUNTYPE is the data type of the function (as a tree),
1858 or for a library call it is an identifier node for the subroutine name.
1859 SIZE is the number of bytes of arguments passed on the stack.
1860
1861 On the 80386, the RTD insn may be used to pop them if the number
1862 of args is fixed, but if the number is variable then the caller
1863 must pop them all. RTD can't be used for library calls now
1864 because the library is compiled with the Unix compiler.
1865 Use of RTD is a selectable option, since it is incompatible with
1866 standard Unix calling sequences. If the option is not selected,
1867 the caller must always pop the args.
1868
1869 The attribute stdcall is equivalent to RTD on a per module basis. */
1870
1871 int
1872 ix86_return_pops_args (tree fundecl, tree funtype, int size)
1873 {
1874 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
1875
1876 /* Cdecl functions override -mrtd, and never pop the stack. */
1877 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
1878
1879 /* Stdcall and fastcall functions will pop the stack if they do not
1880 take variable args. */
1881 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
1882 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
1883 rtd = 1;
1884
1885 if (rtd
1886 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
1887 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
1888 == void_type_node)))
1889 return size;
1890 }
1891
1892 /* Lose any fake structure return argument if it is passed on the stack. */
1893 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
1894 && !TARGET_64BIT
1895 && !KEEP_AGGREGATE_RETURN_POINTER)
1896 {
1897 int nregs = ix86_function_regparm (funtype, fundecl);
1898
1899 if (!nregs)
1900 return GET_MODE_SIZE (Pmode);
1901 }
1902
1903 return 0;
1904 }
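/* A worked example (illustrative sketch, not part of the original file):
   on 32-bit, for a fixed-argument function declared
   `void __attribute__((stdcall)) f (int, int)', SIZE is 8 and the code
   above returns 8, so the callee pops its own arguments (typically with
   `ret $8').  A cdecl function, or a stdcall function taking variable
   arguments, yields 0 and the caller pops.  */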
1905 \f
1906 /* Argument support functions. */
1907
1908 /* Return true when register may be used to pass function parameters. */
1909 bool
1910 ix86_function_arg_regno_p (int regno)
1911 {
1912 int i;
1913 if (!TARGET_64BIT)
1914 return (regno < REGPARM_MAX
1915 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
1916 if (SSE_REGNO_P (regno) && TARGET_SSE)
1917 return true;
1918 /* RAX is used as hidden argument to va_arg functions. */
1919 if (!regno)
1920 return true;
1921 for (i = 0; i < REGPARM_MAX; i++)
1922 if (regno == x86_64_int_parameter_registers[i])
1923 return true;
1924 return false;
1925 }
1926
1927 /* Return true if we do not know how to pass TYPE solely in registers. */
1928
1929 static bool
1930 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
1931 {
1932 if (must_pass_in_stack_var_size_or_pad (mode, type))
1933 return true;
1934 return (!TARGET_64BIT && type && mode == TImode);
1935 }
1936
1937 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1938 for a call to a function whose data type is FNTYPE.
1939 For a library call, FNTYPE is 0. */
1940
1941 void
1942 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
1943 tree fntype, /* tree ptr for function decl */
1944 rtx libname, /* SYMBOL_REF of library name or 0 */
1945 tree fndecl)
1946 {
1947 static CUMULATIVE_ARGS zero_cum;
1948 tree param, next_param;
1949
1950 if (TARGET_DEBUG_ARG)
1951 {
1952 fprintf (stderr, "\ninit_cumulative_args (");
1953 if (fntype)
1954 fprintf (stderr, "fntype code = %s, ret code = %s",
1955 tree_code_name[(int) TREE_CODE (fntype)],
1956 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
1957 else
1958 fprintf (stderr, "no fntype");
1959
1960 if (libname)
1961 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
1962 }
1963
1964 *cum = zero_cum;
1965
1966 /* Set up the number of registers to use for passing arguments. */
1967 if (fntype)
1968 cum->nregs = ix86_function_regparm (fntype, fndecl);
1969 else
1970 cum->nregs = ix86_regparm;
1971 if (TARGET_SSE)
1972 cum->sse_nregs = SSE_REGPARM_MAX;
1973 if (TARGET_MMX)
1974 cum->mmx_nregs = MMX_REGPARM_MAX;
1975 cum->warn_sse = true;
1976 cum->warn_mmx = true;
1977 cum->maybe_vaarg = false;
1978
1979 /* Use ecx and edx registers if function has fastcall attribute */
1980 if (fntype && !TARGET_64BIT)
1981 {
1982 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
1983 {
1984 cum->nregs = 2;
1985 cum->fastcall = 1;
1986 }
1987 }
1988
1989 /* Determine if this function has variable arguments. This is
1990 indicated by the last argument being 'void_type_node' if there
1991 are no variable arguments. If there are variable arguments, then
1992 we won't pass anything in registers in 32-bit mode. */
1993
1994 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
1995 {
1996 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
1997 param != 0; param = next_param)
1998 {
1999 next_param = TREE_CHAIN (param);
2000 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2001 {
2002 if (!TARGET_64BIT)
2003 {
2004 cum->nregs = 0;
2005 cum->sse_nregs = 0;
2006 cum->mmx_nregs = 0;
2007 cum->warn_sse = 0;
2008 cum->warn_mmx = 0;
2009 cum->fastcall = 0;
2010 }
2011 cum->maybe_vaarg = true;
2012 }
2013 }
2014 }
2015 if ((!fntype && !libname)
2016 || (fntype && !TYPE_ARG_TYPES (fntype)))
2017 cum->maybe_vaarg = 1;
2018
2019 if (TARGET_DEBUG_ARG)
2020 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2021
2022 return;
2023 }
2024
2025 /* x86-64 register passing implementation. See the x86-64 ABI for details.
2026 The goal of this code is to classify each eightbyte of the incoming argument
2027 by register class and assign registers accordingly. */
2028
2029 /* Return the union class of CLASS1 and CLASS2.
2030 See the x86-64 PS ABI for details. */
2031
2032 static enum x86_64_reg_class
2033 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2034 {
2035 /* Rule #1: If both classes are equal, this is the resulting class. */
2036 if (class1 == class2)
2037 return class1;
2038
2039 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2040 the other class. */
2041 if (class1 == X86_64_NO_CLASS)
2042 return class2;
2043 if (class2 == X86_64_NO_CLASS)
2044 return class1;
2045
2046 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2047 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2048 return X86_64_MEMORY_CLASS;
2049
2050 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2051 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2052 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2053 return X86_64_INTEGERSI_CLASS;
2054 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2055 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2056 return X86_64_INTEGER_CLASS;
2057
2058 /* Rule #5: If one of the classes is X87 or X87UP class, MEMORY is used. */
2059 if (class1 == X86_64_X87_CLASS || class1 == X86_64_X87UP_CLASS
2060 || class2 == X86_64_X87_CLASS || class2 == X86_64_X87UP_CLASS)
2061 return X86_64_MEMORY_CLASS;
2062
2063 /* Rule #6: Otherwise class SSE is used. */
2064 return X86_64_SSE_CLASS;
2065 }
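/* Illustrative applications of the rules above (a sketch, not part of the
   original file): merging X86_64_NO_CLASS with X86_64_SSE_CLASS yields
   X86_64_SSE_CLASS (rule #2); X86_64_INTEGERSI_CLASS with
   X86_64_SSESF_CLASS yields X86_64_INTEGERSI_CLASS (rule #4);
   X86_64_SSE_CLASS with X86_64_X87_CLASS yields X86_64_MEMORY_CLASS
   (rule #5).  */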
2066
2067 /* Classify the argument of type TYPE and mode MODE.
2068 CLASSES will be filled by the register class used to pass each word
2069 of the operand. The number of words is returned. In case the parameter
2070 should be passed in memory, 0 is returned. As a special case for zero
2071 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2072
2073 BIT_OFFSET is used internally for handling records and specifies the bit
2074 offset of the current piece within the record, modulo 256 to avoid overflow.
2075
2076 See the x86-64 PS ABI for details.
2077 */
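/* A worked example (illustrative sketch, not part of the original file):
   for `struct { double d; long l; }' the two eightbytes classify as
   { X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS } and 2 is returned, so the
   structure is passed in one SSE register and one integer register.  */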
2078
2079 static int
2080 classify_argument (enum machine_mode mode, tree type,
2081 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2082 {
2083 HOST_WIDE_INT bytes =
2084 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2085 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2086
2087 /* Variable sized entities are always passed/returned in memory. */
2088 if (bytes < 0)
2089 return 0;
2090
2091 if (mode != VOIDmode
2092 && targetm.calls.must_pass_in_stack (mode, type))
2093 return 0;
2094
2095 if (type && AGGREGATE_TYPE_P (type))
2096 {
2097 int i;
2098 tree field;
2099 enum x86_64_reg_class subclasses[MAX_CLASSES];
2100
2101 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2102 if (bytes > 16)
2103 return 0;
2104
2105 for (i = 0; i < words; i++)
2106 classes[i] = X86_64_NO_CLASS;
2107
2108 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
2109 signal the memory class, so handle this as a special case. */
2110 if (!words)
2111 {
2112 classes[0] = X86_64_NO_CLASS;
2113 return 1;
2114 }
2115
2116 /* Classify each field of record and merge classes. */
2117 if (TREE_CODE (type) == RECORD_TYPE)
2118 {
2119 /* For C++ classes, first merge in the fields of the base classes. */
2120 if (TYPE_BINFO (type))
2121 {
2122 tree binfo, base_binfo;
2123 int basenum;
2124
2125 for (binfo = TYPE_BINFO (type), basenum = 0;
2126 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2127 {
2128 int num;
2129 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2130 tree type = BINFO_TYPE (base_binfo);
2131
2132 num = classify_argument (TYPE_MODE (type),
2133 type, subclasses,
2134 (offset + bit_offset) % 256);
2135 if (!num)
2136 return 0;
2137 for (i = 0; i < num; i++)
2138 {
2139 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2140 classes[i + pos] =
2141 merge_classes (subclasses[i], classes[i + pos]);
2142 }
2143 }
2144 }
2145 /* And now merge the fields of the structure. */
2146 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2147 {
2148 if (TREE_CODE (field) == FIELD_DECL)
2149 {
2150 int num;
2151
2152 /* Bitfields are always classified as integer. Handle them
2153 early, since later code would consider them to be
2154 misaligned integers. */
2155 if (DECL_BIT_FIELD (field))
2156 {
2157 for (i = int_bit_position (field) / 8 / 8;
2158 i < (int_bit_position (field)
2159 + tree_low_cst (DECL_SIZE (field), 0)
2160 + 63) / 8 / 8; i++)
2161 classes[i] =
2162 merge_classes (X86_64_INTEGER_CLASS,
2163 classes[i]);
2164 }
2165 else
2166 {
2167 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2168 TREE_TYPE (field), subclasses,
2169 (int_bit_position (field)
2170 + bit_offset) % 256);
2171 if (!num)
2172 return 0;
2173 for (i = 0; i < num; i++)
2174 {
2175 int pos =
2176 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2177 classes[i + pos] =
2178 merge_classes (subclasses[i], classes[i + pos]);
2179 }
2180 }
2181 }
2182 }
2183 }
2184 /* Arrays are handled as small records. */
2185 else if (TREE_CODE (type) == ARRAY_TYPE)
2186 {
2187 int num;
2188 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2189 TREE_TYPE (type), subclasses, bit_offset);
2190 if (!num)
2191 return 0;
2192
2193 /* The partial classes are now full classes. */
2194 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2195 subclasses[0] = X86_64_SSE_CLASS;
2196 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2197 subclasses[0] = X86_64_INTEGER_CLASS;
2198
2199 for (i = 0; i < words; i++)
2200 classes[i] = subclasses[i % num];
2201 }
2202 /* Unions are similar to RECORD_TYPE but offset is always 0. */
2203 else if (TREE_CODE (type) == UNION_TYPE
2204 || TREE_CODE (type) == QUAL_UNION_TYPE)
2205 {
2206 /* For C++ classes, first merge in the fields of the base classes. */
2207 if (TYPE_BINFO (type))
2208 {
2209 tree binfo, base_binfo;
2210 int basenum;
2211
2212 for (binfo = TYPE_BINFO (type), basenum = 0;
2213 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2214 {
2215 int num;
2216 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2217 tree type = BINFO_TYPE (base_binfo);
2218
2219 num = classify_argument (TYPE_MODE (type),
2220 type, subclasses,
2221 (offset + (bit_offset % 64)) % 256);
2222 if (!num)
2223 return 0;
2224 for (i = 0; i < num; i++)
2225 {
2226 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2227 classes[i + pos] =
2228 merge_classes (subclasses[i], classes[i + pos]);
2229 }
2230 }
2231 }
2232 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2233 {
2234 if (TREE_CODE (field) == FIELD_DECL)
2235 {
2236 int num;
2237 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2238 TREE_TYPE (field), subclasses,
2239 bit_offset);
2240 if (!num)
2241 return 0;
2242 for (i = 0; i < num; i++)
2243 classes[i] = merge_classes (subclasses[i], classes[i]);
2244 }
2245 }
2246 }
2247 else if (TREE_CODE (type) == SET_TYPE)
2248 {
2249 if (bytes <= 4)
2250 {
2251 classes[0] = X86_64_INTEGERSI_CLASS;
2252 return 1;
2253 }
2254 else if (bytes <= 8)
2255 {
2256 classes[0] = X86_64_INTEGER_CLASS;
2257 return 1;
2258 }
2259 else if (bytes <= 12)
2260 {
2261 classes[0] = X86_64_INTEGER_CLASS;
2262 classes[1] = X86_64_INTEGERSI_CLASS;
2263 return 2;
2264 }
2265 else
2266 {
2267 classes[0] = X86_64_INTEGER_CLASS;
2268 classes[1] = X86_64_INTEGER_CLASS;
2269 return 2;
2270 }
2271 }
2272 else
2273 abort ();
2274
2275 /* Final merger cleanup. */
2276 for (i = 0; i < words; i++)
2277 {
2278 /* If one class is MEMORY, everything should be passed in
2279 memory. */
2280 if (classes[i] == X86_64_MEMORY_CLASS)
2281 return 0;
2282
2283 /* The X86_64_SSEUP_CLASS should always be preceded by
2284 X86_64_SSE_CLASS. */
2285 if (classes[i] == X86_64_SSEUP_CLASS
2286 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2287 classes[i] = X86_64_SSE_CLASS;
2288
2289 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2290 if (classes[i] == X86_64_X87UP_CLASS
2291 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2292 classes[i] = X86_64_SSE_CLASS;
2293 }
2294 return words;
2295 }
2296
2297 /* Compute the alignment needed. We align all types to their natural
2298 boundaries, except XFmode, which is treated as aligned to 128 bits. */
2299 if (mode != VOIDmode && mode != BLKmode)
2300 {
2301 int mode_alignment = GET_MODE_BITSIZE (mode);
2302
2303 if (mode == XFmode)
2304 mode_alignment = 128;
2305 else if (mode == XCmode)
2306 mode_alignment = 256;
2307 if (COMPLEX_MODE_P (mode))
2308 mode_alignment /= 2;
2309 /* Misaligned fields are always returned in memory. */
2310 if (bit_offset % mode_alignment)
2311 return 0;
2312 }
2313
2314 /* For V1xx modes, just use the base mode. */
2315 if (VECTOR_MODE_P (mode)
2316 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2317 mode = GET_MODE_INNER (mode);
2318
2319 /* Classification of atomic types. */
2320 switch (mode)
2321 {
2322 case DImode:
2323 case SImode:
2324 case HImode:
2325 case QImode:
2326 case CSImode:
2327 case CHImode:
2328 case CQImode:
2329 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2330 classes[0] = X86_64_INTEGERSI_CLASS;
2331 else
2332 classes[0] = X86_64_INTEGER_CLASS;
2333 return 1;
2334 case CDImode:
2335 case TImode:
2336 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2337 return 2;
2338 case CTImode:
2339 return 0;
2340 case SFmode:
2341 if (!(bit_offset % 64))
2342 classes[0] = X86_64_SSESF_CLASS;
2343 else
2344 classes[0] = X86_64_SSE_CLASS;
2345 return 1;
2346 case DFmode:
2347 classes[0] = X86_64_SSEDF_CLASS;
2348 return 1;
2349 case XFmode:
2350 classes[0] = X86_64_X87_CLASS;
2351 classes[1] = X86_64_X87UP_CLASS;
2352 return 2;
2353 case TFmode:
2354 classes[0] = X86_64_SSE_CLASS;
2355 classes[1] = X86_64_SSEUP_CLASS;
2356 return 2;
2357 case SCmode:
2358 classes[0] = X86_64_SSE_CLASS;
2359 return 1;
2360 case DCmode:
2361 classes[0] = X86_64_SSEDF_CLASS;
2362 classes[1] = X86_64_SSEDF_CLASS;
2363 return 2;
2364 case XCmode:
2365 case TCmode:
2366 /* These modes are larger than 16 bytes. */
2367 return 0;
2368 case V4SFmode:
2369 case V4SImode:
2370 case V16QImode:
2371 case V8HImode:
2372 case V2DFmode:
2373 case V2DImode:
2374 classes[0] = X86_64_SSE_CLASS;
2375 classes[1] = X86_64_SSEUP_CLASS;
2376 return 2;
2377 case V2SFmode:
2378 case V2SImode:
2379 case V4HImode:
2380 case V8QImode:
2381 classes[0] = X86_64_SSE_CLASS;
2382 return 1;
2383 case BLKmode:
2384 case VOIDmode:
2385 return 0;
2386 default:
2387 if (VECTOR_MODE_P (mode))
2388 {
2389 if (bytes > 16)
2390 return 0;
2391 if (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT)
2392 {
2393 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2394 classes[0] = X86_64_INTEGERSI_CLASS;
2395 else
2396 classes[0] = X86_64_INTEGER_CLASS;
2397 classes[1] = X86_64_INTEGER_CLASS;
2398 return 1 + (bytes > 8);
2399 }
2400 }
2401 abort ();
2402 }
2403 }
2404
2405 /* Examine the argument and set the number of registers required in each
2406 class. Return 0 iff the parameter should be passed in memory. */
2407 static int
2408 examine_argument (enum machine_mode mode, tree type, int in_return,
2409 int *int_nregs, int *sse_nregs)
2410 {
2411 enum x86_64_reg_class class[MAX_CLASSES];
2412 int n = classify_argument (mode, type, class, 0);
2413
2414 *int_nregs = 0;
2415 *sse_nregs = 0;
2416 if (!n)
2417 return 0;
2418 for (n--; n >= 0; n--)
2419 switch (class[n])
2420 {
2421 case X86_64_INTEGER_CLASS:
2422 case X86_64_INTEGERSI_CLASS:
2423 (*int_nregs)++;
2424 break;
2425 case X86_64_SSE_CLASS:
2426 case X86_64_SSESF_CLASS:
2427 case X86_64_SSEDF_CLASS:
2428 (*sse_nregs)++;
2429 break;
2430 case X86_64_NO_CLASS:
2431 case X86_64_SSEUP_CLASS:
2432 break;
2433 case X86_64_X87_CLASS:
2434 case X86_64_X87UP_CLASS:
2435 if (!in_return)
2436 return 0;
2437 break;
2438 case X86_64_MEMORY_CLASS:
2439 abort ();
2440 }
2441 return 1;
2442 }
2443 /* Construct container for the argument used by GCC interface. See
2444 FUNCTION_ARG for the detailed description. */
2445 static rtx
2446 construct_container (enum machine_mode mode, tree type, int in_return,
2447 int nintregs, int nsseregs, const int * intreg,
2448 int sse_regno)
2449 {
2450 enum machine_mode tmpmode;
2451 int bytes =
2452 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2453 enum x86_64_reg_class class[MAX_CLASSES];
2454 int n;
2455 int i;
2456 int nexps = 0;
2457 int needed_sseregs, needed_intregs;
2458 rtx exp[MAX_CLASSES];
2459 rtx ret;
2460
2461 n = classify_argument (mode, type, class, 0);
2462 if (TARGET_DEBUG_ARG)
2463 {
2464 if (!n)
2465 fprintf (stderr, "Memory class\n");
2466 else
2467 {
2468 fprintf (stderr, "Classes:");
2469 for (i = 0; i < n; i++)
2470 {
2471 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2472 }
2473 fprintf (stderr, "\n");
2474 }
2475 }
2476 if (!n)
2477 return NULL;
2478 if (!examine_argument (mode, type, in_return, &needed_intregs, &needed_sseregs))
2479 return NULL;
2480 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2481 return NULL;
2482
2483 /* First construct simple cases. Avoid SCmode, since we want to use
2484 a single register to pass this type. */
2485 if (n == 1 && mode != SCmode)
2486 switch (class[0])
2487 {
2488 case X86_64_INTEGER_CLASS:
2489 case X86_64_INTEGERSI_CLASS:
2490 return gen_rtx_REG (mode, intreg[0]);
2491 case X86_64_SSE_CLASS:
2492 case X86_64_SSESF_CLASS:
2493 case X86_64_SSEDF_CLASS:
2494 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2495 case X86_64_X87_CLASS:
2496 return gen_rtx_REG (mode, FIRST_STACK_REG);
2497 case X86_64_NO_CLASS:
2498 /* Zero sized array, struct or class. */
2499 return NULL;
2500 default:
2501 abort ();
2502 }
2503 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2504 && mode != BLKmode)
2505 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2506 if (n == 2
2507 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2508 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2509 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2510 && class[1] == X86_64_INTEGER_CLASS
2511 && (mode == CDImode || mode == TImode || mode == TFmode)
2512 && intreg[0] + 1 == intreg[1])
2513 return gen_rtx_REG (mode, intreg[0]);
2514 if (n == 4
2515 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS
2516 && class[2] == X86_64_X87_CLASS && class[3] == X86_64_X87UP_CLASS
2517 && mode != BLKmode)
2518 return gen_rtx_REG (XCmode, FIRST_STACK_REG);
2519
2520 /* Otherwise figure out the entries of the PARALLEL. */
2521 for (i = 0; i < n; i++)
2522 {
2523 switch (class[i])
2524 {
2525 case X86_64_NO_CLASS:
2526 break;
2527 case X86_64_INTEGER_CLASS:
2528 case X86_64_INTEGERSI_CLASS:
2529 /* Merge TImodes on aligned occasions here too. */
2530 if (i * 8 + 8 > bytes)
2531 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2532 else if (class[i] == X86_64_INTEGERSI_CLASS)
2533 tmpmode = SImode;
2534 else
2535 tmpmode = DImode;
2536 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
2537 if (tmpmode == BLKmode)
2538 tmpmode = DImode;
2539 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2540 gen_rtx_REG (tmpmode, *intreg),
2541 GEN_INT (i*8));
2542 intreg++;
2543 break;
2544 case X86_64_SSESF_CLASS:
2545 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2546 gen_rtx_REG (SFmode,
2547 SSE_REGNO (sse_regno)),
2548 GEN_INT (i*8));
2549 sse_regno++;
2550 break;
2551 case X86_64_SSEDF_CLASS:
2552 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2553 gen_rtx_REG (DFmode,
2554 SSE_REGNO (sse_regno)),
2555 GEN_INT (i*8));
2556 sse_regno++;
2557 break;
2558 case X86_64_SSE_CLASS:
2559 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2560 tmpmode = TImode;
2561 else
2562 tmpmode = DImode;
2563 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2564 gen_rtx_REG (tmpmode,
2565 SSE_REGNO (sse_regno)),
2566 GEN_INT (i*8));
2567 if (tmpmode == TImode)
2568 i++;
2569 sse_regno++;
2570 break;
2571 default:
2572 abort ();
2573 }
2574 }
2575 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2576 for (i = 0; i < nexps; i++)
2577 XVECEXP (ret, 0, i) = exp [i];
2578 return ret;
2579 }
2580
2581 /* Update the data in CUM to advance over an argument
2582 of mode MODE and data type TYPE.
2583 (TYPE is null for libcalls where that information may not be available.) */
2584
2585 void
2586 function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */
2587 enum machine_mode mode, /* current arg mode */
2588 tree type, /* type of the argument or 0 if lib support */
2589 int named) /* whether or not the argument was named */
2590 {
2591 int bytes =
2592 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2593 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2594
2595 if (TARGET_DEBUG_ARG)
2596 fprintf (stderr,
2597 "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, mode=%s, named=%d)\n\n",
2598 words, cum->words, cum->nregs, cum->sse_nregs, GET_MODE_NAME (mode), named);
2599 if (TARGET_64BIT)
2600 {
2601 int int_nregs, sse_nregs;
2602 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
2603 cum->words += words;
2604 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
2605 {
2606 cum->nregs -= int_nregs;
2607 cum->sse_nregs -= sse_nregs;
2608 cum->regno += int_nregs;
2609 cum->sse_regno += sse_nregs;
2610 }
2611 else
2612 cum->words += words;
2613 }
2614 else
2615 {
2616 if (TARGET_SSE && SSE_REG_MODE_P (mode)
2617 && (!type || !AGGREGATE_TYPE_P (type)))
2618 {
2619 cum->sse_words += words;
2620 cum->sse_nregs -= 1;
2621 cum->sse_regno += 1;
2622 if (cum->sse_nregs <= 0)
2623 {
2624 cum->sse_nregs = 0;
2625 cum->sse_regno = 0;
2626 }
2627 }
2628 else if (TARGET_MMX && MMX_REG_MODE_P (mode)
2629 && (!type || !AGGREGATE_TYPE_P (type)))
2630 {
2631 cum->mmx_words += words;
2632 cum->mmx_nregs -= 1;
2633 cum->mmx_regno += 1;
2634 if (cum->mmx_nregs <= 0)
2635 {
2636 cum->mmx_nregs = 0;
2637 cum->mmx_regno = 0;
2638 }
2639 }
2640 else
2641 {
2642 cum->words += words;
2643 cum->nregs -= words;
2644 cum->regno += words;
2645
2646 if (cum->nregs <= 0)
2647 {
2648 cum->nregs = 0;
2649 cum->regno = 0;
2650 }
2651 }
2652 }
2653 return;
2654 }
2655
2656 /* Define where to put the arguments to a function.
2657 Value is zero to push the argument on the stack,
2658 or a hard register in which to store the argument.
2659
2660 MODE is the argument's machine mode.
2661 TYPE is the data type of the argument (as a tree).
2662 This is null for libcalls where that information may
2663 not be available.
2664 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2665 the preceding args and about the function being called.
2666 NAMED is nonzero if this argument is a named parameter
2667 (otherwise it is an extra parameter matching an ellipsis). */
2668
2669 rtx
2670 function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
2671 enum machine_mode mode, /* current arg mode */
2672 tree type, /* type of the argument or 0 if lib support */
2673 int named) /* != 0 for normal args, == 0 for ... args */
2674 {
2675 rtx ret = NULL_RTX;
2676 int bytes =
2677 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2678 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2679 static bool warnedsse, warnedmmx;
2680
2681 /* To simplify the code below, represent vector types with a vector mode
2682 even if MMX/SSE are not active. */
2683 if (type
2684 && TREE_CODE (type) == VECTOR_TYPE
2685 && (bytes == 8 || bytes == 16)
2686 && GET_MODE_CLASS (TYPE_MODE (type)) != MODE_VECTOR_INT
2687 && GET_MODE_CLASS (TYPE_MODE (type)) != MODE_VECTOR_FLOAT)
2688 {
2689 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2690 enum machine_mode newmode
2691 = TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
2692 ? MIN_MODE_VECTOR_FLOAT : MIN_MODE_VECTOR_INT;
2693
2694 /* Get the mode which has this inner mode and number of units. */
2695 for (; newmode != VOIDmode; newmode = GET_MODE_WIDER_MODE (newmode))
2696 if (GET_MODE_NUNITS (newmode) == TYPE_VECTOR_SUBPARTS (type)
2697 && GET_MODE_INNER (newmode) == innermode)
2698 {
2699 mode = newmode;
2700 break;
2701 }
2702 }
2703
2704 /* Handle a hidden AL argument containing the number of SSE registers used
2705 when calling a varargs x86-64 function. For the i386 ABI just return
2706 constm1_rtx to avoid any AL settings. */
2707 if (mode == VOIDmode)
2708 {
2709 if (TARGET_64BIT)
2710 return GEN_INT (cum->maybe_vaarg
2711 ? (cum->sse_nregs < 0
2712 ? SSE_REGPARM_MAX
2713 : cum->sse_regno)
2714 : -1);
2715 else
2716 return constm1_rtx;
2717 }
2718 if (TARGET_64BIT)
2719 ret = construct_container (mode, type, 0, cum->nregs, cum->sse_nregs,
2720 &x86_64_int_parameter_registers [cum->regno],
2721 cum->sse_regno);
2722 else
2723 switch (mode)
2724 {
2725 /* For now, pass fp/complex values on the stack. */
2726 default:
2727 break;
2728
2729 case BLKmode:
2730 if (bytes < 0)
2731 break;
2732 /* FALLTHRU */
2733 case DImode:
2734 case SImode:
2735 case HImode:
2736 case QImode:
2737 if (words <= cum->nregs)
2738 {
2739 int regno = cum->regno;
2740
2741 /* Fastcall allocates the first two DWORD (SImode) or
2742 smaller arguments to ECX and EDX. */
2743 if (cum->fastcall)
2744 {
2745 if (mode == BLKmode || mode == DImode)
2746 break;
2747
2748 /* ECX, not EAX, is the first allocated register. */
2749 if (regno == 0)
2750 regno = 2;
2751 }
2752 ret = gen_rtx_REG (mode, regno);
2753 }
2754 break;
2755 case TImode:
2756 case V16QImode:
2757 case V8HImode:
2758 case V4SImode:
2759 case V2DImode:
2760 case V4SFmode:
2761 case V2DFmode:
2762 if (!type || !AGGREGATE_TYPE_P (type))
2763 {
2764 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
2765 {
2766 warnedsse = true;
2767 warning ("SSE vector argument without SSE enabled "
2768 "changes the ABI");
2769 }
2770 if (cum->sse_nregs)
2771 ret = gen_rtx_REG (mode, cum->sse_regno + FIRST_SSE_REG);
2772 }
2773 break;
2774 case V8QImode:
2775 case V4HImode:
2776 case V2SImode:
2777 case V2SFmode:
2778 if (!type || !AGGREGATE_TYPE_P (type))
2779 {
2780 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
2781 {
2782 warnedmmx = true;
2783 warning ("MMX vector argument without MMX enabled "
2784 "changes the ABI");
2785 }
2786 if (cum->mmx_nregs)
2787 ret = gen_rtx_REG (mode, cum->mmx_regno + FIRST_MMX_REG);
2788 }
2789 break;
2790 }
2791
2792 if (TARGET_DEBUG_ARG)
2793 {
2794 fprintf (stderr,
2795 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
2796 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
2797
2798 if (ret)
2799 print_simple_rtl (stderr, ret);
2800 else
2801 fprintf (stderr, ", stack");
2802
2803 fprintf (stderr, " )\n");
2804 }
2805
2806 return ret;
2807 }
2808
2809 /* A C expression that indicates when an argument must be passed by
2810 reference. If nonzero for an argument, a copy of that argument is
2811 made in memory and a pointer to the argument is passed instead of
2812 the argument itself. The pointer is passed in whatever way is
2813 appropriate for passing a pointer to that type. */
2814
2815 static bool
2816 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2817 enum machine_mode mode ATTRIBUTE_UNUSED,
2818 tree type, bool named ATTRIBUTE_UNUSED)
2819 {
2820 if (!TARGET_64BIT)
2821 return 0;
2822
2823 if (type && int_size_in_bytes (type) == -1)
2824 {
2825 if (TARGET_DEBUG_ARG)
2826 fprintf (stderr, "function_arg_pass_by_reference\n");
2827 return 1;
2828 }
2829
2830 return 0;
2831 }
2832
2833 /* Return true when TYPE should be 128-bit aligned for the 32-bit argument
2834 passing ABI. Only called if TARGET_SSE. */
2835 static bool
2836 contains_128bit_aligned_vector_p (tree type)
2837 {
2838 enum machine_mode mode = TYPE_MODE (type);
2839 if (SSE_REG_MODE_P (mode)
2840 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
2841 return true;
2842 if (TYPE_ALIGN (type) < 128)
2843 return false;
2844
2845 if (AGGREGATE_TYPE_P (type))
2846 {
2847 /* Walk the aggregates recursively. */
2848 if (TREE_CODE (type) == RECORD_TYPE
2849 || TREE_CODE (type) == UNION_TYPE
2850 || TREE_CODE (type) == QUAL_UNION_TYPE)
2851 {
2852 tree field;
2853
2854 if (TYPE_BINFO (type))
2855 {
2856 tree binfo, base_binfo;
2857 int i;
2858
2859 for (binfo = TYPE_BINFO (type), i = 0;
2860 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2861 if (contains_128bit_aligned_vector_p (BINFO_TYPE (base_binfo)))
2862 return true;
2863 }
2864 /* And now merge the fields of the structure. */
2865 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2866 {
2867 if (TREE_CODE (field) == FIELD_DECL
2868 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
2869 return true;
2870 }
2871 }
2872 /* Just for use in case some language passes arrays by value. */
2873 else if (TREE_CODE (type) == ARRAY_TYPE)
2874 {
2875 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
2876 return true;
2877 }
2878 else
2879 abort ();
2880 }
2881 return false;
2882 }
2883
2884 /* Gives the alignment boundary, in bits, of an argument with the
2885 specified mode and type. */
2886
2887 int
2888 ix86_function_arg_boundary (enum machine_mode mode, tree type)
2889 {
2890 int align;
2891 if (type)
2892 align = TYPE_ALIGN (type);
2893 else
2894 align = GET_MODE_ALIGNMENT (mode);
2895 if (align < PARM_BOUNDARY)
2896 align = PARM_BOUNDARY;
2897 if (!TARGET_64BIT)
2898 {
2899 /* The i386 ABI defines all arguments to be 4-byte aligned. We have to
2900 make an exception for SSE modes since these require 128-bit
2901 alignment.
2902
2903 The handling here differs from field_alignment. ICC aligns MMX
2904 arguments to 4-byte boundaries, while structure fields are aligned
2905 to 8-byte boundaries. */
2906 if (!TARGET_SSE)
2907 align = PARM_BOUNDARY;
2908 else if (!type)
2909 {
2910 if (!SSE_REG_MODE_P (mode))
2911 align = PARM_BOUNDARY;
2912 }
2913 else
2914 {
2915 if (!contains_128bit_aligned_vector_p (type))
2916 align = PARM_BOUNDARY;
2917 }
2918 }
2919 if (align > 128)
2920 align = 128;
2921 return align;
2922 }
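/* Worked examples (illustrative sketch, not part of the original file):
   on the 32-bit ABI a plain `int' argument gets PARM_BOUNDARY (32 bits),
   while an `__m128' argument, whose type is 128-bit aligned and has an SSE
   mode, keeps its 128-bit boundary.  On the 64-bit ABI the natural
   alignment is used, raised to at least PARM_BOUNDARY and capped at
   128 bits.  */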
2923
2924 /* Return true if N is a possible register number of function value. */
2925 bool
2926 ix86_function_value_regno_p (int regno)
2927 {
2928 if (!TARGET_64BIT)
2929 {
2930 return ((regno) == 0
2931 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
2932 || ((regno) == FIRST_SSE_REG && TARGET_SSE));
2933 }
2934 return ((regno) == 0 || (regno) == FIRST_FLOAT_REG
2935 || ((regno) == FIRST_SSE_REG && TARGET_SSE)
2936 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387));
2937 }
2938
2939 /* Define how to find the value returned by a function.
2940 VALTYPE is the data type of the value (as a tree).
2941 If the precise function being called is known, FUNC is its FUNCTION_DECL;
2942 otherwise, FUNC is 0. */
2943 rtx
2944 ix86_function_value (tree valtype)
2945 {
2946 if (TARGET_64BIT)
2947 {
2948 rtx ret = construct_container (TYPE_MODE (valtype), valtype, 1,
2949 REGPARM_MAX, SSE_REGPARM_MAX,
2950 x86_64_int_return_registers, 0);
2951 /* For zero-sized structures, construct_container returns NULL, but we need
2952 to keep the rest of the compiler happy by returning a meaningful value. */
2953 if (!ret)
2954 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
2955 return ret;
2956 }
2957 else
2958 return gen_rtx_REG (TYPE_MODE (valtype),
2959 ix86_value_regno (TYPE_MODE (valtype)));
2960 }
2961
2962 /* Return nonzero iff TYPE is returned in memory. */
2963 int
2964 ix86_return_in_memory (tree type)
2965 {
2966 int needed_intregs, needed_sseregs, size;
2967 enum machine_mode mode = TYPE_MODE (type);
2968
2969 if (TARGET_64BIT)
2970 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
2971
2972 if (mode == BLKmode)
2973 return 1;
2974
2975 size = int_size_in_bytes (type);
2976
2977 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
2978 return 0;
2979
2980 if (VECTOR_MODE_P (mode) || mode == TImode)
2981 {
2982 /* User-created vectors small enough to fit in EAX. */
2983 if (size < 8)
2984 return 0;
2985
2986 /* MMX/3dNow values are returned on the stack, since we've
2987 got to EMMS/FEMMS before returning. */
2988 if (size == 8)
2989 return 1;
2990
2991 /* SSE values are returned in XMM0, except when it doesn't exist. */
2992 if (size == 16)
2993 return (TARGET_SSE ? 0 : 1);
2994 }
2995
2996 if (mode == XFmode)
2997 return 0;
2998
2999 if (size > 12)
3000 return 1;
3001 return 0;
3002 }
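/* Worked examples of the 32-bit rules above (illustrative sketch, not part
   of the original file): a 16-byte `struct { int a, b, c, d; }' is
   returned in memory (size > 12); an 8-byte MMX vector such as `__m64' is
   returned in memory so that EMMS can be executed before returning; a
   16-byte `__m128' is returned in %xmm0 when SSE is enabled; `long double'
   (XFmode) is returned in %st(0).  */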
3003
3004 /* When returning SSE vector types, we have a choice of either
3005 (1) being abi incompatible with a -march switch, or
3006 (2) generating an error.
3007 Given no good solution, I think the safest thing is one warning.
3008 The user won't be able to use -Werror, but....
3009
3010 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3011 called in response to actually generating a caller or callee that
3012 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3013 via aggregate_value_p for general type probing from tree-ssa. */
3014
3015 static rtx
3016 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3017 {
3018 static bool warned;
3019
3020 if (!TARGET_SSE && type && !warned)
3021 {
3022 /* Look at the return type of the function, not the function type. */
3023 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3024
3025 if (mode == TImode
3026 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3027 {
3028 warned = true;
3029 warning ("SSE vector return without SSE enabled changes the ABI");
3030 }
3031 }
3032
3033 return NULL;
3034 }
3035
3036 /* Define how to find the value returned by a library function
3037 assuming the value has mode MODE. */
3038 rtx
3039 ix86_libcall_value (enum machine_mode mode)
3040 {
3041 if (TARGET_64BIT)
3042 {
3043 switch (mode)
3044 {
3045 case SFmode:
3046 case SCmode:
3047 case DFmode:
3048 case DCmode:
3049 case TFmode:
3050 return gen_rtx_REG (mode, FIRST_SSE_REG);
3051 case XFmode:
3052 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3053 case XCmode:
3054 case TCmode:
3055 return NULL;
3056 default:
3057 return gen_rtx_REG (mode, 0);
3058 }
3059 }
3060 else
3061 return gen_rtx_REG (mode, ix86_value_regno (mode));
3062 }
3063
3064 /* Given a mode, return the register to use for a return value. */
3065
3066 static int
3067 ix86_value_regno (enum machine_mode mode)
3068 {
3069 /* Floating point return values in %st(0). */
3070 if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387)
3071 return FIRST_FLOAT_REG;
3072 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3073 we prevent this case when sse is not available. */
3074 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3075 return FIRST_SSE_REG;
3076 /* Everything else in %eax. */
3077 return 0;
3078 }
3079 \f
3080 /* Create the va_list data type. */
3081
3082 static tree
3083 ix86_build_builtin_va_list (void)
3084 {
3085 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3086
3087 /* For i386 we use a plain pointer to the argument area. */
3088 if (!TARGET_64BIT)
3089 return build_pointer_type (char_type_node);
3090
3091 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3092 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3093
3094 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3095 unsigned_type_node);
3096 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3097 unsigned_type_node);
3098 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3099 ptr_type_node);
3100 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3101 ptr_type_node);
3102
3103 DECL_FIELD_CONTEXT (f_gpr) = record;
3104 DECL_FIELD_CONTEXT (f_fpr) = record;
3105 DECL_FIELD_CONTEXT (f_ovf) = record;
3106 DECL_FIELD_CONTEXT (f_sav) = record;
3107
3108 TREE_CHAIN (record) = type_decl;
3109 TYPE_NAME (record) = type_decl;
3110 TYPE_FIELDS (record) = f_gpr;
3111 TREE_CHAIN (f_gpr) = f_fpr;
3112 TREE_CHAIN (f_fpr) = f_ovf;
3113 TREE_CHAIN (f_ovf) = f_sav;
3114
3115 layout_type (record);
3116
3117 /* The correct type is an array type of one element. */
3118 return build_array_type (record, build_index_type (size_zero_node));
3119 }
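/* Illustrative note (a sketch, not part of the original file): the record
   built above corresponds roughly to the x86-64 psABI declaration

     typedef struct {
       unsigned int gp_offset;
       unsigned int fp_offset;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag;
     typedef __va_list_tag va_list[1];

   The one-element array type makes va_list decay to a pointer when passed
   to another function.  */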
3120
3121 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3122
3123 static void
3124 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3125 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3126 int no_rtl)
3127 {
3128 CUMULATIVE_ARGS next_cum;
3129 rtx save_area = NULL_RTX, mem;
3130 rtx label;
3131 rtx label_ref;
3132 rtx tmp_reg;
3133 rtx nsse_reg;
3134 int set;
3135 tree fntype;
3136 int stdarg_p;
3137 int i;
3138
3139 if (!TARGET_64BIT)
3140 return;
3141
3142 /* Indicate that we need to allocate space on the stack for the varargs save area. */
3143 ix86_save_varrargs_registers = 1;
3144
3145 cfun->stack_alignment_needed = 128;
3146
3147 fntype = TREE_TYPE (current_function_decl);
3148 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3149 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3150 != void_type_node));
3151
3152 /* For varargs, we do not want to skip the dummy va_dcl argument.
3153 For stdargs, we do want to skip the last named argument. */
3154 next_cum = *cum;
3155 if (stdarg_p)
3156 function_arg_advance (&next_cum, mode, type, 1);
3157
3158 if (!no_rtl)
3159 save_area = frame_pointer_rtx;
3160
3161 set = get_varargs_alias_set ();
3162
3163 for (i = next_cum.regno; i < ix86_regparm; i++)
3164 {
3165 mem = gen_rtx_MEM (Pmode,
3166 plus_constant (save_area, i * UNITS_PER_WORD));
3167 set_mem_alias_set (mem, set);
3168 emit_move_insn (mem, gen_rtx_REG (Pmode,
3169 x86_64_int_parameter_registers[i]));
3170 }
3171
3172 if (next_cum.sse_nregs)
3173 {
3174 /* Now emit code to save SSE registers. The AX parameter contains the
3175 number of SSE parameter registers used to call this function. We use
3176 the sse_prologue_save insn template, which produces a computed jump
3177 across the SSE saves. We need some preparation work to get this working. */
3178
3179 label = gen_label_rtx ();
3180 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3181
3182 /* Compute the address to jump to:
3183 label - eax*4 + nnamed_sse_arguments*4. */
3184 tmp_reg = gen_reg_rtx (Pmode);
3185 nsse_reg = gen_reg_rtx (Pmode);
3186 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3187 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3188 gen_rtx_MULT (Pmode, nsse_reg,
3189 GEN_INT (4))));
3190 if (next_cum.sse_regno)
3191 emit_move_insn
3192 (nsse_reg,
3193 gen_rtx_CONST (DImode,
3194 gen_rtx_PLUS (DImode,
3195 label_ref,
3196 GEN_INT (next_cum.sse_regno * 4))));
3197 else
3198 emit_move_insn (nsse_reg, label_ref);
3199 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3200
3201 /* Compute the address of the memory block we save into. We always use a
3202 pointer pointing 127 bytes after the first byte to store - this is needed
3203 to keep the instruction size limited to 4 bytes. */
3204 tmp_reg = gen_reg_rtx (Pmode);
3205 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3206 plus_constant (save_area,
3207 8 * REGPARM_MAX + 127)));
3208 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3209 set_mem_alias_set (mem, set);
3210 set_mem_align (mem, BITS_PER_WORD);
3211
3212 /* And finally do the dirty job! */
3213 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3214 GEN_INT (next_cum.sse_regno), label));
3215 }
3216
3217 }
3218
3219 /* Implement va_start. */
3220
3221 void
3222 ix86_va_start (tree valist, rtx nextarg)
3223 {
3224 HOST_WIDE_INT words, n_gpr, n_fpr;
3225 tree f_gpr, f_fpr, f_ovf, f_sav;
3226 tree gpr, fpr, ovf, sav, t;
3227
3228 /* Only 64bit target needs something special. */
3229 if (!TARGET_64BIT)
3230 {
3231 std_expand_builtin_va_start (valist, nextarg);
3232 return;
3233 }
3234
3235 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3236 f_fpr = TREE_CHAIN (f_gpr);
3237 f_ovf = TREE_CHAIN (f_fpr);
3238 f_sav = TREE_CHAIN (f_ovf);
3239
3240 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3241 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3242 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3243 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3244 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3245
3246 /* Count number of gp and fp argument registers used. */
3247 words = current_function_args_info.words;
3248 n_gpr = current_function_args_info.regno;
3249 n_fpr = current_function_args_info.sse_regno;
3250
3251 if (TARGET_DEBUG_ARG)
3252 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3253 (int) words, (int) n_gpr, (int) n_fpr);
3254
3255 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3256 build_int_cst (NULL_TREE, n_gpr * 8));
3257 TREE_SIDE_EFFECTS (t) = 1;
3258 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3259
3260 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3261 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3262 TREE_SIDE_EFFECTS (t) = 1;
3263 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3264
3265 /* Find the overflow area. */
3266 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3267 if (words != 0)
3268 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3269 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3270 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3271 TREE_SIDE_EFFECTS (t) = 1;
3272 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3273
3274 /* Find the register save area.
3275 The prologue of the function saves it right above the stack frame. */
3276 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3277 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3278 TREE_SIDE_EFFECTS (t) = 1;
3279 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3280 }
3281
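/* Illustrative sketch only, not part of GCC: the va_list layout that the
   field accesses above (gpr, fpr, ovf, sav) correspond to, following the
   x86-64 psABI description.  The names below are hypothetical; the actual
   type is built elsewhere in this file.  gp_offset counts bytes of GP
   register slots (8 each), fp_offset starts at 8 * REGPARM_MAX (48 on
   x86-64) and advances by 16 per SSE slot, as in ix86_va_start above.  */
#if 0
typedef struct
{
  unsigned int gp_offset;	/* next GP register slot in reg_save_area  */
  unsigned int fp_offset;	/* next SSE register slot in reg_save_area  */
  void *overflow_arg_area;	/* stack arguments past the named ones  */
  void *reg_save_area;		/* block holding the incoming register args  */
} example_va_list_elem;
#endif
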
3282 /* Implement va_arg. */
3283
3284 tree
3285 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3286 {
3287 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3288 tree f_gpr, f_fpr, f_ovf, f_sav;
3289 tree gpr, fpr, ovf, sav, t;
3290 int size, rsize;
3291 tree lab_false, lab_over = NULL_TREE;
3292 tree addr, t2;
3293 rtx container;
3294 int indirect_p = 0;
3295 tree ptrtype;
3296
3297 /* Only 64bit target needs something special. */
3298 if (!TARGET_64BIT)
3299 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3300
3301 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3302 f_fpr = TREE_CHAIN (f_gpr);
3303 f_ovf = TREE_CHAIN (f_fpr);
3304 f_sav = TREE_CHAIN (f_ovf);
3305
3306 valist = build_va_arg_indirect_ref (valist);
3307 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3308 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3309 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3310 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3311
3312 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3313 if (indirect_p)
3314 type = build_pointer_type (type);
3315 size = int_size_in_bytes (type);
3316 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3317
3318 container = construct_container (TYPE_MODE (type), type, 0,
3319 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3320 /*
3321 * Pull the value out of the saved registers ...
3322 */
3323
3324 addr = create_tmp_var (ptr_type_node, "addr");
3325 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3326
3327 if (container)
3328 {
3329 int needed_intregs, needed_sseregs;
3330 bool need_temp;
3331 tree int_addr, sse_addr;
3332
3333 lab_false = create_artificial_label ();
3334 lab_over = create_artificial_label ();
3335
3336 examine_argument (TYPE_MODE (type), type, 0,
3337 &needed_intregs, &needed_sseregs);
3338
3339 need_temp = (!REG_P (container)
3340 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3341 || TYPE_ALIGN (type) > 128));
3342
3343 /* If we are passing a structure, verify that it forms a consecutive block
3344 in the register save area. If not, we need to do moves. */
3345 if (!need_temp && !REG_P (container))
3346 {
3347 /* Verify that all registers are strictly consecutive */
3348 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3349 {
3350 int i;
3351
3352 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3353 {
3354 rtx slot = XVECEXP (container, 0, i);
3355 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3356 || INTVAL (XEXP (slot, 1)) != i * 16)
3357 need_temp = 1;
3358 }
3359 }
3360 else
3361 {
3362 int i;
3363
3364 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3365 {
3366 rtx slot = XVECEXP (container, 0, i);
3367 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3368 || INTVAL (XEXP (slot, 1)) != i * 8)
3369 need_temp = 1;
3370 }
3371 }
3372 }
3373 if (!need_temp)
3374 {
3375 int_addr = addr;
3376 sse_addr = addr;
3377 }
3378 else
3379 {
3380 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3381 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3382 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3383 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3384 }
3385 /* First ensure that we fit completely in registers. */
3386 if (needed_intregs)
3387 {
3388 t = build_int_cst (TREE_TYPE (gpr),
3389 (REGPARM_MAX - needed_intregs + 1) * 8);
3390 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3391 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3392 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3393 gimplify_and_add (t, pre_p);
3394 }
3395 if (needed_sseregs)
3396 {
3397 t = build_int_cst (TREE_TYPE (fpr),
3398 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3399 + REGPARM_MAX * 8);
3400 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3401 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3402 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3403 gimplify_and_add (t, pre_p);
3404 }
3405
3406 /* Compute index to start of area used for integer regs. */
3407 if (needed_intregs)
3408 {
3409 /* int_addr = gpr + sav; */
3410 t = build2 (PLUS_EXPR, ptr_type_node, sav, gpr);
3411 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3412 gimplify_and_add (t, pre_p);
3413 }
3414 if (needed_sseregs)
3415 {
3416 /* sse_addr = fpr + sav; */
3417 t = build2 (PLUS_EXPR, ptr_type_node, sav, fpr);
3418 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3419 gimplify_and_add (t, pre_p);
3420 }
3421 if (need_temp)
3422 {
3423 int i;
3424 tree temp = create_tmp_var (type, "va_arg_tmp");
3425
3426 /* addr = &temp; */
3427 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3428 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3429 gimplify_and_add (t, pre_p);
3430
3431 for (i = 0; i < XVECLEN (container, 0); i++)
3432 {
3433 rtx slot = XVECEXP (container, 0, i);
3434 rtx reg = XEXP (slot, 0);
3435 enum machine_mode mode = GET_MODE (reg);
3436 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3437 tree addr_type = build_pointer_type (piece_type);
3438 tree src_addr, src;
3439 int src_offset;
3440 tree dest_addr, dest;
3441
3442 if (SSE_REGNO_P (REGNO (reg)))
3443 {
3444 src_addr = sse_addr;
3445 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3446 }
3447 else
3448 {
3449 src_addr = int_addr;
3450 src_offset = REGNO (reg) * 8;
3451 }
3452 src_addr = fold_convert (addr_type, src_addr);
3453 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
3454 size_int (src_offset)));
3455 src = build_va_arg_indirect_ref (src_addr);
3456
3457 dest_addr = fold_convert (addr_type, addr);
3458 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
3459 size_int (INTVAL (XEXP (slot, 1)))));
3460 dest = build_va_arg_indirect_ref (dest_addr);
3461
3462 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
3463 gimplify_and_add (t, pre_p);
3464 }
3465 }
3466
3467 if (needed_intregs)
3468 {
3469 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3470 build_int_cst (NULL_TREE, needed_intregs * 8));
3471 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3472 gimplify_and_add (t, pre_p);
3473 }
3474 if (needed_sseregs)
3475 {
3476 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3477 build_int_cst (NULL_TREE, needed_sseregs * 16));
3478 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3479 gimplify_and_add (t, pre_p);
3480 }
3481
3482 t = build1 (GOTO_EXPR, void_type_node, lab_over);
3483 gimplify_and_add (t, pre_p);
3484
3485 t = build1 (LABEL_EXPR, void_type_node, lab_false);
3486 append_to_statement_list (t, pre_p);
3487 }
3488
3489 /* ... otherwise out of the overflow area. */
3490
3491 /* Care for on-stack alignment if needed. */
3492 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
3493 t = ovf;
3494 else
3495 {
3496 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
3497 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
3498 build_int_cst (NULL_TREE, align - 1));
3499 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3500 build_int_cst (NULL_TREE, -align));
3501 }
3502 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
3503
3504 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
3505 gimplify_and_add (t2, pre_p);
3506
3507 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
3508 build_int_cst (NULL_TREE, rsize * UNITS_PER_WORD));
3509 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3510 gimplify_and_add (t, pre_p);
3511
3512 if (container)
3513 {
3514 t = build1 (LABEL_EXPR, void_type_node, lab_over);
3515 append_to_statement_list (t, pre_p);
3516 }
3517
3518 ptrtype = build_pointer_type (type);
3519 addr = fold_convert (ptrtype, addr);
3520
3521 if (indirect_p)
3522 addr = build_va_arg_indirect_ref (addr);
3523 return build_va_arg_indirect_ref (addr);
3524 }
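/* Illustrative sketch only, not part of GCC: the run-time behavior that the
   gimplified sequence above implements for a plain integer argument (one GP
   register, no SSE part).  The names are hypothetical; 48 corresponds to the
   threshold (REGPARM_MAX - needed_intregs + 1) * 8 computed above, assuming
   REGPARM_MAX is 6 and needed_intregs is 1.  */
#if 0
static void *
example_int_va_arg (unsigned int *gp_offset, void **overflow_arg_area,
		    void *reg_save_area)
{
  void *addr;

  if (*gp_offset < 48)
    {
      /* The argument still fits in the register save area.  */
      addr = (char *) reg_save_area + *gp_offset;
      *gp_offset += 8;
    }
  else
    {
      /* Otherwise take it from the overflow (stack) area.  */
      addr = *overflow_arg_area;
      *overflow_arg_area = (char *) *overflow_arg_area + 8;
    }
  return addr;
}
#endif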
3525 \f
3526 /* Return nonzero if OPNUM's MEM should be matched
3527 in movabs* patterns. */
3528
3529 int
3530 ix86_check_movabs (rtx insn, int opnum)
3531 {
3532 rtx set, mem;
3533
3534 set = PATTERN (insn);
3535 if (GET_CODE (set) == PARALLEL)
3536 set = XVECEXP (set, 0, 0);
3537 if (GET_CODE (set) != SET)
3538 abort ();
3539 mem = XEXP (set, opnum);
3540 while (GET_CODE (mem) == SUBREG)
3541 mem = SUBREG_REG (mem);
3542 if (GET_CODE (mem) != MEM)
3543 abort ();
3544 return (volatile_ok || !MEM_VOLATILE_P (mem));
3545 }
3546 \f
3547 /* Initialize the table of extra 80387 mathematical constants. */
3548
3549 static void
3550 init_ext_80387_constants (void)
3551 {
3552 static const char * cst[5] =
3553 {
3554 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
3555 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
3556 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
3557 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
3558 "3.1415926535897932385128089594061862044", /* 4: fldpi */
3559 };
3560 int i;
3561
3562 for (i = 0; i < 5; i++)
3563 {
3564 real_from_string (&ext_80387_constants_table[i], cst[i]);
3565 /* Ensure each constant is rounded to XFmode precision. */
3566 real_convert (&ext_80387_constants_table[i],
3567 XFmode, &ext_80387_constants_table[i]);
3568 }
3569
3570 ext_80387_constants_init = 1;
3571 }
3572
3573 /* Return true if the constant is something that can be loaded with
3574 a special instruction. */
3575
3576 int
3577 standard_80387_constant_p (rtx x)
3578 {
3579 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
3580 return -1;
3581
3582 if (x == CONST0_RTX (GET_MODE (x)))
3583 return 1;
3584 if (x == CONST1_RTX (GET_MODE (x)))
3585 return 2;
3586
3587 /* For XFmode constants, try to find a special 80387 instruction when
3588 optimizing for size or on those CPUs that benefit from them. */
3589 if (GET_MODE (x) == XFmode
3590 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
3591 {
3592 REAL_VALUE_TYPE r;
3593 int i;
3594
3595 if (! ext_80387_constants_init)
3596 init_ext_80387_constants ();
3597
3598 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3599 for (i = 0; i < 5; i++)
3600 if (real_identical (&r, &ext_80387_constants_table[i]))
3601 return i + 3;
3602 }
3603
3604 return 0;
3605 }
3606
3607 /* Return the opcode of the special instruction to be used to load
3608 the constant X. */
3609
3610 const char *
3611 standard_80387_constant_opcode (rtx x)
3612 {
3613 switch (standard_80387_constant_p (x))
3614 {
3615 case 1:
3616 return "fldz";
3617 case 2:
3618 return "fld1";
3619 case 3:
3620 return "fldlg2";
3621 case 4:
3622 return "fldln2";
3623 case 5:
3624 return "fldl2e";
3625 case 6:
3626 return "fldl2t";
3627 case 7:
3628 return "fldpi";
3629 }
3630 abort ();
3631 }
3632
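/* Illustrative mapping only, not part of GCC: how the return values of
   standard_80387_constant_p line up with the opcodes above and with the
   constants in init_ext_80387_constants (indices 3..7 map to table
   entries 0..4).  */
#if 0
static const char *const example_fp_load_insn[] =
{
  /* 1 */ "fldz",	/* 0.0 */
  /* 2 */ "fld1",	/* 1.0 */
  /* 3 */ "fldlg2",	/* log10(2) */
  /* 4 */ "fldln2",	/* ln(2) */
  /* 5 */ "fldl2e",	/* log2(e) */
  /* 6 */ "fldl2t",	/* log2(10) */
  /* 7 */ "fldpi",	/* pi */
};
#endif
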
3633 /* Return the CONST_DOUBLE representing the 80387 constant that is
3634 loaded by the specified special instruction. The argument IDX
3635 matches the return value from standard_80387_constant_p. */
3636
3637 rtx
3638 standard_80387_constant_rtx (int idx)
3639 {
3640 int i;
3641
3642 if (! ext_80387_constants_init)
3643 init_ext_80387_constants ();
3644
3645 switch (idx)
3646 {
3647 case 3:
3648 case 4:
3649 case 5:
3650 case 6:
3651 case 7:
3652 i = idx - 3;
3653 break;
3654
3655 default:
3656 abort ();
3657 }
3658
3659 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
3660 XFmode);
3661 }
3662
3663 /* Return 1 if X is an FP constant we can load into an SSE register
3664 without using memory. */
3665 int
3666 standard_sse_constant_p (rtx x)
3667 {
3668 if (x == const0_rtx)
3669 return 1;
3670 return (x == CONST0_RTX (GET_MODE (x)));
3671 }
3672
3673 /* Returns 1 if OP contains a symbol reference */
3674
3675 int
3676 symbolic_reference_mentioned_p (rtx op)
3677 {
3678 const char *fmt;
3679 int i;
3680
3681 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3682 return 1;
3683
3684 fmt = GET_RTX_FORMAT (GET_CODE (op));
3685 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3686 {
3687 if (fmt[i] == 'E')
3688 {
3689 int j;
3690
3691 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3692 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3693 return 1;
3694 }
3695
3696 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3697 return 1;
3698 }
3699
3700 return 0;
3701 }
3702
3703 /* Return 1 if it is appropriate to emit `ret' instructions in the
3704 body of a function. Do this only if the epilogue is simple, needing a
3705 couple of insns. Prior to reloading, we can't tell how many registers
3706 must be saved, so return 0 then. Return 0 if there is no frame
3707 marker to de-allocate.
3708
3709 If NON_SAVING_SETJMP is defined and true, then it is not possible
3710 for the epilogue to be simple, so return 0. This is a special case
3711 since NON_SAVING_SETJMP will not cause regs_ever_live to change
3712 until final, but jump_optimize may need to know sooner if a
3713 `return' is OK. */
3714
3715 int
3716 ix86_can_use_return_insn_p (void)
3717 {
3718 struct ix86_frame frame;
3719
3720 #ifdef NON_SAVING_SETJMP
3721 if (NON_SAVING_SETJMP && current_function_calls_setjmp)
3722 return 0;
3723 #endif
3724
3725 if (! reload_completed || frame_pointer_needed)
3726 return 0;
3727
3728 /* Don't allow more than 32768 bytes of pop, since that's all we can do
3729 with one instruction. */
3730 if (current_function_pops_args
3731 && current_function_args_size >= 32768)
3732 return 0;
3733
3734 ix86_compute_frame_layout (&frame);
3735 return frame.to_allocate == 0 && frame.nregs == 0;
3736 }
3737 \f
3738 /* Value should be nonzero if functions must have frame pointers.
3739 Zero means the frame pointer need not be set up (and parms may
3740 be accessed via the stack pointer) in functions that seem suitable. */
3741
3742 int
3743 ix86_frame_pointer_required (void)
3744 {
3745 /* If we accessed previous frames, then the generated code expects
3746 to be able to access the saved ebp value in our frame. */
3747 if (cfun->machine->accesses_prev_frame)
3748 return 1;
3749
3750 /* Several x86 OSes need a frame pointer for other reasons,
3751 usually pertaining to setjmp. */
3752 if (SUBTARGET_FRAME_POINTER_REQUIRED)
3753 return 1;
3754
3755 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
3756 the frame pointer by default. Turn it back on now if we've not
3757 got a leaf function. */
3758 if (TARGET_OMIT_LEAF_FRAME_POINTER
3759 && (!current_function_is_leaf))
3760 return 1;
3761
3762 if (current_function_profile)
3763 return 1;
3764
3765 return 0;
3766 }
3767
3768 /* Record that the current function accesses previous call frames. */
3769
3770 void
3771 ix86_setup_frame_addresses (void)
3772 {
3773 cfun->machine->accesses_prev_frame = 1;
3774 }
3775 \f
3776 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
3777 # define USE_HIDDEN_LINKONCE 1
3778 #else
3779 # define USE_HIDDEN_LINKONCE 0
3780 #endif
3781
3782 static int pic_labels_used;
3783
3784 /* Fills in the label name that should be used for a pc thunk for
3785 the given register. */
3786
3787 static void
3788 get_pc_thunk_name (char name[32], unsigned int regno)
3789 {
3790 if (USE_HIDDEN_LINKONCE)
3791 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
3792 else
3793 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
3794 }
3795
3796
3797 /* This function emits the pc thunks used for -fpic: each thunk loads its
3798 register with the return address of the caller and then returns. */
3799
3800 void
3801 ix86_file_end (void)
3802 {
3803 rtx xops[2];
3804 int regno;
3805
3806 for (regno = 0; regno < 8; ++regno)
3807 {
3808 char name[32];
3809
3810 if (! ((pic_labels_used >> regno) & 1))
3811 continue;
3812
3813 get_pc_thunk_name (name, regno);
3814
3815 if (USE_HIDDEN_LINKONCE)
3816 {
3817 tree decl;
3818
3819 decl = build_decl (FUNCTION_DECL, get_identifier (name),
3820 error_mark_node);
3821 TREE_PUBLIC (decl) = 1;
3822 TREE_STATIC (decl) = 1;
3823 DECL_ONE_ONLY (decl) = 1;
3824
3825 (*targetm.asm_out.unique_section) (decl, 0);
3826 named_section (decl, NULL, 0);
3827
3828 (*targetm.asm_out.globalize_label) (asm_out_file, name);
3829 fputs ("\t.hidden\t", asm_out_file);
3830 assemble_name (asm_out_file, name);
3831 fputc ('\n', asm_out_file);
3832 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
3833 }
3834 else
3835 {
3836 text_section ();
3837 ASM_OUTPUT_LABEL (asm_out_file, name);
3838 }
3839
3840 xops[0] = gen_rtx_REG (SImode, regno);
3841 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
3842 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
3843 output_asm_insn ("ret", xops);
3844 }
3845
3846 if (NEED_INDICATE_EXEC_STACK)
3847 file_end_indicate_exec_stack ();
3848 }
3849
3850 /* Emit code for the SET_GOT patterns. */
3851
3852 const char *
3853 output_set_got (rtx dest)
3854 {
3855 rtx xops[3];
3856
3857 xops[0] = dest;
3858 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
3859
3860 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
3861 {
3862 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
3863
3864 if (!flag_pic)
3865 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
3866 else
3867 output_asm_insn ("call\t%a2", xops);
3868
3869 #if TARGET_MACHO
3870 /* Output the "canonical" label name ("Lxx$pb") here too. This
3871 is what will be referred to by the Mach-O PIC subsystem. */
3872 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
3873 #endif
3874 (*targetm.asm_out.internal_label) (asm_out_file, "L",
3875 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
3876
3877 if (flag_pic)
3878 output_asm_insn ("pop{l}\t%0", xops);
3879 }
3880 else
3881 {
3882 char name[32];
3883 get_pc_thunk_name (name, REGNO (dest));
3884 pic_labels_used |= 1 << REGNO (dest);
3885
3886 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3887 xops[2] = gen_rtx_MEM (QImode, xops[2]);
3888 output_asm_insn ("call\t%X2", xops);
3889 }
3890
3891 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
3892 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
3893 else if (!TARGET_MACHO)
3894 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
3895
3896 return "";
3897 }
3898
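/* Illustrative note, not part of GCC: on 32-bit ELF with -fpic the templates
   above expand roughly to one of the following sequences (shown for %ebx;
   the label name is arbitrary, _GLOBAL_OFFSET_TABLE_ stands for
   GOT_SYMBOL_NAME):

   without TARGET_DEEP_BRANCH_PREDICTION:

	call	.L2
   .L2:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx

   with TARGET_DEEP_BRANCH_PREDICTION, using the pc thunk emitted by
   ix86_file_end:

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
   */
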
3899 /* Generate a "push" pattern for input ARG. */
3900
3901 static rtx
3902 gen_push (rtx arg)
3903 {
3904 return gen_rtx_SET (VOIDmode,
3905 gen_rtx_MEM (Pmode,
3906 gen_rtx_PRE_DEC (Pmode,
3907 stack_pointer_rtx)),
3908 arg);
3909 }
3910
3911 /* Return >= 0 if there is an unused call-clobbered register available
3912 for the entire function. */
3913
3914 static unsigned int
3915 ix86_select_alt_pic_regnum (void)
3916 {
3917 if (current_function_is_leaf && !current_function_profile)
3918 {
3919 int i;
3920 for (i = 2; i >= 0; --i)
3921 if (!regs_ever_live[i])
3922 return i;
3923 }
3924
3925 return INVALID_REGNUM;
3926 }
3927
3928 /* Return 1 if we need to save REGNO. */
3929 static int
3930 ix86_save_reg (unsigned int regno, int maybe_eh_return)
3931 {
3932 if (pic_offset_table_rtx
3933 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
3934 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
3935 || current_function_profile
3936 || current_function_calls_eh_return
3937 || current_function_uses_const_pool))
3938 {
3939 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
3940 return 0;
3941 return 1;
3942 }
3943
3944 if (current_function_calls_eh_return && maybe_eh_return)
3945 {
3946 unsigned i;
3947 for (i = 0; ; i++)
3948 {
3949 unsigned test = EH_RETURN_DATA_REGNO (i);
3950 if (test == INVALID_REGNUM)
3951 break;
3952 if (test == regno)
3953 return 1;
3954 }
3955 }
3956
3957 return (regs_ever_live[regno]
3958 && !call_used_regs[regno]
3959 && !fixed_regs[regno]
3960 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
3961 }
3962
3963 /* Return number of registers to be saved on the stack. */
3964
3965 static int
3966 ix86_nsaved_regs (void)
3967 {
3968 int nregs = 0;
3969 int regno;
3970
3971 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
3972 if (ix86_save_reg (regno, true))
3973 nregs++;
3974 return nregs;
3975 }
3976
3977 /* Return the offset between two registers, one to be eliminated, and the other
3978 its replacement, at the start of a routine. */
3979
3980 HOST_WIDE_INT
3981 ix86_initial_elimination_offset (int from, int to)
3982 {
3983 struct ix86_frame frame;
3984 ix86_compute_frame_layout (&frame);
3985
3986 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
3987 return frame.hard_frame_pointer_offset;
3988 else if (from == FRAME_POINTER_REGNUM
3989 && to == HARD_FRAME_POINTER_REGNUM)
3990 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
3991 else
3992 {
3993 if (to != STACK_POINTER_REGNUM)
3994 abort ();
3995 else if (from == ARG_POINTER_REGNUM)
3996 return frame.stack_pointer_offset;
3997 else if (from != FRAME_POINTER_REGNUM)
3998 abort ();
3999 else
4000 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4001 }
4002 }
4003
4004 /* Fill the ix86_frame structure with information about the frame of the current function. */
4005
4006 static void
4007 ix86_compute_frame_layout (struct ix86_frame *frame)
4008 {
4009 HOST_WIDE_INT total_size;
4010 unsigned int stack_alignment_needed;
4011 HOST_WIDE_INT offset;
4012 unsigned int preferred_alignment;
4013 HOST_WIDE_INT size = get_frame_size ();
4014
4015 frame->nregs = ix86_nsaved_regs ();
4016 total_size = size;
4017
4018 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4019 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4020
4021 /* During reload iteration the number of registers saved can change.
4022 Recompute the value as needed. Do not recompute when the number of registers
4023 didn't change, as reload makes multiple calls to this function and does not
4024 expect the decision to change within a single iteration. */
4025 if (!optimize_size
4026 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4027 {
4028 int count = frame->nregs;
4029
4030 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4031 /* The fast prologue uses move instead of push to save registers. This
4032 is significantly longer, but also executes faster as modern hardware
4033 can execute the moves in parallel, but can't do that for push/pop.
4034
4035 Be careful about choosing what prologue to emit: when the function takes
4036 many instructions to execute we may use the slow version, as well as when
4037 the function is known to be outside a hot spot (this is known with
4038 profile feedback only). Weight the size of the function by the number of
4039 registers to save, as it is cheap to use one or two push instructions but
4040 very slow to use many of them. */
4041 if (count)
4042 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4043 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4044 || (flag_branch_probabilities
4045 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4046 cfun->machine->use_fast_prologue_epilogue = false;
4047 else
4048 cfun->machine->use_fast_prologue_epilogue
4049 = !expensive_function_p (count);
4050 }
4051 if (TARGET_PROLOGUE_USING_MOVE
4052 && cfun->machine->use_fast_prologue_epilogue)
4053 frame->save_regs_using_mov = true;
4054 else
4055 frame->save_regs_using_mov = false;
4056
4057
4058 /* Skip return address and saved base pointer. */
4059 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4060
4061 frame->hard_frame_pointer_offset = offset;
4062
4063 /* Do some sanity checking of stack_alignment_needed and
4064 preferred_alignment, since the i386 port is the only one using those
4065 features, which may break easily. */
4066
4067 if (size && !stack_alignment_needed)
4068 abort ();
4069 if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
4070 abort ();
4071 if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4072 abort ();
4073 if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4074 abort ();
4075
4076 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4077 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4078
4079 /* Register save area */
4080 offset += frame->nregs * UNITS_PER_WORD;
4081
4082 /* Va-arg area */
4083 if (ix86_save_varrargs_registers)
4084 {
4085 offset += X86_64_VARARGS_SIZE;
4086 frame->va_arg_size = X86_64_VARARGS_SIZE;
4087 }
4088 else
4089 frame->va_arg_size = 0;
4090
4091 /* Align start of frame for local function. */
4092 frame->padding1 = ((offset + stack_alignment_needed - 1)
4093 & -stack_alignment_needed) - offset;
4094
4095 offset += frame->padding1;
4096
4097 /* Frame pointer points here. */
4098 frame->frame_pointer_offset = offset;
4099
4100 offset += size;
4101
4102 /* Add the outgoing arguments area. It can be skipped if we eliminated
4103 all the function calls as dead code.
4104 Skipping is however impossible when the function calls alloca: the alloca
4105 expander assumes that the last current_function_outgoing_args_size bytes
4106 of the stack frame are unused. */
4107 if (ACCUMULATE_OUTGOING_ARGS
4108 && (!current_function_is_leaf || current_function_calls_alloca))
4109 {
4110 offset += current_function_outgoing_args_size;
4111 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4112 }
4113 else
4114 frame->outgoing_arguments_size = 0;
4115
4116 /* Align stack boundary. Only needed if we're calling another function
4117 or using alloca. */
4118 if (!current_function_is_leaf || current_function_calls_alloca)
4119 frame->padding2 = ((offset + preferred_alignment - 1)
4120 & -preferred_alignment) - offset;
4121 else
4122 frame->padding2 = 0;
4123
4124 offset += frame->padding2;
4125
4126 /* We've reached end of stack frame. */
4127 frame->stack_pointer_offset = offset;
4128
4129 /* Size prologue needs to allocate. */
4130 frame->to_allocate =
4131 (size + frame->padding1 + frame->padding2
4132 + frame->outgoing_arguments_size + frame->va_arg_size);
4133
4134 if ((!frame->to_allocate && frame->nregs <= 1)
4135 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4136 frame->save_regs_using_mov = false;
4137
4138 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4139 && current_function_is_leaf)
4140 {
4141 frame->red_zone_size = frame->to_allocate;
4142 if (frame->save_regs_using_mov)
4143 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4144 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4145 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4146 }
4147 else
4148 frame->red_zone_size = 0;
4149 frame->to_allocate -= frame->red_zone_size;
4150 frame->stack_pointer_offset -= frame->red_zone_size;
4151 #if 0
4152 fprintf (stderr, "nregs: %i\n", frame->nregs);
4153 fprintf (stderr, "size: %i\n", size);
4154 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4155 fprintf (stderr, "padding1: %i\n", frame->padding1);
4156 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4157 fprintf (stderr, "padding2: %i\n", frame->padding2);
4158 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4159 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4160 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4161 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4162 frame->hard_frame_pointer_offset);
4163 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4164 #endif
4165 }
4166
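/* Illustrative summary only, not part of GCC: the layout computed above,
   listed from the return address downward (offsets grow in the same
   direction as in the code above):

	return address
	saved %ebp (if frame_pointer_needed)	-> hard_frame_pointer_offset
	saved registers (nregs words)
	va-arg register save area (if used)
	padding1 (align to stack_alignment_needed)
	local variables (get_frame_size ())	-> frame_pointer_offset
	outgoing arguments
	padding2 (align to preferred_alignment)	-> stack_pointer_offset

   to_allocate covers everything below the saved registers; the red zone,
   when usable, is subtracted from it.  */
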
4167 /* Emit code to save registers in the prologue. */
4168
4169 static void
4170 ix86_emit_save_regs (void)
4171 {
4172 int regno;
4173 rtx insn;
4174
4175 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4176 if (ix86_save_reg (regno, true))
4177 {
4178 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4179 RTX_FRAME_RELATED_P (insn) = 1;
4180 }
4181 }
4182
4183 /* Emit code to save registers using MOV insns. The first register
4184 is saved at POINTER + OFFSET. */
4185 static void
4186 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4187 {
4188 int regno;
4189 rtx insn;
4190
4191 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4192 if (ix86_save_reg (regno, true))
4193 {
4194 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4195 Pmode, offset),
4196 gen_rtx_REG (Pmode, regno));
4197 RTX_FRAME_RELATED_P (insn) = 1;
4198 offset += UNITS_PER_WORD;
4199 }
4200 }
4201
4202 /* Expand a prologue or epilogue stack adjustment.
4203 The pattern exists to put a dependency on all ebp-based memory accesses.
4204 STYLE should be negative if instructions should be marked as frame related,
4205 zero if the %r11 register is live and cannot be freely used, and positive
4206 otherwise. */
4207
4208 static void
4209 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4210 {
4211 rtx insn;
4212
4213 if (! TARGET_64BIT)
4214 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4215 else if (x86_64_immediate_operand (offset, DImode))
4216 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4217 else
4218 {
4219 rtx r11;
4220 /* r11 is used by indirect sibcall return as well, set before the
4221 epilogue and used after the epilogue. ATM indirect sibcall
4222 shouldn't be used together with huge frame sizes in one
4223 function because of the frame_size check in sibcall.c. */
4224 if (style == 0)
4225 abort ();
4226 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4227 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4228 if (style < 0)
4229 RTX_FRAME_RELATED_P (insn) = 1;
4230 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4231 offset));
4232 }
4233 if (style < 0)
4234 RTX_FRAME_RELATED_P (insn) = 1;
4235 }
4236
4237 /* Expand the prologue into a bunch of separate insns. */
4238
4239 void
4240 ix86_expand_prologue (void)
4241 {
4242 rtx insn;
4243 bool pic_reg_used;
4244 struct ix86_frame frame;
4245 HOST_WIDE_INT allocate;
4246
4247 ix86_compute_frame_layout (&frame);
4248
4249 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4250 slower on all targets. Also sdb doesn't like it. */
4251
4252 if (frame_pointer_needed)
4253 {
4254 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4255 RTX_FRAME_RELATED_P (insn) = 1;
4256
4257 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4258 RTX_FRAME_RELATED_P (insn) = 1;
4259 }
4260
4261 allocate = frame.to_allocate;
4262
4263 if (!frame.save_regs_using_mov)
4264 ix86_emit_save_regs ();
4265 else
4266 allocate += frame.nregs * UNITS_PER_WORD;
4267
4268 /* When using the red zone we may start saving registers before allocating
4269 the stack frame, saving one cycle of the prologue. */
4270 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4271 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4272 : stack_pointer_rtx,
4273 -frame.nregs * UNITS_PER_WORD);
4274
4275 if (allocate == 0)
4276 ;
4277 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4278 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4279 GEN_INT (-allocate), -1);
4280 else
4281 {
4282 /* Only valid for Win32. */
4283 rtx eax = gen_rtx_REG (SImode, 0);
4284 bool eax_live = ix86_eax_live_at_start_p ();
4285
4286 if (TARGET_64BIT)
4287 abort ();
4288
4289 if (eax_live)
4290 {
4291 emit_insn (gen_push (eax));
4292 allocate -= 4;
4293 }
4294
4295 insn = emit_move_insn (eax, GEN_INT (allocate));
4296 RTX_FRAME_RELATED_P (insn) = 1;
4297
4298 insn = emit_insn (gen_allocate_stack_worker (eax));
4299 RTX_FRAME_RELATED_P (insn) = 1;
4300
4301 if (eax_live)
4302 {
4303 rtx t;
4304 if (frame_pointer_needed)
4305 t = plus_constant (hard_frame_pointer_rtx,
4306 allocate
4307 - frame.to_allocate
4308 - frame.nregs * UNITS_PER_WORD);
4309 else
4310 t = plus_constant (stack_pointer_rtx, allocate);
4311 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4312 }
4313 }
4314
4315 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4316 {
4317 if (!frame_pointer_needed || !frame.to_allocate)
4318 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4319 else
4320 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4321 -frame.nregs * UNITS_PER_WORD);
4322 }
4323
4324 pic_reg_used = false;
4325 if (pic_offset_table_rtx
4326 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4327 || current_function_profile))
4328 {
4329 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4330
4331 if (alt_pic_reg_used != INVALID_REGNUM)
4332 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4333
4334 pic_reg_used = true;
4335 }
4336
4337 if (pic_reg_used)
4338 {
4339 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4340
4341 /* Even with accurate pre-reload life analysis, we can wind up
4342 deleting all references to the pic register after reload.
4343 Consider if cross-jumping unifies two sides of a branch
4344 controlled by a comparison vs the only read from a global.
4345 In which case, allow the set_got to be deleted, though we're
4346 too late to do anything about the ebx save in the prologue. */
4347 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
4348 }
4349
4350 /* Prevent function calls from being scheduled before the call to mcount.
4351 In the pic_reg_used case, make sure that the GOT load isn't deleted. */
4352 if (current_function_profile)
4353 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
4354 }
4355
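/* Illustrative example only, not part of GCC: for a typical -m32 function
   that needs a frame pointer, saves %esi and %ebx with pushes, and has N
   bytes to allocate, the insns emitted above correspond roughly to:

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%ebx
	subl	$N, %esp

   With the "fast" prologue the pushes are replaced by moves into the newly
   allocated frame, and with TARGET_STACK_PROBE and large frames the
   allocation goes through the allocate_stack_worker path using %eax.  */
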
4356 /* Emit code to restore saved registers using MOV insns. First register
4357 is restored from POINTER + OFFSET. */
4358 static void
4359 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4360 int maybe_eh_return)
4361 {
4362 int regno;
4363 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4364
4365 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4366 if (ix86_save_reg (regno, maybe_eh_return))
4367 {
4368 /* Ensure that adjust_address won't be forced to produce a pointer
4369 outside the range allowed by the x86-64 instruction set. */
4370 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4371 {
4372 rtx r11;
4373
4374 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4375 emit_move_insn (r11, GEN_INT (offset));
4376 emit_insn (gen_adddi3 (r11, r11, pointer));
4377 base_address = gen_rtx_MEM (Pmode, r11);
4378 offset = 0;
4379 }
4380 emit_move_insn (gen_rtx_REG (Pmode, regno),
4381 adjust_address (base_address, Pmode, offset));
4382 offset += UNITS_PER_WORD;
4383 }
4384 }
4385
4386 /* Restore function stack, frame, and registers. */
4387
4388 void
4389 ix86_expand_epilogue (int style)
4390 {
4391 int regno;
4392 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4393 struct ix86_frame frame;
4394 HOST_WIDE_INT offset;
4395
4396 ix86_compute_frame_layout (&frame);
4397
4398 /* Calculate start of saved registers relative to ebp. Special care
4399 must be taken for the normal return case of a function using
4400 eh_return: the eax and edx registers are marked as saved, but not
4401 restored along this path. */
4402 offset = frame.nregs;
4403 if (current_function_calls_eh_return && style != 2)
4404 offset -= 2;
4405 offset *= -UNITS_PER_WORD;
4406
4407 /* If we're only restoring one register and sp is not valid, then
4408 use a move instruction to restore the register, since it's
4409 less work than reloading sp and popping the register.
4410
4411 The default code results in a stack adjustment using an add/lea instruction,
4412 while this code results in a LEAVE instruction (or its discrete equivalent),
4413 so it is profitable in some other cases as well, especially when there
4414 are no registers to restore. We also use this code when TARGET_USE_LEAVE
4415 is set and there is exactly one register to pop. This heuristic may need some
4416 tuning in the future. */
4417 if ((!sp_valid && frame.nregs <= 1)
4418 || (TARGET_EPILOGUE_USING_MOVE
4419 && cfun->machine->use_fast_prologue_epilogue
4420 && (frame.nregs > 1 || frame.to_allocate))
4421 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4422 || (frame_pointer_needed && TARGET_USE_LEAVE
4423 && cfun->machine->use_fast_prologue_epilogue
4424 && frame.nregs == 1)
4425 || current_function_calls_eh_return)
4426 {
4427 /* Restore registers. We can use ebp or esp to address the memory
4428 locations. If both are available, default to ebp, since offsets
4429 are known to be small. The only exception is esp pointing directly to the
4430 end of the block of saved registers, where we may simplify the addressing
4431 mode. */
4432
4433 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4434 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4435 frame.to_allocate, style == 2);
4436 else
4437 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4438 offset, style == 2);
4439
4440 /* eh_return epilogues need %ecx added to the stack pointer. */
4441 if (style == 2)
4442 {
4443 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4444
4445 if (frame_pointer_needed)
4446 {
4447 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4448 tmp = plus_constant (tmp, UNITS_PER_WORD);
4449 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4450
4451 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4452 emit_move_insn (hard_frame_pointer_rtx, tmp);
4453
4454 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4455 const0_rtx, style);
4456 }
4457 else
4458 {
4459 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4460 tmp = plus_constant (tmp, (frame.to_allocate
4461 + frame.nregs * UNITS_PER_WORD));
4462 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
4463 }
4464 }
4465 else if (!frame_pointer_needed)
4466 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4467 GEN_INT (frame.to_allocate
4468 + frame.nregs * UNITS_PER_WORD),
4469 style);
4470 /* If not an i386, mov & pop is faster than "leave". */
4471 else if (TARGET_USE_LEAVE || optimize_size
4472 || !cfun->machine->use_fast_prologue_epilogue)
4473 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4474 else
4475 {
4476 pro_epilogue_adjust_stack (stack_pointer_rtx,
4477 hard_frame_pointer_rtx,
4478 const0_rtx, style);
4479 if (TARGET_64BIT)
4480 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4481 else
4482 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4483 }
4484 }
4485 else
4486 {
4487 /* First step is to deallocate the stack frame so that we can
4488 pop the registers. */
4489 if (!sp_valid)
4490 {
4491 if (!frame_pointer_needed)
4492 abort ();
4493 pro_epilogue_adjust_stack (stack_pointer_rtx,
4494 hard_frame_pointer_rtx,
4495 GEN_INT (offset), style);
4496 }
4497 else if (frame.to_allocate)
4498 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4499 GEN_INT (frame.to_allocate), style);
4500
4501 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4502 if (ix86_save_reg (regno, false))
4503 {
4504 if (TARGET_64BIT)
4505 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
4506 else
4507 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
4508 }
4509 if (frame_pointer_needed)
4510 {
4511 /* Leave results in shorter dependency chains on CPUs that are
4512 able to grok it fast. */
4513 if (TARGET_USE_LEAVE)
4514 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4515 else if (TARGET_64BIT)
4516 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4517 else
4518 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4519 }
4520 }
4521
4522 /* Sibcall epilogues don't want a return instruction. */
4523 if (style == 0)
4524 return;
4525
4526 if (current_function_pops_args && current_function_args_size)
4527 {
4528 rtx popc = GEN_INT (current_function_pops_args);
4529
4530 /* i386 can only pop 64K bytes. If asked to pop more, pop
4531 return address, do explicit add, and jump indirectly to the
4532 caller. */
4533
4534 if (current_function_pops_args >= 65536)
4535 {
4536 rtx ecx = gen_rtx_REG (SImode, 2);
4537
4538 /* There is no "pascal" calling convention in 64bit ABI. */
4539 if (TARGET_64BIT)
4540 abort ();
4541
4542 emit_insn (gen_popsi1 (ecx));
4543 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
4544 emit_jump_insn (gen_return_indirect_internal (ecx));
4545 }
4546 else
4547 emit_jump_insn (gen_return_pop_internal (popc));
4548 }
4549 else
4550 emit_jump_insn (gen_return_internal ());
4551 }
4552
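/* Illustrative example only, not part of GCC: for a -m32 function that saved
   %esi and %ebx and allocated N bytes, the two epilogue styles above look
   roughly like

	movl	-8(%ebp), %ebx
	movl	-4(%ebp), %esi
	leave
	ret

   or

	addl	$N, %esp
	popl	%ebx
	popl	%esi
	popl	%ebp
	ret

   and functions with callee-popped arguments end in "ret $n"
   (gen_return_pop_internal) instead of a plain "ret".  */
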
4553 /* Reset from the function's potential modifications. */
4554
4555 static void
4556 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4557 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4558 {
4559 if (pic_offset_table_rtx)
4560 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
4561 }
4562 \f
4563 /* Extract the parts of an RTL expression that is a valid memory address
4564 for an instruction. Return 0 if the structure of the address is
4565 grossly off. Return -1 if the address contains ASHIFT, so it is not
4566 strictly valid, but still used for computing length of lea instruction. */
4567
4568 int
4569 ix86_decompose_address (rtx addr, struct ix86_address *out)
4570 {
4571 rtx base = NULL_RTX;
4572 rtx index = NULL_RTX;
4573 rtx disp = NULL_RTX;
4574 HOST_WIDE_INT scale = 1;
4575 rtx scale_rtx = NULL_RTX;
4576 int retval = 1;
4577 enum ix86_address_seg seg = SEG_DEFAULT;
4578
4579 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
4580 base = addr;
4581 else if (GET_CODE (addr) == PLUS)
4582 {
4583 rtx addends[4], op;
4584 int n = 0, i;
4585
4586 op = addr;
4587 do
4588 {
4589 if (n >= 4)
4590 return 0;
4591 addends[n++] = XEXP (op, 1);
4592 op = XEXP (op, 0);
4593 }
4594 while (GET_CODE (op) == PLUS);
4595 if (n >= 4)
4596 return 0;
4597 addends[n] = op;
4598
4599 for (i = n; i >= 0; --i)
4600 {
4601 op = addends[i];
4602 switch (GET_CODE (op))
4603 {
4604 case MULT:
4605 if (index)
4606 return 0;
4607 index = XEXP (op, 0);
4608 scale_rtx = XEXP (op, 1);
4609 break;
4610
4611 case UNSPEC:
4612 if (XINT (op, 1) == UNSPEC_TP
4613 && TARGET_TLS_DIRECT_SEG_REFS
4614 && seg == SEG_DEFAULT)
4615 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
4616 else
4617 return 0;
4618 break;
4619
4620 case REG:
4621 case SUBREG:
4622 if (!base)
4623 base = op;
4624 else if (!index)
4625 index = op;
4626 else
4627 return 0;
4628 break;
4629
4630 case CONST:
4631 case CONST_INT:
4632 case SYMBOL_REF:
4633 case LABEL_REF:
4634 if (disp)
4635 return 0;
4636 disp = op;
4637 break;
4638
4639 default:
4640 return 0;
4641 }
4642 }
4643 }
4644 else if (GET_CODE (addr) == MULT)
4645 {
4646 index = XEXP (addr, 0); /* index*scale */
4647 scale_rtx = XEXP (addr, 1);
4648 }
4649 else if (GET_CODE (addr) == ASHIFT)
4650 {
4651 rtx tmp;
4652
4653 /* We're called for lea too, which implements ashift on occasion. */
4654 index = XEXP (addr, 0);
4655 tmp = XEXP (addr, 1);
4656 if (GET_CODE (tmp) != CONST_INT)
4657 return 0;
4658 scale = INTVAL (tmp);
4659 if ((unsigned HOST_WIDE_INT) scale > 3)
4660 return 0;
4661 scale = 1 << scale;
4662 retval = -1;
4663 }
4664 else
4665 disp = addr; /* displacement */
4666
4667 /* Extract the integral value of scale. */
4668 if (scale_rtx)
4669 {
4670 if (GET_CODE (scale_rtx) != CONST_INT)
4671 return 0;
4672 scale = INTVAL (scale_rtx);
4673 }
4674
4675 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
4676 if (base && index && scale == 1
4677 && (index == arg_pointer_rtx
4678 || index == frame_pointer_rtx
4679 || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM)))
4680 {
4681 rtx tmp = base;
4682 base = index;
4683 index = tmp;
4684 }
4685
4686 /* Special case: %ebp cannot be encoded as a base without a displacement. */
4687 if ((base == hard_frame_pointer_rtx
4688 || base == frame_pointer_rtx
4689 || base == arg_pointer_rtx) && !disp)
4690 disp = const0_rtx;
4691
4692 /* Special case: on K6, [%esi] causes the instruction to be vector decoded.
4693 Avoid this by transforming it to [%esi+0]. */
4694 if (ix86_tune == PROCESSOR_K6 && !optimize_size
4695 && base && !index && !disp
4696 && REG_P (base)
4697 && REGNO_REG_CLASS (REGNO (base)) == SIREG)
4698 disp = const0_rtx;
4699
4700 /* Special case: encode reg+reg instead of reg*2. */
4701 if (!base && index && scale && scale == 2)
4702 base = index, scale = 1;
4703
4704 /* Special case: scaling cannot be encoded without base or displacement. */
4705 if (!base && !disp && index && scale != 1)
4706 disp = const0_rtx;
4707
4708 out->base = base;
4709 out->index = index;
4710 out->disp = disp;
4711 out->scale = scale;
4712 out->seg = seg;
4713
4714 return retval;
4715 }
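/* Illustrative example only, not part of GCC: decomposing the address
   12(%ebx,%esi,4), i.e. the RTL

	(plus (plus (mult (reg %esi) (const_int 4)) (reg %ebx)) (const_int 12))

   fills OUT with base = %ebx, index = %esi, scale = 4, disp = 12 and
   seg = SEG_DEFAULT, and returns 1.  */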
4716 \f
4717 /* Return the cost of the memory address X.
4718 For i386, it is better to use a complex address than to let gcc copy
4719 the address into a reg and make a new pseudo. But not if the address
4720 requires two regs - that would mean more pseudos with longer
4721 lifetimes. */
4722 static int
4723 ix86_address_cost (rtx x)
4724 {
4725 struct ix86_address parts;
4726 int cost = 1;
4727
4728 if (!ix86_decompose_address (x, &parts))
4729 abort ();
4730
4731 /* More complex memory references are better. */
4732 if (parts.disp && parts.disp != const0_rtx)
4733 cost--;
4734 if (parts.seg != SEG_DEFAULT)
4735 cost--;
4736
4737 /* Attempt to minimize number of registers in the address. */
4738 if ((parts.base
4739 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
4740 || (parts.index
4741 && (!REG_P (parts.index)
4742 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
4743 cost++;
4744
4745 if (parts.base
4746 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
4747 && parts.index
4748 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
4749 && parts.base != parts.index)
4750 cost++;
4751
4752 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
4753 since its predecode logic can't detect the length of such instructions
4754 and decoding degenerates to vector decoding. Increase the cost of such
4755 addresses here. The penalty is at least 2 cycles. It may be worthwhile
4756 to split such addresses or even refuse them entirely.
4757
4758 The following addressing modes are affected:
4759 [base+scale*index]
4760 [scale*index+disp]
4761 [base+index]
4762
4763 The first and last cases may be avoidable by explicitly coding the zero into
4764 the memory address, but I don't have an AMD-K6 machine handy to check this
4765 theory. */
4766
4767 if (TARGET_K6
4768 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
4769 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
4770 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
4771 cost += 10;
4772
4773 return cost;
4774 }
4775 \f
4776 /* If X is a machine specific address (i.e. a symbol or label being
4777 referenced as a displacement from the GOT implemented using an
4778 UNSPEC), then return the base term. Otherwise return X. */
4779
4780 rtx
4781 ix86_find_base_term (rtx x)
4782 {
4783 rtx term;
4784
4785 if (TARGET_64BIT)
4786 {
4787 if (GET_CODE (x) != CONST)
4788 return x;
4789 term = XEXP (x, 0);
4790 if (GET_CODE (term) == PLUS
4791 && (GET_CODE (XEXP (term, 1)) == CONST_INT
4792 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
4793 term = XEXP (term, 0);
4794 if (GET_CODE (term) != UNSPEC
4795 || XINT (term, 1) != UNSPEC_GOTPCREL)
4796 return x;
4797
4798 term = XVECEXP (term, 0, 0);
4799
4800 if (GET_CODE (term) != SYMBOL_REF
4801 && GET_CODE (term) != LABEL_REF)
4802 return x;
4803
4804 return term;
4805 }
4806
4807 term = ix86_delegitimize_address (x);
4808
4809 if (GET_CODE (term) != SYMBOL_REF
4810 && GET_CODE (term) != LABEL_REF)
4811 return x;
4812
4813 return term;
4814 }
4815
4816 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
4817 this is used to form addresses of local data when -fPIC is in
4818 use. */
4819
4820 static bool
4821 darwin_local_data_pic (rtx disp)
4822 {
4823 if (GET_CODE (disp) == MINUS)
4824 {
4825 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
4826 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
4827 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
4828 {
4829 const char *sym_name = XSTR (XEXP (disp, 1), 0);
4830 if (! strcmp (sym_name, "<pic base>"))
4831 return true;
4832 }
4833 }
4834
4835 return false;
4836 }
4837 \f
4838 /* Determine if a given RTX is a valid constant. We already know this
4839 satisfies CONSTANT_P. */
4840
4841 bool
4842 legitimate_constant_p (rtx x)
4843 {
4844 switch (GET_CODE (x))
4845 {
4846 case CONST:
4847 x = XEXP (x, 0);
4848
4849 if (GET_CODE (x) == PLUS)
4850 {
4851 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4852 return false;
4853 x = XEXP (x, 0);
4854 }
4855
4856 if (TARGET_MACHO && darwin_local_data_pic (x))
4857 return true;
4858
4859 /* Only some unspecs are valid as "constants". */
4860 if (GET_CODE (x) == UNSPEC)
4861 switch (XINT (x, 1))
4862 {
4863 case UNSPEC_TPOFF:
4864 case UNSPEC_NTPOFF:
4865 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4866 case UNSPEC_DTPOFF:
4867 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4868 default:
4869 return false;
4870 }
4871
4872 /* We must have drilled down to a symbol. */
4873 if (!symbolic_operand (x, Pmode))
4874 return false;
4875 /* FALLTHRU */
4876
4877 case SYMBOL_REF:
4878 /* TLS symbols are never valid. */
4879 if (tls_symbolic_operand (x, Pmode))
4880 return false;
4881 break;
4882
4883 default:
4884 break;
4885 }
4886
4887 /* Otherwise we handle everything else in the move patterns. */
4888 return true;
4889 }
4890
4891 /* Determine if it's legal to put X into the constant pool. This
4892 is not possible for the address of thread-local symbols, which
4893 is checked above. */
4894
4895 static bool
4896 ix86_cannot_force_const_mem (rtx x)
4897 {
4898 return !legitimate_constant_p (x);
4899 }
4900
4901 /* Determine if a given RTX is a valid constant address. */
4902
4903 bool
4904 constant_address_p (rtx x)
4905 {
4906 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
4907 }
4908
4909 /* Nonzero if the constant value X is a legitimate general operand
4910 when generating PIC code. It is given that flag_pic is on and
4911 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
4912
4913 bool
4914 legitimate_pic_operand_p (rtx x)
4915 {
4916 rtx inner;
4917
4918 switch (GET_CODE (x))
4919 {
4920 case CONST:
4921 inner = XEXP (x, 0);
4922
4923 /* Only some unspecs are valid as "constants". */
4924 if (GET_CODE (inner) == UNSPEC)
4925 switch (XINT (inner, 1))
4926 {
4927 case UNSPEC_TPOFF:
4928 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
4929 default:
4930 return false;
4931 }
4932 /* FALLTHRU */
4933
4934 case SYMBOL_REF:
4935 case LABEL_REF:
4936 return legitimate_pic_address_disp_p (x);
4937
4938 default:
4939 return true;
4940 }
4941 }
4942
4943 /* Determine if a given CONST RTX is a valid memory displacement
4944 in PIC mode. */
4945
4946 int
4947 legitimate_pic_address_disp_p (rtx disp)
4948 {
4949 bool saw_plus;
4950
4951 /* In 64bit mode we can allow direct addresses of symbols and labels
4952 when they are not dynamic symbols. */
4953 if (TARGET_64BIT)
4954 {
4955 /* TLS references should always be enclosed in UNSPEC. */
4956 if (tls_symbolic_operand (disp, GET_MODE (disp)))
4957 return 0;
4958 if (GET_CODE (disp) == SYMBOL_REF
4959 && ix86_cmodel == CM_SMALL_PIC
4960 && SYMBOL_REF_LOCAL_P (disp))
4961 return 1;
4962 if (GET_CODE (disp) == LABEL_REF)
4963 return 1;
4964 if (GET_CODE (disp) == CONST
4965 && GET_CODE (XEXP (disp, 0)) == PLUS)
4966 {
4967 rtx op0 = XEXP (XEXP (disp, 0), 0);
4968 rtx op1 = XEXP (XEXP (disp, 0), 1);
4969
4970 /* TLS references should always be enclosed in UNSPEC. */
4971 if (tls_symbolic_operand (op0, GET_MODE (op0)))
4972 return 0;
4973 if (((GET_CODE (op0) == SYMBOL_REF
4974 && ix86_cmodel == CM_SMALL_PIC
4975 && SYMBOL_REF_LOCAL_P (op0))
4976 || GET_CODE (op0) == LABEL_REF)
4977 && GET_CODE (op1) == CONST_INT
4978 && INTVAL (op1) < 16*1024*1024
4979 && INTVAL (op1) >= -16*1024*1024)
4980 return 1;
4981 }
4982 }
4983 if (GET_CODE (disp) != CONST)
4984 return 0;
4985 disp = XEXP (disp, 0);
4986
4987 if (TARGET_64BIT)
4988 {
4989 /* It is not safe to allow PLUS expressions here; that would limit the allowed
4990 distance of GOT tables. We should not need these anyway. */
4991 if (GET_CODE (disp) != UNSPEC
4992 || XINT (disp, 1) != UNSPEC_GOTPCREL)
4993 return 0;
4994
4995 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
4996 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
4997 return 0;
4998 return 1;
4999 }
5000
5001 saw_plus = false;
5002 if (GET_CODE (disp) == PLUS)
5003 {
5004 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5005 return 0;
5006 disp = XEXP (disp, 0);
5007 saw_plus = true;
5008 }
5009
5010 if (TARGET_MACHO && darwin_local_data_pic (disp))
5011 return 1;
5012
5013 if (GET_CODE (disp) != UNSPEC)
5014 return 0;
5015
5016 switch (XINT (disp, 1))
5017 {
5018 case UNSPEC_GOT:
5019 if (saw_plus)
5020 return false;
5021 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5022 case UNSPEC_GOTOFF:
5023 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5024 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5025 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5026 return false;
5027 case UNSPEC_GOTTPOFF:
5028 case UNSPEC_GOTNTPOFF:
5029 case UNSPEC_INDNTPOFF:
5030 if (saw_plus)
5031 return false;
5032 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5033 case UNSPEC_NTPOFF:
5034 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5035 case UNSPEC_DTPOFF:
5036 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5037 }
5038
5039 return 0;
5040 }
5041
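/* Illustrative examples only, not part of GCC: displacements accepted above
   include, for 32-bit PIC,

	(const (unspec [(symbol_ref "sym")] UNSPEC_GOT))	; sym@GOT
	(const (unspec [(symbol_ref "sym")] UNSPEC_GOTOFF))	; sym@GOTOFF, local symbols only

   and, for 64-bit,

	(const (unspec [(symbol_ref "sym")] UNSPEC_GOTPCREL))	; sym@GOTPCREL

   plus the TLS unspecs handled in the switch above.  */
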
5042 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5043 memory address for an instruction. The MODE argument is the machine mode
5044 for the MEM expression that wants to use this address.
5045
5046 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5047 convert common non-canonical forms to canonical form so that they will
5048 be recognized. */
5049
5050 int
5051 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5052 {
5053 struct ix86_address parts;
5054 rtx base, index, disp;
5055 HOST_WIDE_INT scale;
5056 const char *reason = NULL;
5057 rtx reason_rtx = NULL_RTX;
5058
5059 if (TARGET_DEBUG_ADDR)
5060 {
5061 fprintf (stderr,
5062 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5063 GET_MODE_NAME (mode), strict);
5064 debug_rtx (addr);
5065 }
5066
5067 if (ix86_decompose_address (addr, &parts) <= 0)
5068 {
5069 reason = "decomposition failed";
5070 goto report_error;
5071 }
5072
5073 base = parts.base;
5074 index = parts.index;
5075 disp = parts.disp;
5076 scale = parts.scale;
5077
5078 /* Validate base register.
5079
5080 Don't allow SUBREGs here; they can lead to spill failures when the base
5081 is one word out of a two-word structure, which is represented internally
5082 as a DImode int. */
5083
5084 if (base)
5085 {
5086 reason_rtx = base;
5087
5088 if (GET_CODE (base) != REG)
5089 {
5090 reason = "base is not a register";
5091 goto report_error;
5092 }
5093
5094 if (GET_MODE (base) != Pmode)
5095 {
5096 reason = "base is not in Pmode";
5097 goto report_error;
5098 }
5099
5100 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
5101 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
5102 {
5103 reason = "base is not valid";
5104 goto report_error;
5105 }
5106 }
5107
5108 /* Validate index register.
5109
5110 Don't allow SUBREGs here; they can lead to spill failures when the index
5111 is one word out of a two-word structure, which is represented internally
5112 as a DImode int. */
5113
5114 if (index)
5115 {
5116 reason_rtx = index;
5117
5118 if (GET_CODE (index) != REG)
5119 {
5120 reason = "index is not a register";
5121 goto report_error;
5122 }
5123
5124 if (GET_MODE (index) != Pmode)
5125 {
5126 reason = "index is not in Pmode";
5127 goto report_error;
5128 }
5129
5130 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index))
5131 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index)))
5132 {
5133 reason = "index is not valid";
5134 goto report_error;
5135 }
5136 }
5137
5138 /* Validate scale factor. */
5139 if (scale != 1)
5140 {
5141 reason_rtx = GEN_INT (scale);
5142 if (!index)
5143 {
5144 reason = "scale without index";
5145 goto report_error;
5146 }
5147
5148 if (scale != 2 && scale != 4 && scale != 8)
5149 {
5150 reason = "scale is not a valid multiplier";
5151 goto report_error;
5152 }
5153 }
5154
5155 /* Validate displacement. */
5156 if (disp)
5157 {
5158 reason_rtx = disp;
5159
5160 if (GET_CODE (disp) == CONST
5161 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5162 switch (XINT (XEXP (disp, 0), 1))
5163 {
5164 case UNSPEC_GOT:
5165 case UNSPEC_GOTOFF:
5166 case UNSPEC_GOTPCREL:
5167 if (!flag_pic)
5168 abort ();
5169 goto is_legitimate_pic;
5170
5171 case UNSPEC_GOTTPOFF:
5172 case UNSPEC_GOTNTPOFF:
5173 case UNSPEC_INDNTPOFF:
5174 case UNSPEC_NTPOFF:
5175 case UNSPEC_DTPOFF:
5176 break;
5177
5178 default:
5179 reason = "invalid address unspec";
5180 goto report_error;
5181 }
5182
5183 else if (flag_pic && (SYMBOLIC_CONST (disp)
5184 #if TARGET_MACHO
5185 && !machopic_operand_p (disp)
5186 #endif
5187 ))
5188 {
5189 is_legitimate_pic:
5190 if (TARGET_64BIT && (index || base))
5191 {
5192 /* foo@dtpoff(%rX) is ok. */
5193 if (GET_CODE (disp) != CONST
5194 || GET_CODE (XEXP (disp, 0)) != PLUS
5195 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5196 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5197 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5198 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5199 {
5200 reason = "non-constant pic memory reference";
5201 goto report_error;
5202 }
5203 }
5204 else if (! legitimate_pic_address_disp_p (disp))
5205 {
5206 reason = "displacement is an invalid pic construct";
5207 goto report_error;
5208 }
5209
5210 /* This code used to verify that a symbolic pic displacement
5211 includes the pic_offset_table_rtx register.
5212
5213 While this is a good idea, unfortunately these constructs may
5214 be created by the "adds using lea" optimization for incorrect
5215 code like:
5216
5217 int a;
5218 int foo(int i)
5219 {
5220 return *(&a+i);
5221 }
5222
5223 This code is nonsensical, but it results in addressing the
5224 GOT table with pic_offset_table_rtx as the base. We can't
5225 easily refuse it, since it gets matched by the "addsi3"
5226 pattern, which later gets split into an lea when the output
5227 register differs from the input. While this could be handled
5228 by a separate addsi pattern for this case that never results
5229 in an lea, disabling this test seems to be the easier and
5230 correct fix for the crash. */
5231 }
5232 else if (GET_CODE (disp) != LABEL_REF
5233 && GET_CODE (disp) != CONST_INT
5234 && (GET_CODE (disp) != CONST
5235 || !legitimate_constant_p (disp))
5236 && (GET_CODE (disp) != SYMBOL_REF
5237 || !legitimate_constant_p (disp)))
5238 {
5239 reason = "displacement is not constant";
5240 goto report_error;
5241 }
5242 else if (TARGET_64BIT
5243 && !x86_64_immediate_operand (disp, VOIDmode))
5244 {
5245 reason = "displacement is out of range";
5246 goto report_error;
5247 }
5248 }
5249
5250 /* Everything looks valid. */
5251 if (TARGET_DEBUG_ADDR)
5252 fprintf (stderr, "Success.\n");
5253 return TRUE;
5254
5255 report_error:
5256 if (TARGET_DEBUG_ADDR)
5257 {
5258 fprintf (stderr, "Error: %s\n", reason);
5259 debug_rtx (reason_rtx);
5260 }
5261 return FALSE;
5262 }
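/* For illustration (a rough sketch of the forms accepted above, in the
   usual base + index*scale + displacement shape):

     (plus (reg) (const_int 12))                       accepted
     (plus (mult (reg) (const_int 4)) (reg))           accepted
     (plus (mult (reg) (const_int 3)) (reg))           rejected; the scale
                                                       must be 1, 2, 4 or 8

   A shifted index such as (ashift (reg) (const_int 2)) is not recognized
   here; LEGITIMIZE_ADDRESS below is expected to rewrite it as a mult
   first.  */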
5263 \f
5264 /* Return a unique alias set for the GOT. */
5265
5266 static HOST_WIDE_INT
5267 ix86_GOT_alias_set (void)
5268 {
5269 static HOST_WIDE_INT set = -1;
5270 if (set == -1)
5271 set = new_alias_set ();
5272 return set;
5273 }
5274
5275 /* Return a legitimate reference for ORIG (an address) using the
5276 register REG. If REG is 0, a new pseudo is generated.
5277
5278 There are two types of references that must be handled:
5279
5280 1. Global data references must load the address from the GOT, via
5281 the PIC reg. An insn is emitted to do this load, and the reg is
5282 returned.
5283
5284 2. Static data references, constant pool addresses, and code labels
5285 compute the address as an offset from the GOT, whose base is in
5286 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5287 differentiate them from global data objects. The returned
5288 address is the PIC reg + an unspec constant.
5289
5290 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5291 reg also appears in the address. */
5292
5293 static rtx
5294 legitimize_pic_address (rtx orig, rtx reg)
5295 {
5296 rtx addr = orig;
5297 rtx new = orig;
5298 rtx base;
5299
5300 #if TARGET_MACHO
5301 if (reg == 0)
5302 reg = gen_reg_rtx (Pmode);
5303 /* Use the generic Mach-O PIC machinery. */
5304 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5305 #endif
5306
5307 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5308 new = addr;
5309 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5310 {
5311 /* This symbol may be referenced via a displacement from the PIC
5312 base address (@GOTOFF). */
5313
5314 if (reload_in_progress)
5315 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5316 if (GET_CODE (addr) == CONST)
5317 addr = XEXP (addr, 0);
5318 if (GET_CODE (addr) == PLUS)
5319 {
5320 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5321 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5322 }
5323 else
5324 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5325 new = gen_rtx_CONST (Pmode, new);
5326 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5327
5328 if (reg != 0)
5329 {
5330 emit_move_insn (reg, new);
5331 new = reg;
5332 }
5333 }
5334 else if (GET_CODE (addr) == SYMBOL_REF)
5335 {
5336 if (TARGET_64BIT)
5337 {
5338 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5339 new = gen_rtx_CONST (Pmode, new);
5340 new = gen_const_mem (Pmode, new);
5341 set_mem_alias_set (new, ix86_GOT_alias_set ());
5342
5343 if (reg == 0)
5344 reg = gen_reg_rtx (Pmode);
5345 /* Use gen_movsi directly; otherwise the address is loaded
5346 into a register for CSE. We don't want to CSE these addresses;
5347 instead we CSE addresses loaded from the GOT table, so skip this. */
5348 emit_insn (gen_movsi (reg, new));
5349 new = reg;
5350 }
5351 else
5352 {
5353 /* This symbol must be referenced via a load from the
5354 Global Offset Table (@GOT). */
5355
5356 if (reload_in_progress)
5357 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5358 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5359 new = gen_rtx_CONST (Pmode, new);
5360 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5361 new = gen_const_mem (Pmode, new);
5362 set_mem_alias_set (new, ix86_GOT_alias_set ());
5363
5364 if (reg == 0)
5365 reg = gen_reg_rtx (Pmode);
5366 emit_move_insn (reg, new);
5367 new = reg;
5368 }
5369 }
5370 else
5371 {
5372 if (GET_CODE (addr) == CONST)
5373 {
5374 addr = XEXP (addr, 0);
5375
5376 /* We must match stuff we generated before. Assume the only
5377 unspecs that can get here are ours. Not that we could do
5378 anything with them anyway.... */
5379 if (GET_CODE (addr) == UNSPEC
5380 || (GET_CODE (addr) == PLUS
5381 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5382 return orig;
5383 if (GET_CODE (addr) != PLUS)
5384 abort ();
5385 }
5386 if (GET_CODE (addr) == PLUS)
5387 {
5388 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5389
5390 /* Check first to see if this is a constant offset from a @GOTOFF
5391 symbol reference. */
5392 if (local_symbolic_operand (op0, Pmode)
5393 && GET_CODE (op1) == CONST_INT)
5394 {
5395 if (!TARGET_64BIT)
5396 {
5397 if (reload_in_progress)
5398 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5399 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
5400 UNSPEC_GOTOFF);
5401 new = gen_rtx_PLUS (Pmode, new, op1);
5402 new = gen_rtx_CONST (Pmode, new);
5403 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5404
5405 if (reg != 0)
5406 {
5407 emit_move_insn (reg, new);
5408 new = reg;
5409 }
5410 }
5411 else
5412 {
5413 if (INTVAL (op1) < -16*1024*1024
5414 || INTVAL (op1) >= 16*1024*1024)
5415 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
5416 }
5417 }
5418 else
5419 {
5420 base = legitimize_pic_address (XEXP (addr, 0), reg);
5421 new = legitimize_pic_address (XEXP (addr, 1),
5422 base == reg ? NULL_RTX : reg);
5423
5424 if (GET_CODE (new) == CONST_INT)
5425 new = plus_constant (base, INTVAL (new));
5426 else
5427 {
5428 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
5429 {
5430 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
5431 new = XEXP (new, 1);
5432 }
5433 new = gen_rtx_PLUS (Pmode, base, new);
5434 }
5435 }
5436 }
5437 }
5438 return new;
5439 }
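/* For illustration (a rough sketch of the two cases described above,
   assuming -fpic on ia32):

     local symbol:   (plus pic_offset_table_rtx
                           (const (unspec [sym] UNSPEC_GOTOFF)))
                     i.e. the address itself, sym@GOTOFF(%ebx);

     global symbol:  (mem (plus pic_offset_table_rtx
                                (const (unspec [sym] UNSPEC_GOT))))
                     i.e. the address is loaded from its GOT slot,
                     sym@GOT(%ebx).

   TARGET_64BIT instead loads the address RIP-relatively from
   sym@GOTPCREL(%rip).  */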
5440 \f
5441 /* Load the thread pointer. If TO_REG is true, force it into a register. */
5442
5443 static rtx
5444 get_thread_pointer (int to_reg)
5445 {
5446 rtx tp, reg, insn;
5447
5448 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
5449 if (!to_reg)
5450 return tp;
5451
5452 reg = gen_reg_rtx (Pmode);
5453 insn = gen_rtx_SET (VOIDmode, reg, tp);
5454 insn = emit_insn (insn);
5455
5456 return reg;
5457 }
5458
5459 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
5460 false if we expect this to be used for a memory address and true if
5461 we expect to load the address into a register. */
5462
5463 static rtx
5464 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
5465 {
5466 rtx dest, base, off, pic;
5467 int type;
5468
5469 switch (model)
5470 {
5471 case TLS_MODEL_GLOBAL_DYNAMIC:
5472 dest = gen_reg_rtx (Pmode);
5473 if (TARGET_64BIT)
5474 {
5475 rtx rax = gen_rtx_REG (Pmode, 0), insns;
5476
5477 start_sequence ();
5478 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
5479 insns = get_insns ();
5480 end_sequence ();
5481
5482 emit_libcall_block (insns, dest, rax, x);
5483 }
5484 else
5485 emit_insn (gen_tls_global_dynamic_32 (dest, x));
5486 break;
5487
5488 case TLS_MODEL_LOCAL_DYNAMIC:
5489 base = gen_reg_rtx (Pmode);
5490 if (TARGET_64BIT)
5491 {
5492 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
5493
5494 start_sequence ();
5495 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
5496 insns = get_insns ();
5497 end_sequence ();
5498
5499 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
5500 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
5501 emit_libcall_block (insns, base, rax, note);
5502 }
5503 else
5504 emit_insn (gen_tls_local_dynamic_base_32 (base));
5505
5506 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
5507 off = gen_rtx_CONST (Pmode, off);
5508
5509 return gen_rtx_PLUS (Pmode, base, off);
5510
5511 case TLS_MODEL_INITIAL_EXEC:
5512 if (TARGET_64BIT)
5513 {
5514 pic = NULL;
5515 type = UNSPEC_GOTNTPOFF;
5516 }
5517 else if (flag_pic)
5518 {
5519 if (reload_in_progress)
5520 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5521 pic = pic_offset_table_rtx;
5522 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
5523 }
5524 else if (!TARGET_GNU_TLS)
5525 {
5526 pic = gen_reg_rtx (Pmode);
5527 emit_insn (gen_set_got (pic));
5528 type = UNSPEC_GOTTPOFF;
5529 }
5530 else
5531 {
5532 pic = NULL;
5533 type = UNSPEC_INDNTPOFF;
5534 }
5535
5536 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
5537 off = gen_rtx_CONST (Pmode, off);
5538 if (pic)
5539 off = gen_rtx_PLUS (Pmode, pic, off);
5540 off = gen_const_mem (Pmode, off);
5541 set_mem_alias_set (off, ix86_GOT_alias_set ());
5542
5543 if (TARGET_64BIT || TARGET_GNU_TLS)
5544 {
5545 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5546 off = force_reg (Pmode, off);
5547 return gen_rtx_PLUS (Pmode, base, off);
5548 }
5549 else
5550 {
5551 base = get_thread_pointer (true);
5552 dest = gen_reg_rtx (Pmode);
5553 emit_insn (gen_subsi3 (dest, base, off));
5554 }
5555 break;
5556
5557 case TLS_MODEL_LOCAL_EXEC:
5558 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
5559 (TARGET_64BIT || TARGET_GNU_TLS)
5560 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
5561 off = gen_rtx_CONST (Pmode, off);
5562
5563 if (TARGET_64BIT || TARGET_GNU_TLS)
5564 {
5565 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5566 return gen_rtx_PLUS (Pmode, base, off);
5567 }
5568 else
5569 {
5570 base = get_thread_pointer (true);
5571 dest = gen_reg_rtx (Pmode);
5572 emit_insn (gen_subsi3 (dest, base, off));
5573 }
5574 break;
5575
5576 default:
5577 abort ();
5578 }
5579
5580 return dest;
5581 }
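/* For illustration (a rough sketch, assuming GNU TLS on ia32 with
   -fpic): the models above differ mainly in how the offset from the
   thread pointer is obtained.  The initial-exec result is roughly

     (plus (thread pointer)
           (mem (plus pic_offset_table_rtx
                      (const (unspec [x] UNSPEC_GOTNTPOFF)))))

   i.e. the offset is loaded from the GOT at x@gotntpoff, while the
   local-exec model folds the offset in as a link-time constant
   (x@NTPOFF added to, or x@TPOFF subtracted from, the thread pointer
   depending on the TLS flavor).  */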
5582
5583 /* Try machine-dependent ways of modifying an illegitimate address
5584 to be legitimate. If we find one, return the new, valid address.
5585 This macro is used in only one place: `memory_address' in explow.c.
5586
5587 OLDX is the address as it was before break_out_memory_refs was called.
5588 In some cases it is useful to look at this to decide what needs to be done.
5589
5590 MODE and WIN are passed so that this macro can use
5591 GO_IF_LEGITIMATE_ADDRESS.
5592
5593 It is always safe for this macro to do nothing. It exists to recognize
5594 opportunities to optimize the output.
5595
5596 For the 80386, we handle X+REG by loading X into a register R and
5597 using R+REG. R will go in a general reg and indexing will be used.
5598 However, if REG is a broken-out memory address or multiplication,
5599 nothing needs to be done because REG can certainly go in a general reg.
5600
5601 When -fpic is used, special handling is needed for symbolic references.
5602 See comments by legitimize_pic_address in i386.c for details. */
5603
5604 rtx
5605 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
5606 {
5607 int changed = 0;
5608 unsigned log;
5609
5610 if (TARGET_DEBUG_ADDR)
5611 {
5612 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
5613 GET_MODE_NAME (mode));
5614 debug_rtx (x);
5615 }
5616
5617 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
5618 if (log)
5619 return legitimize_tls_address (x, log, false);
5620 if (GET_CODE (x) == CONST
5621 && GET_CODE (XEXP (x, 0)) == PLUS
5622 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5623 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
5624 {
5625 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
5626 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
5627 }
5628
5629 if (flag_pic && SYMBOLIC_CONST (x))
5630 return legitimize_pic_address (x, 0);
5631
5632 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
5633 if (GET_CODE (x) == ASHIFT
5634 && GET_CODE (XEXP (x, 1)) == CONST_INT
5635 && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
5636 {
5637 changed = 1;
5638 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
5639 GEN_INT (1 << log));
5640 }
5641
5642 if (GET_CODE (x) == PLUS)
5643 {
5644 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
5645
5646 if (GET_CODE (XEXP (x, 0)) == ASHIFT
5647 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5648 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
5649 {
5650 changed = 1;
5651 XEXP (x, 0) = gen_rtx_MULT (Pmode,
5652 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
5653 GEN_INT (1 << log));
5654 }
5655
5656 if (GET_CODE (XEXP (x, 1)) == ASHIFT
5657 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5658 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)
5659 {
5660 changed = 1;
5661 XEXP (x, 1) = gen_rtx_MULT (Pmode,
5662 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
5663 GEN_INT (1 << log));
5664 }
5665
5666 /* Put multiply first if it isn't already. */
5667 if (GET_CODE (XEXP (x, 1)) == MULT)
5668 {
5669 rtx tmp = XEXP (x, 0);
5670 XEXP (x, 0) = XEXP (x, 1);
5671 XEXP (x, 1) = tmp;
5672 changed = 1;
5673 }
5674
5675 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
5676 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
5677 created by virtual register instantiation, register elimination, and
5678 similar optimizations. */
5679 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
5680 {
5681 changed = 1;
5682 x = gen_rtx_PLUS (Pmode,
5683 gen_rtx_PLUS (Pmode, XEXP (x, 0),
5684 XEXP (XEXP (x, 1), 0)),
5685 XEXP (XEXP (x, 1), 1));
5686 }
5687
5688 /* Canonicalize
5689 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
5690 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
5691 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
5692 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5693 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
5694 && CONSTANT_P (XEXP (x, 1)))
5695 {
5696 rtx constant;
5697 rtx other = NULL_RTX;
5698
5699 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5700 {
5701 constant = XEXP (x, 1);
5702 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
5703 }
5704 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
5705 {
5706 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
5707 other = XEXP (x, 1);
5708 }
5709 else
5710 constant = 0;
5711
5712 if (constant)
5713 {
5714 changed = 1;
5715 x = gen_rtx_PLUS (Pmode,
5716 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
5717 XEXP (XEXP (XEXP (x, 0), 1), 0)),
5718 plus_constant (other, INTVAL (constant)));
5719 }
5720 }
5721
5722 if (changed && legitimate_address_p (mode, x, FALSE))
5723 return x;
5724
5725 if (GET_CODE (XEXP (x, 0)) == MULT)
5726 {
5727 changed = 1;
5728 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
5729 }
5730
5731 if (GET_CODE (XEXP (x, 1)) == MULT)
5732 {
5733 changed = 1;
5734 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
5735 }
5736
5737 if (changed
5738 && GET_CODE (XEXP (x, 1)) == REG
5739 && GET_CODE (XEXP (x, 0)) == REG)
5740 return x;
5741
5742 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
5743 {
5744 changed = 1;
5745 x = legitimize_pic_address (x, 0);
5746 }
5747
5748 if (changed && legitimate_address_p (mode, x, FALSE))
5749 return x;
5750
5751 if (GET_CODE (XEXP (x, 0)) == REG)
5752 {
5753 rtx temp = gen_reg_rtx (Pmode);
5754 rtx val = force_operand (XEXP (x, 1), temp);
5755 if (val != temp)
5756 emit_move_insn (temp, val);
5757
5758 XEXP (x, 1) = temp;
5759 return x;
5760 }
5761
5762 else if (GET_CODE (XEXP (x, 1)) == REG)
5763 {
5764 rtx temp = gen_reg_rtx (Pmode);
5765 rtx val = force_operand (XEXP (x, 0), temp);
5766 if (val != temp)
5767 emit_move_insn (temp, val);
5768
5769 XEXP (x, 0) = temp;
5770 return x;
5771 }
5772 }
5773
5774 return x;
5775 }
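/* For illustration (a rough sketch): the ASHIFT canonicalization above
   rewrites a shifted index into the mult form that legitimate_address_p
   expects, e.g.

     (plus (ashift (reg) (const_int 2)) (reg))
       -> (plus (mult (reg) (const_int 4)) (reg))

   which then matches the base + index*scale addressing mode directly.  */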
5776 \f
5777 /* Print an integer constant expression in assembler syntax. Addition
5778 and subtraction are the only arithmetic that may appear in these
5779 expressions. FILE is the stdio stream to write to, X is the rtx, and
5780 CODE is the operand print code from the output string. */
5781
5782 static void
5783 output_pic_addr_const (FILE *file, rtx x, int code)
5784 {
5785 char buf[256];
5786
5787 switch (GET_CODE (x))
5788 {
5789 case PC:
5790 if (flag_pic)
5791 putc ('.', file);
5792 else
5793 abort ();
5794 break;
5795
5796 case SYMBOL_REF:
5797 /* Mark the decl as referenced so that cgraph will output the function. */
5798 if (SYMBOL_REF_DECL (x))
5799 mark_decl_referenced (SYMBOL_REF_DECL (x));
5800
5801 assemble_name (file, XSTR (x, 0));
5802 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
5803 fputs ("@PLT", file);
5804 break;
5805
5806 case LABEL_REF:
5807 x = XEXP (x, 0);
5808 /* FALLTHRU */
5809 case CODE_LABEL:
5810 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
5811 assemble_name (asm_out_file, buf);
5812 break;
5813
5814 case CONST_INT:
5815 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5816 break;
5817
5818 case CONST:
5819 /* This used to output parentheses around the expression,
5820 but that does not work on the 386 (either ATT or BSD assembler). */
5821 output_pic_addr_const (file, XEXP (x, 0), code);
5822 break;
5823
5824 case CONST_DOUBLE:
5825 if (GET_MODE (x) == VOIDmode)
5826 {
5827 /* We can use %d if the number is <32 bits and positive. */
5828 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
5829 fprintf (file, "0x%lx%08lx",
5830 (unsigned long) CONST_DOUBLE_HIGH (x),
5831 (unsigned long) CONST_DOUBLE_LOW (x));
5832 else
5833 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
5834 }
5835 else
5836 /* We can't handle floating point constants;
5837 PRINT_OPERAND must handle them. */
5838 output_operand_lossage ("floating constant misused");
5839 break;
5840
5841 case PLUS:
5842 /* Some assemblers need integer constants to appear first. */
5843 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5844 {
5845 output_pic_addr_const (file, XEXP (x, 0), code);
5846 putc ('+', file);
5847 output_pic_addr_const (file, XEXP (x, 1), code);
5848 }
5849 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5850 {
5851 output_pic_addr_const (file, XEXP (x, 1), code);
5852 putc ('+', file);
5853 output_pic_addr_const (file, XEXP (x, 0), code);
5854 }
5855 else
5856 abort ();
5857 break;
5858
5859 case MINUS:
5860 if (!TARGET_MACHO)
5861 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
5862 output_pic_addr_const (file, XEXP (x, 0), code);
5863 putc ('-', file);
5864 output_pic_addr_const (file, XEXP (x, 1), code);
5865 if (!TARGET_MACHO)
5866 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
5867 break;
5868
5869 case UNSPEC:
5870 if (XVECLEN (x, 0) != 1)
5871 abort ();
5872 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
5873 switch (XINT (x, 1))
5874 {
5875 case UNSPEC_GOT:
5876 fputs ("@GOT", file);
5877 break;
5878 case UNSPEC_GOTOFF:
5879 fputs ("@GOTOFF", file);
5880 break;
5881 case UNSPEC_GOTPCREL:
5882 fputs ("@GOTPCREL(%rip)", file);
5883 break;
5884 case UNSPEC_GOTTPOFF:
5885 /* FIXME: This might be @TPOFF in Sun ld too. */
5886 fputs ("@GOTTPOFF", file);
5887 break;
5888 case UNSPEC_TPOFF:
5889 fputs ("@TPOFF", file);
5890 break;
5891 case UNSPEC_NTPOFF:
5892 if (TARGET_64BIT)
5893 fputs ("@TPOFF", file);
5894 else
5895 fputs ("@NTPOFF", file);
5896 break;
5897 case UNSPEC_DTPOFF:
5898 fputs ("@DTPOFF", file);
5899 break;
5900 case UNSPEC_GOTNTPOFF:
5901 if (TARGET_64BIT)
5902 fputs ("@GOTTPOFF(%rip)", file);
5903 else
5904 fputs ("@GOTNTPOFF", file);
5905 break;
5906 case UNSPEC_INDNTPOFF:
5907 fputs ("@INDNTPOFF", file);
5908 break;
5909 default:
5910 output_operand_lossage ("invalid UNSPEC as operand");
5911 break;
5912 }
5913 break;
5914
5915 default:
5916 output_operand_lossage ("invalid expression as operand");
5917 }
5918 }
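/* For illustration (a rough sketch): given the unspecs built by
   legitimize_pic_address, this routine prints relocation syntax such as

     sym@GOTOFF            local data, offset from the GOT base
     sym@GOT               GOT slot of a global symbol
     sym@GOTPCREL(%rip)    64-bit RIP-relative GOT slot
     func@PLT              code 'P' on a non-local SYMBOL_REF

   into the assembler output.  */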
5919
5920 /* This is called from dwarfout.c via ASM_OUTPUT_DWARF_ADDR_CONST.
5921 We need to handle our special PIC relocations. */
5922
5923 void
5924 i386_dwarf_output_addr_const (FILE *file, rtx x)
5925 {
5926 #ifdef ASM_QUAD
5927 fprintf (file, "%s", TARGET_64BIT ? ASM_QUAD : ASM_LONG);
5928 #else
5929 if (TARGET_64BIT)
5930 abort ();
5931 fprintf (file, "%s", ASM_LONG);
5932 #endif
5933 if (flag_pic)
5934 output_pic_addr_const (file, x, '\0');
5935 else
5936 output_addr_const (file, x);
5937 fputc ('\n', file);
5938 }
5939
5940 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
5941 We need to emit DTP-relative relocations. */
5942
5943 void
5944 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
5945 {
5946 fputs (ASM_LONG, file);
5947 output_addr_const (file, x);
5948 fputs ("@DTPOFF", file);
5949 switch (size)
5950 {
5951 case 4:
5952 break;
5953 case 8:
5954 fputs (", 0", file);
5955 break;
5956 default:
5957 abort ();
5958 }
5959 }
5960
5961 /* In the name of slightly smaller debug output, and to cater to
5962 general assembler lossage, recognize PIC+GOTOFF and turn it back
5963 into a direct symbol reference. */
5964
5965 static rtx
5966 ix86_delegitimize_address (rtx orig_x)
5967 {
5968 rtx x = orig_x, y;
5969
5970 if (GET_CODE (x) == MEM)
5971 x = XEXP (x, 0);
5972
5973 if (TARGET_64BIT)
5974 {
5975 if (GET_CODE (x) != CONST
5976 || GET_CODE (XEXP (x, 0)) != UNSPEC
5977 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
5978 || GET_CODE (orig_x) != MEM)
5979 return orig_x;
5980 return XVECEXP (XEXP (x, 0), 0, 0);
5981 }
5982
5983 if (GET_CODE (x) != PLUS
5984 || GET_CODE (XEXP (x, 1)) != CONST)
5985 return orig_x;
5986
5987 if (GET_CODE (XEXP (x, 0)) == REG
5988 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5989 /* %ebx + GOT/GOTOFF */
5990 y = NULL;
5991 else if (GET_CODE (XEXP (x, 0)) == PLUS)
5992 {
5993 /* %ebx + %reg * scale + GOT/GOTOFF */
5994 y = XEXP (x, 0);
5995 if (GET_CODE (XEXP (y, 0)) == REG
5996 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
5997 y = XEXP (y, 1);
5998 else if (GET_CODE (XEXP (y, 1)) == REG
5999 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6000 y = XEXP (y, 0);
6001 else
6002 return orig_x;
6003 if (GET_CODE (y) != REG
6004 && GET_CODE (y) != MULT
6005 && GET_CODE (y) != ASHIFT)
6006 return orig_x;
6007 }
6008 else
6009 return orig_x;
6010
6011 x = XEXP (XEXP (x, 1), 0);
6012 if (GET_CODE (x) == UNSPEC
6013 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6014 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6015 {
6016 if (y)
6017 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6018 return XVECEXP (x, 0, 0);
6019 }
6020
6021 if (GET_CODE (x) == PLUS
6022 && GET_CODE (XEXP (x, 0)) == UNSPEC
6023 && GET_CODE (XEXP (x, 1)) == CONST_INT
6024 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6025 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6026 && GET_CODE (orig_x) != MEM)))
6027 {
6028 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6029 if (y)
6030 return gen_rtx_PLUS (Pmode, y, x);
6031 return x;
6032 }
6033
6034 return orig_x;
6035 }
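/* For illustration (a rough sketch): delegitimization undoes the PIC
   wrapping, e.g.

     (plus (reg ebx) (const (unspec [sym] UNSPEC_GOTOFF)))  ->  sym

   so that debug output can name the symbol directly instead of the
   pic_offset_table_rtx expression.  */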
6036 \f
6037 static void
6038 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6039 int fp, FILE *file)
6040 {
6041 const char *suffix;
6042
6043 if (mode == CCFPmode || mode == CCFPUmode)
6044 {
6045 enum rtx_code second_code, bypass_code;
6046 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6047 if (bypass_code != UNKNOWN || second_code != UNKNOWN)
6048 abort ();
6049 code = ix86_fp_compare_code_to_integer (code);
6050 mode = CCmode;
6051 }
6052 if (reverse)
6053 code = reverse_condition (code);
6054
6055 switch (code)
6056 {
6057 case EQ:
6058 suffix = "e";
6059 break;
6060 case NE:
6061 suffix = "ne";
6062 break;
6063 case GT:
6064 if (mode != CCmode && mode != CCNOmode && mode != CCGCmode)
6065 abort ();
6066 suffix = "g";
6067 break;
6068 case GTU:
6069 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6070 Those same assemblers have the same but opposite lossage on cmov. */
6071 if (mode != CCmode)
6072 abort ();
6073 suffix = fp ? "nbe" : "a";
6074 break;
6075 case LT:
6076 if (mode == CCNOmode || mode == CCGOCmode)
6077 suffix = "s";
6078 else if (mode == CCmode || mode == CCGCmode)
6079 suffix = "l";
6080 else
6081 abort ();
6082 break;
6083 case LTU:
6084 if (mode != CCmode)
6085 abort ();
6086 suffix = "b";
6087 break;
6088 case GE:
6089 if (mode == CCNOmode || mode == CCGOCmode)
6090 suffix = "ns";
6091 else if (mode == CCmode || mode == CCGCmode)
6092 suffix = "ge";
6093 else
6094 abort ();
6095 break;
6096 case GEU:
6097 /* ??? As above. */
6098 if (mode != CCmode)
6099 abort ();
6100 suffix = fp ? "nb" : "ae";
6101 break;
6102 case LE:
6103 if (mode != CCmode && mode != CCGCmode && mode != CCNOmode)
6104 abort ();
6105 suffix = "le";
6106 break;
6107 case LEU:
6108 if (mode != CCmode)
6109 abort ();
6110 suffix = "be";
6111 break;
6112 case UNORDERED:
6113 suffix = fp ? "u" : "p";
6114 break;
6115 case ORDERED:
6116 suffix = fp ? "nu" : "np";
6117 break;
6118 default:
6119 abort ();
6120 }
6121 fputs (suffix, file);
6122 }
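/* For illustration (a rough sketch): for a signed CCmode comparison,
   (gt ...) prints "g" and (ltu ...) prints "b", so a hypothetical
   template "set%C0\t%0" would come out as "setg" or "setb"; the REVERSE
   argument picks the negated suffix and FP picks the fcmov-safe
   spellings such as "nbe" instead of "a".  */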
6123
6124 /* Print the name of register X to FILE based on its machine mode and number.
6125 If CODE is 'w', pretend the mode is HImode.
6126 If CODE is 'b', pretend the mode is QImode.
6127 If CODE is 'k', pretend the mode is SImode.
6128 If CODE is 'q', pretend the mode is DImode.
6129 If CODE is 'h', pretend the reg is the `high' byte register.
6130 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
6131
6132 void
6133 print_reg (rtx x, int code, FILE *file)
6134 {
6135 if (REGNO (x) == ARG_POINTER_REGNUM
6136 || REGNO (x) == FRAME_POINTER_REGNUM
6137 || REGNO (x) == FLAGS_REG
6138 || REGNO (x) == FPSR_REG)
6139 abort ();
6140
6141 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6142 putc ('%', file);
6143
6144 if (code == 'w' || MMX_REG_P (x))
6145 code = 2;
6146 else if (code == 'b')
6147 code = 1;
6148 else if (code == 'k')
6149 code = 4;
6150 else if (code == 'q')
6151 code = 8;
6152 else if (code == 'y')
6153 code = 3;
6154 else if (code == 'h')
6155 code = 0;
6156 else
6157 code = GET_MODE_SIZE (GET_MODE (x));
6158
6159 /* Irritatingly, the AMD extended registers use a different naming
6160 convention from the normal registers. */
6161 if (REX_INT_REG_P (x))
6162 {
6163 if (!TARGET_64BIT)
6164 abort ();
6165 switch (code)
6166 {
6167 case 0:
6168 error ("extended registers have no high halves");
6169 break;
6170 case 1:
6171 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6172 break;
6173 case 2:
6174 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6175 break;
6176 case 4:
6177 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6178 break;
6179 case 8:
6180 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6181 break;
6182 default:
6183 error ("unsupported operand size for extended register");
6184 break;
6185 }
6186 return;
6187 }
6188 switch (code)
6189 {
6190 case 3:
6191 if (STACK_TOP_P (x))
6192 {
6193 fputs ("st(0)", file);
6194 break;
6195 }
6196 /* FALLTHRU */
6197 case 8:
6198 case 4:
6199 case 12:
6200 if (! ANY_FP_REG_P (x))
6201 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6202 /* FALLTHRU */
6203 case 16:
6204 case 2:
6205 normal:
6206 fputs (hi_reg_name[REGNO (x)], file);
6207 break;
6208 case 1:
6209 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6210 goto normal;
6211 fputs (qi_reg_name[REGNO (x)], file);
6212 break;
6213 case 0:
6214 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6215 goto normal;
6216 fputs (qi_high_reg_name[REGNO (x)], file);
6217 break;
6218 default:
6219 abort ();
6220 }
6221 }
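/* For illustration (a rough sketch): for the SImode register %eax the
   size override codes select the sub-register name,

     'b' -> %al     'w' -> %ax     'k' -> %eax
     'h' -> %ah     'q' -> %rax  (64-bit only)

   and the REX registers use the numbered spellings, e.g. code 'k' on r8
   prints %r8d.  */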
6222
6223 /* Locate some local-dynamic symbol still in use by this function
6224 so that we can print its name in some tls_local_dynamic_base
6225 pattern. */
6226
6227 static const char *
6228 get_some_local_dynamic_name (void)
6229 {
6230 rtx insn;
6231
6232 if (cfun->machine->some_ld_name)
6233 return cfun->machine->some_ld_name;
6234
6235 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6236 if (INSN_P (insn)
6237 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6238 return cfun->machine->some_ld_name;
6239
6240 abort ();
6241 }
6242
6243 static int
6244 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6245 {
6246 rtx x = *px;
6247
6248 if (GET_CODE (x) == SYMBOL_REF
6249 && local_dynamic_symbolic_operand (x, Pmode))
6250 {
6251 cfun->machine->some_ld_name = XSTR (x, 0);
6252 return 1;
6253 }
6254
6255 return 0;
6256 }
6257
6258 /* Meaning of CODE:
6259 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
6260 C -- print opcode suffix for set/cmov insn.
6261 c -- like C, but print reversed condition
6262 F,f -- likewise, but for floating-point.
6263 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
6264 otherwise nothing
6265 R -- print the prefix for register names.
6266 z -- print the opcode suffix for the size of the current operand.
6267 * -- print a star (in certain assembler syntax)
6268 A -- print an absolute memory reference.
6269 w -- print the operand as if it's a "word" (HImode) even if it isn't.
6270 s -- print a shift double count, followed by the assembler's argument
6271 delimiter.
6272 b -- print the QImode name of the register for the indicated operand.
6273 %b0 would print %al if operands[0] is reg 0.
6274 w -- likewise, print the HImode name of the register.
6275 k -- likewise, print the SImode name of the register.
6276 q -- likewise, print the DImode name of the register.
6277 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
6278 y -- print "st(0)" instead of "st" as a register.
6279 D -- print condition for SSE cmp instruction.
6280 P -- if PIC, print an @PLT suffix.
6281 X -- don't print any sort of PIC '@' suffix for a symbol.
6282 & -- print some in-use local-dynamic symbol name.
6283 */
6284
6285 void
6286 print_operand (FILE *file, rtx x, int code)
6287 {
6288 if (code)
6289 {
6290 switch (code)
6291 {
6292 case '*':
6293 if (ASSEMBLER_DIALECT == ASM_ATT)
6294 putc ('*', file);
6295 return;
6296
6297 case '&':
6298 assemble_name (file, get_some_local_dynamic_name ());
6299 return;
6300
6301 case 'A':
6302 if (ASSEMBLER_DIALECT == ASM_ATT)
6303 putc ('*', file);
6304 else if (ASSEMBLER_DIALECT == ASM_INTEL)
6305 {
6306 /* Intel syntax. For absolute addresses, registers should not
6307 be surrounded by brackets. */
6308 if (GET_CODE (x) != REG)
6309 {
6310 putc ('[', file);
6311 PRINT_OPERAND (file, x, 0);
6312 putc (']', file);
6313 return;
6314 }
6315 }
6316 else
6317 abort ();
6318
6319 PRINT_OPERAND (file, x, 0);
6320 return;
6321
6322
6323 case 'L':
6324 if (ASSEMBLER_DIALECT == ASM_ATT)
6325 putc ('l', file);
6326 return;
6327
6328 case 'W':
6329 if (ASSEMBLER_DIALECT == ASM_ATT)
6330 putc ('w', file);
6331 return;
6332
6333 case 'B':
6334 if (ASSEMBLER_DIALECT == ASM_ATT)
6335 putc ('b', file);
6336 return;
6337
6338 case 'Q':
6339 if (ASSEMBLER_DIALECT == ASM_ATT)
6340 putc ('l', file);
6341 return;
6342
6343 case 'S':
6344 if (ASSEMBLER_DIALECT == ASM_ATT)
6345 putc ('s', file);
6346 return;
6347
6348 case 'T':
6349 if (ASSEMBLER_DIALECT == ASM_ATT)
6350 putc ('t', file);
6351 return;
6352
6353 case 'z':
6354 /* 387 opcodes don't get size suffixes if the operands are
6355 registers. */
6356 if (STACK_REG_P (x))
6357 return;
6358
6359 /* Likewise if using Intel opcodes. */
6360 if (ASSEMBLER_DIALECT == ASM_INTEL)
6361 return;
6362
6363 /* This is the size of op from size of operand. */
6364 switch (GET_MODE_SIZE (GET_MODE (x)))
6365 {
6366 case 2:
6367 #ifdef HAVE_GAS_FILDS_FISTS
6368 putc ('s', file);
6369 #endif
6370 return;
6371
6372 case 4:
6373 if (GET_MODE (x) == SFmode)
6374 {
6375 putc ('s', file);
6376 return;
6377 }
6378 else
6379 putc ('l', file);
6380 return;
6381
6382 case 12:
6383 case 16:
6384 putc ('t', file);
6385 return;
6386
6387 case 8:
6388 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
6389 {
6390 #ifdef GAS_MNEMONICS
6391 putc ('q', file);
6392 #else
6393 putc ('l', file);
6394 putc ('l', file);
6395 #endif
6396 }
6397 else
6398 putc ('l', file);
6399 return;
6400
6401 default:
6402 abort ();
6403 }
6404
6405 case 'b':
6406 case 'w':
6407 case 'k':
6408 case 'q':
6409 case 'h':
6410 case 'y':
6411 case 'X':
6412 case 'P':
6413 break;
6414
6415 case 's':
6416 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
6417 {
6418 PRINT_OPERAND (file, x, 0);
6419 putc (',', file);
6420 }
6421 return;
6422
6423 case 'D':
6424 /* Little bit of braindamage here. The SSE compare instructions
6425 use completely different names for the comparisons than the
6426 fp conditional moves do. */
6427 switch (GET_CODE (x))
6428 {
6429 case EQ:
6430 case UNEQ:
6431 fputs ("eq", file);
6432 break;
6433 case LT:
6434 case UNLT:
6435 fputs ("lt", file);
6436 break;
6437 case LE:
6438 case UNLE:
6439 fputs ("le", file);
6440 break;
6441 case UNORDERED:
6442 fputs ("unord", file);
6443 break;
6444 case NE:
6445 case LTGT:
6446 fputs ("neq", file);
6447 break;
6448 case UNGE:
6449 case GE:
6450 fputs ("nlt", file);
6451 break;
6452 case UNGT:
6453 case GT:
6454 fputs ("nle", file);
6455 break;
6456 case ORDERED:
6457 fputs ("ord", file);
6458 break;
6459 default:
6460 abort ();
6461 break;
6462 }
6463 return;
6464 case 'O':
6465 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6466 if (ASSEMBLER_DIALECT == ASM_ATT)
6467 {
6468 switch (GET_MODE (x))
6469 {
6470 case HImode: putc ('w', file); break;
6471 case SImode:
6472 case SFmode: putc ('l', file); break;
6473 case DImode:
6474 case DFmode: putc ('q', file); break;
6475 default: abort ();
6476 }
6477 putc ('.', file);
6478 }
6479 #endif
6480 return;
6481 case 'C':
6482 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
6483 return;
6484 case 'F':
6485 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6486 if (ASSEMBLER_DIALECT == ASM_ATT)
6487 putc ('.', file);
6488 #endif
6489 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
6490 return;
6491
6492 /* Like above, but reverse condition */
6493 case 'c':
6494 /* Check to see if argument to %c is really a constant
6495 and not a condition code which needs to be reversed. */
6496 if (!COMPARISON_P (x))
6497 {
6498 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
6499 return;
6500 }
6501 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
6502 return;
6503 case 'f':
6504 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6505 if (ASSEMBLER_DIALECT == ASM_ATT)
6506 putc ('.', file);
6507 #endif
6508 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
6509 return;
6510 case '+':
6511 {
6512 rtx x;
6513
6514 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
6515 return;
6516
6517 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
6518 if (x)
6519 {
6520 int pred_val = INTVAL (XEXP (x, 0));
6521
6522 if (pred_val < REG_BR_PROB_BASE * 45 / 100
6523 || pred_val > REG_BR_PROB_BASE * 55 / 100)
6524 {
6525 int taken = pred_val > REG_BR_PROB_BASE / 2;
6526 int cputaken = final_forward_branch_p (current_output_insn) == 0;
6527
6528 /* Emit hints only in the case default branch prediction
6529 heuristics would fail. */
6530 if (taken != cputaken)
6531 {
6532 /* We use 3e (DS) prefix for taken branches and
6533 2e (CS) prefix for not taken branches. */
6534 if (taken)
6535 fputs ("ds ; ", file);
6536 else
6537 fputs ("cs ; ", file);
6538 }
6539 }
6540 }
6541 return;
6542 }
6543 default:
6544 output_operand_lossage ("invalid operand code '%c'", code);
6545 }
6546 }
6547
6548 if (GET_CODE (x) == REG)
6549 print_reg (x, code, file);
6550
6551 else if (GET_CODE (x) == MEM)
6552 {
6553 /* No `byte ptr' prefix for call instructions. */
6554 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
6555 {
6556 const char * size;
6557 switch (GET_MODE_SIZE (GET_MODE (x)))
6558 {
6559 case 1: size = "BYTE"; break;
6560 case 2: size = "WORD"; break;
6561 case 4: size = "DWORD"; break;
6562 case 8: size = "QWORD"; break;
6563 case 12: size = "XWORD"; break;
6564 case 16: size = "XMMWORD"; break;
6565 default:
6566 abort ();
6567 }
6568
6569 /* Check for explicit size override (codes 'b', 'w' and 'k') */
6570 if (code == 'b')
6571 size = "BYTE";
6572 else if (code == 'w')
6573 size = "WORD";
6574 else if (code == 'k')
6575 size = "DWORD";
6576
6577 fputs (size, file);
6578 fputs (" PTR ", file);
6579 }
6580
6581 x = XEXP (x, 0);
6582 /* Avoid (%rip) for call operands. */
6583 if (CONSTANT_ADDRESS_P (x) && code == 'P'
6584 && GET_CODE (x) != CONST_INT)
6585 output_addr_const (file, x);
6586 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
6587 output_operand_lossage ("invalid constraints for operand");
6588 else
6589 output_address (x);
6590 }
6591
6592 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
6593 {
6594 REAL_VALUE_TYPE r;
6595 long l;
6596
6597 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6598 REAL_VALUE_TO_TARGET_SINGLE (r, l);
6599
6600 if (ASSEMBLER_DIALECT == ASM_ATT)
6601 putc ('$', file);
6602 fprintf (file, "0x%08lx", l);
6603 }
6604
6605 /* These float cases don't actually occur as immediate operands. */
6606 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
6607 {
6608 char dstr[30];
6609
6610 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6611 fprintf (file, "%s", dstr);
6612 }
6613
6614 else if (GET_CODE (x) == CONST_DOUBLE
6615 && GET_MODE (x) == XFmode)
6616 {
6617 char dstr[30];
6618
6619 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6620 fprintf (file, "%s", dstr);
6621 }
6622
6623 else
6624 {
6625 if (code != 'P')
6626 {
6627 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
6628 {
6629 if (ASSEMBLER_DIALECT == ASM_ATT)
6630 putc ('$', file);
6631 }
6632 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
6633 || GET_CODE (x) == LABEL_REF)
6634 {
6635 if (ASSEMBLER_DIALECT == ASM_ATT)
6636 putc ('$', file);
6637 else
6638 fputs ("OFFSET FLAT:", file);
6639 }
6640 }
6641 if (GET_CODE (x) == CONST_INT)
6642 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6643 else if (flag_pic)
6644 output_pic_addr_const (file, x, code);
6645 else
6646 output_addr_const (file, x);
6647 }
6648 }
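/* For illustration (a rough sketch): in a hypothetical output template
   "fild%z1\t%1" with an SImode memory operand, the 'z' code prints the
   'l' suffix under AT&T syntax, giving "fildl <mem>", while under Intel
   syntax the suffix is omitted and the MEM handling above supplies
   "DWORD PTR" instead.  */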
6649 \f
6650 /* Print a memory operand whose address is ADDR. */
6651
6652 void
6653 print_operand_address (FILE *file, rtx addr)
6654 {
6655 struct ix86_address parts;
6656 rtx base, index, disp;
6657 int scale;
6658
6659 if (! ix86_decompose_address (addr, &parts))
6660 abort ();
6661
6662 base = parts.base;
6663 index = parts.index;
6664 disp = parts.disp;
6665 scale = parts.scale;
6666
6667 switch (parts.seg)
6668 {
6669 case SEG_DEFAULT:
6670 break;
6671 case SEG_FS:
6672 case SEG_GS:
6673 if (USER_LABEL_PREFIX[0] == 0)
6674 putc ('%', file);
6675 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
6676 break;
6677 default:
6678 abort ();
6679 }
6680
6681 if (!base && !index)
6682 {
6683 /* A displacement-only address requires special attention. */
6684
6685 if (GET_CODE (disp) == CONST_INT)
6686 {
6687 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
6688 {
6689 if (USER_LABEL_PREFIX[0] == 0)
6690 putc ('%', file);
6691 fputs ("ds:", file);
6692 }
6693 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
6694 }
6695 else if (flag_pic)
6696 output_pic_addr_const (file, disp, 0);
6697 else
6698 output_addr_const (file, disp);
6699
6700 /* Use the one byte shorter RIP-relative addressing for 64-bit mode. */
6701 if (TARGET_64BIT
6702 && ((GET_CODE (disp) == SYMBOL_REF
6703 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
6704 || GET_CODE (disp) == LABEL_REF
6705 || (GET_CODE (disp) == CONST
6706 && GET_CODE (XEXP (disp, 0)) == PLUS
6707 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
6708 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
6709 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
6710 fputs ("(%rip)", file);
6711 }
6712 else
6713 {
6714 if (ASSEMBLER_DIALECT == ASM_ATT)
6715 {
6716 if (disp)
6717 {
6718 if (flag_pic)
6719 output_pic_addr_const (file, disp, 0);
6720 else if (GET_CODE (disp) == LABEL_REF)
6721 output_asm_label (disp);
6722 else
6723 output_addr_const (file, disp);
6724 }
6725
6726 putc ('(', file);
6727 if (base)
6728 print_reg (base, 0, file);
6729 if (index)
6730 {
6731 putc (',', file);
6732 print_reg (index, 0, file);
6733 if (scale != 1)
6734 fprintf (file, ",%d", scale);
6735 }
6736 putc (')', file);
6737 }
6738 else
6739 {
6740 rtx offset = NULL_RTX;
6741
6742 if (disp)
6743 {
6744 /* Pull out the offset of a symbol; print any symbol itself. */
6745 if (GET_CODE (disp) == CONST
6746 && GET_CODE (XEXP (disp, 0)) == PLUS
6747 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
6748 {
6749 offset = XEXP (XEXP (disp, 0), 1);
6750 disp = gen_rtx_CONST (VOIDmode,
6751 XEXP (XEXP (disp, 0), 0));
6752 }
6753
6754 if (flag_pic)
6755 output_pic_addr_const (file, disp, 0);
6756 else if (GET_CODE (disp) == LABEL_REF)
6757 output_asm_label (disp);
6758 else if (GET_CODE (disp) == CONST_INT)
6759 offset = disp;
6760 else
6761 output_addr_const (file, disp);
6762 }
6763
6764 putc ('[', file);
6765 if (base)
6766 {
6767 print_reg (base, 0, file);
6768 if (offset)
6769 {
6770 if (INTVAL (offset) >= 0)
6771 putc ('+', file);
6772 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6773 }
6774 }
6775 else if (offset)
6776 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6777 else
6778 putc ('0', file);
6779
6780 if (index)
6781 {
6782 putc ('+', file);
6783 print_reg (index, 0, file);
6784 if (scale != 1)
6785 fprintf (file, "*%d", scale);
6786 }
6787 putc (']', file);
6788 }
6789 }
6790 }
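/* For illustration (a rough sketch): an address with base %ebx, index
   %esi, scale 4 and displacement 12 prints as

     AT&T:   12(%ebx,%esi,4)
     Intel:  [ebx+12+esi*4]

   and a bare symbolic displacement in 64-bit mode additionally gets the
   shorter "(%rip)" form used above.  */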
6791
6792 bool
6793 output_addr_const_extra (FILE *file, rtx x)
6794 {
6795 rtx op;
6796
6797 if (GET_CODE (x) != UNSPEC)
6798 return false;
6799
6800 op = XVECEXP (x, 0, 0);
6801 switch (XINT (x, 1))
6802 {
6803 case UNSPEC_GOTTPOFF:
6804 output_addr_const (file, op);
6805 /* FIXME: This might be @TPOFF in Sun ld. */
6806 fputs ("@GOTTPOFF", file);
6807 break;
6808 case UNSPEC_TPOFF:
6809 output_addr_const (file, op);
6810 fputs ("@TPOFF", file);
6811 break;
6812 case UNSPEC_NTPOFF:
6813 output_addr_const (file, op);
6814 if (TARGET_64BIT)
6815 fputs ("@TPOFF", file);
6816 else
6817 fputs ("@NTPOFF", file);
6818 break;
6819 case UNSPEC_DTPOFF:
6820 output_addr_const (file, op);
6821 fputs ("@DTPOFF", file);
6822 break;
6823 case UNSPEC_GOTNTPOFF:
6824 output_addr_const (file, op);
6825 if (TARGET_64BIT)
6826 fputs ("@GOTTPOFF(%rip)", file);
6827 else
6828 fputs ("@GOTNTPOFF", file);
6829 break;
6830 case UNSPEC_INDNTPOFF:
6831 output_addr_const (file, op);
6832 fputs ("@INDNTPOFF", file);
6833 break;
6834
6835 default:
6836 return false;
6837 }
6838
6839 return true;
6840 }
6841 \f
6842 /* Split one or more DImode RTL references into pairs of SImode
6843 references. The RTL can be REG, offsettable MEM, integer constant, or
6844 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
6845 split and "num" is its length. lo_half and hi_half are output arrays
6846 that parallel "operands". */
6847
6848 void
6849 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6850 {
6851 while (num--)
6852 {
6853 rtx op = operands[num];
6854
6855 /* simplify_subreg refuses to split volatile memory addresses,
6856 but we still have to handle them. */
6857 if (GET_CODE (op) == MEM)
6858 {
6859 lo_half[num] = adjust_address (op, SImode, 0);
6860 hi_half[num] = adjust_address (op, SImode, 4);
6861 }
6862 else
6863 {
6864 lo_half[num] = simplify_gen_subreg (SImode, op,
6865 GET_MODE (op) == VOIDmode
6866 ? DImode : GET_MODE (op), 0);
6867 hi_half[num] = simplify_gen_subreg (SImode, op,
6868 GET_MODE (op) == VOIDmode
6869 ? DImode : GET_MODE (op), 4);
6870 }
6871 }
6872 }
6873 /* Split one or more TImode RTL references into pairs of DImode
6874 references. The RTL can be REG, offsettable MEM, integer constant, or
6875 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
6876 split and "num" is its length. lo_half and hi_half are output arrays
6877 that parallel "operands". */
6878
6879 void
6880 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6881 {
6882 while (num--)
6883 {
6884 rtx op = operands[num];
6885
6886 /* simplify_subreg refuses to split volatile memory addresses, but we
6887 still have to handle them. */
6888 if (GET_CODE (op) == MEM)
6889 {
6890 lo_half[num] = adjust_address (op, DImode, 0);
6891 hi_half[num] = adjust_address (op, DImode, 8);
6892 }
6893 else
6894 {
6895 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
6896 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
6897 }
6898 }
6899 }
6900 \f
6901 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
6902 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
6903 is the expression of the binary operation. The output may either be
6904 emitted here, or returned to the caller, like all output_* functions.
6905
6906 There is no guarantee that the operands are the same mode, as they
6907 might be within FLOAT or FLOAT_EXTEND expressions. */
6908
6909 #ifndef SYSV386_COMPAT
6910 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
6911 wants to fix the assemblers because that causes incompatibility
6912 with gcc. No-one wants to fix gcc because that causes
6913 incompatibility with assemblers... You can use the option of
6914 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
6915 #define SYSV386_COMPAT 1
6916 #endif
6917
6918 const char *
6919 output_387_binary_op (rtx insn, rtx *operands)
6920 {
6921 static char buf[30];
6922 const char *p;
6923 const char *ssep;
6924 int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]) | SSE_REG_P (operands[2]);
6925
6926 #ifdef ENABLE_CHECKING
6927 /* Even if we do not want to check the inputs, this documents the input
6928 constraints, which helps in understanding the following code. */
6929 if (STACK_REG_P (operands[0])
6930 && ((REG_P (operands[1])
6931 && REGNO (operands[0]) == REGNO (operands[1])
6932 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
6933 || (REG_P (operands[2])
6934 && REGNO (operands[0]) == REGNO (operands[2])
6935 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
6936 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
6937 ; /* ok */
6938 else if (!is_sse)
6939 abort ();
6940 #endif
6941
6942 switch (GET_CODE (operands[3]))
6943 {
6944 case PLUS:
6945 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6946 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6947 p = "fiadd";
6948 else
6949 p = "fadd";
6950 ssep = "add";
6951 break;
6952
6953 case MINUS:
6954 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6955 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6956 p = "fisub";
6957 else
6958 p = "fsub";
6959 ssep = "sub";
6960 break;
6961
6962 case MULT:
6963 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6964 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6965 p = "fimul";
6966 else
6967 p = "fmul";
6968 ssep = "mul";
6969 break;
6970
6971 case DIV:
6972 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6973 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6974 p = "fidiv";
6975 else
6976 p = "fdiv";
6977 ssep = "div";
6978 break;
6979
6980 default:
6981 abort ();
6982 }
6983
6984 if (is_sse)
6985 {
6986 strcpy (buf, ssep);
6987 if (GET_MODE (operands[0]) == SFmode)
6988 strcat (buf, "ss\t{%2, %0|%0, %2}");
6989 else
6990 strcat (buf, "sd\t{%2, %0|%0, %2}");
6991 return buf;
6992 }
6993 strcpy (buf, p);
6994
6995 switch (GET_CODE (operands[3]))
6996 {
6997 case MULT:
6998 case PLUS:
6999 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7000 {
7001 rtx temp = operands[2];
7002 operands[2] = operands[1];
7003 operands[1] = temp;
7004 }
7005
7006 /* We now know that operands[0] == operands[1]. */
7007
7008 if (GET_CODE (operands[2]) == MEM)
7009 {
7010 p = "%z2\t%2";
7011 break;
7012 }
7013
7014 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7015 {
7016 if (STACK_TOP_P (operands[0]))
7017 /* How is it that we are storing to a dead operand[2]?
7018 Well, presumably operands[1] is dead too. We can't
7019 store the result to st(0) as st(0) gets popped on this
7020 instruction. Instead store to operands[2] (which I
7021 think has to be st(1)). st(1) will be popped later.
7022 gcc <= 2.8.1 didn't have this check and generated
7023 assembly code that the Unixware assembler rejected. */
7024 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7025 else
7026 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7027 break;
7028 }
7029
7030 if (STACK_TOP_P (operands[0]))
7031 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7032 else
7033 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7034 break;
7035
7036 case MINUS:
7037 case DIV:
7038 if (GET_CODE (operands[1]) == MEM)
7039 {
7040 p = "r%z1\t%1";
7041 break;
7042 }
7043
7044 if (GET_CODE (operands[2]) == MEM)
7045 {
7046 p = "%z2\t%2";
7047 break;
7048 }
7049
7050 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7051 {
7052 #if SYSV386_COMPAT
7053 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7054 derived assemblers, confusingly reverse the direction of
7055 the operation for fsub{r} and fdiv{r} when the
7056 destination register is not st(0). The Intel assembler
7057 doesn't have this brain damage. Read !SYSV386_COMPAT to
7058 figure out what the hardware really does. */
7059 if (STACK_TOP_P (operands[0]))
7060 p = "{p\t%0, %2|rp\t%2, %0}";
7061 else
7062 p = "{rp\t%2, %0|p\t%0, %2}";
7063 #else
7064 if (STACK_TOP_P (operands[0]))
7065 /* As above for fmul/fadd, we can't store to st(0). */
7066 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7067 else
7068 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7069 #endif
7070 break;
7071 }
7072
7073 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7074 {
7075 #if SYSV386_COMPAT
7076 if (STACK_TOP_P (operands[0]))
7077 p = "{rp\t%0, %1|p\t%1, %0}";
7078 else
7079 p = "{p\t%1, %0|rp\t%0, %1}";
7080 #else
7081 if (STACK_TOP_P (operands[0]))
7082 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7083 else
7084 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7085 #endif
7086 break;
7087 }
7088
7089 if (STACK_TOP_P (operands[0]))
7090 {
7091 if (STACK_TOP_P (operands[1]))
7092 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7093 else
7094 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7095 break;
7096 }
7097 else if (STACK_TOP_P (operands[1]))
7098 {
7099 #if SYSV386_COMPAT
7100 p = "{\t%1, %0|r\t%0, %1}";
7101 #else
7102 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7103 #endif
7104 }
7105 else
7106 {
7107 #if SYSV386_COMPAT
7108 p = "{r\t%2, %0|\t%0, %2}";
7109 #else
7110 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7111 #endif
7112 }
7113 break;
7114
7115 default:
7116 abort ();
7117 }
7118
7119 strcat (buf, p);
7120 return buf;
7121 }
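/* For illustration (a rough sketch): an SFmode addition on SSE registers
   returns "addss\t{%2, %0|%0, %2}", while the 387 path picks among the
   fadd/faddp forms, e.g. "fadd\t{%y2, %0|%0, %y2}" when the result stays
   in st(0) and the other operand survives.  */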
7122
7123 /* Output code to initialize control word copies used by trunc?f?i and
7124 rounding patterns. CURRENT_MODE is set to the current control word,
7125 while NEW_MODE is set to the new control word. */
7126
7127 void
7128 emit_i387_cw_initialization (rtx current_mode, rtx new_mode, int mode)
7129 {
7130 rtx reg = gen_reg_rtx (HImode);
7131
7132 emit_insn (gen_x86_fnstcw_1 (current_mode));
7133 emit_move_insn (reg, current_mode);
7134
7135 if (!TARGET_PARTIAL_REG_STALL && !optimize_size
7136 && !TARGET_64BIT)
7137 {
7138 switch (mode)
7139 {
7140 case I387_CW_FLOOR:
7141 /* round down toward -oo */
7142 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
7143 break;
7144
7145 case I387_CW_CEIL:
7146 /* round up toward +oo */
7147 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
7148 break;
7149
7150 case I387_CW_TRUNC:
7151 /* round toward zero (truncate) */
7152 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
7153 break;
7154
7155 case I387_CW_MASK_PM:
7156 /* mask precision exception for nearbyint() */
7157 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7158 break;
7159
7160 default:
7161 abort();
7162 }
7163 }
7164 else
7165 {
7166 switch (mode)
7167 {
7168 case I387_CW_FLOOR:
7169 /* round down toward -oo */
7170 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7171 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7172 break;
7173
7174 case I387_CW_CEIL:
7175 /* round up toward +oo */
7176 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7177 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7178 break;
7179
7180 case I387_CW_TRUNC:
7181 /* round toward zero (truncate) */
7182 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7183 break;
7184
7185 case I387_CW_MASK_PM:
7186 /* mask precision exception for nearbyint() */
7187 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7188 break;
7189
7190 default:
7191 abort();
7192 }
7193 }
7194
7195 emit_move_insn (new_mode, reg);
7196 }
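/* For illustration (a rough sketch): the constants used above program
   the x87 rounding-control field, bits 10-11 of the control word:

     0x0400  round down (floor)        0x0800  round up (ceil)
     0x0c00  round toward zero (trunc)

   and 0x0020 sets the precision-exception mask bit used for nearbyint.
   The movsi_insv_1 path appears to write the same rounding bits through
   the high byte of the control word copy.  */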
7197
7198 /* Output code for INSN to convert a float to a signed int. OPERANDS
7199 are the insn operands. The output may be [HSD]Imode and the input
7200 operand may be [SDX]Fmode. */
7201
7202 const char *
7203 output_fix_trunc (rtx insn, rtx *operands)
7204 {
7205 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7206 int dimode_p = GET_MODE (operands[0]) == DImode;
7207
7208 /* Jump through a hoop or two for DImode, since the hardware has no
7209 non-popping instruction. We used to do this a different way, but
7210 that was somewhat fragile and broke with post-reload splitters. */
7211 if (dimode_p && !stack_top_dies)
7212 output_asm_insn ("fld\t%y1", operands);
7213
7214 if (!STACK_TOP_P (operands[1]))
7215 abort ();
7216
7217 if (GET_CODE (operands[0]) != MEM)
7218 abort ();
7219
7220 output_asm_insn ("fldcw\t%3", operands);
7221 if (stack_top_dies || dimode_p)
7222 output_asm_insn ("fistp%z0\t%0", operands);
7223 else
7224 output_asm_insn ("fist%z0\t%0", operands);
7225 output_asm_insn ("fldcw\t%2", operands);
7226
7227 return "";
7228 }
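/* For illustration (a rough sketch): for an SImode destination the
   sequence emitted above is roughly

     fldcw   %3          # switch to the truncating control word
     fistpl  %0          # or fistl when st(0) must survive
     fldcw   %2          # restore the original control word

   with the extra "fld %y1" only for DImode, since there is no
   non-popping 64-bit fist instruction.  */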
7229
7230 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
7231 should be used and 2 when fnstsw should be used. UNORDERED_P is true
7232 when fucom should be used. */
7233
7234 const char *
7235 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
7236 {
7237 int stack_top_dies;
7238 rtx cmp_op0, cmp_op1;
7239 int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]);
7240
7241 if (eflags_p == 2)
7242 {
7243 cmp_op0 = operands[1];
7244 cmp_op1 = operands[2];
7245 }
7246 else
7247 {
7248 cmp_op0 = operands[0];
7249 cmp_op1 = operands[1];
7250 }
7251
7252 if (is_sse)
7253 {
7254 if (GET_MODE (operands[0]) == SFmode)
7255 if (unordered_p)
7256 return "ucomiss\t{%1, %0|%0, %1}";
7257 else
7258 return "comiss\t{%1, %0|%0, %1}";
7259 else
7260 if (unordered_p)
7261 return "ucomisd\t{%1, %0|%0, %1}";
7262 else
7263 return "comisd\t{%1, %0|%0, %1}";
7264 }
7265
7266 if (! STACK_TOP_P (cmp_op0))
7267 abort ();
7268
7269 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7270
7271 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
7272 {
7273 if (stack_top_dies)
7274 {
7275 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
7276 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
7277 }
7278 else
7279 return "ftst\n\tfnstsw\t%0";
7280 }
7281
7282 if (STACK_REG_P (cmp_op1)
7283 && stack_top_dies
7284 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
7285 && REGNO (cmp_op1) != FIRST_STACK_REG)
7286 {
7287 /* If the top of the 387 stack dies, and the other operand is
7288 also a stack register that dies, then this must be an
7289 `fcompp' float compare. */
7290
7291 if (eflags_p == 1)
7292 {
7293 /* There is no double popping fcomi variant. Fortunately,
7294 eflags is immune from the fstp's cc clobbering. */
7295 if (unordered_p)
7296 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
7297 else
7298 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
7299 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
7300 }
7301 else
7302 {
7303 if (eflags_p == 2)
7304 {
7305 if (unordered_p)
7306 return "fucompp\n\tfnstsw\t%0";
7307 else
7308 return "fcompp\n\tfnstsw\t%0";
7309 }
7310 else
7311 {
7312 if (unordered_p)
7313 return "fucompp";
7314 else
7315 return "fcompp";
7316 }
7317 }
7318 }
7319 else
7320 {
7321 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
7322
7323 static const char * const alt[24] =
7324 {
7325 "fcom%z1\t%y1",
7326 "fcomp%z1\t%y1",
7327 "fucom%z1\t%y1",
7328 "fucomp%z1\t%y1",
7329
7330 "ficom%z1\t%y1",
7331 "ficomp%z1\t%y1",
7332 NULL,
7333 NULL,
7334
7335 "fcomi\t{%y1, %0|%0, %y1}",
7336 "fcomip\t{%y1, %0|%0, %y1}",
7337 "fucomi\t{%y1, %0|%0, %y1}",
7338 "fucomip\t{%y1, %0|%0, %y1}",
7339
7340 NULL,
7341 NULL,
7342 NULL,
7343 NULL,
7344
7345 "fcom%z2\t%y2\n\tfnstsw\t%0",
7346 "fcomp%z2\t%y2\n\tfnstsw\t%0",
7347 "fucom%z2\t%y2\n\tfnstsw\t%0",
7348 "fucomp%z2\t%y2\n\tfnstsw\t%0",
7349
7350 "ficom%z2\t%y2\n\tfnstsw\t%0",
7351 "ficomp%z2\t%y2\n\tfnstsw\t%0",
7352 NULL,
7353 NULL
7354 };
7355
7356 int mask;
7357 const char *ret;
7358
7359 mask = eflags_p << 3;
7360 mask |= (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT) << 2;
7361 mask |= unordered_p << 1;
7362 mask |= stack_top_dies;
7363
7364 if (mask >= 24)
7365 abort ();
7366 ret = alt[mask];
7367 if (ret == NULL)
7368 abort ();
7369
7370 return ret;
7371 }
7372 }
7373
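/* Illustrative sketch (not compiled): how the index into the "alt" table in
   output_fp_compare is assembled.  The four inputs are packed as
   eflags_p << 3 | int_operand << 2 | unordered_p << 1 | stack_top_dies,
   so e.g. eflags_p == 2 with an FP register operand, an ordered compare
   and a dying stack top gives index 17, the popping "fcomp%z2 ... fnstsw"
   form.  The helper name is hypothetical.  */
#if 0
static int
fp_compare_alt_index (int eflags_p, int int_operand_p,
		      int unordered_p, int stack_top_dies)
{
  return (eflags_p << 3) | (int_operand_p << 2)
	 | (unordered_p << 1) | stack_top_dies;
}
#endif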
7374 void
7375 ix86_output_addr_vec_elt (FILE *file, int value)
7376 {
7377 const char *directive = ASM_LONG;
7378
7379 if (TARGET_64BIT)
7380 {
7381 #ifdef ASM_QUAD
7382 directive = ASM_QUAD;
7383 #else
7384 abort ();
7385 #endif
7386 }
7387
7388 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
7389 }
7390
7391 void
7392 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
7393 {
7394 if (TARGET_64BIT)
7395 fprintf (file, "%s%s%d-%s%d\n",
7396 ASM_LONG, LPREFIX, value, LPREFIX, rel);
7397 else if (HAVE_AS_GOTOFF_IN_DATA)
7398 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
7399 #if TARGET_MACHO
7400 else if (TARGET_MACHO)
7401 {
7402 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
7403 machopic_output_function_base_name (file);
7404 fprintf(file, "\n");
7405 }
7406 #endif
7407 else
7408 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
7409 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
7410 }
7411 \f
7412 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
7413 for the target. */
7414
7415 void
7416 ix86_expand_clear (rtx dest)
7417 {
7418 rtx tmp;
7419
7420 /* We play register width games, which are only valid after reload. */
7421 if (!reload_completed)
7422 abort ();
7423
7424 /* Avoid HImode and its attendant prefix byte. */
7425 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
7426 dest = gen_rtx_REG (SImode, REGNO (dest));
7427
7428 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
7429
7430 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
7431 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
7432 {
7433 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
7434 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
7435 }
7436
7437 emit_insn (tmp);
7438 }
7439
7440 /* X is an unchanging MEM. If it is a constant pool reference, return
7441 the constant pool rtx, else NULL. */
7442
7443 rtx
7444 maybe_get_pool_constant (rtx x)
7445 {
7446 x = ix86_delegitimize_address (XEXP (x, 0));
7447
7448 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
7449 return get_pool_constant (x);
7450
7451 return NULL_RTX;
7452 }
7453
7454 void
7455 ix86_expand_move (enum machine_mode mode, rtx operands[])
7456 {
7457 int strict = (reload_in_progress || reload_completed);
7458 rtx op0, op1;
7459 enum tls_model model;
7460
7461 op0 = operands[0];
7462 op1 = operands[1];
7463
7464 model = GET_CODE (op1) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (op1) : 0;
7465 if (model)
7466 {
7467 op1 = legitimize_tls_address (op1, model, true);
7468 op1 = force_operand (op1, op0);
7469 if (op1 == op0)
7470 return;
7471 }
7472
7473 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
7474 {
7475 #if TARGET_MACHO
7476 if (MACHOPIC_PURE)
7477 {
7478 rtx temp = ((reload_in_progress
7479 || ((op0 && GET_CODE (op0) == REG)
7480 && mode == Pmode))
7481 ? op0 : gen_reg_rtx (Pmode));
7482 op1 = machopic_indirect_data_reference (op1, temp);
7483 op1 = machopic_legitimize_pic_address (op1, mode,
7484 temp == op1 ? 0 : temp);
7485 }
7486 else if (MACHOPIC_INDIRECT)
7487 op1 = machopic_indirect_data_reference (op1, 0);
7488 if (op0 == op1)
7489 return;
7490 #else
7491 if (GET_CODE (op0) == MEM)
7492 op1 = force_reg (Pmode, op1);
7493 else
7494 op1 = legitimize_address (op1, op1, Pmode);
7495 #endif /* TARGET_MACHO */
7496 }
7497 else
7498 {
7499 if (GET_CODE (op0) == MEM
7500 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
7501 || !push_operand (op0, mode))
7502 && GET_CODE (op1) == MEM)
7503 op1 = force_reg (mode, op1);
7504
7505 if (push_operand (op0, mode)
7506 && ! general_no_elim_operand (op1, mode))
7507 op1 = copy_to_mode_reg (mode, op1);
7508
7509 /* Force large constants in 64-bit compilation into a register
7510 so they can be CSEd. */
7511 if (TARGET_64BIT && mode == DImode
7512 && immediate_operand (op1, mode)
7513 && !x86_64_zext_immediate_operand (op1, VOIDmode)
7514 && !register_operand (op0, mode)
7515 && optimize && !reload_completed && !reload_in_progress)
7516 op1 = copy_to_mode_reg (mode, op1);
7517
7518 if (FLOAT_MODE_P (mode))
7519 {
7520 /* If we are loading a floating point constant to a register,
7521 force the value to memory now, since we'll get better code
7522 out of the back end. */
7523
7524 if (strict)
7525 ;
7526 else if (GET_CODE (op1) == CONST_DOUBLE)
7527 {
7528 op1 = validize_mem (force_const_mem (mode, op1));
7529 if (!register_operand (op0, mode))
7530 {
7531 rtx temp = gen_reg_rtx (mode);
7532 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
7533 emit_move_insn (op0, temp);
7534 return;
7535 }
7536 }
7537 }
7538 }
7539
7540 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7541 }
7542
7543 void
7544 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
7545 {
7546 /* Force constants other than zero into memory. We do not know how
7547 the instructions used to build constants modify the upper 64 bits
7548 of the register; once we have that information, we may be able
7549 to handle some of them more efficiently. */
7550 if ((reload_in_progress | reload_completed) == 0
7551 && register_operand (operands[0], mode)
7552 && CONSTANT_P (operands[1]) && operands[1] != CONST0_RTX (mode))
7553 operands[1] = validize_mem (force_const_mem (mode, operands[1]));
7554
7555 /* If neither operand is a register, force operand 1 into a register (no memory-to-memory moves). */
7556 if (!no_new_pseudos
7557 && !register_operand (operands[0], mode)
7558 && !register_operand (operands[1], mode))
7559 {
7560 rtx temp = force_reg (GET_MODE (operands[1]), operands[1]);
7561 emit_move_insn (operands[0], temp);
7562 return;
7563 }
7564
7565 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7566 }
7567
7568 /* Attempt to expand a binary operator. Make the expansion closer to the
7569 actual machine than just general_operand, which would otherwise allow
7570 3 separate memory references (one output, two input) in a single insn. */
7571
7572 void
7573 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
7574 rtx operands[])
7575 {
7576 int matching_memory;
7577 rtx src1, src2, dst, op, clob;
7578
7579 dst = operands[0];
7580 src1 = operands[1];
7581 src2 = operands[2];
7582
7583 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
7584 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7585 && (rtx_equal_p (dst, src2)
7586 || immediate_operand (src1, mode)))
7587 {
7588 rtx temp = src1;
7589 src1 = src2;
7590 src2 = temp;
7591 }
7592
7593 /* If the destination is memory, and we do not have matching source
7594 operands, do things in registers. */
7595 matching_memory = 0;
7596 if (GET_CODE (dst) == MEM)
7597 {
7598 if (rtx_equal_p (dst, src1))
7599 matching_memory = 1;
7600 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7601 && rtx_equal_p (dst, src2))
7602 matching_memory = 2;
7603 else
7604 dst = gen_reg_rtx (mode);
7605 }
7606
7607 /* Both source operands cannot be in memory. */
7608 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
7609 {
7610 if (matching_memory != 2)
7611 src2 = force_reg (mode, src2);
7612 else
7613 src1 = force_reg (mode, src1);
7614 }
7615
7616 /* If the operation is not commutative, source 1 cannot be a constant
7617 or non-matching memory. */
7618 if ((CONSTANT_P (src1)
7619 || (!matching_memory && GET_CODE (src1) == MEM))
7620 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7621 src1 = force_reg (mode, src1);
7622
7623 /* If optimizing, copy to regs to improve CSE */
7624 if (optimize && ! no_new_pseudos)
7625 {
7626 if (GET_CODE (dst) == MEM)
7627 dst = gen_reg_rtx (mode);
7628 if (GET_CODE (src1) == MEM)
7629 src1 = force_reg (mode, src1);
7630 if (GET_CODE (src2) == MEM)
7631 src2 = force_reg (mode, src2);
7632 }
7633
7634 /* Emit the instruction. */
7635
7636 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
7637 if (reload_in_progress)
7638 {
7639 /* Reload doesn't know about the flags register, and doesn't know that
7640 it doesn't want to clobber it. We can only do this with PLUS. */
7641 if (code != PLUS)
7642 abort ();
7643 emit_insn (op);
7644 }
7645 else
7646 {
7647 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7648 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7649 }
7650
7651 /* Fix up the destination if needed. */
7652 if (dst != operands[0])
7653 emit_move_insn (operands[0], dst);
7654 }
7655
7656 /* Return TRUE or FALSE depending on whether the binary operator meets the
7657 appropriate constraints. */
7658
7659 int
7660 ix86_binary_operator_ok (enum rtx_code code,
7661 enum machine_mode mode ATTRIBUTE_UNUSED,
7662 rtx operands[3])
7663 {
7664 /* Both source operands cannot be in memory. */
7665 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
7666 return 0;
7667 /* If the operation is not commutative, source 1 cannot be a constant. */
7668 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7669 return 0;
7670 /* If the destination is memory, we must have a matching source operand. */
7671 if (GET_CODE (operands[0]) == MEM
7672 && ! (rtx_equal_p (operands[0], operands[1])
7673 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7674 && rtx_equal_p (operands[0], operands[2]))))
7675 return 0;
7676 /* If the operation is not commutative and source 1 is memory, we must
7677 have a matching destination. */
7678 if (GET_CODE (operands[1]) == MEM
7679 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
7680 && ! rtx_equal_p (operands[0], operands[1]))
7681 return 0;
7682 return 1;
7683 }
7684
7685 /* Attempt to expand a unary operator. Make the expansion closer to the
7686 actual machine than just general_operand, which would otherwise allow
7687 2 separate memory references (one output, one input) in a single insn. */
7688
7689 void
7690 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
7691 rtx operands[])
7692 {
7693 int matching_memory;
7694 rtx src, dst, op, clob;
7695
7696 dst = operands[0];
7697 src = operands[1];
7698
7699 /* If the destination is memory, and we do not have matching source
7700 operands, do things in registers. */
7701 matching_memory = 0;
7702 if (GET_CODE (dst) == MEM)
7703 {
7704 if (rtx_equal_p (dst, src))
7705 matching_memory = 1;
7706 else
7707 dst = gen_reg_rtx (mode);
7708 }
7709
7710 /* When source operand is memory, destination must match. */
7711 if (!matching_memory && GET_CODE (src) == MEM)
7712 src = force_reg (mode, src);
7713
7714 /* If optimizing, copy to regs to improve CSE */
7715 if (optimize && ! no_new_pseudos)
7716 {
7717 if (GET_CODE (dst) == MEM)
7718 dst = gen_reg_rtx (mode);
7719 if (GET_CODE (src) == MEM)
7720 src = force_reg (mode, src);
7721 }
7722
7723 /* Emit the instruction. */
7724
7725 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
7726 if (reload_in_progress || code == NOT)
7727 {
7728 /* Reload doesn't know about the flags register, and doesn't know that
7729 it doesn't want to clobber it. */
7730 if (code != NOT)
7731 abort ();
7732 emit_insn (op);
7733 }
7734 else
7735 {
7736 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7737 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7738 }
7739
7740 /* Fix up the destination if needed. */
7741 if (dst != operands[0])
7742 emit_move_insn (operands[0], dst);
7743 }
7744
7745 /* Return TRUE or FALSE depending on whether the unary operator meets the
7746 appropriate constraints. */
7747
7748 int
7749 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
7750 enum machine_mode mode ATTRIBUTE_UNUSED,
7751 rtx operands[2] ATTRIBUTE_UNUSED)
7752 {
7753 /* If one of operands is memory, source and destination must match. */
7754 if ((GET_CODE (operands[0]) == MEM
7755 || GET_CODE (operands[1]) == MEM)
7756 && ! rtx_equal_p (operands[0], operands[1]))
7757 return FALSE;
7758 return TRUE;
7759 }
7760
7761 /* Return TRUE or FALSE depending on whether the first SET in INSN
7762 has source and destination with matching CC modes and the
7763 CC mode is at least as constrained as REQ_MODE. */
7764
7765 int
7766 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
7767 {
7768 rtx set;
7769 enum machine_mode set_mode;
7770
7771 set = PATTERN (insn);
7772 if (GET_CODE (set) == PARALLEL)
7773 set = XVECEXP (set, 0, 0);
7774 if (GET_CODE (set) != SET)
7775 abort ();
7776 if (GET_CODE (SET_SRC (set)) != COMPARE)
7777 abort ();
7778
7779 set_mode = GET_MODE (SET_DEST (set));
7780 switch (set_mode)
7781 {
7782 case CCNOmode:
7783 if (req_mode != CCNOmode
7784 && (req_mode != CCmode
7785 || XEXP (SET_SRC (set), 1) != const0_rtx))
7786 return 0;
7787 break;
7788 case CCmode:
7789 if (req_mode == CCGCmode)
7790 return 0;
7791 /* FALLTHRU */
7792 case CCGCmode:
7793 if (req_mode == CCGOCmode || req_mode == CCNOmode)
7794 return 0;
7795 /* FALLTHRU */
7796 case CCGOCmode:
7797 if (req_mode == CCZmode)
7798 return 0;
7799 /* FALLTHRU */
7800 case CCZmode:
7801 break;
7802
7803 default:
7804 abort ();
7805 }
7806
7807 return (GET_MODE (SET_SRC (set)) == set_mode);
7808 }
7809
7810 /* Generate insn patterns to do an integer compare of OPERANDS. */
7811
7812 static rtx
7813 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
7814 {
7815 enum machine_mode cmpmode;
7816 rtx tmp, flags;
7817
7818 cmpmode = SELECT_CC_MODE (code, op0, op1);
7819 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
7820
7821 /* This is very simple, but making the interface the same as in the
7822 FP case makes the rest of the code easier. */
7823 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
7824 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
7825
7826 /* Return the test that should be put into the flags user, i.e.
7827 the bcc, scc, or cmov instruction. */
7828 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
7829 }
7830
7831 /* Figure out whether to use ordered or unordered fp comparisons.
7832 Return the appropriate mode to use. */
7833
7834 enum machine_mode
7835 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
7836 {
7837 /* ??? In order to make all comparisons reversible, we do all comparisons
7838 non-trapping when compiling for IEEE. Once gcc is able to distinguish
7839 between all forms of trapping and nontrapping comparisons, we can make
7840 inequality comparisons trapping again, since that results in better code
7841 when using FCOM based compares. */
7842 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
7843 }
7844
7845 enum machine_mode
7846 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
7847 {
7848 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7849 return ix86_fp_compare_mode (code);
7850 switch (code)
7851 {
7852 /* Only zero flag is needed. */
7853 case EQ: /* ZF=0 */
7854 case NE: /* ZF!=0 */
7855 return CCZmode;
7856 /* Codes needing carry flag. */
7857 case GEU: /* CF=0 */
7858 case GTU: /* CF=0 & ZF=0 */
7859 case LTU: /* CF=1 */
7860 case LEU: /* CF=1 | ZF=1 */
7861 return CCmode;
7862 /* Codes possibly doable only with sign flag when
7863 comparing against zero. */
7864 case GE: /* SF=OF or SF=0 */
7865 case LT: /* SF<>OF or SF=1 */
7866 if (op1 == const0_rtx)
7867 return CCGOCmode;
7868 else
7869 /* For other cases Carry flag is not required. */
7870 return CCGCmode;
7871 /* Codes doable only with sign flag when comparing
7872 against zero, but we miss jump instruction for it
7873 so we need to use relational tests against overflow
7874 that thus needs to be zero. */
7875 case GT: /* ZF=0 & SF=OF */
7876 case LE: /* ZF=1 | SF<>OF */
7877 if (op1 == const0_rtx)
7878 return CCNOmode;
7879 else
7880 return CCGCmode;
7881 /* The strcmp pattern does a (use flags), and combine may ask us for
7882 the proper mode. */
7883 case USE:
7884 return CCmode;
7885 default:
7886 abort ();
7887 }
7888 }
7889
7890 /* Return the fixed registers used for condition codes. */
7891
7892 static bool
7893 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
7894 {
7895 *p1 = FLAGS_REG;
7896 *p2 = FPSR_REG;
7897 return true;
7898 }
7899
7900 /* If two condition code modes are compatible, return a condition code
7901 mode which is compatible with both. Otherwise, return
7902 VOIDmode. */
7903
7904 static enum machine_mode
7905 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
7906 {
7907 if (m1 == m2)
7908 return m1;
7909
7910 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
7911 return VOIDmode;
7912
7913 if ((m1 == CCGCmode && m2 == CCGOCmode)
7914 || (m1 == CCGOCmode && m2 == CCGCmode))
7915 return CCGCmode;
7916
7917 switch (m1)
7918 {
7919 default:
7920 abort ();
7921
7922 case CCmode:
7923 case CCGCmode:
7924 case CCGOCmode:
7925 case CCNOmode:
7926 case CCZmode:
7927 switch (m2)
7928 {
7929 default:
7930 return VOIDmode;
7931
7932 case CCmode:
7933 case CCGCmode:
7934 case CCGOCmode:
7935 case CCNOmode:
7936 case CCZmode:
7937 return CCmode;
7938 }
7939
7940 case CCFPmode:
7941 case CCFPUmode:
7942 /* These are only compatible with themselves, which we already
7943 checked above. */
7944 return VOIDmode;
7945 }
7946 }
7947
7948 /* Return true if we should use an FCOMI instruction for this fp comparison. */
7949
7950 int
7951 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
7952 {
7953 enum rtx_code swapped_code = swap_condition (code);
7954 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
7955 || (ix86_fp_comparison_cost (swapped_code)
7956 == ix86_fp_comparison_fcomi_cost (swapped_code)));
7957 }
7958
7959 /* Swap, force into registers, or otherwise massage the two operands
7960 to a fp comparison. The operands are updated in place; the new
7961 comparison code is returned. */
7962
7963 static enum rtx_code
7964 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
7965 {
7966 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
7967 rtx op0 = *pop0, op1 = *pop1;
7968 enum machine_mode op_mode = GET_MODE (op0);
7969 int is_sse = SSE_REG_P (op0) | SSE_REG_P (op1);
7970
7971 /* All of the unordered compare instructions only work on registers.
7972 The same is true of the fcomi compare instructions. The same is
7973 true of the XFmode compare instructions if not comparing with
7974 zero (ftst insn is used in this case). */
7975
7976 if (!is_sse
7977 && (fpcmp_mode == CCFPUmode
7978 || (op_mode == XFmode
7979 && ! (standard_80387_constant_p (op0) == 1
7980 || standard_80387_constant_p (op1) == 1))
7981 || ix86_use_fcomi_compare (code)))
7982 {
7983 op0 = force_reg (op_mode, op0);
7984 op1 = force_reg (op_mode, op1);
7985 }
7986 else
7987 {
7988 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
7989 things around if they appear profitable, otherwise force op0
7990 into a register. */
7991
7992 if (standard_80387_constant_p (op0) == 0
7993 || (GET_CODE (op0) == MEM
7994 && ! (standard_80387_constant_p (op1) == 0
7995 || GET_CODE (op1) == MEM)))
7996 {
7997 rtx tmp;
7998 tmp = op0, op0 = op1, op1 = tmp;
7999 code = swap_condition (code);
8000 }
8001
8002 if (GET_CODE (op0) != REG)
8003 op0 = force_reg (op_mode, op0);
8004
8005 if (CONSTANT_P (op1))
8006 {
8007 int tmp = standard_80387_constant_p (op1);
8008 if (tmp == 0)
8009 op1 = validize_mem (force_const_mem (op_mode, op1));
8010 else if (tmp == 1)
8011 {
8012 if (TARGET_CMOVE)
8013 op1 = force_reg (op_mode, op1);
8014 }
8015 else
8016 op1 = force_reg (op_mode, op1);
8017 }
8018 }
8019
8020 /* Try to rearrange the comparison to make it cheaper. */
8021 if (ix86_fp_comparison_cost (code)
8022 > ix86_fp_comparison_cost (swap_condition (code))
8023 && (GET_CODE (op1) == REG || !no_new_pseudos))
8024 {
8025 rtx tmp;
8026 tmp = op0, op0 = op1, op1 = tmp;
8027 code = swap_condition (code);
8028 if (GET_CODE (op0) != REG)
8029 op0 = force_reg (op_mode, op0);
8030 }
8031
8032 *pop0 = op0;
8033 *pop1 = op1;
8034 return code;
8035 }
8036
8037 /* Convert comparison codes we use to represent FP comparison to integer
8038 code that will result in proper branch. Return UNKNOWN if no such code
8039 is available. */
8040
8041 enum rtx_code
8042 ix86_fp_compare_code_to_integer (enum rtx_code code)
8043 {
8044 switch (code)
8045 {
8046 case GT:
8047 return GTU;
8048 case GE:
8049 return GEU;
8050 case ORDERED:
8051 case UNORDERED:
8052 return code;
8053 break;
8054 case UNEQ:
8055 return EQ;
8056 break;
8057 case UNLT:
8058 return LTU;
8059 break;
8060 case UNLE:
8061 return LEU;
8062 break;
8063 case LTGT:
8064 return NE;
8065 break;
8066 default:
8067 return UNKNOWN;
8068 }
8069 }
8070
8071 /* Split comparison code CODE into comparisons we can do using branch
8072 instructions. BYPASS_CODE is the comparison code for the branch that
8073 will branch around FIRST_CODE and SECOND_CODE. If one of the branches
8074 is not required, its value is set to UNKNOWN.
8075 We never require more than two branches. */
8076
8077 void
8078 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
8079 enum rtx_code *first_code,
8080 enum rtx_code *second_code)
8081 {
8082 *first_code = code;
8083 *bypass_code = UNKNOWN;
8084 *second_code = UNKNOWN;
8085
8086 /* The fcomi comparison sets flags as follows:
8087
8088 cmp ZF PF CF
8089 > 0 0 0
8090 < 0 0 1
8091 = 1 0 0
8092 un 1 1 1 */
8093
8094 switch (code)
8095 {
8096 case GT: /* GTU - CF=0 & ZF=0 */
8097 case GE: /* GEU - CF=0 */
8098 case ORDERED: /* PF=0 */
8099 case UNORDERED: /* PF=1 */
8100 case UNEQ: /* EQ - ZF=1 */
8101 case UNLT: /* LTU - CF=1 */
8102 case UNLE: /* LEU - CF=1 | ZF=1 */
8103 case LTGT: /* EQ - ZF=0 */
8104 break;
8105 case LT: /* LTU - CF=1 - fails on unordered */
8106 *first_code = UNLT;
8107 *bypass_code = UNORDERED;
8108 break;
8109 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
8110 *first_code = UNLE;
8111 *bypass_code = UNORDERED;
8112 break;
8113 case EQ: /* EQ - ZF=1 - fails on unordered */
8114 *first_code = UNEQ;
8115 *bypass_code = UNORDERED;
8116 break;
8117 case NE: /* NE - ZF=0 - fails on unordered */
8118 *first_code = LTGT;
8119 *second_code = UNORDERED;
8120 break;
8121 case UNGE: /* GEU - CF=0 - fails on unordered */
8122 *first_code = GE;
8123 *second_code = UNORDERED;
8124 break;
8125 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
8126 *first_code = GT;
8127 *second_code = UNORDERED;
8128 break;
8129 default:
8130 abort ();
8131 }
8132 if (!TARGET_IEEE_FP)
8133 {
8134 *second_code = UNKNOWN;
8135 *bypass_code = UNKNOWN;
8136 }
8137 }
8138
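/* Illustrative sketch (not compiled): the branch structure implied by the
   bypass/first codes computed above, for a TARGET_IEEE_FP "a < b".  LT is
   wrong for unordered operands, so it becomes first_code = UNLT with
   bypass_code = UNORDERED, i.e. branch around the real test whenever the
   operands are unordered.  The helper uses the C99 <math.h> comparison
   macros and its name is hypothetical.  */
#if 0
#include <math.h>

static int
ieee_fp_lt (double a, double b)
{
  if (isunordered (a, b))	/* bypass_code: unordered operands fail LT.  */
    return 0;
  return !isgreaterequal (a, b);	/* first_code UNLT; the bypass above has
					   already filtered the unordered case.  */
}
#endif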
8139 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
8140 All of the following functions use the number of instructions as the cost metric.
8141 In the future this should be tweaked to compute bytes for optimize_size and to
8142 take into account the performance of various instructions on various CPUs. */
8143 static int
8144 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
8145 {
8146 if (!TARGET_IEEE_FP)
8147 return 4;
8148 /* The cost of code output by ix86_expand_fp_compare. */
8149 switch (code)
8150 {
8151 case UNLE:
8152 case UNLT:
8153 case LTGT:
8154 case GT:
8155 case GE:
8156 case UNORDERED:
8157 case ORDERED:
8158 case UNEQ:
8159 return 4;
8160 break;
8161 case LT:
8162 case NE:
8163 case EQ:
8164 case UNGE:
8165 return 5;
8166 break;
8167 case LE:
8168 case UNGT:
8169 return 6;
8170 break;
8171 default:
8172 abort ();
8173 }
8174 }
8175
8176 /* Return cost of comparison done using fcomi operation.
8177 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8178 static int
8179 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
8180 {
8181 enum rtx_code bypass_code, first_code, second_code;
8182 /* Return arbitrarily high cost when instruction is not supported - this
8183 prevents gcc from using it. */
8184 if (!TARGET_CMOVE)
8185 return 1024;
8186 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8187 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
8188 }
8189
8190 /* Return cost of comparison done using sahf operation.
8191 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8192 static int
8193 ix86_fp_comparison_sahf_cost (enum rtx_code code)
8194 {
8195 enum rtx_code bypass_code, first_code, second_code;
8196 /* Return an arbitrarily high cost when the instruction is not preferred - this
8197 prevents gcc from using it. */
8198 if (!TARGET_USE_SAHF && !optimize_size)
8199 return 1024;
8200 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8201 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
8202 }
8203
8204 /* Compute cost of the comparison done using any method.
8205 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8206 static int
8207 ix86_fp_comparison_cost (enum rtx_code code)
8208 {
8209 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
8210 int min;
8211
8212 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
8213 sahf_cost = ix86_fp_comparison_sahf_cost (code);
8214
8215 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
8216 if (min > sahf_cost)
8217 min = sahf_cost;
8218 if (min > fcomi_cost)
8219 min = fcomi_cost;
8220 return min;
8221 }
8222
8223 /* Generate insn patterns to do a floating point compare of OPERANDS. */
8224
8225 static rtx
8226 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
8227 rtx *second_test, rtx *bypass_test)
8228 {
8229 enum machine_mode fpcmp_mode, intcmp_mode;
8230 rtx tmp, tmp2;
8231 int cost = ix86_fp_comparison_cost (code);
8232 enum rtx_code bypass_code, first_code, second_code;
8233
8234 fpcmp_mode = ix86_fp_compare_mode (code);
8235 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
8236
8237 if (second_test)
8238 *second_test = NULL_RTX;
8239 if (bypass_test)
8240 *bypass_test = NULL_RTX;
8241
8242 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8243
8244 /* Do fcomi/sahf based test when profitable. */
8245 if ((bypass_code == UNKNOWN || bypass_test)
8246 && (second_code == UNKNOWN || second_test)
8247 && ix86_fp_comparison_arithmetics_cost (code) > cost)
8248 {
8249 if (TARGET_CMOVE)
8250 {
8251 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8252 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
8253 tmp);
8254 emit_insn (tmp);
8255 }
8256 else
8257 {
8258 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8259 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8260 if (!scratch)
8261 scratch = gen_reg_rtx (HImode);
8262 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8263 emit_insn (gen_x86_sahf_1 (scratch));
8264 }
8265
8266 /* The FP codes work out to act like unsigned. */
8267 intcmp_mode = fpcmp_mode;
8268 code = first_code;
8269 if (bypass_code != UNKNOWN)
8270 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
8271 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8272 const0_rtx);
8273 if (second_code != UNKNOWN)
8274 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
8275 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8276 const0_rtx);
8277 }
8278 else
8279 {
8280 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
8281 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8282 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8283 if (!scratch)
8284 scratch = gen_reg_rtx (HImode);
8285 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8286
8287 /* In the unordered case, we have to check C2 for NaN's, which
8288 doesn't happen to work out to anything nice combination-wise.
8289 So do some bit twiddling on the value we've got in AH to come
8290 up with an appropriate set of condition codes. */
8291
8292 intcmp_mode = CCNOmode;
8293 switch (code)
8294 {
8295 case GT:
8296 case UNGT:
8297 if (code == GT || !TARGET_IEEE_FP)
8298 {
8299 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8300 code = EQ;
8301 }
8302 else
8303 {
8304 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8305 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8306 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
8307 intcmp_mode = CCmode;
8308 code = GEU;
8309 }
8310 break;
8311 case LT:
8312 case UNLT:
8313 if (code == LT && TARGET_IEEE_FP)
8314 {
8315 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8316 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
8317 intcmp_mode = CCmode;
8318 code = EQ;
8319 }
8320 else
8321 {
8322 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
8323 code = NE;
8324 }
8325 break;
8326 case GE:
8327 case UNGE:
8328 if (code == GE || !TARGET_IEEE_FP)
8329 {
8330 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
8331 code = EQ;
8332 }
8333 else
8334 {
8335 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8336 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8337 GEN_INT (0x01)));
8338 code = NE;
8339 }
8340 break;
8341 case LE:
8342 case UNLE:
8343 if (code == LE && TARGET_IEEE_FP)
8344 {
8345 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8346 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8347 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8348 intcmp_mode = CCmode;
8349 code = LTU;
8350 }
8351 else
8352 {
8353 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8354 code = NE;
8355 }
8356 break;
8357 case EQ:
8358 case UNEQ:
8359 if (code == EQ && TARGET_IEEE_FP)
8360 {
8361 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8362 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8363 intcmp_mode = CCmode;
8364 code = EQ;
8365 }
8366 else
8367 {
8368 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8369 code = NE;
8370 break;
8371 }
8372 break;
8373 case NE:
8374 case LTGT:
8375 if (code == NE && TARGET_IEEE_FP)
8376 {
8377 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8378 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8379 GEN_INT (0x40)));
8380 code = NE;
8381 }
8382 else
8383 {
8384 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8385 code = EQ;
8386 }
8387 break;
8388
8389 case UNORDERED:
8390 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8391 code = NE;
8392 break;
8393 case ORDERED:
8394 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8395 code = EQ;
8396 break;
8397
8398 default:
8399 abort ();
8400 }
8401 }
8402
8403 /* Return the test that should be put into the flags user, i.e.
8404 the bcc, scc, or cmov instruction. */
8405 return gen_rtx_fmt_ee (code, VOIDmode,
8406 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8407 const0_rtx);
8408 }
8409
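/* Illustrative sketch (not compiled): the bit twiddling above operates on
   the copy of the FPU status word that fnstsw leaves in AH.  There C0 is
   bit 0x01, C2 (unordered) is 0x04 and C3 (equal) is 0x40, so 0x45 is
   C0|C2|C3.  After comparing a against b:
     a >  b : (ah & 0x45) == 0x00
     a <  b : (ah & 0x45) == 0x01
     a == b : (ah & 0x45) == 0x40
     unord  : (ah & 0x45) == 0x45
   which is why, e.g., GT tests (ah & 0x45) == 0 and UNORDERED tests
   (ah & 0x04) != 0.  The helper names are hypothetical.  */
#if 0
static int
fcom_gt_p (unsigned char ah)
{
  return (ah & 0x45) == 0;	/* None of C0, C2, C3 set: strictly greater.  */
}

static int
fcom_unordered_p (unsigned char ah)
{
  return (ah & 0x04) != 0;	/* C2 set: operands were unordered.  */
}
#endif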
8410 rtx
8411 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
8412 {
8413 rtx op0, op1, ret;
8414 op0 = ix86_compare_op0;
8415 op1 = ix86_compare_op1;
8416
8417 if (second_test)
8418 *second_test = NULL_RTX;
8419 if (bypass_test)
8420 *bypass_test = NULL_RTX;
8421
8422 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8423 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
8424 second_test, bypass_test);
8425 else
8426 ret = ix86_expand_int_compare (code, op0, op1);
8427
8428 return ret;
8429 }
8430
8431 /* Return true if CODE will result in a nontrivial jump sequence. */
8432 bool
8433 ix86_fp_jump_nontrivial_p (enum rtx_code code)
8434 {
8435 enum rtx_code bypass_code, first_code, second_code;
8436 if (!TARGET_CMOVE)
8437 return true;
8438 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8439 return bypass_code != UNKNOWN || second_code != UNKNOWN;
8440 }
8441
8442 void
8443 ix86_expand_branch (enum rtx_code code, rtx label)
8444 {
8445 rtx tmp;
8446
8447 switch (GET_MODE (ix86_compare_op0))
8448 {
8449 case QImode:
8450 case HImode:
8451 case SImode:
8452 simple:
8453 tmp = ix86_expand_compare (code, NULL, NULL);
8454 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8455 gen_rtx_LABEL_REF (VOIDmode, label),
8456 pc_rtx);
8457 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
8458 return;
8459
8460 case SFmode:
8461 case DFmode:
8462 case XFmode:
8463 {
8464 rtvec vec;
8465 int use_fcomi;
8466 enum rtx_code bypass_code, first_code, second_code;
8467
8468 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
8469 &ix86_compare_op1);
8470
8471 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8472
8473 /* Check whether we will use the natural sequence with one jump. If
8474 so, we can expand jump early. Otherwise delay expansion by
8475 creating compound insn to not confuse optimizers. */
8476 if (bypass_code == UNKNOWN && second_code == UNKNOWN
8477 && TARGET_CMOVE)
8478 {
8479 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
8480 gen_rtx_LABEL_REF (VOIDmode, label),
8481 pc_rtx, NULL_RTX);
8482 }
8483 else
8484 {
8485 tmp = gen_rtx_fmt_ee (code, VOIDmode,
8486 ix86_compare_op0, ix86_compare_op1);
8487 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8488 gen_rtx_LABEL_REF (VOIDmode, label),
8489 pc_rtx);
8490 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
8491
8492 use_fcomi = ix86_use_fcomi_compare (code);
8493 vec = rtvec_alloc (3 + !use_fcomi);
8494 RTVEC_ELT (vec, 0) = tmp;
8495 RTVEC_ELT (vec, 1)
8496 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
8497 RTVEC_ELT (vec, 2)
8498 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
8499 if (! use_fcomi)
8500 RTVEC_ELT (vec, 3)
8501 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
8502
8503 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
8504 }
8505 return;
8506 }
8507
8508 case DImode:
8509 if (TARGET_64BIT)
8510 goto simple;
8511 /* Expand DImode branch into multiple compare+branch. */
8512 {
8513 rtx lo[2], hi[2], label2;
8514 enum rtx_code code1, code2, code3;
8515
8516 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
8517 {
8518 tmp = ix86_compare_op0;
8519 ix86_compare_op0 = ix86_compare_op1;
8520 ix86_compare_op1 = tmp;
8521 code = swap_condition (code);
8522 }
8523 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
8524 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
8525
8526 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
8527 avoid two branches. This costs one extra insn, so disable when
8528 optimizing for size. */
8529
8530 if ((code == EQ || code == NE)
8531 && (!optimize_size
8532 || hi[1] == const0_rtx || lo[1] == const0_rtx))
8533 {
8534 rtx xor0, xor1;
8535
8536 xor1 = hi[0];
8537 if (hi[1] != const0_rtx)
8538 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
8539 NULL_RTX, 0, OPTAB_WIDEN);
8540
8541 xor0 = lo[0];
8542 if (lo[1] != const0_rtx)
8543 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
8544 NULL_RTX, 0, OPTAB_WIDEN);
8545
8546 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
8547 NULL_RTX, 0, OPTAB_WIDEN);
8548
8549 ix86_compare_op0 = tmp;
8550 ix86_compare_op1 = const0_rtx;
8551 ix86_expand_branch (code, label);
8552 return;
8553 }
8554
8555 /* Otherwise, if we are doing less-than or greater-or-equal-than,
8556 op1 is a constant, and the low word is zero, then we can just
8557 examine the high word. */
8558
8559 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
8560 switch (code)
8561 {
8562 case LT: case LTU: case GE: case GEU:
8563 ix86_compare_op0 = hi[0];
8564 ix86_compare_op1 = hi[1];
8565 ix86_expand_branch (code, label);
8566 return;
8567 default:
8568 break;
8569 }
8570
8571 /* Otherwise, we need two or three jumps. */
8572
8573 label2 = gen_label_rtx ();
8574
8575 code1 = code;
8576 code2 = swap_condition (code);
8577 code3 = unsigned_condition (code);
8578
8579 switch (code)
8580 {
8581 case LT: case GT: case LTU: case GTU:
8582 break;
8583
8584 case LE: code1 = LT; code2 = GT; break;
8585 case GE: code1 = GT; code2 = LT; break;
8586 case LEU: code1 = LTU; code2 = GTU; break;
8587 case GEU: code1 = GTU; code2 = LTU; break;
8588
8589 case EQ: code1 = UNKNOWN; code2 = NE; break;
8590 case NE: code2 = UNKNOWN; break;
8591
8592 default:
8593 abort ();
8594 }
8595
8596 /*
8597 * a < b =>
8598 * if (hi(a) < hi(b)) goto true;
8599 * if (hi(a) > hi(b)) goto false;
8600 * if (lo(a) < lo(b)) goto true;
8601 * false:
8602 */
8603
8604 ix86_compare_op0 = hi[0];
8605 ix86_compare_op1 = hi[1];
8606
8607 if (code1 != UNKNOWN)
8608 ix86_expand_branch (code1, label);
8609 if (code2 != UNKNOWN)
8610 ix86_expand_branch (code2, label2);
8611
8612 ix86_compare_op0 = lo[0];
8613 ix86_compare_op1 = lo[1];
8614 ix86_expand_branch (code3, label);
8615
8616 if (code2 != UNKNOWN)
8617 emit_label (label2);
8618 return;
8619 }
8620
8621 default:
8622 abort ();
8623 }
8624 }
8625
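/* Illustrative sketch (not compiled): the three-jump scheme used above for
   a signed DImode "a < b" on a 32-bit target, written out on the two
   halves.  code1 is the strict compare of the high words, code2 branches
   to the fall-through label on the opposite strict compare, and code3 is
   the unsigned compare of the low words.  The helper name is hypothetical.  */
#if 0
static int
di_less_than (int a_hi, unsigned int a_lo, int b_hi, unsigned int b_lo)
{
  if (a_hi < b_hi)		/* code1 = LT on the high words.  */
    return 1;
  if (a_hi > b_hi)		/* code2 = GT: definitely not less than.  */
    return 0;
  return a_lo < b_lo;		/* code3 = LTU on the low words.  */
}
#endif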
8626 /* Split branch based on floating point condition. */
8627 void
8628 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
8629 rtx target1, rtx target2, rtx tmp)
8630 {
8631 rtx second, bypass;
8632 rtx label = NULL_RTX;
8633 rtx condition;
8634 int bypass_probability = -1, second_probability = -1, probability = -1;
8635 rtx i;
8636
8637 if (target2 != pc_rtx)
8638 {
8639 rtx tmp = target2;
8640 code = reverse_condition_maybe_unordered (code);
8641 target2 = target1;
8642 target1 = tmp;
8643 }
8644
8645 condition = ix86_expand_fp_compare (code, op1, op2,
8646 tmp, &second, &bypass);
8647
8648 if (split_branch_probability >= 0)
8649 {
8650 /* Distribute the probabilities across the jumps.
8651 Assume the BYPASS and SECOND to be always test
8652 for UNORDERED. */
8653 probability = split_branch_probability;
8654
8655 /* A value of 1 is low enough that the probability does not need
8656 to be updated. Later we may run some experiments and see
8657 whether unordered values are more frequent in practice. */
8658 if (bypass)
8659 bypass_probability = 1;
8660 if (second)
8661 second_probability = 1;
8662 }
8663 if (bypass != NULL_RTX)
8664 {
8665 label = gen_label_rtx ();
8666 i = emit_jump_insn (gen_rtx_SET
8667 (VOIDmode, pc_rtx,
8668 gen_rtx_IF_THEN_ELSE (VOIDmode,
8669 bypass,
8670 gen_rtx_LABEL_REF (VOIDmode,
8671 label),
8672 pc_rtx)));
8673 if (bypass_probability >= 0)
8674 REG_NOTES (i)
8675 = gen_rtx_EXPR_LIST (REG_BR_PROB,
8676 GEN_INT (bypass_probability),
8677 REG_NOTES (i));
8678 }
8679 i = emit_jump_insn (gen_rtx_SET
8680 (VOIDmode, pc_rtx,
8681 gen_rtx_IF_THEN_ELSE (VOIDmode,
8682 condition, target1, target2)));
8683 if (probability >= 0)
8684 REG_NOTES (i)
8685 = gen_rtx_EXPR_LIST (REG_BR_PROB,
8686 GEN_INT (probability),
8687 REG_NOTES (i));
8688 if (second != NULL_RTX)
8689 {
8690 i = emit_jump_insn (gen_rtx_SET
8691 (VOIDmode, pc_rtx,
8692 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
8693 target2)));
8694 if (second_probability >= 0)
8695 REG_NOTES (i)
8696 = gen_rtx_EXPR_LIST (REG_BR_PROB,
8697 GEN_INT (second_probability),
8698 REG_NOTES (i));
8699 }
8700 if (label != NULL_RTX)
8701 emit_label (label);
8702 }
8703
8704 int
8705 ix86_expand_setcc (enum rtx_code code, rtx dest)
8706 {
8707 rtx ret, tmp, tmpreg, equiv;
8708 rtx second_test, bypass_test;
8709
8710 if (GET_MODE (ix86_compare_op0) == DImode
8711 && !TARGET_64BIT)
8712 return 0; /* FAIL */
8713
8714 if (GET_MODE (dest) != QImode)
8715 abort ();
8716
8717 ret = ix86_expand_compare (code, &second_test, &bypass_test);
8718 PUT_MODE (ret, QImode);
8719
8720 tmp = dest;
8721 tmpreg = dest;
8722
8723 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
8724 if (bypass_test || second_test)
8725 {
8726 rtx test = second_test;
8727 int bypass = 0;
8728 rtx tmp2 = gen_reg_rtx (QImode);
8729 if (bypass_test)
8730 {
8731 if (second_test)
8732 abort ();
8733 test = bypass_test;
8734 bypass = 1;
8735 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
8736 }
8737 PUT_MODE (test, QImode);
8738 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
8739
8740 if (bypass)
8741 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
8742 else
8743 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
8744 }
8745
8746 /* Attach a REG_EQUAL note describing the comparison result. */
8747 equiv = simplify_gen_relational (code, QImode,
8748 GET_MODE (ix86_compare_op0),
8749 ix86_compare_op0, ix86_compare_op1);
8750 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
8751
8752 return 1; /* DONE */
8753 }
8754
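/* Illustrative sketch (not compiled): how a second or bypass test is folded
   into the QImode setcc result above.  For an IEEE NE compare the codes
   split into LTGT plus a second UNORDERED test, and the two setcc bytes
   are IORed together; a bypass test is reversed and ANDed in instead.
   The helper uses the C99 <math.h> macros and its name is hypothetical.  */
#if 0
#include <math.h>

static int
ieee_fp_ne (double a, double b)
{
  int first  = islessgreater (a, b);	/* setcc of first_code = LTGT.  */
  int second = isunordered (a, b);	/* setcc of second_code = UNORDERED.  */
  return first | second;		/* iorqi3 of the two results.  */
}
#endif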
8755 /* Expand comparison setting or clearing carry flag. Return true when
8756 successful and set pop for the operation. */
8757 static bool
8758 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
8759 {
8760 enum machine_mode mode =
8761 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
8762
8763 /* Do not handle DImode compares that go through the special path. Also we
8764 can't deal with FP compares yet, though it would be possible to add that. */
8765 if ((mode == DImode && !TARGET_64BIT))
8766 return false;
8767 if (FLOAT_MODE_P (mode))
8768 {
8769 rtx second_test = NULL, bypass_test = NULL;
8770 rtx compare_op, compare_seq;
8771
8772 /* Shortcut: the following common codes never translate into carry-flag compares. */
8773 if (code == EQ || code == NE || code == UNEQ || code == LTGT
8774 || code == ORDERED || code == UNORDERED)
8775 return false;
8776
8777 /* These comparisons require zero flag; swap operands so they won't. */
8778 if ((code == GT || code == UNLE || code == LE || code == UNGT)
8779 && !TARGET_IEEE_FP)
8780 {
8781 rtx tmp = op0;
8782 op0 = op1;
8783 op1 = tmp;
8784 code = swap_condition (code);
8785 }
8786
8787 /* Try to expand the comparison and verify that we end up with a carry-flag
8788 based comparison. This fails to be true only when we decide to expand the
8789 comparison using arithmetic, which is not a common scenario. */
8790 start_sequence ();
8791 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
8792 &second_test, &bypass_test);
8793 compare_seq = get_insns ();
8794 end_sequence ();
8795
8796 if (second_test || bypass_test)
8797 return false;
8798 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
8799 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
8800 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
8801 else
8802 code = GET_CODE (compare_op);
8803 if (code != LTU && code != GEU)
8804 return false;
8805 emit_insn (compare_seq);
8806 *pop = compare_op;
8807 return true;
8808 }
8809 if (!INTEGRAL_MODE_P (mode))
8810 return false;
8811 switch (code)
8812 {
8813 case LTU:
8814 case GEU:
8815 break;
8816
8817 /* Convert a==0 into (unsigned)a<1. */
8818 case EQ:
8819 case NE:
8820 if (op1 != const0_rtx)
8821 return false;
8822 op1 = const1_rtx;
8823 code = (code == EQ ? LTU : GEU);
8824 break;
8825
8826 /* Convert a>b into b<a or a>=b+1. */
8827 case GTU:
8828 case LEU:
8829 if (GET_CODE (op1) == CONST_INT)
8830 {
8831 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
8832 /* Bail out on overflow. We could still swap the operands, but that
8833 would force loading the constant into a register. */
8834 if (op1 == const0_rtx
8835 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
8836 return false;
8837 code = (code == GTU ? GEU : LTU);
8838 }
8839 else
8840 {
8841 rtx tmp = op1;
8842 op1 = op0;
8843 op0 = tmp;
8844 code = (code == GTU ? LTU : GEU);
8845 }
8846 break;
8847
8848 /* Convert a>=0 into (unsigned)a<0x80000000. */
8849 case LT:
8850 case GE:
8851 if (mode == DImode || op1 != const0_rtx)
8852 return false;
8853 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
8854 code = (code == LT ? GEU : LTU);
8855 break;
8856 case LE:
8857 case GT:
8858 if (mode == DImode || op1 != constm1_rtx)
8859 return false;
8860 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
8861 code = (code == LE ? GEU : LTU);
8862 break;
8863
8864 default:
8865 return false;
8866 }
8867 /* Swapping operands may cause constant to appear as first operand. */
8868 if (!nonimmediate_operand (op0, VOIDmode))
8869 {
8870 if (no_new_pseudos)
8871 return false;
8872 op0 = force_reg (mode, op0);
8873 }
8874 ix86_compare_op0 = op0;
8875 ix86_compare_op1 = op1;
8876 *pop = ix86_expand_compare (code, NULL, NULL);
8877 if (GET_CODE (*pop) != LTU && GET_CODE (*pop) != GEU)
8878 abort ();
8879 return true;
8880 }
8881
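/* Illustrative sketch (not compiled): the rewrites performed above all turn
   a condition into LTU or GEU so that the result can be materialized from
   the carry flag with a single sbb.  For a 32-bit mode, with unsigned x,
   signed s and an unsigned constant c:
     x == 0   <=>  x <  1                       (LTU)
     x != 0   <=>  x >= 1                       (GEU)
     x >  c   <=>  x >= c + 1                   (GEU, unless c + 1 overflows)
     s >= 0   <=>  (unsigned) s <  0x80000000   (LTU)
     s <  0   <=>  (unsigned) s >= 0x80000000   (GEU)
   The checking helper below is hypothetical.  */
#if 0
#include <assert.h>

static void
check_carry_flag_rewrites (unsigned int x, int s, unsigned int c)
{
  assert ((x == 0) == (x < 1u));
  assert ((x != 0) == (x >= 1u));
  if (c != ~0u)			/* Skip the overflowing case, as the code does.  */
    assert ((x > c) == (x >= c + 1));
  assert ((s >= 0) == ((unsigned int) s < 0x80000000u));
  assert ((s < 0) == ((unsigned int) s >= 0x80000000u));
}
#endif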
8882 int
8883 ix86_expand_int_movcc (rtx operands[])
8884 {
8885 enum rtx_code code = GET_CODE (operands[1]), compare_code;
8886 rtx compare_seq, compare_op;
8887 rtx second_test, bypass_test;
8888 enum machine_mode mode = GET_MODE (operands[0]);
8889 bool sign_bit_compare_p = false;
8890
8891 start_sequence ();
8892 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
8893 compare_seq = get_insns ();
8894 end_sequence ();
8895
8896 compare_code = GET_CODE (compare_op);
8897
8898 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
8899 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
8900 sign_bit_compare_p = true;
8901
8902 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
8903 HImode insns, we'd be swallowed in word prefix ops. */
8904
8905 if ((mode != HImode || TARGET_FAST_PREFIX)
8906 && (mode != DImode || TARGET_64BIT)
8907 && GET_CODE (operands[2]) == CONST_INT
8908 && GET_CODE (operands[3]) == CONST_INT)
8909 {
8910 rtx out = operands[0];
8911 HOST_WIDE_INT ct = INTVAL (operands[2]);
8912 HOST_WIDE_INT cf = INTVAL (operands[3]);
8913 HOST_WIDE_INT diff;
8914
8915 diff = ct - cf;
8916 /* Sign-bit compares are better done using shifts than by using
8917 sbb. */
8918 if (sign_bit_compare_p
8919 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
8920 ix86_compare_op1, &compare_op))
8921 {
8922 /* Detect overlap between destination and compare sources. */
8923 rtx tmp = out;
8924
8925 if (!sign_bit_compare_p)
8926 {
8927 bool fpcmp = false;
8928
8929 compare_code = GET_CODE (compare_op);
8930
8931 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
8932 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
8933 {
8934 fpcmp = true;
8935 compare_code = ix86_fp_compare_code_to_integer (compare_code);
8936 }
8937
8938 /* To simplify rest of code, restrict to the GEU case. */
8939 if (compare_code == LTU)
8940 {
8941 HOST_WIDE_INT tmp = ct;
8942 ct = cf;
8943 cf = tmp;
8944 compare_code = reverse_condition (compare_code);
8945 code = reverse_condition (code);
8946 }
8947 else
8948 {
8949 if (fpcmp)
8950 PUT_CODE (compare_op,
8951 reverse_condition_maybe_unordered
8952 (GET_CODE (compare_op)));
8953 else
8954 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
8955 }
8956 diff = ct - cf;
8957
8958 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
8959 || reg_overlap_mentioned_p (out, ix86_compare_op1))
8960 tmp = gen_reg_rtx (mode);
8961
8962 if (mode == DImode)
8963 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
8964 else
8965 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
8966 }
8967 else
8968 {
8969 if (code == GT || code == GE)
8970 code = reverse_condition (code);
8971 else
8972 {
8973 HOST_WIDE_INT tmp = ct;
8974 ct = cf;
8975 cf = tmp;
8976 diff = ct - cf;
8977 }
8978 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
8979 ix86_compare_op1, VOIDmode, 0, -1);
8980 }
8981
8982 if (diff == 1)
8983 {
8984 /*
8985 * cmpl op0,op1
8986 * sbbl dest,dest
8987 * [addl dest, ct]
8988 *
8989 * Size 5 - 8.
8990 */
8991 if (ct)
8992 tmp = expand_simple_binop (mode, PLUS,
8993 tmp, GEN_INT (ct),
8994 copy_rtx (tmp), 1, OPTAB_DIRECT);
8995 }
8996 else if (cf == -1)
8997 {
8998 /*
8999 * cmpl op0,op1
9000 * sbbl dest,dest
9001 * orl $ct, dest
9002 *
9003 * Size 8.
9004 */
9005 tmp = expand_simple_binop (mode, IOR,
9006 tmp, GEN_INT (ct),
9007 copy_rtx (tmp), 1, OPTAB_DIRECT);
9008 }
9009 else if (diff == -1 && ct)
9010 {
9011 /*
9012 * cmpl op0,op1
9013 * sbbl dest,dest
9014 * notl dest
9015 * [addl dest, cf]
9016 *
9017 * Size 8 - 11.
9018 */
9019 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9020 if (cf)
9021 tmp = expand_simple_binop (mode, PLUS,
9022 copy_rtx (tmp), GEN_INT (cf),
9023 copy_rtx (tmp), 1, OPTAB_DIRECT);
9024 }
9025 else
9026 {
9027 /*
9028 * cmpl op0,op1
9029 * sbbl dest,dest
9030 * [notl dest]
9031 * andl cf - ct, dest
9032 * [addl dest, ct]
9033 *
9034 * Size 8 - 11.
9035 */
9036
9037 if (cf == 0)
9038 {
9039 cf = ct;
9040 ct = 0;
9041 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9042 }
9043
9044 tmp = expand_simple_binop (mode, AND,
9045 copy_rtx (tmp),
9046 gen_int_mode (cf - ct, mode),
9047 copy_rtx (tmp), 1, OPTAB_DIRECT);
9048 if (ct)
9049 tmp = expand_simple_binop (mode, PLUS,
9050 copy_rtx (tmp), GEN_INT (ct),
9051 copy_rtx (tmp), 1, OPTAB_DIRECT);
9052 }
9053
9054 if (!rtx_equal_p (tmp, out))
9055 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
9056
9057 return 1; /* DONE */
9058 }
9059
9060 if (diff < 0)
9061 {
9062 HOST_WIDE_INT tmp;
9063 tmp = ct, ct = cf, cf = tmp;
9064 diff = -diff;
9065 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9066 {
9067 /* We may be reversing an unordered compare to a normal compare, which
9068 is not valid in general (we may convert a non-trapping condition
9069 to a trapping one); however, on i386 we currently emit all
9070 comparisons unordered. */
9071 compare_code = reverse_condition_maybe_unordered (compare_code);
9072 code = reverse_condition_maybe_unordered (code);
9073 }
9074 else
9075 {
9076 compare_code = reverse_condition (compare_code);
9077 code = reverse_condition (code);
9078 }
9079 }
9080
9081 compare_code = UNKNOWN;
9082 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
9083 && GET_CODE (ix86_compare_op1) == CONST_INT)
9084 {
9085 if (ix86_compare_op1 == const0_rtx
9086 && (code == LT || code == GE))
9087 compare_code = code;
9088 else if (ix86_compare_op1 == constm1_rtx)
9089 {
9090 if (code == LE)
9091 compare_code = LT;
9092 else if (code == GT)
9093 compare_code = GE;
9094 }
9095 }
9096
9097 /* Optimize dest = (op0 < 0) ? -1 : cf. */
9098 if (compare_code != UNKNOWN
9099 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
9100 && (cf == -1 || ct == -1))
9101 {
9102 /* If lea code below could be used, only optimize
9103 if it results in a 2 insn sequence. */
9104
9105 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
9106 || diff == 3 || diff == 5 || diff == 9)
9107 || (compare_code == LT && ct == -1)
9108 || (compare_code == GE && cf == -1))
9109 {
9110 /*
9111 * notl op1 (if necessary)
9112 * sarl $31, op1
9113 * orl cf, op1
9114 */
9115 if (ct != -1)
9116 {
9117 cf = ct;
9118 ct = -1;
9119 code = reverse_condition (code);
9120 }
9121
9122 out = emit_store_flag (out, code, ix86_compare_op0,
9123 ix86_compare_op1, VOIDmode, 0, -1);
9124
9125 out = expand_simple_binop (mode, IOR,
9126 out, GEN_INT (cf),
9127 out, 1, OPTAB_DIRECT);
9128 if (out != operands[0])
9129 emit_move_insn (operands[0], out);
9130
9131 return 1; /* DONE */
9132 }
9133 }
9134
9135
9136 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
9137 || diff == 3 || diff == 5 || diff == 9)
9138 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
9139 && (mode != DImode
9140 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
9141 {
9142 /*
9143 * xorl dest,dest
9144 * cmpl op1,op2
9145 * setcc dest
9146 * lea cf(dest*(ct-cf)),dest
9147 *
9148 * Size 14.
9149 *
9150 * This also catches the degenerate setcc-only case.
9151 */
9152
9153 rtx tmp;
9154 int nops;
9155
9156 out = emit_store_flag (out, code, ix86_compare_op0,
9157 ix86_compare_op1, VOIDmode, 0, 1);
9158
9159 nops = 0;
9160 /* On x86_64 the lea instruction operates on Pmode, so we need
9161 to do the arithmetic in the proper mode to match. */
9162 if (diff == 1)
9163 tmp = copy_rtx (out);
9164 else
9165 {
9166 rtx out1;
9167 out1 = copy_rtx (out);
9168 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
9169 nops++;
9170 if (diff & 1)
9171 {
9172 tmp = gen_rtx_PLUS (mode, tmp, out1);
9173 nops++;
9174 }
9175 }
9176 if (cf != 0)
9177 {
9178 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
9179 nops++;
9180 }
9181 if (!rtx_equal_p (tmp, out))
9182 {
9183 if (nops == 1)
9184 out = force_operand (tmp, copy_rtx (out));
9185 else
9186 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
9187 }
9188 if (!rtx_equal_p (out, operands[0]))
9189 emit_move_insn (operands[0], copy_rtx (out));
9190
9191 return 1; /* DONE */
9192 }
9193
9194 /*
9195 * General case: Jumpful:
9196 * xorl dest,dest cmpl op1, op2
9197 * cmpl op1, op2 movl ct, dest
9198 * setcc dest jcc 1f
9199 * decl dest movl cf, dest
9200 * andl (cf-ct),dest 1:
9201 * addl ct,dest
9202 *
9203 * Size 20. Size 14.
9204 *
9205 * This is reasonably steep, but branch mispredict costs are
9206 * high on modern cpus, so consider failing only if optimizing
9207 * for space.
9208 */
9209
9210 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9211 && BRANCH_COST >= 2)
9212 {
9213 if (cf == 0)
9214 {
9215 cf = ct;
9216 ct = 0;
9217 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9218 /* We may be reversing an unordered compare to a normal compare,
9219 which is not valid in general (we may convert a non-trapping
9220 condition to a trapping one); however, on i386 we currently
9221 emit all comparisons unordered. */
9222 code = reverse_condition_maybe_unordered (code);
9223 else
9224 {
9225 code = reverse_condition (code);
9226 if (compare_code != UNKNOWN)
9227 compare_code = reverse_condition (compare_code);
9228 }
9229 }
9230
9231 if (compare_code != UNKNOWN)
9232 {
9233 /* notl op1 (if needed)
9234 sarl $31, op1
9235 andl (cf-ct), op1
9236 addl ct, op1
9237
9238 For x < 0 (resp. x <= -1) there will be no notl,
9239 so if possible swap the constants to get rid of the
9240 complement.
9241 True/false will be -1/0 while code below (store flag
9242 followed by decrement) is 0/-1, so the constants need
9243 to be exchanged once more. */
9244
9245 if (compare_code == GE || !cf)
9246 {
9247 code = reverse_condition (code);
9248 compare_code = LT;
9249 }
9250 else
9251 {
9252 HOST_WIDE_INT tmp = cf;
9253 cf = ct;
9254 ct = tmp;
9255 }
9256
9257 out = emit_store_flag (out, code, ix86_compare_op0,
9258 ix86_compare_op1, VOIDmode, 0, -1);
9259 }
9260 else
9261 {
9262 out = emit_store_flag (out, code, ix86_compare_op0,
9263 ix86_compare_op1, VOIDmode, 0, 1);
9264
9265 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
9266 copy_rtx (out), 1, OPTAB_DIRECT);
9267 }
9268
9269 out = expand_simple_binop (mode, AND, copy_rtx (out),
9270 gen_int_mode (cf - ct, mode),
9271 copy_rtx (out), 1, OPTAB_DIRECT);
9272 if (ct)
9273 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
9274 copy_rtx (out), 1, OPTAB_DIRECT);
9275 if (!rtx_equal_p (out, operands[0]))
9276 emit_move_insn (operands[0], copy_rtx (out));
9277
9278 return 1; /* DONE */
9279 }
9280 }
9281
9282 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9283 {
9284 /* Try a few things more with specific constants and a variable. */
9285
9286 optab op;
9287 rtx var, orig_out, out, tmp;
9288
9289 if (BRANCH_COST <= 2)
9290 return 0; /* FAIL */
9291
9292 /* If one of the two operands is an interesting constant (0 or -1), use the
9293 conditional move above to load a 0/-1 mask and then combine the variable with it using a logical operation. */
9294
9295 if (GET_CODE (operands[2]) == CONST_INT)
9296 {
9297 var = operands[3];
9298 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
9299 operands[3] = constm1_rtx, op = and_optab;
9300 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
9301 operands[3] = const0_rtx, op = ior_optab;
9302 else
9303 return 0; /* FAIL */
9304 }
9305 else if (GET_CODE (operands[3]) == CONST_INT)
9306 {
9307 var = operands[2];
9308 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
9309 operands[2] = constm1_rtx, op = and_optab;
9310 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
9311 operands[2] = const0_rtx, op = ior_optab;
9312 else
9313 return 0; /* FAIL */
9314 }
9315 else
9316 return 0; /* FAIL */
9317
9318 orig_out = operands[0];
9319 tmp = gen_reg_rtx (mode);
9320 operands[0] = tmp;
9321
9322 /* Recurse to get the constant loaded. */
9323 if (ix86_expand_int_movcc (operands) == 0)
9324 return 0; /* FAIL */
9325
9326 /* Mask in the interesting variable. */
9327 out = expand_binop (mode, op, var, tmp, orig_out, 0,
9328 OPTAB_WIDEN);
9329 if (!rtx_equal_p (out, orig_out))
9330 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
9331
9332 return 1; /* DONE */
9333 }
9334
9335 /*
9336 * For comparison with above,
9337 *
9338 * movl cf,dest
9339 * movl ct,tmp
9340 * cmpl op1,op2
9341 * cmovcc tmp,dest
9342 *
9343 * Size 15.
9344 */
9345
9346 if (! nonimmediate_operand (operands[2], mode))
9347 operands[2] = force_reg (mode, operands[2]);
9348 if (! nonimmediate_operand (operands[3], mode))
9349 operands[3] = force_reg (mode, operands[3]);
9350
9351 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9352 {
9353 rtx tmp = gen_reg_rtx (mode);
9354 emit_move_insn (tmp, operands[3]);
9355 operands[3] = tmp;
9356 }
9357 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9358 {
9359 rtx tmp = gen_reg_rtx (mode);
9360 emit_move_insn (tmp, operands[2]);
9361 operands[2] = tmp;
9362 }
9363
9364 if (! register_operand (operands[2], VOIDmode)
9365 && (mode == QImode
9366 || ! register_operand (operands[3], VOIDmode)))
9367 operands[2] = force_reg (mode, operands[2]);
9368
9369 if (mode == QImode
9370 && ! register_operand (operands[3], VOIDmode))
9371 operands[3] = force_reg (mode, operands[3]);
9372
9373 emit_insn (compare_seq);
9374 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9375 gen_rtx_IF_THEN_ELSE (mode,
9376 compare_op, operands[2],
9377 operands[3])));
9378 if (bypass_test)
9379 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9380 gen_rtx_IF_THEN_ELSE (mode,
9381 bypass_test,
9382 copy_rtx (operands[3]),
9383 copy_rtx (operands[0]))));
9384 if (second_test)
9385 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9386 gen_rtx_IF_THEN_ELSE (mode,
9387 second_test,
9388 copy_rtx (operands[2]),
9389 copy_rtx (operands[0]))));
9390
9391 return 1; /* DONE */
9392 }
9393
9394 int
9395 ix86_expand_fp_movcc (rtx operands[])
9396 {
9397 enum rtx_code code;
9398 rtx tmp;
9399 rtx compare_op, second_test, bypass_test;
9400
9401 /* For SF/DFmode conditional moves based on comparisons
9402 in the same mode, we may want to use SSE min/max instructions. */
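/* As a rough illustration only: with -mfpmath=sse, a source-level

     r = a < b ? a : b;

   whose operands line up as checked below can be emitted as a single
   minss (SFmode) or minsd (DFmode) instruction instead of a compare
   and a conditional move.  */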
9403 if (((TARGET_SSE_MATH && GET_MODE (operands[0]) == SFmode)
9404 || (TARGET_SSE2 && TARGET_SSE_MATH && GET_MODE (operands[0]) == DFmode))
9405 && GET_MODE (ix86_compare_op0) == GET_MODE (operands[0])
9406 /* The SSE comparisons do not support the LTGT/UNEQ pair. */
9407 && (!TARGET_IEEE_FP
9408 || (GET_CODE (operands[1]) != LTGT && GET_CODE (operands[1]) != UNEQ))
9409 /* We may be called from the post-reload splitter. */
9410 && (!REG_P (operands[0])
9411 || SSE_REG_P (operands[0])
9412 || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
9413 {
9414 rtx op0 = ix86_compare_op0, op1 = ix86_compare_op1;
9415 code = GET_CODE (operands[1]);
9416
9417 /* See if we have (cross) match between comparison operands and
9418 conditional move operands. */
9419 if (rtx_equal_p (operands[2], op1))
9420 {
9421 rtx tmp = op0;
9422 op0 = op1;
9423 op1 = tmp;
9424 code = reverse_condition_maybe_unordered (code);
9425 }
9426 if (rtx_equal_p (operands[2], op0) && rtx_equal_p (operands[3], op1))
9427 {
9428 /* Check for min operation. */
9429 if (code == LT || code == UNLE)
9430 {
9431 if (code == UNLE)
9432 {
9433 rtx tmp = op0;
9434 op0 = op1;
9435 op1 = tmp;
9436 }
9437 operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
9438 if (memory_operand (op0, VOIDmode))
9439 op0 = force_reg (GET_MODE (operands[0]), op0);
9440 if (GET_MODE (operands[0]) == SFmode)
9441 emit_insn (gen_minsf3 (operands[0], op0, op1));
9442 else
9443 emit_insn (gen_mindf3 (operands[0], op0, op1));
9444 return 1;
9445 }
9446 /* Check for max operation. */
9447 if (code == GT || code == UNGE)
9448 {
9449 if (code == UNGE)
9450 {
9451 rtx tmp = op0;
9452 op0 = op1;
9453 op1 = tmp;
9454 }
9455 operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
9456 if (memory_operand (op0, VOIDmode))
9457 op0 = force_reg (GET_MODE (operands[0]), op0);
9458 if (GET_MODE (operands[0]) == SFmode)
9459 emit_insn (gen_maxsf3 (operands[0], op0, op1));
9460 else
9461 emit_insn (gen_maxdf3 (operands[0], op0, op1));
9462 return 1;
9463 }
9464 }
9465 /* Force the condition to be a valid sse_comparison_operator. When not
9466 in IEEE mode, also try to canonicalize the destination operand to be
9467 the first operand of the comparison - this helps reload to avoid
9468 extra moves. */
9469 if (!sse_comparison_operator (operands[1], VOIDmode)
9470 || (rtx_equal_p (operands[0], ix86_compare_op1) && !TARGET_IEEE_FP))
9471 {
9472 rtx tmp = ix86_compare_op0;
9473 ix86_compare_op0 = ix86_compare_op1;
9474 ix86_compare_op1 = tmp;
9475 operands[1] = gen_rtx_fmt_ee (swap_condition (GET_CODE (operands[1])),
9476 VOIDmode, ix86_compare_op0,
9477 ix86_compare_op1);
9478 }
9479 /* Similarly, try to make the result the first operand of the conditional
9480 move. We also don't support the NE comparison on SSE, so try to
9481 avoid it. */
9482 if ((rtx_equal_p (operands[0], operands[3])
9483 && (!TARGET_IEEE_FP || GET_CODE (operands[1]) != EQ))
9484 || (GET_CODE (operands[1]) == NE && TARGET_IEEE_FP))
9485 {
9486 rtx tmp = operands[2];
9487 operands[2] = operands[3];
9488 operands[3] = tmp;
9489 operands[1] = gen_rtx_fmt_ee (reverse_condition_maybe_unordered
9490 (GET_CODE (operands[1])),
9491 VOIDmode, ix86_compare_op0,
9492 ix86_compare_op1);
9493 }
9494 if (GET_MODE (operands[0]) == SFmode)
9495 emit_insn (gen_sse_movsfcc (operands[0], operands[1],
9496 operands[2], operands[3],
9497 ix86_compare_op0, ix86_compare_op1));
9498 else
9499 emit_insn (gen_sse_movdfcc (operands[0], operands[1],
9500 operands[2], operands[3],
9501 ix86_compare_op0, ix86_compare_op1));
9502 return 1;
9503 }
9504
9505 /* The floating point conditional move instructions don't directly
9506 support conditions resulting from a signed integer comparison. */
9507
9508 code = GET_CODE (operands[1]);
9509 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9510
9511 /* If the comparison is not directly representable by fcmov, fall back to
9512 computing the condition with setcc and testing the result against zero. */
9513
9514 if (!fcmov_comparison_operator (compare_op, VOIDmode))
9515 {
9516 if (second_test != NULL || bypass_test != NULL)
9517 abort ();
9518 tmp = gen_reg_rtx (QImode);
9519 ix86_expand_setcc (code, tmp);
9520 code = NE;
9521 ix86_compare_op0 = tmp;
9522 ix86_compare_op1 = const0_rtx;
9523 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9524 }
9525 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9526 {
9527 tmp = gen_reg_rtx (GET_MODE (operands[0]));
9528 emit_move_insn (tmp, operands[3]);
9529 operands[3] = tmp;
9530 }
9531 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9532 {
9533 tmp = gen_reg_rtx (GET_MODE (operands[0]));
9534 emit_move_insn (tmp, operands[2]);
9535 operands[2] = tmp;
9536 }
9537
9538 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9539 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
9540 compare_op,
9541 operands[2],
9542 operands[3])));
9543 if (bypass_test)
9544 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9545 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
9546 bypass_test,
9547 operands[3],
9548 operands[0])));
9549 if (second_test)
9550 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9551 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
9552 second_test,
9553 operands[2],
9554 operands[0])));
9555
9556 return 1;
9557 }
9558
9559 /* Expand conditional increment or decrement using adc/sbb instructions.
9560 The default case using setcc followed by a conditional move can be
9561 done by generic code. */
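/* As a rough illustration, a fragment like

     x = x + (a < b);

   can be expanded here into a compare that leaves the condition in the
   carry flag followed by a single adc (or sbb for the decrement case),
   with no setcc or conditional move.  Which comparisons qualify is
   decided by ix86_expand_carry_flag_compare.  */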
9562 int
9563 ix86_expand_int_addcc (rtx operands[])
9564 {
9565 enum rtx_code code = GET_CODE (operands[1]);
9566 rtx compare_op;
9567 rtx val = const0_rtx;
9568 bool fpcmp = false;
9569 enum machine_mode mode = GET_MODE (operands[0]);
9570
9571 if (operands[3] != const1_rtx
9572 && operands[3] != constm1_rtx)
9573 return 0;
9574 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9575 ix86_compare_op1, &compare_op))
9576 return 0;
9577 code = GET_CODE (compare_op);
9578
9579 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9580 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9581 {
9582 fpcmp = true;
9583 code = ix86_fp_compare_code_to_integer (code);
9584 }
9585
9586 if (code != LTU)
9587 {
9588 val = constm1_rtx;
9589 if (fpcmp)
9590 PUT_CODE (compare_op,
9591 reverse_condition_maybe_unordered
9592 (GET_CODE (compare_op)));
9593 else
9594 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9595 }
9596 PUT_MODE (compare_op, mode);
9597
9598 /* Construct either adc or sbb insn. */
9599 if ((code == LTU) == (operands[3] == constm1_rtx))
9600 {
9601 switch (GET_MODE (operands[0]))
9602 {
9603 case QImode:
9604 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
9605 break;
9606 case HImode:
9607 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
9608 break;
9609 case SImode:
9610 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
9611 break;
9612 case DImode:
9613 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
9614 break;
9615 default:
9616 abort ();
9617 }
9618 }
9619 else
9620 {
9621 switch (GET_MODE (operands[0]))
9622 {
9623 case QImode:
9624 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
9625 break;
9626 case HImode:
9627 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
9628 break;
9629 case SImode:
9630 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
9631 break;
9632 case DImode:
9633 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
9634 break;
9635 default:
9636 abort ();
9637 }
9638 }
9639 return 1; /* DONE */
9640 }
9641
9642
9643 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
9644 works for floating point parameters and non-offsettable memories.
9645 For pushes, it returns just stack offsets; the values will be saved
9646 in the right order. At most three parts are generated. */
9647
9648 static int
9649 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
9650 {
9651 int size;
9652
9653 if (!TARGET_64BIT)
9654 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
9655 else
9656 size = (GET_MODE_SIZE (mode) + 4) / 8;
9657
9658 if (GET_CODE (operand) == REG && MMX_REGNO_P (REGNO (operand)))
9659 abort ();
9660 if (size < 2 || size > 3)
9661 abort ();
9662
9663 /* Optimize constant pool references to immediates. This is used by fp
9664 moves, which force all constants to memory to allow combining. */
9665 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
9666 {
9667 rtx tmp = maybe_get_pool_constant (operand);
9668 if (tmp)
9669 operand = tmp;
9670 }
9671
9672 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
9673 {
9674 /* The only non-offsettable memories we handle are pushes. */
9675 if (! push_operand (operand, VOIDmode))
9676 abort ();
9677
9678 operand = copy_rtx (operand);
9679 PUT_MODE (operand, Pmode);
9680 parts[0] = parts[1] = parts[2] = operand;
9681 }
9682 else if (!TARGET_64BIT)
9683 {
9684 if (mode == DImode)
9685 split_di (&operand, 1, &parts[0], &parts[1]);
9686 else
9687 {
9688 if (REG_P (operand))
9689 {
9690 if (!reload_completed)
9691 abort ();
9692 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
9693 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
9694 if (size == 3)
9695 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
9696 }
9697 else if (offsettable_memref_p (operand))
9698 {
9699 operand = adjust_address (operand, SImode, 0);
9700 parts[0] = operand;
9701 parts[1] = adjust_address (operand, SImode, 4);
9702 if (size == 3)
9703 parts[2] = adjust_address (operand, SImode, 8);
9704 }
9705 else if (GET_CODE (operand) == CONST_DOUBLE)
9706 {
9707 REAL_VALUE_TYPE r;
9708 long l[4];
9709
9710 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
9711 switch (mode)
9712 {
9713 case XFmode:
9714 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
9715 parts[2] = gen_int_mode (l[2], SImode);
9716 break;
9717 case DFmode:
9718 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
9719 break;
9720 default:
9721 abort ();
9722 }
9723 parts[1] = gen_int_mode (l[1], SImode);
9724 parts[0] = gen_int_mode (l[0], SImode);
9725 }
9726 else
9727 abort ();
9728 }
9729 }
9730 else
9731 {
9732 if (mode == TImode)
9733 split_ti (&operand, 1, &parts[0], &parts[1]);
9734 if (mode == XFmode || mode == TFmode)
9735 {
9736 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
9737 if (REG_P (operand))
9738 {
9739 if (!reload_completed)
9740 abort ();
9741 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
9742 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
9743 }
9744 else if (offsettable_memref_p (operand))
9745 {
9746 operand = adjust_address (operand, DImode, 0);
9747 parts[0] = operand;
9748 parts[1] = adjust_address (operand, upper_mode, 8);
9749 }
9750 else if (GET_CODE (operand) == CONST_DOUBLE)
9751 {
9752 REAL_VALUE_TYPE r;
9753 long l[3];
9754
9755 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
9756 real_to_target (l, &r, mode);
9757 /* Do not use shift by 32 to avoid warning on 32bit systems. */
9758 if (HOST_BITS_PER_WIDE_INT >= 64)
9759 parts[0]
9760 = gen_int_mode
9761 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
9762 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
9763 DImode);
9764 else
9765 parts[0] = immed_double_const (l[0], l[1], DImode);
9766 if (upper_mode == SImode)
9767 parts[1] = gen_int_mode (l[2], SImode);
9768 else if (HOST_BITS_PER_WIDE_INT >= 64)
9769 parts[1]
9770 = gen_int_mode
9771 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
9772 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
9773 DImode);
9774 else
9775 parts[1] = immed_double_const (l[2], l[3], DImode);
9776 }
9777 else
9778 abort ();
9779 }
9780 }
9781
9782 return size;
9783 }
9784
9785 /* Emit insns to perform a move or push of DI, DF, and XF values.
9786 All required insns are emitted here. Operands 2-4 receive the
9787 destination parts and operands 5-7 the source parts, in the
9788 correct order. */
9789
9790 void
9791 ix86_split_long_move (rtx operands[])
9792 {
9793 rtx part[2][3];
9794 int nparts;
9795 int push = 0;
9796 int collisions = 0;
9797 enum machine_mode mode = GET_MODE (operands[0]);
9798
9799 /* The DFmode expanders may ask us to move a double.
9800 For a 64-bit target this is a single move. By hiding that fact
9801 here we simplify the i386.md splitters. */
9802 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
9803 {
9804 /* Optimize constant pool references to immediates. This is used by
9805 fp moves, which force all constants to memory to allow combining. */
9806
9807 if (GET_CODE (operands[1]) == MEM
9808 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
9809 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
9810 operands[1] = get_pool_constant (XEXP (operands[1], 0));
9811 if (push_operand (operands[0], VOIDmode))
9812 {
9813 operands[0] = copy_rtx (operands[0]);
9814 PUT_MODE (operands[0], Pmode);
9815 }
9816 else
9817 operands[0] = gen_lowpart (DImode, operands[0]);
9818 operands[1] = gen_lowpart (DImode, operands[1]);
9819 emit_move_insn (operands[0], operands[1]);
9820 return;
9821 }
9822
9823 /* The only non-offsettable memory we handle is push. */
9824 if (push_operand (operands[0], VOIDmode))
9825 push = 1;
9826 else if (GET_CODE (operands[0]) == MEM
9827 && ! offsettable_memref_p (operands[0]))
9828 abort ();
9829
9830 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
9831 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
9832
9833 /* When emitting a push, take care of source operands on the stack. */
9834 if (push && GET_CODE (operands[1]) == MEM
9835 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
9836 {
9837 if (nparts == 3)
9838 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
9839 XEXP (part[1][2], 0));
9840 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
9841 XEXP (part[1][1], 0));
9842 }
9843
9844 /* We need to do the copy in the right order in case an address register
9845 of the source overlaps the destination. */
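/* For example, when a DImode value is loaded from memory addressed by %eax
   into the %eax/%edx pair, the part that overwrites %eax must be moved last,
   otherwise the address would be clobbered before the other part is loaded.
   The reordering and the lea fallback below handle such overlaps.  */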
9846 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
9847 {
9848 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
9849 collisions++;
9850 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
9851 collisions++;
9852 if (nparts == 3
9853 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
9854 collisions++;
9855
9856 /* Collision in the middle part can be handled by reordering. */
9857 if (collisions == 1 && nparts == 3
9858 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
9859 {
9860 rtx tmp;
9861 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
9862 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
9863 }
9864
9865 /* If there are more collisions, we can't handle it by reordering.
9866 Do an lea to the last part and use only one colliding move. */
9867 else if (collisions > 1)
9868 {
9869 rtx base;
9870
9871 collisions = 1;
9872
9873 base = part[0][nparts - 1];
9874
9875 /* Handle the case when the last part isn't valid for lea.
9876 Happens in 64-bit mode storing the 12-byte XFmode. */
9877 if (GET_MODE (base) != Pmode)
9878 base = gen_rtx_REG (Pmode, REGNO (base));
9879
9880 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
9881 part[1][0] = replace_equiv_address (part[1][0], base);
9882 part[1][1] = replace_equiv_address (part[1][1],
9883 plus_constant (base, UNITS_PER_WORD));
9884 if (nparts == 3)
9885 part[1][2] = replace_equiv_address (part[1][2],
9886 plus_constant (base, 8));
9887 }
9888 }
9889
9890 if (push)
9891 {
9892 if (!TARGET_64BIT)
9893 {
9894 if (nparts == 3)
9895 {
9896 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
9897 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
9898 emit_move_insn (part[0][2], part[1][2]);
9899 }
9900 }
9901 else
9902 {
9903 /* In 64-bit mode we don't have a 32-bit push available. In case this is
9904 a register, it is OK - we will just use the larger counterpart. We also
9905 retype memory - these come from an attempt to avoid the REX prefix on
9906 moving the second half of a TFmode value. */
9907 if (GET_MODE (part[1][1]) == SImode)
9908 {
9909 if (GET_CODE (part[1][1]) == MEM)
9910 part[1][1] = adjust_address (part[1][1], DImode, 0);
9911 else if (REG_P (part[1][1]))
9912 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
9913 else
9914 abort ();
9915 if (GET_MODE (part[1][0]) == SImode)
9916 part[1][0] = part[1][1];
9917 }
9918 }
9919 emit_move_insn (part[0][1], part[1][1]);
9920 emit_move_insn (part[0][0], part[1][0]);
9921 return;
9922 }
9923
9924 /* Choose correct order to not overwrite the source before it is copied. */
9925 if ((REG_P (part[0][0])
9926 && REG_P (part[1][1])
9927 && (REGNO (part[0][0]) == REGNO (part[1][1])
9928 || (nparts == 3
9929 && REGNO (part[0][0]) == REGNO (part[1][2]))))
9930 || (collisions > 0
9931 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
9932 {
9933 if (nparts == 3)
9934 {
9935 operands[2] = part[0][2];
9936 operands[3] = part[0][1];
9937 operands[4] = part[0][0];
9938 operands[5] = part[1][2];
9939 operands[6] = part[1][1];
9940 operands[7] = part[1][0];
9941 }
9942 else
9943 {
9944 operands[2] = part[0][1];
9945 operands[3] = part[0][0];
9946 operands[5] = part[1][1];
9947 operands[6] = part[1][0];
9948 }
9949 }
9950 else
9951 {
9952 if (nparts == 3)
9953 {
9954 operands[2] = part[0][0];
9955 operands[3] = part[0][1];
9956 operands[4] = part[0][2];
9957 operands[5] = part[1][0];
9958 operands[6] = part[1][1];
9959 operands[7] = part[1][2];
9960 }
9961 else
9962 {
9963 operands[2] = part[0][0];
9964 operands[3] = part[0][1];
9965 operands[5] = part[1][0];
9966 operands[6] = part[1][1];
9967 }
9968 }
9969
9970 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
9971 if (optimize_size)
9972 {
9973 if (GET_CODE (operands[5]) == CONST_INT
9974 && operands[5] != const0_rtx
9975 && REG_P (operands[2]))
9976 {
9977 if (GET_CODE (operands[6]) == CONST_INT
9978 && INTVAL (operands[6]) == INTVAL (operands[5]))
9979 operands[6] = operands[2];
9980
9981 if (nparts == 3
9982 && GET_CODE (operands[7]) == CONST_INT
9983 && INTVAL (operands[7]) == INTVAL (operands[5]))
9984 operands[7] = operands[2];
9985 }
9986
9987 if (nparts == 3
9988 && GET_CODE (operands[6]) == CONST_INT
9989 && operands[6] != const0_rtx
9990 && REG_P (operands[3])
9991 && GET_CODE (operands[7]) == CONST_INT
9992 && INTVAL (operands[7]) == INTVAL (operands[6]))
9993 operands[7] = operands[3];
9994 }
9995
9996 emit_move_insn (operands[2], operands[5]);
9997 emit_move_insn (operands[3], operands[6]);
9998 if (nparts == 3)
9999 emit_move_insn (operands[4], operands[7]);
10000
10001 return;
10002 }
10003
10004 /* Helper function of ix86_split_ashldi used to generate an SImode
10005 left shift by a constant, either using a single shift or
10006 a sequence of add instructions. */
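/* For example, on a CPU where two adds are cheaper than a shift by a
   constant, "x <<= 2" may be emitted as

     addl %eax, %eax
     addl %eax, %eax

   instead of "sall $2, %eax"; the cost comparison below (and -Os) decides
   which form is used.  (%eax is just an illustrative register choice.)  */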
10007
10008 static void
10009 ix86_expand_ashlsi3_const (rtx operand, int count)
10010 {
10011 if (count == 1)
10012 emit_insn (gen_addsi3 (operand, operand, operand));
10013 else if (!optimize_size
10014 && count * ix86_cost->add <= ix86_cost->shift_const)
10015 {
10016 int i;
10017 for (i=0; i<count; i++)
10018 emit_insn (gen_addsi3 (operand, operand, operand));
10019 }
10020 else
10021 emit_insn (gen_ashlsi3 (operand, operand, GEN_INT (count)));
10022 }
10023
10024 void
10025 ix86_split_ashldi (rtx *operands, rtx scratch)
10026 {
10027 rtx low[2], high[2];
10028 int count;
10029
10030 if (GET_CODE (operands[2]) == CONST_INT)
10031 {
10032 split_di (operands, 2, low, high);
10033 count = INTVAL (operands[2]) & 63;
10034
10035 if (count >= 32)
10036 {
10037 emit_move_insn (high[0], low[1]);
10038 emit_move_insn (low[0], const0_rtx);
10039
10040 if (count > 32)
10041 ix86_expand_ashlsi3_const (high[0], count - 32);
10042 }
10043 else
10044 {
10045 if (!rtx_equal_p (operands[0], operands[1]))
10046 emit_move_insn (operands[0], operands[1]);
10047 emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
10048 ix86_expand_ashlsi3_const (low[0], count);
10049 }
10050 return;
10051 }
10052
10053 split_di (operands, 1, low, high);
10054
10055 if (operands[1] == const1_rtx)
10056 {
10057 /* Assuming we've chosen QImode-capable registers, 1LL << N
10058 can be done with two 32-bit shifts, no branches, no cmoves. */
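/* Roughly, in C terms, for "1LL << count" split into 32-bit halves (the
   hardware masks the 32-bit shift count to 0..31 for us):

     low  = (count & 32) == 0;      1 when the set bit lands in the low word
     high = (count & 32) != 0;      1 when it lands in the high word
     low  <<= count;
     high <<= count;

   The two setcc insns below produce the 0/1 values without any branches.  */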
10059 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
10060 {
10061 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
10062
10063 ix86_expand_clear (low[0]);
10064 ix86_expand_clear (high[0]);
10065 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
10066
10067 d = gen_lowpart (QImode, low[0]);
10068 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10069 s = gen_rtx_EQ (QImode, flags, const0_rtx);
10070 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10071
10072 d = gen_lowpart (QImode, high[0]);
10073 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10074 s = gen_rtx_NE (QImode, flags, const0_rtx);
10075 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10076 }
10077
10078 /* Otherwise, we can get the same results by manually performing
10079 a bit extract operation on bit 5, and then performing the two
10080 shifts. The two methods of getting 0/1 into low/high are exactly
10081 the same size. Avoiding the shift in the bit extract case helps
10082 pentium4 a bit; no one else seems to care much either way. */
10083 else
10084 {
10085 rtx x;
10086
10087 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
10088 x = gen_rtx_ZERO_EXTEND (SImode, operands[2]);
10089 else
10090 x = gen_lowpart (SImode, operands[2]);
10091 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
10092
10093 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (5)));
10094 emit_insn (gen_andsi3 (high[0], high[0], GEN_INT (1)));
10095 emit_move_insn (low[0], high[0]);
10096 emit_insn (gen_xorsi3 (low[0], low[0], GEN_INT (1)));
10097 }
10098
10099 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10100 emit_insn (gen_ashlsi3 (high[0], high[0], operands[2]));
10101 return;
10102 }
10103
10104 if (operands[1] == constm1_rtx)
10105 {
10106 /* For -1LL << N, we can avoid the shld instruction, because we
10107 know that we're shifting 0...31 ones into a -1. */
10108 emit_move_insn (low[0], constm1_rtx);
10109 if (optimize_size)
10110 emit_move_insn (high[0], low[0]);
10111 else
10112 emit_move_insn (high[0], constm1_rtx);
10113 }
10114 else
10115 {
10116 if (!rtx_equal_p (operands[0], operands[1]))
10117 emit_move_insn (operands[0], operands[1]);
10118
10119 split_di (operands, 1, low, high);
10120 emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
10121 }
10122
10123 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10124
10125 if (TARGET_CMOVE && scratch)
10126 {
10127 ix86_expand_clear (scratch);
10128 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
10129 }
10130 else
10131 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
10132 }
10133
10134 void
10135 ix86_split_ashrdi (rtx *operands, rtx scratch)
10136 {
10137 rtx low[2], high[2];
10138 int count;
10139
10140 if (GET_CODE (operands[2]) == CONST_INT)
10141 {
10142 split_di (operands, 2, low, high);
10143 count = INTVAL (operands[2]) & 63;
10144
10145 if (count == 63)
10146 {
10147 emit_move_insn (high[0], high[1]);
10148 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10149 emit_move_insn (low[0], high[0]);
10150
10151 }
10152 else if (count >= 32)
10153 {
10154 emit_move_insn (low[0], high[1]);
10155 emit_move_insn (high[0], low[0]);
10156 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10157 if (count > 32)
10158 emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
10159 }
10160 else
10161 {
10162 if (!rtx_equal_p (operands[0], operands[1]))
10163 emit_move_insn (operands[0], operands[1]);
10164 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10165 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
10166 }
10167 }
10168 else
10169 {
10170 if (!rtx_equal_p (operands[0], operands[1]))
10171 emit_move_insn (operands[0], operands[1]);
10172
10173 split_di (operands, 1, low, high);
10174
10175 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10176 emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
10177
10178 if (TARGET_CMOVE && scratch)
10179 {
10180 emit_move_insn (scratch, high[0]);
10181 emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
10182 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10183 scratch));
10184 }
10185 else
10186 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
10187 }
10188 }
10189
10190 void
10191 ix86_split_lshrdi (rtx *operands, rtx scratch)
10192 {
10193 rtx low[2], high[2];
10194 int count;
10195
10196 if (GET_CODE (operands[2]) == CONST_INT)
10197 {
10198 split_di (operands, 2, low, high);
10199 count = INTVAL (operands[2]) & 63;
10200
10201 if (count >= 32)
10202 {
10203 emit_move_insn (low[0], high[1]);
10204 ix86_expand_clear (high[0]);
10205
10206 if (count > 32)
10207 emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
10208 }
10209 else
10210 {
10211 if (!rtx_equal_p (operands[0], operands[1]))
10212 emit_move_insn (operands[0], operands[1]);
10213 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10214 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
10215 }
10216 }
10217 else
10218 {
10219 if (!rtx_equal_p (operands[0], operands[1]))
10220 emit_move_insn (operands[0], operands[1]);
10221
10222 split_di (operands, 1, low, high);
10223
10224 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10225 emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
10226
10227 /* Heh. By reversing the arguments, we can reuse this pattern. */
10228 if (TARGET_CMOVE && scratch)
10229 {
10230 ix86_expand_clear (scratch);
10231 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10232 scratch));
10233 }
10234 else
10235 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
10236 }
10237 }
10238
10239 /* Helper function for the string operations below. Test whether VARIABLE
10240 is aligned to VALUE bytes. If so, jump to the returned label. */
10241 static rtx
10242 ix86_expand_aligntest (rtx variable, int value)
10243 {
10244 rtx label = gen_label_rtx ();
10245 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
10246 if (GET_MODE (variable) == DImode)
10247 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
10248 else
10249 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
10250 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
10251 1, label);
10252 return label;
10253 }
10254
10255 /* Decrease COUNTREG by VALUE. */
10256 static void
10257 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
10258 {
10259 if (GET_MODE (countreg) == DImode)
10260 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
10261 else
10262 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
10263 }
10264
10265 /* Zero-extend EXP, which may be in SImode, into a Pmode register. */
10266 rtx
10267 ix86_zero_extend_to_Pmode (rtx exp)
10268 {
10269 rtx r;
10270 if (GET_MODE (exp) == VOIDmode)
10271 return force_reg (Pmode, exp);
10272 if (GET_MODE (exp) == Pmode)
10273 return copy_to_mode_reg (Pmode, exp);
10274 r = gen_reg_rtx (Pmode);
10275 emit_insn (gen_zero_extendsidi2 (r, exp));
10276 return r;
10277 }
10278
10279 /* Expand string move (memcpy) operation. Use i386 string operations when
10280 profitable. expand_clrmem contains similar code. */
10281 int
10282 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
10283 {
10284 rtx srcreg, destreg, countreg, srcexp, destexp;
10285 enum machine_mode counter_mode;
10286 HOST_WIDE_INT align = 0;
10287 unsigned HOST_WIDE_INT count = 0;
10288
10289 if (GET_CODE (align_exp) == CONST_INT)
10290 align = INTVAL (align_exp);
10291
10292 /* Can't use any of this if the user has appropriated esi or edi. */
10293 if (global_regs[4] || global_regs[5])
10294 return 0;
10295
10296 /* This simple hack avoids all inlining code and simplifies code below. */
10297 if (!TARGET_ALIGN_STRINGOPS)
10298 align = 64;
10299
10300 if (GET_CODE (count_exp) == CONST_INT)
10301 {
10302 count = INTVAL (count_exp);
10303 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10304 return 0;
10305 }
10306
10307 /* Figure out proper mode for counter. For 32bits it is always SImode,
10308 for 64bits use SImode when possible, otherwise DImode.
10309 Set count to number of bytes copied when known at compile time. */
10310 if (!TARGET_64BIT
10311 || GET_MODE (count_exp) == SImode
10312 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10313 counter_mode = SImode;
10314 else
10315 counter_mode = DImode;
10316
10317 if (counter_mode != SImode && counter_mode != DImode)
10318 abort ();
10319
10320 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10321 if (destreg != XEXP (dst, 0))
10322 dst = replace_equiv_address_nv (dst, destreg);
10323 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
10324 if (srcreg != XEXP (src, 0))
10325 src = replace_equiv_address_nv (src, srcreg);
10326
10327 /* When optimizing for size emit simple rep ; movsb instruction for
10328 counts not divisible by 4. */
10329
10330 if ((!optimize || optimize_size) && (count == 0 || (count & 0x03)))
10331 {
10332 emit_insn (gen_cld ());
10333 countreg = ix86_zero_extend_to_Pmode (count_exp);
10334 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10335 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
10336 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
10337 destexp, srcexp));
10338 }
10339
10340 /* For constant aligned (or small unaligned) copies use rep movsl
10341 followed by code copying the rest. For PentiumPro ensure 8 byte
10342 alignment to allow rep movsl acceleration. */
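/* As a worked example: for count == 23 with size == 4, this arm emits
   "rep movsl" with a count of 5 (copying 20 bytes) and then one movsw
   and one movsb for the remaining 3 bytes.  */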
10343
10344 else if (count != 0
10345 && (align >= 8
10346 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
10347 || optimize_size || count < (unsigned int) 64))
10348 {
10349 unsigned HOST_WIDE_INT offset = 0;
10350 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
10351 rtx srcmem, dstmem;
10352
10353 emit_insn (gen_cld ());
10354 if (count & ~(size - 1))
10355 {
10356 countreg = copy_to_mode_reg (counter_mode,
10357 GEN_INT ((count >> (size == 4 ? 2 : 3))
10358 & (TARGET_64BIT ? -1 : 0x3fffffff)));
10359 countreg = ix86_zero_extend_to_Pmode (countreg);
10360
10361 destexp = gen_rtx_ASHIFT (Pmode, countreg,
10362 GEN_INT (size == 4 ? 2 : 3));
10363 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10364 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10365
10366 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10367 countreg, destexp, srcexp));
10368 offset = count & ~(size - 1);
10369 }
10370 if (size == 8 && (count & 0x04))
10371 {
10372 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
10373 offset);
10374 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
10375 offset);
10376 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10377 offset += 4;
10378 }
10379 if (count & 0x02)
10380 {
10381 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
10382 offset);
10383 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
10384 offset);
10385 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10386 offset += 2;
10387 }
10388 if (count & 0x01)
10389 {
10390 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
10391 offset);
10392 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
10393 offset);
10394 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10395 }
10396 }
10397 /* The generic code based on the glibc implementation:
10398 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
10399 allowing accelerated copying there)
10400 - copy the data using rep movsl
10401 - copy the rest. */
10402 else
10403 {
10404 rtx countreg2;
10405 rtx label = NULL;
10406 rtx srcmem, dstmem;
10407 int desired_alignment = (TARGET_PENTIUMPRO
10408 && (count == 0 || count >= (unsigned int) 260)
10409 ? 8 : UNITS_PER_WORD);
10410 /* Get rid of MEM_OFFSETs, they won't be accurate. */
10411 dst = change_address (dst, BLKmode, destreg);
10412 src = change_address (src, BLKmode, srcreg);
10413
10414 /* In case we don't know anything about the alignment, default to
10415 the library version, since it is usually equally fast and results in
10416 shorter code.
10417
10418 Also emit a call when we know that the count is large and the call
10419 overhead will not be important. */
10420 if (!TARGET_INLINE_ALL_STRINGOPS
10421 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
10422 return 0;
10423
10424 if (TARGET_SINGLE_STRINGOP)
10425 emit_insn (gen_cld ());
10426
10427 countreg2 = gen_reg_rtx (Pmode);
10428 countreg = copy_to_mode_reg (counter_mode, count_exp);
10429
10430 /* We don't use loops to align destination and to copy parts smaller
10431 than 4 bytes, because gcc is able to optimize such code better (in
10432 the case the destination or the count really is aligned, gcc is often
10433 able to predict the branches) and also it is friendlier to the
10434 hardware branch prediction.
10435
10436 Using loops is beneficial for the generic case, because we can
10437 handle small counts using the loops. Many CPUs (such as Athlon)
10438 have large REP prefix setup costs.
10439
10440 This is quite costly. Maybe we can revisit this decision later or
10441 add some customizability to this code. */
10442
10443 if (count == 0 && align < desired_alignment)
10444 {
10445 label = gen_label_rtx ();
10446 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
10447 LEU, 0, counter_mode, 1, label);
10448 }
10449 if (align <= 1)
10450 {
10451 rtx label = ix86_expand_aligntest (destreg, 1);
10452 srcmem = change_address (src, QImode, srcreg);
10453 dstmem = change_address (dst, QImode, destreg);
10454 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10455 ix86_adjust_counter (countreg, 1);
10456 emit_label (label);
10457 LABEL_NUSES (label) = 1;
10458 }
10459 if (align <= 2)
10460 {
10461 rtx label = ix86_expand_aligntest (destreg, 2);
10462 srcmem = change_address (src, HImode, srcreg);
10463 dstmem = change_address (dst, HImode, destreg);
10464 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10465 ix86_adjust_counter (countreg, 2);
10466 emit_label (label);
10467 LABEL_NUSES (label) = 1;
10468 }
10469 if (align <= 4 && desired_alignment > 4)
10470 {
10471 rtx label = ix86_expand_aligntest (destreg, 4);
10472 srcmem = change_address (src, SImode, srcreg);
10473 dstmem = change_address (dst, SImode, destreg);
10474 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10475 ix86_adjust_counter (countreg, 4);
10476 emit_label (label);
10477 LABEL_NUSES (label) = 1;
10478 }
10479
10480 if (label && desired_alignment > 4 && !TARGET_64BIT)
10481 {
10482 emit_label (label);
10483 LABEL_NUSES (label) = 1;
10484 label = NULL_RTX;
10485 }
10486 if (!TARGET_SINGLE_STRINGOP)
10487 emit_insn (gen_cld ());
10488 if (TARGET_64BIT)
10489 {
10490 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
10491 GEN_INT (3)));
10492 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
10493 }
10494 else
10495 {
10496 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
10497 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
10498 }
10499 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10500 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10501 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10502 countreg2, destexp, srcexp));
10503
10504 if (label)
10505 {
10506 emit_label (label);
10507 LABEL_NUSES (label) = 1;
10508 }
10509 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
10510 {
10511 srcmem = change_address (src, SImode, srcreg);
10512 dstmem = change_address (dst, SImode, destreg);
10513 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10514 }
10515 if ((align <= 4 || count == 0) && TARGET_64BIT)
10516 {
10517 rtx label = ix86_expand_aligntest (countreg, 4);
10518 srcmem = change_address (src, SImode, srcreg);
10519 dstmem = change_address (dst, SImode, destreg);
10520 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10521 emit_label (label);
10522 LABEL_NUSES (label) = 1;
10523 }
10524 if (align > 2 && count != 0 && (count & 2))
10525 {
10526 srcmem = change_address (src, HImode, srcreg);
10527 dstmem = change_address (dst, HImode, destreg);
10528 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10529 }
10530 if (align <= 2 || count == 0)
10531 {
10532 rtx label = ix86_expand_aligntest (countreg, 2);
10533 srcmem = change_address (src, HImode, srcreg);
10534 dstmem = change_address (dst, HImode, destreg);
10535 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10536 emit_label (label);
10537 LABEL_NUSES (label) = 1;
10538 }
10539 if (align > 1 && count != 0 && (count & 1))
10540 {
10541 srcmem = change_address (src, QImode, srcreg);
10542 dstmem = change_address (dst, QImode, destreg);
10543 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10544 }
10545 if (align <= 1 || count == 0)
10546 {
10547 rtx label = ix86_expand_aligntest (countreg, 1);
10548 srcmem = change_address (src, QImode, srcreg);
10549 dstmem = change_address (dst, QImode, destreg);
10550 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10551 emit_label (label);
10552 LABEL_NUSES (label) = 1;
10553 }
10554 }
10555
10556 return 1;
10557 }
10558
10559 /* Expand string clear operation (bzero). Use i386 string operations when
10560 profitable. expand_movmem contains similar code. */
10561 int
10562 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
10563 {
10564 rtx destreg, zeroreg, countreg, destexp;
10565 enum machine_mode counter_mode;
10566 HOST_WIDE_INT align = 0;
10567 unsigned HOST_WIDE_INT count = 0;
10568
10569 if (GET_CODE (align_exp) == CONST_INT)
10570 align = INTVAL (align_exp);
10571
10572 /* Can't use any of this if the user has appropriated esi. */
10573 if (global_regs[4])
10574 return 0;
10575
10576 /* This simple hack avoids all inlining code and simplifies code below. */
10577 if (!TARGET_ALIGN_STRINGOPS)
10578 align = 32;
10579
10580 if (GET_CODE (count_exp) == CONST_INT)
10581 {
10582 count = INTVAL (count_exp);
10583 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10584 return 0;
10585 }
10586 /* Figure out proper mode for counter. For 32bits it is always SImode,
10587 for 64bits use SImode when possible, otherwise DImode.
10588 Set count to number of bytes copied when known at compile time. */
10589 if (!TARGET_64BIT
10590 || GET_MODE (count_exp) == SImode
10591 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10592 counter_mode = SImode;
10593 else
10594 counter_mode = DImode;
10595
10596 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10597 if (destreg != XEXP (dst, 0))
10598 dst = replace_equiv_address_nv (dst, destreg);
10599
10600
10601 /* When optimizing for size emit a simple rep ; stosb instruction for
10602 counts not divisible by 4. The movl $N, %ecx; rep; stosb
10603 sequence is 7 bytes long, so if optimizing for size and the count is
10604 small enough that some stosl, stosw and stosb instructions without
10605 rep are shorter, fall through into the next if. */
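/* For instance, with -Os and count == 11, (11 & 3) + (11 >> 2) == 5 is not
   greater than 7, so this branch is skipped and the unrolled
   stosl/stosw/stosb code below is used instead of "rep stosb".  */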
10606
10607 if ((!optimize || optimize_size)
10608 && (count == 0
10609 || ((count & 0x03)
10610 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
10611 {
10612 emit_insn (gen_cld ());
10613
10614 countreg = ix86_zero_extend_to_Pmode (count_exp);
10615 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
10616 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10617 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
10618 }
10619 else if (count != 0
10620 && (align >= 8
10621 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
10622 || optimize_size || count < (unsigned int) 64))
10623 {
10624 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
10625 unsigned HOST_WIDE_INT offset = 0;
10626
10627 emit_insn (gen_cld ());
10628
10629 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
10630 if (count & ~(size - 1))
10631 {
10632 unsigned HOST_WIDE_INT repcount;
10633 unsigned int max_nonrep;
10634
10635 repcount = count >> (size == 4 ? 2 : 3);
10636 if (!TARGET_64BIT)
10637 repcount &= 0x3fffffff;
10638
10639 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
10640 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
10641 bytes. In both cases the latter seems to be faster for small
10642 values of N. */
10643 max_nonrep = size == 4 ? 7 : 4;
10644 if (!optimize_size)
10645 switch (ix86_tune)
10646 {
10647 case PROCESSOR_PENTIUM4:
10648 case PROCESSOR_NOCONA:
10649 max_nonrep = 3;
10650 break;
10651 default:
10652 break;
10653 }
10654
10655 if (repcount <= max_nonrep)
10656 while (repcount-- > 0)
10657 {
10658 rtx mem = adjust_automodify_address_nv (dst,
10659 GET_MODE (zeroreg),
10660 destreg, offset);
10661 emit_insn (gen_strset (destreg, mem, zeroreg));
10662 offset += size;
10663 }
10664 else
10665 {
10666 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
10667 countreg = ix86_zero_extend_to_Pmode (countreg);
10668 destexp = gen_rtx_ASHIFT (Pmode, countreg,
10669 GEN_INT (size == 4 ? 2 : 3));
10670 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10671 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
10672 destexp));
10673 offset = count & ~(size - 1);
10674 }
10675 }
10676 if (size == 8 && (count & 0x04))
10677 {
10678 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
10679 offset);
10680 emit_insn (gen_strset (destreg, mem,
10681 gen_rtx_SUBREG (SImode, zeroreg, 0)));
10682 offset += 4;
10683 }
10684 if (count & 0x02)
10685 {
10686 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
10687 offset);
10688 emit_insn (gen_strset (destreg, mem,
10689 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10690 offset += 2;
10691 }
10692 if (count & 0x01)
10693 {
10694 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
10695 offset);
10696 emit_insn (gen_strset (destreg, mem,
10697 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10698 }
10699 }
10700 else
10701 {
10702 rtx countreg2;
10703 rtx label = NULL;
10704 /* Compute desired alignment of the string operation. */
10705 int desired_alignment = (TARGET_PENTIUMPRO
10706 && (count == 0 || count >= (unsigned int) 260)
10707 ? 8 : UNITS_PER_WORD);
10708
10709 /* In case we don't know anything about the alignment, default to
10710 the library version, since it is usually equally fast and results in
10711 shorter code.
10712
10713 Also emit a call when we know that the count is large and the call
10714 overhead will not be important. */
10715 if (!TARGET_INLINE_ALL_STRINGOPS
10716 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
10717 return 0;
10718
10719 if (TARGET_SINGLE_STRINGOP)
10720 emit_insn (gen_cld ());
10721
10722 countreg2 = gen_reg_rtx (Pmode);
10723 countreg = copy_to_mode_reg (counter_mode, count_exp);
10724 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
10725 /* Get rid of MEM_OFFSET, it won't be accurate. */
10726 dst = change_address (dst, BLKmode, destreg);
10727
10728 if (count == 0 && align < desired_alignment)
10729 {
10730 label = gen_label_rtx ();
10731 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
10732 LEU, 0, counter_mode, 1, label);
10733 }
10734 if (align <= 1)
10735 {
10736 rtx label = ix86_expand_aligntest (destreg, 1);
10737 emit_insn (gen_strset (destreg, dst,
10738 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10739 ix86_adjust_counter (countreg, 1);
10740 emit_label (label);
10741 LABEL_NUSES (label) = 1;
10742 }
10743 if (align <= 2)
10744 {
10745 rtx label = ix86_expand_aligntest (destreg, 2);
10746 emit_insn (gen_strset (destreg, dst,
10747 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10748 ix86_adjust_counter (countreg, 2);
10749 emit_label (label);
10750 LABEL_NUSES (label) = 1;
10751 }
10752 if (align <= 4 && desired_alignment > 4)
10753 {
10754 rtx label = ix86_expand_aligntest (destreg, 4);
10755 emit_insn (gen_strset (destreg, dst,
10756 (TARGET_64BIT
10757 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
10758 : zeroreg)));
10759 ix86_adjust_counter (countreg, 4);
10760 emit_label (label);
10761 LABEL_NUSES (label) = 1;
10762 }
10763
10764 if (label && desired_alignment > 4 && !TARGET_64BIT)
10765 {
10766 emit_label (label);
10767 LABEL_NUSES (label) = 1;
10768 label = NULL_RTX;
10769 }
10770
10771 if (!TARGET_SINGLE_STRINGOP)
10772 emit_insn (gen_cld ());
10773 if (TARGET_64BIT)
10774 {
10775 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
10776 GEN_INT (3)));
10777 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
10778 }
10779 else
10780 {
10781 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
10782 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
10783 }
10784 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10785 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
10786
10787 if (label)
10788 {
10789 emit_label (label);
10790 LABEL_NUSES (label) = 1;
10791 }
10792
10793 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
10794 emit_insn (gen_strset (destreg, dst,
10795 gen_rtx_SUBREG (SImode, zeroreg, 0)));
10796 if (TARGET_64BIT && (align <= 4 || count == 0))
10797 {
10798 rtx label = ix86_expand_aligntest (countreg, 4);
10799 emit_insn (gen_strset (destreg, dst,
10800 gen_rtx_SUBREG (SImode, zeroreg, 0)));
10801 emit_label (label);
10802 LABEL_NUSES (label) = 1;
10803 }
10804 if (align > 2 && count != 0 && (count & 2))
10805 emit_insn (gen_strset (destreg, dst,
10806 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10807 if (align <= 2 || count == 0)
10808 {
10809 rtx label = ix86_expand_aligntest (countreg, 2);
10810 emit_insn (gen_strset (destreg, dst,
10811 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10812 emit_label (label);
10813 LABEL_NUSES (label) = 1;
10814 }
10815 if (align > 1 && count != 0 && (count & 1))
10816 emit_insn (gen_strset (destreg, dst,
10817 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10818 if (align <= 1 || count == 0)
10819 {
10820 rtx label = ix86_expand_aligntest (countreg, 1);
10821 emit_insn (gen_strset (destreg, dst,
10822 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10823 emit_label (label);
10824 LABEL_NUSES (label) = 1;
10825 }
10826 }
10827 return 1;
10828 }
10829
10830 /* Expand strlen. */
10831 int
10832 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
10833 {
10834 rtx addr, scratch1, scratch2, scratch3, scratch4;
10835
10836 /* The generic case of the strlen expander is long. Avoid expanding
10837 it unless TARGET_INLINE_ALL_STRINGOPS. */
10838
10839 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
10840 && !TARGET_INLINE_ALL_STRINGOPS
10841 && !optimize_size
10842 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
10843 return 0;
10844
10845 addr = force_reg (Pmode, XEXP (src, 0));
10846 scratch1 = gen_reg_rtx (Pmode);
10847
10848 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
10849 && !optimize_size)
10850 {
10851 /* Well, it seems that some optimizer does not combine a call like
10852 foo (strlen (bar), strlen (bar));
10853 when the move and the subtraction are done here. It does calculate
10854 the length just once when these instructions are done inside
10855 output_strlen_unroll(). But since &bar[strlen (bar)] is often used
10856 and one fewer register is used for the lifetime of
10857 output_strlen_unroll(), this is better. */
10858
10859 emit_move_insn (out, addr);
10860
10861 ix86_expand_strlensi_unroll_1 (out, src, align);
10862
10863 /* strlensi_unroll_1 returns the address of the zero at the end of
10864 the string, like memchr(), so compute the length by subtracting
10865 the start address. */
10866 if (TARGET_64BIT)
10867 emit_insn (gen_subdi3 (out, out, addr));
10868 else
10869 emit_insn (gen_subsi3 (out, out, addr));
10870 }
10871 else
10872 {
10873 rtx unspec;
10874 scratch2 = gen_reg_rtx (Pmode);
10875 scratch3 = gen_reg_rtx (Pmode);
10876 scratch4 = force_reg (Pmode, constm1_rtx);
10877
10878 emit_move_insn (scratch3, addr);
10879 eoschar = force_reg (QImode, eoschar);
10880
10881 emit_insn (gen_cld ());
10882 src = replace_equiv_address_nv (src, scratch3);
10883
10884 /* If .md starts supporting :P, this can be done in .md. */
10885 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
10886 scratch4), UNSPEC_SCAS);
10887 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
10888 if (TARGET_64BIT)
10889 {
10890 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
10891 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
10892 }
10893 else
10894 {
10895 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
10896 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
10897 }
10898 }
10899 return 1;
10900 }
10901
10902 /* Expand the appropriate insns for doing strlen if not just doing
10903 repnz; scasb
10904
10905 out = result, initialized with the start address
10906 align_rtx = alignment of the address.
10907 scratch = scratch register, initialized with the start address when
10908 not aligned, otherwise undefined
10909
10910 This is just the body. It needs the initializations mentioned above and
10911 some address computing at the end. These things are done in i386.md. */
10912
10913 static void
10914 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
10915 {
10916 int align;
10917 rtx tmp;
10918 rtx align_2_label = NULL_RTX;
10919 rtx align_3_label = NULL_RTX;
10920 rtx align_4_label = gen_label_rtx ();
10921 rtx end_0_label = gen_label_rtx ();
10922 rtx mem;
10923 rtx tmpreg = gen_reg_rtx (SImode);
10924 rtx scratch = gen_reg_rtx (SImode);
10925 rtx cmp;
10926
10927 align = 0;
10928 if (GET_CODE (align_rtx) == CONST_INT)
10929 align = INTVAL (align_rtx);
10930
10931 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
10932
10933 /* Is there a known alignment and is it less than 4? */
10934 if (align < 4)
10935 {
10936 rtx scratch1 = gen_reg_rtx (Pmode);
10937 emit_move_insn (scratch1, out);
10938 /* Is there a known alignment and is it not 2? */
10939 if (align != 2)
10940 {
10941 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
10942 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
10943
10944 /* Leave just the two lower bits. */
10945 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
10946 NULL_RTX, 0, OPTAB_WIDEN);
10947
10948 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
10949 Pmode, 1, align_4_label);
10950 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
10951 Pmode, 1, align_2_label);
10952 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
10953 Pmode, 1, align_3_label);
10954 }
10955 else
10956 {
10957 /* Since the alignment is 2, we have to check 0 or 2 bytes;
10958 check whether it is already aligned to 4 bytes. */
10959
10960 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
10961 NULL_RTX, 0, OPTAB_WIDEN);
10962
10963 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
10964 Pmode, 1, align_4_label);
10965 }
10966
10967 mem = change_address (src, QImode, out);
10968
10969 /* Now compare the bytes. */
10970
10971 /* Compare the first few unaligned bytes on a byte-by-byte basis. */
10972 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
10973 QImode, 1, end_0_label);
10974
10975 /* Increment the address. */
10976 if (TARGET_64BIT)
10977 emit_insn (gen_adddi3 (out, out, const1_rtx));
10978 else
10979 emit_insn (gen_addsi3 (out, out, const1_rtx));
10980
10981 /* Not needed with an alignment of 2 */
10982 if (align != 2)
10983 {
10984 emit_label (align_2_label);
10985
10986 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
10987 end_0_label);
10988
10989 if (TARGET_64BIT)
10990 emit_insn (gen_adddi3 (out, out, const1_rtx));
10991 else
10992 emit_insn (gen_addsi3 (out, out, const1_rtx));
10993
10994 emit_label (align_3_label);
10995 }
10996
10997 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
10998 end_0_label);
10999
11000 if (TARGET_64BIT)
11001 emit_insn (gen_adddi3 (out, out, const1_rtx));
11002 else
11003 emit_insn (gen_addsi3 (out, out, const1_rtx));
11004 }
11005
11006 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
11007 align this loop; that only makes the program larger and does not
11008 speed it up. */
11009 emit_label (align_4_label);
11010
11011 mem = change_address (src, SImode, out);
11012 emit_move_insn (scratch, mem);
11013 if (TARGET_64BIT)
11014 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
11015 else
11016 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
11017
11018 /* This formula yields a nonzero result iff one of the bytes is zero.
11019 This saves three branches inside the loop and many cycles. */
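/* Spelled out in C, with x the SImode word just loaded, the test is

     ((x - 0x01010101) & ~x & 0x80808080) != 0

   which is nonzero exactly when some byte of x is zero: bit 7 of a byte of
   x - 0x01010101 can be set while the same bit of x is clear only for a zero
   byte (or through a borrow started by a lower zero byte), and the ~x factor
   rejects bytes whose top bit was already set.  */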
11020
11021 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
11022 emit_insn (gen_one_cmplsi2 (scratch, scratch));
11023 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
11024 emit_insn (gen_andsi3 (tmpreg, tmpreg,
11025 gen_int_mode (0x80808080, SImode)));
11026 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
11027 align_4_label);
11028
11029 if (TARGET_CMOVE)
11030 {
11031 rtx reg = gen_reg_rtx (SImode);
11032 rtx reg2 = gen_reg_rtx (Pmode);
11033 emit_move_insn (reg, tmpreg);
11034 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
11035
11036 /* If zero is not in the first two bytes, move two bytes forward. */
11037 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11038 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11039 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11040 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
11041 gen_rtx_IF_THEN_ELSE (SImode, tmp,
11042 reg,
11043 tmpreg)));
11044 /* Emit lea manually to avoid clobbering of flags. */
11045 emit_insn (gen_rtx_SET (SImode, reg2,
11046 gen_rtx_PLUS (Pmode, out, const2_rtx)));
11047
11048 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11049 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11050 emit_insn (gen_rtx_SET (VOIDmode, out,
11051 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
11052 reg2,
11053 out)));
11054
11055 }
11056 else
11057 {
11058 rtx end_2_label = gen_label_rtx ();
11059 /* Is zero in the first two bytes? */
11060
11061 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11062 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11063 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
11064 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
11065 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
11066 pc_rtx);
11067 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
11068 JUMP_LABEL (tmp) = end_2_label;
11069
11070 /* Not in the first two. Move two bytes forward. */
11071 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
11072 if (TARGET_64BIT)
11073 emit_insn (gen_adddi3 (out, out, const2_rtx));
11074 else
11075 emit_insn (gen_addsi3 (out, out, const2_rtx));
11076
11077 emit_label (end_2_label);
11078
11079 }
11080
11081 /* Avoid a branch when fixing up the byte. */
11082 tmpreg = gen_lowpart (QImode, tmpreg);
11083 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
11084 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
11085 if (TARGET_64BIT)
11086 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
11087 else
11088 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
11089
11090 emit_label (end_0_label);
11091 }
11092
11093 void
11094 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
11095 rtx callarg2 ATTRIBUTE_UNUSED,
11096 rtx pop, int sibcall)
11097 {
11098 rtx use = NULL, call;
11099
11100 if (pop == const0_rtx)
11101 pop = NULL;
11102 if (TARGET_64BIT && pop)
11103 abort ();
11104
11105 #if TARGET_MACHO
11106 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
11107 fnaddr = machopic_indirect_call_target (fnaddr);
11108 #else
11109 /* Static functions and indirect calls don't need the pic register. */
11110 if (! TARGET_64BIT && flag_pic
11111 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
11112 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
11113 use_reg (&use, pic_offset_table_rtx);
11114
11115 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
11116 {
11117 rtx al = gen_rtx_REG (QImode, 0);
11118 emit_move_insn (al, callarg2);
11119 use_reg (&use, al);
11120 }
11121 #endif /* TARGET_MACHO */
11122
11123 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
11124 {
11125 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11126 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11127 }
11128 if (sibcall && TARGET_64BIT
11129 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
11130 {
11131 rtx addr;
11132 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11133 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
11134 emit_move_insn (fnaddr, addr);
11135 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11136 }
11137
11138 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
11139 if (retval)
11140 call = gen_rtx_SET (VOIDmode, retval, call);
11141 if (pop)
11142 {
11143 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
11144 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
11145 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
11146 }
11147
11148 call = emit_call_insn (call);
11149 if (use)
11150 CALL_INSN_FUNCTION_USAGE (call) = use;
11151 }
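/* Illustrative note (not in the original source): for an ia32 call whose
   callee pops 4 bytes of arguments, the pattern built above has roughly
   the shape
     (parallel [(set (reg:SI ax)
                     (call (mem:QI (symbol_ref ("foo"))) (const_int 4)))
                (set (reg:SI sp)
                     (plus:SI (reg:SI sp) (const_int 4)))])
   where "foo" and the constants are made-up example values.  */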
11152
11153 \f
11154 /* Clear stack slot assignments remembered from previous functions.
11155 This is called from INIT_EXPANDERS once before RTL is emitted for each
11156 function. */
11157
11158 static struct machine_function *
11159 ix86_init_machine_status (void)
11160 {
11161 struct machine_function *f;
11162
11163 f = ggc_alloc_cleared (sizeof (struct machine_function));
11164 f->use_fast_prologue_epilogue_nregs = -1;
11165
11166 return f;
11167 }
11168
11169 /* Return a MEM corresponding to a stack slot with mode MODE.
11170 Allocate a new slot if necessary.
11171
11172 The RTL for a function can have several slots available: N is
11173 which slot to use. */
11174
11175 rtx
11176 assign_386_stack_local (enum machine_mode mode, int n)
11177 {
11178 struct stack_local_entry *s;
11179
11180 if (n < 0 || n >= MAX_386_STACK_LOCALS)
11181 abort ();
11182
11183 for (s = ix86_stack_locals; s; s = s->next)
11184 if (s->mode == mode && s->n == n)
11185 return s->rtl;
11186
11187 s = (struct stack_local_entry *)
11188 ggc_alloc (sizeof (struct stack_local_entry));
11189 s->n = n;
11190 s->mode = mode;
11191 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
11192
11193 s->next = ix86_stack_locals;
11194 ix86_stack_locals = s;
11195 return s->rtl;
11196 }
11197
11198 /* Construct the SYMBOL_REF for the tls_get_addr function. */
11199
11200 static GTY(()) rtx ix86_tls_symbol;
11201 rtx
11202 ix86_tls_get_addr (void)
11203 {
11204
11205 if (!ix86_tls_symbol)
11206 {
11207 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
11208 (TARGET_GNU_TLS && !TARGET_64BIT)
11209 ? "___tls_get_addr"
11210 : "__tls_get_addr");
11211 }
11212
11213 return ix86_tls_symbol;
11214 }
11215 \f
11216 /* Calculate the length of the memory address in the instruction
11217 encoding. Does not include the one-byte modrm, opcode, or prefix. */
11218
11219 int
11220 memory_address_length (rtx addr)
11221 {
11222 struct ix86_address parts;
11223 rtx base, index, disp;
11224 int len;
11225
11226 if (GET_CODE (addr) == PRE_DEC
11227 || GET_CODE (addr) == POST_INC
11228 || GET_CODE (addr) == PRE_MODIFY
11229 || GET_CODE (addr) == POST_MODIFY)
11230 return 0;
11231
11232 if (! ix86_decompose_address (addr, &parts))
11233 abort ();
11234
11235 base = parts.base;
11236 index = parts.index;
11237 disp = parts.disp;
11238 len = 0;
11239
11240 /* Rule of thumb:
11241 - esp as the base always wants an index,
11242 - ebp as the base always wants a displacement. */
11243
11244 /* Register Indirect. */
11245 if (base && !index && !disp)
11246 {
11247 /* esp (for its index) and ebp (for its displacement) need
11248 the two-byte modrm form. */
11249 if (addr == stack_pointer_rtx
11250 || addr == arg_pointer_rtx
11251 || addr == frame_pointer_rtx
11252 || addr == hard_frame_pointer_rtx)
11253 len = 1;
11254 }
11255
11256 /* Direct Addressing. */
11257 else if (disp && !base && !index)
11258 len = 4;
11259
11260 else
11261 {
11262 /* Find the length of the displacement constant. */
11263 if (disp)
11264 {
11265 if (GET_CODE (disp) == CONST_INT
11266 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
11267 && base)
11268 len = 1;
11269 else
11270 len = 4;
11271 }
11272 /* ebp always wants a displacement. */
11273 else if (base == hard_frame_pointer_rtx)
11274 len = 1;
11275
11276 /* An index requires the two-byte modrm form.... */
11277 if (index
11278 /* ...like esp, which always wants an index. */
11279 || base == stack_pointer_rtx
11280 || base == arg_pointer_rtx
11281 || base == frame_pointer_rtx)
11282 len += 1;
11283 }
11284
11285 return len;
11286 }
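/* Illustrative examples (not in the original source) of the length
   computed above, i.e. address bytes beyond the opcode/modrm/prefix:
     (%eax)            -> 0   plain register indirect
     (%esp)            -> 1   needs a SIB byte
     8(%ebp)           -> 1   disp8
     -12(%eax,%ebx,4)  -> 2   SIB byte + disp8
     foo               -> 4   disp32 (direct addressing)  */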
11287
11288 /* Compute the default value for the "length_immediate" attribute. When
11289 SHORTFORM is set, expect that the insn has an 8-bit immediate alternative. */
11290 int
11291 ix86_attr_length_immediate_default (rtx insn, int shortform)
11292 {
11293 int len = 0;
11294 int i;
11295 extract_insn_cached (insn);
11296 for (i = recog_data.n_operands - 1; i >= 0; --i)
11297 if (CONSTANT_P (recog_data.operand[i]))
11298 {
11299 if (len)
11300 abort ();
11301 if (shortform
11302 && GET_CODE (recog_data.operand[i]) == CONST_INT
11303 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
11304 len = 1;
11305 else
11306 {
11307 switch (get_attr_mode (insn))
11308 {
11309 case MODE_QI:
11310 len += 1;
11311 break;
11312 case MODE_HI:
11313 len += 2;
11314 break;
11315 case MODE_SI:
11316 len += 4;
11317 break;
11318 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
11319 case MODE_DI:
11320 len += 4;
11321 break;
11322 default:
11323 fatal_insn ("unknown insn mode", insn);
11324 }
11325 }
11326 }
11327 return len;
11328 }
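/* Illustrative examples (not in the original source): with SHORTFORM set,
   "addl $3, %eax" uses the sign-extended 8-bit immediate form and
   contributes 1 byte; "addl $300, %eax" needs a full imm32 and contributes
   4; a MODE_HI insn such as "addw $300, %ax" contributes 2.  */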
11329 /* Compute the default value for the "length_address" attribute. */
11330 int
11331 ix86_attr_length_address_default (rtx insn)
11332 {
11333 int i;
11334
11335 if (get_attr_type (insn) == TYPE_LEA)
11336 {
11337 rtx set = PATTERN (insn);
11338 if (GET_CODE (set) == SET)
11339 ;
11340 else if (GET_CODE (set) == PARALLEL
11341 && GET_CODE (XVECEXP (set, 0, 0)) == SET)
11342 set = XVECEXP (set, 0, 0);
11343 else
11344 {
11345 #ifdef ENABLE_CHECKING
11346 abort ();
11347 #endif
11348 return 0;
11349 }
11350
11351 return memory_address_length (SET_SRC (set));
11352 }
11353
11354 extract_insn_cached (insn);
11355 for (i = recog_data.n_operands - 1; i >= 0; --i)
11356 if (GET_CODE (recog_data.operand[i]) == MEM)
11357 {
11358 return memory_address_length (XEXP (recog_data.operand[i], 0));
11360 }
11361 return 0;
11362 }
11363 \f
11364 /* Return the maximum number of instructions a cpu can issue. */
11365
11366 static int
11367 ix86_issue_rate (void)
11368 {
11369 switch (ix86_tune)
11370 {
11371 case PROCESSOR_PENTIUM:
11372 case PROCESSOR_K6:
11373 return 2;
11374
11375 case PROCESSOR_PENTIUMPRO:
11376 case PROCESSOR_PENTIUM4:
11377 case PROCESSOR_ATHLON:
11378 case PROCESSOR_K8:
11379 case PROCESSOR_NOCONA:
11380 return 3;
11381
11382 default:
11383 return 1;
11384 }
11385 }
11386
11387 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
11388 set by DEP_INSN and nothing else that DEP_INSN sets. */
11389
11390 static int
11391 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11392 {
11393 rtx set, set2;
11394
11395 /* Simplify the test for uninteresting insns. */
11396 if (insn_type != TYPE_SETCC
11397 && insn_type != TYPE_ICMOV
11398 && insn_type != TYPE_FCMOV
11399 && insn_type != TYPE_IBR)
11400 return 0;
11401
11402 if ((set = single_set (dep_insn)) != 0)
11403 {
11404 set = SET_DEST (set);
11405 set2 = NULL_RTX;
11406 }
11407 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
11408 && XVECLEN (PATTERN (dep_insn), 0) == 2
11409 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
11410 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
11411 {
11412 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
11413 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
11414 }
11415 else
11416 return 0;
11417
11418 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
11419 return 0;
11420
11421 /* This test is true if the dependent insn reads the flags but
11422 not any other potentially set register. */
11423 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
11424 return 0;
11425
11426 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
11427 return 0;
11428
11429 return 1;
11430 }
11431
11432 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
11433 address with operands set by DEP_INSN. */
11434
11435 static int
11436 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11437 {
11438 rtx addr;
11439
11440 if (insn_type == TYPE_LEA
11441 && TARGET_PENTIUM)
11442 {
11443 addr = PATTERN (insn);
11444 if (GET_CODE (addr) == SET)
11445 ;
11446 else if (GET_CODE (addr) == PARALLEL
11447 && GET_CODE (XVECEXP (addr, 0, 0)) == SET)
11448 addr = XVECEXP (addr, 0, 0);
11449 else
11450 abort ();
11451 addr = SET_SRC (addr);
11452 }
11453 else
11454 {
11455 int i;
11456 extract_insn_cached (insn);
11457 for (i = recog_data.n_operands - 1; i >= 0; --i)
11458 if (GET_CODE (recog_data.operand[i]) == MEM)
11459 {
11460 addr = XEXP (recog_data.operand[i], 0);
11461 goto found;
11462 }
11463 return 0;
11464 found:;
11465 }
11466
11467 return modified_in_p (addr, dep_insn);
11468 }
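/* Illustrative note (not in the original source): the Pentium address
   generation interlock detected above arises in sequences such as
     movl %ecx, %ebx
     movl (%ebx), %eax    (address uses the %ebx written just before)
   and costs an extra cycle, which ix86_adjust_cost below adds in.  */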
11469
11470 static int
11471 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
11472 {
11473 enum attr_type insn_type, dep_insn_type;
11474 enum attr_memory memory;
11475 rtx set, set2;
11476 int dep_insn_code_number;
11477
11478 /* Anti and output dependencies have zero cost on all CPUs. */
11479 if (REG_NOTE_KIND (link) != 0)
11480 return 0;
11481
11482 dep_insn_code_number = recog_memoized (dep_insn);
11483
11484 /* If we can't recognize the insns, we can't really do anything. */
11485 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
11486 return cost;
11487
11488 insn_type = get_attr_type (insn);
11489 dep_insn_type = get_attr_type (dep_insn);
11490
11491 switch (ix86_tune)
11492 {
11493 case PROCESSOR_PENTIUM:
11494 /* Address Generation Interlock adds a cycle of latency. */
11495 if (ix86_agi_dependant (insn, dep_insn, insn_type))
11496 cost += 1;
11497
11498 /* ??? Compares pair with jump/setcc. */
11499 if (ix86_flags_dependant (insn, dep_insn, insn_type))
11500 cost = 0;
11501
11502 /* Floating point stores require the value to be ready one cycle earlier. */
11503 if (insn_type == TYPE_FMOV
11504 && get_attr_memory (insn) == MEMORY_STORE
11505 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11506 cost += 1;
11507 break;
11508
11509 case PROCESSOR_PENTIUMPRO:
11510 memory = get_attr_memory (insn);
11511
11512 /* INT->FP conversion is expensive. */
11513 if (get_attr_fp_int_src (dep_insn))
11514 cost += 5;
11515
11516 /* There is one extra cycle of latency between an FP op and a store. */
11517 if (insn_type == TYPE_FMOV
11518 && (set = single_set (dep_insn)) != NULL_RTX
11519 && (set2 = single_set (insn)) != NULL_RTX
11520 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
11521 && GET_CODE (SET_DEST (set2)) == MEM)
11522 cost += 1;
11523
11524 /* Account for the reorder buffer's ability to hide the latency of a load
11525 by executing it in parallel with the previous instruction, provided that
11526 instruction is not needed to compute the address. */
11527 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11528 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11529 {
11530 /* Claim that moves take one cycle, as the core can issue one load
11531 at a time and the next load can start a cycle later. */
11532 if (dep_insn_type == TYPE_IMOV
11533 || dep_insn_type == TYPE_FMOV)
11534 cost = 1;
11535 else if (cost > 1)
11536 cost--;
11537 }
11538 break;
11539
11540 case PROCESSOR_K6:
11541 memory = get_attr_memory (insn);
11542
11543 /* The esp dependency is resolved before the instruction is really
11544 finished. */
11545 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
11546 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
11547 return 1;
11548
11549 /* INT->FP conversion is expensive. */
11550 if (get_attr_fp_int_src (dep_insn))
11551 cost += 5;
11552
11553 /* Account for the reorder buffer's ability to hide the latency of a load
11554 by executing it in parallel with the previous instruction, provided that
11555 instruction is not needed to compute the address. */
11556 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11557 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11558 {
11559 /* Claim that moves take one cycle, as the core can issue one load
11560 at a time and the next load can start a cycle later. */
11561 if (dep_insn_type == TYPE_IMOV
11562 || dep_insn_type == TYPE_FMOV)
11563 cost = 1;
11564 else if (cost > 2)
11565 cost -= 2;
11566 else
11567 cost = 1;
11568 }
11569 break;
11570
11571 case PROCESSOR_ATHLON:
11572 case PROCESSOR_K8:
11573 memory = get_attr_memory (insn);
11574
11575 /* Account for the reorder buffer's ability to hide the latency of a load
11576 by executing it in parallel with the previous instruction, provided that
11577 instruction is not needed to compute the address. */
11578 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11579 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11580 {
11581 enum attr_unit unit = get_attr_unit (insn);
11582 int loadcost = 3;
11583
11584 /* Because of the difference in length between the integer and floating
11585 point unit pipeline preparation stages, memory operands for floating
11586 point are cheaper.
11587
11588 ??? For Athlon the difference is most probably 2. */
11589 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
11590 loadcost = 3;
11591 else
11592 loadcost = TARGET_ATHLON ? 2 : 0;
11593
11594 if (cost >= loadcost)
11595 cost -= loadcost;
11596 else
11597 cost = 0;
11598 }
11599
11600 default:
11601 break;
11602 }
11603
11604 return cost;
11605 }
11606
11607 /* How many alternative schedules to try. This should be as wide as the
11608 scheduling freedom in the DFA, but no wider. Making this value too
11609 large results in extra work for the scheduler. */
11610
11611 static int
11612 ia32_multipass_dfa_lookahead (void)
11613 {
11614 if (ix86_tune == PROCESSOR_PENTIUM)
11615 return 2;
11616
11617 if (ix86_tune == PROCESSOR_PENTIUMPRO
11618 || ix86_tune == PROCESSOR_K6)
11619 return 1;
11620
11621 else
11622 return 0;
11623 }
11624
11625 \f
11626 /* Implement the target hook targetm.vectorize.misaligned_mem_ok. */
11627
11628 static bool
11629 ix86_misaligned_mem_ok (enum machine_mode mode)
11630 {
11631 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
11632 return true;
11633 else
11634 return false;
11635 }
11636
11637 /* Compute the alignment given to a constant that is being placed in memory.
11638 EXP is the constant and ALIGN is the alignment that the object would
11639 ordinarily have.
11640 The value of this function is used instead of that alignment to align
11641 the object. */
11642
11643 int
11644 ix86_constant_alignment (tree exp, int align)
11645 {
11646 if (TREE_CODE (exp) == REAL_CST)
11647 {
11648 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
11649 return 64;
11650 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
11651 return 128;
11652 }
11653 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
11654 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
11655 return BITS_PER_WORD;
11656
11657 return align;
11658 }
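/* Illustrative note (not in the original source): a DFmode REAL_CST would
   ordinarily get only 32-bit alignment under the ia32 ABI, so the test
   above raises it to 64 bits so the constant can be loaded with an
   aligned 8-byte access.  */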
11659
11660 /* Compute the alignment for a static variable.
11661 TYPE is the data type, and ALIGN is the alignment that
11662 the object would ordinarily have. The value of this function is used
11663 instead of that alignment to align the object. */
11664
11665 int
11666 ix86_data_alignment (tree type, int align)
11667 {
11668 if (AGGREGATE_TYPE_P (type)
11669 && TYPE_SIZE (type)
11670 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
11671 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
11672 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
11673 return 256;
11674
11675 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
11676 to a 16-byte boundary. */
11677 if (TARGET_64BIT)
11678 {
11679 if (AGGREGATE_TYPE_P (type)
11680 && TYPE_SIZE (type)
11681 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
11682 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
11683 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
11684 return 128;
11685 }
11686
11687 if (TREE_CODE (type) == ARRAY_TYPE)
11688 {
11689 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
11690 return 64;
11691 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
11692 return 128;
11693 }
11694 else if (TREE_CODE (type) == COMPLEX_TYPE)
11695 {
11696
11697 if (TYPE_MODE (type) == DCmode && align < 64)
11698 return 64;
11699 if (TYPE_MODE (type) == XCmode && align < 128)
11700 return 128;
11701 }
11702 else if ((TREE_CODE (type) == RECORD_TYPE
11703 || TREE_CODE (type) == UNION_TYPE
11704 || TREE_CODE (type) == QUAL_UNION_TYPE)
11705 && TYPE_FIELDS (type))
11706 {
11707 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
11708 return 64;
11709 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
11710 return 128;
11711 }
11712 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
11713 || TREE_CODE (type) == INTEGER_TYPE)
11714 {
11715 if (TYPE_MODE (type) == DFmode && align < 64)
11716 return 64;
11717 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
11718 return 128;
11719 }
11720
11721 return align;
11722 }
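/* Illustrative examples (not in the original source): a 64-byte static
   aggregate (512 bits) hits the >= 256 bit test above and gets 256-bit
   alignment; on x86-64 a 24-byte array gets 128-bit alignment per the ABI
   rule; a lone static "double" falls through to the DFmode case and gets
   64-bit alignment.  */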
11723
11724 /* Compute the alignment for a local variable.
11725 TYPE is the data type, and ALIGN is the alignment that
11726 the object would ordinarily have. The value of this function is used
11727 instead of that alignment to align the object. */
11728
11729 int
11730 ix86_local_alignment (tree type, int align)
11731 {
11732 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
11733 to a 16-byte boundary. */
11734 if (TARGET_64BIT)
11735 {
11736 if (AGGREGATE_TYPE_P (type)
11737 && TYPE_SIZE (type)
11738 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
11739 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
11740 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
11741 return 128;
11742 }
11743 if (TREE_CODE (type) == ARRAY_TYPE)
11744 {
11745 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
11746 return 64;
11747 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
11748 return 128;
11749 }
11750 else if (TREE_CODE (type) == COMPLEX_TYPE)
11751 {
11752 if (TYPE_MODE (type) == DCmode && align < 64)
11753 return 64;
11754 if (TYPE_MODE (type) == XCmode && align < 128)
11755 return 128;
11756 }
11757 else if ((TREE_CODE (type) == RECORD_TYPE
11758 || TREE_CODE (type) == UNION_TYPE
11759 || TREE_CODE (type) == QUAL_UNION_TYPE)
11760 && TYPE_FIELDS (type))
11761 {
11762 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
11763 return 64;
11764 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
11765 return 128;
11766 }
11767 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
11768 || TREE_CODE (type) == INTEGER_TYPE)
11769 {
11770
11771 if (TYPE_MODE (type) == DFmode && align < 64)
11772 return 64;
11773 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
11774 return 128;
11775 }
11776 return align;
11777 }
11778 \f
11779 /* Emit RTL insns to initialize the variable parts of a trampoline.
11780 FNADDR is an RTX for the address of the function's pure code.
11781 CXT is an RTX for the static chain value for the function. */
11782 void
11783 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
11784 {
11785 if (!TARGET_64BIT)
11786 {
11787 /* Compute offset from the end of the jmp to the target function. */
11788 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
11789 plus_constant (tramp, 10),
11790 NULL_RTX, 1, OPTAB_DIRECT);
11791 emit_move_insn (gen_rtx_MEM (QImode, tramp),
11792 gen_int_mode (0xb9, QImode));
11793 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
11794 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
11795 gen_int_mode (0xe9, QImode));
11796 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
11797 }
11798 else
11799 {
11800 int offset = 0;
11801 /* Try to load the address using the shorter movl instead of movabs.
11802 We may want to support movq for kernel mode, but the kernel does not use
11803 trampolines at the moment. */
11804 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
11805 {
11806 fnaddr = copy_to_mode_reg (DImode, fnaddr);
11807 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11808 gen_int_mode (0xbb41, HImode));
11809 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
11810 gen_lowpart (SImode, fnaddr));
11811 offset += 6;
11812 }
11813 else
11814 {
11815 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11816 gen_int_mode (0xbb49, HImode));
11817 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
11818 fnaddr);
11819 offset += 10;
11820 }
11821 /* Load the static chain into r10 using movabs. */
11822 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11823 gen_int_mode (0xba49, HImode));
11824 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
11825 cxt);
11826 offset += 10;
11827 /* Jump to r11. */
11828 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11829 gen_int_mode (0xff49, HImode));
11830 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
11831 gen_int_mode (0xe3, QImode));
11832 offset += 3;
11833 if (offset > TRAMPOLINE_SIZE)
11834 abort ();
11835 }
11836
11837 #ifdef ENABLE_EXECUTE_STACK
11838 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
11839 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
11840 #endif
11841 }
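/* Illustrative decoding of the trampolines emitted above (not part of the
   original source); CXT, FNADDR and TRAMP stand for the run-time values:

   32-bit (10 bytes):
     b9 <CXT:4>                 movl    $CXT, %ecx
     e9 <FNADDR-TRAMP-10:4>     jmp     FNADDR

   64-bit, short form (FNADDR fits zero-extended in 32 bits):
     41 bb <FNADDR:4>           movl    $FNADDR, %r11d
   64-bit, movabs form:
     49 bb <FNADDR:8>           movabsq $FNADDR, %r11
   followed in either case by:
     49 ba <CXT:8>              movabsq $CXT, %r10
     49 ff e3                   jmpq    *%r11  */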
11842 \f
11843 #define def_builtin(MASK, NAME, TYPE, CODE) \
11844 do { \
11845 if ((MASK) & target_flags \
11846 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
11847 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
11848 NULL, NULL_TREE); \
11849 } while (0)
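/* Hypothetical example (not in the original source) of how the macro above
   is used when the individual builtins are registered later in this file:
     def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si",
                  int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
   The MASK_64BIT test keeps 64-bit-only builtins from being created when
   compiling for a 32-bit target.  */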
11850
11851 struct builtin_description
11852 {
11853 const unsigned int mask;
11854 const enum insn_code icode;
11855 const char *const name;
11856 const enum ix86_builtins code;
11857 const enum rtx_code comparison;
11858 const unsigned int flag;
11859 };
11860
11861 static const struct builtin_description bdesc_comi[] =
11862 {
11863 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
11864 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
11865 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
11866 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
11867 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
11868 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
11869 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
11870 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
11871 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
11872 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
11873 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
11874 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
11875 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
11876 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
11877 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
11878 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
11879 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
11880 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
11881 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
11882 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
11883 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
11884 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
11885 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
11886 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
11887 };
11888
11889 static const struct builtin_description bdesc_2arg[] =
11890 {
11891 /* SSE */
11892 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
11893 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
11894 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
11895 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
11896 { MASK_SSE, CODE_FOR_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
11897 { MASK_SSE, CODE_FOR_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
11898 { MASK_SSE, CODE_FOR_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
11899 { MASK_SSE, CODE_FOR_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
11900
11901 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
11902 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
11903 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
11904 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, 1 },
11905 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, 1 },
11906 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
11907 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, EQ, 0 },
11908 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, LT, 0 },
11909 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, LE, 0 },
11910 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, LT, 1 },
11911 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, LE, 1 },
11912 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, UNORDERED, 0 },
11913 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
11914 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
11915 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
11916 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
11917 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, EQ, 0 },
11918 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, LT, 0 },
11919 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, LE, 0 },
11920 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
11921
11922 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
11923 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
11924 { MASK_SSE, CODE_FOR_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
11925 { MASK_SSE, CODE_FOR_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
11926
11927 { MASK_SSE, CODE_FOR_sse_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
11928 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
11929 { MASK_SSE, CODE_FOR_sse_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
11930 { MASK_SSE, CODE_FOR_sse_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
11931
11932 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
11933 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
11934 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
11935 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
11936 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
11937
11938 /* MMX */
11939 { MASK_MMX, CODE_FOR_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
11940 { MASK_MMX, CODE_FOR_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
11941 { MASK_MMX, CODE_FOR_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
11942 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
11943 { MASK_MMX, CODE_FOR_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
11944 { MASK_MMX, CODE_FOR_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
11945 { MASK_MMX, CODE_FOR_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
11946 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
11947
11948 { MASK_MMX, CODE_FOR_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
11949 { MASK_MMX, CODE_FOR_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
11950 { MASK_MMX, CODE_FOR_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
11951 { MASK_MMX, CODE_FOR_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
11952 { MASK_MMX, CODE_FOR_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
11953 { MASK_MMX, CODE_FOR_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
11954 { MASK_MMX, CODE_FOR_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
11955 { MASK_MMX, CODE_FOR_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
11956
11957 { MASK_MMX, CODE_FOR_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
11958 { MASK_MMX, CODE_FOR_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
11959 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
11960
11961 { MASK_MMX, CODE_FOR_mmx_anddi3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
11962 { MASK_MMX, CODE_FOR_mmx_nanddi3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
11963 { MASK_MMX, CODE_FOR_mmx_iordi3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
11964 { MASK_MMX, CODE_FOR_mmx_xordi3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
11965
11966 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
11967 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
11968
11969 { MASK_MMX, CODE_FOR_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
11970 { MASK_MMX, CODE_FOR_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
11971 { MASK_MMX, CODE_FOR_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
11972 { MASK_MMX, CODE_FOR_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
11973 { MASK_MMX, CODE_FOR_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
11974 { MASK_MMX, CODE_FOR_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
11975
11976 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
11977 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
11978 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
11979 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
11980
11981 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
11982 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
11983 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
11984 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
11985 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
11986 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
11987
11988 /* Special. */
11989 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
11990 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
11991 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
11992
11993 { MASK_SSE, CODE_FOR_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
11994 { MASK_SSE, CODE_FOR_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
11995 { MASK_SSE | MASK_64BIT, CODE_FOR_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
11996
11997 { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
11998 { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
11999 { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
12000 { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
12001 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
12002 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
12003
12004 { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
12005 { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
12006 { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
12007 { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
12008 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
12009 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
12010
12011 { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
12012 { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
12013 { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
12014 { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
12015
12016 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
12017 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
12018
12019 /* SSE2 */
12020 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
12021 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
12022 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
12023 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
12024 { MASK_SSE2, CODE_FOR_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
12025 { MASK_SSE2, CODE_FOR_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
12026 { MASK_SSE2, CODE_FOR_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
12027 { MASK_SSE2, CODE_FOR_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
12028
12029 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
12030 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
12031 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
12032 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, 1 },
12033 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, 1 },
12034 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
12035 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, EQ, 0 },
12036 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, LT, 0 },
12037 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, LE, 0 },
12038 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, LT, 1 },
12039 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, LE, 1 },
12040 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, UNORDERED, 0 },
12041 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
12042 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
12043 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
12044 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
12045 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, EQ, 0 },
12046 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, LT, 0 },
12047 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, LE, 0 },
12048 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, UNORDERED, 0 },
12049
12050 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
12051 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
12052 { MASK_SSE2, CODE_FOR_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
12053 { MASK_SSE2, CODE_FOR_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
12054
12055 { MASK_SSE2, CODE_FOR_sse2_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
12056 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
12057 { MASK_SSE2, CODE_FOR_sse2_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
12058 { MASK_SSE2, CODE_FOR_sse2_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
12059
12060 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
12061 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
12062 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
12063
12064 /* SSE2 MMX */
12065 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
12066 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
12067 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
12068 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
12069 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
12070 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
12071 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
12072 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
12073
12074 { MASK_MMX, CODE_FOR_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
12075 { MASK_MMX, CODE_FOR_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
12076 { MASK_MMX, CODE_FOR_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
12077 { MASK_MMX, CODE_FOR_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
12078 { MASK_MMX, CODE_FOR_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
12079 { MASK_MMX, CODE_FOR_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
12080 { MASK_MMX, CODE_FOR_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
12081 { MASK_MMX, CODE_FOR_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
12082
12083 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
12084 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
12085
12086 { MASK_SSE2, CODE_FOR_sse2_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
12087 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
12088 { MASK_SSE2, CODE_FOR_sse2_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
12089 { MASK_SSE2, CODE_FOR_sse2_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
12090
12091 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
12092 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
12093
12094 { MASK_SSE2, CODE_FOR_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
12095 { MASK_SSE2, CODE_FOR_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
12096 { MASK_SSE2, CODE_FOR_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
12097 { MASK_SSE2, CODE_FOR_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
12098 { MASK_SSE2, CODE_FOR_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
12099 { MASK_SSE2, CODE_FOR_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
12100
12101 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
12102 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
12103 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
12104 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
12105
12106 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
12107 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
12108 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
12109 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
12110 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
12111 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
12112 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
12113 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
12114
12115 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
12116 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
12117 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
12118
12119 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
12120 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
12121
12122 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
12123 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
12124
12125 { MASK_SSE2, CODE_FOR_ashlv8hi3_ti, 0, IX86_BUILTIN_PSLLW128, 0, 0 },
12126 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
12127 { MASK_SSE2, CODE_FOR_ashlv4si3_ti, 0, IX86_BUILTIN_PSLLD128, 0, 0 },
12128 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
12129 { MASK_SSE2, CODE_FOR_ashlv2di3_ti, 0, IX86_BUILTIN_PSLLQ128, 0, 0 },
12130 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
12131
12132 { MASK_SSE2, CODE_FOR_lshrv8hi3_ti, 0, IX86_BUILTIN_PSRLW128, 0, 0 },
12133 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
12134 { MASK_SSE2, CODE_FOR_lshrv4si3_ti, 0, IX86_BUILTIN_PSRLD128, 0, 0 },
12135 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
12136 { MASK_SSE2, CODE_FOR_lshrv2di3_ti, 0, IX86_BUILTIN_PSRLQ128, 0, 0 },
12137 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
12138
12139 { MASK_SSE2, CODE_FOR_ashrv8hi3_ti, 0, IX86_BUILTIN_PSRAW128, 0, 0 },
12140 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
12141 { MASK_SSE2, CODE_FOR_ashrv4si3_ti, 0, IX86_BUILTIN_PSRAD128, 0, 0 },
12142 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
12143
12144 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
12145
12146 { MASK_SSE2, CODE_FOR_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
12147 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
12148 { MASK_SSE2, CODE_FOR_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
12149 { MASK_SSE2, CODE_FOR_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
12150
12151 /* SSE3 MMX */
12152 { MASK_SSE3, CODE_FOR_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
12153 { MASK_SSE3, CODE_FOR_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
12154 { MASK_SSE3, CODE_FOR_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
12155 { MASK_SSE3, CODE_FOR_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
12156 { MASK_SSE3, CODE_FOR_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
12157 { MASK_SSE3, CODE_FOR_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
12158 };
12159
12160 static const struct builtin_description bdesc_1arg[] =
12161 {
12162 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
12163 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
12164
12165 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
12166 { MASK_SSE, CODE_FOR_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
12167 { MASK_SSE, CODE_FOR_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
12168
12169 { MASK_SSE, CODE_FOR_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
12170 { MASK_SSE, CODE_FOR_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
12171 { MASK_SSE | MASK_64BIT, CODE_FOR_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
12172 { MASK_SSE, CODE_FOR_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
12173 { MASK_SSE, CODE_FOR_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
12174 { MASK_SSE | MASK_64BIT, CODE_FOR_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
12175
12176 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
12177 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
12178 { MASK_SSE2, CODE_FOR_sse2_movq2dq, 0, IX86_BUILTIN_MOVQ2DQ, 0, 0 },
12179 { MASK_SSE2, CODE_FOR_sse2_movdq2q, 0, IX86_BUILTIN_MOVDQ2Q, 0, 0 },
12180
12181 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
12182
12183 { MASK_SSE2, CODE_FOR_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
12184 { MASK_SSE2, CODE_FOR_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
12185
12186 { MASK_SSE2, CODE_FOR_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
12187 { MASK_SSE2, CODE_FOR_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
12188 { MASK_SSE2, CODE_FOR_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
12189 { MASK_SSE2, CODE_FOR_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
12190 { MASK_SSE2, CODE_FOR_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
12191
12192 { MASK_SSE2, CODE_FOR_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
12193
12194 { MASK_SSE2, CODE_FOR_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
12195 { MASK_SSE2, CODE_FOR_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
12196 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
12197 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
12198
12199 { MASK_SSE2, CODE_FOR_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
12200 { MASK_SSE2, CODE_FOR_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
12201 { MASK_SSE2, CODE_FOR_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
12202
12203 { MASK_SSE2, CODE_FOR_sse2_movq, 0, IX86_BUILTIN_MOVQ, 0, 0 },
12204
12205 /* SSE3 */
12206 { MASK_SSE3, CODE_FOR_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
12207 { MASK_SSE3, CODE_FOR_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
12208 { MASK_SSE3, CODE_FOR_movddup, 0, IX86_BUILTIN_MOVDDUP, 0, 0 }
12209 };
12210
12211 void
12212 ix86_init_builtins (void)
12213 {
12214 if (TARGET_MMX)
12215 ix86_init_mmx_sse_builtins ();
12216 }
12217
12218 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
12219 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
12220 builtins. */
12221 static void
12222 ix86_init_mmx_sse_builtins (void)
12223 {
12224 const struct builtin_description * d;
12225 size_t i;
12226
12227 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
12228 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12229 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
12230 tree V2DI_type_node = build_vector_type_for_mode (intDI_type_node, V2DImode);
12231 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
12232 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
12233 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
12234 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12235 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12236 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
12237
12238 tree pchar_type_node = build_pointer_type (char_type_node);
12239 tree pcchar_type_node = build_pointer_type (
12240 build_type_variant (char_type_node, 1, 0));
12241 tree pfloat_type_node = build_pointer_type (float_type_node);
12242 tree pcfloat_type_node = build_pointer_type (
12243 build_type_variant (float_type_node, 1, 0));
12244 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
12245 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
12246 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
12247
12248 /* Comparisons. */
12249 tree int_ftype_v4sf_v4sf
12250 = build_function_type_list (integer_type_node,
12251 V4SF_type_node, V4SF_type_node, NULL_TREE);
12252 tree v4si_ftype_v4sf_v4sf
12253 = build_function_type_list (V4SI_type_node,
12254 V4SF_type_node, V4SF_type_node, NULL_TREE);
12255 /* MMX/SSE/integer conversions. */
12256 tree int_ftype_v4sf
12257 = build_function_type_list (integer_type_node,
12258 V4SF_type_node, NULL_TREE);
12259 tree int64_ftype_v4sf
12260 = build_function_type_list (long_long_integer_type_node,
12261 V4SF_type_node, NULL_TREE);
12262 tree int_ftype_v8qi
12263 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
12264 tree v4sf_ftype_v4sf_int
12265 = build_function_type_list (V4SF_type_node,
12266 V4SF_type_node, integer_type_node, NULL_TREE);
12267 tree v4sf_ftype_v4sf_int64
12268 = build_function_type_list (V4SF_type_node,
12269 V4SF_type_node, long_long_integer_type_node,
12270 NULL_TREE);
12271 tree v4sf_ftype_v4sf_v2si
12272 = build_function_type_list (V4SF_type_node,
12273 V4SF_type_node, V2SI_type_node, NULL_TREE);
12274 tree int_ftype_v4hi_int
12275 = build_function_type_list (integer_type_node,
12276 V4HI_type_node, integer_type_node, NULL_TREE);
12277 tree v4hi_ftype_v4hi_int_int
12278 = build_function_type_list (V4HI_type_node, V4HI_type_node,
12279 integer_type_node, integer_type_node,
12280 NULL_TREE);
12281 /* Miscellaneous. */
12282 tree v8qi_ftype_v4hi_v4hi
12283 = build_function_type_list (V8QI_type_node,
12284 V4HI_type_node, V4HI_type_node, NULL_TREE);
12285 tree v4hi_ftype_v2si_v2si
12286 = build_function_type_list (V4HI_type_node,
12287 V2SI_type_node, V2SI_type_node, NULL_TREE);
12288 tree v4sf_ftype_v4sf_v4sf_int
12289 = build_function_type_list (V4SF_type_node,
12290 V4SF_type_node, V4SF_type_node,
12291 integer_type_node, NULL_TREE);
12292 tree v2si_ftype_v4hi_v4hi
12293 = build_function_type_list (V2SI_type_node,
12294 V4HI_type_node, V4HI_type_node, NULL_TREE);
12295 tree v4hi_ftype_v4hi_int
12296 = build_function_type_list (V4HI_type_node,
12297 V4HI_type_node, integer_type_node, NULL_TREE);
12298 tree v4hi_ftype_v4hi_di
12299 = build_function_type_list (V4HI_type_node,
12300 V4HI_type_node, long_long_unsigned_type_node,
12301 NULL_TREE);
12302 tree v2si_ftype_v2si_di
12303 = build_function_type_list (V2SI_type_node,
12304 V2SI_type_node, long_long_unsigned_type_node,
12305 NULL_TREE);
12306 tree void_ftype_void
12307 = build_function_type (void_type_node, void_list_node);
12308 tree void_ftype_unsigned
12309 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
12310 tree void_ftype_unsigned_unsigned
12311 = build_function_type_list (void_type_node, unsigned_type_node,
12312 unsigned_type_node, NULL_TREE);
12313 tree void_ftype_pcvoid_unsigned_unsigned
12314 = build_function_type_list (void_type_node, const_ptr_type_node,
12315 unsigned_type_node, unsigned_type_node,
12316 NULL_TREE);
12317 tree unsigned_ftype_void
12318 = build_function_type (unsigned_type_node, void_list_node);
12319 tree di_ftype_void
12320 = build_function_type (long_long_unsigned_type_node, void_list_node);
12321 tree v4sf_ftype_void
12322 = build_function_type (V4SF_type_node, void_list_node);
12323 tree v2si_ftype_v4sf
12324 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
12325 /* Loads/stores. */
12326 tree void_ftype_v8qi_v8qi_pchar
12327 = build_function_type_list (void_type_node,
12328 V8QI_type_node, V8QI_type_node,
12329 pchar_type_node, NULL_TREE);
12330 tree v4sf_ftype_pcfloat
12331 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
12332 /* @@@ the type is bogus */
12333 tree v4sf_ftype_v4sf_pv2si
12334 = build_function_type_list (V4SF_type_node,
12335 V4SF_type_node, pv2si_type_node, NULL_TREE);
12336 tree void_ftype_pv2si_v4sf
12337 = build_function_type_list (void_type_node,
12338 pv2si_type_node, V4SF_type_node, NULL_TREE);
12339 tree void_ftype_pfloat_v4sf
12340 = build_function_type_list (void_type_node,
12341 pfloat_type_node, V4SF_type_node, NULL_TREE);
12342 tree void_ftype_pdi_di
12343 = build_function_type_list (void_type_node,
12344 pdi_type_node, long_long_unsigned_type_node,
12345 NULL_TREE);
12346 tree void_ftype_pv2di_v2di
12347 = build_function_type_list (void_type_node,
12348 pv2di_type_node, V2DI_type_node, NULL_TREE);
12349 /* Normal vector unops. */
12350 tree v4sf_ftype_v4sf
12351 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12352
12353 /* Normal vector binops. */
12354 tree v4sf_ftype_v4sf_v4sf
12355 = build_function_type_list (V4SF_type_node,
12356 V4SF_type_node, V4SF_type_node, NULL_TREE);
12357 tree v8qi_ftype_v8qi_v8qi
12358 = build_function_type_list (V8QI_type_node,
12359 V8QI_type_node, V8QI_type_node, NULL_TREE);
12360 tree v4hi_ftype_v4hi_v4hi
12361 = build_function_type_list (V4HI_type_node,
12362 V4HI_type_node, V4HI_type_node, NULL_TREE);
12363 tree v2si_ftype_v2si_v2si
12364 = build_function_type_list (V2SI_type_node,
12365 V2SI_type_node, V2SI_type_node, NULL_TREE);
12366 tree di_ftype_di_di
12367 = build_function_type_list (long_long_unsigned_type_node,
12368 long_long_unsigned_type_node,
12369 long_long_unsigned_type_node, NULL_TREE);
12370
12371 tree v2si_ftype_v2sf
12372 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
12373 tree v2sf_ftype_v2si
12374 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
12375 tree v2si_ftype_v2si
12376 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
12377 tree v2sf_ftype_v2sf
12378 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
12379 tree v2sf_ftype_v2sf_v2sf
12380 = build_function_type_list (V2SF_type_node,
12381 V2SF_type_node, V2SF_type_node, NULL_TREE);
12382 tree v2si_ftype_v2sf_v2sf
12383 = build_function_type_list (V2SI_type_node,
12384 V2SF_type_node, V2SF_type_node, NULL_TREE);
12385 tree pint_type_node = build_pointer_type (integer_type_node);
12386 tree pcint_type_node = build_pointer_type (
12387 build_type_variant (integer_type_node, 1, 0));
12388 tree pdouble_type_node = build_pointer_type (double_type_node);
12389 tree pcdouble_type_node = build_pointer_type (
12390 build_type_variant (double_type_node, 1, 0));
12391 tree int_ftype_v2df_v2df
12392 = build_function_type_list (integer_type_node,
12393 V2DF_type_node, V2DF_type_node, NULL_TREE);
12394
12395 tree ti_ftype_void
12396 = build_function_type (intTI_type_node, void_list_node);
12397 tree v2di_ftype_void
12398 = build_function_type (V2DI_type_node, void_list_node);
12399 tree ti_ftype_ti_ti
12400 = build_function_type_list (intTI_type_node,
12401 intTI_type_node, intTI_type_node, NULL_TREE);
12402 tree void_ftype_pcvoid
12403 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
12404 tree v2di_ftype_di
12405 = build_function_type_list (V2DI_type_node,
12406 long_long_unsigned_type_node, NULL_TREE);
12407 tree di_ftype_v2di
12408 = build_function_type_list (long_long_unsigned_type_node,
12409 V2DI_type_node, NULL_TREE);
12410 tree v4sf_ftype_v4si
12411 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
12412 tree v4si_ftype_v4sf
12413 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
12414 tree v2df_ftype_v4si
12415 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
12416 tree v4si_ftype_v2df
12417 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
12418 tree v2si_ftype_v2df
12419 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
12420 tree v4sf_ftype_v2df
12421 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
12422 tree v2df_ftype_v2si
12423 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
12424 tree v2df_ftype_v4sf
12425 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
12426 tree int_ftype_v2df
12427 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
12428 tree int64_ftype_v2df
12429 = build_function_type_list (long_long_integer_type_node,
12430 V2DF_type_node, NULL_TREE);
12431 tree v2df_ftype_v2df_int
12432 = build_function_type_list (V2DF_type_node,
12433 V2DF_type_node, integer_type_node, NULL_TREE);
12434 tree v2df_ftype_v2df_int64
12435 = build_function_type_list (V2DF_type_node,
12436 V2DF_type_node, long_long_integer_type_node,
12437 NULL_TREE);
12438 tree v4sf_ftype_v4sf_v2df
12439 = build_function_type_list (V4SF_type_node,
12440 V4SF_type_node, V2DF_type_node, NULL_TREE);
12441 tree v2df_ftype_v2df_v4sf
12442 = build_function_type_list (V2DF_type_node,
12443 V2DF_type_node, V4SF_type_node, NULL_TREE);
12444 tree v2df_ftype_v2df_v2df_int
12445 = build_function_type_list (V2DF_type_node,
12446 V2DF_type_node, V2DF_type_node,
12447 integer_type_node,
12448 NULL_TREE);
12449 tree v2df_ftype_v2df_pv2si
12450 = build_function_type_list (V2DF_type_node,
12451 V2DF_type_node, pv2si_type_node, NULL_TREE);
12452 tree void_ftype_pv2si_v2df
12453 = build_function_type_list (void_type_node,
12454 pv2si_type_node, V2DF_type_node, NULL_TREE);
12455 tree void_ftype_pdouble_v2df
12456 = build_function_type_list (void_type_node,
12457 pdouble_type_node, V2DF_type_node, NULL_TREE);
12458 tree void_ftype_pint_int
12459 = build_function_type_list (void_type_node,
12460 pint_type_node, integer_type_node, NULL_TREE);
12461 tree void_ftype_v16qi_v16qi_pchar
12462 = build_function_type_list (void_type_node,
12463 V16QI_type_node, V16QI_type_node,
12464 pchar_type_node, NULL_TREE);
12465 tree v2df_ftype_pcdouble
12466 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
12467 tree v2df_ftype_v2df_v2df
12468 = build_function_type_list (V2DF_type_node,
12469 V2DF_type_node, V2DF_type_node, NULL_TREE);
12470 tree v16qi_ftype_v16qi_v16qi
12471 = build_function_type_list (V16QI_type_node,
12472 V16QI_type_node, V16QI_type_node, NULL_TREE);
12473 tree v8hi_ftype_v8hi_v8hi
12474 = build_function_type_list (V8HI_type_node,
12475 V8HI_type_node, V8HI_type_node, NULL_TREE);
12476 tree v4si_ftype_v4si_v4si
12477 = build_function_type_list (V4SI_type_node,
12478 V4SI_type_node, V4SI_type_node, NULL_TREE);
12479 tree v2di_ftype_v2di_v2di
12480 = build_function_type_list (V2DI_type_node,
12481 V2DI_type_node, V2DI_type_node, NULL_TREE);
12482 tree v2di_ftype_v2df_v2df
12483 = build_function_type_list (V2DI_type_node,
12484 V2DF_type_node, V2DF_type_node, NULL_TREE);
12485 tree v2df_ftype_v2df
12486 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12487 tree v2df_ftype_double
12488 = build_function_type_list (V2DF_type_node, double_type_node, NULL_TREE);
12489 tree v2df_ftype_double_double
12490 = build_function_type_list (V2DF_type_node,
12491 double_type_node, double_type_node, NULL_TREE);
12492 tree int_ftype_v8hi_int
12493 = build_function_type_list (integer_type_node,
12494 V8HI_type_node, integer_type_node, NULL_TREE);
12495 tree v8hi_ftype_v8hi_int_int
12496 = build_function_type_list (V8HI_type_node,
12497 V8HI_type_node, integer_type_node,
12498 integer_type_node, NULL_TREE);
12499 tree v2di_ftype_v2di_int
12500 = build_function_type_list (V2DI_type_node,
12501 V2DI_type_node, integer_type_node, NULL_TREE);
12502 tree v4si_ftype_v4si_int
12503 = build_function_type_list (V4SI_type_node,
12504 V4SI_type_node, integer_type_node, NULL_TREE);
12505 tree v8hi_ftype_v8hi_int
12506 = build_function_type_list (V8HI_type_node,
12507 V8HI_type_node, integer_type_node, NULL_TREE);
12508 tree v8hi_ftype_v8hi_v2di
12509 = build_function_type_list (V8HI_type_node,
12510 V8HI_type_node, V2DI_type_node, NULL_TREE);
12511 tree v4si_ftype_v4si_v2di
12512 = build_function_type_list (V4SI_type_node,
12513 V4SI_type_node, V2DI_type_node, NULL_TREE);
12514 tree v4si_ftype_v8hi_v8hi
12515 = build_function_type_list (V4SI_type_node,
12516 V8HI_type_node, V8HI_type_node, NULL_TREE);
12517 tree di_ftype_v8qi_v8qi
12518 = build_function_type_list (long_long_unsigned_type_node,
12519 V8QI_type_node, V8QI_type_node, NULL_TREE);
12520 tree di_ftype_v2si_v2si
12521 = build_function_type_list (long_long_unsigned_type_node,
12522 V2SI_type_node, V2SI_type_node, NULL_TREE);
12523 tree v2di_ftype_v16qi_v16qi
12524 = build_function_type_list (V2DI_type_node,
12525 V16QI_type_node, V16QI_type_node, NULL_TREE);
12526 tree v2di_ftype_v4si_v4si
12527 = build_function_type_list (V2DI_type_node,
12528 V4SI_type_node, V4SI_type_node, NULL_TREE);
12529 tree int_ftype_v16qi
12530 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
12531 tree v16qi_ftype_pcchar
12532 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
12533 tree void_ftype_pchar_v16qi
12534 = build_function_type_list (void_type_node,
12535 pchar_type_node, V16QI_type_node, NULL_TREE);
12536 tree v4si_ftype_pcint
12537 = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
12538 tree void_ftype_pcint_v4si
12539 = build_function_type_list (void_type_node,
12540 pcint_type_node, V4SI_type_node, NULL_TREE);
12541 tree v2di_ftype_v2di
12542 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
12543
12544 tree float80_type;
12545 tree float128_type;
12546
12547 /* The __float80 type. */
12548 if (TYPE_MODE (long_double_type_node) == XFmode)
12549 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
12550 "__float80");
12551 else
12552 {
12553 /* The __float80 type. */
12554 float80_type = make_node (REAL_TYPE);
12555 TYPE_PRECISION (float80_type) = 80;
12556 layout_type (float80_type);
12557 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
12558 }
12559
12560 float128_type = make_node (REAL_TYPE);
12561 TYPE_PRECISION (float128_type) = 128;
12562 layout_type (float128_type);
12563 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
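  /* Editorial note (illustrative, not part of the original source): once
     registered, these names become usable as type specifiers in user code,
     for example

         __float80  e;
         __float128 q;

     assuming the language front end honors register_builtin_type.  */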
12564
12565 /* Add all builtins that are more or less simple operations on two
12566 operands. */
12567 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12568 {
12569 /* Use one of the operands; the target can have a different mode for
12570 mask-generating compares. */
12571 enum machine_mode mode;
12572 tree type;
12573
12574 if (d->name == 0)
12575 continue;
12576 mode = insn_data[d->icode].operand[1].mode;
12577
12578 switch (mode)
12579 {
12580 case V16QImode:
12581 type = v16qi_ftype_v16qi_v16qi;
12582 break;
12583 case V8HImode:
12584 type = v8hi_ftype_v8hi_v8hi;
12585 break;
12586 case V4SImode:
12587 type = v4si_ftype_v4si_v4si;
12588 break;
12589 case V2DImode:
12590 type = v2di_ftype_v2di_v2di;
12591 break;
12592 case V2DFmode:
12593 type = v2df_ftype_v2df_v2df;
12594 break;
12595 case TImode:
12596 type = ti_ftype_ti_ti;
12597 break;
12598 case V4SFmode:
12599 type = v4sf_ftype_v4sf_v4sf;
12600 break;
12601 case V8QImode:
12602 type = v8qi_ftype_v8qi_v8qi;
12603 break;
12604 case V4HImode:
12605 type = v4hi_ftype_v4hi_v4hi;
12606 break;
12607 case V2SImode:
12608 type = v2si_ftype_v2si_v2si;
12609 break;
12610 case DImode:
12611 type = di_ftype_di_di;
12612 break;
12613
12614 default:
12615 abort ();
12616 }
12617
12618 /* Override for comparisons. */
12619 if (d->icode == CODE_FOR_maskcmpv4sf3
12620 || d->icode == CODE_FOR_maskncmpv4sf3
12621 || d->icode == CODE_FOR_vmmaskcmpv4sf3
12622 || d->icode == CODE_FOR_vmmaskncmpv4sf3)
12623 type = v4si_ftype_v4sf_v4sf;
12624
12625 if (d->icode == CODE_FOR_maskcmpv2df3
12626 || d->icode == CODE_FOR_maskncmpv2df3
12627 || d->icode == CODE_FOR_vmmaskcmpv2df3
12628 || d->icode == CODE_FOR_vmmaskncmpv2df3)
12629 type = v2di_ftype_v2df_v2df;
12630
12631 def_builtin (d->mask, d->name, type, d->code);
12632 }
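  /* Editorial example (illustrative, not part of the original source):
     under this mapping a two-operand builtin whose operand 1 mode is
     V8HImode, such as __builtin_ia32_paddw128, receives the type
     v8hi_ftype_v8hi_v8hi, while the mask-generating compares handled by
     the override above (e.g. CODE_FOR_maskcmpv4sf3) return an integer
     vector instead, v4si_ftype_v4sf_v4sf.  */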
12633
12634 /* Add the remaining MMX insns with somewhat more complicated types. */
12635 def_builtin (MASK_MMX, "__builtin_ia32_mmx_zero", di_ftype_void, IX86_BUILTIN_MMX_ZERO);
12636 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
12637 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
12638 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
12639 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
12640
12641 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
12642 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
12643 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
12644
12645 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
12646 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
12647
12648 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
12649 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
12650
12651 /* comi/ucomi insns. */
12652 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
12653 if (d->mask == MASK_SSE2)
12654 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
12655 else
12656 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
12657
12658 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
12659 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
12660 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
12661
12662 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
12663 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
12664 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
12665 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
12666 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
12667 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
12668 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
12669 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
12670 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
12671 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
12672 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
12673
12674 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pextrw", int_ftype_v4hi_int, IX86_BUILTIN_PEXTRW);
12675 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pinsrw", v4hi_ftype_v4hi_int_int, IX86_BUILTIN_PINSRW);
12676
12677 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
12678
12679 def_builtin (MASK_SSE, "__builtin_ia32_loadaps", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADAPS);
12680 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
12681 def_builtin (MASK_SSE, "__builtin_ia32_loadss", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADSS);
12682 def_builtin (MASK_SSE, "__builtin_ia32_storeaps", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREAPS);
12683 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
12684 def_builtin (MASK_SSE, "__builtin_ia32_storess", void_ftype_pfloat_v4sf, IX86_BUILTIN_STORESS);
12685
12686 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
12687 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
12688 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
12689 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
12690
12691 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
12692 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
12693 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
12694 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
12695
12696 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
12697
12698 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
12699
12700 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
12701 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
12702 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
12703 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
12704 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
12705 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
12706
12707 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
12708
12709 /* Original 3DNow! */
12710 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
12711 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
12712 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
12713 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
12714 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
12715 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
12716 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
12717 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
12718 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
12719 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
12720 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
12721 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
12722 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
12723 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
12724 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
12725 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
12726 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
12727 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
12728 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
12729 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
12730
12731 /* 3DNow! extension as used in the Athlon CPU. */
12732 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
12733 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
12734 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
12735 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
12736 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
12737 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
12738
12739 def_builtin (MASK_SSE, "__builtin_ia32_setzerops", v4sf_ftype_void, IX86_BUILTIN_SSE_ZERO);
12740
12741 /* SSE2 */
12742 def_builtin (MASK_SSE2, "__builtin_ia32_pextrw128", int_ftype_v8hi_int, IX86_BUILTIN_PEXTRW128);
12743 def_builtin (MASK_SSE2, "__builtin_ia32_pinsrw128", v8hi_ftype_v8hi_int_int, IX86_BUILTIN_PINSRW128);
12744
12745 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
12746 def_builtin (MASK_SSE2, "__builtin_ia32_movq2dq", v2di_ftype_di, IX86_BUILTIN_MOVQ2DQ);
12747 def_builtin (MASK_SSE2, "__builtin_ia32_movdq2q", di_ftype_v2di, IX86_BUILTIN_MOVDQ2Q);
12748
12749 def_builtin (MASK_SSE2, "__builtin_ia32_loadapd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADAPD);
12750 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
12751 def_builtin (MASK_SSE2, "__builtin_ia32_loadsd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADSD);
12752 def_builtin (MASK_SSE2, "__builtin_ia32_storeapd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREAPD);
12753 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
12754 def_builtin (MASK_SSE2, "__builtin_ia32_storesd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORESD);
12755
12756 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADHPD);
12757 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADLPD);
12758 def_builtin (MASK_SSE2, "__builtin_ia32_storehpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STOREHPD);
12759 def_builtin (MASK_SSE2, "__builtin_ia32_storelpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STORELPD);
12760
12761 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
12762 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
12763 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
12764 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
12765 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
12766
12767 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
12768 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
12769 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
12770 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
12771
12772 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
12773 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
12774
12775 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
12776
12777 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
12778 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
12779
12780 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
12781 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
12782 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
12783 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
12784 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
12785
12786 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
12787
12788 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
12789 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
12790 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
12791 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
12792
12793 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
12794 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
12795 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
12796
12797 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
12798 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
12799 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
12800 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
12801
12802 def_builtin (MASK_SSE2, "__builtin_ia32_setpd1", v2df_ftype_double, IX86_BUILTIN_SETPD1);
12803 def_builtin (MASK_SSE2, "__builtin_ia32_setpd", v2df_ftype_double_double, IX86_BUILTIN_SETPD);
12804 def_builtin (MASK_SSE2, "__builtin_ia32_setzeropd", ti_ftype_void, IX86_BUILTIN_CLRPD);
12805 def_builtin (MASK_SSE2, "__builtin_ia32_loadpd1", v2df_ftype_pcdouble, IX86_BUILTIN_LOADPD1);
12806 def_builtin (MASK_SSE2, "__builtin_ia32_loadrpd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADRPD);
12807 def_builtin (MASK_SSE2, "__builtin_ia32_storepd1", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREPD1);
12808 def_builtin (MASK_SSE2, "__builtin_ia32_storerpd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORERPD);
12809
12810 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
12811 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
12812 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
12813
12814 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqa", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQA);
12815 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
12816 def_builtin (MASK_SSE2, "__builtin_ia32_loadd", v4si_ftype_pcint, IX86_BUILTIN_LOADD);
12817 def_builtin (MASK_SSE2, "__builtin_ia32_storedqa", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQA);
12818 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
12819 def_builtin (MASK_SSE2, "__builtin_ia32_stored", void_ftype_pcint_v4si, IX86_BUILTIN_STORED);
12820 def_builtin (MASK_SSE2, "__builtin_ia32_movq", v2di_ftype_v2di, IX86_BUILTIN_MOVQ);
12821
12822 def_builtin (MASK_SSE, "__builtin_ia32_setzero128", v2di_ftype_void, IX86_BUILTIN_CLRTI);
12823
12824 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
12825 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
12826
12827 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
12828 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
12829 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
12830
12831 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
12832 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
12833 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
12834
12835 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
12836 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
12837
12838 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
12839 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
12840 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
12841 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
12842
12843 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
12844 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
12845 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
12846 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
12847
12848 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
12849 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
12850
12851 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
12852
12853 /* Prescott New Instructions. */
12854 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
12855 void_ftype_pcvoid_unsigned_unsigned,
12856 IX86_BUILTIN_MONITOR);
12857 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
12858 void_ftype_unsigned_unsigned,
12859 IX86_BUILTIN_MWAIT);
12860 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
12861 v4sf_ftype_v4sf,
12862 IX86_BUILTIN_MOVSHDUP);
12863 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
12864 v4sf_ftype_v4sf,
12865 IX86_BUILTIN_MOVSLDUP);
12866 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
12867 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
12868 def_builtin (MASK_SSE3, "__builtin_ia32_loadddup",
12869 v2df_ftype_pcdouble, IX86_BUILTIN_LOADDDUP);
12870 def_builtin (MASK_SSE3, "__builtin_ia32_movddup",
12871 v2df_ftype_v2df, IX86_BUILTIN_MOVDDUP);
12872 }
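/* Editorial sketch (assumptions, not part of the original source): user
   code normally reaches these builtins through the intrinsic headers
   rather than directly.  For instance xmmintrin.h is expected to provide
   a wrapper along the lines of

       static __inline __m128
       _mm_add_ps (__m128 __A, __m128 __B)
       {
         return (__m128) __builtin_ia32_addps ((__v4sf) __A, (__v4sf) __B);
       }

   so that each def_builtin above corresponds to one such intrinsic.  The
   header contents shown here are assumed, not taken from this file.  */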
12873
12874 /* Errors in the source file can cause expand_expr to return const0_rtx
12875 where we expect a vector. To avoid crashing, use one of the vector
12876 clear instructions. */
12877 static rtx
12878 safe_vector_operand (rtx x, enum machine_mode mode)
12879 {
12880 if (x != const0_rtx)
12881 return x;
12882 x = gen_reg_rtx (mode);
12883
12884 if (VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode))
12885 emit_insn (gen_mmx_clrdi (mode == DImode ? x
12886 : gen_rtx_SUBREG (DImode, x, 0)));
12887 else
12888 emit_insn (gen_sse_clrv4sf (mode == V4SFmode ? x
12889 : gen_rtx_SUBREG (V4SFmode, x, 0),
12890 CONST0_RTX (V4SFmode)));
12891 return x;
12892 }
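/* Editorial note (not part of the original source): for MMX modes other
   than DImode and SSE modes other than V4SFmode, the clear above is done
   through a same-size SUBREG (e.g. a V8QImode register viewed as DImode),
   so the existing gen_mmx_clrdi / gen_sse_clrv4sf patterns can be reused
   for every vector mode.  */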
12893
12894 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
12895
12896 static rtx
12897 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
12898 {
12899 rtx pat;
12900 tree arg0 = TREE_VALUE (arglist);
12901 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12902 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12903 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12904 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12905 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12906 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12907
12908 if (VECTOR_MODE_P (mode0))
12909 op0 = safe_vector_operand (op0, mode0);
12910 if (VECTOR_MODE_P (mode1))
12911 op1 = safe_vector_operand (op1, mode1);
12912
12913 if (! target
12914 || GET_MODE (target) != tmode
12915 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12916 target = gen_reg_rtx (tmode);
12917
12918 if (GET_MODE (op1) == SImode && mode1 == TImode)
12919 {
12920 rtx x = gen_reg_rtx (V4SImode);
12921 emit_insn (gen_sse2_loadd (x, op1));
12922 op1 = gen_lowpart (TImode, x);
12923 }
12924
12925 /* In case the insn wants input operands in modes different from
12926 the result, abort. */
12927 if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode)
12928 || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode))
12929 abort ();
12930
12931 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12932 op0 = copy_to_mode_reg (mode0, op0);
12933 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12934 op1 = copy_to_mode_reg (mode1, op1);
12935
12936 /* In the commutative cases, both op0 and op1 are nonimmediate_operand,
12937 yet at most one of the two may be a memory operand. This is normally
12938 enforced by expanders, but we didn't bother to create one here. */
12939 if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
12940 op0 = copy_to_mode_reg (mode0, op0);
12941
12942 pat = GEN_FCN (icode) (target, op0, op1);
12943 if (! pat)
12944 return 0;
12945 emit_insn (pat);
12946 return target;
12947 }
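/* Editorial sketch (assumed example, not part of the original source):
   for a call such as __builtin_ia32_paddw128 (a, b) this helper expands
   both arguments, copies them into registers whenever the insn predicates
   reject the expanded form, and emits the pattern produced by
   GEN_FCN (icode) (target, op0, op1) -- with icode presumably
   CODE_FOR_addv8hi3 for that particular builtin.  */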
12948
12949 /* Subroutine of ix86_expand_builtin to take care of stores. */
12950
12951 static rtx
12952 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
12953 {
12954 rtx pat;
12955 tree arg0 = TREE_VALUE (arglist);
12956 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12957 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12958 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12959 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
12960 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
12961
12962 if (VECTOR_MODE_P (mode1))
12963 op1 = safe_vector_operand (op1, mode1);
12964
12965 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12966 op1 = copy_to_mode_reg (mode1, op1);
12967
12968 pat = GEN_FCN (icode) (op0, op1);
12969 if (pat)
12970 emit_insn (pat);
12971 return 0;
12972 }
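/* Editorial note (not part of the original source): store builtins take
   the destination pointer first and the value second, as in
   __builtin_ia32_storeups (p, v); the helper wraps the pointer in a MEM
   of the insn's operand 0 mode and returns 0 because such a builtin
   produces no value.  */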
12973
12974 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
12975
12976 static rtx
12977 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
12978 rtx target, int do_load)
12979 {
12980 rtx pat;
12981 tree arg0 = TREE_VALUE (arglist);
12982 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12983 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12984 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12985
12986 if (! target
12987 || GET_MODE (target) != tmode
12988 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12989 target = gen_reg_rtx (tmode);
12990 if (do_load)
12991 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12992 else
12993 {
12994 if (VECTOR_MODE_P (mode0))
12995 op0 = safe_vector_operand (op0, mode0);
12996
12997 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12998 op0 = copy_to_mode_reg (mode0, op0);
12999 }
13000
13001 pat = GEN_FCN (icode) (target, op0);
13002 if (! pat)
13003 return 0;
13004 emit_insn (pat);
13005 return target;
13006 }
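/* Editorial note (not part of the original source): DO_LOAD distinguishes
   builtins whose single argument is a pointer to dereference (e.g.
   __builtin_ia32_loadups, expanded below with do_load == 1) from ordinary
   one-operand vector operations such as __builtin_ia32_sqrtps, which go
   through the bdesc_1arg table with do_load == 0.  */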
13007
13008 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
13009 sqrtss, rsqrtss, rcpss. */
13010
13011 static rtx
13012 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
13013 {
13014 rtx pat;
13015 tree arg0 = TREE_VALUE (arglist);
13016 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13017 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13018 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13019
13020 if (! target
13021 || GET_MODE (target) != tmode
13022 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13023 target = gen_reg_rtx (tmode);
13024
13025 if (VECTOR_MODE_P (mode0))
13026 op0 = safe_vector_operand (op0, mode0);
13027
13028 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13029 op0 = copy_to_mode_reg (mode0, op0);
13030
13031 op1 = op0;
13032 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
13033 op1 = copy_to_mode_reg (mode0, op1);
13034
13035 pat = GEN_FCN (icode) (target, op0, op1);
13036 if (! pat)
13037 return 0;
13038 emit_insn (pat);
13039 return target;
13040 }
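/* Editorial note (not part of the original source): sqrtss, rsqrtss and
   rcpss compute only the low element and take the remaining elements from
   a second input, so the single user-visible argument is passed twice
   (op1 = op0) to satisfy the two-input vm* patterns used above.  */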
13041
13042 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
13043
13044 static rtx
13045 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
13046 rtx target)
13047 {
13048 rtx pat;
13049 tree arg0 = TREE_VALUE (arglist);
13050 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13051 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13052 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13053 rtx op2;
13054 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
13055 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
13056 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
13057 enum rtx_code comparison = d->comparison;
13058
13059 if (VECTOR_MODE_P (mode0))
13060 op0 = safe_vector_operand (op0, mode0);
13061 if (VECTOR_MODE_P (mode1))
13062 op1 = safe_vector_operand (op1, mode1);
13063
13064 /* Swap operands if we have a comparison that isn't available in
13065 hardware. */
13066 if (d->flag)
13067 {
13068 rtx tmp = gen_reg_rtx (mode1);
13069 emit_move_insn (tmp, op1);
13070 op1 = op0;
13071 op0 = tmp;
13072 }
13073
13074 if (! target
13075 || GET_MODE (target) != tmode
13076 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
13077 target = gen_reg_rtx (tmode);
13078
13079 if (! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
13080 op0 = copy_to_mode_reg (mode0, op0);
13081 if (! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
13082 op1 = copy_to_mode_reg (mode1, op1);
13083
13084 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13085 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
13086 if (! pat)
13087 return 0;
13088 emit_insn (pat);
13089 return target;
13090 }
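/* Editorial note (partly assumed, not part of the original source): the
   operand swap keyed off d->flag is how comparisons without a direct
   cmpps/cmppd predicate encoding are handled; a "greater than" builtin is
   presumably listed in bdesc_2arg with the LT comparison and the swap flag
   set, since the hardware only encodes EQ/LT/LE and their negations.  */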
13091
13092 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
13093
13094 static rtx
13095 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
13096 rtx target)
13097 {
13098 rtx pat;
13099 tree arg0 = TREE_VALUE (arglist);
13100 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13101 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13102 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13103 rtx op2;
13104 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
13105 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
13106 enum rtx_code comparison = d->comparison;
13107
13108 if (VECTOR_MODE_P (mode0))
13109 op0 = safe_vector_operand (op0, mode0);
13110 if (VECTOR_MODE_P (mode1))
13111 op1 = safe_vector_operand (op1, mode1);
13112
13113 /* Swap operands if we have a comparison that isn't available in
13114 hardware. */
13115 if (d->flag)
13116 {
13117 rtx tmp = op1;
13118 op1 = op0;
13119 op0 = tmp;
13120 }
13121
13122 target = gen_reg_rtx (SImode);
13123 emit_move_insn (target, const0_rtx);
13124 target = gen_rtx_SUBREG (QImode, target, 0);
13125
13126 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
13127 op0 = copy_to_mode_reg (mode0, op0);
13128 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
13129 op1 = copy_to_mode_reg (mode1, op1);
13130
13131 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13132 pat = GEN_FCN (d->icode) (op0, op1);
13133 if (! pat)
13134 return 0;
13135 emit_insn (pat);
13136 emit_insn (gen_rtx_SET (VOIDmode,
13137 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
13138 gen_rtx_fmt_ee (comparison, QImode,
13139 SET_DEST (pat),
13140 const0_rtx)));
13141
13142 return SUBREG_REG (target);
13143 }
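/* Editorial note (not part of the original source): comi/ucomi builtins
   such as __builtin_ia32_comieq yield an int; the code above emits the
   compare, zeroes an SImode result, and then sets its low byte from the
   requested condition on the flags register (SET_DEST of the compare
   pattern).  */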
13144
13145 /* Expand an expression EXP that calls a built-in function,
13146 with result going to TARGET if that's convenient
13147 (and in mode MODE if that's convenient).
13148 SUBTARGET may be used as the target for computing one of EXP's operands.
13149 IGNORE is nonzero if the value is to be ignored. */
13150
13151 rtx
13152 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13153 enum machine_mode mode ATTRIBUTE_UNUSED,
13154 int ignore ATTRIBUTE_UNUSED)
13155 {
13156 const struct builtin_description *d;
13157 size_t i;
13158 enum insn_code icode;
13159 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
13160 tree arglist = TREE_OPERAND (exp, 1);
13161 tree arg0, arg1, arg2;
13162 rtx op0, op1, op2, pat;
13163 enum machine_mode tmode, mode0, mode1, mode2;
13164 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
13165
13166 switch (fcode)
13167 {
13168 case IX86_BUILTIN_EMMS:
13169 emit_insn (gen_emms ());
13170 return 0;
13171
13172 case IX86_BUILTIN_SFENCE:
13173 emit_insn (gen_sfence ());
13174 return 0;
13175
13176 case IX86_BUILTIN_PEXTRW:
13177 case IX86_BUILTIN_PEXTRW128:
13178 icode = (fcode == IX86_BUILTIN_PEXTRW
13179 ? CODE_FOR_mmx_pextrw
13180 : CODE_FOR_sse2_pextrw);
13181 arg0 = TREE_VALUE (arglist);
13182 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13183 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13184 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13185 tmode = insn_data[icode].operand[0].mode;
13186 mode0 = insn_data[icode].operand[1].mode;
13187 mode1 = insn_data[icode].operand[2].mode;
13188
13189 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13190 op0 = copy_to_mode_reg (mode0, op0);
13191 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13192 {
13193 error ("selector must be an integer constant in the range 0..%i",
13194 fcode == IX86_BUILTIN_PEXTRW ? 3:7);
13195 return gen_reg_rtx (tmode);
13196 }
13197 if (target == 0
13198 || GET_MODE (target) != tmode
13199 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13200 target = gen_reg_rtx (tmode);
13201 pat = GEN_FCN (icode) (target, op0, op1);
13202 if (! pat)
13203 return 0;
13204 emit_insn (pat);
13205 return target;
13206
13207 case IX86_BUILTIN_PINSRW:
13208 case IX86_BUILTIN_PINSRW128:
13209 icode = (fcode == IX86_BUILTIN_PINSRW
13210 ? CODE_FOR_mmx_pinsrw
13211 : CODE_FOR_sse2_pinsrw);
13212 arg0 = TREE_VALUE (arglist);
13213 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13214 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13215 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13216 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13217 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13218 tmode = insn_data[icode].operand[0].mode;
13219 mode0 = insn_data[icode].operand[1].mode;
13220 mode1 = insn_data[icode].operand[2].mode;
13221 mode2 = insn_data[icode].operand[3].mode;
13222
13223 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13224 op0 = copy_to_mode_reg (mode0, op0);
13225 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13226 op1 = copy_to_mode_reg (mode1, op1);
13227 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13228 {
13229 error ("selector must be an integer constant in the range 0..%i",
13230 fcode == IX86_BUILTIN_PINSRW ? 15:255);
13231 return const0_rtx;
13232 }
13233 if (target == 0
13234 || GET_MODE (target) != tmode
13235 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13236 target = gen_reg_rtx (tmode);
13237 pat = GEN_FCN (icode) (target, op0, op1, op2);
13238 if (! pat)
13239 return 0;
13240 emit_insn (pat);
13241 return target;
13242
13243 case IX86_BUILTIN_MASKMOVQ:
13244 case IX86_BUILTIN_MASKMOVDQU:
13245 icode = (fcode == IX86_BUILTIN_MASKMOVQ
13246 ? (TARGET_64BIT ? CODE_FOR_mmx_maskmovq_rex : CODE_FOR_mmx_maskmovq)
13247 : (TARGET_64BIT ? CODE_FOR_sse2_maskmovdqu_rex64
13248 : CODE_FOR_sse2_maskmovdqu));
13249 /* Note the arg order is different from the operand order. */
13250 arg1 = TREE_VALUE (arglist);
13251 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
13252 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13253 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13254 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13255 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13256 mode0 = insn_data[icode].operand[0].mode;
13257 mode1 = insn_data[icode].operand[1].mode;
13258 mode2 = insn_data[icode].operand[2].mode;
13259
13260 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13261 op0 = copy_to_mode_reg (mode0, op0);
13262 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13263 op1 = copy_to_mode_reg (mode1, op1);
13264 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
13265 op2 = copy_to_mode_reg (mode2, op2);
13266 pat = GEN_FCN (icode) (op0, op1, op2);
13267 if (! pat)
13268 return 0;
13269 emit_insn (pat);
13270 return 0;
13271
13272 case IX86_BUILTIN_SQRTSS:
13273 return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv4sf2, arglist, target);
13274 case IX86_BUILTIN_RSQRTSS:
13275 return ix86_expand_unop1_builtin (CODE_FOR_vmrsqrtv4sf2, arglist, target);
13276 case IX86_BUILTIN_RCPSS:
13277 return ix86_expand_unop1_builtin (CODE_FOR_vmrcpv4sf2, arglist, target);
13278
13279 case IX86_BUILTIN_LOADAPS:
13280 return ix86_expand_unop_builtin (CODE_FOR_sse_movaps, arglist, target, 1);
13281
13282 case IX86_BUILTIN_LOADUPS:
13283 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
13284
13285 case IX86_BUILTIN_STOREAPS:
13286 return ix86_expand_store_builtin (CODE_FOR_sse_movaps, arglist);
13287
13288 case IX86_BUILTIN_STOREUPS:
13289 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
13290
13291 case IX86_BUILTIN_LOADSS:
13292 return ix86_expand_unop_builtin (CODE_FOR_sse_loadss, arglist, target, 1);
13293
13294 case IX86_BUILTIN_STORESS:
13295 return ix86_expand_store_builtin (CODE_FOR_sse_storess, arglist);
13296
13297 case IX86_BUILTIN_LOADHPS:
13298 case IX86_BUILTIN_LOADLPS:
13299 case IX86_BUILTIN_LOADHPD:
13300 case IX86_BUILTIN_LOADLPD:
13301 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_movhps
13302 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_movlps
13303 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_movhpd
13304 : CODE_FOR_sse2_movsd);
13305 arg0 = TREE_VALUE (arglist);
13306 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13307 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13308 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13309 tmode = insn_data[icode].operand[0].mode;
13310 mode0 = insn_data[icode].operand[1].mode;
13311 mode1 = insn_data[icode].operand[2].mode;
13312
13313 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13314 op0 = copy_to_mode_reg (mode0, op0);
13315 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
13316 if (target == 0
13317 || GET_MODE (target) != tmode
13318 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13319 target = gen_reg_rtx (tmode);
13320 pat = GEN_FCN (icode) (target, op0, op1);
13321 if (! pat)
13322 return 0;
13323 emit_insn (pat);
13324 return target;
13325
13326 case IX86_BUILTIN_STOREHPS:
13327 case IX86_BUILTIN_STORELPS:
13328 case IX86_BUILTIN_STOREHPD:
13329 case IX86_BUILTIN_STORELPD:
13330 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_movhps
13331 : fcode == IX86_BUILTIN_STORELPS ? CODE_FOR_sse_movlps
13332 : fcode == IX86_BUILTIN_STOREHPD ? CODE_FOR_sse2_movhpd
13333 : CODE_FOR_sse2_movsd);
13334 arg0 = TREE_VALUE (arglist);
13335 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13336 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13337 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13338 mode0 = insn_data[icode].operand[1].mode;
13339 mode1 = insn_data[icode].operand[2].mode;
13340
13341 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13342 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13343 op1 = copy_to_mode_reg (mode1, op1);
13344
13345 pat = GEN_FCN (icode) (op0, op0, op1);
13346 if (! pat)
13347 return 0;
13348 emit_insn (pat);
13349 return 0;
13350
13351 case IX86_BUILTIN_MOVNTPS:
13352 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
13353 case IX86_BUILTIN_MOVNTQ:
13354 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
13355
13356 case IX86_BUILTIN_LDMXCSR:
13357 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
13358 target = assign_386_stack_local (SImode, 0);
13359 emit_move_insn (target, op0);
13360 emit_insn (gen_ldmxcsr (target));
13361 return 0;
13362
13363 case IX86_BUILTIN_STMXCSR:
13364 target = assign_386_stack_local (SImode, 0);
13365 emit_insn (gen_stmxcsr (target));
13366 return copy_to_mode_reg (SImode, target);
13367
13368 case IX86_BUILTIN_SHUFPS:
13369 case IX86_BUILTIN_SHUFPD:
13370 icode = (fcode == IX86_BUILTIN_SHUFPS
13371 ? CODE_FOR_sse_shufps
13372 : CODE_FOR_sse2_shufpd);
13373 arg0 = TREE_VALUE (arglist);
13374 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13375 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13376 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13377 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13378 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13379 tmode = insn_data[icode].operand[0].mode;
13380 mode0 = insn_data[icode].operand[1].mode;
13381 mode1 = insn_data[icode].operand[2].mode;
13382 mode2 = insn_data[icode].operand[3].mode;
13383
13384 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13385 op0 = copy_to_mode_reg (mode0, op0);
13386 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13387 op1 = copy_to_mode_reg (mode1, op1);
13388 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13389 {
13390 /* @@@ better error message */
13391 error ("mask must be an immediate");
13392 return gen_reg_rtx (tmode);
13393 }
13394 if (target == 0
13395 || GET_MODE (target) != tmode
13396 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13397 target = gen_reg_rtx (tmode);
13398 pat = GEN_FCN (icode) (target, op0, op1, op2);
13399 if (! pat)
13400 return 0;
13401 emit_insn (pat);
13402 return target;
13403
13404 case IX86_BUILTIN_PSHUFW:
13405 case IX86_BUILTIN_PSHUFD:
13406 case IX86_BUILTIN_PSHUFHW:
13407 case IX86_BUILTIN_PSHUFLW:
13408 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
13409 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
13410 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
13411 : CODE_FOR_mmx_pshufw);
13412 arg0 = TREE_VALUE (arglist);
13413 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13414 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13415 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13416 tmode = insn_data[icode].operand[0].mode;
13417 mode1 = insn_data[icode].operand[1].mode;
13418 mode2 = insn_data[icode].operand[2].mode;
13419
13420 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
13421 op0 = copy_to_mode_reg (mode1, op0);
13422 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
13423 {
13424 /* @@@ better error message */
13425 error ("mask must be an immediate");
13426 return const0_rtx;
13427 }
13428 if (target == 0
13429 || GET_MODE (target) != tmode
13430 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13431 target = gen_reg_rtx (tmode);
13432 pat = GEN_FCN (icode) (target, op0, op1);
13433 if (! pat)
13434 return 0;
13435 emit_insn (pat);
13436 return target;
13437
13438 case IX86_BUILTIN_PSLLDQI128:
13439 case IX86_BUILTIN_PSRLDQI128:
13440 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
13441 : CODE_FOR_sse2_lshrti3);
13442 arg0 = TREE_VALUE (arglist);
13443 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13444 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13445 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13446 tmode = insn_data[icode].operand[0].mode;
13447 mode1 = insn_data[icode].operand[1].mode;
13448 mode2 = insn_data[icode].operand[2].mode;
13449
13450 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
13451 {
13452 op0 = copy_to_reg (op0);
13453 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
13454 }
13455 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
13456 {
13457 error ("shift must be an immediate");
13458 return const0_rtx;
13459 }
13460 target = gen_reg_rtx (V2DImode);
13461 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
13462 if (! pat)
13463 return 0;
13464 emit_insn (pat);
13465 return target;
13466
13467 case IX86_BUILTIN_FEMMS:
13468 emit_insn (gen_femms ());
13469 return NULL_RTX;
13470
13471 case IX86_BUILTIN_PAVGUSB:
13472 return ix86_expand_binop_builtin (CODE_FOR_pavgusb, arglist, target);
13473
13474 case IX86_BUILTIN_PF2ID:
13475 return ix86_expand_unop_builtin (CODE_FOR_pf2id, arglist, target, 0);
13476
13477 case IX86_BUILTIN_PFACC:
13478 return ix86_expand_binop_builtin (CODE_FOR_pfacc, arglist, target);
13479
13480 case IX86_BUILTIN_PFADD:
13481 return ix86_expand_binop_builtin (CODE_FOR_addv2sf3, arglist, target);
13482
13483 case IX86_BUILTIN_PFCMPEQ:
13484 return ix86_expand_binop_builtin (CODE_FOR_eqv2sf3, arglist, target);
13485
13486 case IX86_BUILTIN_PFCMPGE:
13487 return ix86_expand_binop_builtin (CODE_FOR_gev2sf3, arglist, target);
13488
13489 case IX86_BUILTIN_PFCMPGT:
13490 return ix86_expand_binop_builtin (CODE_FOR_gtv2sf3, arglist, target);
13491
13492 case IX86_BUILTIN_PFMAX:
13493 return ix86_expand_binop_builtin (CODE_FOR_pfmaxv2sf3, arglist, target);
13494
13495 case IX86_BUILTIN_PFMIN:
13496 return ix86_expand_binop_builtin (CODE_FOR_pfminv2sf3, arglist, target);
13497
13498 case IX86_BUILTIN_PFMUL:
13499 return ix86_expand_binop_builtin (CODE_FOR_mulv2sf3, arglist, target);
13500
13501 case IX86_BUILTIN_PFRCP:
13502 return ix86_expand_unop_builtin (CODE_FOR_pfrcpv2sf2, arglist, target, 0);
13503
13504 case IX86_BUILTIN_PFRCPIT1:
13505 return ix86_expand_binop_builtin (CODE_FOR_pfrcpit1v2sf3, arglist, target);
13506
13507 case IX86_BUILTIN_PFRCPIT2:
13508 return ix86_expand_binop_builtin (CODE_FOR_pfrcpit2v2sf3, arglist, target);
13509
13510 case IX86_BUILTIN_PFRSQIT1:
13511 return ix86_expand_binop_builtin (CODE_FOR_pfrsqit1v2sf3, arglist, target);
13512
13513 case IX86_BUILTIN_PFRSQRT:
13514 return ix86_expand_unop_builtin (CODE_FOR_pfrsqrtv2sf2, arglist, target, 0);
13515
13516 case IX86_BUILTIN_PFSUB:
13517 return ix86_expand_binop_builtin (CODE_FOR_subv2sf3, arglist, target);
13518
13519 case IX86_BUILTIN_PFSUBR:
13520 return ix86_expand_binop_builtin (CODE_FOR_subrv2sf3, arglist, target);
13521
13522 case IX86_BUILTIN_PI2FD:
13523 return ix86_expand_unop_builtin (CODE_FOR_floatv2si2, arglist, target, 0);
13524
13525 case IX86_BUILTIN_PMULHRW:
13526 return ix86_expand_binop_builtin (CODE_FOR_pmulhrwv4hi3, arglist, target);
13527
13528 case IX86_BUILTIN_PF2IW:
13529 return ix86_expand_unop_builtin (CODE_FOR_pf2iw, arglist, target, 0);
13530
13531 case IX86_BUILTIN_PFNACC:
13532 return ix86_expand_binop_builtin (CODE_FOR_pfnacc, arglist, target);
13533
13534 case IX86_BUILTIN_PFPNACC:
13535 return ix86_expand_binop_builtin (CODE_FOR_pfpnacc, arglist, target);
13536
13537 case IX86_BUILTIN_PI2FW:
13538 return ix86_expand_unop_builtin (CODE_FOR_pi2fw, arglist, target, 0);
13539
13540 case IX86_BUILTIN_PSWAPDSI:
13541 return ix86_expand_unop_builtin (CODE_FOR_pswapdv2si2, arglist, target, 0);
13542
13543 case IX86_BUILTIN_PSWAPDSF:
13544 return ix86_expand_unop_builtin (CODE_FOR_pswapdv2sf2, arglist, target, 0);
13545
13546 case IX86_BUILTIN_SSE_ZERO:
13547 target = gen_reg_rtx (V4SFmode);
13548 emit_insn (gen_sse_clrv4sf (target, CONST0_RTX (V4SFmode)));
13549 return target;
13550
13551 case IX86_BUILTIN_MMX_ZERO:
13552 target = gen_reg_rtx (DImode);
13553 emit_insn (gen_mmx_clrdi (target));
13554 return target;
13555
13556 case IX86_BUILTIN_CLRTI:
13557 target = gen_reg_rtx (V2DImode);
13558 emit_insn (gen_sse2_clrti (simplify_gen_subreg (TImode, target, V2DImode, 0)));
13559 return target;
13560
13561
13562 case IX86_BUILTIN_SQRTSD:
13563 return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv2df2, arglist, target);
13564 case IX86_BUILTIN_LOADAPD:
13565 return ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist, target, 1);
13566 case IX86_BUILTIN_LOADUPD:
13567 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
13568
13569 case IX86_BUILTIN_STOREAPD:
13570 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
13571 case IX86_BUILTIN_STOREUPD:
13572 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
13573
13574 case IX86_BUILTIN_LOADSD:
13575 return ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist, target, 1);
13576
13577 case IX86_BUILTIN_STORESD:
13578 return ix86_expand_store_builtin (CODE_FOR_sse2_storesd, arglist);
13579
13580 case IX86_BUILTIN_SETPD1:
13581 target = assign_386_stack_local (DFmode, 0);
13582 arg0 = TREE_VALUE (arglist);
13583 emit_move_insn (adjust_address (target, DFmode, 0),
13584 expand_expr (arg0, NULL_RTX, VOIDmode, 0));
13585 op0 = gen_reg_rtx (V2DFmode);
13586 emit_insn (gen_sse2_loadsd (op0, adjust_address (target, V2DFmode, 0)));
13587 emit_insn (gen_sse2_shufpd (op0, op0, op0, const0_rtx));
13588 return op0;
13589
13590 case IX86_BUILTIN_SETPD:
13591 target = assign_386_stack_local (V2DFmode, 0);
13592 arg0 = TREE_VALUE (arglist);
13593 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13594 emit_move_insn (adjust_address (target, DFmode, 0),
13595 expand_expr (arg0, NULL_RTX, VOIDmode, 0));
13596 emit_move_insn (adjust_address (target, DFmode, 8),
13597 expand_expr (arg1, NULL_RTX, VOIDmode, 0));
13598 op0 = gen_reg_rtx (V2DFmode);
13599 emit_insn (gen_sse2_movapd (op0, target));
13600 return op0;
13601
13602 case IX86_BUILTIN_LOADRPD:
13603 target = ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist,
13604 gen_reg_rtx (V2DFmode), 1);
13605 emit_insn (gen_sse2_shufpd (target, target, target, const1_rtx));
13606 return target;
13607
13608 case IX86_BUILTIN_LOADPD1:
13609 target = ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist,
13610 gen_reg_rtx (V2DFmode), 1);
13611 emit_insn (gen_sse2_shufpd (target, target, target, const0_rtx));
13612 return target;
13613
13614 case IX86_BUILTIN_STOREPD1:
13615 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
13616 case IX86_BUILTIN_STORERPD:
13617 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
13618
13619 case IX86_BUILTIN_CLRPD:
13620 target = gen_reg_rtx (V2DFmode);
13621 emit_insn (gen_sse_clrv2df (target));
13622 return target;
13623
13624 case IX86_BUILTIN_MFENCE:
13625 emit_insn (gen_sse2_mfence ());
13626 return 0;
13627 case IX86_BUILTIN_LFENCE:
13628 emit_insn (gen_sse2_lfence ());
13629 return 0;
13630
13631 case IX86_BUILTIN_CLFLUSH:
13632 arg0 = TREE_VALUE (arglist);
13633 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13634 icode = CODE_FOR_sse2_clflush;
13635 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
13636 op0 = copy_to_mode_reg (Pmode, op0);
13637
13638 emit_insn (gen_sse2_clflush (op0));
13639 return 0;
13640
13641 case IX86_BUILTIN_MOVNTPD:
13642 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
13643 case IX86_BUILTIN_MOVNTDQ:
13644 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
13645 case IX86_BUILTIN_MOVNTI:
13646 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
13647
13648 case IX86_BUILTIN_LOADDQA:
13649 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqa, arglist, target, 1);
13650 case IX86_BUILTIN_LOADDQU:
13651 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
13652 case IX86_BUILTIN_LOADD:
13653 return ix86_expand_unop_builtin (CODE_FOR_sse2_loadd, arglist, target, 1);
13654
13655 case IX86_BUILTIN_STOREDQA:
13656 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqa, arglist);
13657 case IX86_BUILTIN_STOREDQU:
13658 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
13659 case IX86_BUILTIN_STORED:
13660 return ix86_expand_store_builtin (CODE_FOR_sse2_stored, arglist);
13661
13662 case IX86_BUILTIN_MONITOR:
13663 arg0 = TREE_VALUE (arglist);
13664 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13665 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13666 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13667 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13668 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13669 if (!REG_P (op0))
13670 op0 = copy_to_mode_reg (SImode, op0);
13671 if (!REG_P (op1))
13672 op1 = copy_to_mode_reg (SImode, op1);
13673 if (!REG_P (op2))
13674 op2 = copy_to_mode_reg (SImode, op2);
13675 emit_insn (gen_monitor (op0, op1, op2));
13676 return 0;
13677
13678 case IX86_BUILTIN_MWAIT:
13679 arg0 = TREE_VALUE (arglist);
13680 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13681 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13682 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13683 if (!REG_P (op0))
13684 op0 = copy_to_mode_reg (SImode, op0);
13685 if (!REG_P (op1))
13686 op1 = copy_to_mode_reg (SImode, op1);
13687 emit_insn (gen_mwait (op0, op1));
13688 return 0;
13689
13690 case IX86_BUILTIN_LOADDDUP:
13691 return ix86_expand_unop_builtin (CODE_FOR_loadddup, arglist, target, 1);
13692
13693 case IX86_BUILTIN_LDDQU:
13694 return ix86_expand_unop_builtin (CODE_FOR_lddqu, arglist, target,
13695 1);
13696
13697 default:
13698 break;
13699 }
13700
13701 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13702 if (d->code == fcode)
13703 {
13704 /* Compares are treated specially. */
13705 if (d->icode == CODE_FOR_maskcmpv4sf3
13706 || d->icode == CODE_FOR_vmmaskcmpv4sf3
13707 || d->icode == CODE_FOR_maskncmpv4sf3
13708 || d->icode == CODE_FOR_vmmaskncmpv4sf3
13709 || d->icode == CODE_FOR_maskcmpv2df3
13710 || d->icode == CODE_FOR_vmmaskcmpv2df3
13711 || d->icode == CODE_FOR_maskncmpv2df3
13712 || d->icode == CODE_FOR_vmmaskncmpv2df3)
13713 return ix86_expand_sse_compare (d, arglist, target);
13714
13715 return ix86_expand_binop_builtin (d->icode, arglist, target);
13716 }
13717
13718 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13719 if (d->code == fcode)
13720 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
13721
13722 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
13723 if (d->code == fcode)
13724 return ix86_expand_sse_comi (d, arglist, target);
13725
13726 /* @@@ Should really do something sensible here. */
13727 return 0;
13728 }
13729
13730 /* Store OPERAND to memory after reload has completed. This means
13731 that we can't easily use assign_stack_local. */
13732 rtx
13733 ix86_force_to_memory (enum machine_mode mode, rtx operand)
13734 {
13735 rtx result;
13736 if (!reload_completed)
13737 abort ();
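/* With a red zone (the 128 bytes below the stack pointer that the x86-64
   ABI reserves), the operand can simply be stored below %rsp without
   adjusting the stack pointer at all.  */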
13738 if (TARGET_RED_ZONE)
13739 {
13740 result = gen_rtx_MEM (mode,
13741 gen_rtx_PLUS (Pmode,
13742 stack_pointer_rtx,
13743 GEN_INT (-RED_ZONE_SIZE)));
13744 emit_move_insn (result, operand);
13745 }
13746 else if (!TARGET_RED_ZONE && TARGET_64BIT)
13747 {
13748 switch (mode)
13749 {
13750 case HImode:
13751 case SImode:
13752 operand = gen_lowpart (DImode, operand);
13753 /* FALLTHRU */
13754 case DImode:
13755 emit_insn (
13756 gen_rtx_SET (VOIDmode,
13757 gen_rtx_MEM (DImode,
13758 gen_rtx_PRE_DEC (DImode,
13759 stack_pointer_rtx)),
13760 operand));
13761 break;
13762 default:
13763 abort ();
13764 }
13765 result = gen_rtx_MEM (mode, stack_pointer_rtx);
13766 }
13767 else
13768 {
13769 switch (mode)
13770 {
13771 case DImode:
13772 {
13773 rtx operands[2];
13774 split_di (&operand, 1, operands, operands + 1);
13775 emit_insn (
13776 gen_rtx_SET (VOIDmode,
13777 gen_rtx_MEM (SImode,
13778 gen_rtx_PRE_DEC (Pmode,
13779 stack_pointer_rtx)),
13780 operands[1]));
13781 emit_insn (
13782 gen_rtx_SET (VOIDmode,
13783 gen_rtx_MEM (SImode,
13784 gen_rtx_PRE_DEC (Pmode,
13785 stack_pointer_rtx)),
13786 operands[0]));
13787 }
13788 break;
13789 case HImode:
13790 /* It is better to store HImodes as SImodes. */
13791 if (!TARGET_PARTIAL_REG_STALL)
13792 operand = gen_lowpart (SImode, operand);
13793 /* FALLTHRU */
13794 case SImode:
13795 emit_insn (
13796 gen_rtx_SET (VOIDmode,
13797 gen_rtx_MEM (GET_MODE (operand),
13798 gen_rtx_PRE_DEC (SImode,
13799 stack_pointer_rtx)),
13800 operand));
13801 break;
13802 default:
13803 abort ();
13804 }
13805 result = gen_rtx_MEM (mode, stack_pointer_rtx);
13806 }
13807 return result;
13808 }
13809
13810 /* Free the operand from memory. */
13811 void
13812 ix86_free_from_memory (enum machine_mode mode)
13813 {
13814 if (!TARGET_RED_ZONE)
13815 {
13816 int size;
13817
13818 if (mode == DImode || TARGET_64BIT)
13819 size = 8;
13820 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
13821 size = 2;
13822 else
13823 size = 4;
13824 /* Use LEA to deallocate stack space. In peephole2 it will be converted
13825 to a pop or add instruction if registers are available. */
13826 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13827 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
13828 GEN_INT (size))));
13829 }
13830 }
13831
13832 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
13833 QImode must go into class Q_REGS.
13834 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
13835 movdf to do mem-to-mem moves through integer regs. */
13836 enum reg_class
13837 ix86_preferred_reload_class (rtx x, enum reg_class class)
13838 {
13839 if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x)))
13840 return NO_REGS;
13841 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
13842 {
13843 /* SSE can't load any constant directly yet. */
13844 if (SSE_CLASS_P (class))
13845 return NO_REGS;
13846 /* Floats can load 0 and 1. */
13847 if (MAYBE_FLOAT_CLASS_P (class) && standard_80387_constant_p (x))
13848 {
13849 /* Limit class to non-SSE. Use GENERAL_REGS if possible. */
13850 if (MAYBE_SSE_CLASS_P (class))
13851 return (reg_class_subset_p (class, GENERAL_REGS)
13852 ? GENERAL_REGS : FLOAT_REGS);
13853 else
13854 return class;
13855 }
13856 /* General regs can load everything. */
13857 if (reg_class_subset_p (class, GENERAL_REGS))
13858 return GENERAL_REGS;
13859 /* In case we haven't resolved FLOAT or SSE yet, give up. */
13860 if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class))
13861 return NO_REGS;
13862 }
13863 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
13864 return NO_REGS;
13865 if (GET_MODE (x) == QImode && ! reg_class_subset_p (class, Q_REGS))
13866 return Q_REGS;
13867 return class;
13868 }
13869
13870 /* If we are copying between general and FP registers, we need a memory
13871 location. The same is true for SSE and MMX registers.
13872
13873 The macro can't work reliably when one of the CLASSES is a class containing
13874 registers from multiple units (SSE, MMX, integer). We avoid this by never
13875 combining those units in a single alternative in the machine description.
13876 Ensure that this constraint holds to avoid unexpected surprises.
13877
13878 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
13879 enforce these sanity checks. */
13880 int
13881 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
13882 enum machine_mode mode, int strict)
13883 {
13884 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
13885 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
13886 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
13887 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
13888 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
13889 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
13890 {
13891 if (strict)
13892 abort ();
13893 else
13894 return 1;
13895 }
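/* A memory temporary is needed for x87 <-> non-x87 moves, and for moves
   that cross the SSE or MMX unit boundary unless the mode fits a single
   integer register (SImode, or DImode on 64-bit) and either direct
   inter-unit moves are enabled or we are optimizing for size.  */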
13896 return (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2)
13897 || ((SSE_CLASS_P (class1) != SSE_CLASS_P (class2)
13898 || MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
13899 && ((mode != SImode && (mode != DImode || !TARGET_64BIT))
13900 || (!TARGET_INTER_UNIT_MOVES && !optimize_size))));
13901 }
13902 /* Return the cost of moving data from a register in class CLASS1 to
13903 one in class CLASS2.
13904
13905 It is not required that the cost always equal 2 when FROM is the same as TO;
13906 on some machines it is expensive to move between registers if they are not
13907 general registers. */
13908 int
13909 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
13910 enum reg_class class2)
13911 {
13912 /* In case we require secondary memory, compute cost of the store followed
13913 by load. In order to avoid bad register allocation choices, we need
13914 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
13915
13916 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
13917 {
13918 int cost = 1;
13919
13920 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
13921 MEMORY_MOVE_COST (mode, class1, 1));
13922 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
13923 MEMORY_MOVE_COST (mode, class2, 1));
13924
13925 /* In case of copying from a general purpose register we may emit multiple
13926 stores followed by a single load, causing a memory size mismatch stall.
13927 Count this as an arbitrarily high cost of 20.
13928 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
13929 cost += 20;
13930
13931 /* In the case of FP/MMX moves, the registers actually overlap, and we
13932 have to switch modes in order to treat them differently. */
13933 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
13934 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
13935 cost += 20;
13936
13937 return cost;
13938 }
13939
13940 /* Moves between SSE/MMX and integer unit are expensive. */
13941 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
13942 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
13943 return ix86_cost->mmxsse_to_integer;
13944 if (MAYBE_FLOAT_CLASS_P (class1))
13945 return ix86_cost->fp_move;
13946 if (MAYBE_SSE_CLASS_P (class1))
13947 return ix86_cost->sse_move;
13948 if (MAYBE_MMX_CLASS_P (class1))
13949 return ix86_cost->mmx_move;
13950 return 2;
13951 }
13952
13953 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
13954 int
13955 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
13956 {
13957 /* Flags and only flags can only hold CCmode values. */
13958 if (CC_REGNO_P (regno))
13959 return GET_MODE_CLASS (mode) == MODE_CC;
13960 if (GET_MODE_CLASS (mode) == MODE_CC
13961 || GET_MODE_CLASS (mode) == MODE_RANDOM
13962 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
13963 return 0;
13964 if (FP_REGNO_P (regno))
13965 return VALID_FP_MODE_P (mode);
13966 if (SSE_REGNO_P (regno))
13967 return (TARGET_SSE ? VALID_SSE_REG_MODE (mode) : 0);
13968 if (MMX_REGNO_P (regno))
13969 return (TARGET_MMX
13970 ? VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode) : 0);
13971 /* We handle both integers and floats in the general purpose registers.
13972 In the future we should be able to handle vector modes as well. */
13973 if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
13974 return 0;
13975 /* Take care with QImode values - they can be in non-QI regs, but then
13976 they do cause partial register stalls. */
13977 if (regno < 4 || mode != QImode || TARGET_64BIT)
13978 return 1;
13979 return reload_in_progress || reload_completed || !TARGET_PARTIAL_REG_STALL;
13980 }
13981
13982 /* Return the cost of moving data of mode M between a
13983 register and memory. A value of 2 is the default; this cost is
13984 relative to those in `REGISTER_MOVE_COST'.
13985
13986 If moving between registers and memory is more expensive than
13987 between two registers, you should define this macro to express the
13988 relative cost.
13989
13990 Also model the increased cost of moving QImode registers in non-Q_REGS
13991 classes.
13992 */
13993 int
13994 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
13995 {
13996 if (FLOAT_CLASS_P (class))
13997 {
13998 int index;
13999 switch (mode)
14000 {
14001 case SFmode:
14002 index = 0;
14003 break;
14004 case DFmode:
14005 index = 1;
14006 break;
14007 case XFmode:
14008 index = 2;
14009 break;
14010 default:
14011 return 100;
14012 }
14013 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
14014 }
14015 if (SSE_CLASS_P (class))
14016 {
14017 int index;
14018 switch (GET_MODE_SIZE (mode))
14019 {
14020 case 4:
14021 index = 0;
14022 break;
14023 case 8:
14024 index = 1;
14025 break;
14026 case 16:
14027 index = 2;
14028 break;
14029 default:
14030 return 100;
14031 }
14032 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
14033 }
14034 if (MMX_CLASS_P (class))
14035 {
14036 int index;
14037 switch (GET_MODE_SIZE (mode))
14038 {
14039 case 4:
14040 index = 0;
14041 break;
14042 case 8:
14043 index = 1;
14044 break;
14045 default:
14046 return 100;
14047 }
14048 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
14049 }
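/* Integer register classes.  QImode loads into non-Q_REGS classes go
   through movzbl and byte stores from such classes get an extra penalty;
   larger modes are costed per word moved.  */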
14050 switch (GET_MODE_SIZE (mode))
14051 {
14052 case 1:
14053 if (in)
14054 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
14055 : ix86_cost->movzbl_load);
14056 else
14057 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
14058 : ix86_cost->int_store[0] + 4);
14059 break;
14060 case 2:
14061 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
14062 default:
14063 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
14064 if (mode == TFmode)
14065 mode = XFmode;
14066 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
14067 * (((int) GET_MODE_SIZE (mode)
14068 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
14069 }
14070 }
14071
14072 /* Compute a (partial) cost for rtx X. Return true if the complete
14073 cost has been computed, and false if subexpressions should be
14074 scanned. In either case, *TOTAL contains the cost result. */
14075
14076 static bool
14077 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
14078 {
14079 enum machine_mode mode = GET_MODE (x);
14080
14081 switch (code)
14082 {
14083 case CONST_INT:
14084 case CONST:
14085 case LABEL_REF:
14086 case SYMBOL_REF:
14087 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
14088 *total = 3;
14089 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
14090 *total = 2;
14091 else if (flag_pic && SYMBOLIC_CONST (x)
14092 && (!TARGET_64BIT
14093 || (GET_CODE (x) != LABEL_REF
14094 && (GET_CODE (x) != SYMBOL_REF
14095 || !SYMBOL_REF_LOCAL_P (x)))))
14096 *total = 1;
14097 else
14098 *total = 0;
14099 return true;
14100
14101 case CONST_DOUBLE:
14102 if (mode == VOIDmode)
14103 *total = 0;
14104 else
14105 switch (standard_80387_constant_p (x))
14106 {
14107 case 1: /* 0.0 */
14108 *total = 1;
14109 break;
14110 default: /* Other constants */
14111 *total = 2;
14112 break;
14113 case 0:
14114 case -1:
14115 /* Start with (MEM (SYMBOL_REF)), since that's where
14116 it'll probably end up. Add a penalty for size. */
14117 *total = (COSTS_N_INSNS (1)
14118 + (flag_pic != 0 && !TARGET_64BIT)
14119 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
14120 break;
14121 }
14122 return true;
14123
14124 case ZERO_EXTEND:
14125 /* The zero extension is often completely free on x86_64, so make
14126 it as cheap as possible. */
14127 if (TARGET_64BIT && mode == DImode
14128 && GET_MODE (XEXP (x, 0)) == SImode)
14129 *total = 1;
14130 else if (TARGET_ZERO_EXTEND_WITH_AND)
14131 *total = COSTS_N_INSNS (ix86_cost->add);
14132 else
14133 *total = COSTS_N_INSNS (ix86_cost->movzx);
14134 return false;
14135
14136 case SIGN_EXTEND:
14137 *total = COSTS_N_INSNS (ix86_cost->movsx);
14138 return false;
14139
14140 case ASHIFT:
14141 if (GET_CODE (XEXP (x, 1)) == CONST_INT
14142 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
14143 {
14144 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14145 if (value == 1)
14146 {
14147 *total = COSTS_N_INSNS (ix86_cost->add);
14148 return false;
14149 }
14150 if ((value == 2 || value == 3)
14151 && ix86_cost->lea <= ix86_cost->shift_const)
14152 {
14153 *total = COSTS_N_INSNS (ix86_cost->lea);
14154 return false;
14155 }
14156 }
14157 /* FALLTHRU */
14158
14159 case ROTATE:
14160 case ASHIFTRT:
14161 case LSHIFTRT:
14162 case ROTATERT:
14163 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
14164 {
14165 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14166 {
14167 if (INTVAL (XEXP (x, 1)) > 32)
14168 *total = COSTS_N_INSNS(ix86_cost->shift_const + 2);
14169 else
14170 *total = COSTS_N_INSNS(ix86_cost->shift_const * 2);
14171 }
14172 else
14173 {
14174 if (GET_CODE (XEXP (x, 1)) == AND)
14175 *total = COSTS_N_INSNS(ix86_cost->shift_var * 2);
14176 else
14177 *total = COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2);
14178 }
14179 }
14180 else
14181 {
14182 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14183 *total = COSTS_N_INSNS (ix86_cost->shift_const);
14184 else
14185 *total = COSTS_N_INSNS (ix86_cost->shift_var);
14186 }
14187 return false;
14188
14189 case MULT:
14190 if (FLOAT_MODE_P (mode))
14191 {
14192 *total = COSTS_N_INSNS (ix86_cost->fmul);
14193 return false;
14194 }
14195 else
14196 {
14197 rtx op0 = XEXP (x, 0);
14198 rtx op1 = XEXP (x, 1);
14199 int nbits;
14200 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14201 {
14202 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14203 for (nbits = 0; value != 0; value &= value - 1)
14204 nbits++;
14205 }
14206 else
14207 /* This is arbitrary. */
14208 nbits = 7;
14209
14210 /* Compute costs correctly for widening multiplication. */
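/* E.g. (mult:DI (sign_extend:DI (reg:SI)) (sign_extend:DI (reg:SI)))
   is a single widening multiply, so it should be costed in the narrower
   inner mode rather than in DImode.  */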
14211 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
14212 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
14213 == GET_MODE_SIZE (mode))
14214 {
14215 int is_mulwiden = 0;
14216 enum machine_mode inner_mode = GET_MODE (op0);
14217
14218 if (GET_CODE (op0) == GET_CODE (op1))
14219 is_mulwiden = 1, op1 = XEXP (op1, 0);
14220 else if (GET_CODE (op1) == CONST_INT)
14221 {
14222 if (GET_CODE (op0) == SIGN_EXTEND)
14223 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
14224 == INTVAL (op1);
14225 else
14226 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
14227 }
14228
14229 if (is_mulwiden)
14230 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
14231 }
14232
14233 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
14234 + nbits * ix86_cost->mult_bit)
14235 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
14236
14237 return true;
14238 }
14239
14240 case DIV:
14241 case UDIV:
14242 case MOD:
14243 case UMOD:
14244 if (FLOAT_MODE_P (mode))
14245 *total = COSTS_N_INSNS (ix86_cost->fdiv);
14246 else
14247 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
14248 return false;
14249
14250 case PLUS:
14251 if (FLOAT_MODE_P (mode))
14252 *total = COSTS_N_INSNS (ix86_cost->fadd);
14253 else if (GET_MODE_CLASS (mode) == MODE_INT
14254 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
14255 {
14256 if (GET_CODE (XEXP (x, 0)) == PLUS
14257 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
14258 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
14259 && CONSTANT_P (XEXP (x, 1)))
14260 {
14261 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
14262 if (val == 2 || val == 4 || val == 8)
14263 {
14264 *total = COSTS_N_INSNS (ix86_cost->lea);
14265 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
14266 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
14267 outer_code);
14268 *total += rtx_cost (XEXP (x, 1), outer_code);
14269 return true;
14270 }
14271 }
14272 else if (GET_CODE (XEXP (x, 0)) == MULT
14273 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
14274 {
14275 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
14276 if (val == 2 || val == 4 || val == 8)
14277 {
14278 *total = COSTS_N_INSNS (ix86_cost->lea);
14279 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
14280 *total += rtx_cost (XEXP (x, 1), outer_code);
14281 return true;
14282 }
14283 }
14284 else if (GET_CODE (XEXP (x, 0)) == PLUS)
14285 {
14286 *total = COSTS_N_INSNS (ix86_cost->lea);
14287 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
14288 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
14289 *total += rtx_cost (XEXP (x, 1), outer_code);
14290 return true;
14291 }
14292 }
14293 /* FALLTHRU */
14294
14295 case MINUS:
14296 if (FLOAT_MODE_P (mode))
14297 {
14298 *total = COSTS_N_INSNS (ix86_cost->fadd);
14299 return false;
14300 }
14301 /* FALLTHRU */
14302
14303 case AND:
14304 case IOR:
14305 case XOR:
14306 if (!TARGET_64BIT && mode == DImode)
14307 {
14308 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
14309 + (rtx_cost (XEXP (x, 0), outer_code)
14310 << (GET_MODE (XEXP (x, 0)) != DImode))
14311 + (rtx_cost (XEXP (x, 1), outer_code)
14312 << (GET_MODE (XEXP (x, 1)) != DImode)));
14313 return true;
14314 }
14315 /* FALLTHRU */
14316
14317 case NEG:
14318 if (FLOAT_MODE_P (mode))
14319 {
14320 *total = COSTS_N_INSNS (ix86_cost->fchs);
14321 return false;
14322 }
14323 /* FALLTHRU */
14324
14325 case NOT:
14326 if (!TARGET_64BIT && mode == DImode)
14327 *total = COSTS_N_INSNS (ix86_cost->add * 2);
14328 else
14329 *total = COSTS_N_INSNS (ix86_cost->add);
14330 return false;
14331
14332 case COMPARE:
14333 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
14334 && XEXP (XEXP (x, 0), 1) == const1_rtx
14335 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
14336 && XEXP (x, 1) == const0_rtx)
14337 {
14338 /* This kind of construct is implemented using test[bwl].
14339 Treat it as if we had an AND. */
14340 *total = (COSTS_N_INSNS (ix86_cost->add)
14341 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
14342 + rtx_cost (const1_rtx, outer_code));
14343 return true;
14344 }
14345 return false;
14346
14347 case FLOAT_EXTEND:
14348 if (!TARGET_SSE_MATH || !VALID_SSE_REG_MODE (mode))
14349 *total = 0;
14350 return false;
14351
14352 case ABS:
14353 if (FLOAT_MODE_P (mode))
14354 *total = COSTS_N_INSNS (ix86_cost->fabs);
14355 return false;
14356
14357 case SQRT:
14358 if (FLOAT_MODE_P (mode))
14359 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
14360 return false;
14361
14362 case UNSPEC:
14363 if (XINT (x, 1) == UNSPEC_TP)
14364 *total = 0;
14365 return false;
14366
14367 default:
14368 return false;
14369 }
14370 }
14371
14372 #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION)
14373 static void
14374 ix86_svr3_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
14375 {
14376 init_section ();
14377 fputs ("\tpushl $", asm_out_file);
14378 assemble_name (asm_out_file, XSTR (symbol, 0));
14379 fputc ('\n', asm_out_file);
14380 }
14381 #endif
14382
14383 #if TARGET_MACHO
14384
14385 static int current_machopic_label_num;
14386
14387 /* Given a symbol name and its associated stub, write out the
14388 definition of the stub. */
14389
14390 void
14391 machopic_output_stub (FILE *file, const char *symb, const char *stub)
14392 {
14393 unsigned int length;
14394 char *binder_name, *symbol_name, lazy_ptr_name[32];
14395 int label = ++current_machopic_label_num;
14396
14397 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
14398 symb = (*targetm.strip_name_encoding) (symb);
14399
14400 length = strlen (stub);
14401 binder_name = alloca (length + 32);
14402 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
14403
14404 length = strlen (symb);
14405 symbol_name = alloca (length + 32);
14406 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
14407
14408 sprintf (lazy_ptr_name, "L%d$lz", label);
14409
14410 if (MACHOPIC_PURE)
14411 machopic_picsymbol_stub_section ();
14412 else
14413 machopic_symbol_stub_section ();
14414
14415 fprintf (file, "%s:\n", stub);
14416 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
14417
14418 if (MACHOPIC_PURE)
14419 {
14420 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
14421 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
14422 fprintf (file, "\tjmp %%edx\n");
14423 }
14424 else
14425 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
14426
14427 fprintf (file, "%s:\n", binder_name);
14428
14429 if (MACHOPIC_PURE)
14430 {
14431 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
14432 fprintf (file, "\tpushl %%eax\n");
14433 }
14434 else
14435 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
14436
14437 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
14438
14439 machopic_lazy_symbol_ptr_section ();
14440 fprintf (file, "%s:\n", lazy_ptr_name);
14441 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
14442 fprintf (file, "\t.long %s\n", binder_name);
14443 }
14444 #endif /* TARGET_MACHO */
14445
14446 /* Order the registers for register allocator. */
14447
14448 void
14449 x86_order_regs_for_local_alloc (void)
14450 {
14451 int pos = 0;
14452 int i;
14453
14454 /* First allocate the local general purpose registers. */
14455 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
14456 if (GENERAL_REGNO_P (i) && call_used_regs[i])
14457 reg_alloc_order [pos++] = i;
14458
14459 /* Global general purpose registers. */
14460 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
14461 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
14462 reg_alloc_order [pos++] = i;
14463
14464 /* x87 registers come first in case we are doing FP math
14465 using them. */
14466 if (!TARGET_SSE_MATH)
14467 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
14468 reg_alloc_order [pos++] = i;
14469
14470 /* SSE registers. */
14471 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
14472 reg_alloc_order [pos++] = i;
14473 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
14474 reg_alloc_order [pos++] = i;
14475
14476 /* x87 registers. */
14477 if (TARGET_SSE_MATH)
14478 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
14479 reg_alloc_order [pos++] = i;
14480
14481 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
14482 reg_alloc_order [pos++] = i;
14483
14484 /* Initialize the rest of the array, as we do not allocate some registers
14485 at all. */
14486 while (pos < FIRST_PSEUDO_REGISTER)
14487 reg_alloc_order [pos++] = 0;
14488 }
14489
14490 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
14491 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
14492 #endif
14493
14494 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
14495 struct attribute_spec.handler. */
14496 static tree
14497 ix86_handle_struct_attribute (tree *node, tree name,
14498 tree args ATTRIBUTE_UNUSED,
14499 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
14500 {
14501 tree *type = NULL;
14502 if (DECL_P (*node))
14503 {
14504 if (TREE_CODE (*node) == TYPE_DECL)
14505 type = &TREE_TYPE (*node);
14506 }
14507 else
14508 type = node;
14509
14510 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
14511 || TREE_CODE (*type) == UNION_TYPE)))
14512 {
14513 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
14514 *no_add_attrs = true;
14515 }
14516
14517 else if ((is_attribute_p ("ms_struct", name)
14518 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
14519 || ((is_attribute_p ("gcc_struct", name)
14520 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
14521 {
14522 warning ("%qs incompatible attribute ignored",
14523 IDENTIFIER_POINTER (name));
14524 *no_add_attrs = true;
14525 }
14526
14527 return NULL_TREE;
14528 }
14529
14530 static bool
14531 ix86_ms_bitfield_layout_p (tree record_type)
14532 {
14533 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
14534 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
14535 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
14536 }
14537
14538 /* Returns an expression indicating where the this parameter is
14539 located on entry to the FUNCTION. */
14540
14541 static rtx
14542 x86_this_parameter (tree function)
14543 {
14544 tree type = TREE_TYPE (function);
14545
14546 if (TARGET_64BIT)
14547 {
14548 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
14549 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
14550 }
14551
14552 if (ix86_function_regparm (type, function) > 0)
14553 {
14554 tree parm;
14555
14556 parm = TYPE_ARG_TYPES (type);
14557 /* Figure out whether or not the function has a variable number of
14558 arguments. */
14559 for (; parm; parm = TREE_CHAIN (parm))
14560 if (TREE_VALUE (parm) == void_type_node)
14561 break;
14562 /* If not, the this parameter is in the first argument. */
14563 if (parm)
14564 {
14565 int regno = 0;
14566 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
14567 regno = 2;
14568 return gen_rtx_REG (SImode, regno);
14569 }
14570 }
14571
14572 if (aggregate_value_p (TREE_TYPE (type), type))
14573 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
14574 else
14575 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
14576 }
14577
14578 /* Determine whether x86_output_mi_thunk can succeed. */
14579
14580 static bool
14581 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
14582 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
14583 HOST_WIDE_INT vcall_offset, tree function)
14584 {
14585 /* 64-bit can handle anything. */
14586 if (TARGET_64BIT)
14587 return true;
14588
14589 /* For 32-bit, everything's fine if we have one free register. */
14590 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
14591 return true;
14592
14593 /* Need a free register for vcall_offset. */
14594 if (vcall_offset)
14595 return false;
14596
14597 /* Need a free register for GOT references. */
14598 if (flag_pic && !(*targetm.binds_local_p) (function))
14599 return false;
14600
14601 /* Otherwise ok. */
14602 return true;
14603 }
14604
14605 /* Output the assembler code for a thunk function. THUNK_DECL is the
14606 declaration for the thunk function itself, FUNCTION is the decl for
14607 the target function. DELTA is an immediate constant offset to be
14608 added to THIS. If VCALL_OFFSET is nonzero, the word at
14609 *(*this + vcall_offset) should be added to THIS. */
14610
14611 static void
14612 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
14613 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
14614 HOST_WIDE_INT vcall_offset, tree function)
14615 {
14616 rtx xops[3];
14617 rtx this = x86_this_parameter (function);
14618 rtx this_reg, tmp;
14619
14620 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
14621 pull it in now and let DELTA benefit. */
14622 if (REG_P (this))
14623 this_reg = this;
14624 else if (vcall_offset)
14625 {
14626 /* Put the this parameter into %eax. */
14627 xops[0] = this;
14628 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
14629 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
14630 }
14631 else
14632 this_reg = NULL_RTX;
14633
14634 /* Adjust the this parameter by a fixed constant. */
14635 if (delta)
14636 {
14637 xops[0] = GEN_INT (delta);
14638 xops[1] = this_reg ? this_reg : this;
14639 if (TARGET_64BIT)
14640 {
14641 if (!x86_64_general_operand (xops[0], DImode))
14642 {
14643 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
14644 xops[1] = tmp;
14645 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
14646 xops[0] = tmp;
14647 xops[1] = this;
14648 }
14649 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
14650 }
14651 else
14652 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
14653 }
14654
14655 /* Adjust the this parameter by a value stored in the vtable. */
14656 if (vcall_offset)
14657 {
14658 if (TARGET_64BIT)
14659 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
14660 else
14661 {
14662 int tmp_regno = 2 /* ECX */;
14663 if (lookup_attribute ("fastcall",
14664 TYPE_ATTRIBUTES (TREE_TYPE (function))))
14665 tmp_regno = 0 /* EAX */;
14666 tmp = gen_rtx_REG (SImode, tmp_regno);
14667 }
14668
14669 xops[0] = gen_rtx_MEM (Pmode, this_reg);
14670 xops[1] = tmp;
14671 if (TARGET_64BIT)
14672 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
14673 else
14674 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
14675
14676 /* Adjust the this parameter. */
14677 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
14678 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
14679 {
14680 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
14681 xops[0] = GEN_INT (vcall_offset);
14682 xops[1] = tmp2;
14683 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
14684 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
14685 }
14686 xops[1] = this_reg;
14687 if (TARGET_64BIT)
14688 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
14689 else
14690 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
14691 }
14692
14693 /* If necessary, drop THIS back to its stack slot. */
14694 if (this_reg && this_reg != this)
14695 {
14696 xops[0] = this_reg;
14697 xops[1] = this;
14698 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
14699 }
14700
14701 xops[0] = XEXP (DECL_RTL (function), 0);
14702 if (TARGET_64BIT)
14703 {
14704 if (!flag_pic || (*targetm.binds_local_p) (function))
14705 output_asm_insn ("jmp\t%P0", xops);
14706 else
14707 {
14708 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
14709 tmp = gen_rtx_CONST (Pmode, tmp);
14710 tmp = gen_rtx_MEM (QImode, tmp);
14711 xops[0] = tmp;
14712 output_asm_insn ("jmp\t%A0", xops);
14713 }
14714 }
14715 else
14716 {
14717 if (!flag_pic || (*targetm.binds_local_p) (function))
14718 output_asm_insn ("jmp\t%P0", xops);
14719 else
14720 #if TARGET_MACHO
14721 if (TARGET_MACHO)
14722 {
14723 rtx sym_ref = XEXP (DECL_RTL (function), 0);
14724 tmp = (gen_rtx_SYMBOL_REF
14725 (Pmode,
14726 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
14727 tmp = gen_rtx_MEM (QImode, tmp);
14728 xops[0] = tmp;
14729 output_asm_insn ("jmp\t%0", xops);
14730 }
14731 else
14732 #endif /* TARGET_MACHO */
14733 {
14734 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
14735 output_set_got (tmp);
14736
14737 xops[1] = tmp;
14738 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
14739 output_asm_insn ("jmp\t{*}%1", xops);
14740 }
14741 }
14742 }
14743
14744 static void
14745 x86_file_start (void)
14746 {
14747 default_file_start ();
14748 if (X86_FILE_START_VERSION_DIRECTIVE)
14749 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
14750 if (X86_FILE_START_FLTUSED)
14751 fputs ("\t.global\t__fltused\n", asm_out_file);
14752 if (ix86_asm_dialect == ASM_INTEL)
14753 fputs ("\t.intel_syntax\n", asm_out_file);
14754 }
14755
14756 int
14757 x86_field_alignment (tree field, int computed)
14758 {
14759 enum machine_mode mode;
14760 tree type = TREE_TYPE (field);
14761
14762 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
14763 return computed;
14764 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
14765 ? get_inner_array_type (type) : type);
14766 if (mode == DFmode || mode == DCmode
14767 || GET_MODE_CLASS (mode) == MODE_INT
14768 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
14769 return MIN (32, computed);
14770 return computed;
14771 }
14772
14773 /* Output assembler code to FILE to increment profiler label # LABELNO
14774 for profiling a function entry. */
14775 void
14776 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
14777 {
14778 if (TARGET_64BIT)
14779 if (flag_pic)
14780 {
14781 #ifndef NO_PROFILE_COUNTERS
14782 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
14783 #endif
14784 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
14785 }
14786 else
14787 {
14788 #ifndef NO_PROFILE_COUNTERS
14789 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
14790 #endif
14791 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
14792 }
14793 else if (flag_pic)
14794 {
14795 #ifndef NO_PROFILE_COUNTERS
14796 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
14797 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
14798 #endif
14799 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
14800 }
14801 else
14802 {
14803 #ifndef NO_PROFILE_COUNTERS
14804 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
14805 PROFILE_COUNT_REGISTER);
14806 #endif
14807 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
14808 }
14809 }
14810
14811 /* We don't have exact information about the insn sizes, but we may assume
14812 quite safely that we are informed about all 1 byte insns and memory
14813 address sizes. This is enough to eliminate unnecessary padding in
14814 99% of cases. */
14815
14816 static int
14817 min_insn_size (rtx insn)
14818 {
14819 int l = 0;
14820
14821 if (!INSN_P (insn) || !active_insn_p (insn))
14822 return 0;
14823
14824 /* Discard alignments we've emitted and jump tables. */
14825 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
14826 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
14827 return 0;
14828 if (GET_CODE (insn) == JUMP_INSN
14829 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
14830 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
14831 return 0;
14832
14833 /* Important case - calls are always 5 bytes.
14834 It is common to have many calls in a row. */
14835 if (GET_CODE (insn) == CALL_INSN
14836 && symbolic_reference_mentioned_p (PATTERN (insn))
14837 && !SIBLING_CALL_P (insn))
14838 return 5;
14839 if (get_attr_length (insn) <= 1)
14840 return 1;
14841
14842 /* For normal instructions we may rely on the sizes of addresses
14843 and the presence of a symbol to require 4 bytes of encoding.
14844 This is not the case for jumps, where references are PC relative. */
14845 if (GET_CODE (insn) != JUMP_INSN)
14846 {
14847 l = get_attr_length_address (insn);
14848 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
14849 l = 4;
14850 }
14851 if (l)
14852 return 1+l;
14853 else
14854 return 2;
14855 }
14856
14857 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16 byte
14858 window. */
14859
14860 static void
14861 ix86_avoid_jump_misspredicts (void)
14862 {
14863 rtx insn, start = get_insns ();
14864 int nbytes = 0, njumps = 0;
14865 int isjump = 0;
14866
14867 /* Look for all minimal intervals of instructions containing 4 jumps.
14868 The intervals are bounded by START and INSN. NBYTES is the total
14869 size of instructions in the interval including INSN and not including
14870 START. When NBYTES is smaller than 16, it is possible
14871 that the end of START and the end of INSN fall in the same 16byte page.
14872
14873 The smallest offset in the page at which INSN can start is when START
14874 ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
14875 We add p2align to 16byte window with maxskip 17 - NBYTES + sizeof (INSN).
14876 */
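/* For example, if the four jumps and the code between them occupy
   NBYTES = 12 bytes and INSN itself is 2 bytes, the loop below emits a
   p2align of 15 - 12 + 2 = 5 bytes before INSN so that the four jumps
   can no longer share one 16-byte window.  */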
14877 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14878 {
14879
14880 nbytes += min_insn_size (insn);
14881 if (dump_file)
14882 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
14883 INSN_UID (insn), min_insn_size (insn));
14884 if ((GET_CODE (insn) == JUMP_INSN
14885 && GET_CODE (PATTERN (insn)) != ADDR_VEC
14886 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
14887 || GET_CODE (insn) == CALL_INSN)
14888 njumps++;
14889 else
14890 continue;
14891
14892 while (njumps > 3)
14893 {
14894 start = NEXT_INSN (start);
14895 if ((GET_CODE (start) == JUMP_INSN
14896 && GET_CODE (PATTERN (start)) != ADDR_VEC
14897 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
14898 || GET_CODE (start) == CALL_INSN)
14899 njumps--, isjump = 1;
14900 else
14901 isjump = 0;
14902 nbytes -= min_insn_size (start);
14903 }
14904 if (njumps < 0)
14905 abort ();
14906 if (dump_file)
14907 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
14908 INSN_UID (start), INSN_UID (insn), nbytes);
14909
14910 if (njumps == 3 && isjump && nbytes < 16)
14911 {
14912 int padsize = 15 - nbytes + min_insn_size (insn);
14913
14914 if (dump_file)
14915 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
14916 INSN_UID (insn), padsize);
14917 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
14918 }
14919 }
14920 }
14921
14922 /* AMD Athlon works faster
14923 when RET is not the destination of a conditional jump or directly preceded
14924 by another jump instruction. We avoid the penalty by inserting a NOP just
14925 before the RET instruction in such cases. */
14926 static void
14927 ix86_pad_returns (void)
14928 {
14929 edge e;
14930 edge_iterator ei;
14931
14932 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
14933 {
14934 basic_block bb = e->src;
14935 rtx ret = BB_END (bb);
14936 rtx prev;
14937 bool replace = false;
14938
14939 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
14940 || !maybe_hot_bb_p (bb))
14941 continue;
14942 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
14943 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
14944 break;
14945 if (prev && GET_CODE (prev) == CODE_LABEL)
14946 {
14947 edge e;
14948 edge_iterator ei;
14949
14950 FOR_EACH_EDGE (e, ei, bb->preds)
14951 if (EDGE_FREQUENCY (e) && e->src->index >= 0
14952 && !(e->flags & EDGE_FALLTHRU))
14953 replace = true;
14954 }
14955 if (!replace)
14956 {
14957 prev = prev_active_insn (ret);
14958 if (prev
14959 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
14960 || GET_CODE (prev) == CALL_INSN))
14961 replace = true;
14962 /* Empty functions get branch mispredict even when the jump destination
14963 is not visible to us. */
14964 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
14965 replace = true;
14966 }
14967 if (replace)
14968 {
14969 emit_insn_before (gen_return_internal_long (), ret);
14970 delete_insn (ret);
14971 }
14972 }
14973 }
14974
14975 /* Implement machine specific optimizations. We implement padding of returns
14976 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
14977 static void
14978 ix86_reorg (void)
14979 {
14980 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
14981 ix86_pad_returns ();
14982 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
14983 ix86_avoid_jump_misspredicts ();
14984 }
14985
14986 /* Return nonzero when a QImode register that must be represented via a REX prefix
14987 is used. */
14988 bool
14989 x86_extended_QIreg_mentioned_p (rtx insn)
14990 {
14991 int i;
14992 extract_insn_cached (insn);
14993 for (i = 0; i < recog_data.n_operands; i++)
14994 if (REG_P (recog_data.operand[i])
14995 && REGNO (recog_data.operand[i]) >= 4)
14996 return true;
14997 return false;
14998 }
14999
15000 /* Return nonzero when P points to register encoded via REX prefix.
15001 Called via for_each_rtx. */
15002 static int
15003 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
15004 {
15005 unsigned int regno;
15006 if (!REG_P (*p))
15007 return 0;
15008 regno = REGNO (*p);
15009 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
15010 }
15011
15012 /* Return true when INSN mentions register that must be encoded using REX
15013 prefix. */
15014 bool
15015 x86_extended_reg_mentioned_p (rtx insn)
15016 {
15017 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
15018 }
15019
15020 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
15021 optabs would emit if we didn't have TFmode patterns. */
15022
15023 void
15024 x86_emit_floatuns (rtx operands[2])
15025 {
15026 rtx neglab, donelab, i0, i1, f0, in, out;
15027 enum machine_mode mode, inmode;
15028
15029 inmode = GET_MODE (operands[1]);
15030 if (inmode != SImode
15031 && inmode != DImode)
15032 abort ();
15033
15034 out = operands[0];
15035 in = force_reg (inmode, operands[1]);
15036 mode = GET_MODE (out);
15037 neglab = gen_label_rtx ();
15038 donelab = gen_label_rtx ();
15039 i1 = gen_reg_rtx (Pmode);
15040 f0 = gen_reg_rtx (mode);
15041
15042 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
15043
15044 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
15045 emit_jump_insn (gen_jump (donelab));
15046 emit_barrier ();
15047
15048 emit_label (neglab);
15049
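/* The value has its sign bit set, so it cannot be converted as a signed
   number directly.  Compute (in >> 1) | (in & 1) -- half the value, with
   the low bit folded back in so rounding is unaffected -- convert that,
   and then double the result.  */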
15050 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15051 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15052 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
15053 expand_float (f0, i0, 0);
15054 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
15055
15056 emit_label (donelab);
15057 }
15058
15059 /* Initialize vector TARGET via VALS. */
15060 void
15061 ix86_expand_vector_init (rtx target, rtx vals)
15062 {
15063 enum machine_mode mode = GET_MODE (target);
15064 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
15065 int n_elts = (GET_MODE_SIZE (mode) / elt_size);
15066 int i;
15067
15068 for (i = n_elts - 1; i >= 0; i--)
15069 if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT
15070 && GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE)
15071 break;
15072
15073 /* A few special cases first...
15074 ... constants are best loaded from the constant pool. */
15075 if (i < 0)
15076 {
15077 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
15078 return;
15079 }
15080
15081 /* ... values where only the first field is non-constant are best loaded
15082 from the pool and overwritten with a move later. */
15083 if (!i)
15084 {
15085 rtx op = simplify_gen_subreg (mode, XVECEXP (vals, 0, 0),
15086 GET_MODE_INNER (mode), 0);
15087
15088 op = force_reg (mode, op);
15089 XVECEXP (vals, 0, 0) = CONST0_RTX (GET_MODE_INNER (mode));
15090 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
15091 switch (GET_MODE (target))
15092 {
15093 case V2DFmode:
15094 emit_insn (gen_sse2_movsd (target, target, op));
15095 break;
15096 case V4SFmode:
15097 emit_insn (gen_sse_movss (target, target, op));
15098 break;
15099 default:
15100 break;
15101 }
15102 return;
15103 }
15104
15105 /* Otherwise, fall back to the longer sequence built from unpack interleaves. */
15106 switch (GET_MODE (target))
15107 {
15108 case V2DFmode:
15109 {
15110 rtx vecop0 =
15111 simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 0), DFmode, 0);
15112 rtx vecop1 =
15113 simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 1), DFmode, 0);
15114
15115 vecop0 = force_reg (V2DFmode, vecop0);
15116 vecop1 = force_reg (V2DFmode, vecop1);
15117 emit_insn (gen_sse2_unpcklpd (target, vecop0, vecop1));
15118 }
15119 break;
15120 case V4SFmode:
15121 {
15122 rtx vecop0 =
15123 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 0), SFmode, 0);
15124 rtx vecop1 =
15125 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 1), SFmode, 0);
15126 rtx vecop2 =
15127 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 2), SFmode, 0);
15128 rtx vecop3 =
15129 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 3), SFmode, 0);
15130 rtx tmp1 = gen_reg_rtx (V4SFmode);
15131 rtx tmp2 = gen_reg_rtx (V4SFmode);
15132
15133 vecop0 = force_reg (V4SFmode, vecop0);
15134 vecop1 = force_reg (V4SFmode, vecop1);
15135 vecop2 = force_reg (V4SFmode, vecop2);
15136 vecop3 = force_reg (V4SFmode, vecop3);
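/* unpcklps interleaves the low elements of its operands, so tmp1 holds
   { elt1, elt3, ... }, tmp2 holds { elt0, elt2, ... }, and the final
   interleave produces { elt0, elt1, elt2, elt3 }.  */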
15137 emit_insn (gen_sse_unpcklps (tmp1, vecop1, vecop3));
15138 emit_insn (gen_sse_unpcklps (tmp2, vecop0, vecop2));
15139 emit_insn (gen_sse_unpcklps (target, tmp2, tmp1));
15140 }
15141 break;
15142 default:
15143 abort ();
15144 }
15145 }
15146
15147 /* Implements target hook vector_mode_supported_p. */
15148 static bool
15149 ix86_vector_mode_supported_p (enum machine_mode mode)
15150 {
15151 if (TARGET_SSE
15152 && VALID_SSE_REG_MODE (mode))
15153 return true;
15154
15155 else if (TARGET_MMX
15156 && VALID_MMX_REG_MODE (mode))
15157 return true;
15158
15159 else if (TARGET_3DNOW
15160 && VALID_MMX_REG_MODE_3DNOW (mode))
15161 return true;
15162
15163 else
15164 return false;
15165 }
15166
15167 /* Worker function for TARGET_MD_ASM_CLOBBERS.
15168
15169 We do this in the new i386 backend to maintain source compatibility
15170 with the old cc0-based compiler. */
15171
15172 static tree
15173 ix86_md_asm_clobbers (tree clobbers)
15174 {
15175 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
15176 clobbers);
15177 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
15178 clobbers);
15179 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
15180 clobbers);
15181 return clobbers;
15182 }
15183
15184 /* Worker function for REVERSE_CONDITION. */
15185
15186 enum rtx_code
15187 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
15188 {
15189 return (mode != CCFPmode && mode != CCFPUmode
15190 ? reverse_condition (code)
15191 : reverse_condition_maybe_unordered (code));
15192 }
15193
15194 /* Output code to perform an x87 FP register move, from OPERANDS[1]
15195 to OPERANDS[0]. */
15196
15197 const char *
15198 output_387_reg_move (rtx insn, rtx *operands)
15199 {
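/* If the source register dies in this insn, store-and-pop it (using ffreep
   when all we need is to pop %st(0) and the target prefers it); otherwise
   emit a plain load or store depending on which operand is the top of the
   x87 stack.  */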
15200 if (REG_P (operands[1])
15201 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
15202 {
15203 if (REGNO (operands[0]) == FIRST_STACK_REG
15204 && TARGET_USE_FFREEP)
15205 return "ffreep\t%y0";
15206 return "fstp\t%y0";
15207 }
15208 if (STACK_TOP_P (operands[0]))
15209 return "fld%z1\t%y1";
15210 return "fst\t%y0";
15211 }
15212
15213 /* Output code to perform a conditional jump to LABEL, if C2 flag in
15214 FP status register is set. */
15215
15216 void
15217 ix86_emit_fp_unordered_jump (rtx label)
15218 {
15219 rtx reg = gen_reg_rtx (HImode);
15220 rtx temp;
15221
15222 emit_insn (gen_x86_fnstsw_1 (reg));
15223
15224 if (TARGET_USE_SAHF)
15225 {
15226 emit_insn (gen_x86_sahf_1 (reg));
15227
15228 temp = gen_rtx_REG (CCmode, FLAGS_REG);
15229 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
15230 }
15231 else
15232 {
15233 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
15234
15235 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
15236 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
15237 }
15238
15239 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
15240 gen_rtx_LABEL_REF (VOIDmode, label),
15241 pc_rtx);
15242 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
15243 emit_jump_insn (temp);
15244 }
15245
15246 /* Output code to perform a log1p XFmode calculation. */
15247
15248 void ix86_emit_i387_log1p (rtx op0, rtx op1)
15249 {
15250 rtx label1 = gen_label_rtx ();
15251 rtx label2 = gen_label_rtx ();
15252
15253 rtx tmp = gen_reg_rtx (XFmode);
15254 rtx tmp2 = gen_reg_rtx (XFmode);
15255
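/* fyl2xp1 is only defined for |x| < 1 - sqrt(2)/2 (about 0.29289), which
   is the threshold tested below; for larger |x| we form 1 + x explicitly
   and use fyl2x instead.  */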
15256 emit_insn (gen_absxf2 (tmp, op1));
15257 emit_insn (gen_cmpxf (tmp,
15258 CONST_DOUBLE_FROM_REAL_VALUE (
15259 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
15260 XFmode)));
15261 emit_jump_insn (gen_bge (label1));
15262
15263 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
15264 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
15265 emit_jump (label2);
15266
15267 emit_label (label1);
15268 emit_move_insn (tmp, CONST1_RTX (XFmode));
15269 emit_insn (gen_addxf3 (tmp, op1, tmp));
15270 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
15271 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
15272
15273 emit_label (label2);
15274 }
15275
15276 /* Solaris named-section hook. Parameters are as for
15277 named_section_real. */
15278
15279 static void
15280 i386_solaris_elf_named_section (const char *name, unsigned int flags,
15281 tree decl)
15282 {
15283 /* With Binutils 2.15, the "@unwind" marker must be specified on
15284 every occurrence of the ".eh_frame" section, not just the first
15285 one. */
15286 if (TARGET_64BIT
15287 && strcmp (name, ".eh_frame") == 0)
15288 {
15289 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
15290 flags & SECTION_WRITE ? "aw" : "a");
15291 return;
15292 }
15293 default_elf_asm_named_section (name, flags, decl);
15294 }
15295
15296 #include "gt-i386.h"