1 | /* Subroutines for insn-output.c for Intel 860 |
2 | Copyright (C) 1989, 1991 Free Software Foundation, Inc. | |
3 | Derived from sparc.c. | |
4 | ||
5 | Written by Richard Stallman (rms@ai.mit.edu). | |
6 | ||
7 | Hacked substantially by Ron Guilmette (rfg@ncd.com) to cater | |
8 | to the whims of the System V Release 4 assembler. | |
9 | ||
10 | This file is part of GNU CC. | |
11 | ||
12 | GNU CC is free software; you can redistribute it and/or modify | |
13 | it under the terms of the GNU General Public License as published by | |
14 | the Free Software Foundation; either version 2, or (at your option) | |
15 | any later version. | |
16 | ||
17 | GNU CC is distributed in the hope that it will be useful, | |
18 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
20 | GNU General Public License for more details. | |
21 | ||
22 | You should have received a copy of the GNU General Public License | |
23 | along with GNU CC; see the file COPYING. If not, write to | |
24 | the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ | |
25 | ||
26 | ||
27 | #include "config.h" | |
28 | #include "flags.h" | |
29 | #include "rtl.h" | |
30 | #include "regs.h" | |
31 | #include "hard-reg-set.h" | |
32 | #include "real.h" | |
33 | #include "insn-config.h" | |
34 | #include "conditions.h" | |
35 | #include "insn-flags.h" | |
36 | #include "output.h" | |
37 | #include "recog.h" | |
38 | #include "insn-attr.h" | |
39 | ||
40 | #include <stdio.h> | |
41 | ||
42 | static rtx find_addr_reg (); | |
43 | ||
44 | #ifndef I860_REG_PREFIX | |
45 | #define I860_REG_PREFIX "" | |
46 | #endif | |
47 | ||
48 | char *i860_reg_prefix = I860_REG_PREFIX; | |
49 | ||
50 | /* Save information from a "cmpxx" operation until the branch is emitted. */ | |
51 | ||
52 | rtx i860_compare_op0, i860_compare_op1; | |
53 | \f | |
54 | /* Return non-zero if this pattern can be evaluated safely, even if it |
55 | was not asked for. */ | |
56 | int | |
57 | safe_insn_src_p (op, mode) | |
58 | rtx op; | |
59 | enum machine_mode mode; | |
60 | { | |
61 | /* Just experimenting. */ | |
62 | ||
63 | /* No floating point src is safe if it contains an arithmetic | |
64 | operation, since that operation may trap. */ | |
65 | switch (GET_CODE (op)) | |
66 | { | |
67 | case CONST_INT: | |
68 | case LABEL_REF: | |
69 | case SYMBOL_REF: | |
70 | case CONST: | |
71 | return 1; | |
72 | ||
73 | case REG: | |
74 | return 1; | |
75 | ||
76 | case MEM: | |
77 | return CONSTANT_ADDRESS_P (XEXP (op, 0)); | |
78 | ||
79 | /* We never need to negate or complement constants. */ | |
80 | case NEG: | |
81 | return (mode != SFmode && mode != DFmode); | |
82 | case NOT: | |
83 | case ZERO_EXTEND: | |
84 | return 1; | |
85 | ||
86 | case EQ: | |
87 | case NE: | |
88 | case LT: | |
89 | case GT: | |
90 | case LE: | |
91 | case GE: | |
92 | case LTU: | |
93 | case GTU: | |
94 | case LEU: | |
95 | case GEU: | |
96 | case MINUS: | |
97 | case PLUS: | |
98 | return (mode != SFmode && mode != DFmode); | |
99 | case AND: | |
100 | case IOR: | |
101 | case XOR: | |
102 | case LSHIFT: | |
103 | case ASHIFT: | |
104 | case ASHIFTRT: | |
105 | case LSHIFTRT: | |
106 | if ((GET_CODE (XEXP (op, 0)) == CONST_INT && ! SMALL_INT (XEXP (op, 0))) | |
107 | || (GET_CODE (XEXP (op, 1)) == CONST_INT && ! SMALL_INT (XEXP (op, 1)))) | |
108 | return 0; | |
109 | return 1; | |
110 | ||
111 | default: | |
112 | return 0; | |
113 | } | |
114 | } | |
115 | ||
116 | /* Return 1 if REG is clobbered in IN. | |
117 | Return 2 if REG is used in IN. | |
118 | Return 3 if REG is both used and clobbered in IN. | |
119 | Return 0 if neither. */ | |
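/* Illustrative examples (added annotation, not in the original source);
   with REG == (reg:SI 5):
     (clobber (reg:SI 5))                       -> 1  (clobbered only)
     (set (reg:SI 6) (plus (reg:SI 5) ...))     -> 2  (used only)
     (set (reg:SI 5) (plus (reg:SI 5) ...))     -> 3  (used and clobbered)  */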
120 | ||
121 | static int | |
122 | reg_clobbered_p (reg, in) | |
123 | rtx reg; | |
124 | rtx in; | |
125 | { | |
126 | register enum rtx_code code; | |
127 | ||
128 | if (in == 0) | |
129 | return 0; | |
130 | ||
131 | code = GET_CODE (in); | |
132 | ||
133 | if (code == SET || code == CLOBBER) | |
134 | { | |
135 | rtx dest = SET_DEST (in); | |
136 | int set = 0; | |
137 | int used = 0; | |
138 | ||
139 | while (GET_CODE (dest) == STRICT_LOW_PART | |
140 | || GET_CODE (dest) == SUBREG | |
141 | || GET_CODE (dest) == SIGN_EXTRACT | |
142 | || GET_CODE (dest) == ZERO_EXTRACT) | |
143 | dest = XEXP (dest, 0); | |
144 | ||
145 | if (dest == reg) | |
146 | set = 1; | |
147 | else if (GET_CODE (dest) == REG | |
148 | && refers_to_regno_p (REGNO (reg), | |
149 | REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)), | |
150 | SET_DEST (in), 0)) | |
151 | { | |
152 | set = 1; | |
153 | /* Anything that sets just part of the register | |
154 | is considered using as well as setting it. | |
155 | But note that a straight SUBREG of a single-word value | |
156 | clobbers the entire value. */ | |
157 | if (dest != SET_DEST (in) | |
158 | && ! (GET_CODE (SET_DEST (in)) == SUBREG | |
159 | || UNITS_PER_WORD >= GET_MODE_SIZE (GET_MODE (dest)))) | |
160 | used = 1; | |
161 | } | |
162 | ||
163 | if (code == SET) | |
164 | { | |
165 | if (set) | |
166 | used = refers_to_regno_p (REGNO (reg), | |
167 | REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)), | |
168 | SET_SRC (in), 0); | |
169 | else | |
170 | used = refers_to_regno_p (REGNO (reg), | |
171 | REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)), | |
172 | in, 0); | |
173 | } | |
174 | ||
175 | return set + used * 2; | |
176 | } | |
177 | ||
178 | if (refers_to_regno_p (REGNO (reg), | |
179 | REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)), | |
180 | in, 0)) | |
181 | return 2; | |
182 | return 0; | |
183 | } | |
184 | ||
185 | /* Return non-zero if OP can be written to without screwing up | |
186 | GCC's model of what's going on. It is assumed that this operand | |
187 | appears in the dest position of a SET insn in a conditional | |
188 | branch's delay slot. AFTER is the label to start looking from. */ | |
189 | int | |
190 | operand_clobbered_before_used_after (op, after) | |
191 | rtx op; | |
192 | rtx after; | |
193 | { | |
194 | /* Just experimenting. */ | |
195 | if (GET_CODE (op) == CC0) | |
196 | return 1; | |
197 | if (GET_CODE (op) == REG) | |
198 | { | |
199 | rtx insn; | |
200 | ||
201 | if (op == stack_pointer_rtx) | |
202 | return 0; | |
203 | ||
204 | /* Scan forward from the label, to see if the value of OP | |
205 | is clobbered before the first use. */ | |
206 | ||
207 | for (insn = NEXT_INSN (after); insn; insn = NEXT_INSN (insn)) | |
208 | { | |
209 | if (GET_CODE (insn) == NOTE) | |
210 | continue; | |
211 | if (GET_CODE (insn) == INSN | |
212 | || GET_CODE (insn) == JUMP_INSN | |
213 | || GET_CODE (insn) == CALL_INSN) | |
214 | { | |
215 | switch (reg_clobbered_p (op, PATTERN (insn))) | |
216 | { | |
217 | default: | |
218 | return 0; | |
219 | case 1: | |
220 | return 1; | |
221 | case 0: | |
222 | break; | |
223 | } | |
224 | } | |
225 | /* If we reach another label without clobbering OP, | |
226 | then we cannot safely write it here. */ | |
227 | else if (GET_CODE (insn) == CODE_LABEL) | |
228 | return 0; | |
229 | if (GET_CODE (insn) == JUMP_INSN) | |
230 | { | |
231 | if (condjump_p (insn)) | |
232 | return 0; | |
233 | /* This is a jump insn which has already | |
234 | been mangled. We can't tell what it does. */ | |
235 | if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
236 | return 0; | |
237 | if (! JUMP_LABEL (insn)) | |
238 | return 0; | |
239 | /* Keep following jumps. */ | |
240 | insn = JUMP_LABEL (insn); | |
241 | } | |
242 | } | |
243 | return 1; | |
244 | } | |
245 | ||
246 | /* In both of these cases, the first insn executed | |
247 | for this op will be an orh whatever%h,%?r0,%?r31, |
248 | which is tolerable. */ | |
249 | if (GET_CODE (op) == MEM) | |
250 | return (CONSTANT_ADDRESS_P (XEXP (op, 0))); | |
251 | ||
252 | return 0; | |
253 | } | |
254 | ||
255 | /* Return non-zero if this pattern, as a source to a "SET", | |
256 | is known to yield an instruction of unit size. */ | |
257 | int | |
258 | single_insn_src_p (op, mode) | |
259 | rtx op; | |
260 | enum machine_mode mode; | |
261 | { | |
262 | switch (GET_CODE (op)) | |
263 | { | |
264 | case CONST_INT: | |
265 | /* This is not always a single insn src, technically, | |
266 | but output_delayed_branch knows how to deal with it. */ | |
267 | return 1; | |
268 | ||
269 | case SYMBOL_REF: | |
270 | case CONST: | |
271 | /* This is not a single insn src, technically, | |
272 | but output_delayed_branch knows how to deal with it. */ | |
273 | return 1; | |
274 | ||
275 | case REG: | |
276 | return 1; | |
277 | ||
278 | case MEM: | |
279 | return 1; | |
280 | ||
281 | /* We never need to negate or complement constants. */ | |
282 | case NEG: | |
283 | return (mode != DFmode); | |
284 | case NOT: | |
285 | case ZERO_EXTEND: | |
286 | return 1; | |
287 | ||
288 | case PLUS: | |
289 | case MINUS: | |
290 | /* Detect cases that require multiple instructions. */ | |
291 | if (CONSTANT_P (XEXP (op, 1)) | |
292 | && !(GET_CODE (XEXP (op, 1)) == CONST_INT | |
293 | && SMALL_INT (XEXP (op, 1)))) | |
294 | return 0; | |
295 | case EQ: | |
296 | case NE: | |
297 | case LT: | |
298 | case GT: | |
299 | case LE: | |
300 | case GE: | |
301 | case LTU: | |
302 | case GTU: | |
303 | case LEU: | |
304 | case GEU: | |
305 | /* Not doing floating point, since they probably | |
306 | take longer than the branch slot they might fill. */ | |
307 | return (mode != SFmode && mode != DFmode); | |
308 | ||
309 | case AND: | |
310 | if (GET_CODE (XEXP (op, 1)) == NOT) | |
311 | { | |
312 | rtx arg = XEXP (XEXP (op, 1), 0); | |
313 | if (CONSTANT_P (arg) | |
314 | && !(GET_CODE (arg) == CONST_INT | |
315 | && (SMALL_INT (arg) | |
316 | || (INTVAL (arg) & 0xffff) == 0))) | |
317 | return 0; | |
318 | } | |
319 | case IOR: | |
320 | case XOR: | |
321 | /* Both small and round numbers take one instruction; | |
322 | others take two. */ | |
323 | if (CONSTANT_P (XEXP (op, 1)) | |
324 | && !(GET_CODE (XEXP (op, 1)) == CONST_INT | |
325 | && (SMALL_INT (XEXP (op, 1)) | |
326 | || (INTVAL (XEXP (op, 1)) & 0xffff) == 0))) | |
327 | return 0; | |
328 | ||
329 | case LSHIFT: | |
330 | case ASHIFT: | |
331 | case ASHIFTRT: | |
332 | case LSHIFTRT: | |
333 | return 1; | |
334 | ||
335 | case SUBREG: | |
336 | if (SUBREG_WORD (op) != 0) | |
337 | return 0; | |
338 | return single_insn_src_p (SUBREG_REG (op), mode); | |
339 | ||
340 | /* Not doing floating point, since they probably | |
341 | take longer than the branch slot they might fill. */ | |
342 | case FLOAT_EXTEND: | |
343 | case FLOAT_TRUNCATE: | |
344 | case FLOAT: | |
345 | case FIX: | |
346 | case UNSIGNED_FLOAT: | |
347 | case UNSIGNED_FIX: | |
348 | return 0; | |
349 | ||
350 | default: | |
351 | return 0; | |
352 | } | |
353 | } | |
354 | ||
355 | /* Nonzero only if this *really* is a single insn operand. */ | |
356 | int | |
357 | strict_single_insn_op_p (op, mode) | |
358 | rtx op; | |
359 | enum machine_mode mode; | |
360 | { | |
361 | if (mode == VOIDmode) | |
362 | mode = GET_MODE (op); | |
363 | ||
364 | switch (GET_CODE (op)) | |
365 | { | |
366 | case CC0: | |
367 | return 1; | |
368 | ||
369 | case CONST_INT: | |
370 | if (SMALL_INT (op)) | |
371 | return 1; | |
372 | /* We can put this set insn into a delay slot, because it takes only | |
373 | one insn: `orh'. */ | |
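/* Added illustrative note: "orh" ORs its 16-bit immediate into the high
   half of the destination, so a constant such as 0x12340000 (low 16 bits
   zero) is loaded by one insn along the lines of "orh 0x1234,r0,rN".  */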
374 | if ((INTVAL (op) & 0xffff) == 0) | |
375 | return 1; | |
376 | return 0; | |
377 | ||
378 | case SYMBOL_REF: | |
379 | return 0; | |
380 | ||
381 | case REG: | |
382 | #if 0 | |
383 | /* This loses when moving an freg to a general reg. */ | |
384 | return HARD_REGNO_NREGS (REGNO (op), mode) == 1; | |
385 | #endif | |
386 | return (mode != DFmode && mode != DImode); | |
387 | ||
388 | case MEM: | |
389 | if (! CONSTANT_ADDRESS_P (XEXP (op, 0))) | |
390 | return (mode != DFmode && mode != DImode); | |
391 | return 0; | |
392 | ||
393 | /* We never need to negate or complement constants. */ | |
394 | case NEG: | |
395 | return (mode != DFmode); | |
396 | case NOT: | |
397 | case ZERO_EXTEND: | |
398 | return 1; | |
399 | ||
400 | case PLUS: | |
401 | case MINUS: | |
402 | /* Detect cases that require multiple instructions. */ | |
403 | if (CONSTANT_P (XEXP (op, 1)) | |
404 | && !(GET_CODE (XEXP (op, 1)) == CONST_INT | |
405 | && SMALL_INT (XEXP (op, 1)))) | |
406 | return 0; | |
407 | case EQ: | |
408 | case NE: | |
409 | case LT: | |
410 | case GT: | |
411 | case LE: | |
412 | case GE: | |
413 | case LTU: | |
414 | case GTU: | |
415 | case LEU: | |
416 | case GEU: | |
417 | return 1; | |
418 | ||
419 | case AND: | |
420 | if (GET_CODE (XEXP (op, 1)) == NOT) | |
421 | { | |
422 | rtx arg = XEXP (XEXP (op, 1), 0); | |
423 | if (CONSTANT_P (arg) | |
424 | && !(GET_CODE (arg) == CONST_INT | |
425 | && (SMALL_INT (arg) | |
426 | || (INTVAL (arg) & 0xffff) == 0))) | |
427 | return 0; | |
428 | } | |
429 | case IOR: | |
430 | case XOR: | |
431 | /* Both small and round numbers take one instruction; | |
432 | others take two. */ | |
433 | if (CONSTANT_P (XEXP (op, 1)) | |
434 | && !(GET_CODE (XEXP (op, 1)) == CONST_INT | |
435 | && (SMALL_INT (XEXP (op, 1)) | |
436 | || (INTVAL (XEXP (op, 1)) & 0xffff) == 0))) | |
437 | return 0; | |
438 | ||
439 | case LSHIFT: | |
440 | case ASHIFT: | |
441 | case ASHIFTRT: | |
442 | case LSHIFTRT: | |
443 | return 1; | |
444 | ||
445 | case SUBREG: | |
446 | if (SUBREG_WORD (op) != 0) | |
447 | return 0; | |
448 | return strict_single_insn_op_p (SUBREG_REG (op), mode); | |
449 | ||
450 | case SIGN_EXTEND: | |
451 | if (GET_CODE (XEXP (op, 0)) == MEM | |
452 | && ! CONSTANT_ADDRESS_P (XEXP (XEXP (op, 0), 0))) | |
453 | return 1; | |
454 | return 0; | |
455 | ||
456 | /* Not doing floating point, since they probably | |
457 | take longer than the branch slot they might fill. */ | |
458 | case FLOAT_EXTEND: | |
459 | case FLOAT_TRUNCATE: | |
460 | case FLOAT: | |
461 | case FIX: | |
462 | case UNSIGNED_FLOAT: | |
463 | case UNSIGNED_FIX: | |
464 | return 0; | |
465 | ||
466 | default: | |
467 | return 0; | |
468 | } | |
469 | } | |
470 | \f | |
471 | /* Return truth value of whether OP is a relational operator. */ | |
472 | int | |
473 | relop (op, mode) | |
474 | rtx op; | |
475 | enum machine_mode mode; | |
476 | { | |
477 | switch (GET_CODE (op)) | |
478 | { | |
479 | case EQ: | |
480 | case NE: | |
481 | case GT: | |
482 | case GE: | |
483 | case LT: | |
484 | case LE: | |
485 | case GTU: | |
486 | case GEU: | |
487 | case LTU: | |
488 | case LEU: | |
489 | return 1; | |
490 | } | |
491 | return 0; | |
492 | } | |
493 | \f | |
494 | /* Return non-zero only if OP is a register of mode MODE, | |
495 | or the constant zero of mode MODE (const0_rtx / CONST0_RTX (MODE)). */ | |
496 | int | |
497 | reg_or_0_operand (op, mode) | |
498 | rtx op; | |
499 | enum machine_mode mode; | |
500 | { | |
501 | return (op == const0_rtx || register_operand (op, mode) | |
502 | || op == CONST0_RTX (mode)); | |
503 | } | |
504 | ||
505 | /* Return truth value of whether OP can be used as an operand in a three | |
506 | address add/subtract insn (such as add %o1,7,%l2) of mode MODE. */ | |
507 | ||
508 | int | |
509 | arith_operand (op, mode) | |
510 | rtx op; | |
511 | enum machine_mode mode; | |
512 | { | |
513 | return (register_operand (op, mode) | |
514 | || (GET_CODE (op) == CONST_INT && SMALL_INT (op))); | |
515 | } | |
516 | ||
517 | /* Return 1 if OP is a valid first operand for a logical insn of mode MODE. */ | |
518 | ||
519 | int | |
520 | logic_operand (op, mode) | |
521 | rtx op; | |
522 | enum machine_mode mode; | |
523 | { | |
524 | return (register_operand (op, mode) | |
525 | || (GET_CODE (op) == CONST_INT && LOGIC_INT (op))); | |
526 | } | |
527 | ||
528 | /* Return 1 if OP is a valid first operand for a shift insn of mode MODE. */ | |
529 | ||
530 | int | |
531 | shift_operand (op, mode) | |
532 | rtx op; | |
533 | enum machine_mode mode; | |
534 | { | |
535 | return (register_operand (op, mode) | |
536 | || (GET_CODE (op) == CONST_INT)); | |
537 | } | |
538 | ||
539 | /* Return 1 if OP is a valid first operand for either a logical insn | |
540 | or an add insn of mode MODE. */ | |
541 | ||
542 | int | |
543 | compare_operand (op, mode) | |
544 | rtx op; | |
545 | enum machine_mode mode; | |
546 | { | |
547 | return (register_operand (op, mode) | |
548 | || (GET_CODE (op) == CONST_INT && SMALL_INT (op) && LOGIC_INT (op))); | |
549 | } | |
550 | ||
551 | /* Return truth value of whether OP can be used as the 5-bit immediate | |
552 | operand of a bte or btne insn. */ | |
553 | ||
554 | int | |
555 | bte_operand (op, mode) | |
556 | rtx op; | |
557 | enum machine_mode mode; | |
558 | { | |
559 | return (register_operand (op, mode) | |
560 | || (GET_CODE (op) == CONST_INT | |
561 | && (unsigned) INTVAL (op) < 0x20)); | |
562 | } | |
563 | ||
564 | /* Return 1 if OP is an indexed memory reference of mode MODE. */ | |
565 | ||
566 | int | |
567 | indexed_operand (op, mode) | |
568 | rtx op; | |
569 | enum machine_mode mode; | |
570 | { | |
571 | return (GET_CODE (op) == MEM && GET_MODE (op) == mode | |
572 | && GET_CODE (XEXP (op, 0)) == PLUS | |
573 | && GET_MODE (XEXP (op, 0)) == SImode | |
574 | && register_operand (XEXP (XEXP (op, 0), 0), SImode) | |
575 | && register_operand (XEXP (XEXP (op, 0), 1), SImode)); | |
576 | } | |
577 | ||
578 | /* Return 1 if OP is a suitable source operand for a load insn | |
579 | with mode MODE. */ | |
580 | ||
581 | int | |
582 | load_operand (op, mode) | |
583 | rtx op; | |
584 | enum machine_mode mode; | |
585 | { | |
586 | return (memory_operand (op, mode) || indexed_operand (op, mode)); | |
587 | } | |
588 | ||
589 | /* Return truth value of whether OP is an integer which fits the | |
590 | range constraining immediate operands in add/subtract insns. */ | |
591 | ||
592 | int | |
593 | small_int (op, mode) | |
594 | rtx op; | |
595 | enum machine_mode mode; | |
596 | { | |
597 | return (GET_CODE (op) == CONST_INT && SMALL_INT (op)); | |
598 | } | |
599 | ||
600 | /* Return truth value of whether OP is an integer which fits the | |
601 | range constraining immediate operands in logic insns. */ | |
602 | ||
603 | int | |
604 | logic_int (op, mode) | |
605 | rtx op; | |
606 | enum machine_mode mode; | |
607 | { | |
608 | return (GET_CODE (op) == CONST_INT && LOGIC_INT (op)); | |
609 | } | |
610 | \f | |
611 | /* Return the best assembler insn template | |
612 | for moving operands[1] into operands[0] as a fullword. */ | |
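/* Added illustrative note: when a memory operand has a constant address
   (e.g. a SYMBOL_REF), the code below first materializes the high half of
   the address in r31 ("orh %h...,%?r0,%?r31") and then emits the ld.l/st.l
   with the low half as the offset, e.g. "st.l %r1,%L0(%?r31)".  The
   cc_status bookkeeping lets a later reference to the same address reuse
   the value already sitting in r31.  */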
613 | ||
614 | static char * | |
615 | singlemove_string (operands) | |
616 | rtx *operands; | |
617 | { | |
618 | if (GET_CODE (operands[0]) == MEM) | |
619 | { | |
620 | if (GET_CODE (operands[1]) != MEM) | |
621 | if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0))) | |
622 | { | |
623 | if (! ((cc_prev_status.flags & CC_KNOW_HI_R31) | |
624 | && (cc_prev_status.flags & CC_HI_R31_ADJ) | |
625 | && cc_prev_status.mdep == XEXP (operands[0], 0))) | |
626 | { | |
627 | CC_STATUS_INIT; | |
628 | output_asm_insn ("orh %h0,%?r0,%?r31", operands); | |
629 | } | |
630 | cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ; | |
631 | cc_status.mdep = XEXP (operands[0], 0); | |
632 | return "st.l %r1,%L0(%?r31)"; | |
633 | } | |
634 | else | |
635 | return "st.l %r1,%0"; | |
636 | else | |
637 | abort (); | |
638 | #if 0 | |
639 | { | |
640 | rtx xoperands[2]; | |
641 | ||
642 | cc_status.flags &= ~CC_F0_IS_0; | |
643 | xoperands[0] = gen_rtx (REG, SFmode, 32); | |
644 | xoperands[1] = operands[1]; | |
645 | output_asm_insn (singlemove_string (xoperands), xoperands); | |
646 | xoperands[1] = xoperands[0]; | |
647 | xoperands[0] = operands[0]; | |
648 | output_asm_insn (singlemove_string (xoperands), xoperands); | |
649 | return ""; | |
650 | } | |
651 | #endif | |
652 | } | |
653 | if (GET_CODE (operands[1]) == MEM) | |
654 | { | |
655 | if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0))) | |
656 | { | |
657 | if (! ((cc_prev_status.flags & CC_KNOW_HI_R31) | |
658 | && (cc_prev_status.flags & CC_HI_R31_ADJ) | |
659 | && cc_prev_status.mdep == XEXP (operands[1], 0))) | |
660 | { | |
661 | CC_STATUS_INIT; | |
662 | output_asm_insn ("orh %h1,%?r0,%?r31", operands); | |
663 | } | |
664 | cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ; | |
665 | cc_status.mdep = XEXP (operands[1], 0); | |
666 | return "ld.l %L1(%?r31),%0"; | |
667 | } | |
668 | return "ld.l %m1,%0"; | |
669 | } | |
670 | if (GET_CODE (operands[1]) == CONST_INT) | |
671 | { | |
672 | if ((INTVAL (operands[1]) & 0xffff0000) == 0) | |
673 | return "or %L1,%?r0,%0"; | |
674 | if ((INTVAL (operands[1]) & 0x0000ffff) == 0) | |
675 | return "orh %H1,%?r0,%0"; | |
676 | if (operands[1] == const0_rtx) | |
677 | return "mov %?r0,%0"; | |
678 | } | |
679 | return "mov %1,%0"; | |
680 | } | |
681 | \f | |
682 | /* Output assembler code to perform a doubleword move insn | |
683 | with operands OPERANDS. */ | |
684 | ||
685 | char * | |
686 | output_move_double (operands) | |
687 | rtx *operands; | |
688 | { | |
689 | enum { REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP } optype0, optype1; | |
690 | rtx latehalf[2]; | |
691 | rtx addreg0 = 0, addreg1 = 0; | |
692 | ||
693 | /* First classify both operands. */ | |
694 | ||
695 | if (REG_P (operands[0])) | |
696 | optype0 = REGOP; | |
697 | else if (offsettable_memref_p (operands[0])) | |
698 | optype0 = OFFSOP; | |
699 | else if (GET_CODE (operands[0]) == MEM) | |
700 | optype0 = MEMOP; | |
701 | else | |
702 | optype0 = RNDOP; | |
703 | ||
704 | if (REG_P (operands[1])) | |
705 | optype1 = REGOP; | |
706 | else if (CONSTANT_P (operands[1])) | |
707 | optype1 = CNSTOP; | |
708 | else if (offsettable_memref_p (operands[1])) | |
709 | optype1 = OFFSOP; | |
710 | else if (GET_CODE (operands[1]) == MEM) | |
711 | optype1 = MEMOP; | |
712 | else | |
713 | optype1 = RNDOP; | |
714 | ||
715 | /* Check for the cases that the operand constraints are not | |
716 | supposed to allow to happen. Abort if we get one, | |
717 | because generating code for these cases is painful. */ | |
718 | ||
719 | if (optype0 == RNDOP || optype1 == RNDOP) | |
720 | abort (); | |
721 | ||
722 | /* If an operand is an unoffsettable memory ref, find a register | |
723 | we can increment temporarily to make it refer to the second word. */ | |
724 | ||
725 | if (optype0 == MEMOP) | |
726 | addreg0 = find_addr_reg (XEXP (operands[0], 0)); | |
727 | ||
728 | if (optype1 == MEMOP) | |
729 | addreg1 = find_addr_reg (XEXP (operands[1], 0)); | |
730 | ||
731 | /* ??? Perhaps in some cases move double words | |
732 | if there is a spare pair of floating regs. */ | |
733 | ||
734 | /* Ok, we can do one word at a time. | |
735 | Normally we do the low-numbered word first, | |
736 | but if either operand is autodecrementing then we | |
737 | do the high-numbered word first. | |
738 | ||
739 | In either case, set up in LATEHALF the operands to use | |
740 | for the high-numbered word and in some cases alter the | |
741 | operands in OPERANDS to be suitable for the low-numbered word. */ | |
742 | ||
743 | if (optype0 == REGOP) | |
744 | latehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1); | |
745 | else if (optype0 == OFFSOP) | |
746 | latehalf[0] = adj_offsettable_operand (operands[0], 4); | |
747 | else | |
748 | latehalf[0] = operands[0]; | |
749 | ||
750 | if (optype1 == REGOP) | |
751 | latehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1); | |
752 | else if (optype1 == OFFSOP) | |
753 | latehalf[1] = adj_offsettable_operand (operands[1], 4); | |
754 | else if (optype1 == CNSTOP) | |
755 | { | |
756 | if (GET_CODE (operands[1]) == CONST_DOUBLE) | |
757 | split_double (operands[1], &operands[1], &latehalf[1]); | |
758 | else if (CONSTANT_P (operands[1])) | |
759 | latehalf[1] = const0_rtx; | |
760 | } | |
761 | else | |
762 | latehalf[1] = operands[1]; | |
763 | ||
764 | /* If the first move would clobber the source of the second one, | |
765 | do them in the other order. | |
766 | ||
767 | RMS says "This happens only for registers; | |
768 | such overlap can't happen in memory unless the user explicitly | |
769 | sets it up, and that is an undefined circumstance." | |
770 | ||
771 | but it happens on the sparc when loading parameter registers, | |
772 | so I am going to define that circumstance, and make it work | |
773 | as expected. */ | |
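/* Added illustrative note: the overlap test below catches, for example, a
   DImode move from r5/r6 into r6/r7.  Moving the low-numbered word first
   would overwrite r6 (the source's high word) before it is read, so the
   high-numbered word is moved first in that case.  */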
774 | ||
775 | if (optype0 == REGOP && optype1 == REGOP | |
776 | && REGNO (operands[0]) == REGNO (latehalf[1])) | |
777 | { | |
778 | CC_STATUS_PARTIAL_INIT; | |
779 | /* Make any unoffsettable addresses point at high-numbered word. */ | |
780 | if (addreg0) | |
781 | output_asm_insn ("adds 0x4,%0,%0", &addreg0); | |
782 | if (addreg1) | |
783 | output_asm_insn ("adds 0x4,%0,%0", &addreg1); | |
784 | ||
785 | /* Do that word. */ | |
786 | output_asm_insn (singlemove_string (latehalf), latehalf); | |
787 | ||
788 | /* Undo the adds we just did. */ | |
789 | if (addreg0) | |
790 | output_asm_insn ("adds -0x4,%0,%0", &addreg0); | |
791 | if (addreg1) | |
792 | output_asm_insn ("adds -0x4,%0,%0", &addreg1); | |
793 | ||
794 | /* Do low-numbered word. */ | |
795 | return singlemove_string (operands); | |
796 | } | |
797 | else if (optype0 == REGOP && optype1 != REGOP | |
798 | && reg_overlap_mentioned_p (operands[0], operands[1])) | |
799 | { | |
800 | /* Do the late half first. */ | |
801 | output_asm_insn (singlemove_string (latehalf), latehalf); | |
802 | /* Then clobber. */ | |
803 | return singlemove_string (operands); | |
804 | } | |
805 | ||
806 | /* Normal case: do the two words, low-numbered first. */ | |
807 | ||
808 | output_asm_insn (singlemove_string (operands), operands); | |
809 | ||
810 | CC_STATUS_PARTIAL_INIT; | |
811 | /* Make any unoffsettable addresses point at high-numbered word. */ | |
812 | if (addreg0) | |
813 | output_asm_insn ("adds 0x4,%0,%0", &addreg0); | |
814 | if (addreg1) | |
815 | output_asm_insn ("adds 0x4,%0,%0", &addreg1); | |
816 | ||
817 | /* Do that word. */ | |
818 | output_asm_insn (singlemove_string (latehalf), latehalf); | |
819 | ||
820 | /* Undo the adds we just did. */ | |
821 | if (addreg0) | |
822 | output_asm_insn ("adds -0x4,%0,%0", &addreg0); | |
823 | if (addreg1) | |
824 | output_asm_insn ("adds -0x4,%0,%0", &addreg1); | |
825 | ||
826 | return ""; | |
827 | } | |
828 | \f | |
829 | char * | |
830 | output_fp_move_double (operands) | |
831 | rtx *operands; | |
832 | { | |
833 | /* If the source operand is any sort of zero, use f0 instead. */ | |
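/* Added note: f0 (like f1) always reads as zero on the i860, which is what
   makes this substitution, and the "fmov.dd f0,%0" case below, safe.  */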
834 | ||
835 | if (operands[1] == CONST0_RTX (GET_MODE (operands[1]))) | |
836 | operands[1] = gen_rtx (REG, DFmode, F0_REGNUM); | |
837 | ||
838 | if (FP_REG_P (operands[0])) | |
839 | { | |
840 | if (FP_REG_P (operands[1])) | |
841 | return "fmov.dd %1,%0"; | |
842 | if (GET_CODE (operands[1]) == REG) | |
843 | { | |
844 | output_asm_insn ("ixfr %1,%0", operands); | |
845 | operands[0] = gen_rtx (REG, VOIDmode, REGNO (operands[0]) + 1); | |
846 | operands[1] = gen_rtx (REG, VOIDmode, REGNO (operands[1]) + 1); | |
847 | return "ixfr %1,%0"; | |
848 | } | |
849 | if (operands[1] == CONST0_RTX (DFmode)) | |
850 | return "fmov.dd f0,%0"; | |
851 | if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0))) | |
852 | { | |
853 | if (! ((cc_prev_status.flags & CC_KNOW_HI_R31) | |
854 | && (cc_prev_status.flags & CC_HI_R31_ADJ) | |
855 | && cc_prev_status.mdep == XEXP (operands[1], 0))) | |
856 | { | |
857 | CC_STATUS_INIT; | |
858 | output_asm_insn ("orh %h1,%?r0,%?r31", operands); | |
859 | } | |
860 | cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ; | |
861 | cc_status.mdep = XEXP (operands[1], 0); | |
862 | return "fld.d %L1(%?r31),%0"; | |
863 | } | |
864 | return "fld.d %1,%0"; | |
865 | } | |
866 | else if (FP_REG_P (operands[1])) | |
867 | { | |
868 | if (GET_CODE (operands[0]) == REG) | |
869 | { | |
870 | output_asm_insn ("fxfr %1,%0", operands); | |
871 | operands[0] = gen_rtx (REG, VOIDmode, REGNO (operands[0]) + 1); | |
872 | operands[1] = gen_rtx (REG, VOIDmode, REGNO (operands[1]) + 1); | |
873 | return "fxfr %1,%0"; | |
874 | } | |
875 | if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0))) | |
876 | { | |
877 | if (! ((cc_prev_status.flags & CC_KNOW_HI_R31) | |
878 | && (cc_prev_status.flags & CC_HI_R31_ADJ) | |
879 | && cc_prev_status.mdep == XEXP (operands[0], 0))) | |
880 | { | |
881 | CC_STATUS_INIT; | |
882 | output_asm_insn ("orh %h0,%?r0,%?r31", operands); | |
883 | } | |
884 | cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ; | |
885 | cc_status.mdep = XEXP (operands[0], 0); | |
886 | return "fst.d %1,%L0(%?r31)"; | |
887 | } | |
888 | return "fst.d %1,%0"; | |
889 | } | |
890 | else | |
891 | abort (); | |
892 | /* NOTREACHED */ | |
893 | return NULL; | |
894 | } | |
895 | \f | |
896 | /* Return a REG that occurs in ADDR with coefficient 1. | |
897 | ADDR can be effectively incremented by incrementing REG. */ | |
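/* Added illustrative note: for (plus (reg r5) (const_int 8)) this returns
   r5; for (plus (plus (reg r5) (reg r6)) (const_int 8)) it first steps past
   the constant and then returns the first register it finds (here r5).  */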
898 | ||
899 | static rtx | |
900 | find_addr_reg (addr) | |
901 | rtx addr; | |
902 | { | |
903 | while (GET_CODE (addr) == PLUS) | |
904 | { | |
905 | if (GET_CODE (XEXP (addr, 0)) == REG) | |
906 | addr = XEXP (addr, 0); | |
907 | else if (GET_CODE (XEXP (addr, 1)) == REG) | |
908 | addr = XEXP (addr, 1); | |
909 | else if (CONSTANT_P (XEXP (addr, 0))) | |
910 | addr = XEXP (addr, 1); | |
911 | else if (CONSTANT_P (XEXP (addr, 1))) | |
912 | addr = XEXP (addr, 0); | |
913 | else | |
914 | abort (); | |
915 | } | |
916 | if (GET_CODE (addr) == REG) | |
917 | return addr; | |
918 | abort (); | |
919 | /* NOTREACHED */ | |
920 | return NULL; | |
921 | } | |
922 | ||
923 | /* Return a template for a load instruction with mode MODE and | |
924 | arguments from the string ARGS. | |
925 | ||
926 | This string is in static storage. */ | |
927 | ||
928 | static char * | |
929 | load_opcode (mode, args, reg) | |
930 | enum machine_mode mode; | |
931 | char *args; | |
932 | rtx reg; | |
933 | { | |
934 | static char buf[30]; | |
935 | char *opcode; | |
936 | ||
937 | switch (mode) | |
938 | { | |
939 | case QImode: | |
940 | opcode = "ld.b"; | |
941 | break; | |
942 | ||
943 | case HImode: | |
944 | opcode = "ld.s"; | |
945 | break; | |
946 | ||
947 | case SImode: | |
948 | case SFmode: | |
949 | if (FP_REG_P (reg)) | |
950 | opcode = "fld.l"; | |
951 | else | |
952 | opcode = "ld.l"; | |
953 | break; | |
954 | ||
955 | case DImode: | |
956 | if (!FP_REG_P (reg)) | |
957 | abort (); | |
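/* Falls through: a DImode value only reaches here in an FP register and is
   loaded with the same fld.d used for DFmode.  (Added comment.)  */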
958 | case DFmode: | |
959 | opcode = "fld.d"; | |
960 | break; | |
961 | ||
962 | default: | |
963 | abort (); | |
964 | } | |
965 | ||
966 | sprintf (buf, "%s %s", opcode, args); | |
967 | return buf; | |
968 | } | |
969 | ||
970 | /* Return a template for a store instruction with mode MODE and | |
971 | arguments from the string ARGS. | |
972 | ||
973 | This string is in static storage. */ | |
974 | ||
975 | static char * | |
976 | store_opcode (mode, args, reg) | |
977 | enum machine_mode mode; | |
978 | char *args; | |
979 | rtx reg; | |
980 | { | |
981 | static char buf[30]; | |
982 | char *opcode; | |
983 | ||
984 | switch (mode) | |
985 | { | |
986 | case QImode: | |
987 | opcode = "st.b"; | |
988 | break; | |
989 | ||
990 | case HImode: | |
991 | opcode = "st.s"; | |
992 | break; | |
993 | ||
994 | case SImode: | |
995 | case SFmode: | |
996 | if (FP_REG_P (reg)) | |
997 | opcode = "fst.l"; | |
998 | else | |
999 | opcode = "st.l"; | |
1000 | break; | |
1001 | ||
1002 | case DImode: | |
1003 | if (!FP_REG_P (reg)) | |
1004 | abort (); | |
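/* Falls through: DImode values in FP registers are stored with the same
   fst.d used for DFmode.  (Added comment.)  */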
1005 | case DFmode: | |
1006 | opcode = "fst.d"; | |
1007 | break; | |
1008 | ||
1009 | default: | |
1010 | abort (); | |
1011 | } | |
1012 | ||
1013 | sprintf (buf, "%s %s", opcode, args); | |
1014 | return buf; | |
1015 | } | |
1016 | \f | |
1017 | /* Output a store-in-memory whose operands are OPERANDS[0,1]. | |
1018 | OPERANDS[0] is a MEM, and OPERANDS[1] is a reg or zero. | |
1019 | ||
1020 | This function returns a template for an insn. | |
1021 | This is in static storage. | |
1022 | ||
1023 | It may also output some insns directly. | |
1024 | It may alter the values of operands[0] and operands[1]. */ | |
1025 | ||
1026 | char * | |
1027 | output_store (operands) | |
1028 | rtx *operands; | |
1029 | { | |
1030 | enum machine_mode mode = GET_MODE (operands[0]); | |
1031 | rtx address = XEXP (operands[0], 0); | |
1032 | char *string; | |
1033 | ||
1034 | cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ; | |
1035 | cc_status.mdep = address; | |
1036 | ||
1037 | if (! ((cc_prev_status.flags & CC_KNOW_HI_R31) | |
1038 | && (cc_prev_status.flags & CC_HI_R31_ADJ) | |
1039 | && address == cc_prev_status.mdep)) | |
1040 | { | |
1041 | CC_STATUS_INIT; | |
1042 | output_asm_insn ("orh %h0,%?r0,%?r31", operands); | |
1043 | cc_prev_status.mdep = address; | |
1044 | } | |
1045 | ||
1046 | /* Store zero in two parts when appropriate. */ | |
1047 | if (mode == DFmode && operands[1] == CONST0_RTX (DFmode)) | |
1048 | return store_opcode (DFmode, "%r1,%L0(%?r31)", operands[1]); | |
1049 | ||
1050 | /* Code below isn't smart enough to move a doubleword in two parts, | |
1051 | so use output_move_double to do that in the cases that require it. */ | |
1052 | if ((mode == DImode || mode == DFmode) | |
1053 | && ! FP_REG_P (operands[1])) | |
1054 | return output_move_double (operands); | |
1055 | ||
1056 | return store_opcode (mode, "%r1,%L0(%?r31)", operands[1]); | |
1057 | } | |
1058 | ||
1059 | /* Output a load-from-memory whose operands are OPERANDS[0,1]. | |
1060 | OPERANDS[0] is a reg, and OPERANDS[1] is a mem. | |
1061 | ||
1062 | This function returns a template for an insn. | |
1063 | This is in static storage. | |
1064 | ||
1065 | It may also output some insns directly. | |
1066 | It may alter the values of operands[0] and operands[1]. */ | |
1067 | ||
1068 | char * | |
1069 | output_load (operands) | |
1070 | rtx *operands; | |
1071 | { | |
1072 | enum machine_mode mode = GET_MODE (operands[0]); | |
1073 | rtx address = XEXP (operands[1], 0); | |
1074 | ||
1075 | /* We don't bother trying to see if we know %hi(address). | |
1076 | This is because we are doing a load, and if we know the | |
1077 | %hi value, we probably also know that value in memory. */ | |
1078 | cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ; | |
1079 | cc_status.mdep = address; | |
1080 | ||
1081 | if (! ((cc_prev_status.flags & CC_KNOW_HI_R31) | |
1082 | && (cc_prev_status.flags & CC_HI_R31_ADJ) | |
1083 | && address == cc_prev_status.mdep | |
1084 | && cc_prev_status.mdep == cc_status.mdep)) | |
1085 | { | |
1086 | CC_STATUS_INIT; | |
1087 | output_asm_insn ("orh %h1,%?r0,%?r31", operands); | |
1088 | cc_prev_status.mdep = address; | |
1089 | } | |
1090 | ||
1091 | /* Code below isn't smart enough to move a doubleword in two parts, | |
1092 | so use output_move_double to do that in the cases that require it. */ | |
1093 | if ((mode == DImode || mode == DFmode) | |
1094 | && ! FP_REG_P (operands[0])) | |
1095 | return output_move_double (operands); | |
1096 | ||
1097 | return load_opcode (mode, "%L1(%?r31),%0", operands[0]); | |
1098 | } | |
1099 | \f | |
1100 | #if 0 | |
1101 | /* Load the address specified by OPERANDS[3] into the register | |
1102 | specified by OPERANDS[0]. | |
1103 | ||
1104 | OPERANDS[3] may be the result of a sum, hence it could either be: | |
1105 | ||
1106 | (1) CONST | |
1107 | (2) REG | |
1108 | (3) REG + CONST_INT | |
1109 | (4) REG + REG + CONST_INT | |
1110 | (5) REG + REG (special case of 4). | |
1111 | ||
1112 | Note that (4) is not a legitimate address. | |
1113 | All cases are handled here. */ | |
1114 | ||
1115 | void | |
1116 | output_load_address (operands) | |
1117 | rtx *operands; | |
1118 | { | |
1119 | rtx base, offset; | |
1120 | ||
1121 | if (CONSTANT_P (operands[3])) | |
1122 | { | |
1123 | output_asm_insn ("mov %3,%0", operands); | |
1124 | return; | |
1125 | } | |
1126 | ||
1127 | if (REG_P (operands[3])) | |
1128 | { | |
1129 | if (REGNO (operands[0]) != REGNO (operands[3])) | |
1130 | output_asm_insn ("shl %?r0,%3,%0", operands); | |
1131 | return; | |
1132 | } | |
1133 | ||
1134 | if (GET_CODE (operands[3]) != PLUS) | |
1135 | abort (); | |
1136 | ||
1137 | base = XEXP (operands[3], 0); | |
1138 | offset = XEXP (operands[3], 1); | |
1139 | ||
1140 | if (GET_CODE (base) == CONST_INT) | |
1141 | { | |
1142 | rtx tmp = base; | |
1143 | base = offset; | |
1144 | offset = tmp; | |
1145 | } | |
1146 | ||
1147 | if (GET_CODE (offset) != CONST_INT) | |
1148 | { | |
1149 | /* Operand is (PLUS (REG) (REG)). */ | |
1150 | base = operands[3]; | |
1151 | offset = const0_rtx; | |
1152 | } | |
1153 | ||
1154 | if (REG_P (base)) | |
1155 | { | |
1156 | operands[6] = base; | |
1157 | operands[7] = offset; | |
1158 | CC_STATUS_PARTIAL_INIT; | |
1159 | if (SMALL_INT (offset)) | |
1160 | output_asm_insn ("adds %7,%6,%0", operands); | |
1161 | else | |
1162 | output_asm_insn ("mov %7,%0\n\tadds %0,%6,%0", operands); | |
1163 | } | |
1164 | else if (GET_CODE (base) == PLUS) | |
1165 | { | |
1166 | operands[6] = XEXP (base, 0); | |
1167 | operands[7] = XEXP (base, 1); | |
1168 | operands[8] = offset; | |
1169 | ||
1170 | CC_STATUS_PARTIAL_INIT; | |
1171 | if (SMALL_INT (offset)) | |
1172 | output_asm_insn ("adds %6,%7,%0\n\tadds %8,%0,%0", operands); | |
1173 | else | |
1174 | output_asm_insn ("mov %8,%0\n\tadds %0,%6,%0\n\tadds %0,%7,%0", operands); | |
1175 | } | |
1176 | else | |
1177 | abort (); | |
1178 | } | |
1179 | #endif | |
1180 | ||
1181 | /* Output code to place a size count SIZE in register REG. | |
1182 | Because block moves are pipelined, we don't include the | |
1183 | first element in the transfer of SIZE to REG. | |
1184 | For this, we subtract ALIGN. (Actually, I think it is not | |
1185 | right to subtract on this machine, so right now we don't.) */ | |
1186 | ||
1187 | static void | |
1188 | output_size_for_block_move (size, reg, align) | |
1189 | rtx size, reg, align; | |
1190 | { | |
1191 | rtx xoperands[3]; | |
1192 | ||
1193 | xoperands[0] = reg; | |
1194 | xoperands[1] = size; | |
1195 | xoperands[2] = align; | |
1196 | ||
1197 | #if 1 | |
1198 | cc_status.flags &= ~ CC_KNOW_HI_R31; | |
1199 | output_asm_insn ("mov %1,%0", xoperands); | |
1200 | #else | |
1201 | if (GET_CODE (size) == REG) | |
1202 | output_asm_insn ("sub %2,%1,%0", xoperands); | |
1203 | else | |
1204 | { | |
1205 | xoperands[1] | |
1206 | = gen_rtx (CONST_INT, VOIDmode, INTVAL (size) - INTVAL (align)); | |
1207 | cc_status.flags &= ~ CC_KNOW_HI_R31; | |
1208 | output_asm_insn ("mov %1,%0", xoperands); | |
1209 | } | |
1210 | #endif | |
1211 | } | |
1212 | ||
1213 | /* Emit code to perform a block move. | |
1214 | ||
1215 | OPERANDS[0] is the destination. | |
1216 | OPERANDS[1] is the source. | |
1217 | OPERANDS[2] is the size. | |
1218 | OPERANDS[3] is the known safe alignment. | |
1219 | OPERANDS[4..6] are pseudos we can safely clobber as temps. */ | |
1220 | ||
1221 | char * | |
1222 | output_block_move (operands) | |
1223 | rtx *operands; | |
1224 | { | |
1225 | /* A vector for our computed operands. Note that output_load_address | |
1226 | makes use of (and can clobber) up to the 8th element of this vector. */ | |
1227 | rtx xoperands[10]; | |
1228 | rtx zoperands[10]; | |
1229 | static int movstrsi_label = 0; | |
1230 | int i, j; | |
1231 | rtx temp1 = operands[4]; | |
1232 | rtx alignrtx = operands[3]; | |
1233 | int align = INTVAL (alignrtx); | |
1234 | int chunk_size; | |
1235 | ||
1236 | xoperands[0] = operands[0]; | |
1237 | xoperands[1] = operands[1]; | |
1238 | xoperands[2] = temp1; | |
1239 | ||
1240 | /* We can't move more than four bytes at a time | |
1241 | because we have only one register to move them through. */ | |
1242 | if (align > 4) | |
1243 | { | |
1244 | align = 4; | |
1245 | alignrtx = gen_rtx (CONST_INT, VOIDmode, 4); | |
1246 | } | |
1247 | ||
1248 | /* Recognize special cases of block moves. These occur | |
1249 | when GNU C++ is forced to treat something as BLKmode | |
1250 | to keep it in memory, when its mode could be represented | |
1251 | with something smaller. | |
1252 | ||
1253 | We cannot do this for global variables, since we don't know | |
1254 | what pages they don't cross. Sigh. */ | |
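/* Added illustrative note: e.g. a 12-byte, word-aligned copy whose
   addresses stay offsettable is emitted inline as three ld.l/st.l pairs
   through r31 (at offsets 8, 4 and 0), with no loop at all.  */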
1255 | if (GET_CODE (operands[2]) == CONST_INT | |
1256 | && ! CONSTANT_ADDRESS_P (operands[0]) | |
1257 | && ! CONSTANT_ADDRESS_P (operands[1])) | |
1258 | { | |
1259 | int size = INTVAL (operands[2]); | |
1260 | rtx op0 = xoperands[0]; | |
1261 | rtx op1 = xoperands[1]; | |
1262 | ||
1263 | if ((align & 3) == 0 && (size & 3) == 0 && (size >> 2) <= 16) | |
1264 | { | |
1265 | if (memory_address_p (SImode, plus_constant (op0, size)) | |
1266 | && memory_address_p (SImode, plus_constant (op1, size))) | |
1267 | { | |
1268 | cc_status.flags &= ~CC_KNOW_HI_R31; | |
1269 | for (i = (size>>2)-1; i >= 0; i--) | |
1270 | { | |
1271 | xoperands[0] = plus_constant (op0, i * 4); | |
1272 | xoperands[1] = plus_constant (op1, i * 4); | |
1273 | output_asm_insn ("ld.l %a1,%?r31\n\tst.l %?r31,%a0", | |
1274 | xoperands); | |
1275 | } | |
1276 | return ""; | |
1277 | } | |
1278 | } | |
1279 | else if ((align & 1) == 0 && (size & 1) == 0 && (size >> 1) <= 16) | |
1280 | { | |
1281 | if (memory_address_p (HImode, plus_constant (op0, size)) | |
1282 | && memory_address_p (HImode, plus_constant (op1, size))) | |
1283 | { | |
1284 | cc_status.flags &= ~CC_KNOW_HI_R31; | |
1285 | for (i = (size>>1)-1; i >= 0; i--) | |
1286 | { | |
1287 | xoperands[0] = plus_constant (op0, i * 2); | |
1288 | xoperands[1] = plus_constant (op1, i * 2); | |
1289 | output_asm_insn ("ld.s %a1,%?r31\n\tst.s %?r31,%a0", | |
1290 | xoperands); | |
1291 | } | |
1292 | return ""; | |
1293 | } | |
1294 | } | |
1295 | else if (size <= 16) | |
1296 | { | |
1297 | if (memory_address_p (QImode, plus_constant (op0, size)) | |
1298 | && memory_address_p (QImode, plus_constant (op1, size))) | |
1299 | { | |
1300 | cc_status.flags &= ~CC_KNOW_HI_R31; | |
1301 | for (i = size-1; i >= 0; i--) | |
1302 | { | |
1303 | xoperands[0] = plus_constant (op0, i); | |
1304 | xoperands[1] = plus_constant (op1, i); | |
1305 | output_asm_insn ("ld.b %a1,%?r31\n\tst.b %?r31,%a0", | |
1306 | xoperands); | |
1307 | } | |
1308 | return ""; | |
1309 | } | |
1310 | } | |
1311 | } | |
1312 | ||
1313 | /* Since we clobber untold things, nix the condition codes. */ | |
1314 | CC_STATUS_INIT; | |
1315 | ||
1316 | /* This is the size of the transfer. | |
1317 | Either use the register which already contains the size, | |
1318 | or use a free register (used by no operands). */ | |
1319 | output_size_for_block_move (operands[2], operands[4], alignrtx); | |
1320 | ||
1321 | #if 0 | |
1322 | /* Also emit code to decrement the size value by ALIGN. */ | |
1323 | zoperands[0] = operands[0]; | |
1324 | zoperands[3] = plus_constant (operands[0], align); | |
1325 | output_load_address (zoperands); | |
1326 | #endif | |
1327 | ||
1328 | /* Generate number for unique label. */ | |
1329 | ||
1330 | xoperands[3] = gen_rtx (CONST_INT, VOIDmode, movstrsi_label++); | |
1331 | ||
1332 | /* Calculate the size of the chunks we will be trying to move first. */ | |
1333 | ||
1334 | #if 0 | |
1335 | if ((align & 3) == 0) | |
1336 | chunk_size = 4; | |
1337 | else if ((align & 1) == 0) | |
1338 | chunk_size = 2; | |
1339 | else | |
1340 | #endif | |
1341 | chunk_size = 1; | |
1342 | ||
1343 | /* Copy the increment (negative) to a register for bla insn. */ | |
1344 | ||
1345 | xoperands[4] = gen_rtx (CONST_INT, VOIDmode, - chunk_size); | |
1346 | xoperands[5] = operands[5]; | |
1347 | output_asm_insn ("adds %4,%?r0,%5", xoperands); | |
1348 | ||
1349 | /* Predecrement the loop counter. This happens again also in the `bla' | |
1350 | instruction which precedes the loop, but we need to have it done |
1351 | two times before we enter the loop because of the bizarre semantics | |
1352 | of the bla instruction. */ |
1353 | ||
1354 | output_asm_insn ("adds %5,%2,%2", xoperands); | |
1355 | ||
1356 | /* Check for the case where the original count was less than or equal to | |
1357 | zero. Avoid going through the loop at all if the original count was | |
1358 | indeed less than or equal to zero. Note that we treat the count as | |
1359 | if it were a signed 32-bit quantity here, rather than an unsigned one, | |
1360 | even though we really shouldn't. We have to do this because of the | |
1361 | semantics of the `ble' instruction, which assume that the count is | |
1362 | a signed 32-bit value. Anyway, in practice it won't matter because | |
1363 | nobody is going to try to do a memcpy() of more than half of the | |
1364 | entire address space (i.e. 2 gigabytes) anyway. */ | |
1365 | ||
1366 | output_asm_insn ("bc .Le%3", xoperands); | |
1367 | ||
1368 | /* Make available a register which is a temporary. */ | |
1369 | ||
1370 | xoperands[6] = operands[6]; | |
1371 | ||
1372 | /* Now the actual loop. | |
1373 | In xoperands, elements 1 and 0 are the input and output vectors. | |
1374 | Element 2 is the loop index. Element 5 is the increment. */ | |
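/* Added illustrative sketch of the emitted loop for chunk_size == 1
   (symbolic names stand for the operands described above; <n> is the
   label number taken from element 3):

       subs  src,inc,src
       bla   inc,count,.Lm<n>
       adds  dst,count,tmp
   .Lm<n>:
   .Ls<n>:
       adds  inc,tmp,tmp
       ld.b  count(src),r31
       bla   inc,count,.Ls<n>
       st.b  r31,2(tmp)
   .Le<n>:
*/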
1375 | ||
1376 | output_asm_insn ("subs %1,%5,%1", xoperands); | |
1377 | output_asm_insn ("bla %5,%2,.Lm%3", xoperands); | |
1378 | output_asm_insn ("adds %0,%2,%6", xoperands); | |
1379 | output_asm_insn ("\n.Lm%3:", xoperands); /* Label for bla above. */ | |
1380 | output_asm_insn ("\n.Ls%3:", xoperands); /* Loop start label. */ | |
1381 | output_asm_insn ("adds %5,%6,%6", xoperands); | |
1382 | ||
1383 | /* NOTE: The code here which is supposed to handle the cases where the | |
1384 | sources and destinations are known to start on a 4 or 2 byte boundary | |
1385 | is currently broken. It fails to do anything about the overflow | |
1386 | bytes which might still need to be copied even after we have copied | |
1387 | some number of words or halfwords. Thus, for now we use the lowest | |
1388 | common denominator, i.e. the code which just copies some number of | |
1389 | totally unaligned individual bytes. (See the calculation of | |
1390 | chunk_size above.) */ | |
1391 | ||
1392 | if (chunk_size == 4) | |
1393 | { | |
1394 | output_asm_insn ("ld.l %2(%1),%?r31", xoperands); | |
1395 | output_asm_insn ("bla %5,%2,.Ls%3", xoperands); | |
1396 | output_asm_insn ("st.l %?r31,8(%6)", xoperands); | |
1397 | } | |
1398 | else if (chunk_size == 2) | |
1399 | { | |
1400 | output_asm_insn ("ld.s %2(%1),%?r31", xoperands); | |
1401 | output_asm_insn ("bla %5,%2,.Ls%3", xoperands); | |
1402 | output_asm_insn ("st.s %?r31,4(%6)", xoperands); | |
1403 | } | |
1404 | else /* chunk_size == 1 */ | |
1405 | { | |
1406 | output_asm_insn ("ld.b %2(%1),%?r31", xoperands); | |
1407 | output_asm_insn ("bla %5,%2,.Ls%3", xoperands); | |
1408 | output_asm_insn ("st.b %?r31,2(%6)", xoperands); | |
1409 | } | |
1410 | output_asm_insn ("\n.Le%3:", xoperands); /* Here if count <= 0. */ | |
1411 | ||
1412 | return ""; | |
1413 | } | |
1414 | \f | |
1415 | /* Output a delayed branch insn with the delay insn in its | |
1416 | branch slot. The delayed branch insn template is in TEMPLATE, | |
1417 | with operands OPERANDS. The insn in its delay slot is INSN. | |
1418 | ||
1419 | As a special case, since we know that all memory transfers are via | |
1420 | ld/st insns, if we see a (MEM (SYMBOL_REF ...)) we split the memory | |
1421 | reference around the branch as follows: | |
1422 | ||
1423 | orh ha%x,%?r0,%?r31 | |
1424 | b ... | |
1425 | ld/st l%x(%?r31),... | |
1426 | ||
1427 | As another special case, we handle loading (SYMBOL_REF ...) and | |
1428 | other large constants around branches as well: | |
1429 | ||
1430 | orh h%x,%?r0,%0 | |
1431 | b ... | |
1432 | or l%x,%0,%1 | |
1433 | ||
1434 | */ | |
1435 | ||
1436 | char * | |
1437 | output_delayed_branch (template, operands, insn) | |
1438 | char *template; | |
1439 | rtx *operands; | |
1440 | rtx insn; | |
1441 | { | |
1442 | rtx src = XVECEXP (PATTERN (insn), 0, 1); | |
1443 | rtx dest = XVECEXP (PATTERN (insn), 0, 0); | |
1444 | ||
1445 | /* See if we are doing some branch together with setting some register | |
1446 | to some 32-bit value which does (or may) have some of the high-order | |
1447 | 16 bits set. If so, we need to set the register in two stages. One | |
1448 | stage must be done before the branch, and the other one can be done | |
1449 | in the delay slot. */ | |
1450 | ||
1451 | if ( (GET_CODE (src) == CONST_INT | |
1452 | && ((unsigned) INTVAL (src) & (unsigned) 0xffff0000) != (unsigned) 0) | |
1453 | || (GET_CODE (src) == SYMBOL_REF) | |
1454 | || (GET_CODE (src) == LABEL_REF) | |
1455 | || (GET_CODE (src) == CONST)) | |
1456 | { | |
1457 | rtx xoperands[2]; | |
1458 | xoperands[0] = dest; | |
1459 | xoperands[1] = src; | |
1460 | ||
1461 | CC_STATUS_PARTIAL_INIT; | |
1462 | /* Output the `orh' insn. */ | |
1463 | output_asm_insn ("orh %H1,%?r0,%0", xoperands); | |
1464 | ||
1465 | /* Output the branch instruction next. */ | |
1466 | output_asm_insn (template, operands); | |
1467 | ||
1468 | /* Now output the `or' insn. */ | |
1469 | output_asm_insn ("or %L1,%0,%0", xoperands); | |
1470 | } | |
1471 | else if ((GET_CODE (src) == MEM | |
1472 | && CONSTANT_ADDRESS_P (XEXP (src, 0))) | |
1473 | || (GET_CODE (dest) == MEM | |
1474 | && CONSTANT_ADDRESS_P (XEXP (dest, 0)))) | |
1475 | { | |
1476 | rtx xoperands[2]; | |
1477 | char *split_template; | |
1478 | xoperands[0] = dest; | |
1479 | xoperands[1] = src; | |
1480 | ||
1481 | /* Output the `orh' insn. */ | |
1482 | if (GET_CODE (src) == MEM) | |
1483 | { | |
1484 | if (! ((cc_prev_status.flags & CC_KNOW_HI_R31) | |
1485 | && (cc_prev_status.flags & CC_HI_R31_ADJ) | |
1486 | && cc_prev_status.mdep == XEXP (operands[1], 0))) | |
1487 | { | |
1488 | CC_STATUS_INIT; | |
1489 | output_asm_insn ("orh %h1,%?r0,%?r31", xoperands); | |
1490 | } | |
1491 | split_template = load_opcode (GET_MODE (dest), | |
1492 | "%L1(%?r31),%0", dest); | |
1493 | } | |
1494 | else | |
1495 | { | |
1496 | if (! ((cc_prev_status.flags & CC_KNOW_HI_R31) | |
1497 | && (cc_prev_status.flags & CC_HI_R31_ADJ) | |
1498 | && cc_prev_status.mdep == XEXP (operands[0], 0))) | |
1499 | { | |
1500 | CC_STATUS_INIT; | |
1501 | output_asm_insn ("orh %h0,%?r0,%?r31", xoperands); | |
1502 | } | |
1503 | split_template = store_opcode (GET_MODE (dest), | |
1504 | "%r1,%L0(%?r31)", src); | |
1505 | } | |
1506 | ||
1507 | /* Output the branch instruction next. */ | |
1508 | output_asm_insn (template, operands); | |
1509 | ||
1510 | /* Now output the load or store. | |
1511 | No need to do a CC_STATUS_INIT, because we are branching anyway. */ | |
1512 | output_asm_insn (split_template, xoperands); | |
1513 | } | |
1514 | else | |
1515 | { | |
1516 | int insn_code_number; | |
1517 | rtx pat = gen_rtx (SET, VOIDmode, dest, src); | |
1518 | rtx delay_insn = gen_rtx (INSN, VOIDmode, 0, 0, 0, pat, -1, 0, 0); | |
1519 | int i; | |
1520 | ||
1521 | /* Output the branch instruction first. */ | |
1522 | output_asm_insn (template, operands); | |
1523 | ||
1524 | /* Now recognize the insn which we put in its delay slot. | |
1525 | We must do this after outputting the branch insn, |
1526 | since operands may just be a pointer to `recog_operand'. */ |
1527 | INSN_CODE (delay_insn) = insn_code_number = recog (pat, delay_insn); | |
1528 | if (insn_code_number == -1) | |
1529 | abort (); | |
1530 | ||
1531 | for (i = 0; i < insn_n_operands[insn_code_number]; i++) | |
1532 | { | |
1533 | if (GET_CODE (recog_operand[i]) == SUBREG) | |
1534 | recog_operand[i] = alter_subreg (recog_operand[i]); | |
1535 | } | |
1536 | ||
1537 | insn_extract (delay_insn); | |
1538 | if (! constrain_operands (insn_code_number, 1)) | |
1539 | fatal_insn_not_found (delay_insn); | |
1540 | ||
1541 | template = insn_template[insn_code_number]; | |
1542 | if (template == 0) | |
1543 | template = (*insn_outfun[insn_code_number]) (recog_operand, delay_insn); | |
1544 | output_asm_insn (template, recog_operand); | |
1545 | } | |
1546 | CC_STATUS_INIT; | |
1547 | return ""; | |
1548 | } | |
1549 | ||
1550 | /* Output a newly constructed insn DELAY_INSN. */ | |
1551 | char * | |
1552 | output_delay_insn (delay_insn) | |
1553 | rtx delay_insn; | |
1554 | { | |
1555 | char *template; | |
1556 | int insn_code_number; | |
1557 | int i; | |
1558 | ||
1559 | /* Now recognize the insn which we put in its delay slot. | |
1560 | We must do this after outputting the branch insn, |
1561 | since operands may just be a pointer to `recog_operand'. */ |
1562 | insn_code_number = recog_memoized (delay_insn); | |
1563 | if (insn_code_number == -1) | |
1564 | abort (); | |
1565 | ||
1566 | /* Extract the operands of this delay insn. */ | |
1567 | INSN_CODE (delay_insn) = insn_code_number; | |
1568 | insn_extract (delay_insn); | |
1569 | ||
1570 | /* It is possible that this insn has not been properly scanned by final |
1571 | yet. If this insn's operands don't appear in the peephole's |
1572 | actual operands, then they won't be fixed up by final, so we | |
1573 | make sure they get fixed up here. -- This is a kludge. */ | |
1574 | for (i = 0; i < insn_n_operands[insn_code_number]; i++) | |
1575 | { | |
1576 | if (GET_CODE (recog_operand[i]) == SUBREG) | |
1577 | recog_operand[i] = alter_subreg (recog_operand[i]); | |
1578 | } | |
1579 | ||
1580 | #ifdef REGISTER_CONSTRAINTS | |
1581 | if (! constrain_operands (insn_code_number)) | |
1582 | abort (); | |
1583 | #endif | |
1584 | ||
1585 | cc_prev_status = cc_status; | |
1586 | ||
1587 | /* Update `cc_status' for this instruction. | |
1588 | The instruction's output routine may change it further. | |
1589 | If the output routine for a jump insn needs to depend | |
1590 | on the cc status, it should look at cc_prev_status. */ | |
1591 | ||
1592 | NOTICE_UPDATE_CC (PATTERN (delay_insn), delay_insn); | |
1593 | ||
1594 | /* Now get the template for what this insn would | |
1595 | have been, without the branch. */ | |
1596 | ||
1597 | template = insn_template[insn_code_number]; | |
1598 | if (template == 0) | |
1599 | template = (*insn_outfun[insn_code_number]) (recog_operand, delay_insn); | |
1600 | output_asm_insn (template, recog_operand); | |
1601 | return ""; | |
1602 | } | |
1603 | \f | |
1604 | /* Special routine to convert an SFmode value represented as a | |
1605 | CONST_DOUBLE into its equivalent unsigned long bit pattern. | |
1606 | We convert the value from a double precision floating-point | |
1607 | value to single precision first, and thence to a bit-wise | |
1608 | equivalent unsigned long value. This routine is used when | |
1609 | generating an immediate move of an SFmode value directly | |
1610 | into a general register because the svr4 assembler doesn't | |
1611 | grok floating literals in instruction operand contexts. */ | |
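/* Added illustrative note: e.g. the SFmode constant 1.0 comes back as the
   IEEE single-precision bit pattern 0x3f800000, which the caller can then
   emit with the usual orh/or immediate sequence.  */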
1612 | ||
1613 | unsigned long | |
1614 | sfmode_constant_to_ulong (x) | |
1615 | rtx x; | |
1616 | { | |
1617 | REAL_VALUE_TYPE d; |
1618 | union { float f; unsigned long i; } u2; |
1619 | ||
1620 | if (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != SFmode) | |
1621 | abort (); | |
1622 | ||
1623 | #if TARGET_FLOAT_FORMAT != HOST_FLOAT_FORMAT |
1624 | # error IEEE emulation needed |
1625 | #endif |
1626 | REAL_VALUE_FROM_CONST_DOUBLE (d, x); |
1627 | u2.f = d; |
1628 | return u2.i; |
1629 | } | |
1630 | \f | |
1631 | /* This function generates the assembly code for function entry. | |
1632 | The macro FUNCTION_PROLOGUE in i860.h is defined to call this function. | |
1633 | ||
1634 | ASM_FILE is a stdio stream to output the code to. | |
1635 | SIZE is an int: how many units of temporary storage to allocate. | |
1636 | ||
1637 | Refer to the array `regs_ever_live' to determine which registers | |
1638 | to save; `regs_ever_live[I]' is nonzero if register number I | |
1639 | is ever used in the function. This macro is responsible for | |
1640 | knowing which registers should not be saved even if used. | |
1641 | ||
1642 | NOTE: `frame_lower_bytes' is the count of bytes which will lie | |
1643 | between the new `fp' value and the new `sp' value after the | |
1644 | prologue is done. `frame_upper_bytes' is the count of bytes | |
1645 | that will lie between the new `fp' and the *old* `sp' value | |
1646 | after the new `fp' is setup (in the prologue). The upper | |
1647 | part of each frame always includes at least 2 words (8 bytes) | |
1648 | to hold the saved frame pointer and the saved return address. | |
1649 | ||
1650 | The svr4 ABI for the i860 now requires that the values of the | |
1651 | stack pointer and frame pointer registers be kept aligned to | |
1652 | 16-byte boundaries at all times. We obey that restriction here. | |
1653 | ||
1654 | The svr4 ABI for the i860 is entirely vague when it comes to specifying | |
1655 | exactly where the "preserved" registers should be saved. The native | |
1656 | svr4 C compiler I now have doesn't help to clarify the requirements | |
1657 | very much because it is plainly out-of-date and non-ABI-compliant | |
1658 | (in at least one important way, i.e. how it generates function | |
1659 | epilogues). | |
1660 | ||
1661 | The native svr4 C compiler saves the "preserved" registers (i.e. | |
1662 | r4-r15 and f2-f7) in the lower part of a frame (i.e. at negative | |
1663 | offsets from the frame pointer). | |
1664 | ||
1665 | Previous versions of GCC also saved the "preserved" registers in the | |
b4ac57ab | 1666 | "negative" part of the frame, but they saved them using positive |
235d6e4f RS |
1667 | offsets from the (adjusted) stack pointer (after it had been adjusted |
1668 | to allocate space for the new frame). That's just plain wrong | |
1669 | because if the current function calls alloca(), the stack pointer | |
1670 | will get moved, and it will be impossible to restore the registers | |
1671 | properly again after that. | |
1672 | ||
1673 | Both compilers handled parameter registers (i.e. r16-r27 and f8-f15) | |
1674 | by copying their values either into various "preserved" registers or | |
1675 | into stack slots in the lower part of the current frame (as seemed | |
1676 | appropriate, depending upon subsequent usage of these values). | |
1677 | ||
1678 | Here we want to save the preserved registers at some offset from the | |
1679 | frame pointer register so as to avoid any possible problems arising | |
1680 | from calls to alloca(). We can either save them at small positive | |
1681 | offsets from the frame pointer, or at small negative offsets from | |
1682 | the frame pointer. If we save them at small negative offsets from | |
1683 | the frame pointer (i.e. in the lower part of the frame) then we | |
1684 | must tell the rest of GCC (via STARTING_FRAME_OFFSET) exactly how | |
1685 | many bytes of space we plan to use in the lower part of the frame | |
1686 | for this purpose. Since other parts of the compiler reference the | |
1687 | value of STARTING_FRAME_OFFSET long before final() calls this function, | |
1688 | we would have to go ahead and assume the worst-case storage requirements | |
1689 | for saving all of the "preserved" registers (and use that number, i.e. | |
1690 | `80', to define STARTING_FRAME_OFFSET) if we wanted to save them in | |
1691 | the lower part of the frame. That could potentially be very wasteful, | |
1692 | and that wastefulness could really hamper people compiling for embedded | |
1693 | i860 targets with very tight limits on stack space. Thus, we choose | |
1694 | here to save the preserved registers in the upper part of the | |
1695 | frame, so that we can decide at the very last minute how much (or how | |
1696 | little) space we must allocate for this purpose. | |
1697 | ||
1698 | To satisfy the needs of the svr4 ABI "tdesc" scheme, preserved | |
1699 | registers must always be saved so that the saved values of registers | |
1700 | with higher numbers are at higher addresses. We obey that restriction | |
1701 | here. | |
1702 | ||
1703 | There are two somewhat different ways that you can generate prologues | |
1704 | here... i.e. pedantically ABI-compliant, and the "other" way. The | |
b4ac57ab | 1705 | "other" way is more consistent with what is currently generated by the |
235d6e4f RS |
1706 | "native" svr4 C compiler for the i860. That's important if you want |
1707 | to use the current (as of 8/91) incarnation of svr4 SDB for the i860. | |
1708 | The SVR4 SDB for the i860 insists on having function prologues be | |
1709 | non-ABI-compliant! | |
1710 | ||
1711 | To get fully ABI-compliant prologues, define I860_STRICT_ABI_PROLOGUES | |
1712 | in the i860svr4.h file. (By default this is *not* defined). | |
1713 | ||
1714 | The differences between the ABI-compliant and non-ABI-compliant prologues | |
1715 | are that (a) the ABI version seems to require the use of *signed* | |
1716 | (rather than unsigned) adds and subtracts, and (b) the ordering of | |
1717 | the various steps (e.g. saving preserved registers, saving the | |
1718 | return address, setting up the new frame pointer value) is different. | |
1719 | ||
1720 | For strict ABI compliance, it seems to be the case that the very last | |
1721 | thing that is supposed to happen in the prologue is getting the frame | |
1722 | pointer set to its new value (but only after everything else has | |
1723 | already been properly setup). We do that here, but only if the symbol | |
1724 | I860_STRICT_ABI_PROLOGUES is defined. | |
1725 | */ | |
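/* Roughly, and only as a sketch derived from the stores emitted by the
   (non-strict) prologue code below, the resulting frame looks like this
   in the usual case where r1 is saved (addresses grow upward):

        old sp ->  +------------------------------+
                   | padding to a 16-byte multiple |  \
                   | saved "preserved" registers   |   > frame_upper_bytes
                   | saved r1        (at 4(fp))    |  /
        new fp ->  | saved caller fp (at 0(fp))    | /
                   +------------------------------+
                   | local storage                 |  > frame_lower_bytes
        new sp ->  +------------------------------+
*/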
1726 | ||
1727 | #ifndef STACK_ALIGNMENT | |
1728 | #define STACK_ALIGNMENT 16 | |
1729 | #endif | |
1730 | ||
1731 | extern char call_used_regs[]; | |
1732 | extern int leaf_function_p (); | |
1733 | ||
1734 | char *current_function_original_name; | |
1735 | ||
1736 | static int must_preserve_r1; | |
1737 | static unsigned must_preserve_bytes; | |
1738 | ||
1739 | void | |
1740 | function_prologue (asm_file, local_bytes) | |
1741 | register FILE *asm_file; | |
1742 | register unsigned local_bytes; | |
1743 | { | |
1744 | register unsigned frame_lower_bytes; | |
1745 | register unsigned frame_upper_bytes; | |
1746 | register unsigned total_fsize; | |
1747 | register unsigned preserved_reg_bytes = 0; | |
1748 | register unsigned i; | |
1749 | register unsigned preserved_so_far = 0; | |
1750 | ||
1751 | must_preserve_r1 = (optimize < 2 || ! leaf_function_p ()); | |
1752 | must_preserve_bytes = 4 + (must_preserve_r1 ? 4 : 0); | |
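  /* So must_preserve_bytes is 8 (room for the saved fp plus the saved r1)
     in the usual case, and only 4 (just the saved fp) for leaf functions
     compiled at -O2 or higher.  */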
1753 | ||
1754 | /* Count registers that need preserving. Ignore r0. It never needs | |
1755 | preserving. */ | |
1756 | ||
1757 | for (i = 1; i < FIRST_PSEUDO_REGISTER; i++) | |
1758 | { | |
1759 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
1760 | preserved_reg_bytes += 4; | |
1761 | } | |
1762 | ||
1763 | /* Round-up the frame_lower_bytes so that it's a multiple of 16. */ | |
1764 | ||
1765 | frame_lower_bytes = (local_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT; | |
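  /* With STACK_ALIGNMENT == 16 the expression above is simply
     (local_bytes + 15) & ~15; for example 20 bytes of locals round up
     to 32, and 0 stays 0.  */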
1766 | ||
1767 | /* The upper part of each frame will contain the saved fp, | |
1768 | the saved r1, and stack slots for all of the other "preserved" | |
1769 | registers that we find we will need to save & restore. */ | |
1770 | ||
1771 | frame_upper_bytes = must_preserve_bytes + preserved_reg_bytes; | |
1772 | ||
1773 | /* Round-up the frame_upper_bytes so that it's a multiple of 16. */ | |
1774 | ||
1775 | frame_upper_bytes | |
1776 | = (frame_upper_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT; | |
1777 | ||
1778 | total_fsize = frame_upper_bytes + frame_lower_bytes; | |
1779 | ||
1780 | #ifndef I860_STRICT_ABI_PROLOGUES | |
1781 | ||
1782 | /* There are two kinds of function prologues. | |
1783 | You use the "small" version if the total frame size is | |
1784 | small enough so that it can fit into an immediate 16-bit | |
1785 | value in one instruction. Otherwise, you use the "large" | |
1786 | version of the function prologue. */ | |
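  /* "Small enough" here means total_fsize <= 0x7fff (32767), the largest
     positive value that fits in the signed 16-bit immediate field used by
     the adds/addu instructions below; anything bigger takes the "large"
     path, which builds frame_lower_bytes in r31 with an orh/or pair.  */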
1787 | ||
1788 | if (total_fsize > 0x7fff) | |
1789 | { | |
1790 | /* Adjust the stack pointer. The ABI sez to do this using `adds', | |
1791 | but the native C compiler on svr4 uses `addu'. */ | |
1792 | ||
1793 | fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n", | |
1794 | frame_upper_bytes, i860_reg_prefix, i860_reg_prefix); | |
1795 | ||
1796 | /* Save the old frame pointer. */ | |
1797 | ||
1798 | fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n", | |
1799 | i860_reg_prefix, i860_reg_prefix); | |
1800 | ||
1801 | /* Setup the new frame pointer. The ABI sez to do this after | |
1802 | preserving registers (using adds), but that's not what the | |
1803 | native C compiler on svr4 does. */ | |
1804 | ||
1805 | fprintf (asm_file, "\taddu 0,%ssp,%sfp\n", | |
1806 | i860_reg_prefix, i860_reg_prefix); | |
1807 | ||
1808 | /* Get the value of frame_lower_bytes into r31. */ | |
1809 | ||
1810 | fprintf (asm_file, "\torh %d,%sr0,%sr31\n", | |
1811 | frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix); | |
1812 | fprintf (asm_file, "\tor %d,%sr31,%sr31\n", | |
1813 | frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix); | |
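      /* For example (register prefix omitted), a frame_lower_bytes of
         0x20000 comes out as "orh 2,r0,r31" followed by "or 0,r31,r31":
         the orh fills in the upper 16 bits of r31 (r0 is always zero)
         and the or supplies the low 16 bits.  */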
1814 | ||
1815 | /* Now re-adjust the stack pointer using the value in r31. | |
1816 | The ABI sez to do this with `subs' but SDB may prefer `subu'. */ | |
1817 | ||
1818 | fprintf (asm_file, "\tsubu %ssp,%sr31,%ssp\n", | |
1819 | i860_reg_prefix, i860_reg_prefix, i860_reg_prefix); | |
1820 | ||
1821 | /* Preserve registers. The ABI sez to do this before setting | |
1822 | up the new frame pointer, but that's not what the native | |
1823 | C compiler on svr4 does. */ | |
1824 | ||
1825 | for (i = 1; i < 32; i++) | |
1826 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
1827 | fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n", | |
1828 | i860_reg_prefix, reg_names[i], | |
1829 | must_preserve_bytes + (4 * preserved_so_far++), | |
1830 | i860_reg_prefix); | |
1831 | ||
1832 | for (i = 32; i < 64; i++) | |
1833 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
1834 | fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n", | |
1835 | i860_reg_prefix, reg_names[i], | |
1836 | must_preserve_bytes + (4 * preserved_so_far++), | |
1837 | i860_reg_prefix); | |
1838 | ||
1839 | /* Save the return address. */ | |
1840 | ||
1841 | if (must_preserve_r1) | |
1842 | fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n", | |
1843 | i860_reg_prefix, i860_reg_prefix); | |
1844 | } | |
1845 | else | |
1846 | { | |
1847 | /* Adjust the stack pointer. The ABI sez to do this using `adds', | |
1848 | but the native C compiler on svr4 uses `addu'. */ | |
1849 | ||
1850 | fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n", | |
1851 | total_fsize, i860_reg_prefix, i860_reg_prefix); | |
1852 | ||
1853 | /* Save the old frame pointer. */ | |
1854 | ||
1855 | fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n", | |
1856 | i860_reg_prefix, frame_lower_bytes, i860_reg_prefix); | |
1857 | ||
1858 | /* Setup the new frame pointer. The ABI sez to do this after | |
1859 | preserving registers and after saving the return address, | |
1860 | (and it sez to do this using adds), but that's not what the | |
1861 | native C compiler on svr4 does. */ | |
1862 | ||
1863 | fprintf (asm_file, "\taddu %d,%ssp,%sfp\n", | |
1864 | frame_lower_bytes, i860_reg_prefix, i860_reg_prefix); | |
1865 | ||
1866 | /* Preserve registers. The ABI sez to do this before setting | |
1867 | up the new frame pointer, but that's not what the native | |
1868 | compiler on svr4 does. */ | |
1869 | ||
1870 | for (i = 1; i < 32; i++) | |
1871 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
1872 | fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n", | |
1873 | i860_reg_prefix, reg_names[i], | |
1874 | must_preserve_bytes + (4 * preserved_so_far++), | |
1875 | i860_reg_prefix); | |
1876 | ||
1877 | for (i = 32; i < 64; i++) | |
1878 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
1879 | fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n", | |
1880 | i860_reg_prefix, reg_names[i], | |
1881 | must_preserve_bytes + (4 * preserved_so_far++), | |
1882 | i860_reg_prefix); | |
1883 | ||
1884 | /* Save the return address. The ABI sez to do this earlier, | |
1885 | and also via an offset from %sp, but the native C compiler | |
1886 | on svr4 does it later (i.e. now) and uses an offset from | |
1887 | %fp. */ | |
1888 | ||
1889 | if (must_preserve_r1) | |
1890 | fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n", | |
1891 | i860_reg_prefix, i860_reg_prefix); | |
1892 | } | |
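  /* As a concrete (hypothetical) illustration of the "small" case above:
     with 8 bytes of locals, no callee-saved ("preserved") registers in
     use, and a non-leaf function, frame_lower_bytes is 16,
     frame_upper_bytes is 16 and total_fsize is 32, so the emitted code
     is (register prefix omitted):

                addu -32,sp,sp
                st.l fp,16(sp)
                addu 16,sp,fp
                st.l r1,4(fp)
  */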
1893 | ||
1894 | #else /* defined(I860_STRICT_ABI_PROLOGUES) */ | |
1895 | ||
1896 | /* There are two kinds of function prologues. | |
1897 | You use the "small" version if the total frame size is | |
1898 | small enough so that it can fit into an immediate 16-bit | |
1899 | value in one instruction. Otherwise, you use the "large" | |
1900 | version of the function prologue. */ | |
1901 | ||
1902 | if (total_fsize > 0x7fff) | |
1903 | { | |
1904 | /* Adjust the stack pointer (thereby allocating a new frame). */ | |
1905 | ||
1906 | fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n", | |
1907 | frame_upper_bytes, i860_reg_prefix, i860_reg_prefix); | |
1908 | ||
1909 | /* Save the caller's frame pointer. */ | |
1910 | ||
1911 | fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n", | |
1912 | i860_reg_prefix, i860_reg_prefix); | |
1913 | ||
1914 | /* Save return address. */ | |
1915 | ||
1916 | if (must_preserve_r1) | |
1917 | fprintf (asm_file, "\tst.l %sr1,4(%ssp)\n", | |
1918 | i860_reg_prefix, i860_reg_prefix); | |
1919 | ||
1920 | /* Get the value of frame_lower_bytes into r31 for later use. */ | |
1921 | ||
1922 | fprintf (asm_file, "\torh %d,%sr0,%sr31\n", | |
1923 | frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix); | |
1924 | fprintf (asm_file, "\tor %d,%sr31,%sr31\n", | |
1925 | frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix); | |
1926 | ||
1927 | /* Now re-adjust the stack pointer using the value in r31. */ | |
1928 | ||
1929 | fprintf (asm_file, "\tsubs %ssp,%sr31,%ssp\n", | |
1930 | i860_reg_prefix, i860_reg_prefix, i860_reg_prefix); | |
1931 | ||
1932 | /* Pre-compute value to be used as the new frame pointer. */ | |
1933 | ||
1934 | fprintf (asm_file, "\tadds %ssp,%sr31,%sr31\n", | |
1935 | i860_reg_prefix, i860_reg_prefix, i860_reg_prefix); | |
1936 | ||
1937 | /* Preserve registers. */ | |
1938 | ||
1939 | for (i = 1; i < 32; i++) | |
1940 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
1941 | fprintf (asm_file, "\tst.l %s%s,%d(%sr31)\n", | |
1942 | i860_reg_prefix, reg_names[i], | |
1943 | must_preserve_bytes + (4 * preserved_so_far++), | |
1944 | i860_reg_prefix); | |
1945 | ||
1946 | for (i = 32; i < 64; i++) | |
1947 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
1948 | fprintf (asm_file, "\tfst.l %s%s,%d(%sr31)\n", | |
1949 | i860_reg_prefix, reg_names[i], | |
1950 | must_preserve_bytes + (4 * preserved_so_far++), | |
1951 | i860_reg_prefix); | |
1952 | ||
1953 | /* Actually set the new value of the frame pointer. */ | |
1954 | ||
1955 | fprintf (asm_file, "\tmov %sr31,%sfp\n", | |
1956 | i860_reg_prefix, i860_reg_prefix); | |
1957 | } | |
1958 | else | |
1959 | { | |
1960 | /* Adjust the stack pointer. */ | |
1961 | ||
1962 | fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n", | |
1963 | total_fsize, i860_reg_prefix, i860_reg_prefix); | |
1964 | ||
1965 | /* Save the caller's frame pointer. */ | |
1966 | ||
1967 | fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n", | |
1968 | i860_reg_prefix, frame_lower_bytes, i860_reg_prefix); | |
1969 | ||
1970 | /* Save the return address. */ | |
1971 | ||
1972 | if (must_preserve_r1) | |
1973 | fprintf (asm_file, "\tst.l %sr1,%d(%ssp)\n", | |
1974 | i860_reg_prefix, frame_lower_bytes + 4, i860_reg_prefix); | |
1975 | ||
1976 | /* Preserve registers. */ | |
1977 | ||
1978 | for (i = 1; i < 32; i++) | |
1979 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
1980 | fprintf (asm_file, "\tst.l %s%s,%d(%ssp)\n", | |
1981 | i860_reg_prefix, reg_names[i], | |
1982 | frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++), | |
1983 | i860_reg_prefix); | |
1984 | ||
1985 | for (i = 32; i < 64; i++) | |
1986 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
1987 | fprintf (asm_file, "\tfst.l %s%s,%d(%ssp)\n", | |
1988 | i860_reg_prefix, reg_names[i], | |
1989 | frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++), | |
1990 | i860_reg_prefix); | |
1991 | ||
1992 | /* Setup the new frame pointer. */ | |
1993 | ||
1994 | fprintf (asm_file, "\tadds %d,%ssp,%sfp\n", | |
1995 | frame_lower_bytes, i860_reg_prefix, i860_reg_prefix); | |
1996 | } | |
1997 | #endif /* defined(I860_STRICT_ABI_PROLOGUES) */ | |
1998 | ||
1999 | #ifdef ASM_OUTPUT_PROLOGUE_SUFFIX | |
2000 | ASM_OUTPUT_PROLOGUE_SUFFIX (asm_file); | |
2001 | #endif /* defined(ASM_OUTPUT_PROLOGUE_SUFFIX) */ | |
2002 | } | |
2003 | \f | |
2004 | /* This function generates the assembly code for function exit. | |
2005 | The macro FUNCTION_EPILOGUE in i860.h is defined to call this function. | |
2006 | ||
2007 | ASM_FILE is a stdio stream to output the code to. | |
2008 | SIZE is an int: how many units of temporary storage to allocate. | |
2009 | ||
2010 | The function epilogue should not depend on the current stack pointer! | |
2011 | It should use the frame pointer only. This is mandatory because | |
2012 | of alloca; we also take advantage of it to omit stack adjustments | |
2013 | before returning. | |
2014 | ||
2015 | Note that when we go to restore the preserved register values we must | |
2016 | not try to address their slots by using offsets from the stack pointer. | |
2017 | That's because the stack pointer may have been moved during the function | |
2018 | execution due to a call to alloca(). Rather, we must restore all | |
2019 | preserved registers via offsets from the frame pointer value. | |
2020 | ||
2021 | Note also that when the current frame is being "popped" (by adjusting | |
2022 | the value of the stack pointer) on function exit, we must (for the | |
2023 | sake of alloca) set the new value of the stack pointer based upon | |
2024 | the current value of the frame pointer. We can't just add what we | |
2025 | believe to be the (static) frame size to the stack pointer because | |
2026 | if we did that, and alloca() had been called during this function, | |
2027 | we would end up returning *without* having fully deallocated all of | |
2028 | the space grabbed by alloca. If that happened, and a function | |
2029 | containing one or more alloca() calls was called over and over again, | |
2030 | then the stack would grow without limit! | |
2031 | ||
2032 | Finally note that the epilogues generated here are completely ABI | |
2033 | compliant. They go out of their way to ensure that the value in | |
2034 | the frame pointer register is never less than the value in the stack | |
2035 | pointer register. It's not clear why this relationship needs to be | |
2036 | maintained at all times, but maintaining it only costs one extra | |
2037 | instruction, so what the hell. | |
2038 | */ | |
2039 | ||
2040 | void | |
2041 | function_epilogue (asm_file, local_bytes) | |
2042 | register FILE *asm_file; | |
2043 | register unsigned local_bytes; | |
2044 | { | |
2045 | register unsigned frame_upper_bytes; | |
2046 | register unsigned preserved_reg_bytes = 0; | |
2047 | register unsigned i; | |
2048 | register unsigned restored_so_far = 0; | |
2049 | ||
2050 | /* Count the number of registers that were preserved in the prologue. | |
2051 | Ignore r0. It is never preserved. */ | |
2052 | ||
2053 | for (i = 1; i < FIRST_PSEUDO_REGISTER; i++) | |
2054 | { | |
2055 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
2056 | preserved_reg_bytes += 4; | |
2057 | } | |
2058 | ||
2059 | /* The upper part of each frame will contain only the saved fp, | |
2060 | the saved r1, and stack slots for all of the other "preserved" | |
2061 | registers that we find we will need to save & restore. */ | |
2062 | ||
2063 | frame_upper_bytes = must_preserve_bytes + preserved_reg_bytes; | |
2064 | ||
2065 | /* Round-up frame_upper_bytes so that it's a multiple of 16. */ | |
2066 | ||
2067 | frame_upper_bytes | |
2068 | = (frame_upper_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT; | |
2069 | ||
2070 | /* Restore all of the "preserved" registers that need restoring. */ | |
2071 | ||
2072 | for (i = 1; i < 32; i++) | |
2073 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
2074 | fprintf (asm_file, "\tld.l %d(%sfp),%s%s\n", | |
2075 | must_preserve_bytes + (4 * restored_so_far++), | |
2076 | i860_reg_prefix, i860_reg_prefix, reg_names[i]); | |
2077 | ||
2078 | for (i = 32; i < 64; i++) | |
2079 | if (regs_ever_live[i] && ! call_used_regs[i]) | |
2080 | fprintf (asm_file, "\tfld.l %d(%sfp),%s%s\n", | |
2081 | must_preserve_bytes + (4 * restored_so_far++), | |
2082 | i860_reg_prefix, i860_reg_prefix, reg_names[i]); | |
2083 | ||
2084 | /* Get the value we plan to use to restore the stack pointer into r31. */ | |
2085 | ||
2086 | fprintf (asm_file, "\tadds %d,%sfp,%sr31\n", | |
2087 | frame_upper_bytes, i860_reg_prefix, i860_reg_prefix); | |
2088 | ||
2089 | /* Restore the return address and the old frame pointer. */ | |
2090 | ||
2091 | if (must_preserve_r1) | |
2092 | fprintf (asm_file, "\tld.l 4(%sfp),%sr1\n", | |
2093 | i860_reg_prefix, i860_reg_prefix); | |
2094 | ||
2095 | fprintf (asm_file, "\tld.l 0(%sfp),%sfp\n", | |
2096 | i860_reg_prefix, i860_reg_prefix); | |
2097 | ||
2098 | /* Return and restore the old stack pointer value. */ | |
2099 | ||
2100 | fprintf (asm_file, "\tbri %sr1\n\tmov %sr31,%ssp\n", | |
2101 | i860_reg_prefix, i860_reg_prefix, i860_reg_prefix); | |
2102 | } |
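/* Continuing the hypothetical small-frame illustration from the prologue
   above (8 bytes of locals, no preserved registers in use, non-leaf),
   frame_upper_bytes is again 16 and the epilogue comes out as (register
   prefix omitted):

                adds 16,fp,r31
                ld.l 4(fp),r1
                ld.l 0(fp),fp
                bri r1
                mov r31,sp

   The final mov sits in the bri delay slot, so the stack pointer is
   restored from r31 as the return takes effect.  */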