/* Generic partial redundancy elimination with lazy code motion
   support.
   Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* These routines are meant to be used by various optimization
   passes which can be modeled as lazy code motion problems.
   Including, but not limited to:

	* Traditional partial redundancy elimination.

	* Placement of caller/callee register save/restores.

	* Load/store motion.

	* Copy motion.

	* Conversion of flat register files to a stacked register
	  model.

	* Dead load/store elimination.

   These routines accept as input:

	* Basic block information (number of blocks, lists of
	  predecessors and successors).  Note the granularity
	  need not be basic blocks; it could be statements or
	  functions.

	* Bitmaps of local properties (computed, transparent and
	  anticipatable expressions).

   The output of these routines is a bitmap of redundant computations
   and a bitmap of optimal placement points.  */
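
/* As a road map: the computation below follows the lazy code motion
   formulation of Knoop, Ruething and Steffen, and pre_edge_lcm simply
   chains the individual steps together:

	1. Global availability (compute_available) and global
	   anticipatability (compute_antinout_edge).

	2. Earliest placement points on edges (compute_earliest).

	3. "Later" placement points, delaying computations as far
	   down the flow graph as possible (compute_laterin).

	4. The final insertion and deletion maps (compute_insert_delete).

   The reverse-flowgraph variant (pre_edge_rev_lcm) runs the mirror
   image of the same steps.  */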

#include "config.h"
#include "system.h"

#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "basic-block.h"
#include "tm_p.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"

/* Edge based LCM routines.  */
static void compute_antinout_edge PARAMS ((sbitmap *, sbitmap *,
					   sbitmap *, sbitmap *));
static void compute_earliest PARAMS ((struct edge_list *, int, sbitmap *,
				      sbitmap *, sbitmap *, sbitmap *,
				      sbitmap *));
static void compute_laterin PARAMS ((struct edge_list *, sbitmap *,
				     sbitmap *, sbitmap *, sbitmap *));
static void compute_insert_delete PARAMS ((struct edge_list *edge_list,
					   sbitmap *, sbitmap *, sbitmap *,
					   sbitmap *, sbitmap *));

/* Edge based LCM routines on a reverse flowgraph.  */
static void compute_farthest PARAMS ((struct edge_list *, int, sbitmap *,
				      sbitmap *, sbitmap *, sbitmap *,
				      sbitmap *));
static void compute_nearerout PARAMS ((struct edge_list *, sbitmap *,
				       sbitmap *, sbitmap *, sbitmap *));
static void compute_rev_insert_delete PARAMS ((struct edge_list *edge_list,
					       sbitmap *, sbitmap *, sbitmap *,
					       sbitmap *, sbitmap *));

\f
/* Edge based lcm routines.  */

/* Compute expression anticipatability at the entrance and exit of each
   block.  This is done based on the flow graph, and not on the pred-succ
   lists.  Other than that, it's pretty much identical to compute_antinout.  */

static void
compute_antinout_edge (antloc, transp, antin, antout)
     sbitmap *antloc;
     sbitmap *transp;
     sbitmap *antin;
     sbitmap *antout;
{
  int bb;
  edge e;
  basic_block *worklist, *tos;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks.  */
  tos = worklist = (basic_block *) xmalloc (sizeof (basic_block)
					    * n_basic_blocks);

  /* We want a maximal solution, so make an optimistic initialization of
     ANTIN.  */
  sbitmap_vector_ones (antin, n_basic_blocks);

  /* Put every block on the worklist; this is necessary because of the
     optimistic initialization of ANTIN above.  */
  for (bb = 0; bb < n_basic_blocks; bb++)
    {
      *tos++ = BASIC_BLOCK (bb);
      BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
    }

  /* Mark blocks which are predecessors of the exit block so that we
     can easily identify them below.  */
  for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
    e->src->aux = EXIT_BLOCK_PTR;

  /* Iterate until the worklist is empty.  */
  while (tos != worklist)
    {
      /* Take the first entry off the worklist.  */
      basic_block b = *--tos;
      bb = b->index;

      if (b->aux == EXIT_BLOCK_PTR)
	{
	  /* Do not clear the aux field for blocks which are
	     predecessors of the EXIT block.  That way we never
	     add them to the worklist again.  */
	  sbitmap_zero (antout[bb]);
	}
      else
	{
	  /* Clear the aux field of this block so that it can be added to
	     the worklist again if necessary.  */
	  b->aux = NULL;
	  sbitmap_intersection_of_succs (antout[bb], antin, bb);
	}

      if (sbitmap_a_or_b_and_c (antin[bb], antloc[bb], transp[bb], antout[bb]))
	{
	  /* If the in state of this block changed, then we need
	     to add the predecessors of this block to the worklist
	     if they are not already on the worklist.  */
	  for (e = b->pred; e; e = e->pred_next)
	    {
	      if (!e->src->aux && e->src != ENTRY_BLOCK_PTR)
		{
		  *tos++ = e->src;
		  e->src->aux = e;
		}
	    }
	}
    }

  free (tos);
}
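
/* For reference, the fixpoint computed above can be written as

	ANTOUT[b] = intersection of ANTIN[s] over all successors s of b
		    (the empty set for predecessors of the EXIT block)
	ANTIN[b]  = ANTLOC[b] | (TRANSP[b] & ANTOUT[b])

   which is exactly what the sbitmap_intersection_of_succs and
   sbitmap_a_or_b_and_c calls implement.  */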

/* Compute the earliest vector for edge based lcm.  */

static void
compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest)
     struct edge_list *edge_list;
     int n_exprs;
     sbitmap *antin, *antout, *avout, *kill, *earliest;
{
  sbitmap difference, temp_bitmap;
  int x, num_edges;
  basic_block pred, succ;

  num_edges = NUM_EDGES (edge_list);

  difference = sbitmap_alloc (n_exprs);
  temp_bitmap = sbitmap_alloc (n_exprs);

  for (x = 0; x < num_edges; x++)
    {
      pred = INDEX_EDGE_PRED_BB (edge_list, x);
      succ = INDEX_EDGE_SUCC_BB (edge_list, x);
      if (pred == ENTRY_BLOCK_PTR)
	sbitmap_copy (earliest[x], antin[succ->index]);
      else
	{
	  if (succ == EXIT_BLOCK_PTR)
	    sbitmap_zero (earliest[x]);
	  else
	    {
	      sbitmap_difference (difference, antin[succ->index],
				  avout[pred->index]);
	      sbitmap_not (temp_bitmap, antout[pred->index]);
	      sbitmap_a_and_b_or_c (earliest[x], difference,
				    kill[pred->index], temp_bitmap);
	    }
	}
    }

  free (temp_bitmap);
  free (difference);
}
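
/* For reference, for an edge (p,s) between two ordinary blocks the loop
   above computes

	EARLIEST[(p,s)] = (ANTIN[s] & ~AVOUT[p]) & (KILL[p] | ~ANTOUT[p])

   i.e. an expression is placed earliest on an edge if it is anticipated
   at the head of the edge but not already available at the tail, and
   either the tail kills it or it is not anticipated on exit from the
   tail.  */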

/* later(p,s) is dependent on the calculation of laterin(p).
   laterin(p) is dependent on the calculation of later(p2,p).

     laterin(ENTRY) is defined as all 0's
     later(ENTRY, succs(ENTRY)) are defined using laterin(ENTRY)
     laterin(succs(ENTRY)) is defined by later(ENTRY, succs(ENTRY)).

   If we progress in this manner, starting with all basic blocks
   in the work list, anytime we change later(bb), we need to add
   succs(bb) to the worklist if they are not already on the worklist.

   Boundary conditions:

     We prime the worklist with all the normal basic blocks.  The ENTRY
     block can never be added to the worklist since it is never the
     successor of any block.  We explicitly prevent the EXIT block from
     being added to the worklist.

     We optimistically initialize LATER.  That is the only time this
     routine will compute LATER for an edge out of the entry block since
     the entry block is never on the worklist.  Thus, LATERIN is neither
     used nor computed for the ENTRY block.

     Since the EXIT block is never added to the worklist, we will neither
     use nor compute LATERIN for the exit block.  Edges which reach the
     EXIT block are handled in the normal fashion inside the loop.
     However, the insertion/deletion computation needs LATERIN(EXIT), so
     we have to compute it.  */

static void
compute_laterin (edge_list, earliest, antloc, later, laterin)
     struct edge_list *edge_list;
     sbitmap *earliest, *antloc, *later, *laterin;
{
  int bb, num_edges, i;
  edge e;
  basic_block *worklist, *tos;

  num_edges = NUM_EDGES (edge_list);

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks.  */
  tos = worklist = (basic_block *) xmalloc (sizeof (basic_block)
					    * (n_basic_blocks + 1));

  /* Initialize a mapping from each edge to its index.  */
  for (i = 0; i < num_edges; i++)
    INDEX_EDGE (edge_list, i)->aux = (void *) (size_t) i;

  /* We want a maximal solution, so initially consider LATER true for
     all edges.  This allows propagation through a loop since the incoming
     loop edge will have LATER set, so if all the other incoming edges
     to the loop are set, then LATERIN will be set for the head of the
     loop.

     If the optimistic setting of LATER on that edge was incorrect (for
     example the expression is ANTLOC in a block within the loop) then
     this algorithm will detect it when we process the block at the head
     of the optimistic edge.  That will requeue the affected blocks.  */
  sbitmap_vector_ones (later, num_edges);

  /* Note that even though we want an optimistic setting of LATER, we
     do not want to be overly optimistic.  Consider an outgoing edge from
     the entry block.  That edge should always have a LATER value the
     same as EARLIEST for that edge.  */
  for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
    sbitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]);

  /* Add all the blocks to the worklist.  This prevents an early exit from
     the loop given our optimistic initialization of LATER above.  */
  for (bb = n_basic_blocks - 1; bb >= 0; bb--)
    {
      basic_block b = BASIC_BLOCK (bb);
      *tos++ = b;
      b->aux = b;
    }

  /* Iterate until the worklist is empty.  */
  while (tos != worklist)
    {
      /* Take the first entry off the worklist.  */
      basic_block b = *--tos;
      b->aux = NULL;

      /* Compute LATERIN as the intersection of LATER for each incoming
	 edge to B.  */
      bb = b->index;
      sbitmap_ones (laterin[bb]);
      for (e = b->pred; e != NULL; e = e->pred_next)
	sbitmap_a_and_b (laterin[bb], laterin[bb], later[(size_t) e->aux]);

      /* Calculate LATER for all outgoing edges.  */
      for (e = b->succ; e != NULL; e = e->succ_next)
	{
	  if (sbitmap_union_of_diff (later[(size_t) e->aux],
				     earliest[(size_t) e->aux],
				     laterin[e->src->index],
				     antloc[e->src->index]))
	    {
	      /* If LATER for an outgoing edge was changed, then we need
		 to add the target of the outgoing edge to the worklist.  */
	      if (e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0)
		{
		  *tos++ = e->dest;
		  e->dest->aux = e;
		}
	    }
	}
    }

  /* Computation of insertion and deletion points requires computing LATERIN
     for the EXIT block.  We allocated an extra entry in the LATERIN array
     for just this purpose.  */
  sbitmap_ones (laterin[n_basic_blocks]);
  for (e = EXIT_BLOCK_PTR->pred; e != NULL; e = e->pred_next)
    sbitmap_a_and_b (laterin[n_basic_blocks],
		     laterin[n_basic_blocks],
		     later[(size_t) e->aux]);

  free (tos);
}
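
/* For reference, the system solved above is

	LATERIN[b]   = intersection of LATER[(p,b)] over all incoming
		       edges (p,b)
	LATER[(p,s)] = EARLIEST[(p,s)] | (LATERIN[p] & ~ANTLOC[p])

   i.e. a computation can still be delayed onto edge (p,s) if it could
   be placed earliest there, or if it could be delayed to the entry of p
   and p itself does not locally compute the expression.  */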

/* Compute the insertion and deletion points for edge based LCM.  */

static void
compute_insert_delete (edge_list, antloc, later, laterin,
		       insert, delete)
     struct edge_list *edge_list;
     sbitmap *antloc, *later, *laterin, *insert, *delete;
{
  int x;

  for (x = 0; x < n_basic_blocks; x++)
    sbitmap_difference (delete[x], antloc[x], laterin[x]);

  for (x = 0; x < NUM_EDGES (edge_list); x++)
    {
      basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x);

      if (b == EXIT_BLOCK_PTR)
	sbitmap_difference (insert[x], later[x], laterin[n_basic_blocks]);
      else
	sbitmap_difference (insert[x], later[x], laterin[b->index]);
    }
}
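
/* For reference, the final maps computed above are

	DELETE[b]     = ANTLOC[b] & ~LATERIN[b]
	INSERT[(p,s)] = LATER[(p,s)] & ~LATERIN[s]

   i.e. the original computation in b is redundant (deleted) when
   placement could not be delayed to the entry of b, because by then the
   value has already been computed on every path; and an expression is
   inserted on an edge when it can be delayed onto that edge but no
   further.  */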

/* Given local properties TRANSP, ANTLOC, AVOUT, KILL return the
   insert and delete vectors for edge based LCM.  Returns an
   edgelist which is used to map the insert vector to what edge
   an expression should be inserted on.  */

struct edge_list *
pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
     FILE *file ATTRIBUTE_UNUSED;
     int n_exprs;
     sbitmap *transp;
     sbitmap *avloc;
     sbitmap *antloc;
     sbitmap *kill;
     sbitmap **insert;
     sbitmap **delete;
{
  sbitmap *antin, *antout, *earliest;
  sbitmap *avin, *avout;
  sbitmap *later, *laterin;
  struct edge_list *edge_list;
  int num_edges;

  edge_list = create_edge_list ();
  num_edges = NUM_EDGES (edge_list);

#ifdef LCM_DEBUG_INFO
  if (file)
    {
      fprintf (file, "Edge List:\n");
      verify_edge_list (file, edge_list);
      print_edge_list (file, edge_list);
      dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
      dump_sbitmap_vector (file, "antloc", "", antloc, n_basic_blocks);
      dump_sbitmap_vector (file, "avloc", "", avloc, n_basic_blocks);
      dump_sbitmap_vector (file, "kill", "", kill, n_basic_blocks);
    }
#endif

  /* Compute global availability.  */
  avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  compute_available (avloc, kill, avout, avin);
  free (avin);

  /* Compute global anticipatability.  */
  antin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  antout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  compute_antinout_edge (antloc, transp, antin, antout);

#ifdef LCM_DEBUG_INFO
  if (file)
    {
      dump_sbitmap_vector (file, "antin", "", antin, n_basic_blocks);
      dump_sbitmap_vector (file, "antout", "", antout, n_basic_blocks);
    }
#endif

  /* Compute earliestness.  */
  earliest = sbitmap_vector_alloc (num_edges, n_exprs);
  compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest);

#ifdef LCM_DEBUG_INFO
  if (file)
    dump_sbitmap_vector (file, "earliest", "", earliest, num_edges);
#endif

  free (antout);
  free (antin);
  free (avout);

  later = sbitmap_vector_alloc (num_edges, n_exprs);

  /* Allocate an extra element for the exit block in the laterin vector.  */
  laterin = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
  compute_laterin (edge_list, earliest, antloc, later, laterin);

#ifdef LCM_DEBUG_INFO
  if (file)
    {
      dump_sbitmap_vector (file, "laterin", "", laterin, n_basic_blocks + 1);
      dump_sbitmap_vector (file, "later", "", later, num_edges);
    }
#endif

  free (earliest);

  *insert = sbitmap_vector_alloc (num_edges, n_exprs);
  *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  compute_insert_delete (edge_list, antloc, later, laterin, *insert, *delete);

  free (laterin);
  free (later);

#ifdef LCM_DEBUG_INFO
  if (file)
    {
      dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
      dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
			   n_basic_blocks);
    }
#endif

  return edge_list;
}
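
/* An illustrative (hypothetical) sketch of how a pass drives this
   interface; the driver function and its property vectors are
   placeholders, which a real caller such as a PRE pass would compute
   from its expression table.  */
#if 0
static void
example_lcm_driver (file, n_exprs, transp, avloc, antloc, kill)
     FILE *file;
     int n_exprs;
     sbitmap *transp, *avloc, *antloc, *kill;
{
  struct edge_list *edge_list;
  sbitmap *insert_map, *delete_map;
  int e, b, expr;

  /* Run edge based LCM; INSERT_MAP and DELETE_MAP are allocated by
     pre_edge_lcm itself.  */
  edge_list = pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill,
			    &insert_map, &delete_map);

  /* INSERT_MAP is indexed by edge: insert expression EXPR on edge E.  */
  for (e = 0; e < NUM_EDGES (edge_list); e++)
    for (expr = 0; expr < n_exprs; expr++)
      if (TEST_BIT (insert_map[e], expr))
	/* ... emit a copy of EXPR on INDEX_EDGE (edge_list, e) ... */;

  /* DELETE_MAP is indexed by block: the computation of EXPR in block B
     is redundant and may be replaced by a use of its saved value.  */
  for (b = 0; b < n_basic_blocks; b++)
    for (expr = 0; expr < n_exprs; expr++)
      if (TEST_BIT (delete_map[b], expr))
	/* ... replace the computation of EXPR in block B ... */;

  free (insert_map);
  free (delete_map);
  free_edge_list (edge_list);
}
#endif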

/* Compute the AVIN and AVOUT vectors from the AVLOC and KILL vectors,
   iterating with a worklist until a fixed point is reached.  */

void
compute_available (avloc, kill, avout, avin)
     sbitmap *avloc, *kill, *avout, *avin;
{
  int bb;
  edge e;
  basic_block *worklist, *tos;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks.  */
  tos = worklist = (basic_block *) xmalloc (sizeof (basic_block)
					    * n_basic_blocks);

  /* We want a maximal solution.  */
  sbitmap_vector_ones (avout, n_basic_blocks);

  /* Put every block on the worklist; this is necessary because of the
     optimistic initialization of AVOUT above.  */
  for (bb = n_basic_blocks - 1; bb >= 0; bb--)
    {
      *tos++ = BASIC_BLOCK (bb);
      BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
    }

  /* Mark blocks which are successors of the entry block so that we
     can easily identify them below.  */
  for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
    e->dest->aux = ENTRY_BLOCK_PTR;

  /* Iterate until the worklist is empty.  */
  while (tos != worklist)
    {
      /* Take the first entry off the worklist.  */
      basic_block b = *--tos;
      bb = b->index;

      /* If one of the predecessor blocks is the ENTRY block, then the
	 intersection of avouts is the null set.  We can identify such blocks
	 by the special value in the AUX field in the block structure.  */
      if (b->aux == ENTRY_BLOCK_PTR)
	{
	  /* Do not clear the aux field for blocks which are
	     successors of the ENTRY block.  That way we never
	     add them to the worklist again.  */
	  sbitmap_zero (avin[bb]);
	}
      else
	{
	  /* Clear the aux field of this block so that it can be added to
	     the worklist again if necessary.  */
	  b->aux = NULL;
	  sbitmap_intersection_of_preds (avin[bb], avout, bb);
	}

      if (sbitmap_union_of_diff (avout[bb], avloc[bb], avin[bb], kill[bb]))
	{
	  /* If the out state of this block changed, then we need
	     to add the successors of this block to the worklist
	     if they are not already on the worklist.  */
	  for (e = b->succ; e; e = e->succ_next)
	    {
	      if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR)
		{
		  *tos++ = e->dest;
		  e->dest->aux = e;
		}
	    }
	}
    }

  free (tos);
}
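
/* For reference, the fixpoint computed above is

	AVIN[b]  = intersection of AVOUT[p] over all predecessors p of b
		   (the empty set for successors of the ENTRY block)
	AVOUT[b] = AVLOC[b] | (AVIN[b] & ~KILL[b])

   the standard forward available-expressions system, seeded
   optimistically since we want a maximal solution.  */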

/* Compute the farthest vector for edge based lcm.  */

static void
compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin,
		  kill, farthest)
     struct edge_list *edge_list;
     int n_exprs;
     sbitmap *st_avout, *st_avin, *st_antin, *kill, *farthest;
{
  sbitmap difference, temp_bitmap;
  int x, num_edges;
  basic_block pred, succ;

  num_edges = NUM_EDGES (edge_list);

  difference = sbitmap_alloc (n_exprs);
  temp_bitmap = sbitmap_alloc (n_exprs);

  for (x = 0; x < num_edges; x++)
    {
      pred = INDEX_EDGE_PRED_BB (edge_list, x);
      succ = INDEX_EDGE_SUCC_BB (edge_list, x);
      if (succ == EXIT_BLOCK_PTR)
	sbitmap_copy (farthest[x], st_avout[pred->index]);
      else
	{
	  if (pred == ENTRY_BLOCK_PTR)
	    sbitmap_zero (farthest[x]);
	  else
	    {
	      sbitmap_difference (difference, st_avout[pred->index],
				  st_antin[succ->index]);
	      sbitmap_not (temp_bitmap, st_avin[succ->index]);
	      sbitmap_a_and_b_or_c (farthest[x], difference,
				    kill[succ->index], temp_bitmap);
	    }
	}
    }

  free (temp_bitmap);
  free (difference);
}
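
/* For reference, FARTHEST is the reverse-flowgraph analogue of EARLIEST;
   for an edge (p,s) between two ordinary blocks the loop above computes

	FARTHEST[(p,s)] = (ST_AVOUT[p] & ~ST_ANTIN[s])
			  & (KILL[s] | ~ST_AVIN[s])

   with the roles of anticipatability and availability exchanged
   relative to compute_earliest.  */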

/* Compute the nearer and nearerout vectors for edge based lcm.

   This is the mirror of compute_laterin; additional comments on the
   implementation can be found before compute_laterin.  */

static void
compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
     struct edge_list *edge_list;
     sbitmap *farthest, *st_avloc, *nearer, *nearerout;
{
  int bb, num_edges, i;
  edge e;
  basic_block *worklist, *tos;

  num_edges = NUM_EDGES (edge_list);

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks.  */
  tos = worklist = (basic_block *) xmalloc (sizeof (basic_block)
					    * (n_basic_blocks + 1));

  /* Initialize a mapping from each edge to its index.  */
  for (i = 0; i < num_edges; i++)
    INDEX_EDGE (edge_list, i)->aux = (void *) (size_t) i;

  /* We want a maximal solution.  */
  sbitmap_vector_ones (nearer, num_edges);

  /* Note that even though we want an optimistic setting of NEARER, we
     do not want to be overly optimistic.  Consider an incoming edge to
     the exit block.  That edge should always have a NEARER value the
     same as FARTHEST for that edge.  */
  for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
    sbitmap_copy (nearer[(size_t) e->aux], farthest[(size_t) e->aux]);

  /* Add all the blocks to the worklist.  This prevents an early exit
     from the loop given our optimistic initialization of NEARER.  */
  for (bb = 0; bb < n_basic_blocks; bb++)
    {
      basic_block b = BASIC_BLOCK (bb);
      *tos++ = b;
      b->aux = b;
    }

  /* Iterate until the worklist is empty.  */
  while (tos != worklist)
    {
      /* Take the first entry off the worklist.  */
      basic_block b = *--tos;
      b->aux = NULL;

      /* Compute NEAREROUT as the intersection of NEARER for each outgoing
	 edge from B.  */
      bb = b->index;
      sbitmap_ones (nearerout[bb]);
      for (e = b->succ; e != NULL; e = e->succ_next)
	sbitmap_a_and_b (nearerout[bb], nearerout[bb],
			 nearer[(size_t) e->aux]);

      /* Calculate NEARER for all incoming edges.  */
      for (e = b->pred; e != NULL; e = e->pred_next)
	{
	  if (sbitmap_union_of_diff (nearer[(size_t) e->aux],
				     farthest[(size_t) e->aux],
				     nearerout[e->dest->index],
				     st_avloc[e->dest->index]))
	    {
	      /* If NEARER for an incoming edge was changed, then we need
		 to add the source of the incoming edge to the worklist.  */
	      if (e->src != ENTRY_BLOCK_PTR && e->src->aux == 0)
		{
		  *tos++ = e->src;
		  e->src->aux = e;
		}
	    }
	}
    }

  /* Computation of insertion and deletion points requires computing
     NEAREROUT for the ENTRY block.  We allocated an extra entry in the
     NEAREROUT array for just this purpose.  */
  sbitmap_ones (nearerout[n_basic_blocks]);
  for (e = ENTRY_BLOCK_PTR->succ; e != NULL; e = e->succ_next)
    sbitmap_a_and_b (nearerout[n_basic_blocks],
		     nearerout[n_basic_blocks],
		     nearer[(size_t) e->aux]);

  free (tos);
}
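
/* For reference, the mirrored system solved above is

	NEAREROUT[b]  = intersection of NEARER[(b,s)] over all outgoing
			edges (b,s)
	NEARER[(p,s)] = FARTHEST[(p,s)] | (NEAREROUT[s] & ~ST_AVLOC[s])

   i.e. the same equations as LATER/LATERIN with the direction of the
   flow graph reversed.  */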

/* Compute the insertion and deletion points for edge based reverse
   LCM.  */

static void
compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
			   insert, delete)
     struct edge_list *edge_list;
     sbitmap *st_avloc, *nearer, *nearerout, *insert, *delete;
{
  int x;

  for (x = 0; x < n_basic_blocks; x++)
    sbitmap_difference (delete[x], st_avloc[x], nearerout[x]);

  for (x = 0; x < NUM_EDGES (edge_list); x++)
    {
      basic_block b = INDEX_EDGE_PRED_BB (edge_list, x);

      if (b == ENTRY_BLOCK_PTR)
	sbitmap_difference (insert[x], nearer[x], nearerout[n_basic_blocks]);
      else
	sbitmap_difference (insert[x], nearer[x], nearerout[b->index]);
    }
}

/* Given local properties TRANSP, ST_AVLOC, ST_ANTLOC, KILL return the
   insert and delete vectors for edge based reverse LCM.  Returns an
   edgelist which is used to map the insert vector to what edge
   an expression should be inserted on.  */

struct edge_list *
pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
		  insert, delete)
     FILE *file ATTRIBUTE_UNUSED;
     int n_exprs;
     sbitmap *transp;
     sbitmap *st_avloc;
     sbitmap *st_antloc;
     sbitmap *kill;
     sbitmap **insert;
     sbitmap **delete;
{
  sbitmap *st_antin, *st_antout;
  sbitmap *st_avout, *st_avin, *farthest;
  sbitmap *nearer, *nearerout;
  struct edge_list *edge_list;
  int num_edges;

  edge_list = create_edge_list ();
  num_edges = NUM_EDGES (edge_list);

  /* Compute global anticipatability.  */
  st_antin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  st_antout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  sbitmap_vector_zero (st_antin, n_basic_blocks);
  sbitmap_vector_zero (st_antout, n_basic_blocks);
  compute_antinout_edge (st_antloc, transp, st_antin, st_antout);

  /* Compute global availability.  */
  st_avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  st_avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  compute_available (st_avloc, kill, st_avout, st_avin);

#ifdef LCM_DEBUG_INFO
  if (file)
    {
      fprintf (file, "Edge List:\n");
      verify_edge_list (file, edge_list);
      print_edge_list (file, edge_list);
      dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
      dump_sbitmap_vector (file, "st_avloc", "", st_avloc, n_basic_blocks);
      dump_sbitmap_vector (file, "st_antloc", "", st_antloc, n_basic_blocks);
      dump_sbitmap_vector (file, "st_antin", "", st_antin, n_basic_blocks);
      dump_sbitmap_vector (file, "st_antout", "", st_antout, n_basic_blocks);
      dump_sbitmap_vector (file, "st_kill", "", kill, n_basic_blocks);
    }
#endif

#ifdef LCM_DEBUG_INFO
  if (file)
    {
      dump_sbitmap_vector (file, "st_avout", "", st_avout, n_basic_blocks);
      dump_sbitmap_vector (file, "st_avin", "", st_avin, n_basic_blocks);
    }
#endif

  /* Compute farthestness.  */
  farthest = sbitmap_vector_alloc (num_edges, n_exprs);
  compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin,
		    kill, farthest);

#ifdef LCM_DEBUG_INFO
  if (file)
    dump_sbitmap_vector (file, "farthest", "", farthest, num_edges);
#endif

  free (st_avin);
  free (st_avout);

  nearer = sbitmap_vector_alloc (num_edges, n_exprs);

  /* Allocate an extra element for the entry block.  */
  nearerout = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
  compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout);

#ifdef LCM_DEBUG_INFO
  if (file)
    {
      dump_sbitmap_vector (file, "nearerout", "", nearerout,
			   n_basic_blocks + 1);
      dump_sbitmap_vector (file, "nearer", "", nearer, num_edges);
    }
#endif

  free (farthest);

  *insert = sbitmap_vector_alloc (num_edges, n_exprs);
  *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
  compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
			     *insert, *delete);

  free (nearerout);
  free (nearer);

#ifdef LCM_DEBUG_INFO
  if (file)
    {
      dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
      dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
			   n_basic_blocks);
    }
#endif

  return edge_list;
}

/* MODE SWITCHING

   The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a
   list for each basic block.  For each entity, there is an array of
   bb_info over the flow graph basic blocks (local var `bb_info'), each
   element of which contains a list of all insns within that basic block,
   in the order they are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry, without a mode.  (Each basic block
   in the flow graph must have at least one entry in the segment table.)

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, until for each entity all modes are exhausted.

   More details are located in the code for optimize_mode_switching().  */
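
/* The mode switching pass is driven entirely by target macros.  As an
   illustrative (hypothetical) sketch only -- the mode encoding and the
   helper names below are invented, not taken from any real target -- a
   port with a single entity that has two rounding modes plus a "don't
   care" mode might provide definitions along these lines in its tm.h:  */
#if 0
#define OPTIMIZE_MODE_SWITCHING(ENTITY) 1

/* Two real modes plus one "no mode" value per entity.  */
#define NUM_MODES_FOR_MODE_SWITCHING { 3 }

/* Mode required by INSN, or the "no mode" value (here 2) if none;
   hypothetical_mode_needed would inspect the insn's attributes.  */
#define MODE_NEEDED(ENTITY, INSN) (hypothetical_mode_needed (INSN))

/* Map LCM priority N (0 = highest) to a mode for ENTITY.  */
#define MODE_PRIORITY_TO_MODE(ENTITY, N) (N)

/* Emit whatever insns set ENTITY to MODE; HW_LIVE says which hard
   registers are live so scratch registers can be chosen.  */
#define EMIT_MODE_SET(ENTITY, MODE, HW_LIVE) \
  emit_insn (gen_hypothetical_set_rounding_mode (GEN_INT (MODE)))
#endif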

/* This structure contains the information for each insn which requires
   a specific mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed.
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.
   REGS_LIVE is the set of hard registers live at INSN_PTR.  */
struct seginfo
{
  int mode;
  rtx insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

struct bb_info
{
  struct seginfo *seginfo;
  int computing;
};

/* These bitmaps are used for the LCM algorithm.  */

#ifdef OPTIMIZE_MODE_SWITCHING
static sbitmap *antic;
static sbitmap *transp;
static sbitmap *comp;
static sbitmap *delete;
static sbitmap *insert;

static struct seginfo * new_seginfo PARAMS ((int, rtx, int, HARD_REG_SET));
static void add_seginfo PARAMS ((struct bb_info *, struct seginfo *));
static void reg_dies PARAMS ((rtx, HARD_REG_SET));
static void reg_becomes_live PARAMS ((rtx, rtx, void *));
static void make_preds_opaque PARAMS ((basic_block, int));
#endif
\f
#ifdef OPTIMIZE_MODE_SWITCHING

/* Allocate a new seginfo structure, initialized with the MODE, INSN,
   and basic block BB parameters.  */

static struct seginfo *
new_seginfo (mode, insn, bb, regs_live)
     int mode;
     rtx insn;
     int bb;
     HARD_REG_SET regs_live;
{
  struct seginfo *ptr;

  ptr = xmalloc (sizeof (struct seginfo));
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  COPY_HARD_REG_SET (ptr->regs_live, regs_live);
  return ptr;
}

/* Add a seginfo element to the end of a list.
   HEAD is a pointer to the list beginning.
   INFO is the structure to be linked in.  */

static void
add_seginfo (head, info)
     struct bb_info *head;
     struct seginfo *info;
{
  struct seginfo *ptr;

  if (head->seginfo == NULL)
    head->seginfo = info;
  else
    {
      ptr = head->seginfo;
      while (ptr->next != NULL)
	ptr = ptr->next;
      ptr->next = info;
    }
}

/* Make all predecessors of basic block B opaque, recursively, till we hit
   some that are already non-transparent, or an edge where aux is set; that
   denotes that a mode set is to be done on that edge.
   J is the bit number in the bitmaps that corresponds to the entity that
   we are currently handling mode-switching for.  */

static void
make_preds_opaque (b, j)
     basic_block b;
     int j;
{
  edge e;

  for (e = b->pred; e; e = e->pred_next)
    {
      basic_block pb = e->src;

      if (e->aux || ! TEST_BIT (transp[pb->index], j))
	continue;

      RESET_BIT (transp[pb->index], j);
      make_preds_opaque (pb, j);
    }
}

/* Record in LIVE that register REG died.  */

static void
reg_dies (reg, live)
     rtx reg;
     HARD_REG_SET live;
{
  int regno;

  if (GET_CODE (reg) != REG)
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg));

      /* Clear every hard register making up REG.  The original loop
	 decremented NREGS in both the condition and the increment,
	 skipping every other register.  */
      for (; nregs-- > 0; regno++)
	CLEAR_HARD_REG_BIT (live, regno);
    }
}

/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (reg, setter, live)
     rtx reg;
     rtx setter ATTRIBUTE_UNUSED;
     void *live;
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (GET_CODE (reg) != REG)
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int nregs = HARD_REGNO_NREGS (regno, GET_MODE (reg));

      for (; nregs-- > 0; regno++)
	SET_HARD_REG_BIT (* (HARD_REG_SET *) live, regno);
    }
}
#endif /* OPTIMIZE_MODE_SWITCHING */

/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  */

void
optimize_mode_switching (file)
     FILE *file ATTRIBUTE_UNUSED;
{
#ifdef OPTIMIZE_MODE_SWITCHING
  rtx insn;
  int bb, e;
  edge eg;
  int need_commit = 0;
  sbitmap *kill;
  struct edge_list *edge_list;
  static int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES (sizeof num_modes / sizeof (int))
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities;
  int max_num_modes = 0;

  for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
    {
      if (OPTIMIZE_MODE_SWITCHING (e))
	{
	  /* Create the list of segments within each basic block.  */
	  bb_info[n_entities]
	    = (struct bb_info *) xcalloc (n_basic_blocks, sizeof **bb_info);
	  entity_map[n_entities++] = e;
	  if (num_modes[e] > max_num_modes)
	    max_num_modes = num_modes[e];
	}
    }

  if (! n_entities)
    return;

#ifdef MODE_USES_IN_EXIT_BLOCK
  /* For some ABIs a particular mode setting is required at function
     exit.  */
  for (eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next)
    {
      int bb = eg->src->index;
      rtx insn = BLOCK_END (bb);
      rtx use = MODE_USES_IN_EXIT_BLOCK;

      /* If the block ends with the use of the return value
	 and / or a return, insert the new use(s) in front of them.  */
      while ((GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE)
	     || GET_CODE (insn) == JUMP_INSN)
	insn = PREV_INSN (insn);

      use = emit_insn_after (use, insn);
      if (insn == BLOCK_END (bb))
	BLOCK_END (bb) = use;
      else if (NEXT_INSN (use) == BLOCK_HEAD (bb))
	BLOCK_HEAD (bb) = NEXT_INSN (insn);
    }
#endif

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (n_basic_blocks, n_entities);
  transp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
  comp = sbitmap_vector_alloc (n_basic_blocks, n_entities);

  sbitmap_vector_ones (transp, n_basic_blocks);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];

      /* Determine what mode the first use (if any) of entity E in each
	 block needs; this will be the mode that is anticipatable for
	 that block.  Also compute the initial transparency settings.  */
      for (bb = 0; bb < n_basic_blocks; bb++)
	{
	  struct seginfo *ptr;
	  int last_mode = no_mode;
	  HARD_REG_SET live_now;

	  REG_SET_TO_HARD_REG_SET (live_now,
				   BASIC_BLOCK (bb)->global_live_at_start);
	  for (insn = BLOCK_HEAD (bb);
	       insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
	       insn = NEXT_INSN (insn))
	    {
	      if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
		{
		  int mode = MODE_NEEDED (e, insn);
		  rtx link;

		  if (mode != no_mode && mode != last_mode)
		    {
		      last_mode = mode;
		      ptr = new_seginfo (mode, insn, bb, live_now);
		      add_seginfo (info + bb, ptr);
		      RESET_BIT (transp[bb], j);
		    }

		  /* Update LIVE_NOW.  */
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_DEAD)
		      reg_dies (XEXP (link, 0), live_now);

		  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_UNUSED)
		      reg_dies (XEXP (link, 0), live_now);
		}
	    }

	  info[bb].computing = last_mode;

	  /* Check for blocks without ANY mode requirements.  */
	  if (last_mode == no_mode)
	    {
	      ptr = new_seginfo (no_mode, insn, bb, live_now);
	      add_seginfo (info + bb, ptr);
	    }
	}
#ifdef MODE_AT_ENTRY
      {
	int mode = MODE_AT_ENTRY (e);

	if (mode != no_mode)
	  {
	    for (eg = ENTRY_BLOCK_PTR->succ; eg; eg = eg->succ_next)
	      {
		bb = eg->dest->index;

		/* By always making this nontransparent, we save
		   an extra check in make_preds_opaque.  We also
		   need this to avoid confusing pre_edge_lcm when
		   antic is cleared but transp and comp are set.  */
		RESET_BIT (transp[bb], j);

		/* If the block already has MODE, pretend it
		   has none (because we don't need to set it),
		   but retain whatever mode it computes.  */
		if (info[bb].seginfo->mode == mode)
		  info[bb].seginfo->mode = no_mode;

		/* Insert a fake computing definition of MODE into entry
		   blocks which compute no mode.  This represents the mode
		   on entry.  */
		else if (info[bb].computing == no_mode)
		  {
		    info[bb].computing = mode;
		    info[bb].seginfo->mode = no_mode;
		  }
	      }
	  }
      }
#endif /* MODE_AT_ENTRY */
    }

  kill = sbitmap_vector_alloc (n_basic_blocks, n_entities);
  for (i = 0; i < max_num_modes; i++)
    {
      int current_mode[N_ENTITIES];

      /* Set the anticipatable and computing arrays.  */
      sbitmap_vector_zero (antic, n_basic_blocks);
      sbitmap_vector_zero (comp, n_basic_blocks);
      for (j = n_entities - 1; j >= 0; j--)
	{
	  int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
	  struct bb_info *info = bb_info[j];

	  for (bb = 0; bb < n_basic_blocks; bb++)
	    {
	      if (info[bb].seginfo->mode == m)
		SET_BIT (antic[bb], j);

	      if (info[bb].computing == m)
		SET_BIT (comp[bb], j);
	    }
	}

      /* Calculate the optimal locations for the placement of mode
	 switches to modes with priority I.  */
      for (bb = n_basic_blocks - 1; bb >= 0; bb--)
	sbitmap_not (kill[bb], transp[bb]);
      edge_list = pre_edge_lcm (file, 1, transp, comp, antic,
				kill, &insert, &delete);

      for (j = n_entities - 1; j >= 0; j--)
	{
	  /* Insert all mode sets that have been inserted by lcm.  */
	  int no_mode = num_modes[entity_map[j]];

	  /* Wherever we have moved a mode setting upwards in the flow graph,
	     the blocks between the new setting site and the now redundant
	     computation cease to be transparent for any lower-priority
	     mode of the same entity.  First set the aux field of each
	     insertion site edge non-transparent, then propagate the new
	     non-transparency from the redundant computation upwards till
	     we hit an insertion site or an already non-transparent block.  */
	  for (e = NUM_EDGES (edge_list) - 1; e >= 0; e--)
	    {
	      edge eg = INDEX_EDGE (edge_list, e);
	      int mode;
	      basic_block src_bb;
	      HARD_REG_SET live_at_edge;
	      rtx mode_set;

	      eg->aux = 0;

	      if (! TEST_BIT (insert[e], j))
		continue;

	      eg->aux = (void *) 1;

	      mode = current_mode[j];
	      src_bb = eg->src;

	      REG_SET_TO_HARD_REG_SET (live_at_edge,
				       src_bb->global_live_at_end);

	      start_sequence ();
	      EMIT_MODE_SET (entity_map[j], mode, live_at_edge);
	      mode_set = gen_sequence ();
	      end_sequence ();

	      /* If this is an abnormal edge, we'll insert at the end of the
		 previous block.  */
	      if (eg->flags & EDGE_ABNORMAL)
		{
		  src_bb->end = emit_insn_after (mode_set, src_bb->end);
		  bb_info[j][src_bb->index].computing = mode;
		  RESET_BIT (transp[src_bb->index], j);
		}
	      else
		{
		  need_commit = 1;
		  insert_insn_on_edge (mode_set, eg);
		}
	    }

	  for (bb = n_basic_blocks - 1; bb >= 0; bb--)
	    {
	      if (TEST_BIT (delete[bb], j))
		{
		  make_preds_opaque (BASIC_BLOCK (bb), j);
		  /* Cancel the 'deleted' mode set.  */
		  bb_info[j][bb].seginfo->mode = no_mode;
		}
	    }
	}

      free_edge_list (edge_list);
    }

  /* Now output the remaining mode sets in all the segments.  */
  for (j = n_entities - 1; j >= 0; j--)
    {
      for (bb = n_basic_blocks - 1; bb >= 0; bb--)
	{
	  struct seginfo *ptr, *next;

	  for (ptr = bb_info[j][bb].seginfo; ptr; ptr = next)
	    {
	      next = ptr->next;
	      if (ptr->mode != FP_MODE_NONE)
		{
		  rtx mode_set;

		  start_sequence ();
		  EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live);
		  mode_set = gen_sequence ();
		  end_sequence ();

		  emit_block_insn_before (mode_set, ptr->insn_ptr,
					  BASIC_BLOCK (ptr->bbnum));
		}

	      free (ptr);
	    }
	}

      free (bb_info[j]);
    }

  /* Finished.  Free up all the things we've allocated.  */
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (delete);
  sbitmap_vector_free (insert);

  if (need_commit)
    commit_edge_insertions ();
#endif /* OPTIMIZE_MODE_SWITCHING */
}