1 /* Dataflow support routines.
2 Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
3 Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz,
4 mhayes@redhat.com)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA.
22
23
24 OVERVIEW:
25
26 This file provides some dataflow routines for computing reaching defs,
27 upward exposed uses, live variables, def-use chains, and use-def
28 chains. The global dataflow is performed using simple iterative
29 methods with a worklist and could be sped up by ordering the blocks
30 with a depth first search order.
31
32 A `struct ref' data structure (ref) is allocated for every register
33 reference (def or use) and this records the insn and bb the ref is
34 found within. The refs are linked together in chains of uses and defs
35 for each insn and for each register. Each ref also has a chain field
36 that links all the use refs for a def or all the def refs for a use.
37 This is used to create use-def or def-use chains.
38
39
40 USAGE:
41
42 Here's an example of using the dataflow routines.
43
44 struct df *df;
45
46 df = df_init ();
47
48 df_analyse (df, 0, DF_ALL);
49
50 df_dump (df, DF_ALL, stderr);
51
52 df_finish (df);
53
54
55 df_init simply creates a poor man's object (df) that needs to be
56 passed to all the dataflow routines. df_finish destroys this
57 object and frees up any allocated memory.
58
59 df_analyse performs the following:
60
61 1. Records defs and uses by scanning the insns in each basic block
62 or by scanning the insns queued by df_insn_modify.
63 2. Links defs and uses into insn-def and insn-use chains.
64 3. Links defs and uses into reg-def and reg-use chains.
65 4. Assigns LUIDs to each insn (for modified blocks).
66 5. Calculates local reaching definitions.
67 6. Calculates global reaching definitions.
68 7. Creates use-def chains.
69 8. Calculates local reaching uses (upwards exposed uses).
70 9. Calculates global reaching uses.
 71 10. Creates def-use chains (see the example following this list).
72 11. Calculates local live registers.
73 12. Calculates global live registers.
74 13. Calculates register lifetimes and determines local registers.
75
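   For example, a sketch (assuming DF_DU_CHAIN was requested, so that
   step 10 above has created def-use chains) of walking all the uses
   reached by a def DEF:

   struct df_link *link;

   for (link = DF_REF_CHAIN (def); link; link = link->next)
     {
       struct ref *use = link->ref;
       rtx insn = DF_REF_INSN (use);

       ... process USE and INSN ...
     }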
76
77 PHILOSOPHY:
78
79 Note that the dataflow information is not updated for every newly
80 deleted or created insn. If the dataflow information requires
 81 updating then all the changed, new, or deleted insns need to be
82 marked with df_insn_modify (or df_insns_modify) either directly or
83 indirectly (say through calling df_insn_delete). df_insn_modify
84 marks all the modified insns to get processed the next time df_analyse
85 is called.
86
87 Beware that tinkering with insns may invalidate the dataflow information.
88 The philosophy behind these routines is that once the dataflow
89 information has been gathered, the user should store what they require
90 before they tinker with any insn. Once a reg is replaced, for example,
91 then the reg-def/reg-use chains will point to the wrong place. Once a
92 whole lot of changes have been made, df_analyse can be called again
93 to update the dataflow information. Currently, this is not very smart
94 with regard to propagating changes to the dataflow so it should not
95 be called very often.
96
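   For example, a sketch of the intended protocol (assuming the public
   df_insn_modify takes the same df, bb, insn triple used throughout
   this file; DF_ALL is illustrative, request only what is needed):

   ... record the required refs and chains ...
   PATTERN (insn) = new_pattern;
   df_insn_modify (df, bb, insn);
   ... make further changes, marking each insn the same way ...
   df_analyse (df, 0, DF_ALL);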
97
98 DATA STRUCTURES:
99
100 The basic object is a REF (reference) and this may either be a DEF
101 (definition) or a USE of a register.
102
103 These are linked into a variety of lists; namely reg-def, reg-use,
104 insn-def, insn-use, def-use, and use-def lists. For example,
105 the reg-def lists contain all the refs that define a given register
106 while the insn-use lists contain all the refs used by an insn.
107
108 Note that the reg-def and reg-use chains are generally short (except for the
109 hard registers) and thus it is much faster to search these chains
110 rather than searching the def or use bitmaps.
111
112 If the insns are in SSA form then the reg-def and use-def lists
113 should only contain the single defining ref.
114
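   For example, a sketch of scanning all the defs of a register REGNO
   through the reg-def list described above (the same idiom many of
   the routines below use):

   struct df_link *link;

   for (link = df->regs[regno].defs; link; link = link->next)
     {
       struct ref *def = link->ref;

       ... DF_REF_INSN (def) is the defining insn ...
     }
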
115 TODO:
116
117 1) Incremental dataflow analysis.
118
119 Note that if a loop invariant insn is hoisted (or sunk), we do not
120 need to change the def-use or use-def chains. All we have to do is to
121 change the bb field for all the associated defs and uses and to
122 renumber the LUIDs for the original and new basic blocks of the insn.
123
124 When shadowing loop mems we create new uses and defs for new pseudos
125 so we do not affect the existing dataflow information.
126
127 My current strategy is to queue up all modified, created, or deleted
128 insns so when df_analyse is called we can easily determine all the new
129 or deleted refs. Currently the global dataflow information is
130 recomputed from scratch but this could be propagated more efficiently.
131
132 2) Improved global data flow computation using depth first search.
133
134 3) Reduced memory requirements.
135
136 We could operate a pool of ref structures. When a ref is deleted it
137 gets returned to the pool (say by linking on to a chain of free refs).
138 This will require a pair of bitmaps for defs and uses so that we can
139 tell which ones have been changed. Alternatively, we could
140 periodically squeeze the def and use tables and associated bitmaps and
141 renumber the def and use ids.
142
143 4) Ordering of reg-def and reg-use lists.
144
145 Should the first entry in the def list be the first def (within a BB)?
146 Similarly, should the first entry in the use list be the last use
147 (within a BB)?
148
149 5) Working with a sub-CFG.
150
 151 Often the whole CFG does not need to be analysed; for example,
 152 when optimising a loop, only certain registers are of interest.
153 Perhaps there should be a bitmap argument to df_analyse to specify
154 which registers should be analysed? */
155
156 #define HANDLE_SUBREG
157
158 #include "config.h"
159 #include "system.h"
160 #include "rtl.h"
161 #include "tm_p.h"
162 #include "insn-config.h"
163 #include "recog.h"
164 #include "function.h"
165 #include "regs.h"
166 #include "obstack.h"
167 #include "hard-reg-set.h"
168 #include "basic-block.h"
169 #include "sbitmap.h"
170 #include "bitmap.h"
171 #include "df.h"
172 #include "fibheap.h"
173
174 #define FOR_ALL_BBS(BB, CODE) \
175 do { \
176 int node_; \
177 for (node_ = 0; node_ < n_basic_blocks; node_++) \
178 {(BB) = BASIC_BLOCK (node_); CODE;};} while (0)
179
180 #define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \
181 do { \
182 unsigned int node_; \
183 EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_, \
184 {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)
185
186 #define FOR_EACH_BB_IN_BITMAP_REV(BITMAP, MIN, BB, CODE) \
187 do { \
188 unsigned int node_; \
189 EXECUTE_IF_SET_IN_BITMAP_REV (BITMAP, node_, \
190 {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)
191
192 #define FOR_EACH_BB_IN_SBITMAP(BITMAP, MIN, BB, CODE) \
193 do { \
194 unsigned int node_; \
195 EXECUTE_IF_SET_IN_SBITMAP (BITMAP, MIN, node_, \
196 {(BB) = BASIC_BLOCK (node_); CODE;});} while (0)
197
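/* Example usage of the iterators above (this is exactly how
   df_refs_record below walks a set of basic blocks):

   FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
     {
       df_bb_refs_record (df, bb);
     });  */
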
198 #define obstack_chunk_alloc xmalloc
199 #define obstack_chunk_free free
200
201 static struct obstack df_ref_obstack;
202 static struct df *ddf;
203
204 static void df_reg_table_realloc PARAMS((struct df *, int));
205 #if 0
206 static void df_def_table_realloc PARAMS((struct df *, int));
207 #endif
208 static void df_insn_table_realloc PARAMS((struct df *, int));
209 static void df_bitmaps_alloc PARAMS((struct df *, int));
210 static void df_bitmaps_free PARAMS((struct df *, int));
211 static void df_free PARAMS((struct df *));
212 static void df_alloc PARAMS((struct df *, int));
213
214 static rtx df_reg_clobber_gen PARAMS((unsigned int));
215 static rtx df_reg_use_gen PARAMS((unsigned int));
216
217 static inline struct df_link *df_link_create PARAMS((struct ref *,
218 struct df_link *));
219 static struct df_link *df_ref_unlink PARAMS((struct df_link **, struct ref *));
220 static void df_def_unlink PARAMS((struct df *, struct ref *));
221 static void df_use_unlink PARAMS((struct df *, struct ref *));
222 static void df_insn_refs_unlink PARAMS ((struct df *, basic_block, rtx));
223 #if 0
224 static void df_bb_refs_unlink PARAMS ((struct df *, basic_block));
225 static void df_refs_unlink PARAMS ((struct df *, bitmap));
226 #endif
227
228 static struct ref *df_ref_create PARAMS((struct df *,
229 rtx, rtx *, basic_block, rtx,
230 enum df_ref_type, enum df_ref_flags));
231 static void df_ref_record_1 PARAMS((struct df *, rtx, rtx *,
232 basic_block, rtx, enum df_ref_type,
233 enum df_ref_flags));
234 static void df_ref_record PARAMS((struct df *, rtx, rtx *,
235 basic_block bb, rtx, enum df_ref_type,
236 enum df_ref_flags));
237 static void df_def_record_1 PARAMS((struct df *, rtx, basic_block, rtx));
238 static void df_defs_record PARAMS((struct df *, rtx, basic_block, rtx));
239 static void df_uses_record PARAMS((struct df *, rtx *,
240 enum df_ref_type, basic_block, rtx,
241 enum df_ref_flags));
242 static void df_insn_refs_record PARAMS((struct df *, basic_block, rtx));
243 static void df_bb_refs_record PARAMS((struct df *, basic_block));
244 static void df_refs_record PARAMS((struct df *, bitmap));
245
246 static void df_bb_reg_def_chain_create PARAMS((struct df *, basic_block));
247 static void df_reg_def_chain_create PARAMS((struct df *, bitmap));
248 static void df_bb_reg_use_chain_create PARAMS((struct df *, basic_block));
249 static void df_reg_use_chain_create PARAMS((struct df *, bitmap));
250 static void df_bb_du_chain_create PARAMS((struct df *, basic_block, bitmap));
251 static void df_du_chain_create PARAMS((struct df *, bitmap));
252 static void df_bb_ud_chain_create PARAMS((struct df *, basic_block));
253 static void df_ud_chain_create PARAMS((struct df *, bitmap));
254 static void df_bb_rd_local_compute PARAMS((struct df *, basic_block));
255 static void df_rd_local_compute PARAMS((struct df *, bitmap));
256 static void df_bb_ru_local_compute PARAMS((struct df *, basic_block));
257 static void df_ru_local_compute PARAMS((struct df *, bitmap));
258 static void df_bb_lr_local_compute PARAMS((struct df *, basic_block));
259 static void df_lr_local_compute PARAMS((struct df *, bitmap));
260 static void df_bb_reg_info_compute PARAMS((struct df *, basic_block, bitmap));
261 static void df_reg_info_compute PARAMS((struct df *, bitmap));
262
263 static int df_bb_luids_set PARAMS((struct df *df, basic_block));
264 static int df_luids_set PARAMS((struct df *df, bitmap));
265
266 static int df_modified_p PARAMS ((struct df *, bitmap));
267 static int df_refs_queue PARAMS ((struct df *));
268 static int df_refs_process PARAMS ((struct df *));
269 static int df_bb_refs_update PARAMS ((struct df *, basic_block));
270 static int df_refs_update PARAMS ((struct df *));
271 static void df_analyse_1 PARAMS((struct df *, bitmap, int, int));
272
273 static void df_insns_modify PARAMS((struct df *, basic_block,
274 rtx, rtx));
275 static int df_rtx_mem_replace PARAMS ((rtx *, void *));
276 static int df_rtx_reg_replace PARAMS ((rtx *, void *));
277 void df_refs_reg_replace PARAMS ((struct df *, bitmap,
278 struct df_link *, rtx, rtx));
279
280 static int df_def_dominates_all_uses_p PARAMS((struct df *, struct ref *def));
281 static int df_def_dominates_uses_p PARAMS((struct df *,
282 struct ref *def, bitmap));
283 static struct ref *df_bb_regno_last_use_find PARAMS((struct df *, basic_block,
284 unsigned int));
285 static struct ref *df_bb_regno_first_def_find PARAMS((struct df *, basic_block,
286 unsigned int));
287 static struct ref *df_bb_insn_regno_last_use_find PARAMS((struct df *,
288 basic_block,
289 rtx, unsigned int));
290 static struct ref *df_bb_insn_regno_first_def_find PARAMS((struct df *,
291 basic_block,
292 rtx, unsigned int));
293
294 static void df_chain_dump PARAMS((struct df_link *, FILE *file));
295 static void df_chain_dump_regno PARAMS((struct df_link *, FILE *file));
296 static void df_regno_debug PARAMS ((struct df *, unsigned int, FILE *));
297 static void df_ref_debug PARAMS ((struct df *, struct ref *, FILE *));
298 static void df_rd_transfer_function PARAMS ((int, int *, bitmap, bitmap,
299 bitmap, bitmap, void *));
300 static void df_ru_transfer_function PARAMS ((int, int *, bitmap, bitmap,
301 bitmap, bitmap, void *));
302 static void df_lr_transfer_function PARAMS ((int, int *, bitmap, bitmap,
303 bitmap, bitmap, void *));
304 static void hybrid_search_bitmap PARAMS ((basic_block, bitmap *, bitmap *,
305 bitmap *, bitmap *, enum df_flow_dir,
306 enum df_confluence_op,
307 transfer_function_bitmap,
308 sbitmap, sbitmap, void *));
309 static void hybrid_search_sbitmap PARAMS ((basic_block, sbitmap *, sbitmap *,
310 sbitmap *, sbitmap *, enum df_flow_dir,
311 enum df_confluence_op,
312 transfer_function_sbitmap,
313 sbitmap, sbitmap, void *));
314 static inline bool read_modify_subreg_p PARAMS ((rtx));
315
316 \f
317 /* Local memory allocation/deallocation routines. */
318
319
320 /* Increase the insn info table by SIZE more elements. */
321 static void
322 df_insn_table_realloc (df, size)
323 struct df *df;
324 int size;
325 {
326 /* Make table 25 percent larger by default. */
327 if (! size)
328 size = df->insn_size / 4;
329
330 size += df->insn_size;
331
332 df->insns = (struct insn_info *)
333 xrealloc (df->insns, size * sizeof (struct insn_info));
334
335 memset (df->insns + df->insn_size, 0,
336 (size - df->insn_size) * sizeof (struct insn_info));
337
338 df->insn_size = size;
339
340 if (! df->insns_modified)
341 {
342 df->insns_modified = BITMAP_XMALLOC ();
343 bitmap_zero (df->insns_modified);
344 }
345 }
346
347
348 /* Increase the reg info table by SIZE more elements. */
349 static void
350 df_reg_table_realloc (df, size)
351 struct df *df;
352 int size;
353 {
354 /* Make table 25 percent larger by default. */
355 if (! size)
356 size = df->reg_size / 4;
357
358 size += df->reg_size;
359
360 df->regs = (struct reg_info *)
361 xrealloc (df->regs, size * sizeof (struct reg_info));
362
363 /* Zero the new entries. */
364 memset (df->regs + df->reg_size, 0,
365 (size - df->reg_size) * sizeof (struct reg_info));
366
367 df->reg_size = size;
368 }
369
370
371 #if 0
372 /* Not currently used. */
373 static void
374 df_def_table_realloc (df, size)
375 struct df *df;
376 int size;
377 {
378 int i;
379 struct ref *refs;
380
381 /* Make table 25 percent larger by default. */
382 if (! size)
383 size = df->def_size / 4;
384
385 df->def_size += size;
386 df->defs = xrealloc (df->defs,
387 df->def_size * sizeof (*df->defs));
388
389 /* Allocate a new block of memory and link into list of blocks
390 that will need to be freed later. */
391
392 refs = xmalloc (size * sizeof (*refs));
393
394 /* Link all the new refs together, overloading the chain field. */
395 for (i = 0; i < size - 1; i++)
396 refs[i].chain = (struct df_link *)(refs + i + 1);
397 refs[size - 1].chain = 0;
398 }
399 #endif
400
401
402
403 /* Allocate bitmaps for each basic block. */
404 static void
405 df_bitmaps_alloc (df, flags)
406 struct df *df;
407 int flags;
408 {
409 unsigned int i;
410 int dflags = 0;
411
412 /* Free the bitmaps if they need resizing. */
413 if ((flags & DF_LR) && df->n_regs < (unsigned int)max_reg_num ())
414 dflags |= DF_LR | DF_RU;
415 if ((flags & DF_RU) && df->n_uses < df->use_id)
416 dflags |= DF_RU;
417 if ((flags & DF_RD) && df->n_defs < df->def_id)
418 dflags |= DF_RD;
419
420 if (dflags)
421 df_bitmaps_free (df, dflags);
422
423 df->n_defs = df->def_id;
424 df->n_uses = df->use_id;
425
426 for (i = 0; i < df->n_bbs; i++)
427 {
428 basic_block bb = BASIC_BLOCK (i);
429 struct bb_info *bb_info = DF_BB_INFO (df, bb);
430
431 if (flags & DF_RD && ! bb_info->rd_in)
432 {
433 /* Allocate bitmaps for reaching definitions. */
434 bb_info->rd_kill = BITMAP_XMALLOC ();
435 bitmap_zero (bb_info->rd_kill);
436 bb_info->rd_gen = BITMAP_XMALLOC ();
437 bitmap_zero (bb_info->rd_gen);
438 bb_info->rd_in = BITMAP_XMALLOC ();
439 bb_info->rd_out = BITMAP_XMALLOC ();
440 bb_info->rd_valid = 0;
441 }
442
443 if (flags & DF_RU && ! bb_info->ru_in)
444 {
445 /* Allocate bitmaps for upward exposed uses. */
446 bb_info->ru_kill = BITMAP_XMALLOC ();
447 bitmap_zero (bb_info->ru_kill);
448 /* Note the lack of symmetry. */
449 bb_info->ru_gen = BITMAP_XMALLOC ();
450 bitmap_zero (bb_info->ru_gen);
451 bb_info->ru_in = BITMAP_XMALLOC ();
452 bb_info->ru_out = BITMAP_XMALLOC ();
453 bb_info->ru_valid = 0;
454 }
455
456 if (flags & DF_LR && ! bb_info->lr_in)
457 {
458 /* Allocate bitmaps for live variables. */
459 bb_info->lr_def = BITMAP_XMALLOC ();
460 bitmap_zero (bb_info->lr_def);
461 bb_info->lr_use = BITMAP_XMALLOC ();
462 bitmap_zero (bb_info->lr_use);
463 bb_info->lr_in = BITMAP_XMALLOC ();
464 bb_info->lr_out = BITMAP_XMALLOC ();
465 bb_info->lr_valid = 0;
466 }
467 }
468 }
469
470
471 /* Free bitmaps for each basic block. */
472 static void
473 df_bitmaps_free (df, flags)
 474 struct df *df;
475 int flags;
476 {
477 unsigned int i;
478
479 for (i = 0; i < df->n_bbs; i++)
480 {
481 basic_block bb = BASIC_BLOCK (i);
482 struct bb_info *bb_info = DF_BB_INFO (df, bb);
483
484 if (!bb_info)
485 continue;
486
487 if ((flags & DF_RD) && bb_info->rd_in)
488 {
489 /* Free bitmaps for reaching definitions. */
490 BITMAP_XFREE (bb_info->rd_kill);
491 bb_info->rd_kill = NULL;
492 BITMAP_XFREE (bb_info->rd_gen);
493 bb_info->rd_gen = NULL;
494 BITMAP_XFREE (bb_info->rd_in);
495 bb_info->rd_in = NULL;
496 BITMAP_XFREE (bb_info->rd_out);
497 bb_info->rd_out = NULL;
498 }
499
500 if ((flags & DF_RU) && bb_info->ru_in)
501 {
502 /* Free bitmaps for upward exposed uses. */
503 BITMAP_XFREE (bb_info->ru_kill);
504 bb_info->ru_kill = NULL;
505 BITMAP_XFREE (bb_info->ru_gen);
506 bb_info->ru_gen = NULL;
507 BITMAP_XFREE (bb_info->ru_in);
508 bb_info->ru_in = NULL;
509 BITMAP_XFREE (bb_info->ru_out);
510 bb_info->ru_out = NULL;
511 }
512
513 if ((flags & DF_LR) && bb_info->lr_in)
514 {
515 /* Free bitmaps for live variables. */
516 BITMAP_XFREE (bb_info->lr_def);
517 bb_info->lr_def = NULL;
518 BITMAP_XFREE (bb_info->lr_use);
519 bb_info->lr_use = NULL;
520 BITMAP_XFREE (bb_info->lr_in);
521 bb_info->lr_in = NULL;
522 BITMAP_XFREE (bb_info->lr_out);
523 bb_info->lr_out = NULL;
524 }
525 }
526 df->flags &= ~(flags & (DF_RD | DF_RU | DF_LR));
527 }
528
529
530 /* Allocate and initialise dataflow memory. */
531 static void
532 df_alloc (df, n_regs)
533 struct df *df;
534 int n_regs;
535 {
536 int n_insns;
537 int i;
538
539 gcc_obstack_init (&df_ref_obstack);
540
541 /* Perhaps we should use LUIDs to save memory for the insn_refs
542 table. This is only a small saving; a few pointers. */
543 n_insns = get_max_uid () + 1;
544
545 df->def_id = 0;
546 df->n_defs = 0;
547 /* Approximate number of defs by number of insns. */
548 df->def_size = n_insns;
549 df->defs = xmalloc (df->def_size * sizeof (*df->defs));
550
551 df->use_id = 0;
552 df->n_uses = 0;
553 /* Approximate number of uses by twice number of insns. */
554 df->use_size = n_insns * 2;
555 df->uses = xmalloc (df->use_size * sizeof (*df->uses));
556
557 df->n_regs = n_regs;
558 df->n_bbs = n_basic_blocks;
559
560 /* Allocate temporary working array used during local dataflow analysis. */
561 df->reg_def_last = xmalloc (df->n_regs * sizeof (struct ref *));
562
563 df_insn_table_realloc (df, n_insns);
564
565 df_reg_table_realloc (df, df->n_regs);
566
567 df->bbs_modified = BITMAP_XMALLOC ();
568 bitmap_zero (df->bbs_modified);
569
570 df->flags = 0;
571
572 df->bbs = xcalloc (df->n_bbs, sizeof (struct bb_info));
573
574 df->all_blocks = BITMAP_XMALLOC ();
575 for (i = 0; i < n_basic_blocks; i++)
576 bitmap_set_bit (df->all_blocks, i);
577 }
578
579
580 /* Free all the dataflow info. */
581 static void
582 df_free (df)
583 struct df *df;
584 {
585 df_bitmaps_free (df, DF_ALL);
586
587 if (df->bbs)
588 free (df->bbs);
589 df->bbs = 0;
590
591 if (df->insns)
592 free (df->insns);
593 df->insns = 0;
594 df->insn_size = 0;
595
596 if (df->defs)
597 free (df->defs);
598 df->defs = 0;
599 df->def_size = 0;
600 df->def_id = 0;
601
602 if (df->uses)
603 free (df->uses);
604 df->uses = 0;
605 df->use_size = 0;
606 df->use_id = 0;
607
608 if (df->regs)
609 free (df->regs);
610 df->regs = 0;
611 df->reg_size = 0;
612
613 if (df->bbs_modified)
614 BITMAP_XFREE (df->bbs_modified);
615 df->bbs_modified = 0;
616
617 if (df->insns_modified)
618 BITMAP_XFREE (df->insns_modified);
619 df->insns_modified = 0;
620
621 BITMAP_XFREE (df->all_blocks);
622 df->all_blocks = 0;
623
624 obstack_free (&df_ref_obstack, NULL);
625 }
626 \f
627 /* Local miscellaneous routines. */
628
629 /* Return a USE for register REGNO. */
630 static rtx df_reg_use_gen (regno)
631 unsigned int regno;
632 {
633 rtx reg;
634 rtx use;
635
636 reg = regno >= FIRST_PSEUDO_REGISTER
637 ? regno_reg_rtx[regno] : gen_rtx_REG (reg_raw_mode[regno], regno);
638
639 use = gen_rtx_USE (GET_MODE (reg), reg);
640 return use;
641 }
642
643
644 /* Return a CLOBBER for register REGNO. */
645 static rtx df_reg_clobber_gen (regno)
646 unsigned int regno;
647 {
648 rtx reg;
649 rtx use;
650
651 reg = regno >= FIRST_PSEUDO_REGISTER
652 ? regno_reg_rtx[regno] : gen_rtx_REG (reg_raw_mode[regno], regno);
653
654 use = gen_rtx_CLOBBER (GET_MODE (reg), reg);
655 return use;
656 }
657 \f
658 /* Local chain manipulation routines. */
659
660 /* Create a link in a def-use or use-def chain. */
661 static inline struct df_link *
662 df_link_create (ref, next)
663 struct ref *ref;
664 struct df_link *next;
665 {
666 struct df_link *link;
667
668 link = (struct df_link *) obstack_alloc (&df_ref_obstack,
669 sizeof (*link));
670 link->next = next;
671 link->ref = ref;
672 return link;
673 }
674
675
 676 /* Unlink REF from the chain pointed to by PHEAD. */
677 static struct df_link *
678 df_ref_unlink (phead, ref)
679 struct df_link **phead;
680 struct ref *ref;
681 {
682 struct df_link *link = *phead;
683
684 if (link)
685 {
686 if (! link->next)
687 {
688 /* Only a single ref. It must be the one we want.
689 If not, the def-use and use-def chains are likely to
690 be inconsistent. */
691 if (link->ref != ref)
692 abort ();
693 /* Now have an empty chain. */
694 *phead = NULL;
695 }
696 else
697 {
698 /* Multiple refs. One of them must be us. */
699 if (link->ref == ref)
700 *phead = link->next;
701 else
702 {
703 /* Follow chain. */
704 for (; link->next; link = link->next)
705 {
706 if (link->next->ref == ref)
707 {
708 /* Unlink from list. */
709 link->next = link->next->next;
710 return link->next;
711 }
712 }
713 }
714 }
715 }
716 return link;
717 }
718
719
720 /* Unlink REF from all def-use/use-def chains, etc. */
721 int
722 df_ref_remove (df, ref)
723 struct df *df;
724 struct ref *ref;
725 {
726 if (DF_REF_REG_DEF_P (ref))
727 {
728 df_def_unlink (df, ref);
729 df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].defs, ref);
730 }
731 else
732 {
733 df_use_unlink (df, ref);
734 df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].uses, ref);
735 }
736 return 1;
737 }
738
739
740 /* Unlink DEF from use-def and reg-def chains. */
741 static void
742 df_def_unlink (df, def)
 743 struct df *df;
744 struct ref *def;
745 {
746 struct df_link *du_link;
747 unsigned int dregno = DF_REF_REGNO (def);
748
749 /* Follow def-use chain to find all the uses of this def. */
750 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
751 {
752 struct ref *use = du_link->ref;
753
754 /* Unlink this def from the use-def chain. */
755 df_ref_unlink (&DF_REF_CHAIN (use), def);
756 }
757 DF_REF_CHAIN (def) = 0;
758
759 /* Unlink def from reg-def chain. */
760 df_ref_unlink (&df->regs[dregno].defs, def);
761
762 df->defs[DF_REF_ID (def)] = 0;
763 }
764
765
766 /* Unlink use from def-use and reg-use chains. */
767 static void
768 df_use_unlink (df, use)
 769 struct df *df;
770 struct ref *use;
771 {
772 struct df_link *ud_link;
773 unsigned int uregno = DF_REF_REGNO (use);
774
775 /* Follow use-def chain to find all the defs of this use. */
776 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
777 {
778 struct ref *def = ud_link->ref;
779
780 /* Unlink this use from the def-use chain. */
781 df_ref_unlink (&DF_REF_CHAIN (def), use);
782 }
783 DF_REF_CHAIN (use) = 0;
784
785 /* Unlink use from reg-use chain. */
786 df_ref_unlink (&df->regs[uregno].uses, use);
787
788 df->uses[DF_REF_ID (use)] = 0;
789 }
790 \f
791 /* Local routines for recording refs. */
792
793
794 /* Create a new ref of type DF_REF_TYPE for register REG at address
795 LOC within INSN of BB. */
796 static struct ref *
797 df_ref_create (df, reg, loc, bb, insn, ref_type, ref_flags)
798 struct df *df;
799 rtx reg;
800 rtx *loc;
801 basic_block bb;
802 rtx insn;
803 enum df_ref_type ref_type;
804 enum df_ref_flags ref_flags;
805 {
806 struct ref *this_ref;
807 unsigned int uid;
808
809 this_ref = (struct ref *) obstack_alloc (&df_ref_obstack,
810 sizeof (*this_ref));
811 DF_REF_REG (this_ref) = reg;
812 DF_REF_LOC (this_ref) = loc;
813 DF_REF_BB (this_ref) = bb;
814 DF_REF_INSN (this_ref) = insn;
815 DF_REF_CHAIN (this_ref) = 0;
816 DF_REF_TYPE (this_ref) = ref_type;
817 DF_REF_FLAGS (this_ref) = ref_flags;
818 uid = INSN_UID (insn);
819
820 if (ref_type == DF_REF_REG_DEF)
821 {
822 if (df->def_id >= df->def_size)
823 {
824 /* Make table 25 percent larger. */
825 df->def_size += (df->def_size / 4);
826 df->defs = xrealloc (df->defs,
827 df->def_size * sizeof (*df->defs));
828 }
829 DF_REF_ID (this_ref) = df->def_id;
830 df->defs[df->def_id++] = this_ref;
831 }
832 else
833 {
834 if (df->use_id >= df->use_size)
835 {
836 /* Make table 25 percent larger. */
837 df->use_size += (df->use_size / 4);
838 df->uses = xrealloc (df->uses,
839 df->use_size * sizeof (*df->uses));
840 }
841 DF_REF_ID (this_ref) = df->use_id;
842 df->uses[df->use_id++] = this_ref;
843 }
844 return this_ref;
845 }
846
847
848 /* Create a new reference of type DF_REF_TYPE for a single register REG,
849 used inside the LOC rtx of INSN. */
850 static void
851 df_ref_record_1 (df, reg, loc, bb, insn, ref_type, ref_flags)
852 struct df *df;
853 rtx reg;
854 rtx *loc;
855 basic_block bb;
856 rtx insn;
857 enum df_ref_type ref_type;
858 enum df_ref_flags ref_flags;
859 {
860 df_ref_create (df, reg, loc, bb, insn, ref_type, ref_flags);
861 }
862
863
864 /* Create new references of type DF_REF_TYPE for each part of register REG
865 at address LOC within INSN of BB. */
866 static void
867 df_ref_record (df, reg, loc, bb, insn, ref_type, ref_flags)
868 struct df *df;
869 rtx reg;
870 rtx *loc;
871 basic_block bb;
872 rtx insn;
873 enum df_ref_type ref_type;
874 enum df_ref_flags ref_flags;
875 {
876 unsigned int regno;
877
878 if (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG)
879 abort ();
880
881 /* For the reg allocator we are interested in some SUBREG rtx's, but not
882 all. Notably only those representing a word extraction from a multi-word
 883 reg. As written in the documentation those should have the form
 884 (subreg:SI (reg:M A) N), with size(SImode) < size(Mmode).
 885 XXX Is that true? We could also use the global word_mode variable. */
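  /* E.g. on a 32-bit target, (subreg:QI (reg:SI 100) 0) is narrower
     than a word and so is stripped to the full SImode register below,
     while (subreg:SI (reg:DI 100) 4) survives as a word extraction.  */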
886 if (GET_CODE (reg) == SUBREG
887 && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode)
888 || GET_MODE_SIZE (GET_MODE (reg))
889 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg)))))
890 {
891 loc = &SUBREG_REG (reg);
892 reg = *loc;
893 }
894
895 regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
896 if (regno < FIRST_PSEUDO_REGISTER)
897 {
898 int i;
899 int endregno;
900
901 if (! (df->flags & DF_HARD_REGS))
902 return;
903
904 /* GET_MODE (reg) is correct here. We don't want to go into a SUBREG
905 for the mode, because we only want to add references to regs, which
906 are really referenced. E.g. a (subreg:SI (reg:DI 0) 0) does _not_
907 reference the whole reg 0 in DI mode (which would also include
908 reg 1, at least, if 0 and 1 are SImode registers). */
909 endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (reg));
910
911 for (i = regno; i < endregno; i++)
912 df_ref_record_1 (df, gen_rtx_REG (reg_raw_mode[i], i),
913 loc, bb, insn, ref_type, ref_flags);
914 }
915 else
916 {
917 df_ref_record_1 (df, reg, loc, bb, insn, ref_type, ref_flags);
918 }
919 }
920
 921 /* Writes to a SUBREG whose inner mode is wider than a word and whose
 922 outer mode is no wider than a word are read-modify-write. */
923
924 static inline bool
925 read_modify_subreg_p (x)
926 rtx x;
927 {
928 if (GET_CODE (x) != SUBREG)
929 return false;
930 if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) <= UNITS_PER_WORD)
931 return false;
932 if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
933 return false;
934 return true;
935 }
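
/* For instance, on a 32-bit target (subreg:SI (reg:DI 0) 0) only
   replaces the low word of the DImode register, so the write is also
   a read; (subreg:DI (reg:TI 0) 0) has an outer mode wider than a
   word and so is not treated as read-modify-write by the test above.  */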
936
937 /* Process all the registers defined in the rtx, X. */
938 static void
939 df_def_record_1 (df, x, bb, insn)
940 struct df *df;
941 rtx x;
942 basic_block bb;
943 rtx insn;
944 {
945 rtx *loc = &SET_DEST (x);
946 rtx dst = *loc;
947 enum df_ref_flags flags = 0;
948
949 /* Some targets place small structures in registers for
950 return values of functions. */
951 if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode)
952 {
953 int i;
954
955 for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
956 df_def_record_1 (df, XVECEXP (dst, 0, i), bb, insn);
957 return;
958 }
959
 960 /* Maybe we should flag the use of STRICT_LOW_PART somehow. It might
 961 be handy for the reg allocator. */
962 while (GET_CODE (dst) == STRICT_LOW_PART
963 || GET_CODE (dst) == ZERO_EXTRACT
964 || GET_CODE (dst) == SIGN_EXTRACT
965 || read_modify_subreg_p (dst))
966 {
 967 /* A STRICT_LOW_PART always contains a SUBREG, but we do not want it
 968 to appear outside, since the whole register is always considered. */
969 if (GET_CODE (dst) == STRICT_LOW_PART)
970 {
971 loc = &XEXP (dst, 0);
972 dst = *loc;
973 }
974 loc = &XEXP (dst, 0);
975 dst = *loc;
976 flags |= DF_REF_READ_WRITE;
977 }
978
979 if (GET_CODE (dst) == REG
980 || (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG))
981 df_ref_record (df, dst, loc, bb, insn, DF_REF_REG_DEF, flags);
982 }
983
984
985 /* Process all the registers defined in the pattern rtx, X. */
986 static void
987 df_defs_record (df, x, bb, insn)
988 struct df *df;
989 rtx x;
990 basic_block bb;
991 rtx insn;
992 {
993 RTX_CODE code = GET_CODE (x);
994
995 if (code == SET || code == CLOBBER)
996 {
997 /* Mark the single def within the pattern. */
998 df_def_record_1 (df, x, bb, insn);
999 }
1000 else if (code == PARALLEL)
1001 {
1002 int i;
1003
1004 /* Mark the multiple defs within the pattern. */
1005 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1006 {
1007 code = GET_CODE (XVECEXP (x, 0, i));
1008 if (code == SET || code == CLOBBER)
1009 df_def_record_1 (df, XVECEXP (x, 0, i), bb, insn);
1010 }
1011 }
1012 }
1013
1014
1015 /* Process all the registers used in the rtx at address LOC. */
1016 static void
1017 df_uses_record (df, loc, ref_type, bb, insn, flags)
1018 struct df *df;
1019 rtx *loc;
1020 enum df_ref_type ref_type;
1021 basic_block bb;
1022 rtx insn;
1023 enum df_ref_flags flags;
1024 {
1025 RTX_CODE code;
1026 rtx x;
1027 retry:
1028 x = *loc;
1029 if (!x)
1030 return;
1031 code = GET_CODE (x);
1032 switch (code)
1033 {
1034 case LABEL_REF:
1035 case SYMBOL_REF:
1036 case CONST_INT:
1037 case CONST:
1038 case CONST_DOUBLE:
1039 case PC:
1040 case ADDR_VEC:
1041 case ADDR_DIFF_VEC:
1042 return;
1043
1044 case CLOBBER:
1045 /* If we are clobbering a MEM, mark any registers inside the address
1046 as being used. */
1047 if (GET_CODE (XEXP (x, 0)) == MEM)
1048 df_uses_record (df, &XEXP (XEXP (x, 0), 0),
1049 DF_REF_REG_MEM_STORE, bb, insn, flags);
1050
1051 /* If we're clobbering a REG then we have a def so ignore. */
1052 return;
1053
1054 case MEM:
1055 df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn, flags);
1056 return;
1057
1058 case SUBREG:
 1059 /* If the SUBREG is not of a register, just scan inside it;
 1060 otherwise treat the SUBREG like a plain REG and fall
 1061 through to the REG case below. */
1062 if (GET_CODE (SUBREG_REG (x)) != REG)
1063 {
1064 loc = &SUBREG_REG (x);
1065 df_uses_record (df, loc, ref_type, bb, insn, flags);
1066 return;
1067 }
1068
1069 /* ... Fall through ... */
1070
1071 case REG:
 1072 /* A register (or subreg) referenced in some way other than being set. */
1073 df_ref_record (df, x, loc, bb, insn, ref_type, flags);
1074 return;
1075
1076 case SET:
1077 {
1078 rtx dst = SET_DEST (x);
1079
1080 df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn, 0);
1081
1082 switch (GET_CODE (dst))
1083 {
1084 case SUBREG:
1085 if (read_modify_subreg_p (dst))
1086 {
1087 df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
1088 insn, DF_REF_READ_WRITE);
1089 break;
1090 }
1091 /* ... FALLTHRU ... */
1092 case REG:
1093 case PC:
1094 break;
1095 case MEM:
1096 df_uses_record (df, &XEXP (dst, 0),
1097 DF_REF_REG_MEM_STORE,
1098 bb, insn, 0);
1099 break;
1100 case STRICT_LOW_PART:
 1101 /* A strict_low_part uses the whole reg, not only the subreg. */
1102 dst = XEXP (dst, 0);
1103 if (GET_CODE (dst) != SUBREG)
1104 abort ();
1105 df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
1106 insn, DF_REF_READ_WRITE);
1107 break;
1108 case ZERO_EXTRACT:
1109 case SIGN_EXTRACT:
1110 df_uses_record (df, &XEXP (dst, 0), DF_REF_REG_USE, bb, insn,
1111 DF_REF_READ_WRITE);
1112 df_uses_record (df, &XEXP (dst, 1), DF_REF_REG_USE, bb, insn, 0);
1113 df_uses_record (df, &XEXP (dst, 2), DF_REF_REG_USE, bb, insn, 0);
1114 dst = XEXP (dst, 0);
1115 break;
1116 default:
1117 abort ();
1118 }
1119 return;
1120 }
1121
1122 case RETURN:
1123 break;
1124
1125 case ASM_OPERANDS:
1126 case UNSPEC_VOLATILE:
1127 case TRAP_IF:
1128 case ASM_INPUT:
1129 {
1130 /* Traditional and volatile asm instructions must be considered to use
1131 and clobber all hard registers, all pseudo-registers and all of
1132 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
1133
1134 Consider for instance a volatile asm that changes the fpu rounding
1135 mode. An insn should not be moved across this even if it only uses
1136 pseudo-regs because it might give an incorrectly rounded result.
1137
1138 For now, just mark any regs we can find in ASM_OPERANDS as
1139 used. */
1140
1141 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
 1142 We cannot just fall through here since we would then be confused
 1143 by the ASM_INPUT rtxs inside ASM_OPERANDS, which do not indicate
 1144 traditional asms, unlike their normal usage. */
1145 if (code == ASM_OPERANDS)
1146 {
1147 int j;
1148
1149 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1150 df_uses_record (df, &ASM_OPERANDS_INPUT (x, j),
1151 DF_REF_REG_USE, bb, insn, 0);
1152 return;
1153 }
1154 break;
1155 }
1156
1157 case PRE_DEC:
1158 case POST_DEC:
1159 case PRE_INC:
1160 case POST_INC:
1161 case PRE_MODIFY:
1162 case POST_MODIFY:
1163 /* Catch the def of the register being modified. */
1164 df_ref_record (df, XEXP (x, 0), &XEXP (x, 0), bb, insn, DF_REF_REG_DEF, DF_REF_READ_WRITE);
1165
1166 /* ... Fall through to handle uses ... */
1167
1168 default:
1169 break;
1170 }
1171
1172 /* Recursively scan the operands of this expression. */
1173 {
1174 const char *fmt = GET_RTX_FORMAT (code);
1175 int i;
1176
1177 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1178 {
1179 if (fmt[i] == 'e')
1180 {
1181 /* Tail recursive case: save a function call level. */
1182 if (i == 0)
1183 {
1184 loc = &XEXP (x, 0);
1185 goto retry;
1186 }
1187 df_uses_record (df, &XEXP (x, i), ref_type, bb, insn, flags);
1188 }
1189 else if (fmt[i] == 'E')
1190 {
1191 int j;
1192 for (j = 0; j < XVECLEN (x, i); j++)
1193 df_uses_record (df, &XVECEXP (x, i, j), ref_type,
1194 bb, insn, flags);
1195 }
1196 }
1197 }
1198 }
1199
1200
 1201 /* Record all the refs within INSN of basic block BB. */
1202 static void
1203 df_insn_refs_record (df, bb, insn)
1204 struct df *df;
1205 basic_block bb;
1206 rtx insn;
1207 {
1208 int i;
1209
1210 if (INSN_P (insn))
1211 {
1212 rtx note;
1213
 1214 /* Record register defs. */
1215 df_defs_record (df, PATTERN (insn), bb, insn);
1216
1217 if (df->flags & DF_EQUIV_NOTES)
1218 for (note = REG_NOTES (insn); note;
1219 note = XEXP (note, 1))
1220 {
1221 switch (REG_NOTE_KIND (note))
1222 {
1223 case REG_EQUIV:
1224 case REG_EQUAL:
1225 df_uses_record (df, &XEXP (note, 0), DF_REF_REG_USE,
1226 bb, insn, 0);
1227 default:
1228 break;
1229 }
1230 }
1231
1232 if (GET_CODE (insn) == CALL_INSN)
1233 {
1234 rtx note;
1235 rtx x;
1236
1237 /* Record the registers used to pass arguments. */
1238 for (note = CALL_INSN_FUNCTION_USAGE (insn); note;
1239 note = XEXP (note, 1))
1240 {
1241 if (GET_CODE (XEXP (note, 0)) == USE)
1242 df_uses_record (df, &SET_DEST (XEXP (note, 0)), DF_REF_REG_USE,
1243 bb, insn, 0);
1244 }
1245
1246 /* The stack ptr is used (honorarily) by a CALL insn. */
1247 x = df_reg_use_gen (STACK_POINTER_REGNUM);
1248 df_uses_record (df, &SET_DEST (x), DF_REF_REG_USE, bb, insn, 0);
1249
1250 if (df->flags & DF_HARD_REGS)
1251 {
1252 /* Calls may also reference any of the global registers,
1253 so they are recorded as used. */
1254 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1255 if (global_regs[i])
1256 {
1257 x = df_reg_use_gen (i);
1258 df_uses_record (df, &SET_DEST (x),
1259 DF_REF_REG_USE, bb, insn, 0);
1260 }
1261 }
1262 }
1263
1264 /* Record the register uses. */
1265 df_uses_record (df, &PATTERN (insn),
1266 DF_REF_REG_USE, bb, insn, 0);
1267
1268
1269 if (GET_CODE (insn) == CALL_INSN)
1270 {
1271 rtx note;
1272
1273 if (df->flags & DF_HARD_REGS)
1274 {
1275 /* Kill all registers invalidated by a call. */
1276 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1277 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1278 {
1279 rtx reg_clob = df_reg_clobber_gen (i);
1280 df_defs_record (df, reg_clob, bb, insn);
1281 }
1282 }
1283
1284 /* There may be extra registers to be clobbered. */
1285 for (note = CALL_INSN_FUNCTION_USAGE (insn);
1286 note;
1287 note = XEXP (note, 1))
1288 if (GET_CODE (XEXP (note, 0)) == CLOBBER)
1289 df_defs_record (df, XEXP (note, 0), bb, insn);
1290 }
1291 }
1292 }
1293
1294
1295 /* Record all the refs within the basic block BB. */
1296 static void
1297 df_bb_refs_record (df, bb)
1298 struct df *df;
1299 basic_block bb;
1300 {
1301 rtx insn;
1302
1303 /* Scan the block an insn at a time from beginning to end. */
1304 for (insn = bb->head; ; insn = NEXT_INSN (insn))
1305 {
1306 if (INSN_P (insn))
1307 {
1308 /* Record defs within INSN. */
1309 df_insn_refs_record (df, bb, insn);
1310 }
1311 if (insn == bb->end)
1312 break;
1313 }
1314 }
1315
1316
1317 /* Record all the refs in the basic blocks specified by BLOCKS. */
1318 static void
1319 df_refs_record (df, blocks)
1320 struct df *df;
1321 bitmap blocks;
1322 {
1323 basic_block bb;
1324
1325 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1326 {
1327 df_bb_refs_record (df, bb);
1328 });
1329 }
1330 \f
1331 /* Dataflow analysis routines. */
1332
1333
1334 /* Create reg-def chains for basic block BB. These are a list of
1335 definitions for each register. */
1336 static void
1337 df_bb_reg_def_chain_create (df, bb)
1338 struct df *df;
1339 basic_block bb;
1340 {
1341 rtx insn;
1342
1343 /* Perhaps the defs should be sorted using a depth first search
1344 of the CFG (or possibly a breadth first search). We currently
 1345 scan the insns in each basic block in reverse order so that the
 1346 first defs appear at the start of the chain. */
1347
1348 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1349 insn = PREV_INSN (insn))
1350 {
1351 struct df_link *link;
1352 unsigned int uid = INSN_UID (insn);
1353
1354 if (! INSN_P (insn))
1355 continue;
1356
1357 for (link = df->insns[uid].defs; link; link = link->next)
1358 {
1359 struct ref *def = link->ref;
1360 unsigned int dregno = DF_REF_REGNO (def);
1361
1362 df->regs[dregno].defs
1363 = df_link_create (def, df->regs[dregno].defs);
1364 }
1365 }
1366 }
1367
1368
1369 /* Create reg-def chains for each basic block within BLOCKS. These
1370 are a list of definitions for each register. */
1371 static void
1372 df_reg_def_chain_create (df, blocks)
1373 struct df *df;
1374 bitmap blocks;
1375 {
1376 basic_block bb;
1377
1378 FOR_EACH_BB_IN_BITMAP/*_REV*/ (blocks, 0, bb,
1379 {
1380 df_bb_reg_def_chain_create (df, bb);
1381 });
1382 }
1383
1384
1385 /* Create reg-use chains for basic block BB. These are a list of uses
1386 for each register. */
1387 static void
1388 df_bb_reg_use_chain_create (df, bb)
1389 struct df *df;
1390 basic_block bb;
1391 {
1392 rtx insn;
1393
1394 /* Scan in forward order so that the last uses appear at the
1395 start of the chain. */
1396
1397 for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
1398 insn = NEXT_INSN (insn))
1399 {
1400 struct df_link *link;
1401 unsigned int uid = INSN_UID (insn);
1402
1403 if (! INSN_P (insn))
1404 continue;
1405
1406 for (link = df->insns[uid].uses; link; link = link->next)
1407 {
1408 struct ref *use = link->ref;
1409 unsigned int uregno = DF_REF_REGNO (use);
1410
1411 df->regs[uregno].uses
1412 = df_link_create (use, df->regs[uregno].uses);
1413 }
1414 }
1415 }
1416
1417
1418 /* Create reg-use chains for each basic block within BLOCKS. These
1419 are a list of uses for each register. */
1420 static void
1421 df_reg_use_chain_create (df, blocks)
1422 struct df *df;
1423 bitmap blocks;
1424 {
1425 basic_block bb;
1426
1427 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1428 {
1429 df_bb_reg_use_chain_create (df, bb);
1430 });
1431 }
1432
1433
1434 /* Create def-use chains from reaching use bitmaps for basic block BB. */
1435 static void
1436 df_bb_du_chain_create (df, bb, ru)
1437 struct df *df;
1438 basic_block bb;
1439 bitmap ru;
1440 {
1441 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1442 rtx insn;
1443
1444 bitmap_copy (ru, bb_info->ru_out);
1445
1446 /* For each def in BB create a linked list (chain) of uses
1447 reached from the def. */
1448 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1449 insn = PREV_INSN (insn))
1450 {
1451 struct df_link *def_link;
1452 struct df_link *use_link;
1453 unsigned int uid = INSN_UID (insn);
1454
1455 if (! INSN_P (insn))
1456 continue;
1457
1458 /* For each def in insn... */
1459 for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
1460 {
1461 struct ref *def = def_link->ref;
1462 unsigned int dregno = DF_REF_REGNO (def);
1463
1464 DF_REF_CHAIN (def) = 0;
1465
1466 /* While the reg-use chains are not essential, it
1467 is _much_ faster to search these short lists rather
1468 than all the reaching uses, especially for large functions. */
1469 for (use_link = df->regs[dregno].uses; use_link;
1470 use_link = use_link->next)
1471 {
1472 struct ref *use = use_link->ref;
1473
1474 if (bitmap_bit_p (ru, DF_REF_ID (use)))
1475 {
1476 DF_REF_CHAIN (def)
1477 = df_link_create (use, DF_REF_CHAIN (def));
1478
1479 bitmap_clear_bit (ru, DF_REF_ID (use));
1480 }
1481 }
1482 }
1483
1484 /* For each use in insn... */
1485 for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
1486 {
1487 struct ref *use = use_link->ref;
1488 bitmap_set_bit (ru, DF_REF_ID (use));
1489 }
1490 }
1491 }
1492
1493
1494 /* Create def-use chains from reaching use bitmaps for basic blocks
1495 in BLOCKS. */
1496 static void
1497 df_du_chain_create (df, blocks)
1498 struct df *df;
1499 bitmap blocks;
1500 {
1501 bitmap ru;
1502 basic_block bb;
1503
1504 ru = BITMAP_XMALLOC ();
1505
1506 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1507 {
1508 df_bb_du_chain_create (df, bb, ru);
1509 });
1510
1511 BITMAP_XFREE (ru);
1512 }
1513
1514
1515 /* Create use-def chains from reaching def bitmaps for basic block BB. */
1516 static void
1517 df_bb_ud_chain_create (df, bb)
1518 struct df *df;
1519 basic_block bb;
1520 {
1521 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1522 struct ref **reg_def_last = df->reg_def_last;
1523 rtx insn;
1524
1525 memset (reg_def_last, 0, df->n_regs * sizeof (struct ref *));
1526
1527 /* For each use in BB create a linked list (chain) of defs
1528 that reach the use. */
1529 for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
1530 insn = NEXT_INSN (insn))
1531 {
1532 unsigned int uid = INSN_UID (insn);
1533 struct df_link *use_link;
1534 struct df_link *def_link;
1535
1536 if (! INSN_P (insn))
1537 continue;
1538
1539 /* For each use in insn... */
1540 for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
1541 {
1542 struct ref *use = use_link->ref;
1543 unsigned int regno = DF_REF_REGNO (use);
1544
1545 DF_REF_CHAIN (use) = 0;
1546
1547 /* Has regno been defined in this BB yet? If so, use
1548 the last def as the single entry for the use-def
1549 chain for this use. Otherwise, we need to add all
1550 the defs using this regno that reach the start of
1551 this BB. */
1552 if (reg_def_last[regno])
1553 {
1554 DF_REF_CHAIN (use)
1555 = df_link_create (reg_def_last[regno], 0);
1556 }
1557 else
1558 {
1559 /* While the reg-def chains are not essential, it is
1560 _much_ faster to search these short lists rather than
1561 all the reaching defs, especially for large
1562 functions. */
1563 for (def_link = df->regs[regno].defs; def_link;
1564 def_link = def_link->next)
1565 {
1566 struct ref *def = def_link->ref;
1567
1568 if (bitmap_bit_p (bb_info->rd_in, DF_REF_ID (def)))
1569 {
1570 DF_REF_CHAIN (use)
1571 = df_link_create (def, DF_REF_CHAIN (use));
1572 }
1573 }
1574 }
1575 }
1576
1577
1578 /* For each def in insn...record the last def of each reg. */
1579 for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
1580 {
1581 struct ref *def = def_link->ref;
1582 int dregno = DF_REF_REGNO (def);
1583
1584 reg_def_last[dregno] = def;
1585 }
1586 }
1587 }
1588
1589
1590 /* Create use-def chains from reaching def bitmaps for basic blocks
1591 within BLOCKS. */
1592 static void
1593 df_ud_chain_create (df, blocks)
1594 struct df *df;
1595 bitmap blocks;
1596 {
1597 basic_block bb;
1598
1599 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1600 {
1601 df_bb_ud_chain_create (df, bb);
1602 });
1603 }
1604 \f
1605
1606
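/* Transfer functions for the iterative dataflow solver. As used here,
   bitmap_union_of_diff (DST, B, C, D) computes DST = B | (C & ~D) and
   reports whether DST changed. Reaching defs thus propagate forwards,
   OUT = GEN | (IN & ~KILL), while reaching uses and live registers
   propagate backwards, IN = GEN | (OUT & ~KILL). */
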
1607 static void
1608 df_rd_transfer_function (bb, changed, in, out, gen, kill, data)
1609 int bb ATTRIBUTE_UNUSED;
1610 int *changed;
1611 bitmap in, out, gen, kill;
1612 void *data ATTRIBUTE_UNUSED;
1613 {
1614 *changed = bitmap_union_of_diff (out, gen, in, kill);
1615 }
1616 static void
1617 df_ru_transfer_function (bb, changed, in, out, gen, kill, data)
1618 int bb ATTRIBUTE_UNUSED;
1619 int *changed;
1620 bitmap in, out, gen, kill;
1621 void *data ATTRIBUTE_UNUSED;
1622 {
1623 *changed = bitmap_union_of_diff (in, gen, out, kill);
1624 }
1625
1626 static void
1627 df_lr_transfer_function (bb, changed, in, out, use, def, data)
1628 int bb ATTRIBUTE_UNUSED;
1629 int *changed;
1630 bitmap in, out, use, def;
1631 void *data ATTRIBUTE_UNUSED;
1632 {
1633 *changed = bitmap_union_of_diff (in, use, out, def);
1634 }
1635
1636
1637 /* Compute local reaching def info for basic block BB. */
1638 static void
1639 df_bb_rd_local_compute (df, bb)
1640 struct df *df;
1641 basic_block bb;
1642 {
1643 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1644 rtx insn;
1645
1646 for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
1647 insn = NEXT_INSN (insn))
1648 {
1649 unsigned int uid = INSN_UID (insn);
1650 struct df_link *def_link;
1651
1652 if (! INSN_P (insn))
1653 continue;
1654
1655 for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
1656 {
1657 struct ref *def = def_link->ref;
1658 unsigned int regno = DF_REF_REGNO (def);
1659 struct df_link *def2_link;
1660
1661 for (def2_link = df->regs[regno].defs; def2_link;
1662 def2_link = def2_link->next)
1663 {
1664 struct ref *def2 = def2_link->ref;
1665
1666 /* Add all defs of this reg to the set of kills. This
1667 is greedy since many of these defs will not actually
1668 be killed by this BB but it keeps things a lot
1669 simpler. */
1670 bitmap_set_bit (bb_info->rd_kill, DF_REF_ID (def2));
1671
1672 /* Zap from the set of gens for this BB. */
1673 bitmap_clear_bit (bb_info->rd_gen, DF_REF_ID (def2));
1674 }
1675
1676 bitmap_set_bit (bb_info->rd_gen, DF_REF_ID (def));
1677 }
1678 }
1679
1680 bb_info->rd_valid = 1;
1681 }
1682
1683
1684 /* Compute local reaching def info for each basic block within BLOCKS. */
1685 static void
1686 df_rd_local_compute (df, blocks)
1687 struct df *df;
1688 bitmap blocks;
1689 {
1690 basic_block bb;
1691
1692 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1693 {
1694 df_bb_rd_local_compute (df, bb);
1695 });
1696 }
1697
1698
1699 /* Compute local reaching use (upward exposed use) info for basic
1700 block BB. */
1701 static void
1702 df_bb_ru_local_compute (df, bb)
1703 struct df *df;
1704 basic_block bb;
1705 {
1706 /* This is much more tricky than computing reaching defs. With
1707 reaching defs, defs get killed by other defs. With upwards
1708 exposed uses, these get killed by defs with the same regno. */
1709
1710 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1711 rtx insn;
1712
1713
1714 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1715 insn = PREV_INSN (insn))
1716 {
1717 unsigned int uid = INSN_UID (insn);
1718 struct df_link *def_link;
1719 struct df_link *use_link;
1720
1721 if (! INSN_P (insn))
1722 continue;
1723
1724 for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
1725 {
1726 struct ref *def = def_link->ref;
1727 unsigned int dregno = DF_REF_REGNO (def);
1728
1729 for (use_link = df->regs[dregno].uses; use_link;
1730 use_link = use_link->next)
1731 {
1732 struct ref *use = use_link->ref;
1733
1734 /* Add all uses of this reg to the set of kills. This
1735 is greedy since many of these uses will not actually
1736 be killed by this BB but it keeps things a lot
1737 simpler. */
1738 bitmap_set_bit (bb_info->ru_kill, DF_REF_ID (use));
1739
1740 /* Zap from the set of gens for this BB. */
1741 bitmap_clear_bit (bb_info->ru_gen, DF_REF_ID (use));
1742 }
1743 }
1744
1745 for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
1746 {
1747 struct ref *use = use_link->ref;
1748 /* Add use to set of gens in this BB. */
1749 bitmap_set_bit (bb_info->ru_gen, DF_REF_ID (use));
1750 }
1751 }
1752 bb_info->ru_valid = 1;
1753 }
1754
1755
1756 /* Compute local reaching use (upward exposed use) info for each basic
1757 block within BLOCKS. */
1758 static void
1759 df_ru_local_compute (df, blocks)
1760 struct df *df;
1761 bitmap blocks;
1762 {
1763 basic_block bb;
1764
1765 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1766 {
1767 df_bb_ru_local_compute (df, bb);
1768 });
1769 }
1770
1771
1772 /* Compute local live variable info for basic block BB. */
1773 static void
1774 df_bb_lr_local_compute (df, bb)
1775 struct df *df;
1776 basic_block bb;
1777 {
1778 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1779 rtx insn;
1780
1781 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1782 insn = PREV_INSN (insn))
1783 {
1784 unsigned int uid = INSN_UID (insn);
1785 struct df_link *link;
1786
1787 if (! INSN_P (insn))
1788 continue;
1789
1790 for (link = df->insns[uid].defs; link; link = link->next)
1791 {
1792 struct ref *def = link->ref;
1793 unsigned int dregno = DF_REF_REGNO (def);
1794
1795 /* Add def to set of defs in this BB. */
1796 bitmap_set_bit (bb_info->lr_def, dregno);
1797
1798 bitmap_clear_bit (bb_info->lr_use, dregno);
1799 }
1800
1801 for (link = df->insns[uid].uses; link; link = link->next)
1802 {
1803 struct ref *use = link->ref;
1804 /* Add use to set of uses in this BB. */
1805 bitmap_set_bit (bb_info->lr_use, DF_REF_REGNO (use));
1806 }
1807 }
1808 bb_info->lr_valid = 1;
1809 }
1810
1811
1812 /* Compute local live variable info for each basic block within BLOCKS. */
1813 static void
1814 df_lr_local_compute (df, blocks)
1815 struct df *df;
1816 bitmap blocks;
1817 {
1818 basic_block bb;
1819
1820 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1821 {
1822 df_bb_lr_local_compute (df, bb);
1823 });
1824 }
1825
1826
1827 /* Compute register info: lifetime, bb, and number of defs and uses
1828 for basic block BB. */
1829 static void
1830 df_bb_reg_info_compute (df, bb, live)
1831 struct df *df;
1832 basic_block bb;
1833 bitmap live;
1834 {
1835 struct reg_info *reg_info = df->regs;
1836 struct bb_info *bb_info = DF_BB_INFO (df, bb);
1837 rtx insn;
1838
1839 bitmap_copy (live, bb_info->lr_out);
1840
1841 for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
1842 insn = PREV_INSN (insn))
1843 {
1844 unsigned int uid = INSN_UID (insn);
1845 unsigned int regno;
1846 struct df_link *link;
1847
1848 if (! INSN_P (insn))
1849 continue;
1850
1851 for (link = df->insns[uid].defs; link; link = link->next)
1852 {
1853 struct ref *def = link->ref;
1854 unsigned int dregno = DF_REF_REGNO (def);
1855
1856 /* Kill this register. */
1857 bitmap_clear_bit (live, dregno);
1858 reg_info[dregno].n_defs++;
1859 }
1860
1861 for (link = df->insns[uid].uses; link; link = link->next)
1862 {
1863 struct ref *use = link->ref;
1864 unsigned int uregno = DF_REF_REGNO (use);
1865
1866 /* This register is now live. */
1867 bitmap_set_bit (live, uregno);
1868 reg_info[uregno].n_uses++;
1869 }
1870
1871 /* Increment lifetimes of all live registers. */
1872 EXECUTE_IF_SET_IN_BITMAP (live, 0, regno,
1873 {
1874 reg_info[regno].lifetime++;
1875 });
1876 }
1877 }
1878
1879
1880 /* Compute register info: lifetime, bb, and number of defs and uses. */
1881 static void
1882 df_reg_info_compute (df, blocks)
1883 struct df *df;
1884 bitmap blocks;
1885 {
1886 basic_block bb;
1887 bitmap live;
1888
1889 live = BITMAP_XMALLOC ();
1890
1891 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1892 {
1893 df_bb_reg_info_compute (df, bb, live);
1894 });
1895
1896 BITMAP_XFREE (live);
1897 }
1898
1899
1900 /* Assign LUIDs for BB. */
1901 static int
1902 df_bb_luids_set (df, bb)
1903 struct df *df;
1904 basic_block bb;
1905 {
1906 rtx insn;
1907 int luid = 0;
1908
1909 /* The LUIDs are monotonically increasing for each basic block. */
1910
1911 for (insn = bb->head; ; insn = NEXT_INSN (insn))
1912 {
 1913 if (INSN_P (insn))
 1914 DF_INSN_LUID (df, insn) = luid++;
 1915 else DF_INSN_LUID (df, insn) = luid;
1916
1917 if (insn == bb->end)
1918 break;
1919 }
1920 return luid;
1921 }
1922
1923
1924 /* Assign LUIDs for each basic block within BLOCKS. */
1925 static int
1926 df_luids_set (df, blocks)
1927 struct df *df;
1928 bitmap blocks;
1929 {
1930 basic_block bb;
1931 int total = 0;
1932
1933 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
1934 {
1935 total += df_bb_luids_set (df, bb);
1936 });
1937 return total;
1938 }
1939
1940 /* Perform dataflow analysis using existing DF structure for blocks
1941 within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */
1942 static void
1943 df_analyse_1 (df, blocks, flags, update)
1944 struct df *df;
1945 bitmap blocks;
1946 int flags;
1947 int update;
1948 {
1949 int aflags;
1950 int dflags;
1951 int i;
1952 dflags = 0;
1953 aflags = flags;
1954 if (flags & DF_UD_CHAIN)
1955 aflags |= DF_RD | DF_RD_CHAIN;
1956
1957 if (flags & DF_DU_CHAIN)
1958 aflags |= DF_RU;
1959
1960 if (flags & DF_RU)
1961 aflags |= DF_RU_CHAIN;
1962
1963 if (flags & DF_REG_INFO)
1964 aflags |= DF_LR;
1965
1966 if (! blocks)
1967 blocks = df->all_blocks;
1968
1969 df->flags = flags;
1970 if (update)
1971 {
1972 df_refs_update (df);
1973 /* More fine grained incremental dataflow analysis would be
1974 nice. For now recompute the whole shebang for the
1975 modified blocks. */
1976 #if 0
1977 df_refs_unlink (df, blocks);
1978 #endif
1979 /* All the def-use, use-def chains can be potentially
1980 modified by changes in one block. The size of the
1981 bitmaps can also change. */
1982 }
1983 else
1984 {
1985 /* Scan the function for all register defs and uses. */
1986 df_refs_queue (df);
1987 df_refs_record (df, blocks);
1988
1989 /* Link all the new defs and uses to the insns. */
1990 df_refs_process (df);
1991 }
1992
 1993 /* Allocate the bitmaps now that the total numbers of defs and uses
 1994 are known. If the number of defs or uses has changed, then these
 1995 bitmaps need to be reallocated. */
1996 df_bitmaps_alloc (df, aflags);
1997
1998 /* Set the LUIDs for each specified basic block. */
1999 df_luids_set (df, blocks);
2000
 2001 /* Recreate reg-def and reg-use chains from scratch so that the first
 2002 def is at the head of the reg-def chain and the last use is at
2003 the head of the reg-use chain. This is only important for
2004 regs local to a basic block as it speeds up searching. */
2005 if (aflags & DF_RD_CHAIN)
2006 {
2007 df_reg_def_chain_create (df, blocks);
2008 }
2009
2010 if (aflags & DF_RU_CHAIN)
2011 {
2012 df_reg_use_chain_create (df, blocks);
2013 }
2014
  df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
  df->rc_order = xmalloc (sizeof (int) * n_basic_blocks);
  df->rts_order = xmalloc (sizeof (int) * n_basic_blocks);
  df->inverse_dfs_map = xmalloc (sizeof (int) * n_basic_blocks);
  df->inverse_rc_map = xmalloc (sizeof (int) * n_basic_blocks);
  df->inverse_rts_map = xmalloc (sizeof (int) * n_basic_blocks);
2021
2022 flow_depth_first_order_compute (df->dfs_order, df->rc_order);
2023 flow_reverse_top_sort_order_compute (df->rts_order);
  for (i = 0; i < n_basic_blocks; i++)
2025 {
2026 df->inverse_dfs_map[df->dfs_order[i]] = i;
2027 df->inverse_rc_map[df->rc_order[i]] = i;
2028 df->inverse_rts_map[df->rts_order[i]] = i;
2029 }
2030 if (aflags & DF_RD)
2031 {
2032 /* Compute the sets of gens and kills for the defs of each bb. */
2033 df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
2034 {
2035 int i;
2036 bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
2037 bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
2038 bitmap *gen = xmalloc (sizeof (bitmap) * n_basic_blocks);
2039 bitmap *kill = xmalloc (sizeof (bitmap) * n_basic_blocks);
      for (i = 0; i < n_basic_blocks; i++)
2041 {
2042 in[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_in;
2043 out[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_out;
2044 gen[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_gen;
2045 kill[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_kill;
2046 }
2047 iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
2048 FORWARD, UNION, df_rd_transfer_function,
2049 df->inverse_rc_map, NULL);
2050 free (in);
2051 free (out);
2052 free (gen);
2053 free (kill);
2054 }
2055 }
2056
2057 if (aflags & DF_UD_CHAIN)
2058 {
2059 /* Create use-def chains. */
2060 df_ud_chain_create (df, df->all_blocks);
2061
2062 if (! (flags & DF_RD))
2063 dflags |= DF_RD;
2064 }
2065
2066 if (aflags & DF_RU)
2067 {
2068 /* Compute the sets of gens and kills for the upwards exposed
2069 uses in each bb. */
2070 df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
2071 {
2072 int i;
2073 bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
2074 bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
2075 bitmap *gen = xmalloc (sizeof (bitmap) * n_basic_blocks);
2076 bitmap *kill = xmalloc (sizeof (bitmap) * n_basic_blocks);
      for (i = 0; i < n_basic_blocks; i++)
2078 {
2079 in[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_in;
2080 out[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_out;
2081 gen[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_gen;
2082 kill[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_kill;
2083 }
2084 iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
2085 BACKWARD, UNION, df_ru_transfer_function,
2086 df->inverse_rts_map, NULL);
2087 free (in);
2088 free (out);
2089 free (gen);
2090 free (kill);
2091 }
2092 }
2093
2094 if (aflags & DF_DU_CHAIN)
2095 {
2096 /* Create def-use chains. */
2097 df_du_chain_create (df, df->all_blocks);
2098
2099 if (! (flags & DF_RU))
2100 dflags |= DF_RU;
2101 }
2102
2103 /* Free up bitmaps that are no longer required. */
2104 if (dflags)
2105 df_bitmaps_free (df, dflags);
2106
2107 if (aflags & DF_LR)
2108 {
2109 /* Compute the sets of defs and uses of live variables. */
2110 df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
2111 {
2112 int i;
2113 bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
2114 bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
2115 bitmap *use = xmalloc (sizeof (bitmap) * n_basic_blocks);
2116 bitmap *def = xmalloc (sizeof (bitmap) * n_basic_blocks);
      for (i = 0; i < n_basic_blocks; i++)
2118 {
2119 in[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_in;
2120 out[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_out;
2121 use[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_use;
2122 def[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_def;
2123 }
2124 iterative_dataflow_bitmap (in, out, use, def, df->all_blocks,
2125 BACKWARD, UNION, df_lr_transfer_function,
2126 df->inverse_rts_map, NULL);
2127 free (in);
2128 free (out);
2129 free (use);
2130 free (def);
2131 }
2132 }
2133
2134 if (aflags & DF_REG_INFO)
2135 {
2136 df_reg_info_compute (df, df->all_blocks);
2137 }
2138 free (df->dfs_order);
2139 free (df->rc_order);
2140 free (df->rts_order);
2141 free (df->inverse_rc_map);
2142 free (df->inverse_dfs_map);
2143 free (df->inverse_rts_map);
2144 }
2145
2146
2147 /* Initialise dataflow analysis. */
2148 struct df *
2149 df_init ()
2150 {
2151 struct df *df;
2152
2153 df = xcalloc (1, sizeof (struct df));
2154
2155 /* Squirrel away a global for debugging. */
2156 ddf = df;
2157
2158 return df;
2159 }
2160
2161
2162 /* Start queuing refs. */
2163 static int
2164 df_refs_queue (df)
2165 struct df *df;
2166 {
2167 df->def_id_save = df->def_id;
2168 df->use_id_save = df->use_id;
2169 /* ???? Perhaps we should save current obstack state so that we can
2170 unwind it. */
2171 return 0;
2172 }
2173
2174
2175 /* Process queued refs. */
2176 static int
2177 df_refs_process (df)
2178 struct df *df;
2179 {
2180 unsigned int i;
2181
2182 /* Build new insn-def chains. */
2183 for (i = df->def_id_save; i != df->def_id; i++)
2184 {
2185 struct ref *def = df->defs[i];
2186 unsigned int uid = DF_REF_INSN_UID (def);
2187
2188 /* Add def to head of def list for INSN. */
2189 df->insns[uid].defs
2190 = df_link_create (def, df->insns[uid].defs);
2191 }
2192
2193 /* Build new insn-use chains. */
2194 for (i = df->use_id_save; i != df->use_id; i++)
2195 {
2196 struct ref *use = df->uses[i];
2197 unsigned int uid = DF_REF_INSN_UID (use);
2198
2199 /* Add use to head of use list for INSN. */
2200 df->insns[uid].uses
2201 = df_link_create (use, df->insns[uid].uses);
2202 }
2203 return 0;
2204 }
2205
2206
2207 /* Update refs for basic block BB. */
2208 static int
2209 df_bb_refs_update (df, bb)
2210 struct df *df;
2211 basic_block bb;
2212 {
2213 rtx insn;
2214 int count = 0;
2215
2216 /* While we have to scan the chain of insns for this BB, we don't
2217 need to allocate and queue a long chain of BB/INSN pairs. Using
2218 a bitmap for insns_modified saves memory and avoids queuing
2219 duplicates. */
2220
2221 for (insn = bb->head; ; insn = NEXT_INSN (insn))
2222 {
2223 unsigned int uid;
2224
2225 uid = INSN_UID (insn);
2226
2227 if (bitmap_bit_p (df->insns_modified, uid))
2228 {
2229 /* Delete any allocated refs of this insn. MPH, FIXME. */
2230 df_insn_refs_unlink (df, bb, insn);
2231
2232 /* Scan the insn for refs. */
2233 df_insn_refs_record (df, bb, insn);
2234
2236 bitmap_clear_bit (df->insns_modified, uid);
2237 count++;
2238 }
2239 if (insn == bb->end)
2240 break;
2241 }
2242 return count;
2243 }
2244
2245
2246 /* Process all the modified/deleted insns that were queued. */
2247 static int
2248 df_refs_update (df)
2249 struct df *df;
2250 {
2251 basic_block bb;
2252 int count = 0;
2253
2254 if ((unsigned int)max_reg_num () >= df->reg_size)
2255 df_reg_table_realloc (df, 0);
2256
2257 df_refs_queue (df);
2258
2259 FOR_EACH_BB_IN_BITMAP (df->bbs_modified, 0, bb,
2260 {
2261 count += df_bb_refs_update (df, bb);
2262 });
2263
2264 df_refs_process (df);
2265 return count;
2266 }
2267
2268
2269 /* Return non-zero if any of the requested blocks in the bitmap
2270 BLOCKS have been modified. */
2271 static int
2272 df_modified_p (df, blocks)
2273 struct df *df;
2274 bitmap blocks;
2275 {
2276 unsigned int j;
2277 int update = 0;
2278
2279 for (j = 0; j < df->n_bbs; j++)
2280 if (bitmap_bit_p (df->bbs_modified, j)
2281 && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, j)))
2282 {
2283 update = 1;
2284 break;
2285 }
2286
2287 return update;
2288 }
2289
2290
2291 /* Analyse dataflow info for the basic blocks specified by the bitmap
2292 BLOCKS, or for the whole CFG if BLOCKS is zero, or just for the
2293 modified blocks if BLOCKS is -1. */
2294 int
2295 df_analyse (df, blocks, flags)
2296 struct df *df;
2297 bitmap blocks;
2298 int flags;
2299 {
2300 int update;
2301
2302 /* We could deal with additional basic blocks being created by
2303 rescanning everything again. */
2304 if (df->n_bbs && df->n_bbs != (unsigned int)n_basic_blocks)
2305 abort ();
2306
2307 update = df_modified_p (df, blocks);
2308 if (update || (flags != df->flags))
2309 {
2310 if (! blocks)
2311 {
2312 if (df->n_bbs)
2313 {
2314 /* Recompute everything from scratch. */
2315 df_free (df);
2316 }
2317 /* Allocate and initialise data structures. */
2318 df_alloc (df, max_reg_num ());
2319 df_analyse_1 (df, 0, flags, 0);
2320 update = 1;
2321 }
2322 else
2323 {
2324 if (blocks == (bitmap) -1)
2325 blocks = df->bbs_modified;
2326
2327 if (! df->n_bbs)
2328 abort ();
2329
2330 df_analyse_1 (df, blocks, flags, 1);
2331 bitmap_zero (df->bbs_modified);
2332 }
2333 }
2334 return update;
2335 }
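
/* A hedged sketch of the incremental idiom: after changing an insn,
   mark it with df_insn_modify and reanalyse just the modified blocks
   by passing BLOCKS == -1.  NEW_SRC is a hypothetical replacement
   expression and the flags shown are only an example.  */
#if 0
  validate_change (insn, &SET_SRC (PATTERN (insn)), new_src, 0);
  df_insn_modify (df, bb, insn);
  df_analyse (df, (bitmap) -1, DF_UD_CHAIN | DF_DU_CHAIN);
#endif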
2336
2337
2338 /* Free all the dataflow info and the DF structure. */
2339 void
2340 df_finish (df)
2341 struct df *df;
2342 {
2343 df_free (df);
2344 free (df);
2345 }
2346
2347
2348 /* Unlink INSN from its reference information. */
2349 static void
2350 df_insn_refs_unlink (df, bb, insn)
2351 struct df *df;
2352 basic_block bb ATTRIBUTE_UNUSED;
2353 rtx insn;
2354 {
2355 struct df_link *link;
2356 unsigned int uid;
2357
2358 uid = INSN_UID (insn);
2359
2360 /* Unlink all refs defined by this insn. */
2361 for (link = df->insns[uid].defs; link; link = link->next)
2362 df_def_unlink (df, link->ref);
2363
2364 /* Unlink all refs used by this insn. */
2365 for (link = df->insns[uid].uses; link; link = link->next)
2366 df_use_unlink (df, link->ref);
2367
2368 df->insns[uid].defs = 0;
2369 df->insns[uid].uses = 0;
2370 }
2371
2372
2373 #if 0
2374 /* Unlink all the insns within BB from their reference information. */
2375 static void
2376 df_bb_refs_unlink (df, bb)
2377 struct df *df;
2378 basic_block bb;
2379 {
2380 rtx insn;
2381
2382 /* Scan the block an insn at a time from beginning to end. */
2383 for (insn = bb->head; ; insn = NEXT_INSN (insn))
2384 {
2385 if (INSN_P (insn))
2386 {
2387 /* Unlink refs for INSN. */
2388 df_insn_refs_unlink (df, bb, insn);
2389 }
2390 if (insn == bb->end)
2391 break;
2392 }
2393 }
2394
2395
2396 /* Unlink all the refs in the basic blocks specified by BLOCKS.
2397 Not currently used. */
2398 static void
2399 df_refs_unlink (df, blocks)
2400 struct df *df;
2401 bitmap blocks;
2402 {
2403 basic_block bb;
2404
2405 if (blocks)
2406 {
2407 FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
2408 {
2409 df_bb_refs_unlink (df, bb);
2410 });
2411 }
2412 else
2413 {
2414 FOR_ALL_BBS (bb,
2415 {
2416 df_bb_refs_unlink (df, bb);
2417 });
2418 }
2419 }
2420 #endif
2421 \f
2422 /* Functions to modify insns. */
2423
2424
2425 /* Delete INSN and all its reference information. */
2426 rtx
2427 df_insn_delete (df, bb, insn)
2428 struct df *df;
     basic_block bb;
2430 rtx insn;
2431 {
2432 /* If the insn is a jump, we should perhaps call delete_insn to
2433 handle the JUMP_LABEL? */
2434
2435 /* We should not be deleting the NOTE_INSN_BASIC_BLOCK or label. */
2436 if (insn == bb->head)
2437 abort ();
2438
2439 /* Delete the insn. */
2440 delete_insn (insn);
2441
2442 df_insn_modify (df, bb, insn);
2443
2444 return NEXT_INSN (insn);
2445 }
2446
2447
2448 /* Mark that INSN within BB may have changed (created/modified/deleted).
2449 This may be called multiple times for the same insn. There is no
2450 harm calling this function if the insn wasn't changed; it will just
2451 slow down the rescanning of refs. */
2452 void
2453 df_insn_modify (df, bb, insn)
2454 struct df *df;
2455 basic_block bb;
2456 rtx insn;
2457 {
2458 unsigned int uid;
2459
2460 uid = INSN_UID (insn);
2461
2462 if (uid >= df->insn_size)
2463 df_insn_table_realloc (df, 0);
2464
2465 bitmap_set_bit (df->bbs_modified, bb->index);
2466 bitmap_set_bit (df->insns_modified, uid);
2467
2468 /* For incremental updating on the fly, perhaps we could make a copy
2469 of all the refs of the original insn and turn them into
2470 anti-refs. When df_refs_update finds these anti-refs, it annihilates
2471 the original refs. If validate_change fails then these anti-refs
2472 will just get ignored. */
2473 }
2474
2475
2476 typedef struct replace_args
2477 {
2478 rtx match;
2479 rtx replacement;
2480 rtx insn;
2481 int modified;
2482 } replace_args;
2483
2484
2485 /* Replace mem pointed to by PX with its associated pseudo register.
2486 DATA is actually a pointer to a structure describing the
2487 instruction currently being scanned and the MEM we are currently
2488 replacing. */
2489 static int
2490 df_rtx_mem_replace (px, data)
2491 rtx *px;
2492 void *data;
2493 {
2494 replace_args *args = (replace_args *) data;
2495 rtx mem = *px;
2496
2497 if (mem == NULL_RTX)
2498 return 0;
2499
2500 switch (GET_CODE (mem))
2501 {
2502 case MEM:
2503 break;
2504
2505 case CONST_DOUBLE:
2506 /* We're not interested in the MEM associated with a
2507 CONST_DOUBLE, so there's no need to traverse into one. */
2508 return -1;
2509
2510 default:
2511 /* This is not a MEM. */
2512 return 0;
2513 }
2514
2515 if (!rtx_equal_p (args->match, mem))
2516 /* This is not the MEM we are currently replacing. */
2517 return 0;
2518
2519 /* Actually replace the MEM. */
2520 validate_change (args->insn, px, args->replacement, 1);
2521 args->modified++;
2522
2523 return 0;
2524 }
2525
2526
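/* Replace every occurrence of MEM in INSN with REG, queuing the
   changes with validate_change for a later apply_change_group.
   Return the number of replacements made.  */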
2527 int
2528 df_insn_mem_replace (df, bb, insn, mem, reg)
2529 struct df *df;
2530 basic_block bb;
2531 rtx insn;
2532 rtx mem;
2533 rtx reg;
2534 {
2535 replace_args args;
2536
2537 args.insn = insn;
2538 args.match = mem;
2539 args.replacement = reg;
2540 args.modified = 0;
2541
2542 /* Search and replace all matching mems within insn. */
2543 for_each_rtx (&insn, df_rtx_mem_replace, &args);
2544
2545 if (args.modified)
2546 df_insn_modify (df, bb, insn);
2547
2548 /* ???? FIXME. We may have a new def or one or more new uses of REG
2549 in INSN. REG should be a new pseudo so it won't affect the
2550 dataflow information that we currently have. We should add
2551 the new uses and defs to INSN and then recreate the chains
2552 when df_analyse is called. */
2553 return args.modified;
2554 }
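
/* A hedged usage sketch (the commit step follows from the
   validate_change group protocol; it is not df-specific API):
   replace a MEM in INSN with a fresh pseudo.  */
#if 0
  rtx reg = gen_reg_rtx (GET_MODE (mem));

  if (df_insn_mem_replace (df, bb, insn, mem, reg)
      && apply_change_group ())
    {
      /* INSN now refers to REG; the new refs are picked up the next
	 time df_analyse is called.  */
    }
#endif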
2555
2556
2557 /* Replace one register with another. Called through for_each_rtx; PX
2558 points to the rtx being scanned. DATA is actually a pointer to a
2559 structure of arguments. */
2560 static int
2561 df_rtx_reg_replace (px, data)
2562 rtx *px;
2563 void *data;
2564 {
2565 rtx x = *px;
2566 replace_args *args = (replace_args *) data;
2567
2568 if (x == NULL_RTX)
2569 return 0;
2570
2571 if (x == args->match)
2572 {
2573 validate_change (args->insn, px, args->replacement, 1);
2574 args->modified++;
2575 }
2576
2577 return 0;
2578 }
2579
2580
2581 /* Replace the reg within every ref on CHAIN that is within the set
2582 BLOCKS of basic blocks with NEWREG. Also update the regs within
2583 REG_NOTES. */
2584 void
2585 df_refs_reg_replace (df, blocks, chain, oldreg, newreg)
2586 struct df *df;
2587 bitmap blocks;
2588 struct df_link *chain;
2589 rtx oldreg;
2590 rtx newreg;
2591 {
2592 struct df_link *link;
2593 replace_args args;
2594
2595 if (! blocks)
2596 blocks = df->all_blocks;
2597
2598 args.match = oldreg;
2599 args.replacement = newreg;
2600 args.modified = 0;
2601
2602 for (link = chain; link; link = link->next)
2603 {
2604 struct ref *ref = link->ref;
2605 rtx insn = DF_REF_INSN (ref);
2606
2607 if (! INSN_P (insn))
2608 continue;
2609
2610 if (bitmap_bit_p (blocks, DF_REF_BBNO (ref)))
2611 {
2612 df_ref_reg_replace (df, ref, oldreg, newreg);
2613
2614 /* Replace occurrences of the reg within the REG_NOTES. */
2615 if ((! link->next || DF_REF_INSN (ref)
2616 != DF_REF_INSN (link->next->ref))
2617 && REG_NOTES (insn))
2618 {
2619 args.insn = insn;
2620 for_each_rtx (&REG_NOTES (insn), df_rtx_reg_replace, &args);
2621 }
2622 }
2623 else
2624 {
2625 /* Temporary check to ensure that we have a grip on which
2626 regs should be replaced. */
2627 abort ();
2628 }
2629 }
2630 }
2631
2632
2633 /* Replace all occurrences of register OLDREG with register NEWREG in
2634 blocks defined by bitmap BLOCKS. This also replaces occurrences of
2635 OLDREG in the REG_NOTES but only for insns containing OLDREG. This
2636 routine expects the reg-use and reg-def chains to be valid. */
2637 int
2638 df_reg_replace (df, blocks, oldreg, newreg)
2639 struct df *df;
2640 bitmap blocks;
2641 rtx oldreg;
2642 rtx newreg;
2643 {
2644 unsigned int oldregno = REGNO (oldreg);
2645
2646 df_refs_reg_replace (df, blocks, df->regs[oldregno].defs, oldreg, newreg);
2647 df_refs_reg_replace (df, blocks, df->regs[oldregno].uses, oldreg, newreg);
2648 return 1;
2649 }
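
/* A hedged sketch of renaming all refs of OLDREG (assumes df_analyse
   was run with DF_RD_CHAIN | DF_RU_CHAIN so the reg-def and reg-use
   chains are valid):  */
#if 0
  rtx newreg = gen_reg_rtx (GET_MODE (oldreg));

  df_reg_replace (df, blocks, oldreg, newreg);
  if (! apply_change_group ())
    abort ();
#endif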
2650
2651
2652 /* Try replacing the reg within REF with NEWREG. Do not modify
2653 def-use/use-def chains. */
2654 int
2655 df_ref_reg_replace (df, ref, oldreg, newreg)
2656 struct df *df;
2657 struct ref *ref;
2658 rtx oldreg;
2659 rtx newreg;
2660 {
  /* Check whether the insn has been deleted by being converted into
     a NOTE.  If so, ignore it.  */
2663 if (! INSN_P (DF_REF_INSN (ref)))
2664 return 0;
2665
2666 if (oldreg && oldreg != DF_REF_REG (ref))
2667 abort ();
2668
2669 if (! validate_change (DF_REF_INSN (ref), DF_REF_LOC (ref), newreg, 1))
2670 return 0;
2671
2672 df_insn_modify (df, DF_REF_BB (ref), DF_REF_INSN (ref));
2673 return 1;
2674 }
2675
2676
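/* Move the def of REGNO in DEF_INSN onto USE_INSN, unlinking the
   last use of REGNO in USE_INSN.  Return the def, or zero if either
   ref cannot be found.  */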
struct ref *
2678 df_bb_def_use_swap (df, bb, def_insn, use_insn, regno)
2679 struct df * df;
2680 basic_block bb;
2681 rtx def_insn;
2682 rtx use_insn;
2683 unsigned int regno;
2684 {
2685 struct ref *def;
2686 struct ref *use;
2687 int def_uid;
2688 int use_uid;
2689 struct df_link *link;
2690
2691 def = df_bb_insn_regno_first_def_find (df, bb, def_insn, regno);
2692 if (! def)
2693 return 0;
2694
2695 use = df_bb_insn_regno_last_use_find (df, bb, use_insn, regno);
2696 if (! use)
2697 return 0;
2698
2699 /* The USE no longer exists. */
2700 use_uid = INSN_UID (use_insn);
2701 df_use_unlink (df, use);
2702 df_ref_unlink (&df->insns[use_uid].uses, use);
2703
2704 /* The DEF requires shifting so remove it from DEF_INSN
2705 and add it to USE_INSN by reusing LINK. */
2706 def_uid = INSN_UID (def_insn);
2707 link = df_ref_unlink (&df->insns[def_uid].defs, def);
2708 link->ref = def;
2709 link->next = df->insns[use_uid].defs;
2710 df->insns[use_uid].defs = link;
2711
2712 #if 0
2713 link = df_ref_unlink (&df->regs[regno].defs, def);
2714 link->ref = def;
2715 link->next = df->regs[regno].defs;
  df->regs[regno].defs = link;
2717 #endif
2718
2719 DF_REF_INSN (def) = use_insn;
2720 return def;
2721 }
2722
2723
/* Mark all insns from FIRST_INSN to LAST_INSN inclusive as modified.
   All new insns must be processed by this routine.  */
2726 static void
2727 df_insns_modify (df, bb, first_insn, last_insn)
2728 struct df *df;
2729 basic_block bb;
2730 rtx first_insn;
2731 rtx last_insn;
2732 {
2733 rtx insn;
2734
2735 for (insn = first_insn; ; insn = NEXT_INSN (insn))
2736 {
2737 unsigned int uid;
2738
      /* A non-const call should not have slipped through the net.  If
	 it does, we need to create a new basic block.  Ouch.  The
	 same applies to a label.  */
2742 if ((GET_CODE (insn) == CALL_INSN
2743 && ! CONST_OR_PURE_CALL_P (insn))
2744 || GET_CODE (insn) == CODE_LABEL)
2745 abort ();
2746
2747 uid = INSN_UID (insn);
2748
2749 if (uid >= df->insn_size)
2750 df_insn_table_realloc (df, 0);
2751
2752 df_insn_modify (df, bb, insn);
2753
2754 if (insn == last_insn)
2755 break;
2756 }
2757 }
2758
2759
2760 /* Emit PATTERN before INSN within BB. */
2761 rtx
2762 df_pattern_emit_before (df, pattern, bb, insn)
     struct df *df;
2764 rtx pattern;
2765 basic_block bb;
2766 rtx insn;
2767 {
2768 rtx ret_insn;
2769 rtx prev_insn = PREV_INSN (insn);
2770
2771 /* We should not be inserting before the start of the block. */
2772 if (insn == bb->head)
2773 abort ();
2774 ret_insn = emit_insn_before (pattern, insn);
2775 if (ret_insn == insn)
2776 return ret_insn;
2777
2778 df_insns_modify (df, bb, NEXT_INSN (prev_insn), ret_insn);
2779 return ret_insn;
2780 }
2781
2782
2783 /* Emit PATTERN after INSN within BB. */
2784 rtx
2785 df_pattern_emit_after (df, pattern, bb, insn)
2786 struct df *df;
2787 rtx pattern;
2788 basic_block bb;
2789 rtx insn;
2790 {
2791 rtx ret_insn;
2792
2793 ret_insn = emit_insn_after (pattern, insn);
2794 if (ret_insn == insn)
2795 return ret_insn;
2796
2797 df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
2798 return ret_insn;
2799 }
2800
2801
2802 /* Emit jump PATTERN after INSN within BB. */
2803 rtx
2804 df_jump_pattern_emit_after (df, pattern, bb, insn)
2805 struct df *df;
2806 rtx pattern;
2807 basic_block bb;
2808 rtx insn;
2809 {
2810 rtx ret_insn;
2811
2812 ret_insn = emit_jump_insn_after (pattern, insn);
2813 if (ret_insn == insn)
2814 return ret_insn;
2815
2816 df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
2817 return ret_insn;
2818 }
2819
2820
2821 /* Move INSN within BB before BEFORE_INSN within BEFORE_BB.
2822
2823 This function should only be used to move loop invariant insns
2824 out of a loop where it has been proven that the def-use info
2825 will still be valid. */
2826 rtx
2827 df_insn_move_before (df, bb, insn, before_bb, before_insn)
2828 struct df *df;
2829 basic_block bb;
2830 rtx insn;
2831 basic_block before_bb;
2832 rtx before_insn;
2833 {
2834 struct df_link *link;
2835 unsigned int uid;
2836
2837 if (! bb)
2838 return df_pattern_emit_before (df, insn, before_bb, before_insn);
2839
2840 uid = INSN_UID (insn);
2841
  /* Change the BB of all refs defined and used by this insn.  */
2843 for (link = df->insns[uid].defs; link; link = link->next)
2844 DF_REF_BB (link->ref) = before_bb;
2845 for (link = df->insns[uid].uses; link; link = link->next)
2846 DF_REF_BB (link->ref) = before_bb;
2847
2848 /* The lifetimes of the registers used in this insn will be reduced
2849 while the lifetimes of the registers defined in this insn
2850 are likely to be increased. */
2851
2852 /* ???? Perhaps all the insns moved should be stored on a list
2853 which df_analyse removes when it recalculates data flow. */
2854
2855 return emit_insn_before (insn, before_insn);
2856 }
2857 \f
2858 /* Functions to query dataflow information. */
2859
2860
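/* Return non-zero if INSN contains a def of REGNO.  */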
2861 int
2862 df_insn_regno_def_p (df, bb, insn, regno)
2863 struct df *df;
2864 basic_block bb ATTRIBUTE_UNUSED;
2865 rtx insn;
2866 unsigned int regno;
2867 {
2868 unsigned int uid;
2869 struct df_link *link;
2870
2871 uid = INSN_UID (insn);
2872
2873 for (link = df->insns[uid].defs; link; link = link->next)
2874 {
2875 struct ref *def = link->ref;
2876
2877 if (DF_REF_REGNO (def) == regno)
2878 return 1;
2879 }
2880
2881 return 0;
2882 }
2883
2884
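/* Return non-zero if DEF is the only def reaching each of its uses.  */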
2885 static int
2886 df_def_dominates_all_uses_p (df, def)
2887 struct df *df ATTRIBUTE_UNUSED;
2888 struct ref *def;
2889 {
2890 struct df_link *du_link;
2891
2892 /* Follow def-use chain to find all the uses of this def. */
2893 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
2894 {
2895 struct ref *use = du_link->ref;
2896 struct df_link *ud_link;
2897
2898 /* Follow use-def chain to check all the defs for this use. */
2899 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
2900 if (ud_link->ref != def)
2901 return 0;
2902 }
2903 return 1;
2904 }
2905
2906
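/* Return non-zero if every def within INSN is the sole def reaching
   each of its uses.  */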
2907 int
2908 df_insn_dominates_all_uses_p (df, bb, insn)
2909 struct df *df;
2910 basic_block bb ATTRIBUTE_UNUSED;
2911 rtx insn;
2912 {
2913 unsigned int uid;
2914 struct df_link *link;
2915
2916 uid = INSN_UID (insn);
2917
2918 for (link = df->insns[uid].defs; link; link = link->next)
2919 {
2920 struct ref *def = link->ref;
2921
2922 if (! df_def_dominates_all_uses_p (df, def))
2923 return 0;
2924 }
2925
2926 return 1;
2927 }
2928
2929
/* Return non-zero if DEF dominates all its uses within the bitmap
   BLOCKS, i.e., if DEF is the only def reaching each of those uses.  */
2932 static int
2933 df_def_dominates_uses_p (df, def, blocks)
2934 struct df *df ATTRIBUTE_UNUSED;
2935 struct ref *def;
2936 bitmap blocks;
2937 {
2938 struct df_link *du_link;
2939
2940 /* Follow def-use chain to find all the uses of this def. */
2941 for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
2942 {
2943 struct ref *use = du_link->ref;
2944 struct df_link *ud_link;
2945
2946 /* Only worry about the uses within BLOCKS. For example,
2947 consider a register defined within a loop that is live at the
2948 loop exits. */
2949 if (bitmap_bit_p (blocks, DF_REF_BBNO (use)))
2950 {
2951 /* Follow use-def chain to check all the defs for this use. */
2952 for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
2953 if (ud_link->ref != def)
2954 return 0;
2955 }
2956 }
2957 return 1;
2958 }
2959
2960
/* Return non-zero if all the defs of INSN within BB dominate all
   their corresponding uses within BLOCKS.  */
2963 int
2964 df_insn_dominates_uses_p (df, bb, insn, blocks)
2965 struct df *df;
2966 basic_block bb ATTRIBUTE_UNUSED;
2967 rtx insn;
2968 bitmap blocks;
2969 {
2970 unsigned int uid;
2971 struct df_link *link;
2972
2973 uid = INSN_UID (insn);
2974
2975 for (link = df->insns[uid].defs; link; link = link->next)
2976 {
2977 struct ref *def = link->ref;
2978
2979 /* Only consider the defs within BLOCKS. */
2980 if (bitmap_bit_p (blocks, DF_REF_BBNO (def))
2981 && ! df_def_dominates_uses_p (df, def, blocks))
2982 return 0;
2983 }
2984 return 1;
2985 }
2986
2987
/* Return the basic block that REG is referenced in, or NULL if it
   is referenced in multiple basic blocks.  */
2990 basic_block
2991 df_regno_bb (df, regno)
2992 struct df *df;
2993 unsigned int regno;
2994 {
2995 struct df_link *defs = df->regs[regno].defs;
2996 struct df_link *uses = df->regs[regno].uses;
2997 struct ref *def = defs ? defs->ref : 0;
2998 struct ref *use = uses ? uses->ref : 0;
2999 basic_block bb_def = def ? DF_REF_BB (def) : 0;
3000 basic_block bb_use = use ? DF_REF_BB (use) : 0;
3001
  /* Compare blocks of first def and last use.  ???? FIXME.  What if
     the reg-def and reg-use lists are not correctly ordered?  */
3004 return bb_def == bb_use ? bb_def : 0;
3005 }
3006
3007
/* Return non-zero if REG is used in multiple basic blocks.  */
int
df_reg_global_p (df, reg)
     struct df *df;
     rtx reg;
{
  /* df_regno_bb returns the containing block only when all the refs
     of REG are local to a single block, so a zero result means REG
     is global.  */
  return df_regno_bb (df, REGNO (reg)) == 0;
}
3016
3017
3018 /* Return total lifetime (in insns) of REG. */
3019 int
3020 df_reg_lifetime (df, reg)
3021 struct df *df;
3022 rtx reg;
3023 {
3024 return df->regs[REGNO (reg)].lifetime;
3025 }
3026
3027
/* Return non-zero if REG is live at the start of BB.  */
int
df_bb_reg_live_start_p (df, bb, reg)
     struct df *df;
3032 basic_block bb;
3033 rtx reg;
3034 {
3035 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3036
3037 #ifdef ENABLE_CHECKING
3038 if (! bb_info->lr_in)
3039 abort ();
3040 #endif
3041
3042 return bitmap_bit_p (bb_info->lr_in, REGNO (reg));
3043 }
3044
3045
/* Return non-zero if REG is live at the end of BB.  */
int
df_bb_reg_live_end_p (df, bb, reg)
     struct df *df;
3050 basic_block bb;
3051 rtx reg;
3052 {
3053 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3054
3055 #ifdef ENABLE_CHECKING
  if (! bb_info->lr_out)
3057 abort ();
3058 #endif
3059
3060 return bitmap_bit_p (bb_info->lr_out, REGNO (reg));
3061 }
3062
3063
/* Return -1 if the life of REG1 ends before the life of REG2 begins,
   1 if the life of REG1 begins after the life of REG2 ends, or 0 if
   the lives overlap.  */
3066 int
3067 df_bb_regs_lives_compare (df, bb, reg1, reg2)
3068 struct df *df;
3069 basic_block bb;
3070 rtx reg1;
3071 rtx reg2;
3072 {
3073 unsigned int regno1 = REGNO (reg1);
3074 unsigned int regno2 = REGNO (reg2);
3075 struct ref *def1;
3076 struct ref *use1;
3077 struct ref *def2;
3078 struct ref *use2;
3079
3081 /* The regs must be local to BB. */
3082 if (df_regno_bb (df, regno1) != bb
3083 || df_regno_bb (df, regno2) != bb)
3084 abort ();
3085
3086 def2 = df_bb_regno_first_def_find (df, bb, regno2);
3087 use1 = df_bb_regno_last_use_find (df, bb, regno1);
3088
3089 if (DF_INSN_LUID (df, DF_REF_INSN (def2))
3090 > DF_INSN_LUID (df, DF_REF_INSN (use1)))
3091 return -1;
3092
3093 def1 = df_bb_regno_first_def_find (df, bb, regno1);
3094 use2 = df_bb_regno_last_use_find (df, bb, regno2);
3095
3096 if (DF_INSN_LUID (df, DF_REF_INSN (def1))
3097 > DF_INSN_LUID (df, DF_REF_INSN (use2)))
3098 return 1;
3099
3100 return 0;
3101 }
3102
3103
3104 /* Return last use of REGNO within BB. */
3105 static struct ref *
3106 df_bb_regno_last_use_find (df, bb, regno)
3107 struct df * df;
     basic_block bb;
3109 unsigned int regno;
3110 {
3111 struct df_link *link;
3112
3113 /* This assumes that the reg-use list is ordered such that for any
3114 BB, the last use is found first. However, since the BBs are not
3115 ordered, the first use in the chain is not necessarily the last
3116 use in the function. */
3117 for (link = df->regs[regno].uses; link; link = link->next)
3118 {
3119 struct ref *use = link->ref;
3120
3121 if (DF_REF_BB (use) == bb)
3122 return use;
3123 }
3124 return 0;
3125 }
3126
3127
3128 /* Return first def of REGNO within BB. */
3129 static struct ref *
3130 df_bb_regno_first_def_find (df, bb, regno)
3131 struct df * df;
     basic_block bb;
3133 unsigned int regno;
3134 {
3135 struct df_link *link;
3136
3137 /* This assumes that the reg-def list is ordered such that for any
3138 BB, the first def is found first. However, since the BBs are not
3139 ordered, the first def in the chain is not necessarily the first
3140 def in the function. */
3141 for (link = df->regs[regno].defs; link; link = link->next)
3142 {
3143 struct ref *def = link->ref;
3144
3145 if (DF_REF_BB (def) == bb)
3146 return def;
3147 }
3148 return 0;
3149 }
3150
3151
/* Return the last use of REGNO inside INSN within BB.  */
3153 static struct ref *
3154 df_bb_insn_regno_last_use_find (df, bb, insn, regno)
3155 struct df * df;
3156 basic_block bb ATTRIBUTE_UNUSED;
3157 rtx insn;
3158 unsigned int regno;
3159 {
3160 unsigned int uid;
3161 struct df_link *link;
3162
3163 uid = INSN_UID (insn);
3164
3165 for (link = df->insns[uid].uses; link; link = link->next)
3166 {
3167 struct ref *use = link->ref;
3168
3169 if (DF_REF_REGNO (use) == regno)
3170 return use;
3171 }
3172
3173 return 0;
3174 }
3175
3176
3177 /* Return first def of REGNO inside INSN within BB. */
3178 static struct ref *
3179 df_bb_insn_regno_first_def_find (df, bb, insn, regno)
3180 struct df * df;
3181 basic_block bb ATTRIBUTE_UNUSED;
3182 rtx insn;
3183 unsigned int regno;
3184 {
3185 unsigned int uid;
3186 struct df_link *link;
3187
3188 uid = INSN_UID (insn);
3189
3190 for (link = df->insns[uid].defs; link; link = link->next)
3191 {
3192 struct ref *def = link->ref;
3193
3194 if (DF_REF_REGNO (def) == regno)
3195 return def;
3196 }
3197
3198 return 0;
3199 }
3200
3201
/* Return the insn using REG if the def of REG within INSN has
   exactly one use; return NULL_RTX if the def is dead or has
   multiple uses.  */
3204 rtx
3205 df_bb_single_def_use_insn_find (df, bb, insn, reg)
3206 struct df * df;
3207 basic_block bb;
3208 rtx insn;
3209 rtx reg;
3210 {
3211 struct ref *def;
3212 struct ref *use;
3213 struct df_link *du_link;
3214
3215 def = df_bb_insn_regno_first_def_find (df, bb, insn, REGNO (reg));
3216
3217 if (! def)
3218 abort ();
3219
3220 du_link = DF_REF_CHAIN (def);
3221
3222 if (! du_link)
3223 return NULL_RTX;
3224
3225 use = du_link->ref;
3226
3227 /* Check if def is dead. */
3228 if (! use)
3229 return NULL_RTX;
3230
3231 /* Check for multiple uses. */
3232 if (du_link->next)
3233 return NULL_RTX;
3234
3235 return DF_REF_INSN (use);
3236 }
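
/* A hedged sketch: a pass looking for single-use values might
   combine the two queries above before attempting to forward the
   source of INSN into its lone use (all other checks elided):  */
#if 0
  rtx use_insn = df_bb_single_def_use_insn_find (df, bb, insn, reg);

  if (use_insn != NULL_RTX
      && df_insn_dominates_all_uses_p (df, bb, insn))
    {
      /* USE_INSN is the only consumer of the value of REG set by
	 INSN; it is a candidate for forward propagation.  */
    }
#endif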
3237 \f
3238 /* Functions for debugging/dumping dataflow information. */
3239
3240
/* Dump the def-use or use-def chain starting at LINK to FILE.  */
3242 static void
3243 df_chain_dump (link, file)
3244 struct df_link *link;
3245 FILE *file;
3246 {
3247 fprintf (file, "{ ");
3248 for (; link; link = link->next)
3249 {
3250 fprintf (file, "%c%d ",
3251 DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
3252 DF_REF_ID (link->ref));
3253 }
3254 fprintf (file, "}");
3255 }
3256
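
/* Like df_chain_dump, but also print the register number of each ref.  */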
3257 static void
3258 df_chain_dump_regno (link, file)
3259 struct df_link *link;
3260 FILE *file;
3261 {
3262 fprintf (file, "{ ");
3263 for (; link; link = link->next)
3264 {
3265 fprintf (file, "%c%d(%d) ",
3266 DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
3267 DF_REF_ID (link->ref),
3268 DF_REF_REGNO (link->ref));
3269 }
3270 fprintf (file, "}");
3271 }
3272
3273 /* Dump dataflow info. */
3274 void
3275 df_dump (df, flags, file)
3276 struct df *df;
3277 int flags;
3278 FILE *file;
3279 {
3280 unsigned int i;
3281 unsigned int j;
3282
3283 if (! df || ! file)
3284 return;
3285
3286 fprintf (file, "\nDataflow summary:\n");
3287 fprintf (file, "n_regs = %d, n_defs = %d, n_uses = %d, n_bbs = %d\n",
3288 df->n_regs, df->n_defs, df->n_uses, df->n_bbs);
3289
3290 if (flags & DF_RD)
3291 {
3292 fprintf (file, "Reaching defs:\n");
3293 for (i = 0; i < df->n_bbs; i++)
3294 {
3295 basic_block bb = BASIC_BLOCK (i);
3296 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3297
3298 if (! bb_info->rd_in)
3299 continue;
3300
3301 fprintf (file, "bb %d in \t", i);
3302 dump_bitmap (file, bb_info->rd_in);
3303 fprintf (file, "bb %d gen \t", i);
3304 dump_bitmap (file, bb_info->rd_gen);
3305 fprintf (file, "bb %d kill\t", i);
3306 dump_bitmap (file, bb_info->rd_kill);
3307 fprintf (file, "bb %d out \t", i);
3308 dump_bitmap (file, bb_info->rd_out);
3309 }
3310 }
3311
3312 if (flags & DF_UD_CHAIN)
3313 {
3314 fprintf (file, "Use-def chains:\n");
3315 for (j = 0; j < df->n_defs; j++)
3316 {
3317 if (df->defs[j])
3318 {
3319 fprintf (file, "d%d bb %d luid %d insn %d reg %d ",
3320 j, DF_REF_BBNO (df->defs[j]),
3321 DF_INSN_LUID (df, DF_REF_INSN (df->defs[j])),
3322 DF_REF_INSN_UID (df->defs[j]),
3323 DF_REF_REGNO (df->defs[j]));
3324 if (df->defs[j]->flags & DF_REF_READ_WRITE)
3325 fprintf (file, "read/write ");
3326 df_chain_dump (DF_REF_CHAIN (df->defs[j]), file);
3327 fprintf (file, "\n");
3328 }
3329 }
3330 }
3331
3332 if (flags & DF_RU)
3333 {
3334 fprintf (file, "Reaching uses:\n");
3335 for (i = 0; i < df->n_bbs; i++)
3336 {
3337 basic_block bb = BASIC_BLOCK (i);
3338 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3339
3340 if (! bb_info->ru_in)
3341 continue;
3342
3343 fprintf (file, "bb %d in \t", i);
3344 dump_bitmap (file, bb_info->ru_in);
3345 fprintf (file, "bb %d gen \t", i);
3346 dump_bitmap (file, bb_info->ru_gen);
3347 fprintf (file, "bb %d kill\t", i);
3348 dump_bitmap (file, bb_info->ru_kill);
3349 fprintf (file, "bb %d out \t", i);
3350 dump_bitmap (file, bb_info->ru_out);
3351 }
3352 }
3353
3354 if (flags & DF_DU_CHAIN)
3355 {
3356 fprintf (file, "Def-use chains:\n");
3357 for (j = 0; j < df->n_uses; j++)
3358 {
3359 if (df->uses[j])
3360 {
3361 fprintf (file, "u%d bb %d luid %d insn %d reg %d ",
3362 j, DF_REF_BBNO (df->uses[j]),
3363 DF_INSN_LUID (df, DF_REF_INSN (df->uses[j])),
3364 DF_REF_INSN_UID (df->uses[j]),
3365 DF_REF_REGNO (df->uses[j]));
3366 if (df->uses[j]->flags & DF_REF_READ_WRITE)
3367 fprintf (file, "read/write ");
3368 df_chain_dump (DF_REF_CHAIN (df->uses[j]), file);
3369 fprintf (file, "\n");
3370 }
3371 }
3372 }
3373
3374 if (flags & DF_LR)
3375 {
3376 fprintf (file, "Live regs:\n");
3377 for (i = 0; i < df->n_bbs; i++)
3378 {
3379 basic_block bb = BASIC_BLOCK (i);
3380 struct bb_info *bb_info = DF_BB_INFO (df, bb);
3381
3382 if (! bb_info->lr_in)
3383 continue;
3384
3385 fprintf (file, "bb %d in \t", i);
3386 dump_bitmap (file, bb_info->lr_in);
3387 fprintf (file, "bb %d use \t", i);
3388 dump_bitmap (file, bb_info->lr_use);
3389 fprintf (file, "bb %d def \t", i);
3390 dump_bitmap (file, bb_info->lr_def);
3391 fprintf (file, "bb %d out \t", i);
3392 dump_bitmap (file, bb_info->lr_out);
3393 }
3394 }
3395
3396 if (flags & (DF_REG_INFO | DF_RD_CHAIN | DF_RU_CHAIN))
3397 {
3398 struct reg_info *reg_info = df->regs;
3399
3400 fprintf (file, "Register info:\n");
3401 for (j = 0; j < df->n_regs; j++)
3402 {
3403 if (((flags & DF_REG_INFO)
3404 && (reg_info[j].n_uses || reg_info[j].n_defs))
3405 || ((flags & DF_RD_CHAIN) && reg_info[j].defs)
3406 || ((flags & DF_RU_CHAIN) && reg_info[j].uses))
3407 {
3408 fprintf (file, "reg %d", j);
3409 if ((flags & DF_RD_CHAIN) && (flags & DF_RU_CHAIN))
3410 {
3411 basic_block bb = df_regno_bb (df, j);
3412
3413 if (bb)
3414 fprintf (file, " bb %d", bb->index);
3415 else
3416 fprintf (file, " bb ?");
3417 }
3418 if (flags & DF_REG_INFO)
3419 {
3420 fprintf (file, " life %d", reg_info[j].lifetime);
3421 }
3422
3423 if ((flags & DF_REG_INFO) || (flags & DF_RD_CHAIN))
3424 {
3425 fprintf (file, " defs ");
3426 if (flags & DF_REG_INFO)
3427 fprintf (file, "%d ", reg_info[j].n_defs);
3428 if (flags & DF_RD_CHAIN)
3429 df_chain_dump (reg_info[j].defs, file);
3430 }
3431
3432 if ((flags & DF_REG_INFO) || (flags & DF_RU_CHAIN))
3433 {
3434 fprintf (file, " uses ");
3435 if (flags & DF_REG_INFO)
3436 fprintf (file, "%d ", reg_info[j].n_uses);
3437 if (flags & DF_RU_CHAIN)
3438 df_chain_dump (reg_info[j].uses, file);
3439 }
3440
3441 fprintf (file, "\n");
3442 }
3443 }
3444 }
3445 fprintf (file, "\n");
3446 }
3447
3448
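/* Dump the def and use chains of INSN to FILE.  */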
3449 void
3450 df_insn_debug (df, insn, file)
3451 struct df *df;
3452 rtx insn;
3453 FILE *file;
3454 {
3455 unsigned int uid;
3456 int bbi;
3457
3458 uid = INSN_UID (insn);
3459 if (uid >= df->insn_size)
3460 return;
3461
3462 if (df->insns[uid].defs)
3463 bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
3464 else if (df->insns[uid].uses)
3465 bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
3466 else
3467 bbi = -1;
3468
3469 fprintf (file, "insn %d bb %d luid %d defs ",
3470 uid, bbi, DF_INSN_LUID (df, insn));
3471 df_chain_dump (df->insns[uid].defs, file);
3472 fprintf (file, " uses ");
3473 df_chain_dump (df->insns[uid].uses, file);
3474 fprintf (file, "\n");
3475 }
3476
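
/* Like df_insn_debug, but print register numbers in the chains.  */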
3477 void
3478 df_insn_debug_regno (df, insn, file)
3479 struct df *df;
3480 rtx insn;
3481 FILE *file;
3482 {
3483 unsigned int uid;
3484 int bbi;
3485
3486 uid = INSN_UID (insn);
3487 if (uid >= df->insn_size)
3488 return;
3489
3490 if (df->insns[uid].defs)
3491 bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
3492 else if (df->insns[uid].uses)
3493 bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
3494 else
3495 bbi = -1;
3496
3497 fprintf (file, "insn %d bb %d luid %d defs ",
3498 uid, bbi, DF_INSN_LUID (df, insn));
3499 df_chain_dump_regno (df->insns[uid].defs, file);
3500 fprintf (file, " uses ");
3501 df_chain_dump_regno (df->insns[uid].uses, file);
3502 fprintf (file, "\n");
3503 }
3504
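
/* Dump the def and use chains of REGNO to FILE.  */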
3505 static void
3506 df_regno_debug (df, regno, file)
3507 struct df *df;
3508 unsigned int regno;
3509 FILE *file;
3510 {
3511 if (regno >= df->reg_size)
3512 return;
3513
3514 fprintf (file, "reg %d life %d defs ",
3515 regno, df->regs[regno].lifetime);
3516 df_chain_dump (df->regs[regno].defs, file);
3517 fprintf (file, " uses ");
3518 df_chain_dump (df->regs[regno].uses, file);
3519 fprintf (file, "\n");
3520 }
3521
3522
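/* Dump REF and its def-use or use-def chain to FILE.  */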
3523 static void
3524 df_ref_debug (df, ref, file)
3525 struct df *df;
3526 struct ref *ref;
3527 FILE *file;
3528 {
3529 fprintf (file, "%c%d ",
3530 DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
3531 DF_REF_ID (ref));
3532 fprintf (file, "reg %d bb %d luid %d insn %d chain ",
3533 DF_REF_REGNO (ref),
3534 DF_REF_BBNO (ref),
3535 DF_INSN_LUID (df, DF_REF_INSN (ref)),
3536 INSN_UID (DF_REF_INSN (ref)));
3537 df_chain_dump (DF_REF_CHAIN (ref), file);
3538 fprintf (file, "\n");
3539 }
3540
3541
3542 void
3543 debug_df_insn (insn)
3544 rtx insn;
3545 {
3546 df_insn_debug (ddf, insn, stderr);
3547 debug_rtx (insn);
3548 }
3549
3550
3551 void
3552 debug_df_reg (reg)
3553 rtx reg;
3554 {
3555 df_regno_debug (ddf, REGNO (reg), stderr);
3556 }
3557
3558
3559 void
3560 debug_df_regno (regno)
3561 unsigned int regno;
3562 {
3563 df_regno_debug (ddf, regno, stderr);
3564 }
3565
3566
3567 void
3568 debug_df_ref (ref)
3569 struct ref *ref;
3570 {
3571 df_ref_debug (ddf, ref, stderr);
3572 }
3573
3574
3575 void
3576 debug_df_defno (defno)
3577 unsigned int defno;
3578 {
3579 df_ref_debug (ddf, ddf->defs[defno], stderr);
3580 }
3581
3582
3583 void
debug_df_useno (useno)
     unsigned int useno;
{
  df_ref_debug (ddf, ddf->uses[useno], stderr);
3588 }
3589
3590
3591 void
3592 debug_df_chain (link)
3593 struct df_link *link;
3594 {
3595 df_chain_dump (link, stderr);
3596 fputc ('\n', stderr);
3597 }
3598
3599 /* Hybrid search algorithm from "Implementation Techniques for
3600 Efficient Data-Flow Analysis of Large Programs". */
3601 static void
3602 hybrid_search_bitmap (block, in, out, gen, kill, dir,
3603 conf_op, transfun, visited, pending,
3604 data)
3605 basic_block block;
3606 bitmap *in, *out, *gen, *kill;
3607 enum df_flow_dir dir;
3608 enum df_confluence_op conf_op;
3609 transfer_function_bitmap transfun;
3610 sbitmap visited;
3611 sbitmap pending;
3612 void *data;
3613 {
3614 int changed;
3615 int i = block->index;
3616 edge e;
  basic_block bb = block;
3618 SET_BIT (visited, block->index);
3619 if (TEST_BIT (pending, block->index))
3620 {
3621 if (dir == FORWARD)
3622 {
3623 /* Calculate <conf_op> of predecessor_outs */
3624 bitmap_zero (in[i]);
3625 for (e = bb->pred; e != 0; e = e->pred_next)
3626 {
3627 if (e->src == ENTRY_BLOCK_PTR)
3628 continue;
3629 switch (conf_op)
3630 {
3631 case UNION:
3632 bitmap_a_or_b (in[i], in[i], out[e->src->index]);
3633 break;
3634 case INTERSECTION:
3635 bitmap_a_and_b (in[i], in[i], out[e->src->index]);
3636 break;
3637 }
3638 }
3639 }
3640 else
3641 {
3642 /* Calculate <conf_op> of successor ins */
	  bitmap_zero (out[i]);
3644 for (e = bb->succ; e != 0; e = e->succ_next)
3645 {
3646 if (e->dest == EXIT_BLOCK_PTR)
3647 continue;
3648 switch (conf_op)
3649 {
3650 case UNION:
3651 bitmap_a_or_b (out[i], out[i], in[e->dest->index]);
3652 break;
3653 case INTERSECTION:
3654 bitmap_a_and_b (out[i], out[i], in[e->dest->index]);
3655 break;
3656 }
3657 }
3658 }
3659 /* Common part */
      (*transfun) (i, &changed, in[i], out[i], gen[i], kill[i], data);
3661 RESET_BIT (pending, i);
3662 if (changed)
3663 {
3664 if (dir == FORWARD)
3665 {
3666 for (e = bb->succ; e != 0; e = e->succ_next)
3667 {
3668 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3669 continue;
3670 SET_BIT (pending, e->dest->index);
3671 }
3672 }
3673 else
3674 {
3675 for (e = bb->pred; e != 0; e = e->pred_next)
3676 {
	      if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3678 continue;
3679 SET_BIT (pending, e->src->index);
3680 }
3681 }
3682 }
3683 }
3684 if (dir == FORWARD)
3685 {
3686 for (e = bb->succ; e != 0; e = e->succ_next)
3687 {
3688 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3689 continue;
3690 if (!TEST_BIT (visited, e->dest->index))
3691 hybrid_search_bitmap (e->dest, in, out, gen, kill, dir,
3692 conf_op, transfun, visited, pending,
3693 data);
3694 }
3695 }
3696 else
3697 {
3698 for (e = bb->pred; e != 0; e = e->pred_next)
3699 {
3700 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3701 continue;
3702 if (!TEST_BIT (visited, e->src->index))
3703 hybrid_search_bitmap (e->src, in, out, gen, kill, dir,
3704 conf_op, transfun, visited, pending,
3705 data);
3706 }
3707 }
3708 }
3709
3710
3711 /* Hybrid search for sbitmaps, rather than bitmaps. */
3712 static void
3713 hybrid_search_sbitmap (block, in, out, gen, kill, dir,
3714 conf_op, transfun, visited, pending,
3715 data)
3716 basic_block block;
3717 sbitmap *in, *out, *gen, *kill;
3718 enum df_flow_dir dir;
3719 enum df_confluence_op conf_op;
3720 transfer_function_sbitmap transfun;
3721 sbitmap visited;
3722 sbitmap pending;
3723 void *data;
3724 {
3725 int changed;
3726 int i = block->index;
3727 edge e;
  basic_block bb = block;
3729 SET_BIT (visited, block->index);
3730 if (TEST_BIT (pending, block->index))
3731 {
3732 if (dir == FORWARD)
3733 {
3734 /* Calculate <conf_op> of predecessor_outs */
3735 sbitmap_zero (in[i]);
3736 for (e = bb->pred; e != 0; e = e->pred_next)
3737 {
3738 if (e->src == ENTRY_BLOCK_PTR)
3739 continue;
3740 switch (conf_op)
3741 {
3742 case UNION:
3743 sbitmap_a_or_b (in[i], in[i], out[e->src->index]);
3744 break;
3745 case INTERSECTION:
3746 sbitmap_a_and_b (in[i], in[i], out[e->src->index]);
3747 break;
3748 }
3749 }
3750 }
3751 else
3752 {
3753 /* Calculate <conf_op> of successor ins */
	  sbitmap_zero (out[i]);
3755 for (e = bb->succ; e != 0; e = e->succ_next)
3756 {
3757 if (e->dest == EXIT_BLOCK_PTR)
3758 continue;
3759 switch (conf_op)
3760 {
3761 case UNION:
3762 sbitmap_a_or_b (out[i], out[i], in[e->dest->index]);
3763 break;
3764 case INTERSECTION:
3765 sbitmap_a_and_b (out[i], out[i], in[e->dest->index]);
3766 break;
3767 }
3768 }
3769 }
3770 /* Common part */
      (*transfun) (i, &changed, in[i], out[i], gen[i], kill[i], data);
3772 RESET_BIT (pending, i);
3773 if (changed)
3774 {
3775 if (dir == FORWARD)
3776 {
3777 for (e = bb->succ; e != 0; e = e->succ_next)
3778 {
3779 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3780 continue;
3781 SET_BIT (pending, e->dest->index);
3782 }
3783 }
3784 else
3785 {
3786 for (e = bb->pred; e != 0; e = e->pred_next)
3787 {
	      if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3789 continue;
3790 SET_BIT (pending, e->src->index);
3791 }
3792 }
3793 }
3794 }
3795 if (dir == FORWARD)
3796 {
3797 for (e = bb->succ; e != 0; e = e->succ_next)
3798 {
3799 if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
3800 continue;
3801 if (!TEST_BIT (visited, e->dest->index))
3802 hybrid_search_sbitmap (e->dest, in, out, gen, kill, dir,
3803 conf_op, transfun, visited, pending,
3804 data);
3805 }
3806 }
3807 else
3808 {
3809 for (e = bb->pred; e != 0; e = e->pred_next)
3810 {
3811 if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
3812 continue;
3813 if (!TEST_BIT (visited, e->src->index))
3814 hybrid_search_sbitmap (e->src, in, out, gen, kill, dir,
3815 conf_op, transfun, visited, pending,
3816 data);
3817 }
3818 }
3819 }
3820
3821
3824 /* gen = GEN set.
3825 kill = KILL set.
3826 in, out = Filled in by function.
3827 blocks = Blocks to analyze.
3828 dir = Dataflow direction.
3829 conf_op = Confluence operation.
3830 transfun = Transfer function.
3831 order = Order to iterate in. (Should map block numbers -> order)
3832 data = Whatever you want. It's passed to the transfer function.
3833
3834 This function will perform iterative bitvector dataflow, producing
3835 the in and out sets. Even if you only want to perform it for a
3836 small number of blocks, the vectors for in and out must be large
3837 enough for *all* blocks, because changing one block might affect
3838 others. However, it'll only put what you say to analyze on the
3839 initial worklist.
3840
3841 For forward problems, you probably want to pass in a mapping of
3842 block number to rc_order (like df->inverse_rc_map).
3843 */
3844 void
3845 iterative_dataflow_sbitmap (in, out, gen, kill, blocks,
3846 dir, conf_op, transfun, order, data)
3847 sbitmap *in, *out, *gen, *kill;
3848 bitmap blocks;
3849 enum df_flow_dir dir;
3850 enum df_confluence_op conf_op;
3851 transfer_function_sbitmap transfun;
3852 int *order;
3853 void *data;
3854 {
3855 int i;
3856 fibheap_t worklist;
3857 basic_block bb;
3858 sbitmap visited, pending;
3859 pending = sbitmap_alloc (n_basic_blocks);
3860 visited = sbitmap_alloc (n_basic_blocks);
3861 sbitmap_zero (pending);
3862 sbitmap_zero (visited);
3863 worklist = fibheap_new ();
3864 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3865 {
3866 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3867 SET_BIT (pending, i);
3868 if (dir == FORWARD)
3869 sbitmap_copy (out[i], gen[i]);
3870 else
3871 sbitmap_copy (in[i], gen[i]);
3872 });
3873 while (sbitmap_first_set_bit (pending) != -1)
3874 {
3875 while (!fibheap_empty (worklist))
3876 {
3877 i = (size_t) fibheap_extract_min (worklist);
3878 bb = BASIC_BLOCK (i);
3879 if (!TEST_BIT (visited, bb->index))
3880 hybrid_search_sbitmap (bb, in, out, gen, kill, dir,
3881 conf_op, transfun, visited, pending, data);
3882 }
3883 if (sbitmap_first_set_bit (pending) != -1)
3884 {
3885 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3886 {
3887 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3888 });
3889 sbitmap_zero (visited);
3890 }
3891 else
3892 {
3893 break;
3894 }
3895 }
3896 sbitmap_free (pending);
3897 sbitmap_free (visited);
3898 fibheap_delete (worklist);
3899 }
3900
3901 /* Exactly the same as iterative_dataflow_sbitmap, except it works on
   bitmaps instead.  */
3903 void
3904 iterative_dataflow_bitmap (in, out, gen, kill, blocks,
3905 dir, conf_op, transfun, order, data)
3906 bitmap *in, *out, *gen, *kill;
3907 bitmap blocks;
3908 enum df_flow_dir dir;
3909 enum df_confluence_op conf_op;
3910 transfer_function_bitmap transfun;
3911 int *order;
3912 void *data;
3913 {
3914 int i;
3915 fibheap_t worklist;
3916 basic_block bb;
3917 sbitmap visited, pending;
3918 pending = sbitmap_alloc (n_basic_blocks);
3919 visited = sbitmap_alloc (n_basic_blocks);
3920 sbitmap_zero (pending);
3921 sbitmap_zero (visited);
3922 worklist = fibheap_new ();
3923 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3924 {
3925 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3926 SET_BIT (pending, i);
3927 if (dir == FORWARD)
3928 bitmap_copy (out[i], gen[i]);
3929 else
3930 bitmap_copy (in[i], gen[i]);
3931 });
3932 while (sbitmap_first_set_bit (pending) != -1)
3933 {
3934 while (!fibheap_empty (worklist))
3935 {
3936 i = (size_t) fibheap_extract_min (worklist);
3937 bb = BASIC_BLOCK (i);
3938 if (!TEST_BIT (visited, bb->index))
3939 hybrid_search_bitmap (bb, in, out, gen, kill, dir,
3940 conf_op, transfun, visited, pending, data);
3941 }
3942 if (sbitmap_first_set_bit (pending) != -1)
3943 {
3944 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
3945 {
3946 fibheap_insert (worklist, order[i], (void *) (size_t) i);
3947 });
3948 sbitmap_zero (visited);
3949 }
3950 else
3951 {
3952 break;
3953 }
3954 }
3955 sbitmap_free (pending);
3956 sbitmap_free (visited);
3957 fibheap_delete (worklist);
3958 }
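
/* A hedged sketch of a transfer function suitable for the solvers
   above, in the style of df_rd_transfer_function: for a FORWARD,
   UNION problem, compute out = gen | (in & ~kill) and report whether
   OUT changed.  */
#if 0
static void
example_transfer_function (bb, changed, in, out, gen, kill, data)
     int bb ATTRIBUTE_UNUSED;
     int *changed;
     bitmap in, out, gen, kill;
     void *data ATTRIBUTE_UNUSED;
{
  *changed = bitmap_union_of_diff (out, gen, in, kill);
}
#endif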