1 /* RTL dead store elimination.
2 Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
3
4 Contributed by Richard Sandiford <rsandifo@codesourcery.com>
5 and Kenneth Zadeck <zadeck@naturalbridge.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #undef BASELINE
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "hashtab.h"
29 #include "tm.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "flags.h"
35 #include "df.h"
36 #include "cselib.h"
37 #include "timevar.h"
38 #include "tree-pass.h"
39 #include "alloc-pool.h"
40 #include "alias.h"
41 #include "insn-config.h"
42 #include "expr.h"
43 #include "recog.h"
44 #include "dse.h"
45 #include "optabs.h"
46 #include "dbgcnt.h"
47
48 /* This file contains three techniques for performing Dead Store
49 Elimination (dse).
50
51 * The first technique performs dse locally on any base address. It
52 is based on cselib, which is a local value numbering technique.
53 This technique is local to a basic block but deals with fairly
54 general addresses.
55
56 * The second technique performs dse globally but is restricted to
57 base addresses that are either constant or are relative to the
58 frame_pointer.
59
60 * The third technique (which is only done after register allocation)
61 processes the spill slots. This differs from the second
62 technique because it takes advantage of the fact that spilling is
63 completely free from the effects of aliasing.
64
65 Logically, dse is a backwards dataflow problem. A store can be
66 deleted if it cannot be reached in the backward direction by any
67 use of the value being stored. However, the local technique uses a
68 forwards scan of the basic block because cselib requires that the
69 block be processed in that order.
70
71 The pass is logically broken into 7 steps:
72
73 0) Initialization.
74
75 1) The local algorithm, as well as scanning the insns for the two
76 global algorithms.
77
78 2) Analysis to see if the global algs are necessary. In the case
79 of stores based on a constant address, there must be at least two
80 stores to that address, to make it possible to delete some of the
81 stores. In the case of stores off of the frame or spill related
82 stores, only one store to an address is necessary because those
83 stores die at the end of the function.
84
85 3) Set up the global dataflow equations based on processing the
86 info parsed in the first step.
87
88 4) Solve the dataflow equations.
89
90 5) Delete the insns that the global analysis has indicated are
91 unnecessary.
92
93 6) Cleanup.
94
95 The scanning step (1) uses cselib and canon_rtx to build the largest
96 expression possible for each address. It is a forwards pass through
97 each basic block. From the point of view of the global technique,
98 the first pass could examine a block in either direction. The
99 forwards ordering is to accommodate cselib.
100
101 We make a simplifying assumption: addresses fall into four broad
102 categories:
103
104 1) base has rtx_varies_p == false, offset is constant.
105 2) base has rtx_varies_p == false, offset variable.
106 3) base has rtx_varies_p == true, offset constant.
107 4) base has rtx_varies_p == true, offset variable.
108
109 The local passes are able to process all 4 kinds of addresses. The
110 global pass only handles (1).
111
112 The global problem is formulated as follows:
113
114 A store, S1, to address A, where A is not relative to the stack
115 frame, can be eliminated if all paths from S1 to the end of
116 the function contain another store to A before a read to A.
117
118 If the address A is relative to the stack frame, a store S2 to A
119 can be eliminated if there are no paths from S2 that reach the
120 end of the function that read A before another store to A. In
121 this case S2 can be deleted if there are paths from S2 to the
122 end of the function that have no reads or writes to A. This
123 second case allows stores to the stack frame to be deleted that
124 would otherwise die when the function returns. This cannot be
125 done if stores_off_frame_dead_at_return is not true. See the doc
126 for that variable for when it is false.
127
128 The global problem is formulated as a backwards set intersection
129 dataflow problem where the stores are the gens and reads are the
130 kills. Set intersection problems are rare and require some special
131 handling given our representation of bitmaps. A straightforward
132 implementation requires a lot of bitmaps filled with 1s.
133 These are expensive and cumbersome in our bitmap formulation so
134 care has been taken to avoid large vectors filled with 1s. See
135 the comments in bb_info and in the dataflow confluence functions
136 for details.
137
138 There are two places for further enhancements to this algorithm:
139
140 1) The original dse which was embedded in a pass called flow also
141 did local address forwarding. For example in
142
143 A <- r100
144 ... <- A
145
146 flow would replace the right hand side of the second insn with a
147 reference to r100. Most of the information needed to add this is
148 available in this pass. It has not been done because it is a lot of work in
149 the case that either r100 is assigned to between the first and
150 second insn and/or the second insn is a load of part of the value
151 stored by the first insn.
152
153 insn 5 in gcc.c-torture/compile/990203-1.c simple case.
154 insn 15 in gcc.c-torture/execute/20001017-2.c simple case.
155 insn 25 in gcc.c-torture/execute/20001026-1.c simple case.
156 insn 44 in gcc.c-torture/execute/20010910-1.c simple case.
157
158 2) The cleaning up of spill code is quite profitable. It currently
159 depends on reading tea leaves and chicken entrails left by reload.
160 This pass depends on reload creating a singleton alias set for each
161 spill slot and telling the next dse pass which of these alias sets
162 are the singletons. Rather than analyze the addresses of the
163 spills, dse's spill processing just does analysis of the loads and
164 stores that use those alias sets. There are three cases where this
165 falls short:
166
167 a) Reload sometimes creates the slot for one mode of access, and
168 then inserts loads and/or stores for a smaller mode. In this
169 case, the current code just punts on the slot. The proper thing
170 to do is to back out and use one bit vector position for each
171 byte of the entity associated with the slot. This depends on
172 KNOWING that reload always generates the accesses for each of the
173 bytes in some canonical (read: easy to understand several
174 passes after reload happens) way.
175
176 b) Reload sometimes decides that the spill slot it allocated was not
177 large enough for the mode and goes back and allocates more slots
178 with the same mode and alias set. The backout in this case is a
179 little more graceful than (a). In this case the slot is unmarked
180 as being a spill slot and if the final address comes out to be based
181 off the frame pointer, the global algorithm handles this slot.
182
183 c) For any pass that may prespill, there is currently no
184 mechanism to tell the dse pass that the slot being used has the
185 special properties that reload uses. It may be that all that is
186 required is to have those passes make the same calls that reload
187 does, assuming that the alias sets can be manipulated in the same
188 way. */
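/* Illustrative sketch only (compiled out, not part of the pass): the
   kind of redundancy described above.  The first assignment to g below
   is dead on every path to the exit because it is overwritten before
   any read; deleting such stores is exactly this pass's job.  */
#if 0
static int g;

static int
dse_example (int x)
{
  g = x;	/* Dead: overwritten below before any read of g.  */
  g = x + 1;	/* Live: the read below needs this value.  */
  return g;
}
#endif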
189
190 /* There are limits to the size of constant offsets we model for the
191 global problem. There are certainly test cases that exceed this
192 limit; however, it is unlikely that there are important programs
193 that really have constant offsets this large. */
194 #define MAX_OFFSET (64 * 1024)
195
196
197 static bitmap scratch = NULL;
198 struct insn_info;
199
200 /* This structure holds information about a candidate store. */
201 struct store_info
202 {
203
204 /* False means this is a clobber. */
205 bool is_set;
206
207 /* The id of the mem group of the base address. If rtx_varies_p is
208 true, this is -1. Otherwise, it is the index into the group
209 table. */
210 int group_id;
211
212 /* This is the cselib value. */
213 cselib_val *cse_base;
214
215 /* The canonized mem. */
216 rtx mem;
217
218 /* The result of get_addr on mem. */
219 rtx mem_addr;
220
221 /* If this is non-zero, it is the alias set of a spill location. */
222 alias_set_type alias_set;
223
224 /* The offsets of the first byte and of one past the last byte
225 associated with the operation. */
226 int begin, end;
227
228 /* A bitmask as wide as the number of bytes in the word that
229 contains a 1 if the byte may be needed. The store is unused if
230 all of the bits are 0. */
231 long positions_needed;
232
233 /* The next store info for this insn. */
234 struct store_info *next;
235
236 /* The right hand side of the store. This is used if there is a
237 subsequent load from the mem's address somewhere later in the
238 basic block. */
239 rtx rhs;
240 };
241
242 typedef struct store_info *store_info_t;
243 static alloc_pool cse_store_info_pool;
244 static alloc_pool rtx_store_info_pool;
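/* A minimal compiled-out sketch of the positions_needed protocol
   described above; shadow_positions is a hypothetical name.  A store
   of WIDTH bytes starts with the low WIDTH bits set, a later store
   covering bytes [B2, E2) clears the bits it shadows, and the store
   is dead once the mask reaches zero.  */
#if 0
static long
shadow_positions (long positions_needed, int begin, int end, int b2, int e2)
{
  int i;
  for (i = b2; i < e2; i++)
    if (i >= begin && i < end)
      positions_needed &= ~(1L << (i - begin));	/* Byte i is now shadowed.  */
  return positions_needed;	/* Zero means the store is unused.  */
}
#endif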
245
246 /* This structure holds information about a load. These are only
247 built for rtx bases. */
248 struct read_info
249 {
250 /* The id of the mem group of the base address. */
251 int group_id;
252
253 /* If this is non-zero, it is the alias set of a spill location. */
254 alias_set_type alias_set;
255
256 /* The offsets of the first byte and of one past the last byte
257 associated with the operation. If begin == end == 0, the read did not have
258 a constant offset. */
259 int begin, end;
260
261 /* The mem being read. */
262 rtx mem;
263
264 /* The next read_info for this insn. */
265 struct read_info *next;
266 };
267 typedef struct read_info *read_info_t;
268 static alloc_pool read_info_pool;
269
270
271 /* One of these records is created for each insn. */
272
273 struct insn_info
274 {
275 /* Set true if the insn contains a store but the insn itself cannot
276 be deleted. This is set if the insn is a parallel and there is
277 more than one non-dead output or if the insn is in some way
278 volatile. */
279 bool cannot_delete;
280
281 /* This field is only used by the global algorithm. It is set true
282 if the insn contains any read of mem except for a category (1)
283 address. This is also set if the insn is a call or clobbers a mem.
284 If the insn contains a wild read, the read_rec will be null. */
285 bool wild_read;
286
287 /* This field is set for const function calls. Const functions
288 cannot read memory, but they can read the stack because that is
289 where they may get their parameters. So having this set is less
290 severe than a wild read: it just means that only the stores to
291 the stack are killed rather than all stores. */
292 bool stack_read;
293
294 /* This is true if any of the sets within the insn contains a
295 cselib base. Such stores can only be deleted by the local
296 algorithm. */
297 bool contains_cselib_groups;
298
299 /* The insn. */
300 rtx insn;
301
302 /* The list of mem sets or mem clobbers that are contained in this
303 insn. If the insn is deletable, it contains only one mem set.
304 But it could also contain clobbers. Insns that contain more than
305 one mem set are not deletable, but each of those mems is here in
306 order to provide info to delete other insns. */
307 store_info_t store_rec;
308
309 /* The linked list of mem uses in this insn. Only the reads from
310 rtx bases are listed here. The reads from cselib bases are
311 completely processed during the first scan and so are never
312 created. */
313 read_info_t read_rec;
314
315 /* The prev insn in the basic block. */
316 struct insn_info * prev_insn;
317
318 /* The linked list of insns that are in consideration for removal in
319 the forwards pass through the basic block. This pointer may be
320 trash as it is not cleared when a wild read occurs. The only
321 time it is guaranteed to be correct is when the traversal starts
322 at active_local_stores. */
323 struct insn_info * next_local_store;
324 };
325
326 typedef struct insn_info *insn_info_t;
327 static alloc_pool insn_info_pool;
328
329 /* The linked list of stores that are under consideration in this
330 basic block. */
331 static insn_info_t active_local_stores;
332
333 struct bb_info
334 {
335
336 /* Pointer to the insn info for the last insn in the block. These
337 are linked so this is how all of the insns are reached. During
338 scanning this is the current insn being scanned. */
339 insn_info_t last_insn;
340
341 /* The info for the global dataflow problem. */
342
343
344 /* This is set if the transfer function should AND in the wild_read
345 bitmap before applying the kill and gen sets. That vector knocks
346 out most of the bits in the bitmap and thus speeds up the
347 operations. */
348 bool apply_wild_read;
349
350 /* The set of store positions that exist in this block before a wild read. */
351 bitmap gen;
352
353 /* The set of load positions that exist in this block above the
354 same position of a store. */
355 bitmap kill;
356
357 /* The set of stores that reach the top of the block without being
358 killed by a read.
359
360 Do not represent the in set if it is all ones. Note that this is
361 what the bitvector should logically be initialized to for a set
362 intersection problem. However, like the kill set, this is too
363 expensive. So initially, the in set will only be created for the
364 exit block and any block that contains a wild read. */
365 bitmap in;
366
367 /* The set of stores that reach the bottom of the block from its
368 successors.
369
370 Do not represent the out set if it is all ones. Note that this is
371 what the bitvector should logically be initialized to for a set
372 intersection problem. However, like the kill and in set, this is
373 too expensive. So what is done is that the confluence operator
374 just initializes the vector from one of the out sets of the
375 successors of the block. */
376 bitmap out;
377 };
378
379 typedef struct bb_info *bb_info_t;
380 static alloc_pool bb_info_pool;
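/* A compiled-out sketch of the backward transfer function implied by
   the comments above, with an unsigned long standing in for a bitmap;
   all names here are hypothetical.  The real pass does this with the
   df framework and bitmap operations.  */
#if 0
static unsigned long
dse_transfer (unsigned long out, unsigned long gen, unsigned long kill,
              unsigned long wild_read, int apply_wild_read)
{
  if (apply_wild_read)
    out &= wild_read;		/* Knock out most of the positions.  */
  return gen | (out & ~kill);	/* in = gen U (out - kill).  */
}
#endif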
381
382 /* Table to hold all bb_infos. */
383 static bb_info_t *bb_table;
384
385 /* There is a group_info for each rtx base that is used to reference
386 memory. There are not many of these rtx bases because they are
387 very limited in scope. */
388
389 struct group_info
390 {
391 /* The actual base of the address. */
392 rtx rtx_base;
393
394 /* The sequential id of the base. This allows us to have a
395 canonical ordering of these that is not based on addresses. */
396 int id;
397
398 /* A mem wrapped around the base pointer for the group in order to
399 do read dependency. */
400 rtx base_mem;
401
402 /* Canonized version of base_mem, most likely the same thing. */
403 rtx canon_base_mem;
404
405 /* These two sets of two bitmaps are used to keep track of how many
406 stores are actually referencing that position from this base. We
407 only do this for rtx bases as this will be used to assign
408 positions in the bitmaps for the global problem. Bit N is set in
409 store1 on the first store for offset N. Bit N is set in store2
410 for the second store to offset N. This is all we need since we
411 only care about offsets that have two or more stores for them.
412
413 The "_n" suffix is for offsets less than 0 and the "_p" suffix is
414 for 0 and greater offsets.
415
416 There is one special case here, for stores into the stack frame,
417 we will OR store1 into store2 before deciding which stores to look
418 at globally. This is because stores to the stack frame that have
419 no other reads before the end of the function can also be
420 deleted. */
421 bitmap store1_n, store1_p, store2_n, store2_p;
422
423 /* The positions in this bitmap have the same assignments as the in,
424 out, gen and kill bitmaps. This bitmap is all zeros except for
425 the positions that are occupied by stores for this group. */
426 bitmap group_kill;
427
428 /* True if there are any positions that are to be processed
429 globally. */
430 bool process_globally;
431
432 /* True if the base of this group is either the frame_pointer or
433 hard_frame_pointer. */
434 bool frame_related;
435
436 /* The offset_map is used to map the offsets from this base into
437 positions in the global bitmaps. It is only created after all of
438 the stores have been scanned and we know which ones we
439 care about. */
440 int *offset_map_n, *offset_map_p;
441 int offset_map_size_n, offset_map_size_p;
442 };
443 typedef struct group_info *group_info_t;
444 typedef const struct group_info *const_group_info_t;
445 static alloc_pool rtx_group_info_pool;
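/* A compiled-out sketch of the store1/store2 protocol above, with a
   plain word standing in for one of the bitmaps; note_store is a
   hypothetical name.  The real code additionally picks the _n or _p
   bitmap pair by the sign of the offset and indexes with its
   absolute value.  */
#if 0
static void
note_store (unsigned long *store1, unsigned long *store2, int ai)
{
  if (*store1 & (1UL << ai))
    *store2 |= 1UL << ai;	/* Second store: globally interesting.  */
  else
    *store1 |= 1UL << ai;	/* First store to this offset.  */
}
#endif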
446
447 /* Tables of group_info structures, hashed by base value. */
448 static htab_t rtx_group_table;
449
450 /* Index into the rtx_group_vec. */
451 static int rtx_group_next_id;
452
453 DEF_VEC_P(group_info_t);
454 DEF_VEC_ALLOC_P(group_info_t,heap);
455
456 static VEC(group_info_t,heap) *rtx_group_vec;
457
458
459 /* This structure holds the set of changes that are being deferred
460 when removing a read operation. See replace_read. */
461 struct deferred_change
462 {
463
464 /* The mem that is being replaced. */
465 rtx *loc;
466
467 /* The reg it is being replaced with. */
468 rtx reg;
469
470 struct deferred_change *next;
471 };
472
473 typedef struct deferred_change *deferred_change_t;
474 static alloc_pool deferred_change_pool;
475
476 static deferred_change_t deferred_change_list = NULL;
477
478 /* This is used to hold the alias sets of spill variables. Since
479 these are never aliased and there may be a lot of them, it makes
480 sense to treat them specially. This bitvector is only allocated in
481 calls from dse_record_singleton_alias_set which currently is only
482 made during reload1. So when dse is called before reload this
483 mechanism does nothing. */
484
485 static bitmap clear_alias_sets = NULL;
486
487 /* The set of clear_alias_sets that have been disqualified because
488 there are loads or stores using a different mode than the alias set
489 was registered with. */
490 static bitmap disqualified_clear_alias_sets = NULL;
491
492 /* The group that holds all of the clear_alias_sets. */
493 static group_info_t clear_alias_group;
494
495 /* The modes of the clear_alias_sets. */
496 static htab_t clear_alias_mode_table;
497
498 /* Hash table element to look up the mode for an alias set. */
499 struct clear_alias_mode_holder
500 {
501 alias_set_type alias_set;
502 enum machine_mode mode;
503 };
504
505 static alloc_pool clear_alias_mode_pool;
506
507 /* This is true except for two cases:
508 (1) current_function_stdarg -- i.e. we cannot do this
509 for vararg functions because they play games with the frame.
510 (2) In Ada, it is sometimes not safe to assume that any stores
511 based off the stack frame go dead at the exit to a function. */
512 static bool stores_off_frame_dead_at_return;
513
514 /* Counters for stats. */
515 static int globally_deleted;
516 static int locally_deleted;
517 static int spill_deleted;
518
519 static bitmap all_blocks;
520
521 /* The number of bits used in the global bitmaps. */
522 static unsigned int current_position;
523
524
525 static bool gate_dse (void);
526
527 \f
528 /*----------------------------------------------------------------------------
529 Zeroth step.
530
531 Initialization.
532 ----------------------------------------------------------------------------*/
533
534 /* Hashtable callbacks for the clear_alias_mode_table, which maps
535 alias sets to the mode with which they were registered. */
536
537 static int
538 clear_alias_mode_eq (const void *p1, const void *p2)
539 {
540 const struct clear_alias_mode_holder * h1
541 = (const struct clear_alias_mode_holder *) p1;
542 const struct clear_alias_mode_holder * h2
543 = (const struct clear_alias_mode_holder *) p2;
544 return h1->alias_set == h2->alias_set;
545 }
546
547
548 static hashval_t
549 clear_alias_mode_hash (const void *p)
550 {
551 const struct clear_alias_mode_holder *holder
552 = (const struct clear_alias_mode_holder *) p;
553 return holder->alias_set;
554 }
555
556
557 /* Find the entry associated with ALIAS_SET. */
558
559 static struct clear_alias_mode_holder *
560 clear_alias_set_lookup (alias_set_type alias_set)
561 {
562 struct clear_alias_mode_holder tmp_holder;
563 void **slot;
564
565 tmp_holder.alias_set = alias_set;
566 slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, NO_INSERT);
567 gcc_assert (*slot);
568
569 return *slot;
570 }
571
572
573 /* Hashtable callbacks for maintaining the "bases" field of
574 store_group_info, given that the addresses are function invariants. */
575
576 static int
577 invariant_group_base_eq (const void *p1, const void *p2)
578 {
579 const_group_info_t gi1 = (const_group_info_t) p1;
580 const_group_info_t gi2 = (const_group_info_t) p2;
581 return rtx_equal_p (gi1->rtx_base, gi2->rtx_base);
582 }
583
584
585 static hashval_t
586 invariant_group_base_hash (const void *p)
587 {
588 const_group_info_t gi = (const_group_info_t) p;
589 int do_not_record;
590 return hash_rtx (gi->rtx_base, Pmode, &do_not_record, NULL, false);
591 }
592
593
594 /* Get the GROUP for BASE. Add a new group if it is not there. */
595
596 static group_info_t
597 get_group_info (rtx base)
598 {
599 struct group_info tmp_gi;
600 group_info_t gi;
601 void **slot;
602
603 if (base)
604 {
605 /* Find the group_info structure for BASE, creating a new one
606 if necessary. */
607 tmp_gi.rtx_base = base;
608 slot = htab_find_slot (rtx_group_table, &tmp_gi, INSERT);
609 gi = (group_info_t) *slot;
610 }
611 else
612 {
613 if (!clear_alias_group)
614 {
615 clear_alias_group = gi = pool_alloc (rtx_group_info_pool);
616 memset (gi, 0, sizeof (struct group_info));
617 gi->id = rtx_group_next_id++;
618 gi->store1_n = BITMAP_ALLOC (NULL);
619 gi->store1_p = BITMAP_ALLOC (NULL);
620 gi->store2_n = BITMAP_ALLOC (NULL);
621 gi->store2_p = BITMAP_ALLOC (NULL);
622 gi->group_kill = BITMAP_ALLOC (NULL);
623 gi->process_globally = false;
624 gi->offset_map_size_n = 0;
625 gi->offset_map_size_p = 0;
626 gi->offset_map_n = NULL;
627 gi->offset_map_p = NULL;
628 VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
629 }
630 return clear_alias_group;
631 }
632
633 if (gi == NULL)
634 {
635 *slot = gi = pool_alloc (rtx_group_info_pool);
636 gi->rtx_base = base;
637 gi->id = rtx_group_next_id++;
638 gi->base_mem = gen_rtx_MEM (QImode, base);
639 gi->canon_base_mem = canon_rtx (gi->base_mem);
640 gi->store1_n = BITMAP_ALLOC (NULL);
641 gi->store1_p = BITMAP_ALLOC (NULL);
642 gi->store2_n = BITMAP_ALLOC (NULL);
643 gi->store2_p = BITMAP_ALLOC (NULL);
644 gi->group_kill = BITMAP_ALLOC (NULL);
645 gi->process_globally = false;
646 gi->frame_related =
647 (base == frame_pointer_rtx) || (base == hard_frame_pointer_rtx);
648 gi->offset_map_size_n = 0;
649 gi->offset_map_size_p = 0;
650 gi->offset_map_n = NULL;
651 gi->offset_map_p = NULL;
652 VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
653 }
654
655 return gi;
656 }
657
658
659 /* Initialization of data structures. */
660
661 static void
662 dse_step0 (void)
663 {
664 locally_deleted = 0;
665 globally_deleted = 0;
666 spill_deleted = 0;
667
668 scratch = BITMAP_ALLOC (NULL);
669
670 rtx_store_info_pool
671 = create_alloc_pool ("rtx_store_info_pool",
672 sizeof (struct store_info), 100);
673 read_info_pool
674 = create_alloc_pool ("read_info_pool",
675 sizeof (struct read_info), 100);
676 insn_info_pool
677 = create_alloc_pool ("insn_info_pool",
678 sizeof (struct insn_info), 100);
679 bb_info_pool
680 = create_alloc_pool ("bb_info_pool",
681 sizeof (struct bb_info), 100);
682 rtx_group_info_pool
683 = create_alloc_pool ("rtx_group_info_pool",
684 sizeof (struct group_info), 100);
685 deferred_change_pool
686 = create_alloc_pool ("deferred_change_pool",
687 sizeof (struct deferred_change), 10);
688
689 rtx_group_table = htab_create (11, invariant_group_base_hash,
690 invariant_group_base_eq, NULL);
691
692 bb_table = XCNEWVEC (bb_info_t, last_basic_block);
693 rtx_group_next_id = 0;
694
695 stores_off_frame_dead_at_return =
696 (!(TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE
697 && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))))
698 && (!current_function_stdarg);
699
700 init_alias_analysis ();
701
702 if (clear_alias_sets)
703 clear_alias_group = get_group_info (NULL);
704 else
705 clear_alias_group = NULL;
706 }
707
708
709 \f
710 /*----------------------------------------------------------------------------
711 First step.
712
713 Scan all of the insns. Any random ordering of the blocks is fine.
714 Each block is scanned in forward order to accommodate cselib which
715 is used to remove stores with non-constant bases.
716 ----------------------------------------------------------------------------*/
717
718 /* Delete all of the store_info recs from INSN_INFO. */
719
720 static void
721 free_store_info (insn_info_t insn_info)
722 {
723 store_info_t store_info = insn_info->store_rec;
724 while (store_info)
725 {
726 store_info_t next = store_info->next;
727 if (store_info->cse_base)
728 pool_free (cse_store_info_pool, store_info);
729 else
730 pool_free (rtx_store_info_pool, store_info);
731 store_info = next;
732 }
733
734 insn_info->cannot_delete = true;
735 insn_info->contains_cselib_groups = false;
736 insn_info->store_rec = NULL;
737 }
738
739
740 struct insn_size {
741 int size;
742 rtx insn;
743 };
744
745
746 /* Add an insn to do the add inside X if it is a
747 PRE/POST-INC/DEC/MODIFY. D is a structure containing the insn and
748 the size of the mode of the MEM that X is inside of. */
749
750 static int
751 replace_inc_dec (rtx *r, void *d)
752 {
753 rtx x = *r;
754 struct insn_size *data = (struct insn_size *)d;
755 switch (GET_CODE (x))
756 {
757 case PRE_INC:
758 case POST_INC:
759 {
760 rtx r1 = XEXP (x, 0);
761 rtx c = gen_int_mode (data->size, Pmode);
762 add_insn_before (data->insn,
763 gen_rtx_SET (Pmode, r1,
764 gen_rtx_PLUS (Pmode, r1, c)),
765 NULL);
766 return -1;
767 }
768
769 case PRE_DEC:
770 case POST_DEC:
771 {
772 rtx r1 = XEXP (x, 0);
773 rtx c = gen_int_mode (-data->size, Pmode);
774 add_insn_before (data->insn,
775 gen_rtx_SET (Pmode, r1,
776 gen_rtx_PLUS (Pmode, r1, c)),
777 NULL);
778 return -1;
779 }
780
781 case PRE_MODIFY:
782 case POST_MODIFY:
783 {
784 /* We can reuse the add because we are about to delete the
785 insn that contained it. */
786 rtx add = XEXP (x, 0);
787 rtx r1 = XEXP (add, 0);
788 add_insn_before (data->insn,
789 gen_rtx_SET (Pmode, r1, add), NULL);
790 return -1;
791 }
792
793 default:
794 return 0;
795 }
796 }
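/* A compiled-out, source-level illustration of the rewrite above: the
   embedded side effect of a POST_INC address must survive even though
   the store that contains it is about to be deleted, so an explicit
   add is emitted before the insn.  */
#if 0
static void
with_post_inc (int **p, int x)
{
  *(*p)++ = x;		/* Store with an embedded POST_INC.  */
}

static void
after_rewrite (int **p, int x)
{
  **p = x;		/* The (possibly dead) store...  */
  *p += 1;		/* ...and the increment as a separate insn.  */
}
#endif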
797
798
799 /* If X is a MEM, check the address to see if it is PRE/POST-INC/DEC/MODIFY
800 and generate an add to replace that. */
801
802 static int
803 replace_inc_dec_mem (rtx *r, void *d)
804 {
805 rtx x = *r;
806 if (GET_CODE (x) == MEM)
807 {
808 struct insn_size data;
809
810 data.size = GET_MODE_SIZE (GET_MODE (x));
811 data.insn = (rtx)d;
812
813 for_each_rtx (&XEXP (x, 0), replace_inc_dec, &data);
814
815 return -1;
816 }
817 return 0;
818 }
819
820 /* Before we delete INSN, make sure that the auto inc/dec, if it is
821 there, is split into a separate insn. */
822
823 static void
824 check_for_inc_dec (rtx insn)
825 {
826 rtx note = find_reg_note (insn, REG_INC, NULL_RTX);
827 if (note)
828 for_each_rtx (&insn, replace_inc_dec_mem, insn);
829 }
830
831
832 /* Delete the insn and free all of the fields inside INSN_INFO. */
833
834 static void
835 delete_dead_store_insn (insn_info_t insn_info)
836 {
837 read_info_t read_info;
838
839 if (!dbg_cnt (dse))
840 return;
841
842 check_for_inc_dec (insn_info->insn);
843 if (dump_file)
844 {
845 fprintf (dump_file, "Locally deleting insn %d ",
846 INSN_UID (insn_info->insn));
847 if (insn_info->store_rec->alias_set)
848 fprintf (dump_file, "alias set %d\n",
849 (int) insn_info->store_rec->alias_set);
850 else
851 fprintf (dump_file, "\n");
852 }
853
854 free_store_info (insn_info);
855 read_info = insn_info->read_rec;
856
857 while (read_info)
858 {
859 read_info_t next = read_info->next;
860 pool_free (read_info_pool, read_info);
861 read_info = next;
862 }
863 insn_info->read_rec = NULL;
864
865 delete_insn (insn_info->insn);
866 locally_deleted++;
867 insn_info->insn = NULL;
868
869 insn_info->wild_read = false;
870 }
871
872
873 /* Set the store* bitmaps and the offset_map_size* fields in GROUP
874 based on OFFSET and WIDTH. */
875
876 static void
877 set_usage_bits (group_info_t group, HOST_WIDE_INT offset, HOST_WIDE_INT width)
878 {
879 HOST_WIDE_INT i;
880
881 if ((offset > -MAX_OFFSET) && (offset < MAX_OFFSET))
882 for (i = offset; i < offset + width; i++)
883 {
884 bitmap store1;
885 bitmap store2;
886 int ai;
887 if (i < 0)
888 {
889 store1 = group->store1_n;
890 store2 = group->store2_n;
891 ai = -i;
892 }
893 else
894 {
895 store1 = group->store1_p;
896 store2 = group->store2_p;
897 ai = i;
898 }
899
900 if (bitmap_bit_p (store1, ai))
901 bitmap_set_bit (store2, ai);
902 else
903 {
904 bitmap_set_bit (store1, ai);
905 if (i < 0)
906 {
907 if (group->offset_map_size_n < ai)
908 group->offset_map_size_n = ai;
909 }
910 else
911 {
912 if (group->offset_map_size_p < ai)
913 group->offset_map_size_p = ai;
914 }
915 }
916 }
917 }
918
919
920 /* Set the BB_INFO so that the last insn is marked as a wild read. */
921
922 static void
923 add_wild_read (bb_info_t bb_info)
924 {
925 insn_info_t insn_info = bb_info->last_insn;
926 read_info_t *ptr = &insn_info->read_rec;
927
928 while (*ptr)
929 {
930 read_info_t next = (*ptr)->next;
931 if ((*ptr)->alias_set == 0)
932 {
933 pool_free (read_info_pool, *ptr);
934 *ptr = next;
935 }
936 else
937 ptr = &(*ptr)->next;
938 }
939 insn_info->wild_read = true;
940 active_local_stores = NULL;
941 }
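/* A compiled-out example of a wild read: a read through an unknown
   pointer may alias any active store, so rather than reason about it
   the local algorithm simply empties active_local_stores.  */
#if 0
static int g2;

static int
wild_read_example (int *p, int x)
{
  g2 = x;	/* Cannot be proved dead across the next line...  */
  return *p;	/* ...because *p may alias g2.  */
}
#endif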
942
943
944 /* Return true if X is a constant or one of the registers that behaves
945 as a constant over the life of a function. */
946
947 static bool
948 const_or_frame_p (rtx x)
949 {
950 switch (GET_CODE (x))
951 {
952 case MEM:
953 return MEM_READONLY_P (x);
954
955 case CONST:
956 case CONST_INT:
957 case CONST_DOUBLE:
958 case CONST_VECTOR:
959 case SYMBOL_REF:
960 case LABEL_REF:
961 return true;
962
963 case REG:
964 /* Note that we have to test for the actual rtx used for the frame
965 and arg pointers and not just the register number in case we have
966 eliminated the frame and/or arg pointer and are using it
967 for pseudos. */
968 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
969 /* The arg pointer varies if it is not a fixed register. */
970 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
971 || x == pic_offset_table_rtx)
972 return true;
973 return false;
974
975 default:
976 return false;
977 }
978 }
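/* Compiled-out examples of how the address categories in the header
   comment relate to this predicate: the address of a file-scope array
   passes const_or_frame_p (category 1 when the offset is constant),
   while a base loaded through a pointer does not and is left to the
   cselib-based local algorithm.  */
#if 0
static int g3[4];

static void
category_examples (int **pp, int x)
{
  g3[1] = x;	/* Constant base, constant offset: global problem.  */
  (*pp)[1] = x;	/* Varying base: local algorithm only.  */
}
#endif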
979
980 /* Take all reasonable action to put the address of MEM into the form
981 that we can do analysis on.
982
983 The gold standard is to get the address into the form: address +
984 OFFSET where address is something that rtx_varies_p considers a
985 constant. When we can get the address in this form, we can do
986 global analysis on it. Note that for constant bases, address is
987 not actually returned, only the group_id. The address can be
988 obtained from that.
989
990 If that fails, we try cselib to get a value we can at least use
991 locally. If that fails we return false.
992
993 The GROUP_ID is set to -1 for cselib bases and the index of the
994 group for non_varying bases.
995
996 FOR_READ is true if this is a mem read and false if not. */
997
998 static bool
999 canon_address (rtx mem,
1000 alias_set_type *alias_set_out,
1001 int *group_id,
1002 HOST_WIDE_INT *offset,
1003 cselib_val **base)
1004 {
1005 rtx mem_address = XEXP (mem, 0);
1006 rtx expanded_address, address;
1007 /* Make sure that cselib has initialized all of the operands of
1008 the address before asking it to do the subst. */
1009
1010 if (clear_alias_sets)
1011 {
1012 /* If this is a spill, do not do any further processing. */
1013 alias_set_type alias_set = MEM_ALIAS_SET (mem);
1014 if (dump_file)
1015 fprintf (dump_file, "found alias set %d\n", (int) alias_set);
1016 if (bitmap_bit_p (clear_alias_sets, alias_set))
1017 {
1018 struct clear_alias_mode_holder *entry
1019 = clear_alias_set_lookup (alias_set);
1020
1021 /* If the modes do not match, we cannot process this set. */
1022 if (entry->mode != GET_MODE (mem))
1023 {
1024 if (dump_file)
1025 fprintf (dump_file,
1026 "disqualifying alias set %d, (%s) != (%s)\n",
1027 (int) alias_set, GET_MODE_NAME (entry->mode),
1028 GET_MODE_NAME (GET_MODE (mem)));
1029
1030 bitmap_set_bit (disqualified_clear_alias_sets, alias_set);
1031 return false;
1032 }
1033
1034 *alias_set_out = alias_set;
1035 *group_id = clear_alias_group->id;
1036 return true;
1037 }
1038 }
1039
1040 *alias_set_out = 0;
1041
1042 cselib_lookup (mem_address, Pmode, 1);
1043
1044 if (dump_file)
1045 {
1046 fprintf (dump_file, " mem: ");
1047 print_inline_rtx (dump_file, mem_address, 0);
1048 fprintf (dump_file, "\n");
1049 }
1050
1051 /* Use cselib to replace all of the reg references with the full
1052 expression. This will take care of the case where we have
1053
1054 r_x = base + offset;
1055 val = *r_x;
1056
1057 by making it into
1058
1059 val = *(base + offset);
1060 */
1061
1062 expanded_address = cselib_expand_value_rtx (mem_address, scratch, 5);
1063
1064 /* If this fails, just go with the mem_address. */
1065 if (!expanded_address)
1066 expanded_address = mem_address;
1067
1068 /* Split the address into canonical BASE + OFFSET terms. */
1069 address = canon_rtx (expanded_address);
1070
1071 *offset = 0;
1072
1073 if (dump_file)
1074 {
1075 fprintf (dump_file, "\n after cselib_expand address: ");
1076 print_inline_rtx (dump_file, expanded_address, 0);
1077 fprintf (dump_file, "\n");
1078
1079 fprintf (dump_file, "\n after canon_rtx address: ");
1080 print_inline_rtx (dump_file, address, 0);
1081 fprintf (dump_file, "\n");
1082 }
1083
1084 if (GET_CODE (address) == CONST)
1085 address = XEXP (address, 0);
1086
1087 if (GET_CODE (address) == PLUS && GET_CODE (XEXP (address, 1)) == CONST_INT)
1088 {
1089 *offset = INTVAL (XEXP (address, 1));
1090 address = XEXP (address, 0);
1091 }
1092
1093 if (const_or_frame_p (address))
1094 {
1095 group_info_t group = get_group_info (address);
1096
1097 if (dump_file)
1098 fprintf (dump_file, " gid=%d offset=%d \n", group->id, (int)*offset);
1099 *base = NULL;
1100 *group_id = group->id;
1101 }
1102 else
1103 {
1104 *base = cselib_lookup (address, Pmode, true);
1105 *group_id = -1;
1106
1107 if (*base == NULL)
1108 {
1109 if (dump_file)
1110 fprintf (dump_file, " no cselib val - should be a wild read.\n");
1111 return false;
1112 }
1113 if (dump_file)
1114 fprintf (dump_file, " varying cselib base=%d offset = %d\n",
1115 (*base)->value, (int)*offset);
1116 }
1117 return true;
1118 }
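/* A compiled-out illustration of why canon_address expands registers
   through cselib: the two reads below use syntactically different
   addresses, but after expansion and canon_rtx both become
   *(base + 2), i.e. offset 8 with 4-byte ints, and the rest of the
   pass can compare them.  */
#if 0
static int
same_address (int *base)
{
  int *r_x = base + 2;		/* r_x = base + offset  */
  return *r_x + base[2];	/* Both reads use the same canonical address.  */
}
#endif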
1119
1120
1121 /* Clear the rhs field from the active_local_stores array. */
1122
1123 static void
1124 clear_rhs_from_active_local_stores (void)
1125 {
1126 insn_info_t ptr = active_local_stores;
1127
1128 while (ptr)
1129 {
1130 store_info_t store_info = ptr->store_rec;
1131 /* Skip the clobbers. */
1132 while (!store_info->is_set)
1133 store_info = store_info->next;
1134
1135 store_info->rhs = NULL;
1136
1137 ptr = ptr->next_local_store;
1138 }
1139 }
1140
1141
1142 /* BODY is an instruction pattern that belongs to INSN. Return 1 if
1143 there is a candidate store, after adding it to the appropriate
1144 local store group if so. */
1145
1146 static int
1147 record_store (rtx body, bb_info_t bb_info)
1148 {
1149 rtx mem;
1150 HOST_WIDE_INT offset = 0;
1151 HOST_WIDE_INT width = 0;
1152 alias_set_type spill_alias_set;
1153 insn_info_t insn_info = bb_info->last_insn;
1154 store_info_t store_info = NULL;
1155 int group_id;
1156 cselib_val *base = NULL;
1157 insn_info_t ptr, last;
1158 bool store_is_unused;
1159
1160 if (GET_CODE (body) != SET && GET_CODE (body) != CLOBBER)
1161 return 0;
1162
1163 /* If the set is unused, then it cannot be used to keep the insn
1164 from being deleted. On the other hand, it does provide something
1165 that can be used to prove that another store is dead. */
1166 store_is_unused
1167 = (find_reg_note (insn_info->insn, REG_UNUSED, body) != NULL);
1168
1169 /* Check whether that value is a suitable memory location. */
1170 mem = SET_DEST (body);
1171 if (!MEM_P (mem))
1172 {
1173 /* If the set or clobber is unused, then it does not affect our
1174 ability to get rid of the entire insn. */
1175 if (!store_is_unused)
1176 insn_info->cannot_delete = true;
1177 return 0;
1178 }
1179
1180 /* At this point we know mem is a mem. */
1181 if (GET_MODE (mem) == BLKmode)
1182 {
1183 if (GET_CODE (XEXP (mem, 0)) == SCRATCH)
1184 {
1185 if (dump_file)
1186 fprintf (dump_file, " adding wild read for (clobber (mem:BLK (scratch))\n");
1187 add_wild_read (bb_info);
1188 insn_info->cannot_delete = true;
1189 }
1190 else if (!store_is_unused)
1191 {
1192 /* If the set or clobber is unused, then it does not affect our
1193 ability to get rid of the entire insn. */
1194 insn_info->cannot_delete = true;
1195 clear_rhs_from_active_local_stores ();
1196 }
1197 return 0;
1198 }
1199
1200 /* We can still process a volatile mem, we just cannot delete it. */
1201 if (MEM_VOLATILE_P (mem))
1202 insn_info->cannot_delete = true;
1203
1204 if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
1205 {
1206 clear_rhs_from_active_local_stores ();
1207 return 0;
1208 }
1209
1210 width = GET_MODE_SIZE (GET_MODE (mem));
1211
1212 if (spill_alias_set)
1213 {
1214 bitmap store1 = clear_alias_group->store1_p;
1215 bitmap store2 = clear_alias_group->store2_p;
1216
1217 if (bitmap_bit_p (store1, spill_alias_set))
1218 bitmap_set_bit (store2, spill_alias_set);
1219 else
1220 bitmap_set_bit (store1, spill_alias_set);
1221
1222 if (clear_alias_group->offset_map_size_p < spill_alias_set)
1223 clear_alias_group->offset_map_size_p = spill_alias_set;
1224
1225 store_info = pool_alloc (rtx_store_info_pool);
1226
1227 if (dump_file)
1228 fprintf (dump_file, " processing spill store %d(%s)\n",
1229 (int) spill_alias_set, GET_MODE_NAME (GET_MODE (mem)));
1230 }
1231 else if (group_id >= 0)
1232 {
1233 /* In the restrictive case where the base is a constant or the
1234 frame pointer we can do global analysis. */
1235
1236 group_info_t group
1237 = VEC_index (group_info_t, rtx_group_vec, group_id);
1238
1239 store_info = pool_alloc (rtx_store_info_pool);
1240 set_usage_bits (group, offset, width);
1241
1242 if (dump_file)
1243 fprintf (dump_file, " processing const base store gid=%d[%d..%d)\n",
1244 group_id, (int)offset, (int)(offset+width));
1245 }
1246 else
1247 {
1248 store_info = pool_alloc (cse_store_info_pool);
1249 insn_info->contains_cselib_groups = true;
1250 group_id = -1;
1251
1252 if (dump_file)
1253 fprintf (dump_file, " processing cselib store [%d..%d)\n",
1254 (int)offset, (int)(offset+width));
1255 }
1256
1257 /* Check to see if this store causes some other stores to be
1258 dead. */
1259 ptr = active_local_stores;
1260 last = NULL;
1261
1262 while (ptr)
1263 {
1264 insn_info_t next = ptr->next_local_store;
1265 store_info_t s_info = ptr->store_rec;
1266 bool delete = true;
1267
1268 /* Skip the clobbers. We delete the active insn if this insn
1269 shadows the set. To have been put on the active list, it
1270 has exactly one set. */
1271 while (!s_info->is_set)
1272 s_info = s_info->next;
1273
1274 if (s_info->alias_set != spill_alias_set)
1275 delete = false;
1276 else if (s_info->alias_set)
1277 {
1278 struct clear_alias_mode_holder *entry
1279 = clear_alias_set_lookup (s_info->alias_set);
1280 /* Generally, spills cannot be processed if any of the
1281 references to the slot have a different mode. But if
1282 we are in the same block and mode is exactly the same
1283 between this store and one before in the same block,
1284 we can still delete it. */
1285 if ((GET_MODE (mem) == GET_MODE (s_info->mem))
1286 && (GET_MODE (mem) == entry->mode))
1287 {
1288 delete = true;
1289 s_info->positions_needed = 0;
1290 }
1291 if (dump_file)
1292 fprintf (dump_file, " trying spill store in insn=%d alias_set=%d\n",
1293 INSN_UID (ptr->insn), (int) s_info->alias_set);
1294 }
1295 else if ((s_info->group_id == group_id)
1296 && (s_info->cse_base == base))
1297 {
1298 HOST_WIDE_INT i;
1299 if (dump_file)
1300 fprintf (dump_file, " trying store in insn=%d gid=%d[%d..%d)\n",
1301 INSN_UID (ptr->insn), s_info->group_id,
1302 (int)s_info->begin, (int)s_info->end);
1303 for (i = offset; i < offset+width; i++)
1304 if (i >= s_info->begin && i < s_info->end)
1305 s_info->positions_needed &= ~(1L << (i - s_info->begin));
1306 }
1307 else if (s_info->rhs)
1308 /* Need to see if it is possible for this store to overwrite
1309 the value of store_info. If it is, set the rhs to NULL to
1310 keep it from being used to remove a load. */
1311 {
1312 if (canon_true_dependence (s_info->mem,
1313 GET_MODE (s_info->mem),
1314 s_info->mem_addr,
1315 mem, rtx_varies_p))
1316 s_info->rhs = NULL;
1317 }
1318
1319 /* An insn can be deleted if every position of every one of
1320 its s_infos is zero. */
1321 if (s_info->positions_needed != 0)
1322 delete = false;
1323
1324 if (delete)
1325 {
1326 insn_info_t insn_to_delete = ptr;
1327
1328 if (last)
1329 last->next_local_store = ptr->next_local_store;
1330 else
1331 active_local_stores = ptr->next_local_store;
1332
1333 delete_dead_store_insn (insn_to_delete);
1334 }
1335 else
1336 last = ptr;
1337
1338 ptr = next;
1339 }
1340
1341 gcc_assert ((unsigned) width < sizeof (store_info->positions_needed) * CHAR_BIT);
1342
1343 /* Finish filling in the store_info. */
1344 store_info->next = insn_info->store_rec;
1345 insn_info->store_rec = store_info;
1346 store_info->mem = canon_rtx (mem);
1347 store_info->alias_set = spill_alias_set;
1348 store_info->mem_addr = get_addr (XEXP (mem, 0));
1349 store_info->cse_base = base;
1350 store_info->positions_needed = (1L << width) - 1;
1351 store_info->group_id = group_id;
1352 store_info->begin = offset;
1353 store_info->end = offset + width;
1354 store_info->is_set = GET_CODE (body) == SET;
1355
1356 if (store_info->is_set
1357 /* No place to keep the value after ra. */
1358 && !reload_completed
1359 /* The careful reviewer may wish to question my checking that the
1360 rhs of a store is always a reg. */
1361 && REG_P (SET_SRC (body))
1362 /* Sometimes the store and reload is used for truncation and
1363 rounding. */
1364 && !(FLOAT_MODE_P (GET_MODE (mem)) && (flag_float_store)))
1365 store_info->rhs = SET_SRC (body);
1366 else
1367 store_info->rhs = NULL;
1368
1369 /* If this is a clobber, we return 0. We will only be able to
1370 delete this insn if there is only one used store, but we
1371 can use the clobber to delete other stores earlier. */
1372 return store_info->is_set ? 1 : 0;
1373 }
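/* A compiled-out example of the byte-level tracking in record_store:
   the wider store below dies only once the two narrower stores have
   cleared every bit of its positions_needed mask.  */
#if 0
static void
partial_kill (short *p, short a, char b0, char b1)
{
  *p = a;		/* Mask starts with both byte bits set.  */
  ((char *) p)[0] = b0;	/* Clears bit 0 of the mask.  */
  ((char *) p)[1] = b1;	/* Clears bit 1: *p = a is now dead.  */
}
#endif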
1374
1375
1376 static void
1377 dump_insn_info (const char * start, insn_info_t insn_info)
1378 {
1379 fprintf (dump_file, "%s insn=%d %s\n", start,
1380 INSN_UID (insn_info->insn),
1381 insn_info->store_rec ? "has store" : "naked");
1382 }
1383
1384
1385 /* If the modes are different and the value's source and target do not
1386 line up, we need to extract the value from the lower part of the rhs of
1387 the store, shift it, and then put it into a form that can be shoved
1388 into the read_insn. This function generates a right SHIFT of a
1389 value that is at least ACCESS_SIZE bytes wide, in READ_MODE. The
1390 shift sequence is returned, or NULL if we failed to find a
1391 shift. */
1392
1393 static rtx
1394 find_shift_sequence (rtx read_reg,
1395 int access_size,
1396 store_info_t store_info,
1397 read_info_t read_info,
1398 int shift)
1399 {
1400 enum machine_mode store_mode = GET_MODE (store_info->mem);
1401 enum machine_mode read_mode = GET_MODE (read_info->mem);
1402
1403 /* Some machines like the x86 have shift insns for each size of
1404 operand. Other machines like the ppc or the ia-64 may only have
1405 shift insns that shift values within 32 or 64 bit registers.
1406 This loop tries to find the smallest shift insn that will right
1407 justify the value we want to read but is available in one insn on
1408 the machine. */
1409
1410 for (; access_size <= UNITS_PER_WORD; access_size *= 2)
1411 {
1412 rtx target, new_reg;
1413 enum machine_mode new_mode;
1414
1415 /* Try a wider mode if truncating the store mode to ACCESS_SIZE
1416 bytes requires a real instruction. */
1417 if (access_size < GET_MODE_SIZE (store_mode)
1418 && !TRULY_NOOP_TRUNCATION (access_size * BITS_PER_UNIT,
1419 GET_MODE_BITSIZE (store_mode)))
1420 continue;
1421
1422 new_mode = smallest_mode_for_size (access_size * BITS_PER_UNIT,
1423 GET_MODE_CLASS (read_mode));
1424 new_reg = gen_reg_rtx (new_mode);
1425
1426 start_sequence ();
1427
1428 /* In theory we could also check for an ashr. Ian Taylor knows
1429 of one dsp where the cost of these two was not the same. But
1430 this really is a rare case anyway. */
1431 target = expand_binop (new_mode, lshr_optab, new_reg,
1432 GEN_INT (shift), new_reg, 1, OPTAB_DIRECT);
1433
1434 if (target == new_reg)
1435 {
1436 rtx shift_seq = get_insns ();
1437 end_sequence ();
1438
1439 /* If cost is too great, set target to NULL and
1440 let the iteration happen. */
1441 if (shift_seq != NULL)
1442 {
1443 int cost = 0;
1444 rtx insn;
1445
1446 for (insn = shift_seq; insn != NULL_RTX; insn = NEXT_INSN (insn))
1447 if (INSN_P (insn))
1448 cost += insn_rtx_cost (PATTERN (insn));
1449
1450 /* The computation up to here is essentially independent
1451 of the arguments and could be precomputed. It may
1452 not be worth doing so. We could precompute if
1453 worthwhile or at least cache the results. The result
1454 technically depends on SHIFT, ACCESS_SIZE, and
1455 GET_MODE_CLASS (READ_MODE). But in practice the
1456 answer will depend only on ACCESS_SIZE. */
1457
1458 if (cost <= COSTS_N_INSNS (1))
1459 {
1460 /* We found an acceptable shift. Generate a move to
1461 take the value from the store and put it into the
1462 shift pseudo, then shift it, then generate another
1463 move to put it into the target of the read. */
1464 start_sequence ();
1465 emit_move_insn (new_reg, gen_lowpart (new_mode, store_info->rhs));
1466 emit_insn (shift_seq);
1467 convert_move (read_reg, new_reg, 1);
1468
1469 if (dump_file)
1470 {
1471 fprintf (dump_file, " -- adding extract insn r%d:%s = r%d:%s\n",
1472 REGNO (new_reg), GET_MODE_NAME (new_mode),
1473 REGNO (store_info->rhs), GET_MODE_NAME (store_mode));
1474
1475 fprintf (dump_file, " -- with shift of r%d by %d\n",
1476 REGNO(new_reg), shift);
1477 fprintf (dump_file, " -- and second extract insn r%d:%s = r%d:%s\n",
1478 REGNO (read_reg), GET_MODE_NAME (read_mode),
1479 REGNO (new_reg), GET_MODE_NAME (new_mode));
1480 }
1481
1482 /* Get the three insn sequence and return it. */
1483 shift_seq = get_insns ();
1484 end_sequence ();
1485 return shift_seq;
1486 }
1487 }
1488 }
1489 else
1490 /* End the sequence. */
1491 end_sequence ();
1492 }
1493
1494 return NULL;
1495 }
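/* A compiled-out example of the mismatch handled above: a narrow read
   of part of a wider store.  Depending on endianness and the offset,
   replacing the read by the stored register requires a right shift,
   and the function above looks for a single cheap insn to do it.  */
#if 0
static unsigned int
read_part (unsigned long long *slot, unsigned long long v)
{
  *slot = v;				/* DImode store.  */
  return *((unsigned int *) slot + 1);	/* SImode read of one half; on a
					   little-endian target this is
					   v >> 32.  */
}
#endif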
1496
1497
1498 /* Take a sequence of:
1499 A <- r1
1500 ...
1501 ... <- A
1502
1503 and change it into
1504 r2 <- r1
1505 A <- r1
1506 ...
1507 ... <- r2
1508
1509 or
1510
1511 r3 <- extract (r1)
1512 r3 <- r3 >> shift
1513 r2 <- extract (r3)
1514 ... <- r2
1515
1516 or
1517
1518 r2 <- extract (r1)
1519 ... <- r2
1520
1521 Depending on the alignment and the mode of the store and
1522 subsequent load.
1523
1524
1525 The STORE_INFO and STORE_INSN are for the store and READ_INFO
1526 and READ_INSN are for the read. Return true if the replacement
1527 went ok. */
1528
1529 static bool
1530 replace_read (store_info_t store_info, insn_info_t store_insn,
1531 read_info_t read_info, insn_info_t read_insn, rtx *loc)
1532 {
1533 enum machine_mode store_mode = GET_MODE (store_info->mem);
1534 enum machine_mode read_mode = GET_MODE (read_info->mem);
1535 int shift;
1536 int access_size; /* In bytes. */
1537 rtx read_reg = gen_reg_rtx (read_mode);
1538 rtx shift_seq = NULL;
1539
1540 if (!dbg_cnt (dse))
1541 return false;
1542
1543 if (GET_MODE_CLASS (read_mode) != GET_MODE_CLASS (store_mode))
1544 return false;
1545
1546 /* To get here the read is within the boundaries of the write so
1547 shift will never be negative. Start out with the shift being in
1548 bytes. */
1549 if (BYTES_BIG_ENDIAN)
1550 shift = store_info->end - read_info->end;
1551 else
1552 shift = read_info->begin - store_info->begin;
1553
1554 access_size = shift + GET_MODE_SIZE (read_mode);
1555
1556 /* From now on it is bits. */
1557 shift *= BITS_PER_UNIT;
1558
1559 /* We need to keep this in perspective. We are replacing a read
1560 with a sequence of insns, but the read will almost certainly be
1561 in cache, so it is not going to be an expensive one. Thus, we
1562 are not willing to do a multi insn shift or worse a subroutine
1563 call to get rid of the read. */
1564 if (shift)
1565 {
1566 if (access_size > UNITS_PER_WORD || FLOAT_MODE_P (store_mode))
1567 return false;
1568
1569 shift_seq = find_shift_sequence (read_reg, access_size, store_info,
1570 read_info, shift);
1571 if (!shift_seq)
1572 return false;
1573 }
1574
1575 if (dump_file)
1576 fprintf (dump_file, "replacing load at %d from store at %d\n",
1577 INSN_UID (read_insn->insn), INSN_UID (store_insn->insn));
1578
1579 if (validate_change (read_insn->insn, loc, read_reg, 0))
1580 {
1581 rtx insns;
1582 deferred_change_t deferred_change = pool_alloc (deferred_change_pool);
1583
1584 if (read_mode == store_mode)
1585 {
1586 start_sequence ();
1587
1588 /* The modes are the same and everything lines up. Just
1589 generate a simple move. */
1590 emit_move_insn (read_reg, store_info->rhs);
1591 if (dump_file)
1592 fprintf (dump_file, " -- adding move insn r%d = r%d\n",
1593 REGNO (read_reg), REGNO (store_info->rhs));
1594 insns = get_insns ();
1595 end_sequence ();
1596 }
1597 else if (shift)
1598 insns = shift_seq;
1599 else
1600 {
1601 /* The modes are different but the lsbs are in the same
1602 place; we need to extract the low part of the value from the
1603 rhs of the store. */
1604 start_sequence ();
1605 convert_move (read_reg, store_info->rhs, 1);
1606
1607 if (dump_file)
1608 fprintf (dump_file, " -- adding extract insn r%d:%s = r%d:%s\n",
1609 REGNO (read_reg), GET_MODE_NAME (read_mode),
1610 REGNO (store_info->rhs), GET_MODE_NAME (store_mode));
1611 insns = get_insns ();
1612 end_sequence ();
1613 }
1614
1615 /* Insert this right before the store insn where it will be safe
1616 from later insns that might change it before the read. */
1617 emit_insn_before (insns, store_insn->insn);
1618
1619 /* And now for the kludge part: cselib croaks if you just
1620 return at this point. There are two reasons for this:
1621
1622 1) Cselib has an idea of how many pseudos there are and
1623 that does not include the new ones we just added.
1624
1625 2) Cselib does not know about the move insn we added
1626 above the store_info, and there is no way to tell it
1627 about it, because it has "moved on".
1628
1629 Problem (1) is fixable with a certain amount of engineering.
1630 Problem (2) requires starting the bb from scratch. This
1631 could be expensive.
1632
1633 So we are just going to have to lie. The move/extraction
1634 insns are not really an issue, cselib did not see them. But
1635 the use of the new pseudo read_insn is a real problem because
1636 cselib has not scanned this insn. The way that we solve this
1637 problem is that we are just going to put the mem back for now
1638 and when we are finished with the block, we undo this. We
1639 keep a table of mems to get rid of. At the end of the basic
1640 block we can put them back. */
1641
1642 *loc = read_info->mem;
1643 deferred_change->next = deferred_change_list;
1644 deferred_change_list = deferred_change;
1645 deferred_change->loc = loc;
1646 deferred_change->reg = read_reg;
1647
1648 /* Get rid of the read_info, from the point of view of the
1649 rest of dse, play like this read never happened. */
1650 read_insn->read_rec = read_info->next;
1651 pool_free (read_info_pool, read_info);
1652 return true;
1653 }
1654 else
1655 {
1656 if (dump_file)
1657 fprintf (dump_file, " -- validation failure\n");
1658 return false;
1659 }
1660 }
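/* A compiled-out miniature of the deferred-change trick above, with
   hypothetical names: remember where a pointer was temporarily
   restored so the replacement can be redone once cselib is finished
   with the block.  */
#if 0
struct mini_deferred
{
  int *loc;			/* Where to write later.  */
  int newval;			/* What to write there.  */
  struct mini_deferred *next;
};

static struct mini_deferred *mini_list;

static void
mini_defer (struct mini_deferred *d, int *loc, int newval)
{
  d->loc = loc;
  d->newval = newval;
  d->next = mini_list;
  mini_list = d;
}

static void
mini_apply_all (void)
{
  struct mini_deferred *d;
  for (d = mini_list; d; d = d->next)
    *d->loc = d->newval;	/* Redo the replacements after the scan.  */
  mini_list = NULL;
}
#endif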
1661
1662 /* A for_each_rtx callback in which DATA is the bb_info. Check to see
1663 if LOC is a mem and, if it is, look at the address and kill any
1664 appropriate stores that may be active. */
1665
1666 static int
1667 check_mem_read_rtx (rtx *loc, void *data)
1668 {
1669 rtx mem = *loc;
1670 bb_info_t bb_info;
1671 insn_info_t insn_info;
1672 HOST_WIDE_INT offset = 0;
1673 HOST_WIDE_INT width = 0;
1674 alias_set_type spill_alias_set = 0;
1675 cselib_val *base = NULL;
1676 int group_id;
1677 read_info_t read_info;
1678
1679 if (!mem || !MEM_P (mem))
1680 return 0;
1681
1682 bb_info = (bb_info_t) data;
1683 insn_info = bb_info->last_insn;
1684
1685 if ((MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
1686 || (MEM_VOLATILE_P (mem)))
1687 {
1688 if (dump_file)
1689 fprintf (dump_file, " adding wild read, volatile or barrier.\n");
1690 add_wild_read (bb_info);
1691 insn_info->cannot_delete = true;
1692 return 0;
1693 }
1694
1695 /* If it is reading readonly mem, then there can be no conflict with
1696 another write. */
1697 if (MEM_READONLY_P (mem))
1698 return 0;
1699
1700 if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
1701 {
1702 if (dump_file)
1703 fprintf (dump_file, " adding wild read, canon_address failure.\n");
1704 add_wild_read (bb_info);
1705 return 0;
1706 }
1707
1708 if (GET_MODE (mem) == BLKmode)
1709 width = -1;
1710 else
1711 width = GET_MODE_SIZE (GET_MODE (mem));
1712
1713 read_info = pool_alloc (read_info_pool);
1714 read_info->group_id = group_id;
1715 read_info->mem = mem;
1716 read_info->alias_set = spill_alias_set;
1717 read_info->begin = offset;
1718 read_info->end = offset + width;
1719 read_info->next = insn_info->read_rec;
1720 insn_info->read_rec = read_info;
1721
1722 /* We ignore the clobbers in store_info. This is mildly aggressive,
1723 but there really should not be a clobber followed by a read. */
1724
1725 if (spill_alias_set)
1726 {
1727 insn_info_t i_ptr = active_local_stores;
1728 insn_info_t last = NULL;
1729
1730 if (dump_file)
1731 fprintf (dump_file, " processing spill load %d\n",
1732 (int) spill_alias_set);
1733
1734 while (i_ptr)
1735 {
1736 store_info_t store_info = i_ptr->store_rec;
1737
1738 /* Skip the clobbers. */
1739 while (!store_info->is_set)
1740 store_info = store_info->next;
1741
1742 if (store_info->alias_set == spill_alias_set)
1743 {
1744 if (dump_file)
1745 dump_insn_info ("removing from active", i_ptr);
1746
1747 if (last)
1748 last->next_local_store = i_ptr->next_local_store;
1749 else
1750 active_local_stores = i_ptr->next_local_store;
1751 }
1752 else
1753 last = i_ptr;
1754 i_ptr = i_ptr->next_local_store;
1755 }
1756 }
1757 else if (group_id >= 0)
1758 {
1759 /* This is the restricted case where the base is a constant or
1760 the frame pointer and offset is a constant. */
1761 insn_info_t i_ptr = active_local_stores;
1762 insn_info_t last = NULL;
1763
1764 if (dump_file)
1765 {
1766 if (width == -1)
1767 fprintf (dump_file, " processing const load gid=%d[BLK]\n",
1768 group_id);
1769 else
1770 fprintf (dump_file, " processing const load gid=%d[%d..%d)\n",
1771 group_id, (int)offset, (int)(offset+width));
1772 }
1773
1774 while (i_ptr)
1775 {
1776 bool remove = false;
1777 store_info_t store_info = i_ptr->store_rec;
1778
1779 /* Skip the clobbers. */
1780 while (!store_info->is_set)
1781 store_info = store_info->next;
1782
1783 /* There are three cases here. */
1784 if (store_info->group_id < 0)
1785 /* We have a cselib store followed by a read from a
1786 const base. */
1787 remove
1788 = canon_true_dependence (store_info->mem,
1789 GET_MODE (store_info->mem),
1790 store_info->mem_addr,
1791 mem, rtx_varies_p);
1792
1793 else if (group_id == store_info->group_id)
1794 {
1795 /* This is a block mode load. We may get lucky and
1796 canon_true_dependence may save the day. */
1797 if (width == -1)
1798 remove
1799 = canon_true_dependence (store_info->mem,
1800 GET_MODE (store_info->mem),
1801 store_info->mem_addr,
1802 mem, rtx_varies_p);
1803
1804 /* If this read is just reading back something that we just
1805 stored, rewrite the read. */
1806 else
1807 {
1808 if (store_info->rhs
1809 && (offset >= store_info->begin)
1810 && (offset + width <= store_info->end))
1811 {
1812 int mask = ((1L << width) - 1) << (offset - store_info->begin);
1813
1814 if ((store_info->positions_needed & mask) == mask
1815 && replace_read (store_info, i_ptr,
1816 read_info, insn_info, loc))
1817 return 0;
1818 }
1819 /* The bases are the same, just see if the offsets
1820 overlap. */
1821 if ((offset < store_info->end)
1822 && (offset + width > store_info->begin))
1823 remove = true;
1824 }
1825 }
1826
1827 /* else
1828 The else case that is missing here is that the
1829 bases are constant but different. There is nothing
1830 to do here because there is no overlap. */
1831
1832 if (remove)
1833 {
1834 if (dump_file)
1835 dump_insn_info ("removing from active", i_ptr);
1836
1837 if (last)
1838 last->next_local_store = i_ptr->next_local_store;
1839 else
1840 active_local_stores = i_ptr->next_local_store;
1841 }
1842 else
1843 last = i_ptr;
1844 i_ptr = i_ptr->next_local_store;
1845 }
1846 }
1847 else
1848 {
1849 insn_info_t i_ptr = active_local_stores;
1850 insn_info_t last = NULL;
1851 if (dump_file)
1852 {
1853 fprintf (dump_file, " processing cselib load mem:");
1854 print_inline_rtx (dump_file, mem, 0);
1855 fprintf (dump_file, "\n");
1856 }
1857
1858 while (i_ptr)
1859 {
1860 bool remove = false;
1861 store_info_t store_info = i_ptr->store_rec;
1862
1863 if (dump_file)
1864 fprintf (dump_file, " processing cselib load against insn %d\n",
1865 INSN_UID (i_ptr->insn));
1866
1867 /* Skip the clobbers. */
1868 while (!store_info->is_set)
1869 store_info = store_info->next;
1870
1871 /* If this read is just reading back something that we just
1872 stored, rewrite the read. */
1873 if (store_info->rhs
1874 && store_info->group_id == -1
1875 && store_info->cse_base == base
1876 && (offset >= store_info->begin)
1877 && (offset + width <= store_info->end))
1878 {
1879 int mask = ((1L << width) - 1) << (offset - store_info->begin);
1880
1881 if ((store_info->positions_needed & mask) == mask
1882 && replace_read (store_info, i_ptr,
1883 read_info, insn_info, loc))
1884 return 0;
1885 }
1886
1887 if (!store_info->alias_set)
1888 remove = canon_true_dependence (store_info->mem,
1889 GET_MODE (store_info->mem),
1890 store_info->mem_addr,
1891 mem, rtx_varies_p);
1892
1893 if (remove)
1894 {
1895 if (dump_file)
1896 dump_insn_info ("removing from active", i_ptr);
1897
1898 if (last)
1899 last->next_local_store = i_ptr->next_local_store;
1900 else
1901 active_local_stores = i_ptr->next_local_store;
1902 }
1903 else
1904 last = i_ptr;
1905 i_ptr = i_ptr->next_local_store;
1906 }
1907 }
1908 return 0;
1909 }
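
/* For illustration of the positions_needed test above, with
   hypothetical numbers: suppose a store covers bytes [0, 8) of a
   group, so store_info->begin == 0 and store_info->end == 8, and a
   2 byte read arrives at offset == 4.  Then

       mask = ((1L << 2) - 1) << (4 - 0) == 0x30

   and replace_read is attempted only if bits 4 and 5 are still set
   in store_info->positions_needed, i.e. no intervening store has
   overwritten the bytes the read wants.  */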
1910
1911 /* A note_uses callback in which DATA points to the bb_info. Apply
1912 check_mem_read_rtx, via for_each_rtx, to every part of *LOC that
1913 might be a memory read. */
1914
1915 static void
1916 check_mem_read_use (rtx *loc, void *data)
1917 {
1918 for_each_rtx (loc, check_mem_read_rtx, data);
1919 }
1920
1921 /* Apply record_store to all candidate stores in INSN. Mark INSN
1922 as not deletable if some part of it is not a candidate store or
1923 if it assigns to a non-register target. */
1924
1925 static void
1926 scan_insn (bb_info_t bb_info, rtx insn)
1927 {
1928 rtx body;
1929 insn_info_t insn_info = pool_alloc (insn_info_pool);
1930 int mems_found = 0;
1931 memset (insn_info, 0, sizeof (struct insn_info));
1932
1933 if (dump_file)
1934 fprintf (dump_file, "\n**scanning insn=%d\n",
1935 INSN_UID (insn));
1936
1937 insn_info->prev_insn = bb_info->last_insn;
1938 insn_info->insn = insn;
1939 bb_info->last_insn = insn_info;
1940
1941
1942 /* Cselib clears the table for this case, so we have to essentially
1943 do the same. */
1944 if (NONJUMP_INSN_P (insn)
1945 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
1946 && MEM_VOLATILE_P (PATTERN (insn)))
1947 {
1948 add_wild_read (bb_info);
1949 insn_info->cannot_delete = true;
1950 return;
1951 }
1952
1953 /* Look at all of the uses in the insn. */
1954 note_uses (&PATTERN (insn), check_mem_read_use, bb_info);
1955
1956 if (CALL_P (insn))
1957 {
1958 insn_info->cannot_delete = true;
1959 /* Const functions cannot do anything bad, i.e. read memory;
1960 however, they can read their parameters, which may have been
1961 pushed onto the stack. */
1962 if (CONST_OR_PURE_CALL_P (insn) && !pure_call_p (insn))
1963 {
1964 insn_info_t i_ptr = active_local_stores;
1965 insn_info_t last = NULL;
1966
1967 if (dump_file)
1968 fprintf (dump_file, "const call %d\n", INSN_UID (insn));
1969
1970 while (i_ptr)
1971 {
1972 store_info_t store_info = i_ptr->store_rec;
1973
1974 /* Skip the clobbers. */
1975 while (!store_info->is_set)
1976 store_info = store_info->next;
1977
1978 /* Remove the frame related stores. */
1979 if (store_info->group_id >= 0
1980 && VEC_index (group_info_t, rtx_group_vec, store_info->group_id)->frame_related)
1981 {
1982 if (dump_file)
1983 dump_insn_info ("removing from active", i_ptr);
1984
1985 if (last)
1986 last->next_local_store = i_ptr->next_local_store;
1987 else
1988 active_local_stores = i_ptr->next_local_store;
1989 }
1990 else
1991 last = i_ptr;
1992 i_ptr = i_ptr->next_local_store;
1993 }
1994
1995 insn_info->stack_read = true;
1996
1997 return;
1998 }
1999
2000 /* Every other call, including calls to pure functions, may read memory. */
2001 add_wild_read (bb_info);
2002 return;
2003 }
2004
2005 /* Assuming that there are sets in these insns, we cannot delete
2006 them. */
2007 if ((GET_CODE (PATTERN (insn)) == CLOBBER)
2008 || volatile_insn_p (PATTERN (insn))
2009 || (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))
2010 || (RTX_FRAME_RELATED_P (insn))
2011 || find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX))
2012 insn_info->cannot_delete = true;
2013
2014 body = PATTERN (insn);
2015 if (GET_CODE (body) == PARALLEL)
2016 {
2017 int i;
2018 for (i = 0; i < XVECLEN (body, 0); i++)
2019 mems_found += record_store (XVECEXP (body, 0, i), bb_info);
2020 }
2021 else
2022 mems_found += record_store (body, bb_info);
2023
2024 if (dump_file)
2025 fprintf (dump_file, "mems_found = %d, cannot_delete = %s\n",
2026 mems_found, insn_info->cannot_delete ? "true" : "false");
2027
2028 /* If we found some sets of mems, and the insn has not been marked
2029 cannot delete, add it into the active_local_stores so that it can
2030 be locally deleted if found dead. Otherwise mark it as cannot
2031 delete. This simplifies the processing later. */
2032 if (mems_found == 1 && !insn_info->cannot_delete)
2033 {
2034 insn_info->next_local_store = active_local_stores;
2035 active_local_stores = insn_info;
2036 }
2037 else
2038 insn_info->cannot_delete = true;
2039 }
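
/* A sketch of why exactly one recorded mem is required, using
   schematic RTL: a single-set insn such as

       (set (mem:SI (reg/f:SI fp)) (reg:SI 3))

   makes record_store return 1 and the insn joins
   active_local_stores.  A PARALLEL setting two mems would give
   mems_found == 2; deleting the insn would then delete both stores
   at once, so it is conservatively marked cannot_delete instead.  */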
2040
2041
2042 /* Remove BASE from the set of active_local_stores. This is a
2043 callback from cselib that is used to get rid of the stores in
2044 active_local_stores. */
2045
2046 static void
2047 remove_useless_values (cselib_val *base)
2048 {
2049 insn_info_t insn_info = active_local_stores;
2050 insn_info_t last = NULL;
2051
2052 while (insn_info)
2053 {
2054 store_info_t store_info = insn_info->store_rec;
2055 bool delete = false;
2056
2057 /* If ANY of the store_infos match the cselib group that is
2058 being deleted, then the insn cannot be deleted. */
2059 while (store_info)
2060 {
2061 if ((store_info->group_id == -1)
2062 && (store_info->cse_base == base))
2063 {
2064 delete = true;
2065 break;
2066 }
2067 store_info = store_info->next;
2068 }
2069
2070 if (delete)
2071 {
2072 if (last)
2073 last->next_local_store = insn_info->next_local_store;
2074 else
2075 active_local_stores = insn_info->next_local_store;
2076 free_store_info (insn_info);
2077 }
2078 else
2079 last = insn_info;
2080
2081 insn_info = insn_info->next_local_store;
2082 }
2083 }
2084
2085
2086 /* Do all of step 1. */
2087
2088 static void
2089 dse_step1 (void)
2090 {
2091 basic_block bb;
2092
2093 cselib_init (false);
2094 all_blocks = BITMAP_ALLOC (NULL);
2095 bitmap_set_bit (all_blocks, ENTRY_BLOCK);
2096 bitmap_set_bit (all_blocks, EXIT_BLOCK);
2097
2098 FOR_ALL_BB (bb)
2099 {
2100 insn_info_t ptr;
2101 bb_info_t bb_info = pool_alloc (bb_info_pool);
2102
2103 memset (bb_info, 0, sizeof (struct bb_info));
2104 bitmap_set_bit (all_blocks, bb->index);
2105
2106 bb_table[bb->index] = bb_info;
2107 cselib_discard_hook = remove_useless_values;
2108
2109 if (bb->index >= NUM_FIXED_BLOCKS)
2110 {
2111 rtx insn;
2112
2113 cse_store_info_pool
2114 = create_alloc_pool ("cse_store_info_pool",
2115 sizeof (struct store_info), 100);
2116 active_local_stores = NULL;
2117 cselib_clear_table ();
2118
2119 /* Scan the insns. */
2120 FOR_BB_INSNS (bb, insn)
2121 {
2122 if (INSN_P (insn))
2123 scan_insn (bb_info, insn);
2124 cselib_process_insn (insn);
2125 }
2126
2127 /* This is something of a hack, because the global algorithm
2128 is supposed to take care of the case where stores go dead
2129 at the end of the function. However, the global
2130 algorithm must take a more conservative view of block
2131 mode reads than the local algorithm does. So to handle the
2132 case of a store to the frame followed by a non-overlapping
2133 block mode read, we look at the active local
2134 stores at the end of the function and delete all of the
2135 frame and spill based ones. */
2136 if (stores_off_frame_dead_at_return
2137 && (EDGE_COUNT (bb->succs) == 0
2138 || (single_succ_p (bb)
2139 && single_succ (bb) == EXIT_BLOCK_PTR
2140 && ! current_function_calls_eh_return)))
2141 {
2142 insn_info_t i_ptr = active_local_stores;
2143 while (i_ptr)
2144 {
2145 store_info_t store_info = i_ptr->store_rec;
2146
2147 /* Skip the clobbers. */
2148 while (!store_info->is_set)
2149 store_info = store_info->next;
2150 if (store_info->alias_set)
2151 delete_dead_store_insn (i_ptr);
2152 else
2153 if (store_info->group_id >= 0)
2154 {
2155 group_info_t group
2156 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2157 if (group->frame_related)
2158 delete_dead_store_insn (i_ptr);
2159 }
2160
2161 i_ptr = i_ptr->next_local_store;
2162 }
2163 }
2164
2165 /* Get rid of the loads that were discovered in
2166 replace_read. Cselib is finished with this block. */
2167 while (deferred_change_list)
2168 {
2169 deferred_change_t next = deferred_change_list->next;
2170
2171 /* There is no reason to validate this change. That was
2172 done earlier. */
2173 *deferred_change_list->loc = deferred_change_list->reg;
2174 pool_free (deferred_change_pool, deferred_change_list);
2175 deferred_change_list = next;
2176 }
2177
2178 /* Get rid of all of the cselib based store_infos in this
2179 block and mark the containing insns as not being
2180 deletable. */
2181 ptr = bb_info->last_insn;
2182 while (ptr)
2183 {
2184 if (ptr->contains_cselib_groups)
2185 free_store_info (ptr);
2186 ptr = ptr->prev_insn;
2187 }
2188
2189 free_alloc_pool (cse_store_info_pool);
2190 }
2191 }
2192
2193 cselib_finish ();
2194 htab_empty (rtx_group_table);
2195 }
2196
2197 \f
2198 /*----------------------------------------------------------------------------
2199 Second step.
2200
2201 Assign each byte position in the stores that we are going to
2202 analyze globally to a position in the bitmaps. Returns true if
2203 there are any bit positions assigned.
2204 ----------------------------------------------------------------------------*/
2205
2206 static void
2207 dse_step2_init (void)
2208 {
2209 unsigned int i;
2210 group_info_t group;
2211
2212 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2213 {
2214 /* For all non stack related bases, we only consider a store to
2215 be deletable if there are two or more stores for that
2216 position. This is because it takes one store to make the
2217 other store redundant. However, for the stores that are
2218 stack related, we consider them even if there is only one
2219 store for the position. We do this because the stack related
2220 stores can be deleted if there is no read between them and
2221 the end of the function.
2222
2223 To make this work in the current framework, we take the stack
2224 related bases and add all of the bits from store1 into store2.
2225 This has the effect of making them eligible even if there is
2226 only one store. */
2227
2228 if (stores_off_frame_dead_at_return && group->frame_related)
2229 {
2230 bitmap_ior_into (group->store2_n, group->store1_n);
2231 bitmap_ior_into (group->store2_p, group->store1_p);
2232 if (dump_file)
2233 fprintf (dump_file, "group %d is frame related ", i);
2234 }
2235
2236 group->offset_map_size_n++;
2237 group->offset_map_n = XNEWVEC (int, group->offset_map_size_n);
2238 group->offset_map_size_p++;
2239 group->offset_map_p = XNEWVEC (int, group->offset_map_size_p);
2240 group->process_globally = false;
2241 if (dump_file)
2242 {
2243 fprintf (dump_file, "group %d(%d+%d): ", i,
2244 (int)bitmap_count_bits (group->store2_n),
2245 (int)bitmap_count_bits (group->store2_p));
2246 bitmap_print (dump_file, group->store2_n, "n ", " ");
2247 bitmap_print (dump_file, group->store2_p, "p ", "\n");
2248 }
2249 }
2250 }
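
/* A worked illustration of the ior above, with hypothetical
   positions: if bytes 0..3 of a group were each stored exactly once,
   store1_p contains {0,1,2,3} and store2_p is empty, so none of the
   group is considered globally.  If the group is frame related and
   stores off the frame die at return, the ior copies {0,1,2,3} into
   store2_p, so a single store becomes eligible for deletion when no
   read intervenes before the end of the function.  */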
2251
2252
2253 /* Init the offset tables for the normal case. */
2254
2255 static bool
2256 dse_step2_nospill (void)
2257 {
2258 unsigned int i;
2259 group_info_t group;
2260 /* Position 0 is never assigned, because 0 is used in the maps
2261 to mean unused. */
2262 current_position = 1;
2263
2264 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2265 {
2266 bitmap_iterator bi;
2267 unsigned int j;
2268
2269 if (group == clear_alias_group)
2270 continue;
2271
2272 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2273 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2274 bitmap_clear (group->group_kill);
2275
2276 EXECUTE_IF_SET_IN_BITMAP (group->store2_n, 0, j, bi)
2277 {
2278 bitmap_set_bit (group->group_kill, current_position);
2279 group->offset_map_n[j] = current_position++;
2280 group->process_globally = true;
2281 }
2282 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2283 {
2284 bitmap_set_bit (group->group_kill, current_position);
2285 group->offset_map_p[j] = current_position++;
2286 group->process_globally = true;
2287 }
2288 }
2289 return current_position != 1;
2290 }
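
/* An example of the numbering, with hypothetical groups: if group 0
   has store2_p == {4,5} and group 1 has store2_p == {0}, the loop
   assigns offset_map_p[4] == 1 and offset_map_p[5] == 2 in group 0,
   then offset_map_p[0] == 3 in group 1, and sets bits {1,2} and {3}
   in the respective group_kill sets.  Position 0 stays reserved so
   that get_bitmap_index can use 0 to mean "not tracked".  */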
2291
2292
2293 /* Init the offset tables for the spill case. */
2294
2295 static bool
2296 dse_step2_spill (void)
2297 {
2298 unsigned int j;
2299 group_info_t group = clear_alias_group;
2300 bitmap_iterator bi;
2301
2302 /* Position 0 is never assigned, because 0 is used in the maps
2303 to mean unused. */
2304 current_position = 1;
2305
2306 if (dump_file)
2307 {
2308 bitmap_print (dump_file, clear_alias_sets,
2309 "clear alias sets ", "\n");
2310 bitmap_print (dump_file, disqualified_clear_alias_sets,
2311 "disqualified clear alias sets ", "\n");
2312 }
2313
2314 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2315 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2316 bitmap_clear (group->group_kill);
2317
2318 /* Remove the disqualified positions from the store2_p set. */
2319 bitmap_and_compl_into (group->store2_p, disqualified_clear_alias_sets);
2320
2321 /* We do not need to process the store2_n set because
2322 alias_sets are always positive. */
2323 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2324 {
2325 bitmap_set_bit (group->group_kill, current_position);
2326 group->offset_map_p[j] = current_position++;
2327 group->process_globally = true;
2328 }
2329
2330 return current_position != 1;
2331 }
2332
2333
2334 \f
2335 /*----------------------------------------------------------------------------
2336 Third step.
2337
2338 Build the bit vectors for the transfer functions.
2339 ----------------------------------------------------------------------------*/
2340
2341
2342 /* Note that this is NOT a general purpose function. Any mem that has
2343 an alias set registered here is expected to be COMPLETELY unaliased:
2344 i.e. its addresses are not and need not be examined.
2345
2346 It is known that all references to this address will have this
2347 alias set and there are NO other references to this address in the
2348 function.
2349
2350 Currently the only place that is known to be clean enough to use
2351 this interface is the code that assigns the spill locations.
2352
2353 All of the mems that have alias_sets registered are subjected to a
2354 very powerful form of dse where function calls, volatile reads and
2355 writes, and reads from random locations are not taken into account.
2356
2357 It is also assumed that these locations go dead when the function
2358 returns. This assumption could be relaxed if there were found to
2359 be places that this assumption was not correct.
2360
2361 The MODE is passed in and saved. The mode of each load or store to
2362 a mem with ALIAS_SET is checked against MODE. If the size of that
2363 load or store is different from MODE, processing is halted on this
2364 alias set. For the vast majority of alias sets, all of the loads
2365 and stores will use the same mode. But vectors are treated
2366 differently: the alias set is established for the entire vector,
2367 but reload will insert loads and stores for individual elements and
2368 we do not necessarily have the information to track those separate
2369 elements. So when we see a mode mismatch, we just bail. */
2370
2371
2372 void
2373 dse_record_singleton_alias_set (alias_set_type alias_set,
2374 enum machine_mode mode)
2375 {
2376 struct clear_alias_mode_holder tmp_holder;
2377 struct clear_alias_mode_holder *entry;
2378 void **slot;
2379
2380 /* If we are not going to run dse, we need to return now or there
2381 will be problems with allocating the bitmaps. */
2382 if ((!gate_dse()) || !alias_set)
2383 return;
2384
2385 if (!clear_alias_sets)
2386 {
2387 clear_alias_sets = BITMAP_ALLOC (NULL);
2388 disqualified_clear_alias_sets = BITMAP_ALLOC (NULL);
2389 clear_alias_mode_table = htab_create (11, clear_alias_mode_hash,
2390 clear_alias_mode_eq, NULL);
2391 clear_alias_mode_pool = create_alloc_pool ("clear_alias_mode_pool",
2392 sizeof (struct clear_alias_mode_holder), 100);
2393 }
2394
2395 bitmap_set_bit (clear_alias_sets, alias_set);
2396
2397 tmp_holder.alias_set = alias_set;
2398
2399 slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, INSERT);
2400 gcc_assert (*slot == NULL);
2401
2402 *slot = entry = pool_alloc (clear_alias_mode_pool);
2403 entry->alias_set = alias_set;
2404 entry->mode = mode;
2405 }
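
/* A minimal usage sketch; the caller and SPILL_MEM are hypothetical,
   though in practice only the spill-slot code is clean enough to use
   this interface:

       alias_set_type set = new_alias_set ();
       set_mem_alias_set (spill_mem, set);
       dse_record_singleton_alias_set (set, GET_MODE (spill_mem));

   Every reference to the slot must then carry SET, and
   dse_invalidate_singleton_alias_set withdraws the slot if the
   guarantee ever stops holding.  */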
2406
2407
2408 /* Remove ALIAS_SET from the sets of stack slots being considered. */
2409
2410 void
2411 dse_invalidate_singleton_alias_set (alias_set_type alias_set)
2412 {
2413 if ((!gate_dse()) || !alias_set)
2414 return;
2415
2416 bitmap_clear_bit (clear_alias_sets, alias_set);
2417 }
2418
2419
2420 /* Look up the bitmap index for OFFSET in GROUP_INFO. If it is not
2421 there, return 0. */
2422
2423 static int
2424 get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset)
2425 {
2426 if (offset < 0)
2427 {
2428 HOST_WIDE_INT offset_p = -offset;
2429 if (offset_p >= group_info->offset_map_size_n)
2430 return 0;
2431 return group_info->offset_map_n[offset_p];
2432 }
2433 else
2434 {
2435 if (offset >= group_info->offset_map_size_p)
2436 return 0;
2437 return group_info->offset_map_p[offset];
2438 }
2439 }
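
/* An example with hypothetical map contents: offsets are signed, so
   each group keeps two arrays.  If offset_map_n[3] == 7, then
   get_bitmap_index (group, -3) returns 7; if offset_map_p[2] == 9,
   then get_bitmap_index (group, 2) returns 9.  An offset outside
   either array, or one never assigned in step 2, yields 0.  */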
2440
2441
2442 /* Process the STORE_INFOs into the bitmaps GEN and KILL. KILL
2443 may be NULL. */
2444
2445 static void
2446 scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill)
2447 {
2448 while (store_info)
2449 {
2450 HOST_WIDE_INT i;
2451 group_info_t group_info
2452 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2453 if (group_info->process_globally)
2454 for (i = store_info->begin; i < store_info->end; i++)
2455 {
2456 int index = get_bitmap_index (group_info, i);
2457 if (index != 0)
2458 {
2459 bitmap_set_bit (gen, index);
2460 if (kill)
2461 bitmap_clear_bit (kill, index);
2462 }
2463 }
2464 store_info = store_info->next;
2465 }
2466 }
2467
2468
2469 /* Process the STORE_INFOs into the bitmaps GEN and KILL. KILL
2470 may be NULL. */
2471
2472 static void
2473 scan_stores_spill (store_info_t store_info, bitmap gen, bitmap kill)
2474 {
2475 while (store_info)
2476 {
2477 if (store_info->alias_set)
2478 {
2479 int index = get_bitmap_index (clear_alias_group,
2480 store_info->alias_set);
2481 if (index != 0)
2482 {
2483 bitmap_set_bit (gen, index);
2484 if (kill)
2485 bitmap_clear_bit (kill, index);
2486 }
2487 }
2488 store_info = store_info->next;
2489 }
2490 }
2491
2492
2493 /* Process the READ_INFOs into the bitmaps GEN and KILL. KILL
2494 may be NULL. */
2495
2496 static void
2497 scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
2498 {
2499 read_info_t read_info = insn_info->read_rec;
2500 int i;
2501 group_info_t group;
2502
2503 /* For const function calls kill the stack related stores. */
2504 if (insn_info->stack_read)
2505 {
2506 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2507 if (group->process_globally && group->frame_related)
2508 {
2509 if (kill)
2510 bitmap_ior_into (kill, group->group_kill);
2511 bitmap_and_compl_into (gen, group->group_kill);
2512 }
2513 }
2514
2515 while (read_info)
2516 {
2517 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2518 {
2519 if (group->process_globally)
2520 {
2521 if (i == read_info->group_id)
2522 {
2523 if (read_info->begin > read_info->end)
2524 {
2525 /* Begin > end for block mode reads. */
2526 if (kill)
2527 bitmap_ior_into (kill, group->group_kill);
2528 bitmap_and_compl_into (gen, group->group_kill);
2529 }
2530 else
2531 {
2532 /* The groups are the same, just process the
2533 offsets. */
2534 HOST_WIDE_INT j;
2535 for (j = read_info->begin; j < read_info->end; j++)
2536 {
2537 int index = get_bitmap_index (group, j);
2538 if (index != 0)
2539 {
2540 if (kill)
2541 bitmap_set_bit (kill, index);
2542 bitmap_clear_bit (gen, index);
2543 }
2544 }
2545 }
2546 }
2547 else
2548 {
2549 /* The groups are different; if the alias sets
2550 conflict, clear the entire group. We only need
2551 to apply this test if the read_info is a cselib
2552 read. Anything with a constant base cannot alias
2553 something else with a different constant
2554 base. */
2555 if ((read_info->group_id < 0)
2556 && canon_true_dependence (group->base_mem,
2557 QImode,
2558 group->canon_base_mem,
2559 read_info->mem, rtx_varies_p))
2560 {
2561 if (kill)
2562 bitmap_ior_into (kill, group->group_kill);
2563 bitmap_and_compl_into (gen, group->group_kill);
2564 }
2565 }
2566 }
2567 }
2568
2569 read_info = read_info->next;
2570 }
2571 }
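
/* An example of the effect on the sets, with hypothetical positions:
   a 4 byte read of bytes [0, 4) of its own group clears those bits
   in GEN and sets them in KILL, so no store above the read can be
   proved dead across it; a block mode read (begin > end) or a
   conflicting cselib read conservatively kills the whole group.  */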
2572
2573 /* Process the READ_INFOs into the bitmaps GEN and KILL. KILL
2574 may be NULL. */
2575
2576 static void
2577 scan_reads_spill (read_info_t read_info, bitmap gen, bitmap kill)
2578 {
2579 while (read_info)
2580 {
2581 if (read_info->alias_set)
2582 {
2583 int index = get_bitmap_index (clear_alias_group,
2584 read_info->alias_set);
2585 if (index != 0)
2586 {
2587 if (kill)
2588 bitmap_set_bit (kill, index);
2589 bitmap_clear_bit (gen, index);
2590 }
2591 }
2592
2593 read_info = read_info->next;
2594 }
2595 }
2596
2597
2598 /* Return the insn in BB_INFO before the first wild read; if there
2599 are no wild reads in the block, return the last insn. */
2600
2601 static insn_info_t
2602 find_insn_before_first_wild_read (bb_info_t bb_info)
2603 {
2604 insn_info_t insn_info = bb_info->last_insn;
2605 insn_info_t last_wild_read = NULL;
2606
2607 while (insn_info)
2608 {
2609 if (insn_info->wild_read)
2610 {
2611 last_wild_read = insn_info->prev_insn;
2612 /* Block starts with wild read. */
2613 if (!last_wild_read)
2614 return NULL;
2615 }
2616
2617 insn_info = insn_info->prev_insn;
2618 }
2619
2620 if (last_wild_read)
2621 return last_wild_read;
2622 else
2623 return bb_info->last_insn;
2624 }
2625
2626
2627 /* Scan the insns in BB_INFO starting at PTR and going to the top of
2628 the block in order to build the gen and kill sets for the block.
2629 We start at PTR, which may be the last insn in the block or may
2630 be the last insn before the first wild read. In the latter case
2631 we are able to skip the rest of the block because it just does not
2632 matter: anything that happens is hidden by the wild read. */
2633
2634 static void
2635 dse_step3_scan (bool for_spills, basic_block bb)
2636 {
2637 bb_info_t bb_info = bb_table[bb->index];
2638 insn_info_t insn_info;
2639
2640 if (for_spills)
2641 /* There are no wild reads in the spill case. */
2642 insn_info = bb_info->last_insn;
2643 else
2644 insn_info = find_insn_before_first_wild_read (bb_info);
2645
2646 /* In the spill case, or in the no_spill case if there is no wild
2647 read in the block, we will need a kill set. */
2648 if (insn_info == bb_info->last_insn)
2649 {
2650 if (bb_info->kill)
2651 bitmap_clear (bb_info->kill);
2652 else
2653 bb_info->kill = BITMAP_ALLOC (NULL);
2654 }
2655 else
2656 if (bb_info->kill)
2657 BITMAP_FREE (bb_info->kill);
2658
2659 while (insn_info)
2660 {
2661 /* There may have been code deleted by the dce pass run before
2662 this phase. */
2663 if (insn_info->insn && INSN_P (insn_info->insn))
2664 {
2665 /* Process the read(s) last. */
2666 if (for_spills)
2667 {
2668 scan_stores_spill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2669 scan_reads_spill (insn_info->read_rec, bb_info->gen, bb_info->kill);
2670 }
2671 else
2672 {
2673 scan_stores_nospill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2674 scan_reads_nospill (insn_info, bb_info->gen, bb_info->kill);
2675 }
2676 }
2677
2678 insn_info = insn_info->prev_insn;
2679 }
2680 }
2681
2682
2683 /* Set the gen set of the exit block, and also any block with no
2684 successors that does not have a wild read. */
2685
2686 static void
2687 dse_step3_exit_block_scan (bb_info_t bb_info)
2688 {
2689 /* The gen set is all 0's for the exit block except for the
2690 frame related groups. */
2691
2692 if (stores_off_frame_dead_at_return)
2693 {
2694 unsigned int i;
2695 group_info_t group;
2696
2697 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2698 {
2699 if (group->process_globally && group->frame_related)
2700 bitmap_ior_into (bb_info->gen, group->group_kill);
2701 }
2702 }
2703 }
2704
2705
2706 /* Find all of the blocks that are not backwards reachable from the
2707 exit block or any block with no successors (BB). These are the
2708 infinite loops or infinite self loops. These blocks will still
2709 have their bits set in UNREACHABLE_BLOCKS. */
2710
2711 static void
2712 mark_reachable_blocks (sbitmap unreachable_blocks, basic_block bb)
2713 {
2714 edge e;
2715 edge_iterator ei;
2716
2717 if (TEST_BIT (unreachable_blocks, bb->index))
2718 {
2719 RESET_BIT (unreachable_blocks, bb->index);
2720 FOR_EACH_EDGE (e, ei, bb->preds)
2721 {
2722 mark_reachable_blocks (unreachable_blocks, e->src);
2723 }
2724 }
2725 }
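
/* For instance, a block that loops only to itself has no path to any
   no-successor block, so no call to this function ever clears its
   bit; dse_step3 then seeds its out set with all ones rather than
   leaving it undefined.  */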
2726
2727 /* Build the transfer functions for the function. */
2728
2729 static void
2730 dse_step3 (bool for_spills)
2731 {
2732 basic_block bb;
2733 sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block);
2734 sbitmap_iterator sbi;
2735 bitmap all_ones = NULL;
2736 unsigned int i;
2737
2738 sbitmap_ones (unreachable_blocks);
2739
2740 FOR_ALL_BB (bb)
2741 {
2742 bb_info_t bb_info = bb_table[bb->index];
2743 if (bb_info->gen)
2744 bitmap_clear (bb_info->gen);
2745 else
2746 bb_info->gen = BITMAP_ALLOC (NULL);
2747
2748 if (bb->index == ENTRY_BLOCK)
2749 ;
2750 else if (bb->index == EXIT_BLOCK)
2751 dse_step3_exit_block_scan (bb_info);
2752 else
2753 dse_step3_scan (for_spills, bb);
2754 if (EDGE_COUNT (bb->succs) == 0)
2755 mark_reachable_blocks (unreachable_blocks, bb);
2756
2757 /* If this is the second time dataflow is run, delete the old
2758 sets. */
2759 if (bb_info->in)
2760 BITMAP_FREE (bb_info->in);
2761 if (bb_info->out)
2762 BITMAP_FREE (bb_info->out);
2763 }
2764
2765 /* For any block in an infinite loop, we must initialize the out set
2766 to all ones. This could be expensive, but almost never occurs in
2767 practice. However, it is common in regression tests. */
2768 EXECUTE_IF_SET_IN_SBITMAP (unreachable_blocks, 0, i, sbi)
2769 {
2770 if (bitmap_bit_p (all_blocks, i))
2771 {
2772 bb_info_t bb_info = bb_table[i];
2773 if (!all_ones)
2774 {
2775 unsigned int j;
2776 group_info_t group;
2777
2778 all_ones = BITMAP_ALLOC (NULL);
2779 for (j = 0; VEC_iterate (group_info_t, rtx_group_vec, j, group); j++)
2780 bitmap_ior_into (all_ones, group->group_kill);
2781 }
2782 if (!bb_info->out)
2783 {
2784 bb_info->out = BITMAP_ALLOC (NULL);
2785 bitmap_copy (bb_info->out, all_ones);
2786 }
2787 }
2788 }
2789
2790 if (all_ones)
2791 BITMAP_FREE (all_ones);
2792 sbitmap_free (unreachable_blocks);
2793 }
2794
2795
2796 \f
2797 /*----------------------------------------------------------------------------
2798 Fourth step.
2799
2800 Solve the bitvector equations.
2801 ----------------------------------------------------------------------------*/
2802
2803
2804 /* Confluence function for blocks with no successors. Create an out
2805 set from the gen set of the exit block. This block logically has
2806 the exit block as a successor. */
2807
2808
2809
2810 static void
2811 dse_confluence_0 (basic_block bb)
2812 {
2813 bb_info_t bb_info = bb_table[bb->index];
2814
2815 if (bb->index == EXIT_BLOCK)
2816 return;
2817
2818 if (!bb_info->out)
2819 {
2820 bb_info->out = BITMAP_ALLOC (NULL);
2821 bitmap_copy (bb_info->out, bb_table[EXIT_BLOCK]->gen);
2822 }
2823 }
2824
2825 /* Propagate the information from the in set of the dest of E to the
2826 out set of the src of E. If the various in or out sets are not
2827 there, that means they are all ones. */
2828
2829 static void
2830 dse_confluence_n (edge e)
2831 {
2832 bb_info_t src_info = bb_table[e->src->index];
2833 bb_info_t dest_info = bb_table[e->dest->index];
2834
2835 if (dest_info->in)
2836 {
2837 if (src_info->out)
2838 bitmap_and_into (src_info->out, dest_info->in);
2839 else
2840 {
2841 src_info->out = BITMAP_ALLOC (NULL);
2842 bitmap_copy (src_info->out, dest_info->in);
2843 }
2844 }
2845 }
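
/* In equation form, with a missing in or out set standing for all
   ones:

       out (src) = intersection over successors d of in (d)

   built up one edge at a time by the bitmap_and_into above.  */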
2846
2847
2848 /* Propagate the info from the out to the in set of BB_INDEX's basic
2849 block. There are three cases:
2850
2851 1) The block has no kill set. In this case the kill set is all
2852 ones. It does not matter what the out set of the block is, none of
2853 the info can reach the top. The only thing that reaches the top is
2854 the gen set and we just copy the set.
2855
2856 2) There is a kill set but no out set and bb has successors. In
2857 this case we just return. Eventually an out set will be created and
2858 it is better to wait than to create a set of ones.
2859
2860 3) There is both a kill and out set. We apply the obvious transfer
2861 function.
2862 */
2863
2864 static bool
2865 dse_transfer_function (int bb_index)
2866 {
2867 bb_info_t bb_info = bb_table[bb_index];
2868
2869 if (bb_info->kill)
2870 {
2871 if (bb_info->out)
2872 {
2873 /* Case 3 above. */
2874 if (bb_info->in)
2875 return bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2876 bb_info->out, bb_info->kill);
2877 else
2878 {
2879 bb_info->in = BITMAP_ALLOC (NULL);
2880 bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2881 bb_info->out, bb_info->kill);
2882 return true;
2883 }
2884 }
2885 else
2886 /* Case 2 above. */
2887 return false;
2888 }
2889 else
2890 {
2891 /* Case 1 above. If there is already an in set, nothing
2892 happens. */
2893 if (bb_info->in)
2894 return false;
2895 else
2896 {
2897 bb_info->in = BITMAP_ALLOC (NULL);
2898 bitmap_copy (bb_info->in, bb_info->gen);
2899 return true;
2900 }
2901 }
2902 }
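
/* Case 3 in equation form:

       in (bb) = gen (bb) | (out (bb) & ~kill (bb))

   i.e. a store position is dead at the top of the block if the block
   itself overwrites it before reading it, or if it is dead at the
   bottom and nothing in the block reads it.  */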
2903
2904 /* Solve the dataflow equations. */
2905
2906 static void
2907 dse_step4 (void)
2908 {
2909 df_simple_dataflow (DF_BACKWARD, NULL, dse_confluence_0,
2910 dse_confluence_n, dse_transfer_function,
2911 all_blocks, df_get_postorder (DF_BACKWARD),
2912 df_get_n_blocks (DF_BACKWARD));
2913 if (dump_file)
2914 {
2915 basic_block bb;
2916
2917 fprintf (dump_file, "\n\n*** Global dataflow info after analysis.\n");
2918 FOR_ALL_BB (bb)
2919 {
2920 bb_info_t bb_info = bb_table[bb->index];
2921
2922 df_print_bb_index (bb, dump_file);
2923 if (bb_info->in)
2924 bitmap_print (dump_file, bb_info->in, " in: ", "\n");
2925 else
2926 fprintf (dump_file, " in: *MISSING*\n");
2927 if (bb_info->gen)
2928 bitmap_print (dump_file, bb_info->gen, " gen: ", "\n");
2929 else
2930 fprintf (dump_file, " gen: *MISSING*\n");
2931 if (bb_info->kill)
2932 bitmap_print (dump_file, bb_info->kill, " kill: ", "\n");
2933 else
2934 fprintf (dump_file, " kill: *MISSING*\n");
2935 if (bb_info->out)
2936 bitmap_print (dump_file, bb_info->out, " out: ", "\n");
2937 else
2938 fprintf (dump_file, " out: *MISSING*\n\n");
2939 }
2940 }
2941 }
2942
2943
2944 \f
2945 /*----------------------------------------------------------------------------
2946 Fifth step.
2947
2948 Delete the stores that can only be deleted using the global information.
2949 ----------------------------------------------------------------------------*/
2950
2951
2952 static void
2953 dse_step5_nospill (void)
2954 {
2955 basic_block bb;
2956 FOR_EACH_BB (bb)
2957 {
2958 bb_info_t bb_info = bb_table[bb->index];
2959 insn_info_t insn_info = bb_info->last_insn;
2960 bitmap v = bb_info->out;
2961
2962 while (insn_info)
2963 {
2964 bool deleted = false;
2965 if (dump_file && insn_info->insn)
2966 {
2967 fprintf (dump_file, "starting to process insn %d\n",
2968 INSN_UID (insn_info->insn));
2969 bitmap_print (dump_file, v, " v: ", "\n");
2970 }
2971
2972 /* There may have been code deleted by the dce pass run before
2973 this phase. */
2974 if (insn_info->insn
2975 && INSN_P (insn_info->insn)
2976 && (!insn_info->cannot_delete)
2977 && (!bitmap_empty_p (v)))
2978 {
2979 store_info_t store_info = insn_info->store_rec;
2980
2981 /* Try to delete the current insn. */
2982 deleted = true;
2983
2984 /* Skip the clobbers. */
2985 while (!store_info->is_set)
2986 store_info = store_info->next;
2987
2988 if (store_info->alias_set)
2989 deleted = false;
2990 else
2991 {
2992 HOST_WIDE_INT i;
2993 group_info_t group_info
2994 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2995
2996 for (i = store_info->begin; i < store_info->end; i++)
2997 {
2998 int index = get_bitmap_index (group_info, i);
2999
3000 if (dump_file)
3001 fprintf (dump_file, "i = %d, index = %d\n", (int)i, index);
3002 if (index == 0 || !bitmap_bit_p (v, index))
3003 {
3004 if (dump_file)
3005 fprintf (dump_file, "failing at i = %d\n", (int)i);
3006 deleted = false;
3007 break;
3008 }
3009 }
3010 }
3011 if (deleted)
3012 {
3013 if (dbg_cnt (dse))
3014 {
3015 check_for_inc_dec (insn_info->insn);
3016 delete_insn (insn_info->insn);
3017 insn_info->insn = NULL;
3018 globally_deleted++;
3019 }
3020 }
3021 }
3022 /* We do not want to process the local info if the insn was
3023 deleted. For instance, if the insn did a wild read, we
3024 no longer need to trash the info. */
3025 if (insn_info->insn
3026 && INSN_P (insn_info->insn)
3027 && (!deleted))
3028 {
3029 scan_stores_nospill (insn_info->store_rec, v, NULL);
3030 if (insn_info->wild_read)
3031 {
3032 if (dump_file)
3033 fprintf (dump_file, "wild read\n");
3034 bitmap_clear (v);
3035 }
3036 else if (insn_info->read_rec)
3037 {
3038 if (dump_file)
3039 fprintf (dump_file, "regular read\n");
3040 scan_reads_nospill (insn_info, v, NULL);
3041 }
3042 }
3043
3044 insn_info = insn_info->prev_insn;
3045 }
3046 }
3047 }
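
/* Restated with a hypothetical size: an 8 byte store is deleted here
   only when all eight of its byte positions map to nonzero bitmap
   indices and every one of those bits is set in v, meaning each byte
   is overwritten on every path to the exit before being read.  One
   byte with index 0 or a clear bit keeps the store alive.  */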
3048
3049
3050 static void
3051 dse_step5_spill (void)
3052 {
3053 basic_block bb;
3054 FOR_EACH_BB (bb)
3055 {
3056 bb_info_t bb_info = bb_table[bb->index];
3057 insn_info_t insn_info = bb_info->last_insn;
3058 bitmap v = bb_info->out;
3059
3060 while (insn_info)
3061 {
3062 bool deleted = false;
3063 /* There may have been code deleted by the dce pass run before
3064 this phase. */
3065 if (insn_info->insn
3066 && INSN_P (insn_info->insn)
3067 && (!insn_info->cannot_delete)
3068 && (!bitmap_empty_p (v)))
3069 {
3070 /* Try to delete the current insn. */
3071 store_info_t store_info = insn_info->store_rec;
3072 deleted = true;
3073
3074 while (store_info)
3075 {
3076 if (store_info->alias_set)
3077 {
3078 int index = get_bitmap_index (clear_alias_group,
3079 store_info->alias_set);
3080 if (index == 0 || !bitmap_bit_p (v, index))
3081 {
3082 deleted = false;
3083 break;
3084 }
3085 }
3086 else
3087 deleted = false;
3088 store_info = store_info->next;
3089 }
3090 if (deleted && dbg_cnt (dse))
3091 {
3092 if (dump_file)
3093 fprintf (dump_file, "Spill deleting insn %d\n",
3094 INSN_UID (insn_info->insn));
3095 check_for_inc_dec (insn_info->insn);
3096 delete_insn (insn_info->insn);
3097 spill_deleted++;
3098 insn_info->insn = NULL;
3099 }
3100 }
3101
3102 if (insn_info->insn
3103 && INSN_P (insn_info->insn)
3104 && (!deleted))
3105 {
3106 scan_stores_spill (insn_info->store_rec, v, NULL);
3107 scan_reads_spill (insn_info->read_rec, v, NULL);
3108 }
3109
3110 insn_info = insn_info->prev_insn;
3111 }
3112 }
3113 }
3114
3115
3116 \f
3117 /*----------------------------------------------------------------------------
3118 Sixth step.
3119
3120 Destroy everything left standing.
3121 ----------------------------------------------------------------------------*/
3122
3123 static void
3124 dse_step6 (bool global_done)
3125 {
3126 unsigned int i;
3127 group_info_t group;
3128 basic_block bb;
3129
3130 if (global_done)
3131 {
3132 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3133 {
3134 free (group->offset_map_n);
3135 free (group->offset_map_p);
3136 BITMAP_FREE (group->store1_n);
3137 BITMAP_FREE (group->store1_p);
3138 BITMAP_FREE (group->store2_n);
3139 BITMAP_FREE (group->store2_p);
3140 BITMAP_FREE (group->group_kill);
3141 }
3142
3143 FOR_ALL_BB (bb)
3144 {
3145 bb_info_t bb_info = bb_table[bb->index];
3146 BITMAP_FREE (bb_info->gen);
3147 if (bb_info->kill)
3148 BITMAP_FREE (bb_info->kill);
3149 if (bb_info->in)
3150 BITMAP_FREE (bb_info->in);
3151 if (bb_info->out)
3152 BITMAP_FREE (bb_info->out);
3153 }
3154 }
3155 else
3156 {
3157 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3158 {
3159 BITMAP_FREE (group->store1_n);
3160 BITMAP_FREE (group->store1_p);
3161 BITMAP_FREE (group->store2_n);
3162 BITMAP_FREE (group->store2_p);
3163 BITMAP_FREE (group->group_kill);
3164 }
3165 }
3166
3167 if (clear_alias_sets)
3168 {
3169 BITMAP_FREE (clear_alias_sets);
3170 BITMAP_FREE (disqualified_clear_alias_sets);
3171 free_alloc_pool (clear_alias_mode_pool);
3172 htab_delete (clear_alias_mode_table);
3173 }
3174
3175 end_alias_analysis ();
3176 free (bb_table);
3177 htab_delete (rtx_group_table);
3178 VEC_free (group_info_t, heap, rtx_group_vec);
3179 BITMAP_FREE (all_blocks);
3180 BITMAP_FREE (scratch);
3181
3182 free_alloc_pool (rtx_store_info_pool);
3183 free_alloc_pool (read_info_pool);
3184 free_alloc_pool (insn_info_pool);
3185 free_alloc_pool (bb_info_pool);
3186 free_alloc_pool (rtx_group_info_pool);
3187 free_alloc_pool (deferred_change_pool);
3188 }
3189
3190
3191
3192 /* -------------------------------------------------------------------------
3193 DSE
3194 ------------------------------------------------------------------------- */
3195
3196 /* Callback for running pass_rtl_dse. */
3197
3198 static unsigned int
3199 rest_of_handle_dse (void)
3200 {
3201 bool did_global = false;
3202
3203 df_set_flags (DF_DEFER_INSN_RESCAN);
3204
3205 dse_step0 ();
3206 dse_step1 ();
3207 dse_step2_init ();
3208 if (dse_step2_nospill ())
3209 {
3210 df_set_flags (DF_LR_RUN_DCE);
3211 df_analyze ();
3212 did_global = true;
3213 if (dump_file)
3214 fprintf (dump_file, "doing global processing\n");
3215 dse_step3 (false);
3216 dse_step4 ();
3217 dse_step5_nospill ();
3218 }
3219
3220 /* For the instance of dse that runs after reload, we make a special
3221 pass to process the spills. These are special in that they are
3222 totally transparent, i.e., there are no aliasing issues that need
3223 to be considered. This means that the wild reads that kill
3224 everything else do not apply here. */
3225 if (clear_alias_sets && dse_step2_spill ())
3226 {
3227 if (!did_global)
3228 {
3229 df_set_flags (DF_LR_RUN_DCE);
3230 df_analyze ();
3231 }
3232 did_global = true;
3233 if (dump_file)
3234 fprintf (dump_file, "doing global spill processing\n");
3235 dse_step3 (true);
3236 dse_step4 ();
3237 dse_step5_spill ();
3238 }
3239
3240 dse_step6 (did_global);
3241
3242 if (dump_file)
3243 fprintf (dump_file, "dse: local deletions = %d, global deletions = %d, spill deletions = %d\n",
3244 locally_deleted, globally_deleted, spill_deleted);
3245 return 0;
3246 }
3247
3248 static bool
3249 gate_dse (void)
3250 {
3251 return optimize > 0 && flag_dse;
3252 }
3253
3254 struct tree_opt_pass pass_rtl_dse1 =
3255 {
3256 "dse1", /* name */
3257 gate_dse, /* gate */
3258 rest_of_handle_dse, /* execute */
3259 NULL, /* sub */
3260 NULL, /* next */
3261 0, /* static_pass_number */
3262 TV_DSE1, /* tv_id */
3263 0, /* properties_required */
3264 0, /* properties_provided */
3265 0, /* properties_destroyed */
3266 0, /* todo_flags_start */
3267 TODO_dump_func |
3268 TODO_df_finish | TODO_verify_rtl_sharing |
3269 TODO_ggc_collect, /* todo_flags_finish */
3270 'w' /* letter */
3271 };
3272
3273 struct tree_opt_pass pass_rtl_dse2 =
3274 {
3275 "dse2", /* name */
3276 gate_dse, /* gate */
3277 rest_of_handle_dse, /* execute */
3278 NULL, /* sub */
3279 NULL, /* next */
3280 0, /* static_pass_number */
3281 TV_DSE2, /* tv_id */
3282 0, /* properties_required */
3283 0, /* properties_provided */
3284 0, /* properties_destroyed */
3285 0, /* todo_flags_start */
3286 TODO_dump_func |
3287 TODO_df_finish | TODO_verify_rtl_sharing |
3288 TODO_ggc_collect, /* todo_flags_finish */
3289 'w' /* letter */
3290 };