/* RTL dead store elimination.
   Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.

   Contributed by Richard Sandiford <rsandifor@codesourcery.com>
   and Kenneth Zadeck <zadeck@naturalbridge.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#undef BASELINE

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "df.h"
#include "cselib.h"
#include "timevar.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "alias.h"
#include "insn-config.h"
#include "expr.h"
#include "recog.h"
#include "dse.h"
#include "optabs.h"
#include "dbgcnt.h"

/* This file contains three techniques for performing Dead Store
   Elimination (dse).

   * The first technique performs dse locally on any base address.  It
   is based on cselib, which is a local value numbering technique.
   This technique is local to a basic block but deals with fairly
   general addresses.

   * The second technique performs dse globally but is restricted to
   base addresses that are either constant or are relative to the
   frame_pointer.

   * The third technique (which is only done after register allocation)
   processes the spill slots.  This differs from the second
   technique because it takes advantage of the fact that spilling is
   completely free from the effects of aliasing.

   Logically, dse is a backwards dataflow problem.  A store can be
   deleted if it cannot be reached in the backward direction by any
   use of the value being stored.  However, the local technique uses a
   forwards scan of the basic block because cselib requires that the
   block be processed in that order.

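   As an illustration (a made-up fragment, not from this file):

   A <- r100        <- dead store
   ...              (no read of A on any path)
   A <- r200

   the first store to A can be deleted because, scanning backwards
   from the second store, no use of the stored value can reach it.
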
   The pass is logically broken into 7 steps:

   0) Initialization.

   1) The local algorithm, as well as scanning the insns for the two
   global algorithms.

   2) Analysis to see if the global algs are necessary.  In the case
   of stores based on a constant address, there must be at least two
   stores to that address, to make it possible to delete some of the
   stores.  In the case of stores off of the frame or spill related
   stores, only one store to an address is necessary because those
   stores die at the end of the function.

   3) Set up the global dataflow equations based on processing the
   info parsed in the first step.

   4) Solve the dataflow equations.

   5) Delete the insns that the global analysis has indicated are
   unnecessary.

   6) Cleanup.

   The first step uses cselib and canon_rtx to build the largest
   expression possible for each address.  That step is a forwards pass
   through each basic block.  From the point of view of the global
   technique, the first step could examine a block in either
   direction.  The forwards ordering is to accommodate cselib.

   We make a simplifying assumption: addresses fall into four broad
   categories:

   1) base has rtx_varies_p == false, offset is constant.
   2) base has rtx_varies_p == false, offset variable.
   3) base has rtx_varies_p == true, offset constant.
   4) base has rtx_varies_p == true, offset variable.

   The local passes are able to process all 4 kinds of addresses.  The
   global pass only handles (1).

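   For example (illustrative rtl, not taken from a dump):
   (mem (plus (symbol_ref "x") (const_int 4))) is in category (1),
   while (mem (plus (reg 100) (const_int 4))) is in category (3)
   whenever (reg 100) is not one of the fixed registers listed in
   const_or_frame_p below.
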
   The global problem is formulated as follows:

   A store, S1, to address A, where A is not relative to the stack
   frame, can be eliminated if all paths from S1 to the end of the
   function contain another store to A before a read of A.

   If the address A is relative to the stack frame, a store S2 to A
   can be eliminated if there are no paths from S2 that reach the
   end of the function and read A before another store to A.  In
   this case S2 can be deleted even if there are paths from S2 to the
   end of the function that have no reads or writes to A.  This
   second case allows stores to the stack frame to be deleted that
   would otherwise die when the function returns.  This cannot be
   done if stores_off_frame_dead_at_return is not true.  See the doc
   for that variable for when this variable is false.

   The global problem is formulated as a backwards set union
   dataflow problem where the stores are the gens and reads are the
   kills.  Set union problems are rare and require some special
   handling given our representation of bitmaps.  A straightforward
   implementation requires a lot of bitmaps filled with 1s.
   These are expensive and cumbersome in our bitmap formulation so
   care has been taken to avoid large vectors filled with 1s.  See
   the comments in bb_info and in the dataflow confluence functions
   for details.

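   As an illustration (not taken from the sources): suppose block B
   stores to frame slot A, and every path from B to the exit either
   stores to A again or never touches A.  The bit for A is generated
   in B and is never killed by a read during the backwards
   propagation from the exit, so the store in B is deleted in step 5.
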
   There are two places for further enhancements to this algorithm:

   1) The original dse which was embedded in a pass called flow also
   did local address forwarding.  For example in

   A <- r100
   ... <- A

   flow would replace the right hand side of the second insn with a
   reference to r100.  Most of the information is available to add this
   to this pass.  It has not been done because it is a lot of work in
   the case that either r100 is assigned to between the first and
   second insn and/or the second insn is a load of part of the value
   stored by the first insn.

   insn 5 in gcc.c-torture/compile/990203-1.c simple case.
   insn 15 in gcc.c-torture/execute/20001017-2.c simple case.
   insn 25 in gcc.c-torture/execute/20001026-1.c simple case.
   insn 44 in gcc.c-torture/execute/20010910-1.c simple case.

   2) The cleaning up of spill code is quite profitable.  It currently
   depends on reading tea leaves and chicken entrails left by reload.
   This pass depends on reload creating a singleton alias set for each
   spill slot and telling the next dse pass which of these alias sets
   are the singletons.  Rather than analyze the addresses of the
   spills, dse's spill processing just does analysis of the loads and
   stores that use those alias sets.  There are three cases where this
   falls short:

   a) Reload sometimes creates the slot for one mode of access, and
   then inserts loads and/or stores for a smaller mode.  In this
   case, the current code just punts on the slot.  The proper thing
   to do is to back out and use one bit vector position for each
   byte of the entity associated with the slot.  This depends on
   KNOWING that reload always generates the accesses for each of the
   bytes in some canonical (read: easy to understand several
   passes after reload happens) way.

   b) Reload sometimes decides that the spill slot it allocated was not
   large enough for the mode and goes back and allocates more slots
   with the same mode and alias set.  The backout in this case is a
   little more graceful than (a).  In this case the slot is unmarked
   as being a spill slot and if the final address comes out to be based
   off the frame pointer, the global algorithm handles this slot.

   c) For any pass that may prespill, there is currently no
   mechanism to tell the dse pass that the slot being used has the
   special properties that reload uses.  It may be that all that is
   required is to have those passes make the same calls that reload
   does, assuming that the alias sets can be manipulated in the same
   way.  */

/* There are limits to the size of constant offsets we model for the
   global problem.  There are certainly test cases that exceed this
   limit; however, it is unlikely that there are important programs
   that really have constant offsets this size.  */
#define MAX_OFFSET (64 * 1024)


static bitmap scratch = NULL;
struct insn_info;

/* This structure holds information about a candidate store.  */
struct store_info
{

  /* False means this is a clobber.  */
  bool is_set;

  /* The id of the mem group of the base address.  If rtx_varies_p is
     true, this is -1.  Otherwise, it is the index into the group
     table.  */
  int group_id;

  /* This is the cselib value.  */
  cselib_val *cse_base;

  /* The canonized mem.  */
  rtx mem;

  /* The result of get_addr on mem.  */
  rtx mem_addr;

  /* If this is non-zero, it is the alias set of a spill location.  */
  alias_set_type alias_set;

  /* The offset of the first byte and of one past the last byte
     associated with the operation.  */
  int begin, end;

  /* A bitmask as wide as the number of bytes in the word that
     contains a 1 for each byte that may still be needed.  The store
     is unused if all of the bits are 0.  */
  long positions_needed;
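
  /* For example (an illustrative value, not from a dump): a 4-byte
     store starts with positions_needed == 0xf; if a later 2-byte
     store covers its first two bytes, record_store clears bits 0
     and 1, leaving 0xc.  */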

  /* The next store info for this insn.  */
  struct store_info *next;

  /* The right hand side of the store.  This is used if there is a
     subsequent reload of the mem's address somewhere later in the
     basic block.  */
  rtx rhs;
};

typedef struct store_info *store_info_t;
static alloc_pool cse_store_info_pool;
static alloc_pool rtx_store_info_pool;

/* This structure holds information about a load.  These are only
   built for rtx bases.  */
struct read_info
{
  /* The id of the mem group of the base address.  */
  int group_id;

  /* If this is non-zero, it is the alias set of a spill location.  */
  alias_set_type alias_set;

  /* The offset of the first byte and of the byte after the last byte
     associated with the operation.  If begin == end == 0, the read
     did not have a constant offset.  */
  int begin, end;

  /* The mem being read.  */
  rtx mem;

  /* The next read_info for this insn.  */
  struct read_info *next;
};
typedef struct read_info *read_info_t;
static alloc_pool read_info_pool;



/* One of these records is created for each insn.  */

struct insn_info
{
  /* Set true if the insn contains a store but the insn itself cannot
     be deleted.  This is set if the insn is a parallel and there is
     more than one non-dead output or if the insn is in some way
     volatile.  */
  bool cannot_delete;

  /* This field is only used by the global algorithm.  It is set true
     if the insn contains any read of mem except for a read from a
     category (1) address.  This is also set if the insn is a call or
     has a clobber of mem.  If the insn contains a wild read, the
     read_rec will be null.  */
  bool wild_read;

  /* This field is set for const function calls.  Const functions
     cannot read memory, but they can read the stack because that is
     where they may get their parms.  So having this set is less
     severe than a wild read, it just means that all of the stores to
     the stack are killed rather than all stores.  */
  bool stack_read;

  /* This is true if any of the sets within the insn contains a
     cselib base.  Such stores can only be deleted by the local
     algorithm.  */
  bool contains_cselib_groups;

  /* The insn.  */
  rtx insn;

  /* The list of mem sets or mem clobbers that are contained in this
     insn.  If the insn is deletable, it contains only one mem set.
     But it could also contain clobbers.  Insns that contain more than
     one mem set are not deletable, but each of those mems is here in
     order to provide info to delete other insns.  */
  store_info_t store_rec;

  /* The linked list of mem uses in this insn.  Only the reads from
     rtx bases are listed here.  The reads from cselib bases are
     completely processed during the first scan and so are never
     created.  */
  read_info_t read_rec;

  /* The prev insn in the basic block.  */
  struct insn_info * prev_insn;

  /* The linked list of insns that are in consideration for removal in
     the forwards pass through the basic block.  This pointer may be
     trash as it is not cleared when a wild read occurs.  The only
     time it is guaranteed to be correct is when the traversal starts
     at active_local_stores.  */
  struct insn_info * next_local_store;
};

typedef struct insn_info *insn_info_t;
static alloc_pool insn_info_pool;

/* The linked list of stores that are under consideration in this
   basic block.  */
static insn_info_t active_local_stores;

struct bb_info
{

  /* Pointer to the insn info for the last insn in the block.  These
     are linked so this is how all of the insns are reached.  During
     scanning this is the current insn being scanned.  */
  insn_info_t last_insn;

  /* The info for the global dataflow problem.  */


  /* This is set if the transfer function should AND in the wild_read
     bitmap before applying the kill and gen sets.  That vector knocks
     out most of the bits in the bitmap and thus speeds up the
     operations.  */
  bool apply_wild_read;

  /* The set of store positions that exist in this block before a wild read.  */
  bitmap gen;

  /* The set of load positions that exist in this block above the
     same position of a store.  */
  bitmap kill;

  /* The set of stores that reach the top of the block without being
     killed by a read.

     Do not represent the in if it is all ones.  Note that this is
     what the bitvector should logically be initialized to for a set
     intersection problem.  However, like the kill set, this is too
     expensive.  So initially, the in set will only be created for the
     exit block and any block that contains a wild read.  */
  bitmap in;

  /* The set of stores that reach the bottom of the block from its
     successors.

     Do not represent the out if it is all ones.  Note that this is
     what the bitvector should logically be initialized to for a set
     intersection problem.  However, like the kill and in sets, this is
     too expensive.  So what is done is that the confluence operator
     just initializes the vector from one of the out sets of the
     successors of the block.  */
  bitmap out;
};

typedef struct bb_info *bb_info_t;
static alloc_pool bb_info_pool;

/* Table to hold all bb_infos.  */
static bb_info_t *bb_table;

/* There is a group_info for each rtx base that is used to reference
   memory.  There are not many rtx bases because they are very
   limited in scope.  */

struct group_info
{
  /* The actual base of the address.  */
  rtx rtx_base;

  /* The sequential id of the base.  This allows us to have a
     canonical ordering of these that is not based on addresses.  */
  int id;

  /* A mem wrapped around the base pointer for the group in order to
     do read dependency.  */
  rtx base_mem;

  /* Canonized version of base_mem, most likely the same thing.  */
  rtx canon_base_mem;

  /* These two sets of two bitmaps are used to keep track of how many
     stores are actually referencing that position from this base.  We
     only do this for rtx bases as this will be used to assign
     positions in the bitmaps for the global problem.  Bit N is set in
     store1 on the first store for offset N.  Bit N is set in store2
     for the second store to offset N.  This is all we need since we
     only care about offsets that have two or more stores for them.

     The "_n" suffix is for offsets less than 0 and the "_p" suffix is
     for 0 and greater offsets.

     There is one special case here, for stores into the stack frame,
     we will OR store1 into store2 before deciding which stores to
     look at globally.  This is because stores to the stack frame that
     have no other reads before the end of the function can also be
     deleted.  */
  bitmap store1_n, store1_p, store2_n, store2_p;
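
  /* For example (illustrative): the first 4-byte store to offset 8
     from this base sets bits 8..11 of store1_p; a second store to
     the same bytes also sets them in store2_p, which is what marks
     those positions as candidates for the global problem.  */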

  /* The positions in this bitmap have the same assignments as the in,
     out, gen and kill bitmaps.  This bitmap is all zeros except for
     the positions that are occupied by stores for this group.  */
  bitmap group_kill;

  /* True if there are any positions that are to be processed
     globally.  */
  bool process_globally;

  /* True if the base of this group is either the frame_pointer or
     hard_frame_pointer.  */
  bool frame_related;

  /* The offset_map is used to map the offsets from this base into
     positions in the global bitmaps.  It is only created after all of
     the stores have been scanned and we know which ones we care
     about.  */
  int *offset_map_n, *offset_map_p;
  int offset_map_size_n, offset_map_size_p;
};
typedef struct group_info *group_info_t;
typedef const struct group_info *const_group_info_t;
static alloc_pool rtx_group_info_pool;

/* Tables of group_info structures, hashed by base value.  */
static htab_t rtx_group_table;

/* Index into the rtx_group_vec.  */
static int rtx_group_next_id;

DEF_VEC_P(group_info_t);
DEF_VEC_ALLOC_P(group_info_t,heap);

static VEC(group_info_t,heap) *rtx_group_vec;


/* This structure holds the set of changes that are being deferred
   when removing a read operation.  See replace_read.  */
struct deferred_change
{

  /* The mem that is being replaced.  */
  rtx *loc;

  /* The reg it is being replaced with.  */
  rtx reg;

  struct deferred_change *next;
};

typedef struct deferred_change *deferred_change_t;
static alloc_pool deferred_change_pool;

static deferred_change_t deferred_change_list = NULL;

/* These are used to hold the alias sets of spill variables.  Since
   these are never aliased and there may be a lot of them, it makes
   sense to treat them specially.  This bitvector is only allocated in
   calls from dse_record_singleton_alias_set which currently is only
   made during reload1.  So when dse is called before reload this
   mechanism does nothing.  */

static bitmap clear_alias_sets = NULL;

/* The set of clear_alias_sets that have been disqualified because
   there are loads or stores using a different mode than the alias set
   was registered with.  */
static bitmap disqualified_clear_alias_sets = NULL;

/* The group that holds all of the clear_alias_sets.  */
static group_info_t clear_alias_group;

/* The modes of the clear_alias_sets.  */
static htab_t clear_alias_mode_table;

/* Hash table element to look up the mode for an alias set.  */
struct clear_alias_mode_holder
{
  alias_set_type alias_set;
  enum machine_mode mode;
};

static alloc_pool clear_alias_mode_pool;

/* This is true except for two cases:
   (1) current_function_stdarg -- i.e. we cannot do this
   for vararg functions because they play games with the frame.
   (2) In Ada, it is sometimes not safe to assume that stores
   based off the stack frame go dead at the exit to a function.  */
static bool stores_off_frame_dead_at_return;

/* Counter for stats.  */
static int globally_deleted;
static int locally_deleted;
static int spill_deleted;

static bitmap all_blocks;

/* The number of bits used in the global bitmaps.  */
static unsigned int current_position;


static bool gate_dse (void);

\f
/*----------------------------------------------------------------------------
   Zeroth step.

   Initialization.
----------------------------------------------------------------------------*/

/* Hashtable callbacks for the clear_alias_mode_table, which maps
   alias sets to the modes they were registered with.  */

static int
clear_alias_mode_eq (const void *p1, const void *p2)
{
  const struct clear_alias_mode_holder * h1
    = (const struct clear_alias_mode_holder *) p1;
  const struct clear_alias_mode_holder * h2
    = (const struct clear_alias_mode_holder *) p2;
  return h1->alias_set == h2->alias_set;
}


static hashval_t
clear_alias_mode_hash (const void *p)
{
  const struct clear_alias_mode_holder *holder
    = (const struct clear_alias_mode_holder *) p;
  return holder->alias_set;
}


/* Find the entry associated with ALIAS_SET.  */

static struct clear_alias_mode_holder *
clear_alias_set_lookup (alias_set_type alias_set)
{
  struct clear_alias_mode_holder tmp_holder;
  void **slot;

  tmp_holder.alias_set = alias_set;
  slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, NO_INSERT);
  gcc_assert (*slot);

  return *slot;
}


/* Hashtable callbacks for maintaining the "rtx_base" field of
   group_info, given that the addresses are function invariants.  */

static int
invariant_group_base_eq (const void *p1, const void *p2)
{
  const_group_info_t gi1 = (const_group_info_t) p1;
  const_group_info_t gi2 = (const_group_info_t) p2;
  return rtx_equal_p (gi1->rtx_base, gi2->rtx_base);
}


static hashval_t
invariant_group_base_hash (const void *p)
{
  const_group_info_t gi = (const_group_info_t) p;
  int do_not_record;
  return hash_rtx (gi->rtx_base, Pmode, &do_not_record, NULL, false);
}


/* Get the GROUP for BASE.  Add a new group if it is not there.  */

static group_info_t
get_group_info (rtx base)
{
  struct group_info tmp_gi;
  group_info_t gi;
  void **slot;

  if (base)
    {
      /* Find the group_info structure for BASE, creating a new one
         if necessary.  */
      tmp_gi.rtx_base = base;
      slot = htab_find_slot (rtx_group_table, &tmp_gi, INSERT);
      gi = (group_info_t) *slot;
    }
  else
    {
      if (!clear_alias_group)
        {
          clear_alias_group = gi = pool_alloc (rtx_group_info_pool);
          memset (gi, 0, sizeof (struct group_info));
          gi->id = rtx_group_next_id++;
          gi->store1_n = BITMAP_ALLOC (NULL);
          gi->store1_p = BITMAP_ALLOC (NULL);
          gi->store2_n = BITMAP_ALLOC (NULL);
          gi->store2_p = BITMAP_ALLOC (NULL);
          gi->group_kill = BITMAP_ALLOC (NULL);
          gi->process_globally = false;
          gi->offset_map_size_n = 0;
          gi->offset_map_size_p = 0;
          gi->offset_map_n = NULL;
          gi->offset_map_p = NULL;
          VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
        }
      return clear_alias_group;
    }

  if (gi == NULL)
    {
      *slot = gi = pool_alloc (rtx_group_info_pool);
      gi->rtx_base = base;
      gi->id = rtx_group_next_id++;
      gi->base_mem = gen_rtx_MEM (QImode, base);
      gi->canon_base_mem = canon_rtx (gi->base_mem);
      gi->store1_n = BITMAP_ALLOC (NULL);
      gi->store1_p = BITMAP_ALLOC (NULL);
      gi->store2_n = BITMAP_ALLOC (NULL);
      gi->store2_p = BITMAP_ALLOC (NULL);
      gi->group_kill = BITMAP_ALLOC (NULL);
      gi->process_globally = false;
      gi->frame_related =
        (base == frame_pointer_rtx) || (base == hard_frame_pointer_rtx);
      gi->offset_map_size_n = 0;
      gi->offset_map_size_p = 0;
      gi->offset_map_n = NULL;
      gi->offset_map_p = NULL;
      VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
    }

  return gi;
}


/* Initialization of data structures.  */

static void
dse_step0 (void)
{
  locally_deleted = 0;
  globally_deleted = 0;
  spill_deleted = 0;

  scratch = BITMAP_ALLOC (NULL);

  rtx_store_info_pool
    = create_alloc_pool ("rtx_store_info_pool",
                         sizeof (struct store_info), 100);
  read_info_pool
    = create_alloc_pool ("read_info_pool",
                         sizeof (struct read_info), 100);
  insn_info_pool
    = create_alloc_pool ("insn_info_pool",
                         sizeof (struct insn_info), 100);
  bb_info_pool
    = create_alloc_pool ("bb_info_pool",
                         sizeof (struct bb_info), 100);
  rtx_group_info_pool
    = create_alloc_pool ("rtx_group_info_pool",
                         sizeof (struct group_info), 100);
  deferred_change_pool
    = create_alloc_pool ("deferred_change_pool",
                         sizeof (struct deferred_change), 10);

  rtx_group_table = htab_create (11, invariant_group_base_hash,
                                 invariant_group_base_eq, NULL);

  bb_table = XCNEWVEC (bb_info_t, last_basic_block);
  rtx_group_next_id = 0;

  stores_off_frame_dead_at_return =
    (!(TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE
       && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))))
    && (!current_function_stdarg);

  init_alias_analysis ();

  if (clear_alias_sets)
    clear_alias_group = get_group_info (NULL);
  else
    clear_alias_group = NULL;
}


\f
/*----------------------------------------------------------------------------
   First step.

   Scan all of the insns.  Any random ordering of the blocks is fine.
   Each block is scanned in forward order to accommodate cselib which
   is used to remove stores with non-constant bases.
----------------------------------------------------------------------------*/

/* Delete all of the store_info recs from INSN_INFO.  */

static void
free_store_info (insn_info_t insn_info)
{
  store_info_t store_info = insn_info->store_rec;
  while (store_info)
    {
      store_info_t next = store_info->next;
      if (store_info->cse_base)
        pool_free (cse_store_info_pool, store_info);
      else
        pool_free (rtx_store_info_pool, store_info);
      store_info = next;
    }

  insn_info->cannot_delete = true;
  insn_info->contains_cselib_groups = false;
  insn_info->store_rec = NULL;
}


struct insn_size {
  int size;
  rtx insn;
};


/* Add an insn to do the add inside X if it is a
   PRE/POST-INC/DEC/MODIFY.  D is a structure containing the insn and
   the size of the mode of the MEM that this is inside of.  */
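
/* For example (an illustrative transformation): if the insn being
   deleted contains (mem:SI (post_inc (reg r1))), the callback below
   first emits the separate insn r1 = r1 + 4 before it, so that the
   side effect of the address survives the deletion.  */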

static int
replace_inc_dec (rtx *r, void *d)
{
  rtx x = *r;
  struct insn_size *data = (struct insn_size *)d;
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
        rtx r1 = XEXP (x, 0);
        /* Note that gen_int_mode takes the constant first and the
           mode second.  */
        rtx c = gen_int_mode (data->size, Pmode);
        add_insn_before (data->insn,
                         gen_rtx_SET (Pmode, r1,
                                      gen_rtx_PLUS (Pmode, r1, c)),
                         NULL);
        return -1;
      }

    case PRE_DEC:
    case POST_DEC:
      {
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (-data->size, Pmode);
        add_insn_before (data->insn,
                         gen_rtx_SET (Pmode, r1,
                                      gen_rtx_PLUS (Pmode, r1, c)),
                         NULL);
        return -1;
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
        /* We can reuse the add because we are about to delete the
           insn that contained it.  The modified register is operand
           0 and the add expression is operand 1.  */
        rtx add = XEXP (x, 1);
        rtx r1 = XEXP (x, 0);
        add_insn_before (data->insn,
                         gen_rtx_SET (Pmode, r1, add), NULL);
        return -1;
      }

    default:
      return 0;
    }
}


/* If X is a MEM, check the address to see if it is PRE/POST-INC/DEC/MODIFY
   and generate an add to replace that.  */

static int
replace_inc_dec_mem (rtx *r, void *d)
{
  rtx x = *r;
  if (GET_CODE (x) == MEM)
    {
      struct insn_size data;

      data.size = GET_MODE_SIZE (GET_MODE (x));
      data.insn = (rtx)d;

      for_each_rtx (&XEXP (x, 0), replace_inc_dec, &data);

      return -1;
    }
  return 0;
}

/* Before we delete INSN, make sure that the auto inc/dec, if it is
   there, is split into a separate insn.  */

static void
check_for_inc_dec (rtx insn)
{
  rtx note = find_reg_note (insn, REG_INC, NULL_RTX);
  if (note)
    for_each_rtx (&insn, replace_inc_dec_mem, insn);
}


/* Delete the insn and free all of the fields inside INSN_INFO.  */

static void
delete_dead_store_insn (insn_info_t insn_info)
{
  read_info_t read_info;

  if (!dbg_cnt (dse))
    return;

  check_for_inc_dec (insn_info->insn);
  if (dump_file)
    {
      fprintf (dump_file, "Locally deleting insn %d ",
               INSN_UID (insn_info->insn));
      if (insn_info->store_rec->alias_set)
        fprintf (dump_file, "alias set %d\n",
                 (int) insn_info->store_rec->alias_set);
      else
        fprintf (dump_file, "\n");
    }

  free_store_info (insn_info);
  read_info = insn_info->read_rec;

  while (read_info)
    {
      read_info_t next = read_info->next;
      pool_free (read_info_pool, read_info);
      read_info = next;
    }
  insn_info->read_rec = NULL;

  delete_insn (insn_info->insn);
  locally_deleted++;
  insn_info->insn = NULL;

  insn_info->wild_read = false;
}


/* Set the store* bitmaps and the offset_map_size* fields in GROUP
   based on OFFSET and WIDTH.  */
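
/* For example (illustrative numbers): a 4-byte store at OFFSET -2
   records bytes -2 and -1 in store1_n (as bits 2 and 1) and bytes 0
   and 1 in store1_p (as bits 0 and 1), growing offset_map_size_n and
   offset_map_size_p as needed.  */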

static void
set_usage_bits (group_info_t group, HOST_WIDE_INT offset, HOST_WIDE_INT width)
{
  HOST_WIDE_INT i;

  if ((offset > -MAX_OFFSET) && (offset < MAX_OFFSET))
    for (i = offset; i < offset + width; i++)
      {
        bitmap store1;
        bitmap store2;
        int ai;
        if (i < 0)
          {
            store1 = group->store1_n;
            store2 = group->store2_n;
            ai = -i;
          }
        else
          {
            store1 = group->store1_p;
            store2 = group->store2_p;
            ai = i;
          }

        if (bitmap_bit_p (store1, ai))
          bitmap_set_bit (store2, ai);
        else
          {
            bitmap_set_bit (store1, ai);
            if (i < 0)
              {
                if (group->offset_map_size_n < ai)
                  group->offset_map_size_n = ai;
              }
            else
              {
                if (group->offset_map_size_p < ai)
                  group->offset_map_size_p = ai;
              }
          }
      }
}


/* Set the BB_INFO so that the last insn is marked as a wild read.  */

static void
add_wild_read (bb_info_t bb_info)
{
  insn_info_t insn_info = bb_info->last_insn;
  read_info_t *ptr = &insn_info->read_rec;

  while (*ptr)
    {
      read_info_t next = (*ptr)->next;
      if ((*ptr)->alias_set == 0)
        {
          pool_free (read_info_pool, *ptr);
          *ptr = next;
        }
      else
        ptr = &(*ptr)->next;
    }
  insn_info->wild_read = true;
  active_local_stores = NULL;
}


/* Return true if X is a constant or one of the registers that behave
   as a constant over the life of a function.  */

static bool
const_or_frame_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case MEM:
      return MEM_READONLY_P (x);

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
      return true;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
          || x == pic_offset_table_rtx)
        return true;
      return false;

    default:
      return false;
    }
}

/* Take all reasonable action to put the address of MEM into the form
   that we can do analysis on.

   The gold standard is to get the address into the form: address +
   OFFSET where address is something that rtx_varies_p considers a
   constant.  When we can get the address in this form, we can do
   global analysis on it.  Note that for constant bases, address is
   not actually returned, only the group_id.  The address can be
   obtained from that.

   If that fails, we try cselib to get a value we can at least use
   locally.  If that fails we return false.

   The GROUP_ID is set to -1 for cselib bases and to the index of the
   group for non-varying bases.  */

static bool
canon_address (rtx mem,
               alias_set_type *alias_set_out,
               int *group_id,
               HOST_WIDE_INT *offset,
               cselib_val **base)
{
  rtx mem_address = XEXP (mem, 0);
  rtx expanded_address, address;
  /* Make sure that cselib has initialized all of the operands of
     the address before asking it to do the subst.  */

  if (clear_alias_sets)
    {
      /* If this is a spill, do not do any further processing.  */
      alias_set_type alias_set = MEM_ALIAS_SET (mem);
      if (dump_file)
        fprintf (dump_file, "found alias set %d\n", (int) alias_set);
      if (bitmap_bit_p (clear_alias_sets, alias_set))
        {
          struct clear_alias_mode_holder *entry
            = clear_alias_set_lookup (alias_set);

          /* If the modes do not match, we cannot process this set.  */
          if (entry->mode != GET_MODE (mem))
            {
              if (dump_file)
                fprintf (dump_file,
                         "disqualifying alias set %d, (%s) != (%s)\n",
                         (int) alias_set, GET_MODE_NAME (entry->mode),
                         GET_MODE_NAME (GET_MODE (mem)));

              bitmap_set_bit (disqualified_clear_alias_sets, alias_set);
              return false;
            }

          *alias_set_out = alias_set;
          *group_id = clear_alias_group->id;
          return true;
        }
    }

  *alias_set_out = 0;

  cselib_lookup (mem_address, Pmode, 1);

  if (dump_file)
    {
      fprintf (dump_file, "  mem: ");
      print_inline_rtx (dump_file, mem_address, 0);
      fprintf (dump_file, "\n");
    }

  /* Use cselib to replace all of the reg references with the full
     expression.  This will take care of the case where we have

     r_x = base + offset;
     val = *r_x;

     by making it into

     val = *(base + offset);
  */

  expanded_address = cselib_expand_value_rtx (mem_address, scratch, 5);

  /* If this fails, just go with the mem_address.  */
  if (!expanded_address)
    expanded_address = mem_address;

  /* Split the address into canonical BASE + OFFSET terms.  */
  address = canon_rtx (expanded_address);

  *offset = 0;

  if (dump_file)
    {
      fprintf (dump_file, "\n   after cselib_expand address: ");
      print_inline_rtx (dump_file, expanded_address, 0);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "\n   after canon_rtx address: ");
      print_inline_rtx (dump_file, address, 0);
      fprintf (dump_file, "\n");
    }

  if (GET_CODE (address) == CONST)
    address = XEXP (address, 0);

  if (GET_CODE (address) == PLUS && GET_CODE (XEXP (address, 1)) == CONST_INT)
    {
      *offset = INTVAL (XEXP (address, 1));
      address = XEXP (address, 0);
    }

  if (const_or_frame_p (address))
    {
      group_info_t group = get_group_info (address);

      if (dump_file)
        fprintf (dump_file, "  gid=%d offset=%d \n", group->id, (int)*offset);
      *base = NULL;
      *group_id = group->id;
    }
  else
    {
      *base = cselib_lookup (address, Pmode, true);
      *group_id = -1;

      if (*base == NULL)
        {
          if (dump_file)
            fprintf (dump_file, " no cselib val - should be a wild read.\n");
          return false;
        }
      if (dump_file)
        fprintf (dump_file, "  varying cselib base=%d offset = %d\n",
                 (*base)->value, (int)*offset);
    }
  return true;
}


/* Clear the rhs field from the active_local_stores list.  */

static void
clear_rhs_from_active_local_stores (void)
{
  insn_info_t ptr = active_local_stores;

  while (ptr)
    {
      store_info_t store_info = ptr->store_rec;
      /* Skip the clobbers.  */
      while (!store_info->is_set)
        store_info = store_info->next;

      store_info->rhs = NULL;

      ptr = ptr->next_local_store;
    }
}


/* BODY is an instruction pattern that belongs to INSN.  Return 1 if
   there is a candidate store, after adding it to the appropriate
   local store group if so.  */
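
/* For example (an illustrative pair of insns, not from a dump):

   mem[fp+4] <- r100      <- becomes dead here
   mem[fp+4] <- r101

   when the second store is recorded, every positions_needed bit of
   the first store's record goes to zero and the first insn is
   deleted on the spot.  */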

static int
record_store (rtx body, bb_info_t bb_info)
{
  rtx mem;
  HOST_WIDE_INT offset = 0;
  HOST_WIDE_INT width = 0;
  alias_set_type spill_alias_set;
  insn_info_t insn_info = bb_info->last_insn;
  store_info_t store_info = NULL;
  int group_id;
  cselib_val *base = NULL;
  insn_info_t ptr, last;
  bool store_is_unused;

  if (GET_CODE (body) != SET && GET_CODE (body) != CLOBBER)
    return 0;

  /* If this is not used, then this cannot be used to keep the insn
     from being deleted.  On the other hand, it does provide something
     that can be used to prove that another store is dead.  */
  store_is_unused
    = (find_reg_note (insn_info->insn, REG_UNUSED, body) != NULL);

  /* Check whether that value is a suitable memory location.  */
  mem = SET_DEST (body);
  if (!MEM_P (mem))
    {
      /* If the set or clobber is unused, then it does not affect our
         ability to get rid of the entire insn.  */
      if (!store_is_unused)
        insn_info->cannot_delete = true;
      return 0;
    }

  /* At this point we know mem is a mem.  */
  if (GET_MODE (mem) == BLKmode)
    {
      if (GET_CODE (XEXP (mem, 0)) == SCRATCH)
        {
          if (dump_file)
            fprintf (dump_file, " adding wild read for (clobber (mem:BLK (scratch)))\n");
          add_wild_read (bb_info);
          insn_info->cannot_delete = true;
        }
      else if (!store_is_unused)
        {
          /* If the set or clobber is unused, then it does not affect our
             ability to get rid of the entire insn.  */
          insn_info->cannot_delete = true;
          clear_rhs_from_active_local_stores ();
        }
      return 0;
    }

  /* We can still process a volatile mem, we just cannot delete it.  */
  if (MEM_VOLATILE_P (mem))
    insn_info->cannot_delete = true;

  if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
    {
      clear_rhs_from_active_local_stores ();
      return 0;
    }

  width = GET_MODE_SIZE (GET_MODE (mem));

  if (spill_alias_set)
    {
      bitmap store1 = clear_alias_group->store1_p;
      bitmap store2 = clear_alias_group->store2_p;

      if (bitmap_bit_p (store1, spill_alias_set))
        bitmap_set_bit (store2, spill_alias_set);
      else
        bitmap_set_bit (store1, spill_alias_set);

      if (clear_alias_group->offset_map_size_p < spill_alias_set)
        clear_alias_group->offset_map_size_p = spill_alias_set;

      store_info = pool_alloc (rtx_store_info_pool);

      if (dump_file)
        fprintf (dump_file, " processing spill store %d(%s)\n",
                 (int) spill_alias_set, GET_MODE_NAME (GET_MODE (mem)));
    }
  else if (group_id >= 0)
    {
      /* In the restrictive case where the base is a constant or the
         frame pointer we can do global analysis.  */

      group_info_t group
        = VEC_index (group_info_t, rtx_group_vec, group_id);

      store_info = pool_alloc (rtx_store_info_pool);
      set_usage_bits (group, offset, width);

      if (dump_file)
        fprintf (dump_file, " processing const base store gid=%d[%d..%d)\n",
                 group_id, (int)offset, (int)(offset+width));
    }
  else
    {
      store_info = pool_alloc (cse_store_info_pool);
      insn_info->contains_cselib_groups = true;
      group_id = -1;

      if (dump_file)
        fprintf (dump_file, " processing cselib store [%d..%d)\n",
                 (int)offset, (int)(offset+width));
    }

  /* Check to see if this store causes some other stores to be
     dead.  */
  ptr = active_local_stores;
  last = NULL;

  while (ptr)
    {
      insn_info_t next = ptr->next_local_store;
      store_info_t s_info = ptr->store_rec;
      bool delete = true;

      /* Skip the clobbers.  We delete the active insn if this insn
         shadows the set.  To have been put on the active list, it
         has exactly one set.  */
      while (!s_info->is_set)
        s_info = s_info->next;

      if (s_info->alias_set != spill_alias_set)
        delete = false;
      else if (s_info->alias_set)
        {
          struct clear_alias_mode_holder *entry
            = clear_alias_set_lookup (s_info->alias_set);
          /* Generally, spills cannot be processed if any of the
             references to the slot have a different mode.  But if
             we are in the same block and mode is exactly the same
             between this store and one before in the same block,
             we can still delete it.  */
          if ((GET_MODE (mem) == GET_MODE (s_info->mem))
              && (GET_MODE (mem) == entry->mode))
            {
              delete = true;
              s_info->positions_needed = 0;
            }
          if (dump_file)
            fprintf (dump_file, "    trying spill store in insn=%d alias_set=%d\n",
                     INSN_UID (ptr->insn), (int) s_info->alias_set);
        }
      else if ((s_info->group_id == group_id)
               && (s_info->cse_base == base))
        {
          HOST_WIDE_INT i;
          if (dump_file)
            fprintf (dump_file, "    trying store in insn=%d gid=%d[%d..%d)\n",
                     INSN_UID (ptr->insn), s_info->group_id,
                     (int)s_info->begin, (int)s_info->end);
          for (i = offset; i < offset + width; i++)
            if (i >= s_info->begin && i < s_info->end)
              s_info->positions_needed &= ~(1L << (i - s_info->begin));
        }
      else if (s_info->rhs)
        /* Need to see if it is possible for this store to overwrite
           the value of store_info.  If it is, set the rhs to NULL to
           keep it from being used to remove a load.  */
        {
          if (canon_true_dependence (s_info->mem,
                                     GET_MODE (s_info->mem),
                                     s_info->mem_addr,
                                     mem, rtx_varies_p))
            s_info->rhs = NULL;
        }

      /* An insn can be deleted if every position of every one of
         its s_infos is zero.  */
      if (s_info->positions_needed != 0)
        delete = false;

      if (delete)
        {
          insn_info_t insn_to_delete = ptr;

          if (last)
            last->next_local_store = ptr->next_local_store;
          else
            active_local_stores = ptr->next_local_store;

          delete_dead_store_insn (insn_to_delete);
        }
      else
        last = ptr;

      ptr = next;
    }

  gcc_assert ((unsigned) width < sizeof (store_info->positions_needed) * CHAR_BIT);

  /* Finish filling in the store_info.  */
  store_info->next = insn_info->store_rec;
  insn_info->store_rec = store_info;
  store_info->mem = canon_rtx (mem);
  store_info->alias_set = spill_alias_set;
  store_info->mem_addr = get_addr (XEXP (mem, 0));
  store_info->cse_base = base;
  store_info->positions_needed = (1L << width) - 1;
  store_info->group_id = group_id;
  store_info->begin = offset;
  store_info->end = offset + width;
  store_info->is_set = GET_CODE (body) == SET;

  if (store_info->is_set
      /* No place to keep the value after ra.  */
      && !reload_completed
      /* The careful reviewer may wish to comment on my checking that
         the rhs of a store is always a reg.  */
      && REG_P (SET_SRC (body))
      /* Sometimes the store and reload is used for truncation and
         rounding.  */
      && !(FLOAT_MODE_P (GET_MODE (mem)) && (flag_float_store)))
    store_info->rhs = SET_SRC (body);
  else
    store_info->rhs = NULL;

  /* If this is a clobber, we return 0.  We will only be able to
     delete this insn if there is only one USED store, but we
     can use the clobber to delete other stores earlier.  */
  return store_info->is_set ? 1 : 0;
}


static void
dump_insn_info (const char * start, insn_info_t insn_info)
{
  fprintf (dump_file, "%s insn=%d %s\n", start,
           INSN_UID (insn_info->insn),
           insn_info->store_rec ? "has store" : "naked");
}


/* If the modes are different and the value's source and target do not
   line up, we need to extract the value from lower part of the rhs of
   the store, shift it, and then put it into a form that can be shoved
   into the read_insn.  This function generates a right SHIFT of a
   value that is at least ACCESS_SIZE bytes wide, converted to
   READ_MODE.  The shift sequence is returned, or NULL if we failed
   to find a shift.  */
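
/* For example (illustrative, target-dependent): with a 4-byte store
   and a 2-byte read of its upper half, SHIFT is 16 and ACCESS_SIZE
   is 4; the loop below then looks for the narrowest mode of at
   least 4 bytes in which a single lshr insn is available and
   cheap.  */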

static rtx
find_shift_sequence (rtx read_reg,
                     int access_size,
                     store_info_t store_info,
                     read_info_t read_info,
                     int shift)
{
  enum machine_mode store_mode = GET_MODE (store_info->mem);
  enum machine_mode read_mode = GET_MODE (read_info->mem);
  rtx chosen_seq = NULL;

  /* Some machines like the x86 have shift insns for each size of
     operand.  Other machines like the ppc or the ia-64 may only have
     shift insns that shift values within 32 or 64 bit registers.
     This loop tries to find the smallest shift insn that will right
     justify the value we want to read but is available in one insn on
     the machine.  */

  for (; access_size < UNITS_PER_WORD; access_size *= 2)
    {
      rtx target, new_reg, shift_seq, insn;
      enum machine_mode new_mode;
      int cost;

      /* Try a wider mode if truncating the store mode to ACCESS_SIZE
         bytes requires a real instruction.  */
      if (access_size < GET_MODE_SIZE (store_mode)
          && !TRULY_NOOP_TRUNCATION (access_size * BITS_PER_UNIT,
                                     GET_MODE_BITSIZE (store_mode)))
        continue;

      new_mode = smallest_mode_for_size (access_size * BITS_PER_UNIT,
                                         GET_MODE_CLASS (read_mode));
      new_reg = gen_reg_rtx (new_mode);

      start_sequence ();

      /* In theory we could also check for an ashr.  Ian Taylor knows
         of one dsp where the cost of these two was not the same.  But
         this really is a rare case anyway.  */
      target = expand_binop (new_mode, lshr_optab, new_reg,
                             GEN_INT (shift), new_reg, 1, OPTAB_DIRECT);

      shift_seq = get_insns ();
      end_sequence ();

      if (target != new_reg || shift_seq == NULL)
        continue;

      cost = 0;
      for (insn = shift_seq; insn != NULL_RTX; insn = NEXT_INSN (insn))
        if (INSN_P (insn))
          cost += insn_rtx_cost (PATTERN (insn));

      /* The computation up to here is essentially independent
         of the arguments and could be precomputed.  It may
         not be worth doing so.  We could precompute if
         worthwhile or at least cache the results.  The result
         technically depends on SHIFT, ACCESS_SIZE, and
         GET_MODE_CLASS (READ_MODE).  But in practice the
         answer will depend only on ACCESS_SIZE.  */

      if (cost > COSTS_N_INSNS (1))
        continue;

      /* We found an acceptable shift.  Generate a move to
         take the value from the store and put it into the
         shift pseudo, then shift it, then generate another
         move to put it into the target of the read.  */
      start_sequence ();
      emit_move_insn (new_reg, gen_lowpart (new_mode, store_info->rhs));
      emit_insn (shift_seq);
      convert_move (read_reg, new_reg, 1);

      if (dump_file)
        {
          fprintf (dump_file, " -- adding extract insn r%d:%s = r%d:%s\n",
                   REGNO (new_reg), GET_MODE_NAME (new_mode),
                   REGNO (store_info->rhs), GET_MODE_NAME (store_mode));

          fprintf (dump_file, " -- with shift of r%d by %d\n",
                   REGNO (new_reg), shift);
          fprintf (dump_file, " -- and second extract insn r%d:%s = r%d:%s\n",
                   REGNO (read_reg), GET_MODE_NAME (read_mode),
                   REGNO (new_reg), GET_MODE_NAME (new_mode));
        }

      /* Get the three insn sequence and return it.  */
      chosen_seq = get_insns ();
      end_sequence ();
      break;
    }

  return chosen_seq;
}


/* Take a sequence of:
   A <- r1
   ...
   ... <- A

   and change it into
   r2 <- r1
   A <- r1
   ...
   ... <- r2

   or

   r3 <- extract (r1)
   r3 <- r3 >> shift
   r2 <- extract (r3)
   ... <- r2

   or

   r2 <- extract (r1)
   ... <- r2

   depending on the alignment and the mode of the store and
   subsequent load.


   The STORE_INFO and STORE_INSN are for the store and READ_INFO
   and READ_INSN are for the read.  Return true if the replacement
   went ok.  */

static bool
replace_read (store_info_t store_info, insn_info_t store_insn,
              read_info_t read_info, insn_info_t read_insn, rtx *loc)
{
  enum machine_mode store_mode = GET_MODE (store_info->mem);
  enum machine_mode read_mode = GET_MODE (read_info->mem);
  int shift;
  int access_size; /* In bytes.  */
  rtx read_reg = gen_reg_rtx (read_mode);
  rtx shift_seq = NULL;

  if (!dbg_cnt (dse))
    return false;

  if (GET_MODE_CLASS (read_mode) != GET_MODE_CLASS (store_mode))
    return false;

  /* To get here the read is within the boundaries of the write so
     shift will never be negative.  Start out with the shift being in
     bytes.  */
  if (BYTES_BIG_ENDIAN)
    shift = store_info->end - read_info->end;
  else
    shift = read_info->begin - store_info->begin;

  access_size = shift + GET_MODE_SIZE (read_mode);

  /* From now on it is bits.  */
  shift *= BITS_PER_UNIT;
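
  /* For example (illustrative, little-endian): a 2-byte read of
     bytes [2,4) of a 4-byte store at [0,4) gives shift = 2 bytes
     (16 bits after the multiply above) and access_size = 4.  */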

  /* We need to keep this in perspective.  We are replacing a read
     with a sequence of insns, but the read will almost certainly be
     in cache, so it is not going to be an expensive one.  Thus, we
     are not willing to do a multi insn shift or worse a subroutine
     call to get rid of the read.  */
  if (shift)
    {
      if (access_size > UNITS_PER_WORD || FLOAT_MODE_P (store_mode))
        return false;

      shift_seq = find_shift_sequence (read_reg, access_size, store_info,
                                       read_info, shift);
      if (!shift_seq)
        return false;
    }

  if (dump_file)
    fprintf (dump_file, "replacing load at %d from store at %d\n",
             INSN_UID (read_insn->insn), INSN_UID (store_insn->insn));

  if (validate_change (read_insn->insn, loc, read_reg, 0))
    {
      rtx insns;
      deferred_change_t deferred_change = pool_alloc (deferred_change_pool);

      if (read_mode == store_mode)
        {
          start_sequence ();

          /* The modes are the same and everything lines up.  Just
             generate a simple move.  */
          emit_move_insn (read_reg, store_info->rhs);
          if (dump_file)
            fprintf (dump_file, " -- adding move insn r%d = r%d\n",
                     REGNO (read_reg), REGNO (store_info->rhs));
          insns = get_insns ();
          end_sequence ();
        }
      else if (shift)
        insns = shift_seq;
      else
        {
          /* The modes are different but the least significant bytes
             are in the same place, so we need to extract the value
             from the right end of the rhs of the store.  */
          start_sequence ();
          convert_move (read_reg, store_info->rhs, 1);

          if (dump_file)
            fprintf (dump_file, " -- adding extract insn r%d:%s = r%d:%s\n",
                     REGNO (read_reg), GET_MODE_NAME (read_mode),
                     REGNO (store_info->rhs), GET_MODE_NAME (store_mode));
          insns = get_insns ();
          end_sequence ();
        }

      /* Insert this right before the store insn where it will be safe
         from later insns that might change it before the read.  */
      emit_insn_before (insns, store_insn->insn);

      /* And now for the kludge part: cselib croaks if you just
         return at this point.  There are two reasons for this:

         1) Cselib has an idea of how many pseudos there are and
         that does not include the new ones we just added.

         2) Cselib does not know about the move insn we added
         above the store_info, and there is no way to tell it
         about it, because it has "moved on".

         Problem (1) is fixable with a certain amount of engineering.
         Problem (2) requires starting the bb from scratch.  This
         could be expensive.

         So we are just going to have to lie.  The move/extraction
         insns are not really an issue, cselib did not see them.  But
         the use of the new pseudo read_insn is a real problem because
         cselib has not scanned this insn.  The way that we solve this
         problem is that we are just going to put the mem back for now
         and when we are finished with the block, we undo this.  We
         keep a table of mems to get rid of.  At the end of the basic
         block we can put them back.  */

      *loc = read_info->mem;
      deferred_change->next = deferred_change_list;
      deferred_change_list = deferred_change;
      deferred_change->loc = loc;
      deferred_change->reg = read_reg;

      /* Get rid of the read_info, from the point of view of the
         rest of dse, play like this read never happened.  */
      read_insn->read_rec = read_info->next;
      pool_free (read_info_pool, read_info);
      return true;
    }
  else
    {
      if (dump_file)
        fprintf (dump_file, " -- validation failure\n");
      return false;
    }
}

/* A for_each_rtx callback in which DATA is the bb_info.  Check to see
   if LOC is a mem and, if it is, look at the address and kill any
   appropriate stores that may be active.  */

static int
check_mem_read_rtx (rtx *loc, void *data)
{
  rtx mem = *loc;
  bb_info_t bb_info;
  insn_info_t insn_info;
  HOST_WIDE_INT offset = 0;
  HOST_WIDE_INT width = 0;
  alias_set_type spill_alias_set = 0;
  cselib_val *base = NULL;
  int group_id;
  read_info_t read_info;

  if (!mem || !MEM_P (mem))
    return 0;

  bb_info = (bb_info_t) data;
  insn_info = bb_info->last_insn;

  if ((MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
      || (MEM_VOLATILE_P (mem)))
    {
      if (dump_file)
        fprintf (dump_file, " adding wild read, volatile or barrier.\n");
      add_wild_read (bb_info);
      insn_info->cannot_delete = true;
      return 0;
    }

  /* If it is reading readonly mem, then there can be no conflict with
     another write.  */
  if (MEM_READONLY_P (mem))
    return 0;

  if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
    {
      if (dump_file)
        fprintf (dump_file, " adding wild read, canon_address failure.\n");
      add_wild_read (bb_info);
      return 0;
    }

  if (GET_MODE (mem) == BLKmode)
    width = -1;
  else
    width = GET_MODE_SIZE (GET_MODE (mem));

  read_info = pool_alloc (read_info_pool);
  read_info->group_id = group_id;
  read_info->mem = mem;
  read_info->alias_set = spill_alias_set;
  read_info->begin = offset;
  read_info->end = offset + width;
  read_info->next = insn_info->read_rec;
  insn_info->read_rec = read_info;

  /* We ignore the clobbers in store_info.  This is mildly aggressive,
     but there really should not be a clobber followed by a read.  */
1716
1717 if (spill_alias_set)
1718 {
1719 insn_info_t i_ptr = active_local_stores;
1720 insn_info_t last = NULL;
1721
1722 if (dump_file)
1723 fprintf (dump_file, " processing spill load %d\n",
1724 (int) spill_alias_set);
1725
1726 while (i_ptr)
1727 {
1728 store_info_t store_info = i_ptr->store_rec;
1729
1730 /* Skip the clobbers. */
1731 while (!store_info->is_set)
1732 store_info = store_info->next;
1733
1734 if (store_info->alias_set == spill_alias_set)
1735 {
1736 if (dump_file)
1737 dump_insn_info ("removing from active", i_ptr);
1738
1739 if (last)
1740 last->next_local_store = i_ptr->next_local_store;
1741 else
1742 active_local_stores = i_ptr->next_local_store;
1743 }
1744 else
1745 last = i_ptr;
1746 i_ptr = i_ptr->next_local_store;
1747 }
1748 }
1749 else if (group_id >= 0)
1750 {
1751 /* This is the restricted case where the base is a constant or
1752 the frame pointer and offset is a constant. */
1753 insn_info_t i_ptr = active_local_stores;
1754 insn_info_t last = NULL;
1755
1756 if (dump_file)
1757 {
1758 if (width == -1)
1759 fprintf (dump_file, " processing const load gid=%d[BLK]\n",
1760 group_id);
1761 else
1762 fprintf (dump_file, " processing const load gid=%d[%d..%d)\n",
1763 group_id, (int)offset, (int)(offset+width));
1764 }
1765
1766 while (i_ptr)
1767 {
1768 bool remove = false;
1769 store_info_t store_info = i_ptr->store_rec;
1770
1771 /* Skip the clobbers. */
1772 while (!store_info->is_set)
1773 store_info = store_info->next;
1774
1775 /* There are three cases here. */
1776 if (store_info->group_id < 0)
1777 /* We have a cselib store followed by a read from a
1778 const base. */
1779 remove
1780 = canon_true_dependence (store_info->mem,
1781 GET_MODE (store_info->mem),
1782 store_info->mem_addr,
1783 mem, rtx_varies_p);
1784
1785 else if (group_id == store_info->group_id)
1786 {
1787 /* This is a block mode load. We may get lucky and
1788 canon_true_dependence may save the day. */
1789 if (width == -1)
1790 remove
1791 = canon_true_dependence (store_info->mem,
1792 GET_MODE (store_info->mem),
1793 store_info->mem_addr,
1794 mem, rtx_varies_p);
1795
1796 /* If this read is just reading back something that we just
1797 stored, rewrite the read. */
1798 else
1799 {
1800 if (store_info->rhs
1801 && (offset >= store_info->begin)
1802 && (offset + width <= store_info->end))
1803 {
1804 int mask = ((1L << width) - 1) << (offset - store_info->begin);
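/* Worked example (values assumed for illustration): for a store
   covering bytes [0, 8) and a 4-byte read at offset 2, mask
   = ((1L << 4) - 1) << 2 = 0x3c; the read is rewritten from the
   stored rhs below only if all four of those byte positions are
   still set in positions_needed. */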
1805
1806 if ((store_info->positions_needed & mask) == mask
1807 && replace_read (store_info, i_ptr,
1808 read_info, insn_info, loc))
1809 return 0;
1810 }
1811 /* The bases are the same, just see if the offsets
1812 overlap. */
1813 if ((offset < store_info->end)
1814 && (offset + width > store_info->begin))
1815 remove = true;
1816 }
1817 }
1818
1819 /* else
1820 The else case that is missing here is that the
1821 bases are constant but different. There is nothing
1822 to do here because there is no overlap. */
1823
1824 if (remove)
1825 {
1826 if (dump_file)
1827 dump_insn_info ("removing from active", i_ptr);
1828
1829 if (last)
1830 last->next_local_store = i_ptr->next_local_store;
1831 else
1832 active_local_stores = i_ptr->next_local_store;
1833 }
1834 else
1835 last = i_ptr;
1836 i_ptr = i_ptr->next_local_store;
1837 }
1838 }
1839 else
1840 {
1841 insn_info_t i_ptr = active_local_stores;
1842 insn_info_t last = NULL;
1843 if (dump_file)
1844 {
1845 fprintf (dump_file, " processing cselib load mem:");
1846 print_inline_rtx (dump_file, mem, 0);
1847 fprintf (dump_file, "\n");
1848 }
1849
1850 while (i_ptr)
1851 {
1852 bool remove = false;
1853 store_info_t store_info = i_ptr->store_rec;
1854
1855 if (dump_file)
1856 fprintf (dump_file, " processing cselib load against insn %d\n",
1857 INSN_UID (i_ptr->insn));
1858
1859 /* Skip the clobbers. */
1860 while (!store_info->is_set)
1861 store_info = store_info->next;
1862
1863 /* If this read is just reading back something that we just
1864 stored, rewrite the read. */
1865 if (store_info->rhs
1866 && store_info->group_id == -1
1867 && store_info->cse_base == base
1868 && (offset >= store_info->begin)
1869 && (offset + width <= store_info->end))
1870 {
1871 int mask = ((1L << width) - 1) << (offset - store_info->begin);
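/* Same byte-position mask check as in the const-base case above,
   here applied when the store and read share a cselib base. */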
1872
1873 if ((store_info->positions_needed & mask) == mask
1874 && replace_read (store_info, i_ptr,
1875 read_info, insn_info, loc))
1876 return 0;
1877 }
1878
1879 if (!store_info->alias_set)
1880 remove = canon_true_dependence (store_info->mem,
1881 GET_MODE (store_info->mem),
1882 store_info->mem_addr,
1883 mem, rtx_varies_p);
1884
1885 if (remove)
1886 {
1887 if (dump_file)
1888 dump_insn_info ("removing from active", i_ptr);
1889
1890 if (last)
1891 last->next_local_store = i_ptr->next_local_store;
1892 else
1893 active_local_stores = i_ptr->next_local_store;
1894 }
1895 else
1896 last = i_ptr;
1897 i_ptr = i_ptr->next_local_store;
1898 }
1899 }
1900 return 0;
1901 }
1902
1903 /* A note_uses callback that applies check_mem_read_rtx, via
1904 for_each_rtx, to every subexpression of *LOC. DATA points to the
1905 BB_INFO for the current basic block. */
1906
1907 static void
1908 check_mem_read_use (rtx *loc, void *data)
1909 {
1910 for_each_rtx (loc, check_mem_read_rtx, data);
1911 }
1912
1913 /* Apply record_store to all candidate stores in INSN. Mark INSN as
1914 cannot_delete if some part of it is not a candidate store and
1915 assigns to a non-register target. */
1916
1917 static void
1918 scan_insn (bb_info_t bb_info, rtx insn)
1919 {
1920 rtx body;
1921 insn_info_t insn_info = pool_alloc (insn_info_pool);
1922 int mems_found = 0;
1923 memset (insn_info, 0, sizeof (struct insn_info));
1924
1925 if (dump_file)
1926 fprintf (dump_file, "\n**scanning insn=%d\n",
1927 INSN_UID (insn));
1928
1929 insn_info->prev_insn = bb_info->last_insn;
1930 insn_info->insn = insn;
1931 bb_info->last_insn = insn_info;
1932
1933
1934 /* Cselib clears the table for this case, so we have to essentially
1935 do the same. */
1936 if (NONJUMP_INSN_P (insn)
1937 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
1938 && MEM_VOLATILE_P (PATTERN (insn)))
1939 {
1940 add_wild_read (bb_info);
1941 insn_info->cannot_delete = true;
1942 return;
1943 }
1944
1945 /* Look at all of the uses in the insn. */
1946 note_uses (&PATTERN (insn), check_mem_read_use, bb_info);
1947
1948 if (CALL_P (insn))
1949 {
1950 insn_info->cannot_delete = true;
1951 /* Const functions cannot do anything bad, i.e. read memory;
1952 however, they can read their parameters, which may have been
1953 pushed onto the stack. */
1954 if (CONST_OR_PURE_CALL_P (insn) && !pure_call_p (insn))
1955 {
1956 insn_info_t i_ptr = active_local_stores;
1957 insn_info_t last = NULL;
1958
1959 if (dump_file)
1960 fprintf (dump_file, "const call %d\n", INSN_UID (insn));
1961
1962 while (i_ptr)
1963 {
1964 store_info_t store_info = i_ptr->store_rec;
1965
1966 /* Skip the clobbers. */
1967 while (!store_info->is_set)
1968 store_info = store_info->next;
1969
1970 /* Remove the frame related stores. */
1971 if (store_info->group_id >= 0
1972 && VEC_index (group_info_t, rtx_group_vec, store_info->group_id)->frame_related)
1973 {
1974 if (dump_file)
1975 dump_insn_info ("removing from active", i_ptr);
1976
1977 if (last)
1978 last->next_local_store = i_ptr->next_local_store;
1979 else
1980 active_local_stores = i_ptr->next_local_store;
1981 }
1982 else
1983 last = i_ptr;
1984 i_ptr = i_ptr->next_local_store;
1985 }
1986
1987 insn_info->stack_read = true;
1988
1989 return;
1990 }
1991
1992 /* Every other call, including pure functions, may read memory. */
1993 add_wild_read (bb_info);
1994 return;
1995 }
1996
1997 /* Assuming that there are sets in these insns, we cannot delete
1998 them. */
1999 if ((GET_CODE (PATTERN (insn)) == CLOBBER)
2000 || volatile_refs_p (PATTERN (insn))
2001 || (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))
2002 || (RTX_FRAME_RELATED_P (insn))
2003 || find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX))
2004 insn_info->cannot_delete = true;
2005
2006 body = PATTERN (insn);
2007 if (GET_CODE (body) == PARALLEL)
2008 {
2009 int i;
2010 for (i = 0; i < XVECLEN (body, 0); i++)
2011 mems_found += record_store (XVECEXP (body, 0, i), bb_info);
2012 }
2013 else
2014 mems_found += record_store (body, bb_info);
2015
2016 if (dump_file)
2017 fprintf (dump_file, "mems_found = %d, cannot_delete = %s\n",
2018 mems_found, insn_info->cannot_delete ? "true" : "false");
2019
2020 /* If we found exactly one set of a mem, and the insn has not been
2021 marked cannot delete, add it into the active_local_stores so that
2022 it can be locally deleted if found dead. Otherwise mark it as
2023 cannot delete. This simplifies the processing later. */
2024 if (mems_found == 1 && !insn_info->cannot_delete)
2025 {
2026 insn_info->next_local_store = active_local_stores;
2027 active_local_stores = insn_info;
2028 }
2029 else
2030 insn_info->cannot_delete = true;
2031 }
2032
2033
2034 /* Remove BASE from the set of active_local_stores. This is a
2035 callback from cselib that is used to get rid of the stores in
2036 active_local_stores. */
2037
2038 static void
2039 remove_useless_values (cselib_val *base)
2040 {
2041 insn_info_t insn_info = active_local_stores;
2042 insn_info_t last = NULL;
2043
2044 while (insn_info)
2045 {
2046 store_info_t store_info = insn_info->store_rec;
2047 bool delete = false;
2048
2049 /* If ANY of the store_infos match the cselib group that is
2050 being deleted, then the insn cannot be deleted. */
2051 while (store_info)
2052 {
2053 if ((store_info->group_id == -1)
2054 && (store_info->cse_base == base))
2055 {
2056 delete = true;
2057 break;
2058 }
2059 store_info = store_info->next;
2060 }
2061
2062 if (delete)
2063 {
2064 if (last)
2065 last->next_local_store = insn_info->next_local_store;
2066 else
2067 active_local_stores = insn_info->next_local_store;
2068 free_store_info (insn_info);
2069 }
2070 else
2071 last = insn_info;
2072
2073 insn_info = insn_info->next_local_store;
2074 }
2075 }
2076
2077
2078 /* Do all of step 1. */
2079
2080 static void
2081 dse_step1 (void)
2082 {
2083 basic_block bb;
2084
2085 cselib_init (false);
2086 all_blocks = BITMAP_ALLOC (NULL);
2087 bitmap_set_bit (all_blocks, ENTRY_BLOCK);
2088 bitmap_set_bit (all_blocks, EXIT_BLOCK);
2089
2090 FOR_ALL_BB (bb)
2091 {
2092 insn_info_t ptr;
2093 bb_info_t bb_info = pool_alloc (bb_info_pool);
2094
2095 memset (bb_info, 0, sizeof (struct bb_info));
2096 bitmap_set_bit (all_blocks, bb->index);
2097
2098 bb_table[bb->index] = bb_info;
2099 cselib_discard_hook = remove_useless_values;
2100
2101 if (bb->index >= NUM_FIXED_BLOCKS)
2102 {
2103 rtx insn;
2104
2105 cse_store_info_pool
2106 = create_alloc_pool ("cse_store_info_pool",
2107 sizeof (struct store_info), 100);
2108 active_local_stores = NULL;
2109 cselib_clear_table ();
2110
2111 /* Scan the insns. */
2112 FOR_BB_INSNS (bb, insn)
2113 {
2114 if (INSN_P (insn))
2115 scan_insn (bb_info, insn);
2116 cselib_process_insn (insn);
2117 }
2118
2119 /* This is something of a hack, because the global algorithm
2120 is supposed to take care of the case where stores go dead
2121 at the end of the function. However, the global
2122 algorithm must take a more conservative view of block
2123 mode reads than the local alg does. So to get the case
2124 where you have a store to the frame followed by a non
2125 overlapping block mode read, we look at the active local
2126 stores at the end of the function and delete all of the
2127 frame and spill based ones. */
2128 if (stores_off_frame_dead_at_return
2129 && (EDGE_COUNT (bb->succs) == 0
2130 || (single_succ_p (bb)
2131 && single_succ (bb) == EXIT_BLOCK_PTR
2132 && ! current_function_calls_eh_return)))
2133 {
2134 insn_info_t i_ptr = active_local_stores;
2135 while (i_ptr)
2136 {
2137 store_info_t store_info = i_ptr->store_rec;
2138
2139 /* Skip the clobbers. */
2140 while (!store_info->is_set)
2141 store_info = store_info->next;
2142 if (store_info->alias_set)
2143 delete_dead_store_insn (i_ptr);
2144 else
2145 if (store_info->group_id >= 0)
2146 {
2147 group_info_t group
2148 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2149 if (group->frame_related)
2150 delete_dead_store_insn (i_ptr);
2151 }
2152
2153 i_ptr = i_ptr->next_local_store;
2154 }
2155 }
2156
2157 /* Get rid of the loads that were discovered in
2158 replace_read. Cselib is finished with this block. */
2159 while (deferred_change_list)
2160 {
2161 deferred_change_t next = deferred_change_list->next;
2162
2163 /* There is no reason to validate this change. That was
2164 done earlier. */
2165 *deferred_change_list->loc = deferred_change_list->reg;
2166 pool_free (deferred_change_pool, deferred_change_list);
2167 deferred_change_list = next;
2168 }
2169
2170 /* Get rid of all of the cselib based store_infos in this
2171 block and mark the containing insns as not being
2172 deletable. */
2173 ptr = bb_info->last_insn;
2174 while (ptr)
2175 {
2176 if (ptr->contains_cselib_groups)
2177 free_store_info (ptr);
2178 ptr = ptr->prev_insn;
2179 }
2180
2181 free_alloc_pool (cse_store_info_pool);
2182 }
2183 }
2184
2185 cselib_finish ();
2186 htab_empty (rtx_group_table);
2187 }
2188
2189 \f
2190 /*----------------------------------------------------------------------------
2191 Second step.
2192
2193 Assign each byte position in the stores that we are going to
2194 analyze globally to a position in the bitmaps. Returns true if
2195 there are any bit positions assigned.
2196 ----------------------------------------------------------------------------*/
2197
2198 static void
2199 dse_step2_init (void)
2200 {
2201 unsigned int i;
2202 group_info_t group;
2203
2204 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2205 {
2206 /* For all non stack related bases, we only consider a store to
2207 be deletable if there are two or more stores for that
2208 position. This is because it takes one store to make the
2209 other store redundant. However, for the stores that are
2210 stack related, we consider them if there is only one store
2211 for the position. We do this because the stack related
2212 stores can be deleted if there is no read between them and
2213 the end of the function.
2214
2215 To make this work in the current framework, we take the stack
2216 related bases and add all of the bits from store1 into store2.
2217 This has the effect of making them eligible even if there is
2218 only one store. */
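/* For example, a frame-related byte position whose bit is set only
   in store1_p ends up, after the bitmap_ior_into calls below, set
   in store2_p as well, and is therefore treated as if a second
   store to it existed. */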
2219
2220 if (stores_off_frame_dead_at_return && group->frame_related)
2221 {
2222 bitmap_ior_into (group->store2_n, group->store1_n);
2223 bitmap_ior_into (group->store2_p, group->store1_p);
2224 if (dump_file)
2225 fprintf (dump_file, "group %d is frame related ", i);
2226 }
2227
2228 group->offset_map_size_n++;
2229 group->offset_map_n = XNEWVEC (int, group->offset_map_size_n);
2230 group->offset_map_size_p++;
2231 group->offset_map_p = XNEWVEC (int, group->offset_map_size_p);
2232 group->process_globally = false;
2233 if (dump_file)
2234 {
2235 fprintf (dump_file, "group %d(%d+%d): ", i,
2236 (int)bitmap_count_bits (group->store2_n),
2237 (int)bitmap_count_bits (group->store2_p));
2238 bitmap_print (dump_file, group->store2_n, "n ", " ");
2239 bitmap_print (dump_file, group->store2_p, "p ", "\n");
2240 }
2241 }
2242 }
2243
2244
2245 /* Init the offset tables for the normal case. */
2246
2247 static bool
2248 dse_step2_nospill (void)
2249 {
2250 unsigned int i;
2251 group_info_t group;
2252 /* Position 0 is unused because 0 is used in the maps to mean
2253 unused. */
2254 current_position = 1;
2255
2256 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2257 {
2258 bitmap_iterator bi;
2259 unsigned int j;
2260
2261 if (group == clear_alias_group)
2262 continue;
2263
2264 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2265 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2266 bitmap_clear (group->group_kill);
2267
2268 EXECUTE_IF_SET_IN_BITMAP (group->store2_n, 0, j, bi)
2269 {
2270 bitmap_set_bit (group->group_kill, current_position);
2271 group->offset_map_n[j] = current_position++;
2272 group->process_globally = true;
2273 }
2274 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2275 {
2276 bitmap_set_bit (group->group_kill, current_position);
2277 group->offset_map_p[j] = current_position++;
2278 group->process_globally = true;
2279 }
2280 }
2281 return current_position != 1;
2282 }
2283
2284
2285 /* Init the offset tables for the spill case. */
2286
2287 static bool
2288 dse_step2_spill (void)
2289 {
2290 unsigned int j;
2291 group_info_t group = clear_alias_group;
2292 bitmap_iterator bi;
2293
2294 /* Position 0 is unused because 0 is used in the maps to mean
2295 unused. */
2296 current_position = 1;
2297
2298 if (dump_file)
2299 {
2300 bitmap_print (dump_file, clear_alias_sets,
2301 "clear alias sets ", "\n");
2302 bitmap_print (dump_file, disqualified_clear_alias_sets,
2303 "disqualified clear alias sets ", "\n");
2304 }
2305
2306 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2307 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2308 bitmap_clear (group->group_kill);
2309
2310 /* Remove the disqualified positions from the store2_p set. */
2311 bitmap_and_compl_into (group->store2_p, disqualified_clear_alias_sets);
2312
2313 /* We do not need to process the store2_n set because
2314 alias_sets are always positive. */
2315 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2316 {
2317 bitmap_set_bit (group->group_kill, current_position);
2318 group->offset_map_p[j] = current_position++;
2319 group->process_globally = true;
2320 }
2321
2322 return current_position != 1;
2323 }
2324
2325
2326 \f
2327 /*----------------------------------------------------------------------------
2328 Third step.
2329
2330 Build the bit vectors for the transfer functions.
2331 ----------------------------------------------------------------------------*/
2332
2333
2334 /* Note that this is NOT a general purpose function. Any mem that has
2335 an alias set registered here is expected to be COMPLETELY unaliased:
2336 i.e. its addresses are not and need not be examined.
2337
2338 It is known that all references to this address will have this
2339 alias set and there are NO other references to this address in the
2340 function.
2341
2342 Currently the only place that is known to be clean enough to use
2343 this interface is the code that assigns the spill locations.
2344
2345 All of the mems that have alias_sets registered are subjected to a
2346 very powerful form of dse where function calls, volatile reads and
2347 writes, and reads from random locations are not taken into account.
2348
2349 It is also assumed that these locations go dead when the function
2350 returns. This assumption could be relaxed if places were found
2351 where it does not hold.
2352
2353 The MODE is passed in and saved. The mode of each load or store to
2354 a mem with ALIAS_SET is checked against MODE. If the size of that
2355 load or store is different from MODE, processing is halted on this
2356 alias set. For the vast majority of alias sets, all of the loads
2357 and stores will use the same mode. But vectors are treated
2358 differently: the alias set is established for the entire vector,
2359 but reload will insert loads and stores for individual elements and
2360 we do not necessarily have the information to track those separate
2361 elements. So when we see a mode mismatch, we just bail. */
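/* A hypothetical caller in the spill-slot assignment code might do

     set_mem_alias_set (slot, alias_set);
     dse_record_singleton_alias_set (alias_set, GET_MODE (slot));

   when SLOT is created, and undo it with
   dse_invalidate_singleton_alias_set (alias_set) if the slot is
   later shared in a way that breaks the singleton assumption. */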
2362
2363
2364 void
2365 dse_record_singleton_alias_set (alias_set_type alias_set,
2366 enum machine_mode mode)
2367 {
2368 struct clear_alias_mode_holder tmp_holder;
2369 struct clear_alias_mode_holder *entry;
2370 void **slot;
2371
2372 /* If we are not going to run dse, we need to return now or there
2373 will be problems with allocating the bitmaps. */
2374 if ((!gate_dse()) || !alias_set)
2375 return;
2376
2377 if (!clear_alias_sets)
2378 {
2379 clear_alias_sets = BITMAP_ALLOC (NULL);
2380 disqualified_clear_alias_sets = BITMAP_ALLOC (NULL);
2381 clear_alias_mode_table = htab_create (11, clear_alias_mode_hash,
2382 clear_alias_mode_eq, NULL);
2383 clear_alias_mode_pool = create_alloc_pool ("clear_alias_mode_pool",
2384 sizeof (struct clear_alias_mode_holder), 100);
2385 }
2386
2387 bitmap_set_bit (clear_alias_sets, alias_set);
2388
2389 tmp_holder.alias_set = alias_set;
2390
2391 slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, INSERT);
2392 gcc_assert (*slot == NULL);
2393
2394 *slot = entry = pool_alloc (clear_alias_mode_pool);
2395 entry->alias_set = alias_set;
2396 entry->mode = mode;
2397 }
2398
2399
2400 /* Remove ALIAS_SET from the sets of stack slots being considered. */
2401
2402 void
2403 dse_invalidate_singleton_alias_set (alias_set_type alias_set)
2404 {
2405 if ((!gate_dse()) || !alias_set)
2406 return;
2407
2408 bitmap_clear_bit (clear_alias_sets, alias_set);
2409 }
2410
2411
2412 /* Look up the bitmap index for OFFSET in GROUP_INFO. If it is not
2413 there, return 0. */
2414
2415 static int
2416 get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset)
2417 {
2418 if (offset < 0)
2419 {
2420 HOST_WIDE_INT offset_p = -offset;
2421 if (offset_p >= group_info->offset_map_size_n)
2422 return 0;
2423 return group_info->offset_map_n[offset_p];
2424 }
2425 else
2426 {
2427 if (offset >= group_info->offset_map_size_p)
2428 return 0;
2429 return group_info->offset_map_p[offset];
2430 }
2431 }
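/* For instance (illustrative values): with offset_map_p = {0, 3, 4}
   and offset_map_size_p = 3, offset 1 maps to bitmap index 3,
   offset 0 returns 0 ("not tracked"), and offset 7 is out of range
   and also returns 0. */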
2432
2433
2434 /* Process the STORE_INFOs into the bitmaps GEN and KILL. KILL
2435 may be NULL. */
2436
2437 static void
2438 scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill)
2439 {
2440 while (store_info)
2441 {
2442 HOST_WIDE_INT i;
2443 group_info_t group_info
2444 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2445 if (group_info->process_globally)
2446 for (i = store_info->begin; i < store_info->end; i++)
2447 {
2448 int index = get_bitmap_index (group_info, i);
2449 if (index != 0)
2450 {
2451 bitmap_set_bit (gen, index);
2452 if (kill)
2453 bitmap_clear_bit (kill, index);
2454 }
2455 }
2456 store_info = store_info->next;
2457 }
2458 }
2459
2460
2461 /* Process the STORE_INFOs into the bitmaps GEN and KILL. KILL
2462 may be NULL. */
2463
2464 static void
2465 scan_stores_spill (store_info_t store_info, bitmap gen, bitmap kill)
2466 {
2467 while (store_info)
2468 {
2469 if (store_info->alias_set)
2470 {
2471 int index = get_bitmap_index (clear_alias_group,
2472 store_info->alias_set);
2473 if (index != 0)
2474 {
2475 bitmap_set_bit (gen, index);
2476 if (kill)
2477 bitmap_clear_bit (kill, index);
2478 }
2479 }
2480 store_info = store_info->next;
2481 }
2482 }
2483
2484
2485 /* Process the READ_INFOs into the bitmaps GEN and KILL. KILL
2486 may be NULL. */
2487
2488 static void
2489 scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
2490 {
2491 read_info_t read_info = insn_info->read_rec;
2492 int i;
2493 group_info_t group;
2494
2495 /* For const function calls kill the stack related stores. */
2496 if (insn_info->stack_read)
2497 {
2498 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2499 if (group->process_globally && group->frame_related)
2500 {
2501 if (kill)
2502 bitmap_ior_into (kill, group->group_kill);
2503 bitmap_and_compl_into (gen, group->group_kill);
2504 }
2505 }
2506
2507 while (read_info)
2508 {
2509 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2510 {
2511 if (group->process_globally)
2512 {
2513 if (i == read_info->group_id)
2514 {
2515 if (read_info->begin > read_info->end)
2516 {
2517 /* Begin > end for block mode reads. */
2518 if (kill)
2519 bitmap_ior_into (kill, group->group_kill);
2520 bitmap_and_compl_into (gen, group->group_kill);
2521 }
2522 else
2523 {
2524 /* The groups are the same, just process the
2525 offsets. */
2526 HOST_WIDE_INT j;
2527 for (j = read_info->begin; j < read_info->end; j++)
2528 {
2529 int index = get_bitmap_index (group, j);
2530 if (index != 0)
2531 {
2532 if (kill)
2533 bitmap_set_bit (kill, index);
2534 bitmap_clear_bit (gen, index);
2535 }
2536 }
2537 }
2538 }
2539 else
2540 {
2541 /* The groups are different. If the alias sets
2542 conflict, clear the entire group. We only need
2543 to apply this test if the read_info is a cselib
2544 read. Anything with a constant base cannot alias
2545 something else with a different constant
2546 base. */
2547 if ((read_info->group_id < 0)
2548 && canon_true_dependence (group->base_mem,
2549 QImode,
2550 group->canon_base_mem,
2551 read_info->mem, rtx_varies_p))
2552 {
2553 if (kill)
2554 bitmap_ior_into (kill, group->group_kill);
2555 bitmap_and_compl_into (gen, group->group_kill);
2556 }
2557 }
2558 }
2559 }
2560
2561 read_info = read_info->next;
2562 }
2563 }
2564
2565 /* Process the READ_INFOs into the bitmaps GEN and KILL. KILL
2566 may be NULL. */
2567
2568 static void
2569 scan_reads_spill (read_info_t read_info, bitmap gen, bitmap kill)
2570 {
2571 while (read_info)
2572 {
2573 if (read_info->alias_set)
2574 {
2575 int index = get_bitmap_index (clear_alias_group,
2576 read_info->alias_set);
2577 if (index != 0)
2578 {
2579 if (kill)
2580 bitmap_set_bit (kill, index);
2581 bitmap_clear_bit (gen, index);
2582 }
2583 }
2584
2585 read_info = read_info->next;
2586 }
2587 }
2588
2589
2590 /* Return the insn in BB_INFO before the first wild read or, if there
2591 are no wild reads in the block, the last insn. */
2592
2593 static insn_info_t
2594 find_insn_before_first_wild_read (bb_info_t bb_info)
2595 {
2596 insn_info_t insn_info = bb_info->last_insn;
2597 insn_info_t last_wild_read = NULL;
2598
2599 while (insn_info)
2600 {
2601 if (insn_info->wild_read)
2602 {
2603 last_wild_read = insn_info->prev_insn;
2604 /* Block starts with wild read. */
2605 if (!last_wild_read)
2606 return NULL;
2607 }
2608
2609 insn_info = insn_info->prev_insn;
2610 }
2611
2612 if (last_wild_read)
2613 return last_wild_read;
2614 else
2615 return bb_info->last_insn;
2616 }
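/* For example (illustrative), in a block whose insns are i1 i2 i3 i4
   with a wild read at i3, this returns the insn_info of i2, so the
   gen/kill scan below starts at i2 and never looks at i3 or i4. */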
2617
2618
2619 /* Scan the insns in BB_INFO starting at PTR and going to the top of
2620 the block in order to build the gen and kill sets for the block.
2621 We start at ptr which may be the last insn in the block or may be
2622 the first insn with a wild read. In the latter case we are able to
2623 skip the rest of the block because it just does not matter:
2624 anything that happens is hidden by the wild read. */
2625
2626 static void
2627 dse_step3_scan (bool for_spills, basic_block bb)
2628 {
2629 bb_info_t bb_info = bb_table[bb->index];
2630 insn_info_t insn_info;
2631
2632 if (for_spills)
2633 /* There are no wild reads in the spill case. */
2634 insn_info = bb_info->last_insn;
2635 else
2636 insn_info = find_insn_before_first_wild_read (bb_info);
2637
2638 /* In the spill case, or in the no_spill case when there is no wild
2639 read in the block, we will need a kill set. */
2640 if (insn_info == bb_info->last_insn)
2641 {
2642 if (bb_info->kill)
2643 bitmap_clear (bb_info->kill);
2644 else
2645 bb_info->kill = BITMAP_ALLOC (NULL);
2646 }
2647 else
2648 if (bb_info->kill)
2649 BITMAP_FREE (bb_info->kill);
2650
2651 while (insn_info)
2652 {
2653 /* There may have been code deleted by the dce pass run before
2654 this phase. */
2655 if (insn_info->insn && INSN_P (insn_info->insn))
2656 {
2657 /* Process the read(s) last. */
2658 if (for_spills)
2659 {
2660 scan_stores_spill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2661 scan_reads_spill (insn_info->read_rec, bb_info->gen, bb_info->kill);
2662 }
2663 else
2664 {
2665 scan_stores_nospill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2666 scan_reads_nospill (insn_info, bb_info->gen, bb_info->kill);
2667 }
2668 }
2669
2670 insn_info = insn_info->prev_insn;
2671 }
2672 }
2673
2674
2675 /* Set the gen set of the exit block, and also any block with no
2676 successors that does not have a wild read. */
2677
2678 static void
2679 dse_step3_exit_block_scan (bb_info_t bb_info)
2680 {
2681 /* The gen set is all 0's for the exit block except for the
2682 frame_pointer_group. */
2683
2684 if (stores_off_frame_dead_at_return)
2685 {
2686 unsigned int i;
2687 group_info_t group;
2688
2689 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2690 {
2691 if (group->process_globally && group->frame_related)
2692 bitmap_ior_into (bb_info->gen, group->group_kill);
2693 }
2694 }
2695 }
2696
2697
2698 /* Find all of the blocks that are not backwards reachable from the
2699 exit block or any block with no successors (BB). These are the
2700 infinite loops or infinite self loops. These blocks will still
2701 have their bits set in UNREACHABLE_BLOCKS. */
2702
2703 static void
2704 mark_reachable_blocks (sbitmap unreachable_blocks, basic_block bb)
2705 {
2706 edge e;
2707 edge_iterator ei;
2708
2709 if (TEST_BIT (unreachable_blocks, bb->index))
2710 {
2711 RESET_BIT (unreachable_blocks, bb->index);
2712 FOR_EACH_EDGE (e, ei, bb->preds)
2713 {
2714 mark_reachable_blocks (unreachable_blocks, e->src);
2715 }
2716 }
2717 }
2718
2719 /* Build the transfer functions for the function. */
2720
2721 static void
2722 dse_step3 (bool for_spills)
2723 {
2724 basic_block bb;
2725 sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block);
2726 sbitmap_iterator sbi;
2727 bitmap all_ones = NULL;
2728 unsigned int i;
2729
2730 sbitmap_ones (unreachable_blocks);
2731
2732 FOR_ALL_BB (bb)
2733 {
2734 bb_info_t bb_info = bb_table[bb->index];
2735 if (bb_info->gen)
2736 bitmap_clear (bb_info->gen);
2737 else
2738 bb_info->gen = BITMAP_ALLOC (NULL);
2739
2740 if (bb->index == ENTRY_BLOCK)
2741 ;
2742 else if (bb->index == EXIT_BLOCK)
2743 dse_step3_exit_block_scan (bb_info);
2744 else
2745 dse_step3_scan (for_spills, bb);
2746 if (EDGE_COUNT (bb->succs) == 0)
2747 mark_reachable_blocks (unreachable_blocks, bb);
2748
2749 /* If this is the second time dataflow is run, delete the old
2750 sets. */
2751 if (bb_info->in)
2752 BITMAP_FREE (bb_info->in);
2753 if (bb_info->out)
2754 BITMAP_FREE (bb_info->out);
2755 }
2756
2757 /* For any block in an infinite loop, we must initialize the out set
2758 to all ones. This could be expensive, but almost never occurs in
2759 practice. However, it is common in regression tests. */
2760 EXECUTE_IF_SET_IN_SBITMAP (unreachable_blocks, 0, i, sbi)
2761 {
2762 if (bitmap_bit_p (all_blocks, i))
2763 {
2764 bb_info_t bb_info = bb_table[i];
2765 if (!all_ones)
2766 {
2767 unsigned int j;
2768 group_info_t group;
2769
2770 all_ones = BITMAP_ALLOC (NULL);
2771 for (j = 0; VEC_iterate (group_info_t, rtx_group_vec, j, group); j++)
2772 bitmap_ior_into (all_ones, group->group_kill);
2773 }
2774 if (!bb_info->out)
2775 {
2776 bb_info->out = BITMAP_ALLOC (NULL);
2777 bitmap_copy (bb_info->out, all_ones);
2778 }
2779 }
2780 }
2781
2782 if (all_ones)
2783 BITMAP_FREE (all_ones);
2784 sbitmap_free (unreachable_blocks);
2785 }
2786
2787
2788 \f
2789 /*----------------------------------------------------------------------------
2790 Fourth step.
2791
2792 Solve the bitvector equations.
2793 ----------------------------------------------------------------------------*/
2794
2795
2796 /* Confluence function for blocks with no successors. Create an out
2797 set from the gen set of the exit block. This block logically has
2798 the exit block as a successor. */
2799
2800
2801
2802 static void
2803 dse_confluence_0 (basic_block bb)
2804 {
2805 bb_info_t bb_info = bb_table[bb->index];
2806
2807 if (bb->index == EXIT_BLOCK)
2808 return;
2809
2810 if (!bb_info->out)
2811 {
2812 bb_info->out = BITMAP_ALLOC (NULL);
2813 bitmap_copy (bb_info->out, bb_table[EXIT_BLOCK]->gen);
2814 }
2815 }
2816
2817 /* Propagate the information from the in set of the dest of E to the
2818 out set of the src of E. If the various in or out sets are not
2819 there, that means they are all ones. */
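/* Since a store can be proven dead only if it is dead along every
   path, the meet over successor edges is set intersection
   (bitmap_and_into below); a missing set stands for all ones, the
   identity of that meet. */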
2820
2821 static void
2822 dse_confluence_n (edge e)
2823 {
2824 bb_info_t src_info = bb_table[e->src->index];
2825 bb_info_t dest_info = bb_table[e->dest->index];
2826
2827 if (dest_info->in)
2828 {
2829 if (src_info->out)
2830 bitmap_and_into (src_info->out, dest_info->in);
2831 else
2832 {
2833 src_info->out = BITMAP_ALLOC (NULL);
2834 bitmap_copy (src_info->out, dest_info->in);
2835 }
2836 }
2837 }
2838
2839
2840 /* Propagate the info from the out to the in set of BB_INDEX's basic
2841 block. There are three cases:
2842
2843 1) The block has no kill set. In this case the kill set is all
2844 ones. It does not matter what the out set of the block is, none of
2845 the info can reach the top. The only thing that reaches the top is
2846 the gen set and we just copy the set.
2847
2848 2) There is a kill set but no out set and bb has successors. In
2849 this case we just return. Eventually an out set will be created and
2850 it is better to wait than to create a set of ones.
2851
2852 3) There is both a kill and out set. We apply the obvious transfer
2853 function.
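In bitmap terms the transfer function is in = gen | (out & ~kill),
which is what bitmap_ior_and_compl computes below.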
2854 */
2855
2856 static bool
2857 dse_transfer_function (int bb_index)
2858 {
2859 bb_info_t bb_info = bb_table[bb_index];
2860
2861 if (bb_info->kill)
2862 {
2863 if (bb_info->out)
2864 {
2865 /* Case 3 above. */
2866 if (bb_info->in)
2867 return bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2868 bb_info->out, bb_info->kill);
2869 else
2870 {
2871 bb_info->in = BITMAP_ALLOC (NULL);
2872 bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2873 bb_info->out, bb_info->kill);
2874 return true;
2875 }
2876 }
2877 else
2878 /* Case 2 above. */
2879 return false;
2880 }
2881 else
2882 {
2883 /* Case 1 above. If there is already an in set, nothing
2884 happens. */
2885 if (bb_info->in)
2886 return false;
2887 else
2888 {
2889 bb_info->in = BITMAP_ALLOC (NULL);
2890 bitmap_copy (bb_info->in, bb_info->gen);
2891 return true;
2892 }
2893 }
2894 }
2895
2896 /* Solve the dataflow equations. */
2897
2898 static void
2899 dse_step4 (void)
2900 {
2901 df_simple_dataflow (DF_BACKWARD, NULL, dse_confluence_0,
2902 dse_confluence_n, dse_transfer_function,
2903 all_blocks, df_get_postorder (DF_BACKWARD),
2904 df_get_n_blocks (DF_BACKWARD));
2905 if (dump_file)
2906 {
2907 basic_block bb;
2908
2909 fprintf (dump_file, "\n\n*** Global dataflow info after analysis.\n");
2910 FOR_ALL_BB (bb)
2911 {
2912 bb_info_t bb_info = bb_table[bb->index];
2913
2914 df_print_bb_index (bb, dump_file);
2915 if (bb_info->in)
2916 bitmap_print (dump_file, bb_info->in, " in: ", "\n");
2917 else
2918 fprintf (dump_file, " in: *MISSING*\n");
2919 if (bb_info->gen)
2920 bitmap_print (dump_file, bb_info->gen, " gen: ", "\n");
2921 else
2922 fprintf (dump_file, " gen: *MISSING*\n");
2923 if (bb_info->kill)
2924 bitmap_print (dump_file, bb_info->kill, " kill: ", "\n");
2925 else
2926 fprintf (dump_file, " kill: *MISSING*\n");
2927 if (bb_info->out)
2928 bitmap_print (dump_file, bb_info->out, " out: ", "\n");
2929 else
2930 fprintf (dump_file, " out: *MISSING*\n\n");
2931 }
2932 }
2933 }
2934
2935
2936 \f
2937 /*----------------------------------------------------------------------------
2938 Fifth step.
2939
2940 Delete the stores that can only be deleted using the global information.
2941 ----------------------------------------------------------------------------*/
2942
2943
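/* Delete the stores that the global solution shows to be dead: walk
   each block backwards from its out set V, delete any store all of
   whose byte positions are still set in V, and update V via
   scan_stores_nospill and scan_reads_nospill for the insns that
   remain. */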
2944 static void
2945 dse_step5_nospill (void)
2946 {
2947 basic_block bb;
2948 FOR_EACH_BB (bb)
2949 {
2950 bb_info_t bb_info = bb_table[bb->index];
2951 insn_info_t insn_info = bb_info->last_insn;
2952 bitmap v = bb_info->out;
2953
2954 while (insn_info)
2955 {
2956 bool deleted = false;
2957 if (dump_file && insn_info->insn)
2958 {
2959 fprintf (dump_file, "starting to process insn %d\n",
2960 INSN_UID (insn_info->insn));
2961 bitmap_print (dump_file, v, " v: ", "\n");
2962 }
2963
2964 /* There may have been code deleted by the dce pass run before
2965 this phase. */
2966 if (insn_info->insn
2967 && INSN_P (insn_info->insn)
2968 && (!insn_info->cannot_delete)
2969 && (!bitmap_empty_p (v)))
2970 {
2971 store_info_t store_info = insn_info->store_rec;
2972
2973 /* Try to delete the current insn. */
2974 deleted = true;
2975
2976 /* Skip the clobbers. */
2977 while (!store_info->is_set)
2978 store_info = store_info->next;
2979
2980 if (store_info->alias_set)
2981 deleted = false;
2982 else
2983 {
2984 HOST_WIDE_INT i;
2985 group_info_t group_info
2986 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2987
2988 for (i = store_info->begin; i < store_info->end; i++)
2989 {
2990 int index = get_bitmap_index (group_info, i);
2991
2992 if (dump_file)
2993 fprintf (dump_file, "i = %d, index = %d\n", (int)i, index);
2994 if (index == 0 || !bitmap_bit_p (v, index))
2995 {
2996 if (dump_file)
2997 fprintf (dump_file, "failing at i = %d\n", (int)i);
2998 deleted = false;
2999 break;
3000 }
3001 }
3002 }
3003 if (deleted)
3004 {
3005 if (dbg_cnt (dse))
3006 {
3007 check_for_inc_dec (insn_info->insn);
3008 delete_insn (insn_info->insn);
3009 insn_info->insn = NULL;
3010 globally_deleted++;
3011 }
3012 }
3013 }
3014 /* We do not want to process the local info if the insn was
3015 deleted. For instance, if the insn did a wild read, we
3016 no longer need to trash the info. */
3017 if (insn_info->insn
3018 && INSN_P (insn_info->insn)
3019 && (!deleted))
3020 {
3021 scan_stores_nospill (insn_info->store_rec, v, NULL);
3022 if (insn_info->wild_read)
3023 {
3024 if (dump_file)
3025 fprintf (dump_file, "wild read\n");
3026 bitmap_clear (v);
3027 }
3028 else if (insn_info->read_rec)
3029 {
3030 if (dump_file)
3031 fprintf (dump_file, "regular read\n");
3032 scan_reads_nospill (insn_info, v, NULL);
3033 }
3034 }
3035
3036 insn_info = insn_info->prev_insn;
3037 }
3038 }
3039 }
3040
3041
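/* The spill analogue of dse_step5_nospill: a store is deleted only
   when every alias-set position it writes is still set in V. */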
3042 static void
3043 dse_step5_spill (void)
3044 {
3045 basic_block bb;
3046 FOR_EACH_BB (bb)
3047 {
3048 bb_info_t bb_info = bb_table[bb->index];
3049 insn_info_t insn_info = bb_info->last_insn;
3050 bitmap v = bb_info->out;
3051
3052 while (insn_info)
3053 {
3054 bool deleted = false;
3055 /* There may have been code deleted by the dce pass run before
3056 this phase. */
3057 if (insn_info->insn
3058 && INSN_P (insn_info->insn)
3059 && (!insn_info->cannot_delete)
3060 && (!bitmap_empty_p (v)))
3061 {
3062 /* Try to delete the current insn. */
3063 store_info_t store_info = insn_info->store_rec;
3064 deleted = true;
3065
3066 while (store_info)
3067 {
3068 if (store_info->alias_set)
3069 {
3070 int index = get_bitmap_index (clear_alias_group,
3071 store_info->alias_set);
3072 if (index == 0 || !bitmap_bit_p (v, index))
3073 {
3074 deleted = false;
3075 break;
3076 }
3077 }
3078 else
3079 deleted = false;
3080 store_info = store_info->next;
3081 }
3082 if (deleted && dbg_cnt (dse))
3083 {
3084 if (dump_file)
3085 fprintf (dump_file, "Spill deleting insn %d\n",
3086 INSN_UID (insn_info->insn));
3087 check_for_inc_dec (insn_info->insn);
3088 delete_insn (insn_info->insn);
3089 spill_deleted++;
3090 insn_info->insn = NULL;
3091 }
3092 }
3093
3094 if (insn_info->insn
3095 && INSN_P (insn_info->insn)
3096 && (!deleted))
3097 {
3098 scan_stores_spill (insn_info->store_rec, v, NULL);
3099 scan_reads_spill (insn_info->read_rec, v, NULL);
3100 }
3101
3102 insn_info = insn_info->prev_insn;
3103 }
3104 }
3105 }
3106
3107
3108 \f
3109 /*----------------------------------------------------------------------------
3110 Sixth step.
3111
3112 Destroy everything left standing.
3113 ----------------------------------------------------------------------------*/
3114
3115 static void
3116 dse_step6 (bool global_done)
3117 {
3118 unsigned int i;
3119 group_info_t group;
3120 basic_block bb;
3121
3122 if (global_done)
3123 {
3124 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3125 {
3126 free (group->offset_map_n);
3127 free (group->offset_map_p);
3128 BITMAP_FREE (group->store1_n);
3129 BITMAP_FREE (group->store1_p);
3130 BITMAP_FREE (group->store2_n);
3131 BITMAP_FREE (group->store2_p);
3132 BITMAP_FREE (group->group_kill);
3133 }
3134
3135 FOR_ALL_BB (bb)
3136 {
3137 bb_info_t bb_info = bb_table[bb->index];
3138 BITMAP_FREE (bb_info->gen);
3139 if (bb_info->kill)
3140 BITMAP_FREE (bb_info->kill);
3141 if (bb_info->in)
3142 BITMAP_FREE (bb_info->in);
3143 if (bb_info->out)
3144 BITMAP_FREE (bb_info->out);
3145 }
3146 }
3147 else
3148 {
3149 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3150 {
3151 BITMAP_FREE (group->store1_n);
3152 BITMAP_FREE (group->store1_p);
3153 BITMAP_FREE (group->store2_n);
3154 BITMAP_FREE (group->store2_p);
3155 BITMAP_FREE (group->group_kill);
3156 }
3157 }
3158
3159 if (clear_alias_sets)
3160 {
3161 BITMAP_FREE (clear_alias_sets);
3162 BITMAP_FREE (disqualified_clear_alias_sets);
3163 free_alloc_pool (clear_alias_mode_pool);
3164 htab_delete (clear_alias_mode_table);
3165 }
3166
3167 end_alias_analysis ();
3168 free (bb_table);
3169 htab_delete (rtx_group_table);
3170 VEC_free (group_info_t, heap, rtx_group_vec);
3171 BITMAP_FREE (all_blocks);
3172 BITMAP_FREE (scratch);
3173
3174 free_alloc_pool (rtx_store_info_pool);
3175 free_alloc_pool (read_info_pool);
3176 free_alloc_pool (insn_info_pool);
3177 free_alloc_pool (bb_info_pool);
3178 free_alloc_pool (rtx_group_info_pool);
3179 free_alloc_pool (deferred_change_pool);
3180 }
3181
3182
3183
3184 /* -------------------------------------------------------------------------
3185 DSE
3186 ------------------------------------------------------------------------- */
3187
3188 /* Callback for running pass_rtl_dse. */
3189
3190 static unsigned int
3191 rest_of_handle_dse (void)
3192 {
3193 bool did_global = false;
3194
3195 df_set_flags (DF_DEFER_INSN_RESCAN);
3196
3197 dse_step0 ();
3198 dse_step1 ();
3199 dse_step2_init ();
3200 if (dse_step2_nospill ())
3201 {
3202 df_set_flags (DF_LR_RUN_DCE);
3203 df_analyze ();
3204 did_global = true;
3205 if (dump_file)
3206 fprintf (dump_file, "doing global processing\n");
3207 dse_step3 (false);
3208 dse_step4 ();
3209 dse_step5_nospill ();
3210 }
3211
3212 /* For the instance of dse that runs after reload, we make a special
3213 pass to process the spills. These are special in that they are
3214 totally transparent, i.e., there are no aliasing issues that need
3215 to be considered. This means that the wild reads that kill
3216 everything else do not apply here. */
3217 if (clear_alias_sets && dse_step2_spill ())
3218 {
3219 if (!did_global)
3220 {
3221 df_set_flags (DF_LR_RUN_DCE);
3222 df_analyze ();
3223 }
3224 did_global = true;
3225 if (dump_file)
3226 fprintf (dump_file, "doing global spill processing\n");
3227 dse_step3 (true);
3228 dse_step4 ();
3229 dse_step5_spill ();
3230 }
3231
3232 dse_step6 (did_global);
3233
3234 if (dump_file)
3235 fprintf (dump_file, "dse: local deletions = %d, global deletions = %d, spill deletions = %d\n",
3236 locally_deleted, globally_deleted, spill_deleted);
3237 return 0;
3238 }
3239
3240 static bool
3241 gate_dse (void)
3242 {
3243 return optimize > 0 && flag_dse;
3244 }
3245
3246 struct tree_opt_pass pass_rtl_dse1 =
3247 {
3248 "dse1", /* name */
3249 gate_dse, /* gate */
3250 rest_of_handle_dse, /* execute */
3251 NULL, /* sub */
3252 NULL, /* next */
3253 0, /* static_pass_number */
3254 TV_DSE1, /* tv_id */
3255 0, /* properties_required */
3256 0, /* properties_provided */
3257 0, /* properties_destroyed */
3258 0, /* todo_flags_start */
3259 TODO_dump_func |
3260 TODO_df_finish | TODO_verify_rtl_sharing |
3261 TODO_ggc_collect, /* todo_flags_finish */
3262 'w' /* letter */
3263 };
3264
3265 struct tree_opt_pass pass_rtl_dse2 =
3266 {
3267 "dse2", /* name */
3268 gate_dse, /* gate */
3269 rest_of_handle_dse, /* execute */
3270 NULL, /* sub */
3271 NULL, /* next */
3272 0, /* static_pass_number */
3273 TV_DSE2, /* tv_id */
3274 0, /* properties_required */
3275 0, /* properties_provided */
3276 0, /* properties_destroyed */
3277 0, /* todo_flags_start */
3278 TODO_dump_func |
3279 TODO_df_finish | TODO_verify_rtl_sharing |
3280 TODO_ggc_collect, /* todo_flags_finish */
3281 'w' /* letter */
3282 };