1 /* RTL dead store elimination.
2 Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
3
4 Contributed by Richard Sandiford <rsandifor@codesourcery.com>
5 and Kenneth Zadeck <zadeck@naturalbridge.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #undef BASELINE
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "hashtab.h"
29 #include "tm.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "tm_p.h"
33 #include "regs.h"
34 #include "hard-reg-set.h"
35 #include "flags.h"
36 #include "df.h"
37 #include "cselib.h"
38 #include "timevar.h"
39 #include "tree-pass.h"
40 #include "alloc-pool.h"
41 #include "alias.h"
42 #include "insn-config.h"
43 #include "expr.h"
44 #include "recog.h"
45 #include "dse.h"
46 #include "optabs.h"
47 #include "dbgcnt.h"
48
49 /* This file contains three techniques for performing Dead Store
50 Elimination (dse).
51
52 * The first technique performs dse locally on any base address. It
53 is based on cselib, which is a local value numbering technique.
54 This technique is local to a basic block but deals with fairly
55 general addresses.
56
57 * The second technique performs dse globally but is restricted to
58 base addresses that are either constant or are relative to the
59 frame_pointer.
60
61 * The third technique (which is only done after register allocation)
62 processes the spill slots. This differs from the second
63 technique because it takes advantage of the fact that spilling is
64 completely free from the effects of aliasing.
65
66 Logically, dse is a backwards dataflow problem. A store can be
67 deleted if it cannot be reached in the backward direction by any
68 use of the value being stored. However, the local technique uses a
69 forwards scan of the basic block because cselib requires that the
70 block be processed in that order.
71
72 The pass is logically broken into 7 steps:
73
74 0) Initialization.
75
76 1) The local algorithm, as well as scanning the insns for the two
77 global algorithms.
78
79 2) Analysis to see if the global algs are necessary. In the case
80 of stores based on a constant address, there must be at least two
81 stores to that address, to make it possible to delete some of the
82 stores. In the case of stores off of the frame or spill related
83 stores, only one store to an address is necessary because those
84 stores die at the end of the function.
85
86 3) Set up the global dataflow equations based on processing the
87 info parsed in the first step.
88
89 4) Solve the dataflow equations.
90
91 5) Delete the insns that the global analysis has indicated are
92 unnecessary.
93
94 6) Cleanup.
95
96 Step 1 uses cselib and canon_rtx to build the largest expression
97 possible for each address. That step is a forwards pass through
98 each basic block. From the point of view of the global technique,
99 the first pass could examine a block in either direction. The
100 forwards ordering is to accommodate cselib.
101
102 We make a simplifying assumption: addresses fall into four broad
103 categories:
104
105 1) base has rtx_varies_p == false, offset is constant.
106 2) base has rtx_varies_p == false, offset variable.
107 3) base has rtx_varies_p == true, offset constant.
108 4) base has rtx_varies_p == true, offset variable.
109
110 The local passes are able to process all 4 kinds of addresses. The
111 global pass only handles (1).
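
For illustration, the four categories might look like this
(hypothetical RTL; reg 100 and reg 101 are ordinary pseudos):

1) (mem (plus (symbol_ref "x") (const_int 4)))  base fixed, offset constant
2) (mem (plus (symbol_ref "x") (reg 100)))      base fixed, offset variable
3) (mem (plus (reg 100) (const_int 4)))         base varies, offset constant
4) (mem (plus (reg 100) (reg 101)))             base varies, offset variable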
112
113 The global problem is formulated as follows:
114
115 A store, S1, to address A, where A is not relative to the stack
116 frame, can be eliminated if all paths from S1 to the end of the
117 function contain another store to A before a read of A.
118 
119 If the address A is relative to the stack frame, a store S2 to A
120 can be eliminated if there are no paths from S2 that reach the
121 end of the function that read A before another store to A. In
122 this case S2 can be deleted if there are paths from S2 to the
123 end of the function that have no reads or writes to A. This
124 second case allows stores to the stack frame to be deleted that
125 would otherwise die when the function returns. This cannot be
126 done if stores_off_frame_dead_at_return is not true. See the doc
127 for that variable for when this variable is false.
128
129 The global problem is formulated as a backwards set intersection
130 dataflow problem where the stores are the gens and reads are the
131 kills. Set intersection problems are rare and require some special
132 handling given our representation of bitmaps. A straightforward
133 implementation requires a lot of bitmaps filled with 1s.
134 These are expensive and cumbersome in our bitmap formulation so
135 care has been taken to avoid large vectors filled with 1s. See
136 the comments in bb_info and in the dataflow confluence functions
137 for details.
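
Written as equations (a sketch; the implementation represents the
all-ones vectors lazily rather than ever building them):

IN(bb)  = GEN(bb) U (OUT(bb) - KILL(bb))
OUT(bb) = intersection of IN(s) over all successors s of bb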
138
139 There are two places for further enhancements to this algorithm:
140
141 1) The original dse which was embedded in a pass called flow also
142 did local address forwarding. For example in
143
144 A <- r100
145 ... <- A
146
147 flow would replace the right hand side of the second insn with a
148 reference to r100. Most of the information is available to add this
149 to this pass. It has not been done because it is a lot of work in
150 the case that either r100 is assigned to between the first and
151 second insn and/or the second insn is a load of part of the value
152 stored by the first insn.
153
154 insn 5 in gcc.c-torture/compile/990203-1.c is a simple case.
155 insn 15 in gcc.c-torture/execute/20001017-2.c is a simple case.
156 insn 25 in gcc.c-torture/execute/20001026-1.c is a simple case.
157 insn 44 in gcc.c-torture/execute/20010910-1.c is a simple case.
158
159 2) The cleaning up of spill code is quite profitable. It currently
160 depends on reading tea leaves and chicken entrails left by reload.
161 This pass depends on reload creating a singleton alias set for each
162 spill slot and telling the next dse pass which of these alias sets
163 are the singletons. Rather than analyze the addresses of the
164 spills, dse's spill processing just does analysis of the loads and
165 stores that use those alias sets. There are three cases where this
166 falls short:
167
168 a) Reload sometimes creates the slot for one mode of access, and
169 then inserts loads and/or stores for a smaller mode. In this
170 case, the current code just punts on the slot. The proper thing
171 to do is to back out and use one bit vector position for each
172 byte of the entity associated with the slot. This depends on
173 KNOWING that reload always generates the accesses for each of the
174 bytes in some canonical (read that easy to understand several
175 passes after reload happens) way.
176
177 b) Reload sometimes decides that the spill slot it allocated was not
178 large enough for the mode and goes back and allocates more slots
179 with the same mode and alias set. The backout in this case is a
180 little more graceful than (a). In this case the slot is unmarked
181 as being a spill slot, and if the final address comes out to be based
182 off the frame pointer, the global algorithm handles this slot.
183
184 c) For any pass that may prespill, there is currently no
185 mechanism to tell the dse pass that the slot being used has the
186 special properties that reload uses. It may be that all that is
187 required is to have those passes make the same calls that reload
188 does, assuming that the alias sets can be manipulated in the same
189 way. */
190
191 /* There are limits to the size of constant offsets we model for the
192 global problem. There are certainly test cases that exceed this
193 limit; however, it is unlikely that there are important programs
194 that really have constant offsets this size. */
195 #define MAX_OFFSET (64 * 1024)
196
197
198 static bitmap scratch = NULL;
199 struct insn_info;
200
201 /* This structure holds information about a candidate store. */
202 struct store_info
203 {
204
205 /* False means this is a clobber. */
206 bool is_set;
207
208 /* The id of the mem group of the base address. If rtx_varies_p is
209 true, this is -1. Otherwise, it is the index into the group
210 table. */
211 int group_id;
212
213 /* This is the cselib value. */
214 cselib_val *cse_base;
215
217 /* The canonized mem. */
217 rtx mem;
218
219 /* The result of get_addr on mem. */
220 rtx mem_addr;
221
222 /* If this is non-zero, it is the alias set of a spill location. */
223 alias_set_type alias_set;
224
225 /* The offsets of the first byte and of the byte after the last
226 byte associated with the operation. */
227 int begin, end;
228
229 /* A bitmask as wide as the number of bytes in the word, with a 1
230 for each byte of the store that may still be needed. The store
231 is unused if all of the bits are 0. */
232 unsigned HOST_WIDE_INT positions_needed;
233
234 /* The next store info for this insn. */
235 struct store_info *next;
236
237 /* The right hand side of the store. This is used if there is a
238 subsequent reload of the mem's address somewhere later in the
239 basic block. */
240 rtx rhs;
241 };
242
243 /* Return a bitmask with the first N low bits set. N must be in the range [1, HOST_BITS_PER_WIDE_INT]; the shift is undefined for N == 0. */
244
245 static unsigned HOST_WIDE_INT
246 lowpart_bitmask (int n)
247 {
248 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0;
249 return mask >> (HOST_BITS_PER_WIDE_INT - n);
250 }
251
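/* For example, on a host where HOST_BITS_PER_WIDE_INT == 64,
   lowpart_bitmask (4) yields 0xf.  record_store below uses this to
   initialize positions_needed for a 4-byte store; a later store
   covering bytes [begin+1, begin+3) would clear bits 1 and 2,
   leaving 0x9.  (Illustrative values only.)  */
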
252 typedef struct store_info *store_info_t;
253 static alloc_pool cse_store_info_pool;
254 static alloc_pool rtx_store_info_pool;
255
256 /* This structure holds information about a load. These are only
257 built for rtx bases. */
258 struct read_info
259 {
260 /* The id of the mem group of the base address. */
261 int group_id;
262
263 /* If this is non-zero, it is the alias set of a spill location. */
264 alias_set_type alias_set;
265
266 /* The offset of the first and byte after the last byte associated
267 with the operation. If begin == end == 0, the read did not have
268 a constant offset. */
269 int begin, end;
270
271 /* The mem being read. */
272 rtx mem;
273
274 /* The next read_info for this insn. */
275 struct read_info *next;
276 };
277 typedef struct read_info *read_info_t;
278 static alloc_pool read_info_pool;
279
280
281 /* One of these records is created for each insn. */
282
283 struct insn_info
284 {
285 /* Set true if the insn contains a store but the insn itself cannot
286 be deleted. This is set if the insn is a parallel and there is
287 more than one non-dead output or if the insn is in some way
288 volatile. */
289 bool cannot_delete;
290
291 /* This field is only used by the global algorithm. It is set true
292 if the insn contains any read of mem except for a category (1)
293 address. It is also set if the insn is a call or clobbers mem. If
294 the insn contains a wild read, read_rec will be null. */
295 bool wild_read;
296
297 /* This field is only used for the processing of const functions.
298 These functions cannot read memory, but they can read the stack
299 because that is where they may get their parms. We need to be
300 this conservative because, like the store motion pass, we don't
301 consider CALL_INSN_FUNCTION_USAGE when processing call insns.
302 Moreover, we need to distinguish two cases:
303 1. Before reload (register elimination), the stores related to
304 outgoing arguments are stack pointer based and thus deemed
305 of non-constant base in this pass. This requires special
306 handling but also means that the frame pointer based stores
307 need not be killed upon encountering a const function call.
308 2. After reload, the stores related to outgoing arguments can be
309 either stack pointer or hard frame pointer based. This means
310 that we have no other choice than also killing all the frame
311 pointer based stores upon encountering a const function call.
312 This field is set after reload for const function calls. Having
313 this set is less severe than a wild read, it just means that all
314 the frame related stores are killed rather than all the stores. */
315 bool frame_read;
316
317 /* This field is only used for the processing of const functions.
318 It is set if the insn may contain a stack pointer based store. */
319 bool stack_pointer_based;
320
321 /* This is true if any of the sets within the insn contains a
322 cselib base. Such stores can only be deleted by the local
323 algorithm. */
324 bool contains_cselib_groups;
325
326 /* The insn. */
327 rtx insn;
328
329 /* The list of mem sets or mem clobbers that are contained in this
330 insn. If the insn is deletable, it contains only one mem set.
331 But it could also contain clobbers. Insns that contain more than
332 one mem set are not deletable, but each of those mems is here in
333 order to provide info to delete other insns. */
334 store_info_t store_rec;
335
336 /* The linked list of mem uses in this insn. Only the reads from
337 rtx bases are listed here. The reads to cselib bases are
338 completely processed during the first scan and so are never
339 created. */
340 read_info_t read_rec;
341
342 /* The prev insn in the basic block. */
343 struct insn_info * prev_insn;
344
345 /* The linked list of insns that are in consideration for removal in
346 the forwards pass through the basic block. This pointer may be
347 trash as it is not cleared when a wild read occurs. The only
348 time it is guaranteed to be correct is when the traversal starts
349 at active_local_stores. */
350 struct insn_info * next_local_store;
351 };
352
353 typedef struct insn_info *insn_info_t;
354 static alloc_pool insn_info_pool;
355
356 /* The linked list of stores that are under consideration in this
357 basic block. */
358 static insn_info_t active_local_stores;
359
360 struct bb_info
361 {
362
363 /* Pointer to the insn info for the last insn in the block. These
364 are linked so this is how all of the insns are reached. During
365 scanning this is the current insn being scanned. */
366 insn_info_t last_insn;
367
368 /* The info for the global dataflow problem. */
369
370
371 /* This is set if the transfer function should AND in the wild_read
372 bitmap before applying the kill and gen sets. That vector knocks
373 out most of the bits in the bitmap and thus speeds up the
374 operations. */
375 bool apply_wild_read;
376
377 /* The set of store positions that exist in this block before a wild read. */
378 bitmap gen;
379
380 /* The set of load positions that exist in this block above the
381 same position of a store. */
382 bitmap kill;
383
384 /* The set of stores that reach the top of the block without being
385 killed by a read.
386
387 Do not represent the in if it is all ones. Note that this is
388 what the bitvector should logically be initialized to for a set
389 intersection problem. However, like the kill set, this is too
390 expensive. So initially, the in set will only be created for the
391 exit block and any block that contains a wild read. */
392 bitmap in;
393
394 /* The set of stores that reach the bottom of the block from its
395 successors.
396
397 Do not represent the out set if it is all ones. Note that this is
398 what the bitvector should logically be initialized to for a set
399 intersection problem. However, like the kill and in set, this is
400 too expensive. So what is done is that the confluence operator
401 just initializes the vector from one of the out sets of the
402 successors of the block. */
403 bitmap out;
404 };
405
406 typedef struct bb_info *bb_info_t;
407 static alloc_pool bb_info_pool;
408
409 /* Table to hold all bb_infos. */
410 static bb_info_t *bb_table;
411
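/* A minimal sketch (for illustration only; the actual confluence and
   transfer functions belong to steps 3 and 4, later in this file) of
   how the all-ones trick described in bb_info can work: a NULL "in"
   or "out" bitmap stands for the all-ones vector, so the confluence
   operator for this set intersection problem never has to
   materialize a vector of 1s.  */

static void
sketch_confluence_edge (bb_info_t src_info, bb_info_t dest_info)
{
  if (dest_info->in)
    {
      if (src_info->out)
        /* Both sides are represented: intersect normally.  */
        bitmap_and_into (src_info->out, dest_info->in);
      else
        {
          /* NULL means all ones, and (all ones) & X == X, so just
             copy the successor's in set.  */
          src_info->out = BITMAP_ALLOC (NULL);
          bitmap_copy (src_info->out, dest_info->in);
        }
    }
  /* If dest_info->in is NULL (all ones), intersecting with it is a
     no-op, so there is nothing to do.  */
}
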
412 /* There is a group_info for each rtx base that is used to reference
413 memory. There are not many rtx bases because they are
414 very limited in scope. */
415
416 struct group_info
417 {
418 /* The actual base of the address. */
419 rtx rtx_base;
420
421 /* The sequential id of the base. This allows us to have a
422 canonical ordering of these that is not based on addresses. */
423 int id;
424
425 /* A mem wrapped around the base pointer for the group in order to
426 do read dependency. */
427 rtx base_mem;
428
429 /* Canonized version of base_mem, most likely the same thing. */
430 rtx canon_base_mem;
431
432 /* These two sets of two bitmaps are used to keep track of how many
433 stores are actually referencing that position from this base. We
434 only do this for rtx bases as this will be used to assign
435 positions in the bitmaps for the global problem. Bit N is set in
436 store1 on the first store for offset N. Bit N is set in store2
437 for the second store to offset N. This is all we need since we
438 only care about offsets that have two or more stores for them.
439
440 The "_n" suffix is for offsets less than 0 and the "_p" suffix is
441 for 0 and greater offsets.
442
443 There is one special case here: for stores into the stack frame,
444 we will OR store1 into store2 before deciding which stores to look
445 at globally. This is because stores to the stack frame that have
446 no other reads before the end of the function can also be
447 deleted. */
448 bitmap store1_n, store1_p, store2_n, store2_p;
449
450 /* The positions in this bitmap have the same assignments as the in,
451 out, gen and kill bitmaps. This bitmap is all zeros except for
452 the positions that are occupied by stores for this group. */
453 bitmap group_kill;
454
455 /* True if there are any positions that are to be processed
456 globally. */
457 bool process_globally;
458
459 /* True if the base of this group is either the frame_pointer or
460 hard_frame_pointer. */
461 bool frame_related;
462
463 /* The offset_map is used to map the offsets from this base into
464 positions in the global bitmaps. It is only created after all of
465 the stores have been scanned and we know which ones we
466 care about. */
467 int *offset_map_n, *offset_map_p;
468 int offset_map_size_n, offset_map_size_p;
469 };
470 typedef struct group_info *group_info_t;
471 typedef const struct group_info *const_group_info_t;
472 static alloc_pool rtx_group_info_pool;
473
474 /* Tables of group_info structures, hashed by base value. */
475 static htab_t rtx_group_table;
476
477 /* Index into the rtx_group_vec. */
478 static int rtx_group_next_id;
479
480 DEF_VEC_P(group_info_t);
481 DEF_VEC_ALLOC_P(group_info_t,heap);
482
483 static VEC(group_info_t,heap) *rtx_group_vec;
484
485
486 /* This structure holds the set of changes that are being deferred
487 when removing a read operation. See replace_read. */
488 struct deferred_change
489 {
490
491 /* The mem that is being replaced. */
492 rtx *loc;
493
494 /* The reg it is being replaced with. */
495 rtx reg;
496
497 struct deferred_change *next;
498 };
499
500 typedef struct deferred_change *deferred_change_t;
501 static alloc_pool deferred_change_pool;
502
503 static deferred_change_t deferred_change_list = NULL;
504
505 /* This is used to hold the alias sets of spill variables. Since
506 these are never aliased and there may be a lot of them, it makes
507 sense to treat them specially. This bitvector is only allocated in
508 calls from dse_record_singleton_alias_set which currently is only
509 made during reload1. So when dse is called before reload this
510 mechanism does nothing. */
511
512 static bitmap clear_alias_sets = NULL;
513
514 /* The set of clear_alias_sets that have been disqualified because
515 there are loads or stores using a different mode than the alias set
516 was registered with. */
517 static bitmap disqualified_clear_alias_sets = NULL;
518
519 /* The group that holds all of the clear_alias_sets. */
520 static group_info_t clear_alias_group;
521
522 /* The modes of the clear_alias_sets. */
523 static htab_t clear_alias_mode_table;
524
525 /* Hash table element to look up the mode for an alias set. */
526 struct clear_alias_mode_holder
527 {
528 alias_set_type alias_set;
529 enum machine_mode mode;
530 };
531
532 static alloc_pool clear_alias_mode_pool;
533
534 /* This is true except if current_function_stdarg -- i.e. we cannot do
535 this for vararg functions because they play games with the frame. */
536 static bool stores_off_frame_dead_at_return;
537
538 /* Counter for stats. */
539 static int globally_deleted;
540 static int locally_deleted;
541 static int spill_deleted;
542
543 static bitmap all_blocks;
544
545 /* The number of bits used in the global bitmaps. */
546 static unsigned int current_position;
547
548
549 static bool gate_dse (void);
550 static bool gate_dse1 (void);
551 static bool gate_dse2 (void);
552
553 \f
554 /*----------------------------------------------------------------------------
555 Zeroth step.
556
557 Initialization.
558 ----------------------------------------------------------------------------*/
559
560 /* Hashtable callbacks for clear_alias_mode_table, which maps an
561 alias set to the machine mode it was registered with. */
562
563 static int
564 clear_alias_mode_eq (const void *p1, const void *p2)
565 {
566 const struct clear_alias_mode_holder * h1
567 = (const struct clear_alias_mode_holder *) p1;
568 const struct clear_alias_mode_holder * h2
569 = (const struct clear_alias_mode_holder *) p2;
570 return h1->alias_set == h2->alias_set;
571 }
572
573
574 static hashval_t
575 clear_alias_mode_hash (const void *p)
576 {
577 const struct clear_alias_mode_holder *holder
578 = (const struct clear_alias_mode_holder *) p;
579 return holder->alias_set;
580 }
581
582
583 /* Find the entry associated with ALIAS_SET. */
584
585 static struct clear_alias_mode_holder *
586 clear_alias_set_lookup (alias_set_type alias_set)
587 {
588 struct clear_alias_mode_holder tmp_holder;
589 void **slot;
590
591 tmp_holder.alias_set = alias_set;
592 slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, NO_INSERT);
593 gcc_assert (*slot);
594
595 return *slot;
596 }
597
598
599 /* Hashtable callbacks for maintaining the "bases" field of
600 store_group_info, given that the addresses are function invariants. */
601
602 static int
603 invariant_group_base_eq (const void *p1, const void *p2)
604 {
605 const_group_info_t gi1 = (const_group_info_t) p1;
606 const_group_info_t gi2 = (const_group_info_t) p2;
607 return rtx_equal_p (gi1->rtx_base, gi2->rtx_base);
608 }
609
610
611 static hashval_t
612 invariant_group_base_hash (const void *p)
613 {
614 const_group_info_t gi = (const_group_info_t) p;
615 int do_not_record;
616 return hash_rtx (gi->rtx_base, Pmode, &do_not_record, NULL, false);
617 }
618
619
620 /* Get the GROUP for BASE. Add a new group if it is not there. */
621
622 static group_info_t
623 get_group_info (rtx base)
624 {
625 struct group_info tmp_gi;
626 group_info_t gi;
627 void **slot;
628
629 if (base)
630 {
631 /* Find the group_info structure for BASE, creating a new one
632 if necessary. */
633 tmp_gi.rtx_base = base;
634 slot = htab_find_slot (rtx_group_table, &tmp_gi, INSERT);
635 gi = (group_info_t) *slot;
636 }
637 else
638 {
639 if (!clear_alias_group)
640 {
641 clear_alias_group = gi = pool_alloc (rtx_group_info_pool);
642 memset (gi, 0, sizeof (struct group_info));
643 gi->id = rtx_group_next_id++;
644 gi->store1_n = BITMAP_ALLOC (NULL);
645 gi->store1_p = BITMAP_ALLOC (NULL);
646 gi->store2_n = BITMAP_ALLOC (NULL);
647 gi->store2_p = BITMAP_ALLOC (NULL);
648 gi->group_kill = BITMAP_ALLOC (NULL);
649 gi->process_globally = false;
650 gi->offset_map_size_n = 0;
651 gi->offset_map_size_p = 0;
652 gi->offset_map_n = NULL;
653 gi->offset_map_p = NULL;
654 VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
655 }
656 return clear_alias_group;
657 }
658
659 if (gi == NULL)
660 {
661 *slot = gi = pool_alloc (rtx_group_info_pool);
662 gi->rtx_base = base;
663 gi->id = rtx_group_next_id++;
664 gi->base_mem = gen_rtx_MEM (QImode, base);
665 gi->canon_base_mem = canon_rtx (gi->base_mem);
666 gi->store1_n = BITMAP_ALLOC (NULL);
667 gi->store1_p = BITMAP_ALLOC (NULL);
668 gi->store2_n = BITMAP_ALLOC (NULL);
669 gi->store2_p = BITMAP_ALLOC (NULL);
670 gi->group_kill = BITMAP_ALLOC (NULL);
671 gi->process_globally = false;
672 gi->frame_related =
673 (base == frame_pointer_rtx) || (base == hard_frame_pointer_rtx);
674 gi->offset_map_size_n = 0;
675 gi->offset_map_size_p = 0;
676 gi->offset_map_n = NULL;
677 gi->offset_map_p = NULL;
678 VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
679 }
680
681 return gi;
682 }
683
684
685 /* Initialization of data structures. */
686
687 static void
688 dse_step0 (void)
689 {
690 locally_deleted = 0;
691 globally_deleted = 0;
692 spill_deleted = 0;
693
694 scratch = BITMAP_ALLOC (NULL);
695
696 rtx_store_info_pool
697 = create_alloc_pool ("rtx_store_info_pool",
698 sizeof (struct store_info), 100);
699 read_info_pool
700 = create_alloc_pool ("read_info_pool",
701 sizeof (struct read_info), 100);
702 insn_info_pool
703 = create_alloc_pool ("insn_info_pool",
704 sizeof (struct insn_info), 100);
705 bb_info_pool
706 = create_alloc_pool ("bb_info_pool",
707 sizeof (struct bb_info), 100);
708 rtx_group_info_pool
709 = create_alloc_pool ("rtx_group_info_pool",
710 sizeof (struct group_info), 100);
711 deferred_change_pool
712 = create_alloc_pool ("deferred_change_pool",
713 sizeof (struct deferred_change), 10);
714
715 rtx_group_table = htab_create (11, invariant_group_base_hash,
716 invariant_group_base_eq, NULL);
717
718 bb_table = XCNEWVEC (bb_info_t, last_basic_block);
719 rtx_group_next_id = 0;
720
721 stores_off_frame_dead_at_return = !current_function_stdarg;
722
723 init_alias_analysis ();
724
725 if (clear_alias_sets)
726 clear_alias_group = get_group_info (NULL);
727 else
728 clear_alias_group = NULL;
729 }
730
731
732 \f
733 /*----------------------------------------------------------------------------
734 First step.
735
736 Scan all of the insns. Any random ordering of the blocks is fine.
737 Each block is scanned in forward order to accommodate cselib which
738 is used to remove stores with non-constant bases.
739 ----------------------------------------------------------------------------*/
740
741 /* Delete all of the store_info recs from INSN_INFO. */
742
743 static void
744 free_store_info (insn_info_t insn_info)
745 {
746 store_info_t store_info = insn_info->store_rec;
747 while (store_info)
748 {
749 store_info_t next = store_info->next;
750 if (store_info->cse_base)
751 pool_free (cse_store_info_pool, store_info);
752 else
753 pool_free (rtx_store_info_pool, store_info);
754 store_info = next;
755 }
756
757 insn_info->cannot_delete = true;
758 insn_info->contains_cselib_groups = false;
759 insn_info->store_rec = NULL;
760 }
761
762
763 struct insn_size {
764 int size;
765 rtx insn;
766 };
767
768
769 /* Emit an insn to do the add implied by *R if it is a
770 PRE/POST-INC/DEC/MODIFY. D is a structure containing the insn and
771 the size of the mode of the MEM that this address is inside of. */
772
773 static int
774 replace_inc_dec (rtx *r, void *d)
775 {
776 rtx x = *r;
777 struct insn_size *data = (struct insn_size *)d;
778 switch (GET_CODE (x))
779 {
780 case PRE_INC:
781 case POST_INC:
782 {
783 rtx r1 = XEXP (x, 0);
784 rtx c = gen_int_mode (data->size, Pmode);
785 add_insn_before (data->insn,
786 gen_rtx_SET (Pmode, r1,
787 gen_rtx_PLUS (Pmode, r1, c)),
788 NULL);
789 return -1;
790 }
791
792 case PRE_DEC:
793 case POST_DEC:
794 {
795 rtx r1 = XEXP (x, 0);
796 rtx c = gen_int_mode (-data->size, Pmode);
797 add_insn_before (data->insn,
798 gen_rtx_SET (Pmode, r1,
799 gen_rtx_PLUS (Pmode, r1, c)),
800 NULL);
801 return -1;
802 }
803
804 case PRE_MODIFY:
805 case POST_MODIFY:
806 {
807 /* We can reuse the add because we are about to delete the
808 insn that contained it. */
809 rtx add = XEXP (x, 0);
810 rtx r1 = XEXP (add, 0);
811 add_insn_before (data->insn,
812 gen_rtx_SET (Pmode, r1, add), NULL);
813 return -1;
814 }
815
816 default:
817 return 0;
818 }
819 }
820
821
822 /* If X is a MEM, check the address to see if it is PRE/POST-INC/DEC/MODIFY
823 and generate an add to replace that. */
824
825 static int
826 replace_inc_dec_mem (rtx *r, void *d)
827 {
828 rtx x = *r;
829 if (GET_CODE (x) == MEM)
830 {
831 struct insn_size data;
832
833 data.size = GET_MODE_SIZE (GET_MODE (x));
834 data.insn = (rtx)d;
835
836 for_each_rtx (&XEXP (x, 0), replace_inc_dec, &data);
837
838 return -1;
839 }
840 return 0;
841 }
842
843 /* Before we delete INSN, make sure that the auto inc/dec, if it is
844 there, is split into a separate insn. */
845
846 static void
847 check_for_inc_dec (rtx insn)
848 {
849 rtx note = find_reg_note (insn, REG_INC, NULL_RTX);
850 if (note)
851 for_each_rtx (&insn, replace_inc_dec_mem, insn);
852 }
853
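/* For example, given an insn containing (set (mem (pre_inc (reg 100)))
   (reg 101)) in SImode, check_for_inc_dec emits (set (reg 100)
   (plus (reg 100) (const_int 4))) before the insn, so that the side
   effect survives when the dead store itself is deleted.
   (Hypothetical register numbers; a 4-byte SImode is assumed.)  */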
854
855 /* Delete the insn and free all of the fields inside INSN_INFO. */
856
857 static void
858 delete_dead_store_insn (insn_info_t insn_info)
859 {
860 read_info_t read_info;
861
862 if (!dbg_cnt (dse))
863 return;
864
865 check_for_inc_dec (insn_info->insn);
866 if (dump_file)
867 {
868 fprintf (dump_file, "Locally deleting insn %d ",
869 INSN_UID (insn_info->insn));
870 if (insn_info->store_rec->alias_set)
871 fprintf (dump_file, "alias set %d\n",
872 (int) insn_info->store_rec->alias_set);
873 else
874 fprintf (dump_file, "\n");
875 }
876
877 free_store_info (insn_info);
878 read_info = insn_info->read_rec;
879
880 while (read_info)
881 {
882 read_info_t next = read_info->next;
883 pool_free (read_info_pool, read_info);
884 read_info = next;
885 }
886 insn_info->read_rec = NULL;
887
888 delete_insn (insn_info->insn);
889 locally_deleted++;
890 insn_info->insn = NULL;
891
892 insn_info->wild_read = false;
893 }
894
895
896 /* Set the store* bitmaps and the offset_map_size* fields in GROUP
897 based on OFFSET and WIDTH. */
898
899 static void
900 set_usage_bits (group_info_t group, HOST_WIDE_INT offset, HOST_WIDE_INT width)
901 {
902 HOST_WIDE_INT i;
903
904 if ((offset > -MAX_OFFSET) && (offset < MAX_OFFSET))
905 for (i = offset; i < offset + width; i++)
906 {
907 bitmap store1;
908 bitmap store2;
909 int ai;
910 if (i < 0)
911 {
912 store1 = group->store1_n;
913 store2 = group->store2_n;
914 ai = -i;
915 }
916 else
917 {
918 store1 = group->store1_p;
919 store2 = group->store2_p;
920 ai = i;
921 }
922
923 if (bitmap_bit_p (store1, ai))
924 bitmap_set_bit (store2, ai);
925 else
926 {
927 bitmap_set_bit (store1, ai);
928 if (i < 0)
929 {
930 if (group->offset_map_size_n < ai)
931 group->offset_map_size_n = ai;
932 }
933 else
934 {
935 if (group->offset_map_size_p < ai)
936 group->offset_map_size_p = ai;
937 }
938 }
939 }
940 }
941
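/* For example, a 4-byte store at offset -2 from a group's base sets
   bits 2 and 1 in store1_n (for offsets -2 and -1) and bits 0 and 1
   in store1_p (for offsets 0 and 1) the first time around; a second
   overlapping store would move those bits into store2_*.
   (Illustrative offsets only.)  */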
942
943 /* Set the BB_INFO so that the last insn is marked as a wild read. */
944
945 static void
946 add_wild_read (bb_info_t bb_info)
947 {
948 insn_info_t insn_info = bb_info->last_insn;
949 read_info_t *ptr = &insn_info->read_rec;
950
951 while (*ptr)
952 {
953 read_info_t next = (*ptr)->next;
954 if ((*ptr)->alias_set == 0)
955 {
956 pool_free (read_info_pool, *ptr);
957 *ptr = next;
958 }
959 else
960 ptr = &(*ptr)->next;
961 }
962 insn_info->wild_read = true;
963 active_local_stores = NULL;
964 }
965
966
967 /* Return true if X is a constant or one of the registers that behave
968 as a constant over the life of a function. This is equivalent to
969 !rtx_varies_p for memory addresses. */
970
971 static bool
972 const_or_frame_p (rtx x)
973 {
974 switch (GET_CODE (x))
975 {
976 case MEM:
977 return MEM_READONLY_P (x);
978
979 case CONST:
980 case CONST_INT:
981 case CONST_DOUBLE:
982 case CONST_VECTOR:
983 case SYMBOL_REF:
984 case LABEL_REF:
985 return true;
986
987 case REG:
988 /* Note that we have to test for the actual rtx used for the frame
989 and arg pointers and not just the register number in case we have
990 eliminated the frame and/or arg pointer and are using it
991 for pseudos. */
992 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
993 /* The arg pointer varies if it is not a fixed register. */
994 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
995 || x == pic_offset_table_rtx)
996 return true;
997 return false;
998
999 default:
1000 return false;
1001 }
1002 }
1003
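/* For example, (symbol_ref "x") and frame_pointer_rtx are accepted,
   while an ordinary pseudo such as (reg 100) is not, since a pseudo's
   value can change over the life of the function.  (Hypothetical RTL
   for illustration.)  */
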
1004 /* Take all reasonable action to put the address of MEM into the form
1005 that we can do analysis on.
1006
1007 The gold standard is to get the address into the form: address +
1008 OFFSET where address is something that rtx_varies_p considers a
1009 constant. When we can get the address in this form, we can do
1010 global analysis on it. Note that for constant bases, address is
1011 not actually returned, only the group_id. The address can be
1012 obtained from that.
1013
1014 If that fails, we try cselib to get a value we can at least use
1015 locally. If that fails we return false.
1016
1017 The GROUP_ID is set to -1 for cselib bases and the index of the
1018 group for non_varying bases.
1019
1020 FOR_READ is true if this is a mem read and false if not. */
1021
1022 static bool
1023 canon_address (rtx mem,
1024 alias_set_type *alias_set_out,
1025 int *group_id,
1026 HOST_WIDE_INT *offset,
1027 cselib_val **base)
1028 {
1029 rtx mem_address = XEXP (mem, 0);
1030 rtx expanded_address, address;
1031 /* Make sure that cselib has initialized all of the operands of
1032 the address before asking it to do the subst. */
1033
1034 if (clear_alias_sets)
1035 {
1036 /* If this is a spill, do not do any further processing. */
1037 alias_set_type alias_set = MEM_ALIAS_SET (mem);
1038 if (dump_file)
1039 fprintf (dump_file, "found alias set %d\n", (int) alias_set);
1040 if (bitmap_bit_p (clear_alias_sets, alias_set))
1041 {
1042 struct clear_alias_mode_holder *entry
1043 = clear_alias_set_lookup (alias_set);
1044
1045 /* If the modes do not match, we cannot process this set. */
1046 if (entry->mode != GET_MODE (mem))
1047 {
1048 if (dump_file)
1049 fprintf (dump_file,
1050 "disqualifying alias set %d, (%s) != (%s)\n",
1051 (int) alias_set, GET_MODE_NAME (entry->mode),
1052 GET_MODE_NAME (GET_MODE (mem)));
1053
1054 bitmap_set_bit (disqualified_clear_alias_sets, alias_set);
1055 return false;
1056 }
1057
1058 *alias_set_out = alias_set;
1059 *group_id = clear_alias_group->id;
1060 return true;
1061 }
1062 }
1063
1064 *alias_set_out = 0;
1065
1066 cselib_lookup (mem_address, Pmode, 1);
1067
1068 if (dump_file)
1069 {
1070 fprintf (dump_file, " mem: ");
1071 print_inline_rtx (dump_file, mem_address, 0);
1072 fprintf (dump_file, "\n");
1073 }
1074
1075 /* Use cselib to replace all of the reg references with the full
1076 expression. This will take care of the case where we have
1077
1078 r_x = base + offset;
1079 val = *r_x;
1080
1081 by making it into
1082
1083 val = *(base + offset);
1084 */
1085
1086 expanded_address = cselib_expand_value_rtx (mem_address, scratch, 5);
1087
1088 /* If this fails, just go with the mem_address. */
1089 if (!expanded_address)
1090 expanded_address = mem_address;
1091
1092 /* Split the address into canonical BASE + OFFSET terms. */
1093 address = canon_rtx (expanded_address);
1094
1095 *offset = 0;
1096
1097 if (dump_file)
1098 {
1099 fprintf (dump_file, "\n after cselib_expand address: ");
1100 print_inline_rtx (dump_file, expanded_address, 0);
1101 fprintf (dump_file, "\n");
1102
1103 fprintf (dump_file, "\n after canon_rtx address: ");
1104 print_inline_rtx (dump_file, address, 0);
1105 fprintf (dump_file, "\n");
1106 }
1107
1108 if (GET_CODE (address) == CONST)
1109 address = XEXP (address, 0);
1110
1111 if (GET_CODE (address) == PLUS && GET_CODE (XEXP (address, 1)) == CONST_INT)
1112 {
1113 *offset = INTVAL (XEXP (address, 1));
1114 address = XEXP (address, 0);
1115 }
1116
1117 if (const_or_frame_p (address))
1118 {
1119 group_info_t group = get_group_info (address);
1120
1121 if (dump_file)
1122 fprintf (dump_file, " gid=%d offset=%d \n", group->id, (int)*offset);
1123 *base = NULL;
1124 *group_id = group->id;
1125 }
1126 else
1127 {
1128 *base = cselib_lookup (address, Pmode, true);
1129 *group_id = -1;
1130
1131 if (*base == NULL)
1132 {
1133 if (dump_file)
1134 fprintf (dump_file, " no cselib val - should be a wild read.\n");
1135 return false;
1136 }
1137 if (dump_file)
1138 fprintf (dump_file, " varying cselib base=%d offset = %d\n",
1139 (*base)->value, (int)*offset);
1140 }
1141 return true;
1142 }
1143
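/* For example, an address such as (plus (reg fp) (const_int -8))
   canonicalizes to the frame pointer group with *offset == -8 and a
   non-negative *group_id, while (plus (reg 100) (const_int 4)) for an
   arbitrary pseudo yields *group_id == -1 and a cselib value in
   *base.  (Hypothetical RTL for illustration.)  */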
1144
1145 /* Clear the rhs field from the active_local_stores array. */
1146
1147 static void
1148 clear_rhs_from_active_local_stores (void)
1149 {
1150 insn_info_t ptr = active_local_stores;
1151
1152 while (ptr)
1153 {
1154 store_info_t store_info = ptr->store_rec;
1155 /* Skip the clobbers. */
1156 while (!store_info->is_set)
1157 store_info = store_info->next;
1158
1159 store_info->rhs = NULL;
1160
1161 ptr = ptr->next_local_store;
1162 }
1163 }
1164
1165
1166 /* BODY is an instruction pattern that belongs to the insn in
1167 BB_INFO->last_insn. Return 1 if there is a candidate store, after
1168 adding it to the appropriate local store group if so. */
1169
1170 static int
1171 record_store (rtx body, bb_info_t bb_info)
1172 {
1173 rtx mem;
1174 HOST_WIDE_INT offset = 0;
1175 HOST_WIDE_INT width = 0;
1176 alias_set_type spill_alias_set;
1177 insn_info_t insn_info = bb_info->last_insn;
1178 store_info_t store_info = NULL;
1179 int group_id;
1180 cselib_val *base = NULL;
1181 insn_info_t ptr, last;
1182 bool store_is_unused;
1183
1184 if (GET_CODE (body) != SET && GET_CODE (body) != CLOBBER)
1185 return 0;
1186
1187 /* If the set is unused, it cannot be used to keep the insn
1188 from being deleted. On the other hand, it does provide something
1189 that can be used to prove that another store is dead. */
1190 store_is_unused
1191 = (find_reg_note (insn_info->insn, REG_UNUSED, body) != NULL);
1192
1193 /* Check whether that value is a suitable memory location. */
1194 mem = SET_DEST (body);
1195 if (!MEM_P (mem))
1196 {
1197 /* If the set or clobber is unused, then it does not affect our
1198 ability to get rid of the entire insn. */
1199 if (!store_is_unused)
1200 insn_info->cannot_delete = true;
1201 return 0;
1202 }
1203
1204 /* At this point we know mem is a mem. */
1205 if (GET_MODE (mem) == BLKmode)
1206 {
1207 if (GET_CODE (XEXP (mem, 0)) == SCRATCH)
1208 {
1209 if (dump_file)
1210 fprintf (dump_file, " adding wild read for (clobber (mem:BLK (scratch)))\n");
1211 add_wild_read (bb_info);
1212 insn_info->cannot_delete = true;
1213 }
1214 else if (!store_is_unused)
1215 {
1216 /* If the set or clobber is unused, then it does not affect our
1217 ability to get rid of the entire insn. */
1218 insn_info->cannot_delete = true;
1219 clear_rhs_from_active_local_stores ();
1220 }
1221 return 0;
1222 }
1223
1224 /* We can still process a volatile mem, we just cannot delete it. */
1225 if (MEM_VOLATILE_P (mem))
1226 insn_info->cannot_delete = true;
1227
1228 if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
1229 {
1230 clear_rhs_from_active_local_stores ();
1231 return 0;
1232 }
1233
1234 width = GET_MODE_SIZE (GET_MODE (mem));
1235
1236 if (spill_alias_set)
1237 {
1238 bitmap store1 = clear_alias_group->store1_p;
1239 bitmap store2 = clear_alias_group->store2_p;
1240
1241 if (bitmap_bit_p (store1, spill_alias_set))
1242 bitmap_set_bit (store2, spill_alias_set);
1243 else
1244 bitmap_set_bit (store1, spill_alias_set);
1245
1246 if (clear_alias_group->offset_map_size_p < spill_alias_set)
1247 clear_alias_group->offset_map_size_p = spill_alias_set;
1248
1249 store_info = pool_alloc (rtx_store_info_pool);
1250
1251 if (dump_file)
1252 fprintf (dump_file, " processing spill store %d(%s)\n",
1253 (int) spill_alias_set, GET_MODE_NAME (GET_MODE (mem)));
1254 }
1255 else if (group_id >= 0)
1256 {
1257 /* In the restrictive case where the base is a constant or the
1258 frame pointer we can do global analysis. */
1259
1260 group_info_t group
1261 = VEC_index (group_info_t, rtx_group_vec, group_id);
1262
1263 store_info = pool_alloc (rtx_store_info_pool);
1264 set_usage_bits (group, offset, width);
1265
1266 if (dump_file)
1267 fprintf (dump_file, " processing const base store gid=%d[%d..%d)\n",
1268 group_id, (int)offset, (int)(offset+width));
1269 }
1270 else
1271 {
1272 rtx base_term = find_base_term (XEXP (mem, 0));
1273 if (!base_term
1274 || (GET_CODE (base_term) == ADDRESS
1275 && GET_MODE (base_term) == Pmode
1276 && XEXP (base_term, 0) == stack_pointer_rtx))
1277 insn_info->stack_pointer_based = true;
1278 insn_info->contains_cselib_groups = true;
1279
1280 store_info = pool_alloc (cse_store_info_pool);
1281 group_id = -1;
1282
1283 if (dump_file)
1284 fprintf (dump_file, " processing cselib store [%d..%d)\n",
1285 (int)offset, (int)(offset+width));
1286 }
1287
1288 /* Check to see if this store causes some other stores to be
1289 dead. */
1290 ptr = active_local_stores;
1291 last = NULL;
1292
1293 while (ptr)
1294 {
1295 insn_info_t next = ptr->next_local_store;
1296 store_info_t s_info = ptr->store_rec;
1297 bool delete = true;
1298
1299 /* Skip the clobbers. We delete the active insn if this insn
1300 shadows the set. To have been put on the active list, it
1301 has exactly one set. */
1302 while (!s_info->is_set)
1303 s_info = s_info->next;
1304
1305 if (s_info->alias_set != spill_alias_set)
1306 delete = false;
1307 else if (s_info->alias_set)
1308 {
1309 struct clear_alias_mode_holder *entry
1310 = clear_alias_set_lookup (s_info->alias_set);
1311 /* Generally, spills cannot be processed if any of the
1312 references to the slot have a different mode. But if
1313 we are in the same block and mode is exactly the same
1314 between this store and one before in the same block,
1315 we can still delete it. */
1316 if ((GET_MODE (mem) == GET_MODE (s_info->mem))
1317 && (GET_MODE (mem) == entry->mode))
1318 {
1319 delete = true;
1320 s_info->positions_needed = (unsigned HOST_WIDE_INT) 0;
1321 }
1322 if (dump_file)
1323 fprintf (dump_file, " trying spill store in insn=%d alias_set=%d\n",
1324 INSN_UID (ptr->insn), (int) s_info->alias_set);
1325 }
1326 else if ((s_info->group_id == group_id)
1327 && (s_info->cse_base == base))
1328 {
1329 HOST_WIDE_INT i;
1330 if (dump_file)
1331 fprintf (dump_file, " trying store in insn=%d gid=%d[%d..%d)\n",
1332 INSN_UID (ptr->insn), s_info->group_id,
1333 (int)s_info->begin, (int)s_info->end);
1334 for (i = offset; i < offset+width; i++)
1335 if (i >= s_info->begin && i < s_info->end)
1336 s_info->positions_needed
1337 &= ~(((unsigned HOST_WIDE_INT) 1) << (i - s_info->begin));
1338 }
1339 else if (s_info->rhs)
1340 /* Need to see if it is possible for this store to overwrite
1341 the value of store_info. If it is, set the rhs to NULL to
1342 keep it from being used to remove a load. */
1343 {
1344 if (canon_true_dependence (s_info->mem,
1345 GET_MODE (s_info->mem),
1346 s_info->mem_addr,
1347 mem, rtx_varies_p))
1348 s_info->rhs = NULL;
1349 }
1350
1351 /* An insn can be deleted if every position of every one of
1352 its s_infos is zero. */
1353 if (s_info->positions_needed != (unsigned HOST_WIDE_INT) 0)
1354 delete = false;
1355
1356 if (delete)
1357 {
1358 insn_info_t insn_to_delete = ptr;
1359
1360 if (last)
1361 last->next_local_store = ptr->next_local_store;
1362 else
1363 active_local_stores = ptr->next_local_store;
1364
1365 delete_dead_store_insn (insn_to_delete);
1366 }
1367 else
1368 last = ptr;
1369
1370 ptr = next;
1371 }
1372
1373 gcc_assert ((unsigned) width <= HOST_BITS_PER_WIDE_INT);
1374
1375 /* Finish filling in the store_info. */
1376 store_info->next = insn_info->store_rec;
1377 insn_info->store_rec = store_info;
1378 store_info->mem = canon_rtx (mem);
1379 store_info->alias_set = spill_alias_set;
1380 store_info->mem_addr = get_addr (XEXP (mem, 0));
1381 store_info->cse_base = base;
1382 store_info->positions_needed = lowpart_bitmask (width);
1383 store_info->group_id = group_id;
1384 store_info->begin = offset;
1385 store_info->end = offset + width;
1386 store_info->is_set = GET_CODE (body) == SET;
1387
1388 if (store_info->is_set
1389 /* No place to keep the value after ra. */
1390 && !reload_completed
1391 && (REG_P (SET_SRC (body))
1392 || GET_CODE (SET_SRC (body)) == SUBREG
1393 || CONSTANT_P (SET_SRC (body)))
1394 /* Sometimes the store and reload are used for truncation and
1395 rounding. */
1396 && !(FLOAT_MODE_P (GET_MODE (mem)) && (flag_float_store)))
1397 store_info->rhs = SET_SRC (body);
1398 else
1399 store_info->rhs = NULL;
1400
1401 /* If this is a clobber, we return 0. We will only be able to
1402 delete this insn if there is only one USED store, but we
1403 can use the clobber to delete other stores earlier. */
1404 return store_info->is_set ? 1 : 0;
1405 }
1406
1407
1408 static void
1409 dump_insn_info (const char * start, insn_info_t insn_info)
1410 {
1411 fprintf (dump_file, "%s insn=%d %s\n", start,
1412 INSN_UID (insn_info->insn),
1413 insn_info->store_rec ? "has store" : "naked");
1414 }
1415
1416
1417 /* If the modes are different and the value's source and target do not
1418 line up, we need to extract the value from the lower part of the rhs
1419 of the store, shift it, and then put it into a form that can be shoved
1420 into the read_insn. This function generates a right shift by SHIFT
1421 bits of a value that is at least ACCESS_SIZE bytes wide, from which
1422 READ_MODE is then extracted. The register holding the shifted value
1423 is returned, or NULL if we failed to find a usable shift. */
1424
1425 static rtx
1426 find_shift_sequence (int access_size,
1427 store_info_t store_info,
1428 read_info_t read_info,
1429 int shift)
1430 {
1431 enum machine_mode store_mode = GET_MODE (store_info->mem);
1432 enum machine_mode read_mode = GET_MODE (read_info->mem);
1433 enum machine_mode new_mode;
1434 rtx read_reg = NULL;
1435
1436 /* Some machines like the x86 have shift insns for each size of
1437 operand. Other machines like the ppc or the ia-64 may only have
1438 shift insns that shift values within 32 or 64 bit registers.
1439 This loop tries to find the smallest shift insn that will right
1440 justify the value we want to read but is available in one insn on
1441 the machine. */
1442
1443 for (new_mode = smallest_mode_for_size (access_size * BITS_PER_UNIT,
1444 MODE_INT);
1445 GET_MODE_BITSIZE (new_mode) <= BITS_PER_WORD;
1446 new_mode = GET_MODE_WIDER_MODE (new_mode))
1447 {
1448 rtx target, new_reg, shift_seq, insn, new_lhs;
1449 int cost;
1450
1451 /* Try a wider mode if truncating the store mode to NEW_MODE
1452 requires a real instruction. */
1453 if (GET_MODE_BITSIZE (new_mode) < GET_MODE_BITSIZE (store_mode)
1454 && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (new_mode),
1455 GET_MODE_BITSIZE (store_mode)))
1456 continue;
1457
1458 /* Also try a wider mode if the necessary punning is either not
1459 desirable or not possible. */
1460 if (!CONSTANT_P (store_info->rhs)
1461 && !MODES_TIEABLE_P (new_mode, store_mode))
1462 continue;
1463 new_lhs = simplify_gen_subreg (new_mode, copy_rtx (store_info->rhs),
1464 store_mode, 0);
1465 if (new_lhs == NULL_RTX)
1466 continue;
1467
1468 new_reg = gen_reg_rtx (new_mode);
1469
1470 start_sequence ();
1471
1472 /* In theory we could also check for an ashr. Ian Taylor knows
1473 of one dsp where the cost of these two was not the same. But
1474 this really is a rare case anyway. */
1475 target = expand_binop (new_mode, lshr_optab, new_reg,
1476 GEN_INT (shift), new_reg, 1, OPTAB_DIRECT);
1477
1478 shift_seq = get_insns ();
1479 end_sequence ();
1480
1481 if (target != new_reg || shift_seq == NULL)
1482 continue;
1483
1484 cost = 0;
1485 for (insn = shift_seq; insn != NULL_RTX; insn = NEXT_INSN (insn))
1486 if (INSN_P (insn))
1487 cost += insn_rtx_cost (PATTERN (insn));
1488
1489 /* The computation up to here is essentially independent
1490 of the arguments and could be precomputed. It may
1491 not be worth doing so. We could precompute if
1492 worthwhile or at least cache the results. The result
1493 technically depends on both SHIFT and ACCESS_SIZE,
1494 but in practice the answer will depend only on ACCESS_SIZE. */
1495
1496 if (cost > COSTS_N_INSNS (1))
1497 continue;
1498
1499 /* We found an acceptable shift. Generate a move to
1500 take the value from the store and put it into the
1501 shift pseudo, then shift it, then generate another
1502 move to put it into the target of the read. */
1503 emit_move_insn (new_reg, new_lhs);
1504 emit_insn (shift_seq);
1505 read_reg = extract_low_bits (read_mode, new_mode, new_reg);
1506 break;
1507 }
1508
1509 return read_reg;
1510 }
1511
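/* For example, on a little-endian target, if an 8-byte DImode store
   covers bytes [0, 8) and a 4-byte SImode load reads bytes [4, 8),
   replace_read below computes shift = 4 bytes = 32 bits and
   access_size = 8, and this function looks for a cheap DImode (or
   wider, up to the word size) logical right shift by 32.
   (Hypothetical sizes for illustration.)  */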
1512
1513 /* Take a sequence of:
1514 A <- r1
1515 ...
1516 ... <- A
1517
1518 and change it into
1519 r2 <- r1
1520 A <- r1
1521 ...
1522 ... <- r2
1523
1524 or
1525
1526 r3 <- extract (r1)
1527 r3 <- r3 >> shift
1528 r2 <- extract (r3)
1529 ... <- r2
1530
1531 or
1532
1533 r2 <- extract (r1)
1534 ... <- r2
1535
1536 Depending on the alignment and the mode of the store and
1537 subsequent load.
1538
1539
1540 The STORE_INFO and STORE_INSN are for the store and READ_INFO
1541 and READ_INSN are for the read. Return true if the replacement
1542 went ok. */
1543
1544 static bool
1545 replace_read (store_info_t store_info, insn_info_t store_insn,
1546 read_info_t read_info, insn_info_t read_insn, rtx *loc)
1547 {
1548 enum machine_mode store_mode = GET_MODE (store_info->mem);
1549 enum machine_mode read_mode = GET_MODE (read_info->mem);
1550 int shift;
1551 int access_size; /* In bytes. */
1552 rtx insns, read_reg;
1553
1554 if (!dbg_cnt (dse))
1555 return false;
1556
1557 /* To get here the read is within the boundaries of the write so
1558 shift will never be negative. Start out with the shift being in
1559 bytes. */
1560 if (BYTES_BIG_ENDIAN)
1561 shift = store_info->end - read_info->end;
1562 else
1563 shift = read_info->begin - store_info->begin;
1564
1565 access_size = shift + GET_MODE_SIZE (read_mode);
1566
1567 /* From now on it is bits. */
1568 shift *= BITS_PER_UNIT;
1569
1570 /* Create a sequence of instructions to set up the read register.
1571 This sequence goes immediately before the store and its result
1572 is read by the load.
1573
1574 We need to keep this in perspective. We are replacing a read
1575 with a sequence of insns, but the read will almost certainly be
1576 in cache, so it is not going to be an expensive one. Thus, we
1577 are not willing to do a multi insn shift or worse a subroutine
1578 call to get rid of the read. */
1579 if (dump_file)
1580 fprintf (dump_file, "trying to replace %smode load in insn %d"
1581 " from %smode store in insn %d\n",
1582 GET_MODE_NAME (read_mode), INSN_UID (read_insn->insn),
1583 GET_MODE_NAME (store_mode), INSN_UID (store_insn->insn));
1584 start_sequence ();
1585 if (shift)
1586 read_reg = find_shift_sequence (access_size, store_info, read_info, shift);
1587 else
1588 read_reg = extract_low_bits (read_mode, store_mode,
1589 copy_rtx (store_info->rhs));
1590 if (read_reg == NULL_RTX)
1591 {
1592 end_sequence ();
1593 if (dump_file)
1594 fprintf (dump_file, " -- could not extract bits of stored value\n");
1595 return false;
1596 }
1597 /* Force the value into a new register so that it won't be clobbered
1598 between the store and the load. */
1599 read_reg = copy_to_mode_reg (read_mode, read_reg);
1600 insns = get_insns ();
1601 end_sequence ();
1602
1603 if (validate_change (read_insn->insn, loc, read_reg, 0))
1604 {
1605 deferred_change_t deferred_change = pool_alloc (deferred_change_pool);
1606
1607 /* Insert this right before the store insn where it will be safe
1608 from later insns that might change it before the read. */
1609 emit_insn_before (insns, store_insn->insn);
1610
1611 /* And now for the kludge part: cselib croaks if you just
1612 return at this point. There are two reasons for this:
1613
1614 1) Cselib has an idea of how many pseudos there are and
1615 that does not include the new ones we just added.
1616
1617 2) Cselib does not know about the move insn we added
1618 above the store_info, and there is no way to tell it
1619 about it, because it has "moved on".
1620
1621 Problem (1) is fixable with a certain amount of engineering.
1622 Problem (2) requires starting the bb from scratch. This
1623 could be expensive.
1624
1625 So we are just going to have to lie. The move/extraction
1626 insns are not really an issue, cselib did not see them. But
1627 the use of the new pseudo in read_insn is a real problem because
1628 cselib has not scanned this insn. The way that we solve this
1629 problem is that we are just going to put the mem back for now
1630 and when we are finished with the block, we undo this. We
1631 keep a table of mems to get rid of. At the end of the basic
1632 block we can put them back. */
1633
1634 *loc = read_info->mem;
1635 deferred_change->next = deferred_change_list;
1636 deferred_change_list = deferred_change;
1637 deferred_change->loc = loc;
1638 deferred_change->reg = read_reg;
1639
1640 /* Get rid of the read_info, from the point of view of the
1641 rest of dse, play like this read never happened. */
1642 read_insn->read_rec = read_info->next;
1643 pool_free (read_info_pool, read_info);
1644 if (dump_file)
1645 {
1646 fprintf (dump_file, " -- replaced the loaded MEM with ");
1647 print_simple_rtl (dump_file, read_reg);
1648 fprintf (dump_file, "\n");
1649 }
1650 return true;
1651 }
1652 else
1653 {
1654 if (dump_file)
1655 {
1656 fprintf (dump_file, " -- replacing the loaded MEM with ");
1657 print_simple_rtl (dump_file, read_reg);
1658 fprintf (dump_file, " led to an invalid instruction\n");
1659 }
1660 return false;
1661 }
1662 }
1663
1664 /* A for_each_rtx callback in which DATA is the bb_info. Check to see
1665 if LOC is a mem and, if it is, look at the address and kill any
1666 appropriate stores that may be active. */
1667
1668 static int
1669 check_mem_read_rtx (rtx *loc, void *data)
1670 {
1671 rtx mem = *loc;
1672 bb_info_t bb_info;
1673 insn_info_t insn_info;
1674 HOST_WIDE_INT offset = 0;
1675 HOST_WIDE_INT width = 0;
1676 alias_set_type spill_alias_set = 0;
1677 cselib_val *base = NULL;
1678 int group_id;
1679 read_info_t read_info;
1680
1681 if (!mem || !MEM_P (mem))
1682 return 0;
1683
1684 bb_info = (bb_info_t) data;
1685 insn_info = bb_info->last_insn;
1686
1687 if ((MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
1688 || (MEM_VOLATILE_P (mem)))
1689 {
1690 if (dump_file)
1691 fprintf (dump_file, " adding wild read, volatile or barrier.\n");
1692 add_wild_read (bb_info);
1693 insn_info->cannot_delete = true;
1694 return 0;
1695 }
1696
1697 /* If it is reading readonly mem, then there can be no conflict with
1698 another write. */
1699 if (MEM_READONLY_P (mem))
1700 return 0;
1701
1702 if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
1703 {
1704 if (dump_file)
1705 fprintf (dump_file, " adding wild read, canon_address failure.\n");
1706 add_wild_read (bb_info);
1707 return 0;
1708 }
1709
1710 if (GET_MODE (mem) == BLKmode)
1711 width = -1;
1712 else
1713 width = GET_MODE_SIZE (GET_MODE (mem));
1714
1715 read_info = pool_alloc (read_info_pool);
1716 read_info->group_id = group_id;
1717 read_info->mem = mem;
1718 read_info->alias_set = spill_alias_set;
1719 read_info->begin = offset;
1720 read_info->end = offset + width;
1721 read_info->next = insn_info->read_rec;
1722 insn_info->read_rec = read_info;
1723
1724 /* We ignore the clobbers in store_info. This is mildly aggressive,
1725 but there really should not be a clobber followed by a read. */
1726
1727 if (spill_alias_set)
1728 {
1729 insn_info_t i_ptr = active_local_stores;
1730 insn_info_t last = NULL;
1731
1732 if (dump_file)
1733 fprintf (dump_file, " processing spill load %d\n",
1734 (int) spill_alias_set);
1735
1736 while (i_ptr)
1737 {
1738 store_info_t store_info = i_ptr->store_rec;
1739
1740 /* Skip the clobbers. */
1741 while (!store_info->is_set)
1742 store_info = store_info->next;
1743
1744 if (store_info->alias_set == spill_alias_set)
1745 {
1746 if (dump_file)
1747 dump_insn_info ("removing from active", i_ptr);
1748
1749 if (last)
1750 last->next_local_store = i_ptr->next_local_store;
1751 else
1752 active_local_stores = i_ptr->next_local_store;
1753 }
1754 else
1755 last = i_ptr;
1756 i_ptr = i_ptr->next_local_store;
1757 }
1758 }
1759 else if (group_id >= 0)
1760 {
1761 /* This is the restricted case where the base is a constant or
1762 the frame pointer and offset is a constant. */
1763 insn_info_t i_ptr = active_local_stores;
1764 insn_info_t last = NULL;
1765
1766 if (dump_file)
1767 {
1768 if (width == -1)
1769 fprintf (dump_file, " processing const load gid=%d[BLK]\n",
1770 group_id);
1771 else
1772 fprintf (dump_file, " processing const load gid=%d[%d..%d)\n",
1773 group_id, (int)offset, (int)(offset+width));
1774 }
1775
1776 while (i_ptr)
1777 {
1778 bool remove = false;
1779 store_info_t store_info = i_ptr->store_rec;
1780
1781 /* Skip the clobbers. */
1782 while (!store_info->is_set)
1783 store_info = store_info->next;
1784
1785 /* There are three cases here. */
1786 if (store_info->group_id < 0)
1787 /* We have a cselib store followed by a read from a
1788 const base. */
1789 remove
1790 = canon_true_dependence (store_info->mem,
1791 GET_MODE (store_info->mem),
1792 store_info->mem_addr,
1793 mem, rtx_varies_p);
1794
1795 else if (group_id == store_info->group_id)
1796 {
1797 /* This is a block mode load. We may get lucky and
1798 canon_true_dependence may save the day. */
1799 if (width == -1)
1800 remove
1801 = canon_true_dependence (store_info->mem,
1802 GET_MODE (store_info->mem),
1803 store_info->mem_addr,
1804 mem, rtx_varies_p);
1805
1806 /* If this read is just reading back something that we just
1807 stored, rewrite the read. */
1808 else
1809 {
1810 if (store_info->rhs
1811 && (offset >= store_info->begin)
1812 && (offset + width <= store_info->end))
1813 {
1814 unsigned HOST_WIDE_INT mask
1815 = (lowpart_bitmask (width)
1816 << (offset - store_info->begin));
1817
1818 if ((store_info->positions_needed & mask) == mask
1819 && replace_read (store_info, i_ptr,
1820 read_info, insn_info, loc))
1821 return 0;
1822 }
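/* Worked example (editorial, illustrative values): for a store with
   begin == 0, end == 8 and every bit of positions_needed set, a
   4-byte read at offset == 2 computes

       mask = lowpart_bitmask (4) << 2 == 0xf << 2 == 0x3c

   so (positions_needed & mask) == mask holds and replace_read gets
   the chance to rewrite the load.  */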
1823 /* The bases are the same, just see if the offsets
1824 overlap. */
1825 if ((offset < store_info->end)
1826 && (offset + width > store_info->begin))
1827 remove = true;
1828 }
1829 }
1830
1831 /* else
1832 The else case that is missing here is that the
1833 bases are constant but different. There is nothing
1834 to do here because there is no overlap. */
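/* Editorial note: the overlap test above treats [begin, end) and
   [offset, offset + width) as half-open byte intervals; e.g. a store
   of [0, 8) and a read of [8, 12) do not overlap, while a read of
   [6, 10) does and forces the store off the active list.  */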
1835
1836 if (remove)
1837 {
1838 if (dump_file)
1839 dump_insn_info ("removing from active", i_ptr);
1840
1841 if (last)
1842 last->next_local_store = i_ptr->next_local_store;
1843 else
1844 active_local_stores = i_ptr->next_local_store;
1845 }
1846 else
1847 last = i_ptr;
1848 i_ptr = i_ptr->next_local_store;
1849 }
1850 }
1851 else
1852 {
1853 insn_info_t i_ptr = active_local_stores;
1854 insn_info_t last = NULL;
1855 if (dump_file)
1856 {
1857 fprintf (dump_file, " processing cselib load mem:");
1858 print_inline_rtx (dump_file, mem, 0);
1859 fprintf (dump_file, "\n");
1860 }
1861
1862 while (i_ptr)
1863 {
1864 bool remove = false;
1865 store_info_t store_info = i_ptr->store_rec;
1866
1867 if (dump_file)
1868 fprintf (dump_file, " processing cselib load against insn %d\n",
1869 INSN_UID (i_ptr->insn));
1870
1871 /* Skip the clobbers. */
1872 while (!store_info->is_set)
1873 store_info = store_info->next;
1874
1875 /* If this read is just reading back something that we just
1876 stored, rewrite the read. */
1877 if (store_info->rhs
1878 && store_info->group_id == -1
1879 && store_info->cse_base == base
1880 && (offset >= store_info->begin)
1881 && (offset + width <= store_info->end))
1882 {
1883 unsigned HOST_WIDE_INT mask
1884 = (lowpart_bitmask (width)
1885 << (offset - store_info->begin));
1886
1887 if ((store_info->positions_needed & mask) == mask
1888 && replace_read (store_info, i_ptr,
1889 read_info, insn_info, loc))
1890 return 0;
1891 }
1892
1893 if (!store_info->alias_set)
1894 remove = canon_true_dependence (store_info->mem,
1895 GET_MODE (store_info->mem),
1896 store_info->mem_addr,
1897 mem, rtx_varies_p);
1898
1899 if (remove)
1900 {
1901 if (dump_file)
1902 dump_insn_info ("removing from active", i_ptr);
1903
1904 if (last)
1905 last->next_local_store = i_ptr->next_local_store;
1906 else
1907 active_local_stores = i_ptr->next_local_store;
1908 }
1909 else
1910 last = i_ptr;
1911 i_ptr = i_ptr->next_local_store;
1912 }
1913 }
1914 return 0;
1915 }
1916
1917 /* A note_uses callback in which DATA is the bb_info, as for
1918 check_mem_read_rtx. Walk *LOC with check_mem_read_rtx so that
1919 every mem read within it is processed. */
1920
1921 static void
1922 check_mem_read_use (rtx *loc, void *data)
1923 {
1924 for_each_rtx (loc, check_mem_read_rtx, data);
1925 }
1926
1927 /* Apply record_store to all candidate stores in INSN. Mark INSN
1928 as not deletable if some part of it is not a candidate store and
1929 assigns to a non-register target. */
1930
1931 static void
1932 scan_insn (bb_info_t bb_info, rtx insn)
1933 {
1934 rtx body;
1935 insn_info_t insn_info = pool_alloc (insn_info_pool);
1936 int mems_found = 0;
1937 memset (insn_info, 0, sizeof (struct insn_info));
1938
1939 if (dump_file)
1940 fprintf (dump_file, "\n**scanning insn=%d\n",
1941 INSN_UID (insn));
1942
1943 insn_info->prev_insn = bb_info->last_insn;
1944 insn_info->insn = insn;
1945 bb_info->last_insn = insn_info;
1946
1947
1948 /* Cselib clears the table for this case, so we have to essentially
1949 do the same. */
1950 if (NONJUMP_INSN_P (insn)
1951 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
1952 && MEM_VOLATILE_P (PATTERN (insn)))
1953 {
1954 add_wild_read (bb_info);
1955 insn_info->cannot_delete = true;
1956 return;
1957 }
1958
1959 /* Look at all of the uses in the insn. */
1960 note_uses (&PATTERN (insn), check_mem_read_use, bb_info);
1961
1962 if (CALL_P (insn))
1963 {
1964 insn_info->cannot_delete = true;
1965
1966 /* Const functions cannot do anything bad, i.e. read memory;
1967 however, they can read their parameters, which may have
1968 been pushed onto the stack. */
1969 if (CONST_OR_PURE_CALL_P (insn) && !pure_call_p (insn))
1970 {
1971 insn_info_t i_ptr = active_local_stores;
1972 insn_info_t last = NULL;
1973
1974 if (dump_file)
1975 fprintf (dump_file, "const call %d\n", INSN_UID (insn));
1976
1977 /* See the head comment of the frame_read field. */
1978 if (reload_completed)
1979 insn_info->frame_read = true;
1980
1981 /* Loop over the active stores and remove those which are
1982 killed by the const function call. */
1983 while (i_ptr)
1984 {
1985 bool remove_store = false;
1986
1987 /* The stack pointer based stores are always killed. */
1988 if (i_ptr->stack_pointer_based)
1989 remove_store = true;
1990
1991 /* If the frame is read, the frame related stores are killed. */
1992 else if (insn_info->frame_read)
1993 {
1994 store_info_t store_info = i_ptr->store_rec;
1995
1996 /* Skip the clobbers. */
1997 while (!store_info->is_set)
1998 store_info = store_info->next;
1999
2000 if (store_info->group_id >= 0
2001 && VEC_index (group_info_t, rtx_group_vec,
2002 store_info->group_id)->frame_related)
2003 remove_store = true;
2004 }
2005
2006 if (remove_store)
2007 {
2008 if (dump_file)
2009 dump_insn_info ("removing from active", i_ptr);
2010
2011 if (last)
2012 last->next_local_store = i_ptr->next_local_store;
2013 else
2014 active_local_stores = i_ptr->next_local_store;
2015 }
2016 else
2017 last = i_ptr;
2018
2019 i_ptr = i_ptr->next_local_store;
2020 }
2021 }
2022
2023 else
2024 /* Every other call, including pure functions, may read memory. */
2025 add_wild_read (bb_info);
2026
2027 return;
2028 }
2029
2030 /* Assuming that there are sets in these insns, we cannot delete
2031 them. */
2032 if ((GET_CODE (PATTERN (insn)) == CLOBBER)
2033 || volatile_refs_p (PATTERN (insn))
2034 || (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))
2035 || (RTX_FRAME_RELATED_P (insn))
2036 || find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX))
2037 insn_info->cannot_delete = true;
2038
2039 body = PATTERN (insn);
2040 if (GET_CODE (body) == PARALLEL)
2041 {
2042 int i;
2043 for (i = 0; i < XVECLEN (body, 0); i++)
2044 mems_found += record_store (XVECEXP (body, 0, i), bb_info);
2045 }
2046 else
2047 mems_found += record_store (body, bb_info);
2048
2049 if (dump_file)
2050 fprintf (dump_file, "mems_found = %d, cannot_delete = %s\n",
2051 mems_found, insn_info->cannot_delete ? "true" : "false");
2052
2053 /* If the insn contains exactly one set of a mem and has not been
2054 marked cannot delete, add it into active_local_stores so that it
2055 can be locally deleted if found dead. Otherwise mark it as cannot
2056 delete. This simplifies the processing later. */
2057 if (mems_found == 1 && !insn_info->cannot_delete)
2058 {
2059 insn_info->next_local_store = active_local_stores;
2060 active_local_stores = insn_info;
2061 }
2062 else
2063 insn_info->cannot_delete = true;
2064 }
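/* Editorial example: a PARALLEL that sets two separate mems yields
   mems_found == 2, so the insn is kept but marked cannot_delete
   above; only insns recording exactly one mem set are pushed onto
   active_local_stores.  */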
2065
2066
2067 /* Remove BASE from the set of active_local_stores. This is a
2068 callback from cselib that is used to get rid of the stores in
2069 active_local_stores. */
2070
2071 static void
2072 remove_useless_values (cselib_val *base)
2073 {
2074 insn_info_t insn_info = active_local_stores;
2075 insn_info_t last = NULL;
2076
2077 while (insn_info)
2078 {
2079 store_info_t store_info = insn_info->store_rec;
2080 bool delete = false;
2081
2082 /* If ANY of the store_infos match the cselib group that is
2083 being deleted, then the insn cannot be deleted. */
2084 while (store_info)
2085 {
2086 if ((store_info->group_id == -1)
2087 && (store_info->cse_base == base))
2088 {
2089 delete = true;
2090 break;
2091 }
2092 store_info = store_info->next;
2093 }
2094
2095 if (delete)
2096 {
2097 if (last)
2098 last->next_local_store = insn_info->next_local_store;
2099 else
2100 active_local_stores = insn_info->next_local_store;
2101 free_store_info (insn_info);
2102 }
2103 else
2104 last = insn_info;
2105
2106 insn_info = insn_info->next_local_store;
2107 }
2108 }
2109
2110
2111 /* Do all of step 1. */
2112
2113 static void
2114 dse_step1 (void)
2115 {
2116 basic_block bb;
2117
2118 cselib_init (false);
2119 all_blocks = BITMAP_ALLOC (NULL);
2120 bitmap_set_bit (all_blocks, ENTRY_BLOCK);
2121 bitmap_set_bit (all_blocks, EXIT_BLOCK);
2122
2123 FOR_ALL_BB (bb)
2124 {
2125 insn_info_t ptr;
2126 bb_info_t bb_info = pool_alloc (bb_info_pool);
2127
2128 memset (bb_info, 0, sizeof (struct bb_info));
2129 bitmap_set_bit (all_blocks, bb->index);
2130
2131 bb_table[bb->index] = bb_info;
2132 cselib_discard_hook = remove_useless_values;
2133
2134 if (bb->index >= NUM_FIXED_BLOCKS)
2135 {
2136 rtx insn;
2137
2138 cse_store_info_pool
2139 = create_alloc_pool ("cse_store_info_pool",
2140 sizeof (struct store_info), 100);
2141 active_local_stores = NULL;
2142 cselib_clear_table ();
2143
2144 /* Scan the insns. */
2145 FOR_BB_INSNS (bb, insn)
2146 {
2147 if (INSN_P (insn))
2148 scan_insn (bb_info, insn);
2149 cselib_process_insn (insn);
2150 }
2151
2152 /* This is something of a hack, because the global algorithm
2153 is supposed to take care of the case where stores go dead
2154 at the end of the function. However, the global
2155 algorithm must take a more conservative view of block
2156 mode reads than the local algorithm does. So to handle the
2157 case where there is a store to the frame followed by a
2158 non-overlapping block mode read, we look at the active local
2159 stores at the end of the function and delete all of the
2160 frame and spill based ones. */
2161 if (stores_off_frame_dead_at_return
2162 && (EDGE_COUNT (bb->succs) == 0
2163 || (single_succ_p (bb)
2164 && single_succ (bb) == EXIT_BLOCK_PTR
2165 && ! current_function_calls_eh_return)))
2166 {
2167 insn_info_t i_ptr = active_local_stores;
2168 while (i_ptr)
2169 {
2170 store_info_t store_info = i_ptr->store_rec;
2171
2172 /* Skip the clobbers. */
2173 while (!store_info->is_set)
2174 store_info = store_info->next;
2175 if (store_info->alias_set)
2176 delete_dead_store_insn (i_ptr);
2177 else
2178 if (store_info->group_id >= 0)
2179 {
2180 group_info_t group
2181 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2182 if (group->frame_related)
2183 delete_dead_store_insn (i_ptr);
2184 }
2185
2186 i_ptr = i_ptr->next_local_store;
2187 }
2188 }
2189
2190 /* Get rid of the loads that were discovered in
2191 replace_read. Cselib is finished with this block. */
2192 while (deferred_change_list)
2193 {
2194 deferred_change_t next = deferred_change_list->next;
2195
2196 /* There is no reason to validate this change. That was
2197 done earlier. */
2198 *deferred_change_list->loc = deferred_change_list->reg;
2199 pool_free (deferred_change_pool, deferred_change_list);
2200 deferred_change_list = next;
2201 }
2202
2203 /* Get rid of all of the cselib based store_infos in this
2204 block and mark the containing insns as not being
2205 deletable. */
2206 ptr = bb_info->last_insn;
2207 while (ptr)
2208 {
2209 if (ptr->contains_cselib_groups)
2210 free_store_info (ptr);
2211 ptr = ptr->prev_insn;
2212 }
2213
2214 free_alloc_pool (cse_store_info_pool);
2215 }
2216 }
2217
2218 cselib_finish ();
2219 htab_empty (rtx_group_table);
2220 }
2221
2222 \f
2223 /*----------------------------------------------------------------------------
2224 Second step.
2225
2226 Assign each byte position in the stores that we are going to
2227 analyze globally to a position in the bitmaps. Returns true if
2228 there are any bit positions assigned.
2229 ----------------------------------------------------------------------------*/
2230
2231 static void
2232 dse_step2_init (void)
2233 {
2234 unsigned int i;
2235 group_info_t group;
2236
2237 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2238 {
2239 /* For all non stack related bases, we only consider a store to
2240 be deletable if there are two or more stores for that
2241 position. This is because it takes one store to make the
2242 other store redundant. However, for the stores that are
2243 stack related, we consider them even if there is only one
2244 store for the position. We do this because the stack related
2245 stores can be deleted if there is no read between them and
2246 the end of the function.
2247
2248 To make this work in the current framework, we take the stack
2249 related bases and add all of the bits from store1 into store2.
2250 This has the effect of making them eligible even if there is
2251 only one store. */
2252
2253 if (stores_off_frame_dead_at_return && group->frame_related)
2254 {
2255 bitmap_ior_into (group->store2_n, group->store1_n);
2256 bitmap_ior_into (group->store2_p, group->store1_p);
2257 if (dump_file)
2258 fprintf (dump_file, "group %d is frame related ", i);
2259 }
2260
2261 group->offset_map_size_n++;
2262 group->offset_map_n = XNEWVEC (int, group->offset_map_size_n);
2263 group->offset_map_size_p++;
2264 group->offset_map_p = XNEWVEC (int, group->offset_map_size_p);
2265 group->process_globally = false;
2266 if (dump_file)
2267 {
2268 fprintf (dump_file, "group %d(%d+%d): ", i,
2269 (int)bitmap_count_bits (group->store2_n),
2270 (int)bitmap_count_bits (group->store2_p));
2271 bitmap_print (dump_file, group->store2_n, "n ", " ");
2272 bitmap_print (dump_file, group->store2_p, "p ", "\n");
2273 }
2274 }
2275 }
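/* Editorial example: for a frame related group with store1_p ==
   {4, 5, 6, 7} (one 4-byte store) and store2_p initially empty, the
   ior above leaves store2_p == {4, 5, 6, 7}, so the single store is
   still visible to the global algorithm.  */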
2276
2277
2278 /* Init the offset tables for the normal case. */
2279
2280 static bool
2281 dse_step2_nospill (void)
2282 {
2283 unsigned int i;
2284 group_info_t group;
2285 /* Position 0 is unused because 0 is used in the maps to mean
2286 unused. */
2287 current_position = 1;
2288
2289 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2290 {
2291 bitmap_iterator bi;
2292 unsigned int j;
2293
2294 if (group == clear_alias_group)
2295 continue;
2296
2297 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2298 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2299 bitmap_clear (group->group_kill);
2300
2301 EXECUTE_IF_SET_IN_BITMAP (group->store2_n, 0, j, bi)
2302 {
2303 bitmap_set_bit (group->group_kill, current_position);
2304 group->offset_map_n[j] = current_position++;
2305 group->process_globally = true;
2306 }
2307 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2308 {
2309 bitmap_set_bit (group->group_kill, current_position);
2310 group->offset_map_p[j] = current_position++;
2311 group->process_globally = true;
2312 }
2313 }
2314 return current_position != 1;
2315 }
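/* Editorial example of the numbering above: if group 0 has store2_p
   == {4, 5, 6, 7}, it is assigned offset_map_p[4..7] = 1..4 and
   group_kill == {1, 2, 3, 4}; the next group's positions then start
   at current_position == 5.  Index 0 always means "not tracked".  */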
2316
2317
2318 /* Init the offset tables for the spill case. */
2319
2320 static bool
2321 dse_step2_spill (void)
2322 {
2323 unsigned int j;
2324 group_info_t group = clear_alias_group;
2325 bitmap_iterator bi;
2326
2327 /* Position 0 is unused because 0 is used in the maps to mean
2328 unused. */
2329 current_position = 1;
2330
2331 if (dump_file)
2332 {
2333 bitmap_print (dump_file, clear_alias_sets,
2334 "clear alias sets ", "\n");
2335 bitmap_print (dump_file, disqualified_clear_alias_sets,
2336 "disqualified clear alias sets ", "\n");
2337 }
2338
2339 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2340 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2341 bitmap_clear (group->group_kill);
2342
2343 /* Remove the disqualified positions from the store2_p set. */
2344 bitmap_and_compl_into (group->store2_p, disqualified_clear_alias_sets);
2345
2346 /* We do not need to process the store2_n set because
2347 alias_sets are always positive. */
2348 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2349 {
2350 bitmap_set_bit (group->group_kill, current_position);
2351 group->offset_map_p[j] = current_position++;
2352 group->process_globally = true;
2353 }
2354
2355 return current_position != 1;
2356 }
2357
2358
2359 \f
2360 /*----------------------------------------------------------------------------
2361 Third step.
2362
2363 Build the bit vectors for the transfer functions.
2364 ----------------------------------------------------------------------------*/
2365
2366
2367 /* Note that this is NOT a general purpose function. Any mem that has
2368 an alias set registered here is expected to be COMPLETELY unaliased:
2369 i.e. its addresses are not and need not be examined.
2370
2371 It is known that all references to this address will have this
2372 alias set and there are NO other references to this address in the
2373 function.
2374
2375 Currently the only place that is known to be clean enough to use
2376 this interface is the code that assigns the spill locations.
2377
2378 All of the mems that have alias_sets registered are subjected to a
2379 very powerful form of dse where function calls, volatile reads and
2380 writes, and reads from random locations are not taken into account.
2381
2382 It is also assumed that these locations go dead when the function
2383 returns. This assumption could be relaxed if there were found to
2384 be places that this assumption was not correct.
2385
2386 The MODE is passed in and saved. The mode of each load or store to
2387 a mem with ALIAS_SET is checked against MODE. If the size of that
2388 load or store is different from MODE, processing is halted on this
2389 alias set. For the vast majority of alias sets, all of the loads
2390 and stores will use the same mode. But vectors are treated
2391 differently: the alias set is established for the entire vector,
2392 but reload will insert loads and stores for individual elements and
2393 we do not necessarily have the information to track those separate
2394 elements. So when we see a mode mismatch, we just bail. */
2395
2396
2397 void
2398 dse_record_singleton_alias_set (alias_set_type alias_set,
2399 enum machine_mode mode)
2400 {
2401 struct clear_alias_mode_holder tmp_holder;
2402 struct clear_alias_mode_holder *entry;
2403 void **slot;
2404
2405 /* If we are not going to run dse, we need to return now or there
2406 will be problems with allocating the bitmaps. */
2407 if ((!gate_dse()) || !alias_set)
2408 return;
2409
2410 if (!clear_alias_sets)
2411 {
2412 clear_alias_sets = BITMAP_ALLOC (NULL);
2413 disqualified_clear_alias_sets = BITMAP_ALLOC (NULL);
2414 clear_alias_mode_table = htab_create (11, clear_alias_mode_hash,
2415 clear_alias_mode_eq, NULL);
2416 clear_alias_mode_pool = create_alloc_pool ("clear_alias_mode_pool",
2417 sizeof (struct clear_alias_mode_holder), 100);
2418 }
2419
2420 bitmap_set_bit (clear_alias_sets, alias_set);
2421
2422 tmp_holder.alias_set = alias_set;
2423
2424 slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, INSERT);
2425 gcc_assert (*slot == NULL);
2426
2427 *slot = entry = pool_alloc (clear_alias_mode_pool);
2428 entry->alias_set = alias_set;
2429 entry->mode = mode;
2430 }
2431
2432
2433 /* Remove ALIAS_SET from the sets of stack slots being considered. */
2434
2435 void
2436 dse_invalidate_singleton_alias_set (alias_set_type alias_set)
2437 {
2438 if ((!gate_dse()) || !alias_set)
2439 return;
2440
2441 bitmap_clear_bit (clear_alias_sets, alias_set);
2442 }
2443
2444
2445 /* Look up the bitmap index for OFFSET in GROUP_INFO. If it is not
2446 there, return 0. */
2447
2448 static int
2449 get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset)
2450 {
2451 if (offset < 0)
2452 {
2453 HOST_WIDE_INT offset_p = -offset;
2454 if (offset_p >= group_info->offset_map_size_n)
2455 return 0;
2456 return group_info->offset_map_n[offset_p];
2457 }
2458 else
2459 {
2460 if (offset >= group_info->offset_map_size_p)
2461 return 0;
2462 return group_info->offset_map_p[offset];
2463 }
2464 }
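/* Editorial example: get_bitmap_index (group, -3) returns
   group->offset_map_n[3], get_bitmap_index (group, 5) returns
   group->offset_map_p[5], and a result of 0 means that offset was
   never assigned a global bit position.  */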
2465
2466
2467 /* Process the STORE_INFOs into the bitmaps GEN and KILL. KILL
2468 may be NULL. */
2469
2470 static void
2471 scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill)
2472 {
2473 while (store_info)
2474 {
2475 HOST_WIDE_INT i;
2476 group_info_t group_info
2477 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2478 if (group_info->process_globally)
2479 for (i = store_info->begin; i < store_info->end; i++)
2480 {
2481 int index = get_bitmap_index (group_info, i);
2482 if (index != 0)
2483 {
2484 bitmap_set_bit (gen, index);
2485 if (kill)
2486 bitmap_clear_bit (kill, index);
2487 }
2488 }
2489 store_info = store_info->next;
2490 }
2491 }
2492
2493
2494 /* Process the STORE_INFOs into the bitmaps GEN and KILL. KILL
2495 may be NULL. */
2496
2497 static void
2498 scan_stores_spill (store_info_t store_info, bitmap gen, bitmap kill)
2499 {
2500 while (store_info)
2501 {
2502 if (store_info->alias_set)
2503 {
2504 int index = get_bitmap_index (clear_alias_group,
2505 store_info->alias_set);
2506 if (index != 0)
2507 {
2508 bitmap_set_bit (gen, index);
2509 if (kill)
2510 bitmap_clear_bit (kill, index);
2511 }
2512 }
2513 store_info = store_info->next;
2514 }
2515 }
2516
2517
2518 /* Process the READ_INFOs into the bitmaps GEN and KILL. KILL
2519 may be NULL. */
2520
2521 static void
2522 scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
2523 {
2524 read_info_t read_info = insn_info->read_rec;
2525 int i;
2526 group_info_t group;
2527
2528 /* If this insn reads the frame, kill all the frame related stores. */
2529 if (insn_info->frame_read)
2530 {
2531 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2532 if (group->process_globally && group->frame_related)
2533 {
2534 if (kill)
2535 bitmap_ior_into (kill, group->group_kill);
2536 bitmap_and_compl_into (gen, group->group_kill);
2537 }
2538 }
2539
2540 while (read_info)
2541 {
2542 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2543 {
2544 if (group->process_globally)
2545 {
2546 if (i == read_info->group_id)
2547 {
2548 if (read_info->begin > read_info->end)
2549 {
2550 /* Begin > end for block mode reads. */
2551 if (kill)
2552 bitmap_ior_into (kill, group->group_kill);
2553 bitmap_and_compl_into (gen, group->group_kill);
2554 }
2555 else
2556 {
2557 /* The groups are the same, just process the
2558 offsets. */
2559 HOST_WIDE_INT j;
2560 for (j = read_info->begin; j < read_info->end; j++)
2561 {
2562 int index = get_bitmap_index (group, j);
2563 if (index != 0)
2564 {
2565 if (kill)
2566 bitmap_set_bit (kill, index);
2567 bitmap_clear_bit (gen, index);
2568 }
2569 }
2570 }
2571 }
2572 else
2573 {
2574 /* The groups are different; if the alias sets
2575 conflict, clear the entire group. We only need
2576 to apply this test if the read_info is a cselib
2577 read. Anything with a constant base cannot alias
2578 something else with a different constant
2579 base. */
2580 if ((read_info->group_id < 0)
2581 && canon_true_dependence (group->base_mem,
2582 QImode,
2583 group->canon_base_mem,
2584 read_info->mem, rtx_varies_p))
2585 {
2586 if (kill)
2587 bitmap_ior_into (kill, group->group_kill);
2588 bitmap_and_compl_into (gen, group->group_kill);
2589 }
2590 }
2591 }
2592 }
2593
2594 read_info = read_info->next;
2595 }
2596 }
2597
2598 /* Process the READ_INFOs into the bitmaps GEN and KILL. KILL
2599 may be NULL. */
2600
2601 static void
2602 scan_reads_spill (read_info_t read_info, bitmap gen, bitmap kill)
2603 {
2604 while (read_info)
2605 {
2606 if (read_info->alias_set)
2607 {
2608 int index = get_bitmap_index (clear_alias_group,
2609 read_info->alias_set);
2610 if (index != 0)
2611 {
2612 if (kill)
2613 bitmap_set_bit (kill, index);
2614 bitmap_clear_bit (gen, index);
2615 }
2616 }
2617
2618 read_info = read_info->next;
2619 }
2620 }
2621
2622
2623 /* Return the insn in BB_INFO before the first wild read or, if there
2624 are no wild reads in the block, the last insn. */
2625
2626 static insn_info_t
2627 find_insn_before_first_wild_read (bb_info_t bb_info)
2628 {
2629 insn_info_t insn_info = bb_info->last_insn;
2630 insn_info_t last_wild_read = NULL;
2631
2632 while (insn_info)
2633 {
2634 if (insn_info->wild_read)
2635 {
2636 last_wild_read = insn_info->prev_insn;
2637 /* Block starts with wild read. */
2638 if (!last_wild_read)
2639 return NULL;
2640 }
2641
2642 insn_info = insn_info->prev_insn;
2643 }
2644
2645 if (last_wild_read)
2646 return last_wild_read;
2647 else
2648 return bb_info->last_insn;
2649 }
2650
2651
2652 /* Scan the insns in BB_INFO from the chosen starting insn to the top
2653 of the block in order to build the gen and kill sets for the block.
2654 The starting insn is either the last insn in the block or the insn
2655 just before the first wild read. In the latter case we are able to
2656 skip the rest of the block because it just does not matter:
2657 anything that happens there is hidden by the wild read. */
2658
2659 static void
2660 dse_step3_scan (bool for_spills, basic_block bb)
2661 {
2662 bb_info_t bb_info = bb_table[bb->index];
2663 insn_info_t insn_info;
2664
2665 if (for_spills)
2666 /* There are no wild reads in the spill case. */
2667 insn_info = bb_info->last_insn;
2668 else
2669 insn_info = find_insn_before_first_wild_read (bb_info);
2670
2671 /* In the spill case, or in the no_spill case when there is no wild
2672 read in the block, we will need a kill set. */
2673 if (insn_info == bb_info->last_insn)
2674 {
2675 if (bb_info->kill)
2676 bitmap_clear (bb_info->kill);
2677 else
2678 bb_info->kill = BITMAP_ALLOC (NULL);
2679 }
2680 else
2681 if (bb_info->kill)
2682 BITMAP_FREE (bb_info->kill);
2683
2684 while (insn_info)
2685 {
2686 /* There may have been code deleted by the dce pass run before
2687 this phase. */
2688 if (insn_info->insn && INSN_P (insn_info->insn))
2689 {
2690 /* Process the read(s) last. */
2691 if (for_spills)
2692 {
2693 scan_stores_spill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2694 scan_reads_spill (insn_info->read_rec, bb_info->gen, bb_info->kill);
2695 }
2696 else
2697 {
2698 scan_stores_nospill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2699 scan_reads_nospill (insn_info, bb_info->gen, bb_info->kill);
2700 }
2701 }
2702
2703 insn_info = insn_info->prev_insn;
2704 }
2705 }
2706
2707
2708 /* Set the gen set of the exit block, and also any block with no
2709 successors that does not have a wild read. */
2710
2711 static void
2712 dse_step3_exit_block_scan (bb_info_t bb_info)
2713 {
2714 /* The gen set is all 0's for the exit block except for the
2715 frame_pointer_group. */
2716
2717 if (stores_off_frame_dead_at_return)
2718 {
2719 unsigned int i;
2720 group_info_t group;
2721
2722 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2723 {
2724 if (group->process_globally && group->frame_related)
2725 bitmap_ior_into (bb_info->gen, group->group_kill);
2726 }
2727 }
2728 }
2729
2730
2731 /* Find all of the blocks that are not backwards reachable from the
2732 exit block or any block with no successors (BB). These are the
2733 infinite loops or infinite self loops. These blocks will still
2734 have their bits set in UNREACHABLE_BLOCKS. */
2735
2736 static void
2737 mark_reachable_blocks (sbitmap unreachable_blocks, basic_block bb)
2738 {
2739 edge e;
2740 edge_iterator ei;
2741
2742 if (TEST_BIT (unreachable_blocks, bb->index))
2743 {
2744 RESET_BIT (unreachable_blocks, bb->index);
2745 FOR_EACH_EDGE (e, ei, bb->preds)
2746 {
2747 mark_reachable_blocks (unreachable_blocks, e->src);
2748 }
2749 }
2750 }
2751
2752 /* Build the transfer functions for the function. */
2753
2754 static void
2755 dse_step3 (bool for_spills)
2756 {
2757 basic_block bb;
2758 sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block);
2759 sbitmap_iterator sbi;
2760 bitmap all_ones = NULL;
2761 unsigned int i;
2762
2763 sbitmap_ones (unreachable_blocks);
2764
2765 FOR_ALL_BB (bb)
2766 {
2767 bb_info_t bb_info = bb_table[bb->index];
2768 if (bb_info->gen)
2769 bitmap_clear (bb_info->gen);
2770 else
2771 bb_info->gen = BITMAP_ALLOC (NULL);
2772
2773 if (bb->index == ENTRY_BLOCK)
2774 ;
2775 else if (bb->index == EXIT_BLOCK)
2776 dse_step3_exit_block_scan (bb_info);
2777 else
2778 dse_step3_scan (for_spills, bb);
2779 if (EDGE_COUNT (bb->succs) == 0)
2780 mark_reachable_blocks (unreachable_blocks, bb);
2781
2782 /* If this is the second time dataflow is run, delete the old
2783 sets. */
2784 if (bb_info->in)
2785 BITMAP_FREE (bb_info->in);
2786 if (bb_info->out)
2787 BITMAP_FREE (bb_info->out);
2788 }
2789
2790 /* For any block in an infinite loop, we must initialize the out set
2791 to all ones. This could be expensive, but almost never occurs in
2792 practice. However, it is common in regression tests. */
2793 EXECUTE_IF_SET_IN_SBITMAP (unreachable_blocks, 0, i, sbi)
2794 {
2795 if (bitmap_bit_p (all_blocks, i))
2796 {
2797 bb_info_t bb_info = bb_table[i];
2798 if (!all_ones)
2799 {
2800 unsigned int j;
2801 group_info_t group;
2802
2803 all_ones = BITMAP_ALLOC (NULL);
2804 for (j = 0; VEC_iterate (group_info_t, rtx_group_vec, j, group); j++)
2805 bitmap_ior_into (all_ones, group->group_kill);
2806 }
2807 if (!bb_info->out)
2808 {
2809 bb_info->out = BITMAP_ALLOC (NULL);
2810 bitmap_copy (bb_info->out, all_ones);
2811 }
2812 }
2813 }
2814
2815 if (all_ones)
2816 BITMAP_FREE (all_ones);
2817 sbitmap_free (unreachable_blocks);
2818 }
2819
2820
2821 \f
2822 /*----------------------------------------------------------------------------
2823 Fourth step.
2824
2825 Solve the bitvector equations.
2826 ----------------------------------------------------------------------------*/
2827
2828
2829 /* Confluence function for blocks with no successors. Create an out
2830 set from the gen set of the exit block. This block logically has
2831 the exit block as a successor. */
2832
2833
2834
2835 static void
2836 dse_confluence_0 (basic_block bb)
2837 {
2838 bb_info_t bb_info = bb_table[bb->index];
2839
2840 if (bb->index == EXIT_BLOCK)
2841 return;
2842
2843 if (!bb_info->out)
2844 {
2845 bb_info->out = BITMAP_ALLOC (NULL);
2846 bitmap_copy (bb_info->out, bb_table[EXIT_BLOCK]->gen);
2847 }
2848 }
2849
2850 /* Propagate the information from the in set of the dest of E to the
2851 out set of the src of E. If the various in or out sets are not
2852 there, that means they are all ones. */
2853
2854 static void
2855 dse_confluence_n (edge e)
2856 {
2857 bb_info_t src_info = bb_table[e->src->index];
2858 bb_info_t dest_info = bb_table[e->dest->index];
2859
2860 if (dest_info->in)
2861 {
2862 if (src_info->out)
2863 bitmap_and_into (src_info->out, dest_info->in);
2864 else
2865 {
2866 src_info->out = BITMAP_ALLOC (NULL);
2867 bitmap_copy (src_info->out, dest_info->in);
2868 }
2869 }
2870 }
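/* Editorial note: the bitmap_and_into above makes this confluence an
   intersection, which is what the problem requires: a store position
   may be treated as dead at the bottom of a block only if it is dead
   along every outgoing edge.  A missing in or out set stands for the
   all-ones vector, with which intersection would be a no-op.  */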
2871
2872
2873 /* Propagate the info from the out to the in set of BB_INDEX's basic
2874 block. There are three cases:
2875
2876 1) The block has no kill set. In this case the kill set is all
2877 ones. It does not matter what the out set of the block is, none of
2878 the info can reach the top. The only thing that reaches the top is
2879 the gen set and we just copy the set.
2880
2881 2) There is a kill set but no out set and bb has successors. In
2882 this case we just return. Eventually an out set will be created and
2883 it is better to wait than to create a set of ones.
2884
2885 3) There is both a kill and out set. We apply the obvious transfer
2886 function.
2887 */
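/* Editorial restatement of case 3, matching bitmap_ior_and_compl
   below:

       in(bb) = gen(bb) | (out(bb) & ~kill(bb))

   i.e. a position reaches the top of the block if the block
   generates it, or if it flows in from below without being
   killed.  */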
2888
2889 static bool
2890 dse_transfer_function (int bb_index)
2891 {
2892 bb_info_t bb_info = bb_table[bb_index];
2893
2894 if (bb_info->kill)
2895 {
2896 if (bb_info->out)
2897 {
2898 /* Case 3 above. */
2899 if (bb_info->in)
2900 return bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2901 bb_info->out, bb_info->kill);
2902 else
2903 {
2904 bb_info->in = BITMAP_ALLOC (NULL);
2905 bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2906 bb_info->out, bb_info->kill);
2907 return true;
2908 }
2909 }
2910 else
2911 /* Case 2 above. */
2912 return false;
2913 }
2914 else
2915 {
2916 /* Case 1 above. If there is already an in set, nothing
2917 happens. */
2918 if (bb_info->in)
2919 return false;
2920 else
2921 {
2922 bb_info->in = BITMAP_ALLOC (NULL);
2923 bitmap_copy (bb_info->in, bb_info->gen);
2924 return true;
2925 }
2926 }
2927 }
2928
2929 /* Solve the dataflow equations. */
2930
2931 static void
2932 dse_step4 (void)
2933 {
2934 df_simple_dataflow (DF_BACKWARD, NULL, dse_confluence_0,
2935 dse_confluence_n, dse_transfer_function,
2936 all_blocks, df_get_postorder (DF_BACKWARD),
2937 df_get_n_blocks (DF_BACKWARD));
2938 if (dump_file)
2939 {
2940 basic_block bb;
2941
2942 fprintf (dump_file, "\n\n*** Global dataflow info after analysis.\n");
2943 FOR_ALL_BB (bb)
2944 {
2945 bb_info_t bb_info = bb_table[bb->index];
2946
2947 df_print_bb_index (bb, dump_file);
2948 if (bb_info->in)
2949 bitmap_print (dump_file, bb_info->in, " in: ", "\n");
2950 else
2951 fprintf (dump_file, " in: *MISSING*\n");
2952 if (bb_info->gen)
2953 bitmap_print (dump_file, bb_info->gen, " gen: ", "\n");
2954 else
2955 fprintf (dump_file, " gen: *MISSING*\n");
2956 if (bb_info->kill)
2957 bitmap_print (dump_file, bb_info->kill, " kill: ", "\n");
2958 else
2959 fprintf (dump_file, " kill: *MISSING*\n");
2960 if (bb_info->out)
2961 bitmap_print (dump_file, bb_info->out, " out: ", "\n");
2962 else
2963 fprintf (dump_file, " out: *MISSING*\n\n");
2964 }
2965 }
2966 }
2967
2968
2969 \f
2970 /*----------------------------------------------------------------------------
2971 Fifth step.
2972
2973 Delete the stores that can only be deleted using the global information.
2974 ----------------------------------------------------------------------------*/
2975
2976
2977 static void
2978 dse_step5_nospill (void)
2979 {
2980 basic_block bb;
2981 FOR_EACH_BB (bb)
2982 {
2983 bb_info_t bb_info = bb_table[bb->index];
2984 insn_info_t insn_info = bb_info->last_insn;
2985 bitmap v = bb_info->out;
2986
2987 while (insn_info)
2988 {
2989 bool deleted = false;
2990 if (dump_file && insn_info->insn)
2991 {
2992 fprintf (dump_file, "starting to process insn %d\n",
2993 INSN_UID (insn_info->insn));
2994 bitmap_print (dump_file, v, " v: ", "\n");
2995 }
2996
2997 /* There may have been code deleted by the dce pass run before
2998 this phase. */
2999 if (insn_info->insn
3000 && INSN_P (insn_info->insn)
3001 && (!insn_info->cannot_delete)
3002 && (!bitmap_empty_p (v)))
3003 {
3004 store_info_t store_info = insn_info->store_rec;
3005
3006 /* Try to delete the current insn. */
3007 deleted = true;
3008
3009 /* Skip the clobbers. */
3010 while (!store_info->is_set)
3011 store_info = store_info->next;
3012
3013 if (store_info->alias_set)
3014 deleted = false;
3015 else
3016 {
3017 HOST_WIDE_INT i;
3018 group_info_t group_info
3019 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
3020
3021 for (i = store_info->begin; i < store_info->end; i++)
3022 {
3023 int index = get_bitmap_index (group_info, i);
3024
3025 if (dump_file)
3026 fprintf (dump_file, "i = %d, index = %d\n", (int)i, index);
3027 if (index == 0 || !bitmap_bit_p (v, index))
3028 {
3029 if (dump_file)
3030 fprintf (dump_file, "failing at i = %d\n", (int)i);
3031 deleted = false;
3032 break;
3033 }
3034 }
3035 }
3036 if (deleted)
3037 {
3038 if (dbg_cnt (dse))
3039 {
3040 check_for_inc_dec (insn_info->insn);
3041 delete_insn (insn_info->insn);
3042 insn_info->insn = NULL;
3043 globally_deleted++;
3044 }
3045 }
3046 }
3047 /* We do not want to process the local info if the insn was
3048 deleted. For instance, if the insn did a wild read, we
3049 no longer need to trash the info. */
3050 if (insn_info->insn
3051 && INSN_P (insn_info->insn)
3052 && (!deleted))
3053 {
3054 scan_stores_nospill (insn_info->store_rec, v, NULL);
3055 if (insn_info->wild_read)
3056 {
3057 if (dump_file)
3058 fprintf (dump_file, "wild read\n");
3059 bitmap_clear (v);
3060 }
3061 else if (insn_info->read_rec)
3062 {
3063 if (dump_file)
3064 fprintf (dump_file, "regular read\n");
3065 scan_reads_nospill (insn_info, v, NULL);
3066 }
3067 }
3068
3069 insn_info = insn_info->prev_insn;
3070 }
3071 }
3072 }
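/* Editorial example of the deletion test above: a store insn with
   begin == 0 and end == 8 is deleted only if all eight of its byte
   positions map to bits still set in v; if even one byte may be read
   later (its bit is clear, or it was never given an index), the insn
   is kept.  */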
3073
3074
3075 static void
3076 dse_step5_spill (void)
3077 {
3078 basic_block bb;
3079 FOR_EACH_BB (bb)
3080 {
3081 bb_info_t bb_info = bb_table[bb->index];
3082 insn_info_t insn_info = bb_info->last_insn;
3083 bitmap v = bb_info->out;
3084
3085 while (insn_info)
3086 {
3087 bool deleted = false;
3088 /* There may have been code deleted by the dce pass run before
3089 this phase. */
3090 if (insn_info->insn
3091 && INSN_P (insn_info->insn)
3092 && (!insn_info->cannot_delete)
3093 && (!bitmap_empty_p (v)))
3094 {
3095 /* Try to delete the current insn. */
3096 store_info_t store_info = insn_info->store_rec;
3097 deleted = true;
3098
3099 while (store_info)
3100 {
3101 if (store_info->alias_set)
3102 {
3103 int index = get_bitmap_index (clear_alias_group,
3104 store_info->alias_set);
3105 if (index == 0 || !bitmap_bit_p (v, index))
3106 {
3107 deleted = false;
3108 break;
3109 }
3110 }
3111 else
3112 deleted = false;
3113 store_info = store_info->next;
3114 }
3115 if (deleted && dbg_cnt (dse))
3116 {
3117 if (dump_file)
3118 fprintf (dump_file, "Spill deleting insn %d\n",
3119 INSN_UID (insn_info->insn));
3120 check_for_inc_dec (insn_info->insn);
3121 delete_insn (insn_info->insn);
3122 spill_deleted++;
3123 insn_info->insn = NULL;
3124 }
3125 }
3126
3127 if (insn_info->insn
3128 && INSN_P (insn_info->insn)
3129 && (!deleted))
3130 {
3131 scan_stores_spill (insn_info->store_rec, v, NULL);
3132 scan_reads_spill (insn_info->read_rec, v, NULL);
3133 }
3134
3135 insn_info = insn_info->prev_insn;
3136 }
3137 }
3138 }
3139
3140
3141 \f
3142 /*----------------------------------------------------------------------------
3143 Sixth step.
3144
3145 Destroy everything left standing.
3146 ----------------------------------------------------------------------------*/
3147
3148 static void
3149 dse_step6 (bool global_done)
3150 {
3151 unsigned int i;
3152 group_info_t group;
3153 basic_block bb;
3154
3155 if (global_done)
3156 {
3157 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3158 {
3159 free (group->offset_map_n);
3160 free (group->offset_map_p);
3161 BITMAP_FREE (group->store1_n);
3162 BITMAP_FREE (group->store1_p);
3163 BITMAP_FREE (group->store2_n);
3164 BITMAP_FREE (group->store2_p);
3165 BITMAP_FREE (group->group_kill);
3166 }
3167
3168 FOR_ALL_BB (bb)
3169 {
3170 bb_info_t bb_info = bb_table[bb->index];
3171 BITMAP_FREE (bb_info->gen);
3172 if (bb_info->kill)
3173 BITMAP_FREE (bb_info->kill);
3174 if (bb_info->in)
3175 BITMAP_FREE (bb_info->in);
3176 if (bb_info->out)
3177 BITMAP_FREE (bb_info->out);
3178 }
3179 }
3180 else
3181 {
3182 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3183 {
3184 BITMAP_FREE (group->store1_n);
3185 BITMAP_FREE (group->store1_p);
3186 BITMAP_FREE (group->store2_n);
3187 BITMAP_FREE (group->store2_p);
3188 BITMAP_FREE (group->group_kill);
3189 }
3190 }
3191
3192 if (clear_alias_sets)
3193 {
3194 BITMAP_FREE (clear_alias_sets);
3195 BITMAP_FREE (disqualified_clear_alias_sets);
3196 free_alloc_pool (clear_alias_mode_pool);
3197 htab_delete (clear_alias_mode_table);
3198 }
3199
3200 end_alias_analysis ();
3201 free (bb_table);
3202 htab_delete (rtx_group_table);
3203 VEC_free (group_info_t, heap, rtx_group_vec);
3204 BITMAP_FREE (all_blocks);
3205 BITMAP_FREE (scratch);
3206
3207 free_alloc_pool (rtx_store_info_pool);
3208 free_alloc_pool (read_info_pool);
3209 free_alloc_pool (insn_info_pool);
3210 free_alloc_pool (bb_info_pool);
3211 free_alloc_pool (rtx_group_info_pool);
3212 free_alloc_pool (deferred_change_pool);
3213 }
3214
3215
3216
3217 /* -------------------------------------------------------------------------
3218 DSE
3219 ------------------------------------------------------------------------- */
3220
3221 /* Callback for running pass_rtl_dse. */
3222
3223 static unsigned int
3224 rest_of_handle_dse (void)
3225 {
3226 bool did_global = false;
3227
3228 df_set_flags (DF_DEFER_INSN_RESCAN);
3229
3230 dse_step0 ();
3231 dse_step1 ();
3232 dse_step2_init ();
3233 if (dse_step2_nospill ())
3234 {
3235 df_set_flags (DF_LR_RUN_DCE);
3236 df_analyze ();
3237 did_global = true;
3238 if (dump_file)
3239 fprintf (dump_file, "doing global processing\n");
3240 dse_step3 (false);
3241 dse_step4 ();
3242 dse_step5_nospill ();
3243 }
3244
3245 /* For the instance of dse that runs after reload, we make a special
3246 pass to process the spills. These are special in that they are
3247 totally transparent, i.e., there are no aliasing issues that need
3248 to be considered. This means that the wild reads that kill
3249 everything else do not apply here. */
3250 if (clear_alias_sets && dse_step2_spill ())
3251 {
3252 if (!did_global)
3253 {
3254 df_set_flags (DF_LR_RUN_DCE);
3255 df_analyze ();
3256 }
3257 did_global = true;
3258 if (dump_file)
3259 fprintf (dump_file, "doing global spill processing\n");
3260 dse_step3 (true);
3261 dse_step4 ();
3262 dse_step5_spill ();
3263 }
3264
3265 dse_step6 (did_global);
3266
3267 if (dump_file)
3268 fprintf (dump_file, "dse: local deletions = %d, global deletions = %d, spill deletions = %d\n",
3269 locally_deleted, globally_deleted, spill_deleted);
3270 return 0;
3271 }
3272
3273 static bool
3274 gate_dse (void)
3275 {
3276 return gate_dse1 () || gate_dse2 ();
3277 }
3278
3279 static bool
3280 gate_dse1 (void)
3281 {
3282 return optimize > 0 && flag_dse
3283 && dbg_cnt (dse1);
3284 }
3285
3286 static bool
3287 gate_dse2 (void)
3288 {
3289 return optimize > 0 && flag_dse
3290 && dbg_cnt (dse2);
3291 }
3292
3293 struct rtl_opt_pass pass_rtl_dse1 =
3294 {
3295 {
3296 RTL_PASS,
3297 "dse1", /* name */
3298 gate_dse1, /* gate */
3299 rest_of_handle_dse, /* execute */
3300 NULL, /* sub */
3301 NULL, /* next */
3302 0, /* static_pass_number */
3303 TV_DSE1, /* tv_id */
3304 0, /* properties_required */
3305 0, /* properties_provided */
3306 0, /* properties_destroyed */
3307 0, /* todo_flags_start */
3308 TODO_dump_func |
3309 TODO_df_finish | TODO_verify_rtl_sharing |
3310 TODO_ggc_collect /* todo_flags_finish */
3311 }
3312 };
3313
3314 struct rtl_opt_pass pass_rtl_dse2 =
3315 {
3316 {
3317 RTL_PASS,
3318 "dse2", /* name */
3319 gate_dse2, /* gate */
3320 rest_of_handle_dse, /* execute */
3321 NULL, /* sub */
3322 NULL, /* next */
3323 0, /* static_pass_number */
3324 TV_DSE2, /* tv_id */
3325 0, /* properties_required */
3326 0, /* properties_provided */
3327 0, /* properties_destroyed */
3328 0, /* todo_flags_start */
3329 TODO_dump_func |
3330 TODO_df_finish | TODO_verify_rtl_sharing |
3331 TODO_ggc_collect /* todo_flags_finish */
3332 }
3333 };