/* Global common subexpression elimination/Partial redundancy elimination
   and global constant/copy propagation for GNU compiler.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
   - a store to the same address as a load does not kill the load if the
     source of the store is also the destination of the load.  Handling this
     allows more load motion, particularly out of loops.
   - ability to realloc sbitmap vectors would allow one initial computation
     of reg_set_in_block with only subsequent additions, rather than
     recomputing it for each pass

*/
/* References searched while implementing this.

   Compilers: Principles, Techniques, and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/
145
146 #include "config.h"
147 #include "system.h"
148 #include "coretypes.h"
149 #include "tm.h"
150 #include "toplev.h"
151
152 #include "rtl.h"
153 #include "tree.h"
154 #include "tm_p.h"
155 #include "regs.h"
156 #include "hard-reg-set.h"
157 #include "flags.h"
158 #include "real.h"
159 #include "insn-config.h"
160 #include "recog.h"
161 #include "basic-block.h"
162 #include "output.h"
163 #include "function.h"
164 #include "expr.h"
165 #include "except.h"
166 #include "ggc.h"
167 #include "params.h"
168 #include "cselib.h"
169 #include "intl.h"
170 #include "obstack.h"
171 #include "timevar.h"
172 #include "tree-pass.h"
173 #include "hashtab.h"
174
/* Propagate flow information through back edges and thus enable PRE's
   moving loop invariant calculations out of loops.

   Originally this tended to create worse overall code, but several
   improvements during the development of PRE seem to have made following
   back edges generally a win.

   Note much of the loop invariant code motion done here would normally
   be done by loop.c, which has more heuristics for when to move invariants
   out of loops.  At some point we might need to move some of those
   heuristics into gcse.c.  */

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by GCSE.

   We perform the following steps:

   1) Compute basic block information.

   2) Compute table of places where registers are set.

   3) Perform copy/constant propagation.

   4) Perform global cse using lazy code motion if not optimizing
      for size, or code hoisting if we are.

   5) Perform another pass of copy/constant propagation.

   Two passes of copy/constant propagation are done because the first one
   enables more GCSE and the second one helps to clean up the copies that
   GCSE creates.  This is needed more for PRE than for Classic because Classic
   GCSE will try to use an existing register containing the common
   subexpression rather than create a new one.  This is harder to do for PRE
   because of the code motion (which Classic GCSE doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
   assignment) based GVN (global value numbering).  L. T. Simpson's paper
   (Rice University) on value numbering is a useful reference for this.

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.  The parameter
   max-gcse-passes can be modified if one wants to experiment.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CSE pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
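
/* A hedged, illustrative sketch (not part of the pass, and PRE_EXAMPLE
   is hypothetical): at the source level, the second A + B below is
   partially redundant because it is computed on the path through the
   `if' but recomputed unconditionally afterwards.  Following the steps
   above, PRE would insert the computation on the path where it is
   missing and rewrite both occurrences to copy from a new pseudo.  */
#if 0
static int
pre_example (int a, int b, int cond)
{
  int x = 0, y;
  if (cond)
    x = a + b;  /* a + b is available only on this path.  */
  y = a + b;    /* Partially redundant: PRE inserts a + b on the other
                   path, copies the result to a new pseudo in both
                   places, and deletes this computation.  */
  return x + y;
}
#endif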
\f
/* GCSE global vars.  */

/* -dG dump file.  */
static FILE *gcse_file;

/* Note whether or not we should run jump optimization after gcse.  We
   want to do this for two cases.

    * If we changed any jumps via cprop.

    * If we added any labels via edge splitting.  */
static int run_jump_opt_after_gcse;

/* Bitmaps are normally not included in debugging dumps.
   However it's useful to be able to print them from GDB.
   We could create special functions for this, but it's simpler to
   just allow passing stderr to the dump_foo fns.  Since stderr can
   be a macro, we store a copy here.  */
static FILE *debug_stderr;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use {rtx reg_rtx; };

/* Hash table of expressions.  */

struct expr
{
  /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

/* Expression and copy propagation hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;

  /* Nonzero for the SET (copy/constant propagation) hash table,
     zero for the expression hash table.  */
  int set_p;
};
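
/* A minimal sketch (for exposition only; EXAMPLE_LOOKUP is hypothetical)
   of how a bucket chain in one of these tables is walked: expressions
   that hash to the same index are linked through `next_same_hash',
   exactly as in insert_expr_in_table and lookup_set below.  */
#if 0
static struct expr *
example_lookup (rtx x, unsigned int hash, struct hash_table *table)
{
  struct expr *cur;
  for (cur = table->table[hash]; cur != NULL; cur = cur->next_same_hash)
    if (expr_equiv_p (cur->expr, x))
      return cur;
  return NULL;
}
#endif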

/* Expression hash table.  */
static struct hash_table expr_hash_table;

/* Copy propagation hash table.  */
static struct hash_table set_hash_table;

/* Mapping of uids to cuids.
   Only real insns get cuids.  */
static int *uid_cuid;

/* Highest UID in UID_CUID.  */
static int max_uid;

/* Get the cuid of an insn.  */
#ifdef ENABLE_CHECKING
#define INSN_CUID(INSN) \
  (gcc_assert (INSN_UID (INSN) <= max_uid), uid_cuid[INSN_UID (INSN)])
#else
#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
#endif

/* Number of cuids.  */
static int max_cuid;

/* Mapping of cuids to insns.  */
static rtx *cuid_insn;

/* Get insn from cuid.  */
#define CUID_INSN(CUID) (cuid_insn[CUID])
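
/* A hedged sketch of the uid <-> cuid round trip: CUIDs are dense, so
   they can index arrays sized by the number of real insns, while UIDs
   may have gaps.  EXAMPLE_CUID_ROUND_TRIP is hypothetical.  */
#if 0
static rtx
example_cuid_round_trip (rtx insn)
{
  int cuid = INSN_CUID (insn);  /* Dense index for a real insn.  */
  return CUID_INSN (cuid);      /* Maps back to INSN itself.  */
}
#endif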

/* Maximum register number in function prior to doing gcse + 1.
   Registers created during this pass have regno >= max_gcse_regno.
   This is named with "gcse" to not collide with global of same name.  */
static unsigned int max_gcse_regno;

/* Table of registers that are modified.

   For each register, each element is a list of places where the pseudo-reg
   is set.

   For simplicity, GCSE is done on sets of pseudo-regs only.  PRE GCSE only
   requires knowledge of which blocks kill which regs [and thus could use
   a bitmap instead of the lists `reg_set_table' uses].

   `reg_set_table' could be turned into an array of bitmaps (num-bbs x
   num-regs) [however perhaps it may be useful to keep the data as is].  One
   advantage of recording things this way is that `reg_set_table' is fairly
   sparse with respect to pseudo regs but for hard regs could be fairly dense
   [relatively speaking].  And recording sets of pseudo-regs in lists speeds
   up functions like compute_transp since in the case of pseudo-regs we only
   need to iterate over the number of times a pseudo-reg is set, not over the
   number of basic blocks [clearly there is a bit of a slow down in the cases
   where a pseudo is set more than once in a block, however it is believed
   that the net effect is to speed things up].  This isn't done for hard-regs
   because recording call-clobbered hard-regs in `reg_set_table' at each
   function call can consume a fair bit of memory, and iterating over
   hard-regs stored this way in compute_transp will be more expensive.  */

typedef struct reg_set
{
  /* The next setting of this register.  */
  struct reg_set *next;
  /* The index of the block where it was set.  */
  int bb_index;
} reg_set;

static reg_set **reg_set_table;
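
/* A minimal sketch (exposition only; EXAMPLE_WALK_REG_SETS is
   hypothetical) of the iteration pattern the comment above describes:
   to see where pseudo REGNO is set, walk its list in `reg_set_table'
   rather than scanning every basic block.  */
#if 0
static void
example_walk_reg_sets (unsigned int regno)
{
  reg_set *r;
  for (r = reg_set_table[regno]; r != NULL; r = r->next)
    /* r->bb_index is the index of a block that sets pseudo REGNO.  */
    ;
}
#endif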

/* Size of `reg_set_table'.
   The table starts out at max_gcse_regno + slop, and is enlarged as
   necessary.  */
static int reg_set_table_size;

/* Amount to grow `reg_set_table' by when it's full.  */
#define REG_SET_TABLE_SLOP 100

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except
   themselves, i.e., loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.
   Store motion uses this structure as an expression table to track stores
   which look interesting, and might be moveable towards the exit block.  */

struct ls_expr
{
  struct expr * expr;           /* Gcse expression reference for LM.  */
  rtx pattern;                  /* Pattern of this mem.  */
  rtx pattern_regs;             /* List of registers mentioned by the mem.  */
  rtx loads;                    /* INSN list of loads seen.  */
  rtx stores;                   /* INSN list of stores seen.  */
  struct ls_expr * next;        /* Next in the list.  */
  int invalid;                  /* Invalid for some reason.  */
  int index;                    /* If it maps to a bitmap index.  */
  unsigned int hash_index;      /* Index when in a hash table.  */
  rtx reaching_reg;             /* Register to use when re-writing.  */
};
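
/* A hedged sketch of how the load/store expression list above is
   typically consumed (compare first_ls_expr/next_ls_expr below):
   entries marked invalid are skipped, the rest are candidates for
   load/store motion.  EXAMPLE_WALK_LDST is hypothetical.  */
#if 0
static void
example_walk_ldst (void)
{
  struct ls_expr *ptr;
  for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
    if (! ptr->invalid)
      /* ptr->pattern is a MEM whose loads/stores may be moved.  */
      ;
}
#endif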

/* Array of implicit set patterns indexed by basic block index.  */
static rtx *implicit_sets;

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* For each block, a bitmap of registers set in the block.
   This is used by compute_transp.
   It is computed during hash table computation and not by compute_sets
   as it includes registers added since the last pass (or between cprop and
   gcse) and it's currently not easy to realloc sbitmap vectors.  */
static sbitmap *reg_set_in_block;

/* Array, indexed by basic block number, of lists of insns which modify
   memory within the block.  */
static rtx * modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, but is kept canonicalized.  */
static rtx * canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;
/* Number of local constants propagated.  */
static int local_const_prop_count;
/* Number of local copies propagated.  */
static int local_copy_prop_count;
/* Number of global constants propagated.  */
static int global_const_prop_count;
/* Number of global copies propagated.  */
static int global_copy_prop_count;
\f
/* For available exprs.  */
static sbitmap *ae_kill, *ae_gen;
\f
static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *grealloc (void *, size_t);
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void alloc_reg_set_mem (int);
static void free_reg_set_mem (void);
static void record_one_set (int, rtx);
static void record_set_info (rtx, rtx, void *);
static void compute_sets (void);
static void hash_scan_insn (rtx, struct hash_table *, int);
static void hash_scan_set (rtx, rtx, struct hash_table *);
static void hash_scan_clobber (rtx, rtx, struct hash_table *);
static void hash_scan_call (rtx, rtx, struct hash_table *);
static int want_to_gcse_p (rtx);
static bool can_assign_to_reg_p (rtx);
static bool gcse_constant_p (rtx);
static int oprs_unchanged_p (rtx, rtx, int);
static int oprs_anticipatable_p (rtx, rtx);
static int oprs_available_p (rtx, rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int,
                                  struct hash_table *);
static void insert_set_in_table (rtx, rtx, struct hash_table *);
static unsigned int hash_expr (rtx, enum machine_mode, int *, int);
static unsigned int hash_set (int, int);
static int expr_equiv_p (rtx, rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, rtx, void *);
static void compute_hash_table (struct hash_table *);
static void alloc_hash_table (int, struct hash_table *, int);
static void free_hash_table (struct hash_table *);
static void compute_hash_table_work (struct hash_table *);
static void dump_hash_table (FILE *, const char *, struct hash_table *);
static struct expr *lookup_set (unsigned int, struct hash_table *);
static struct expr *next_set (unsigned int, struct expr *);
static void reset_opr_set_tables (void);
static int oprs_not_set_p (rtx, rtx);
static void mark_call (rtx);
static void mark_set (rtx, rtx);
static void mark_clobber (rtx, rtx);
static void mark_oprs_set (rtx);
static void alloc_cprop_mem (int, int);
static void free_cprop_mem (void);
static void compute_transp (rtx, int, sbitmap *, int);
static void compute_transpout (void);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
                                      struct hash_table *);
static void compute_cprop_data (void);
static void find_used_regs (rtx *, void *);
static int try_replace_reg (rtx, rtx, rtx);
static struct expr *find_avail_set (int, rtx);
static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
static void mems_conflict_for_gcse_p (rtx, rtx, void *);
static int load_killed_in_block_p (basic_block, int, rtx, int);
static void canon_list_insert (rtx, rtx, void *);
static int cprop_insn (rtx, int);
static int cprop (int);
static void find_implicit_sets (void);
static int one_cprop_pass (int, bool, bool);
static bool constprop_register (rtx, rtx, rtx, bool);
static struct expr *find_bypass_set (int, int);
static bool reg_killed_on_edge (rtx, edge);
static int bypass_block (basic_block, rtx, rtx);
static int bypass_conditional_jumps (void);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static void compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
                                    basic_block);
static void insert_insn_end_bb (struct expr *, basic_block, int);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (void);
static int one_pre_gcse_pass (int);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *);
static void hoist_code (void);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
                                         basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ldst_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int enumerate_ldsts (void);
static inline struct ls_expr * first_ls_expr (void);
static inline struct ls_expr * next_ls_expr (struct ls_expr *);
static int simple_mem (rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void reg_set_info (rtx, rtx, void *);
static void reg_clear_last_set (rtx, rtx, void *);
static bool store_ops_ok (rtx, int *);
static rtx extract_mentioned_regs (rtx);
static rtx extract_mentioned_regs_helper (rtx, rtx);
static void find_moveable_store (rtx, int *, int *);
static int compute_store_table (void);
static bool load_kills_store (rtx, rtx, int);
static bool find_loads (rtx, rtx, int);
static bool store_killed_in_insn (rtx, rtx, rtx, int);
static bool store_killed_after (rtx, rtx, rtx, basic_block, int *, rtx *);
static bool store_killed_before (rtx, rtx, rtx, basic_block, int *);
static void build_store_vectors (void);
static void insert_insn_start_bb (rtx, basic_block);
static int insert_store (struct ls_expr *, edge);
static void remove_reachable_equiv_notes (basic_block, struct ls_expr *);
static void replace_store_insn (rtx, rtx, basic_block, struct ls_expr *);
static void delete_store (struct ls_expr *, basic_block);
static void free_store_memory (void);
static void store_motion (void);
static void free_insn_expr_list_list (rtx *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static void local_cprop_find_used_regs (rtx *, void *);
static bool do_local_cprop (rtx, rtx, bool, rtx*);
static bool adjust_libcall_notes (rtx, rtx, rtx, rtx*);
static void local_cprop_pass (bool);
static bool is_too_expensive (const char *);
\f

/* Entry point for global common subexpression elimination.
   F is the first instruction in the function.  Return nonzero if a
   change is made.  */

int
gcse_main (rtx f ATTRIBUTE_UNUSED, FILE *file)
{
  int changed, pass;
  /* Bytes used at start of pass.  */
  int initial_bytes_used;
  /* Maximum number of bytes used by a pass.  */
  int max_pass_bytes;
  /* Point to release obstack data from for each pass.  */
  char *gcse_obstack_bottom;

  /* We do not construct an accurate cfg in functions which call
     setjmp, so just punt to be safe.  */
  if (current_function_calls_setjmp)
    return 0;

  /* Assume that we do not need to run jump optimizations after gcse.  */
  run_jump_opt_after_gcse = 0;

  /* For calling dump_foo fns from gdb.  */
  debug_stderr = stderr;
  gcse_file = file;

  /* Identify the basic block information for this function, including
     successors and predecessors.  */
  max_gcse_regno = max_reg_num ();

  if (file)
    dump_flow_info (file);

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= 1 || is_too_expensive (_("GCSE disabled")))
    return 0;

  gcc_obstack_init (&gcse_obstack);
  bytes_used = 0;

  /* We need alias.  */
  init_alias_analysis ();
  /* Record where pseudo-registers are set.  This data is kept accurate
     during each pass.  ??? We could also record hard-reg information here
     [since it's unchanging], however it is currently done during hash table
     computation.

     It may be tempting to compute MEM set information here too, but MEM sets
     will be subject to code motion one day and thus we need to compute
     information about memory sets when we build the hash tables.  */

  alloc_reg_set_mem (max_gcse_regno);
  compute_sets ();

  pass = 0;
  initial_bytes_used = bytes_used;
  max_pass_bytes = 0;
  gcse_obstack_bottom = gcse_alloc (1);
  changed = 1;
  while (changed && pass < MAX_GCSE_PASSES)
    {
      changed = 0;
      if (file)
        fprintf (file, "GCSE pass %d\n\n", pass + 1);

      /* Initialize bytes_used to the space for the pred/succ lists,
         and the reg_set_table data.  */
      bytes_used = initial_bytes_used;

      /* Each pass may create new registers, so recalculate each time.  */
      max_gcse_regno = max_reg_num ();

      alloc_gcse_mem ();

      /* Don't allow constant propagation to modify jumps
         during this pass.  */
      timevar_push (TV_CPROP1);
      changed = one_cprop_pass (pass + 1, false, false);
      timevar_pop (TV_CPROP1);

      if (optimize_size)
        /* Do nothing.  */ ;
      else
        {
          timevar_push (TV_PRE);
          changed |= one_pre_gcse_pass (pass + 1);
          /* We may have just created new basic blocks.  Release and
             recompute various things which are sized on the number of
             basic blocks.  */
          if (changed)
            {
              free_modify_mem_tables ();
              modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
              canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
            }
          free_reg_set_mem ();
          alloc_reg_set_mem (max_reg_num ());
          compute_sets ();
          run_jump_opt_after_gcse = 1;
          timevar_pop (TV_PRE);
        }

      if (max_pass_bytes < bytes_used)
        max_pass_bytes = bytes_used;

      /* Free up memory, then reallocate for code hoisting.  We can
         not re-use the existing allocated memory because the tables
         will not have info for the insns or registers created by
         partial redundancy elimination.  */
      free_gcse_mem ();

      /* It does not make sense to run code hoisting unless we are optimizing
         for code size -- it rarely makes programs faster, and can make
         them bigger if we did partial redundancy elimination (when optimizing
         for space, we don't run the partial redundancy algorithms).  */
      if (optimize_size)
        {
          timevar_push (TV_HOIST);
          max_gcse_regno = max_reg_num ();
          alloc_gcse_mem ();
          changed |= one_code_hoisting_pass ();
          free_gcse_mem ();

          if (max_pass_bytes < bytes_used)
            max_pass_bytes = bytes_used;
          timevar_pop (TV_HOIST);
        }

      if (file)
        {
          fprintf (file, "\n");
          fflush (file);
        }

      obstack_free (&gcse_obstack, gcse_obstack_bottom);
      pass++;
    }

  /* Do one last pass of copy propagation, including cprop into
     conditional jumps.  */

  max_gcse_regno = max_reg_num ();
  alloc_gcse_mem ();
  /* This time, go ahead and allow cprop to alter jumps.  */
  timevar_push (TV_CPROP2);
  one_cprop_pass (pass + 1, true, false);
  timevar_pop (TV_CPROP2);
  free_gcse_mem ();

  if (file)
    {
      fprintf (file, "GCSE of %s: %d basic blocks, ",
               current_function_name (), n_basic_blocks);
      fprintf (file, "%d pass%s, %d bytes\n\n",
               pass, pass > 1 ? "es" : "", max_pass_bytes);
    }

  obstack_free (&gcse_obstack, NULL);
  free_reg_set_mem ();

  /* We are finished with alias.  */
  end_alias_analysis ();
  allocate_reg_info (max_reg_num (), FALSE, FALSE);

  if (!optimize_size && flag_gcse_sm)
    {
      timevar_push (TV_LSM);
      store_motion ();
      timevar_pop (TV_LSM);
    }

  return run_jump_opt_after_gcse;
}
\f
/* Misc. utilities.  */

/* Nonzero for each mode that supports (set (reg) (reg)).
   This is trivially true for integer and floating point values.
   It may or may not be true for condition codes.  */
static char can_copy[(int) NUM_MACHINE_MODES];

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
        can_copy[i] = 0;
#else
        reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
        insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
        if (recog (PATTERN (insn), insn, NULL) >= 0)
          can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  static bool can_copy_init_p = false;

  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
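
/* A hedged usage sketch for can_copy_p (EXAMPLE_CAN_GCSE_MODE and
   EXAMPLE_MODE are hypothetical): callers probe a mode once before
   deciding to GCSE a value of that mode (see hash_scan_set below);
   the lazy initialization above means the first call pays for
   compute_can_copy.  */
#if 0
static bool
example_can_gcse_mode (enum machine_mode example_mode)
{
  /* Reg/reg copies must be possible, or no new pseudo can carry the
     value of the expression.  */
  return can_copy_p (example_mode);
}
#endif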
\f
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to xrealloc.
   We don't record the additional size since we don't know it.
   It won't affect memory usage stats much anyway.  */

static void *
grealloc (void *ptr, size_t size)
{
  return xrealloc (ptr, size);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the cuid mapping array,
   and reg/memory set tracking tables.

   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  int i;
  basic_block bb;
  rtx insn;

  /* Find the largest UID and create a mapping from UIDs to CUIDs.
     CUIDs are like UIDs except they increase monotonically, have no gaps,
     and only apply to real insns.
     (Actually, there are gaps, for insns that are not inside a basic block,
     but we should never see those anyway, so this is OK.)  */

  max_uid = get_max_uid ();
  uid_cuid = gcalloc (max_uid + 1, sizeof (int));
  i = 0;
  FOR_EACH_BB (bb)
    FOR_BB_INSNS (bb, insn)
      {
        if (INSN_P (insn))
          uid_cuid[INSN_UID (insn)] = i++;
        else
          uid_cuid[INSN_UID (insn)] = i;
      }

  /* Create a table mapping cuids to insns.  */

  max_cuid = i;
  cuid_insn = gcalloc (max_cuid + 1, sizeof (rtx));
  i = 0;
  FOR_EACH_BB (bb)
    FOR_BB_INSNS (bb, insn)
      if (INSN_P (insn))
        CUID_INSN (i++) = insn;

  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = BITMAP_ALLOC (NULL);

  /* Allocate vars to track sets of regs, memory per block.  */
  reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno);
  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
  canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  free (uid_cuid);
  free (cuid_insn);

  BITMAP_FREE (reg_set_bitmap);

  sbitmap_vector_free (reg_set_in_block);
  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
\f
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for cprop, pre and code hoisting.  They all compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  If it is the SET hash table,
   TRANSP is additionally computed as ~TRANSP, since this is really cprop's
   ABSALTERED.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
                          struct hash_table *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      if (table->set_p)
        sbitmap_vector_zero (transp, last_basic_block);
      else
        sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
        {
          int indx = expr->bitmap_index;
          struct occr *occr;

          /* The expression is transparent in this block if it is not killed.
             We start by assuming all are transparent [none are killed], and
             then reset the bits for those that are.  */
          if (transp)
            compute_transp (expr->expr, indx, transp, table->set_p);

          /* The occurrences recorded in antic_occr are exactly those that
             we want to set to nonzero in ANTLOC.  */
          if (antloc)
            for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->deleted_p = 0;
              }

          /* The occurrences recorded in avail_occr are exactly those that
             we want to set to nonzero in COMP.  */
          if (comp)
            for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
              {
                SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->copied_p = 0;
              }

          /* While we're scanning the table, this is a good place to
             initialize this.  */
          expr->reaching_reg = 0;
        }
    }
}
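
/* A hedged worked example for the local properties computed above
   (EXAMPLE_LOCAL_PROPERTIES is hypothetical).  Within the single block
   below, A + B is locally anticipatable (it is the first occurrence and
   its operands are unmodified before it), but it is neither transparent
   (A is set in the block) nor computed/available (A changes after the
   occurrence, so the value at the block end would differ).  */
#if 0
static int
example_local_properties (int a, int b)
{
  int x = a + b;  /* ANTLOC: operands still hold their entry values.  */
  a = 0;          /* Kills transparency and availability of a + b.  */
  return x + a;
}
#endif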
\f
/* Register set information.

   `reg_set_table' records where each register is set or otherwise
   modified.  */

static struct obstack reg_set_obstack;

static void
alloc_reg_set_mem (int n_regs)
{
  reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
  reg_set_table = gcalloc (reg_set_table_size, sizeof (struct reg_set *));

  gcc_obstack_init (&reg_set_obstack);
}

static void
free_reg_set_mem (void)
{
  free (reg_set_table);
  obstack_free (&reg_set_obstack, NULL);
}

/* Record REGNO in the reg_set table.  */

static void
record_one_set (int regno, rtx insn)
{
  /* Allocate a new reg_set element and link it onto the list.  */
  struct reg_set *new_reg_info;

  /* If the table isn't big enough, enlarge it.  */
  if (regno >= reg_set_table_size)
    {
      int new_size = regno + REG_SET_TABLE_SLOP;

      reg_set_table = grealloc (reg_set_table,
                                new_size * sizeof (struct reg_set *));
      memset (reg_set_table + reg_set_table_size, 0,
              (new_size - reg_set_table_size) * sizeof (struct reg_set *));
      reg_set_table_size = new_size;
    }

  new_reg_info = obstack_alloc (&reg_set_obstack, sizeof (struct reg_set));
  bytes_used += sizeof (struct reg_set);
  new_reg_info->bb_index = BLOCK_NUM (insn);
  new_reg_info->next = reg_set_table[regno];
  reg_set_table[regno] = new_reg_info;
}

/* Called from compute_sets via note_stores to handle one SET or CLOBBER in
   an insn.  The DATA is really the instruction in which the SET is
   occurring.  */

static void
record_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx record_set_insn = (rtx) data;

  if (REG_P (dest) && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
    record_one_set (REGNO (dest), record_set_insn);
}

/* Scan the function and record each set of each pseudo-register.

   This is called once, at the start of the gcse pass.  See the comments for
   `reg_set_table' for further documentation.  */

static void
compute_sets (void)
{
  basic_block bb;
  rtx insn;

  FOR_EACH_BB (bb)
    FOR_BB_INSNS (bb, insn)
      if (INSN_P (insn))
        note_stores (PATTERN (insn), record_set_info, insn);
}
\f
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;


/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case CALL:
      return 0;

    default:
      return can_assign_to_reg_p (x);
    }
}

/* Used internally by can_assign_to_reg_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register.  */

static bool
can_assign_to_reg_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
        = make_insn_raw (gen_rtx_SET (VOIDmode,
                                      gen_rtx_REG (word_mode,
                                                   FIRST_PSEUDO_REGISTER * 2),
                                      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;
  return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
          && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
}

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (rtx x, rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
        struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

        if (info->last_bb != current_bb)
          return 1;
        if (avail_p)
          return info->last_set < INSN_CUID (insn);
        else
          return info->first_set >= INSN_CUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
                                  x, avail_p))
        return 0;
      else
        return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call needed at this
             level, change it into iteration.  This function is called enough
             to be worth it.  */
          if (i == 0)
            return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

          else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
            return 0;
    }

  return 1;
}

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
   conflict between two memory references.  */
static int gcse_mems_conflict_p;

/* Used for communication between mems_conflict_for_gcse_p and
   load_killed_in_block_p.  The memory reference of a load instruction;
   mems_conflict_for_gcse_p will see if a memory store conflicts with
   this memory load.  */
static rtx gcse_mem_operand;

/* DEST is the output of an instruction.  If it is a memory reference, and
   possibly conflicts with the load found in gcse_mem_operand, then set
   gcse_mems_conflict_p to a nonzero value.  */

static void
mems_conflict_for_gcse_p (rtx dest, rtx setter ATTRIBUTE_UNUSED,
                          void *data ATTRIBUTE_UNUSED)
{
  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark it as killed this time.  */

  if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
    {
      if (!find_rtx_in_ldst (dest))
        gcse_mems_conflict_p = 1;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
                       rtx_addr_varies_p))
    gcse_mems_conflict_p = 1;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the CUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (basic_block bb, int uid_limit, rtx x, int avail_p)
{
  rtx list_entry = modify_mem_list[bb->index];

  /* If this is a readonly MEM, then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  while (list_entry)
    {
      rtx setter;
      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
           && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
          || (! avail_p
              && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
        {
          list_entry = XEXP (list_entry, 1);
          continue;
        }

      setter = XEXP (list_entry, 0);

      /* If SETTER is a call everything is clobbered.  Note that calls
         to pure functions are never put on the list, so we need not
         worry about them.  */
      if (CALL_P (setter))
        return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
         note_stores to examine each hunk of memory that is modified.

         The note_stores interface is pretty limited, so we have to
         communicate via global variables.  Yuk.  */
      gcse_mem_operand = x;
      gcse_mems_conflict_p = 0;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
      if (gcse_mems_conflict_p)
        return 1;
      list_entry = XEXP (list_entry, 1);
    }
  return 0;
}

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (rtx x, rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (rtx x, rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (rtx x, enum machine_mode mode, int *do_not_record_p,
           int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p,
                   NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Hash a set of register REGNO.

   Sets are hashed on the register that is set.  This simplifies the PRE copy
   propagation code.

   ??? May need to make things more elaborate.  Later, as necessary.  */

static unsigned int
hash_set (int regno, int hash_table_size)
{
  unsigned int hash;

  hash = regno;
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (rtx x, rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
                      int avail_p, struct hash_table *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
         the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = gcse_alloc (sizeof (struct expr));
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
        /* This is the first pattern that hashed to this index.  */
        table->table[hash] = cur_expr;
      else
        /* Add EXPR to end of this hash chain.  */
        last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
        antic_occr = NULL;

      if (antic_occr)
        /* Found another instance of the expression in the same basic block.
           Prefer the currently recorded one.  We want the first one in the
           block and the block is scanned from start to end.  */
        ; /* nothing to do */
      else
        {
          /* First occurrence of this expression in this basic block.  */
          antic_occr = gcse_alloc (sizeof (struct occr));
          bytes_used += sizeof (struct occr);
          antic_occr->insn = insn;
          antic_occr->next = cur_expr->antic_occr;
          antic_occr->deleted_p = 0;
          cur_expr->antic_occr = antic_occr;
        }
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr && BLOCK_NUM (avail_occr->insn) == BLOCK_NUM (insn))
        {
          /* Found another instance of the expression in the same basic block.
             Prefer this occurrence to the currently recorded one.  We want
             the last one in the block and the block is scanned from start
             to end.  */
          avail_occr->insn = insn;
        }
      else
        {
          /* First occurrence of this expression in this basic block.  */
          avail_occr = gcse_alloc (sizeof (struct occr));
          bytes_used += sizeof (struct occr);
          avail_occr->insn = insn;
          avail_occr->next = cur_expr->avail_occr;
          avail_occr->deleted_p = 0;
          cur_expr->avail_occr = avail_occr;
        }
    }
}

/* Insert pattern X in INSN in the hash table.
   X is a SET of a reg to either another reg or a constant.
   If it is already present, record it as the last occurrence in INSN's
   basic block.  */

static void
insert_set_in_table (rtx x, rtx insn, struct hash_table *table)
{
  int found;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *cur_occr;

  gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));

  hash = hash_set (REGNO (SET_DEST (x)), table->size);

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
         the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = gcse_alloc (sizeof (struct expr));
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
        /* This is the first pattern that hashed to this index.  */
        table->table[hash] = cur_expr;
      else
        /* Add EXPR to end of this hash chain.  */
        last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.
         We must copy X because it can be modified when copy propagation is
         performed on its operands.  */
      cur_expr->expr = copy_rtx (x);
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence.  */
  cur_occr = cur_expr->avail_occr;

  if (cur_occr && BLOCK_NUM (cur_occr->insn) == BLOCK_NUM (insn))
    {
      /* Found another instance of the expression in the same basic block.
         Prefer this occurrence to the currently recorded one.  We want
         the last one in the block and the block is scanned from start
         to end.  */
      cur_occr->insn = insn;
    }
  else
    {
      /* First occurrence of this expression in this basic block.  */
      cur_occr = gcse_alloc (sizeof (struct occr));
      bytes_used += sizeof (struct occr);

      cur_occr->insn = insn;
      cur_occr->next = cur_expr->avail_occr;
      cur_occr->deleted_p = 0;
      cur_expr->avail_occr = cur_occr;
    }
}

/* Determine whether the rtx X should be treated as a constant for
   the purposes of GCSE's constant propagation.  */

static bool
gcse_constant_p (rtx x)
{
  /* Consider a COMPARE of two integers constant.  */
  if (GET_CODE (x) == COMPARE
      && GET_CODE (XEXP (x, 0)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return true;

  /* Consider a COMPARE of the same registers constant
     if they are not floating point registers.  */
  if (GET_CODE(x) == COMPARE
      && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
      && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
    return true;

  return CONSTANT_P (x);
}
1684
1685 /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
1686 expression one). */
1687
1688 static void
1689 hash_scan_set (rtx pat, rtx insn, struct hash_table *table)
1690 {
1691 rtx src = SET_SRC (pat);
1692 rtx dest = SET_DEST (pat);
1693 rtx note;
1694
1695 if (GET_CODE (src) == CALL)
1696 hash_scan_call (src, insn, table);
1697
1698 else if (REG_P (dest))
1699 {
1700 unsigned int regno = REGNO (dest);
1701 rtx tmp;
1702
1703 /* If this is a single set and we are doing constant propagation,
1704 see if a REG_NOTE shows this equivalent to a constant. */
1705 if (table->set_p && (note = find_reg_equal_equiv_note (insn)) != 0
1706 && gcse_constant_p (XEXP (note, 0)))
1707 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
1708
1709 /* Only record sets of pseudo-regs in the hash table. */
1710 if (! table->set_p
1711 && regno >= FIRST_PSEUDO_REGISTER
1712 /* Don't GCSE something if we can't do a reg/reg copy. */
1713 && can_copy_p (GET_MODE (dest))
1714 /* GCSE commonly inserts instructions after the insn. We can't
1715 do that easily for EH_REGION notes so disable GCSE on these
1716 for now. */
1717 && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
1718 /* Is SET_SRC something we want to gcse? */
1719 && want_to_gcse_p (src)
1720 /* Don't CSE a nop. */
1721 && ! set_noop_p (pat)
1722 /* Don't GCSE if it has an attached REG_EQUIV note.
1723 At this point only function parameters should have
1724 REG_EQUIV notes and if the argument slot is used somewhere
1725 explicitly, it means the address of the parameter has been taken,
1726 so we should not extend the lifetime of the pseudo. */
1727 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
1728 || ! MEM_P (XEXP (note, 0))))
1729 {
1730 /* An expression is not anticipatable if its operands are
1731 modified before this insn or if this is not the only SET in
1732 this insn. */
1733 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
1734 /* An expression is not available if its operands are
1735 subsequently modified, including this insn. It's also not
1736 available if this is a branch, because we can't insert
1737 a set after the branch. */
1738 int avail_p = (oprs_available_p (src, insn)
1739 && ! JUMP_P (insn));
1740
1741 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table);
1742 }
1743
1744 /* Record sets for constant/copy propagation. */
1745 else if (table->set_p
1746 && regno >= FIRST_PSEUDO_REGISTER
1747 && ((REG_P (src)
1748 && REGNO (src) >= FIRST_PSEUDO_REGISTER
1749 && can_copy_p (GET_MODE (dest))
1750 && REGNO (src) != regno)
1751 || gcse_constant_p (src))
1752 /* A copy is not available if its src or dest is subsequently
1753 modified. Here we want to search from INSN+1 on, but
1754 oprs_available_p searches from INSN on. */
1755 && (insn == BB_END (BLOCK_FOR_INSN (insn))
1756 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
1757 && oprs_available_p (pat, tmp))))
1758 insert_set_in_table (pat, insn, table);
1759 }
1760 /* In the case of a store we want to consider the memory value as available
1761 in the REG stored in that memory. This makes it possible to remove
1762 redundant loads due to stores to the same location. */
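/* Illustrative example: after (set (mem:SI (reg 90)) (reg 70)),
recording the MEM as available means a later load
(set (reg 80) (mem:SI (reg 90))) can be satisfied from reg 70
rather than from memory when -fgcse-las is enabled. */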
1763 else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
1764 {
1765 unsigned int regno = REGNO (src);
1766
1767 /* Do not do this for constant/copy propagation. */
1768 if (! table->set_p
1769 /* Only record sets of pseudo-regs in the hash table. */
1770 && regno >= FIRST_PSEUDO_REGISTER
1771 /* Don't GCSE something if we can't do a reg/reg copy. */
1772 && can_copy_p (GET_MODE (src))
1773 /* GCSE commonly inserts instructions after the insn. We can't
1774 do that easily for EH_REGION notes so disable GCSE on these
1775 for now. */
1776 && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
1777 /* Is SET_DEST something we want to gcse? */
1778 && want_to_gcse_p (dest)
1779 /* Don't CSE a nop. */
1780 && ! set_noop_p (pat)
1781 /* Don't GCSE if it has an attached REG_EQUIV note.
1782 At this point only function parameters should have
1783 REG_EQUIV notes and if the argument slot is used somewhere
1784 explicitly, it means the address of the parameter has been taken,
1785 so we should not extend the lifetime of the pseudo. */
1786 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
1787 || ! MEM_P (XEXP (note, 0))))
1788 {
1789 /* Stores are never anticipatable. */
1790 int antic_p = 0;
1791 /* An expression is not available if its operands are
1792 subsequently modified, including this insn. It's also not
1793 available if this is a branch, because we can't insert
1794 a set after the branch. */
1795 int avail_p = oprs_available_p (dest, insn)
1796 && ! JUMP_P (insn);
1797
1798 /* Record the memory expression (DEST) in the hash table. */
1799 insert_expr_in_table (dest, GET_MODE (dest), insn,
1800 antic_p, avail_p, table);
1801 }
1802 }
1803 }
1804
1805 static void
1806 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1807 struct hash_table *table ATTRIBUTE_UNUSED)
1808 {
1809 /* Currently nothing to do. */
1810 }
1811
1812 static void
1813 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
1814 struct hash_table *table ATTRIBUTE_UNUSED)
1815 {
1816 /* Currently nothing to do. */
1817 }
1818
1819 /* Process INSN and add hash table entries as appropriate.
1820
1821 Only available expressions that set a single pseudo-reg are recorded.
1822
1823 Single sets in a PARALLEL could be handled, but it's an extra complication
1824 that isn't dealt with right now. The trick is handling the CLOBBERs that
1825 are also in the PARALLEL. Later.
1826
1827 If TABLE->set_p is nonzero, this is for the assignment hash table,
1828 otherwise it is for the expression hash table.
1829 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block and should
1830 not record any expressions. */
1831
1832 static void
1833 hash_scan_insn (rtx insn, struct hash_table *table, int in_libcall_block)
1834 {
1835 rtx pat = PATTERN (insn);
1836 int i;
1837
1838 if (in_libcall_block)
1839 return;
1840
1841 /* Pick out the sets of INSN and for other forms of instructions record
1842 what's been modified. */
1843
1844 if (GET_CODE (pat) == SET)
1845 hash_scan_set (pat, insn, table);
1846 else if (GET_CODE (pat) == PARALLEL)
1847 for (i = 0; i < XVECLEN (pat, 0); i++)
1848 {
1849 rtx x = XVECEXP (pat, 0, i);
1850
1851 if (GET_CODE (x) == SET)
1852 hash_scan_set (x, insn, table);
1853 else if (GET_CODE (x) == CLOBBER)
1854 hash_scan_clobber (x, insn, table);
1855 else if (GET_CODE (x) == CALL)
1856 hash_scan_call (x, insn, table);
1857 }
1858
1859 else if (GET_CODE (pat) == CLOBBER)
1860 hash_scan_clobber (pat, insn, table);
1861 else if (GET_CODE (pat) == CALL)
1862 hash_scan_call (pat, insn, table);
1863 }
1864
1865 static void
1866 dump_hash_table (FILE *file, const char *name, struct hash_table *table)
1867 {
1868 int i;
1869 /* Flattened out table, so it's printed in proper order. */
1870 struct expr **flat_table;
1871 unsigned int *hash_val;
1872 struct expr *expr;
1873
1874 flat_table = xcalloc (table->n_elems, sizeof (struct expr *));
1875 hash_val = xmalloc (table->n_elems * sizeof (unsigned int));
1876
1877 for (i = 0; i < (int) table->size; i++)
1878 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1879 {
1880 flat_table[expr->bitmap_index] = expr;
1881 hash_val[expr->bitmap_index] = i;
1882 }
1883
1884 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
1885 name, table->size, table->n_elems);
1886
1887 for (i = 0; i < (int) table->n_elems; i++)
1888 if (flat_table[i] != 0)
1889 {
1890 expr = flat_table[i];
1891 fprintf (file, "Index %d (hash value %d)\n ",
1892 expr->bitmap_index, hash_val[i]);
1893 print_rtl (file, expr->expr);
1894 fprintf (file, "\n");
1895 }
1896
1897 fprintf (file, "\n");
1898
1899 free (flat_table);
1900 free (hash_val);
1901 }
1902
1903 /* Record register first/last/block set information for REGNO in INSN.
1904
1905 first_set records the first place in the block where the register
1906 is set and is used to compute "anticipatability".
1907
1908 last_set records the last place in the block where the register
1909 is set and is used to compute "availability".
1910
1911 last_bb records the block for which first_set and last_set are
1912 valid, as a quick test to invalidate them.
1913
1914 reg_set_in_block records whether the register is set in the block
1915 and is used to compute "transparency". */
1916
1917 static void
1918 record_last_reg_set_info (rtx insn, int regno)
1919 {
1920 struct reg_avail_info *info = &reg_avail_info[regno];
1921 int cuid = INSN_CUID (insn);
1922
1923 info->last_set = cuid;
1924 if (info->last_bb != current_bb)
1925 {
1926 info->last_bb = current_bb;
1927 info->first_set = cuid;
1928 SET_BIT (reg_set_in_block[current_bb->index], regno);
1929 }
1930 }
1931
1932
1933 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
1934 Note we store a pair of elements in the list, so they have to be
1935 taken off pairwise. */
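/* For example, after stores to A and then B in one block, the list
reads (B, B_addr, A, A_addr): each MEM is immediately followed by
its canonicalized address, and consumers such as compute_transp
walk the list two nodes at a time. */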
1936
1937 static void
1938 canon_list_insert (rtx dest, rtx unused1 ATTRIBUTE_UNUSED,
1939 void *v_insn)
1940 {
1941 rtx dest_addr, insn;
1942 int bb;
1943
1944 while (GET_CODE (dest) == SUBREG
1945 || GET_CODE (dest) == ZERO_EXTRACT
1946 || GET_CODE (dest) == STRICT_LOW_PART)
1947 dest = XEXP (dest, 0);
1948
1949 /* If DEST is not a MEM, then it will not conflict with a load. Note
1950 that function calls are assumed to clobber memory, but are handled
1951 elsewhere. */
1952
1953 if (! MEM_P (dest))
1954 return;
1955
1956 dest_addr = get_addr (XEXP (dest, 0));
1957 dest_addr = canon_rtx (dest_addr);
1958 insn = (rtx) v_insn;
1959 bb = BLOCK_NUM (insn);
1960
1961 canon_modify_mem_list[bb] =
1962 alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
1963 canon_modify_mem_list[bb] =
1964 alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
1965 }
1966
1967 /* Record memory modification information for INSN. We do not actually care
1968 about the memory location(s) that are set, or even how they are set (consider
1969 a CALL_INSN). We merely need to record which insns modify memory. */
1970
1971 static void
1972 record_last_mem_set_info (rtx insn)
1973 {
1974 int bb = BLOCK_NUM (insn);
1975
1976 /* load_killed_in_block_p will handle the case of calls clobbering
1977 everything. */
1978 modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
1979 bitmap_set_bit (modify_mem_list_set, bb);
1980
1981 if (CALL_P (insn))
1982 {
1983 /* Note that traversals of this list (other than for freeing)
1984 will break after encountering a CALL_INSN. So, there's no
1985 need to insert a pair of items, as canon_list_insert does. */
1986 canon_modify_mem_list[bb] =
1987 alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
1988 bitmap_set_bit (blocks_with_calls, bb);
1989 }
1990 else
1991 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
1992 }
1993
1994 /* Called from compute_hash_table via note_stores to handle one
1995 SET or CLOBBER in an insn. DATA is really the instruction in which
1996 the SET is taking place. */
1997
1998 static void
1999 record_last_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
2000 {
2001 rtx last_set_insn = (rtx) data;
2002
2003 if (GET_CODE (dest) == SUBREG)
2004 dest = SUBREG_REG (dest);
2005
2006 if (REG_P (dest))
2007 record_last_reg_set_info (last_set_insn, REGNO (dest));
2008 else if (MEM_P (dest)
2009 /* Ignore pushes, they clobber nothing. */
2010 && ! push_operand (dest, GET_MODE (dest)))
2011 record_last_mem_set_info (last_set_insn);
2012 }
2013
2014 /* Top level function to create an expression or assignment hash table.
2015
2016 Expression entries are placed in the hash table if
2017 - they are of the form (set (pseudo-reg) src),
2018 - src is something we want to perform GCSE on,
2019 - none of the operands are subsequently modified in the block
2020
2021 Assignment entries are placed in the hash table if
2022 - they are of the form (set (pseudo-reg) src),
2023 - src is something we want to perform const/copy propagation on,
2024 - none of the operands or target are subsequently modified in the block
2025
2026 Currently src must be a pseudo-reg or a const_int.
2027
2028 TABLE is the table computed. */
2029
2030 static void
2031 compute_hash_table_work (struct hash_table *table)
2032 {
2033 unsigned int i;
2034
2035 /* While we compute the hash table we also compute a bit array of which
2036 registers are set in which blocks.
2037 ??? This isn't needed during const/copy propagation, but it's cheap to
2038 compute. Later. */
2039 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
2040
2041 /* Re-cache any INSN_LIST nodes we have allocated. */
2042 clear_modify_mem_tables ();
2043 /* Some working arrays used to track first and last set in each block. */
2044 reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2045
2046 for (i = 0; i < max_gcse_regno; ++i)
2047 reg_avail_info[i].last_bb = NULL;
2048
2049 FOR_EACH_BB (current_bb)
2050 {
2051 rtx insn;
2052 unsigned int regno;
2053 int in_libcall_block;
2054
2055 /* First pass over the instructions records information used to
2056 determine when registers and memory are first and last set.
2057 ??? hard-reg reg_set_in_block computation
2058 could be moved to compute_sets since they currently don't change. */
2059
2060 FOR_BB_INSNS (current_bb, insn)
2061 {
2062 if (! INSN_P (insn))
2063 continue;
2064
2065 if (CALL_P (insn))
2066 {
2067 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2068 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2069 record_last_reg_set_info (insn, regno);
2070
2071 mark_call (insn);
2072 }
2073
2074 note_stores (PATTERN (insn), record_last_set_info, insn);
2075 }
2076
2077 /* Insert implicit sets in the hash table. */
2078 if (table->set_p
2079 && implicit_sets[current_bb->index] != NULL_RTX)
2080 hash_scan_set (implicit_sets[current_bb->index],
2081 BB_HEAD (current_bb), table);
2082
2083 /* The next pass builds the hash table. */
2084 in_libcall_block = 0;
2085 FOR_BB_INSNS (current_bb, insn)
2086 if (INSN_P (insn))
2087 {
2088 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2089 in_libcall_block = 1;
2090 else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2091 in_libcall_block = 0;
2092 hash_scan_insn (insn, table, in_libcall_block);
2093 if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2094 in_libcall_block = 0;
2095 }
2096 }
2097
2098 free (reg_avail_info);
2099 reg_avail_info = NULL;
2100 }
2101
2102 /* Allocate space for the set/expr hash TABLE.
2103 N_INSNS is the number of instructions in the function.
2104 It is used to determine the number of buckets to use.
2105 SET_P determines whether a set or an expression table will
2106 be created. */
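/* For example, a function of 1000 insns gets 1000/4 = 250 buckets,
forced odd to 251; small functions get at least 11 buckets. */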
2107
2108 static void
2109 alloc_hash_table (int n_insns, struct hash_table *table, int set_p)
2110 {
2111 int n;
2112
2113 table->size = n_insns / 4;
2114 if (table->size < 11)
2115 table->size = 11;
2116
2117 /* Attempt to maintain efficient use of hash table.
2118 Making it an odd number is simplest for now.
2119 ??? Later take some measurements. */
2120 table->size |= 1;
2121 n = table->size * sizeof (struct expr *);
2122 table->table = gmalloc (n);
2123 table->set_p = set_p;
2124 }
2125
2126 /* Free things allocated by alloc_hash_table. */
2127
2128 static void
2129 free_hash_table (struct hash_table *table)
2130 {
2131 free (table->table);
2132 }
2133
2134 /* Compute the contents of TABLE, which is either the set hash table
2135 used for copy/const propagation or the expression hash table. */
2136
2137 static void
2138 compute_hash_table (struct hash_table *table)
2139 {
2140 /* Initialize count of number of entries in hash table. */
2141 table->n_elems = 0;
2142 memset (table->table, 0, table->size * sizeof (struct expr *));
2143
2144 compute_hash_table_work (table);
2145 }
2146 \f
2147 /* Expression tracking support. */
2148
2149 /* Look up REGNO in the set TABLE. The result is a pointer to the
2150 table entry, or NULL if not found. */
2151
2152 static struct expr *
2153 lookup_set (unsigned int regno, struct hash_table *table)
2154 {
2155 unsigned int hash = hash_set (regno, table->size);
2156 struct expr *expr;
2157
2158 expr = table->table[hash];
2159
2160 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2161 expr = expr->next_same_hash;
2162
2163 return expr;
2164 }
2165
2166 /* Return the next entry for REGNO in list EXPR. */
2167
2168 static struct expr *
2169 next_set (unsigned int regno, struct expr *expr)
2170 {
2171 do
2172 expr = expr->next_same_hash;
2173 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2174
2175 return expr;
2176 }
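/* A typical (illustrative) walk over every recorded set of REGNO
combines the two routines above:

for (expr = lookup_set (regno, &set_hash_table);
expr != NULL;
expr = next_set (regno, expr))
...process EXPR... */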
2177
2178 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
2179 types may be mixed. */
2180
2181 static void
2182 free_insn_expr_list_list (rtx *listp)
2183 {
2184 rtx list, next;
2185
2186 for (list = *listp; list ; list = next)
2187 {
2188 next = XEXP (list, 1);
2189 if (GET_CODE (list) == EXPR_LIST)
2190 free_EXPR_LIST_node (list);
2191 else
2192 free_INSN_LIST_node (list);
2193 }
2194
2195 *listp = NULL;
2196 }
2197
2198 /* Clear canon_modify_mem_list and modify_mem_list tables. */
2199 static void
2200 clear_modify_mem_tables (void)
2201 {
2202 unsigned i;
2203 bitmap_iterator bi;
2204
2205 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
2206 {
2207 free_INSN_LIST_list (modify_mem_list + i);
2208 free_insn_expr_list_list (canon_modify_mem_list + i);
2209 }
2210 bitmap_clear (modify_mem_list_set);
2211 bitmap_clear (blocks_with_calls);
2212 }
2213
2214 /* Release memory used by modify_mem_list_set. */
2215
2216 static void
2217 free_modify_mem_tables (void)
2218 {
2219 clear_modify_mem_tables ();
2220 free (modify_mem_list);
2221 free (canon_modify_mem_list);
2222 modify_mem_list = 0;
2223 canon_modify_mem_list = 0;
2224 }
2225
2226 /* Reset tables used to keep track of what's still available [since the
2227 start of the block]. */
2228
2229 static void
2230 reset_opr_set_tables (void)
2231 {
2232 /* Maintain a bitmap of which regs have been set since beginning of
2233 the block. */
2234 CLEAR_REG_SET (reg_set_bitmap);
2235
2236 /* Also keep a record of the last instruction to modify memory.
2237 For now this is very trivial: we only record whether any memory
2238 location has been modified. */
2239 clear_modify_mem_tables ();
2240 }
2241
2242 /* Return nonzero if the operands of X are not set before INSN in
2243 INSN's basic block. */
2244
2245 static int
2246 oprs_not_set_p (rtx x, rtx insn)
2247 {
2248 int i, j;
2249 enum rtx_code code;
2250 const char *fmt;
2251
2252 if (x == 0)
2253 return 1;
2254
2255 code = GET_CODE (x);
2256 switch (code)
2257 {
2258 case PC:
2259 case CC0:
2260 case CONST:
2261 case CONST_INT:
2262 case CONST_DOUBLE:
2263 case CONST_VECTOR:
2264 case SYMBOL_REF:
2265 case LABEL_REF:
2266 case ADDR_VEC:
2267 case ADDR_DIFF_VEC:
2268 return 1;
2269
2270 case MEM:
2271 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2272 INSN_CUID (insn), x, 0))
2273 return 0;
2274 else
2275 return oprs_not_set_p (XEXP (x, 0), insn);
2276
2277 case REG:
2278 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2279
2280 default:
2281 break;
2282 }
2283
2284 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2285 {
2286 if (fmt[i] == 'e')
2287 {
2288 /* If we are about to do the last recursive call
2289 needed at this level, change it into iteration.
2290 This function is called enough to be worth it. */
2291 if (i == 0)
2292 return oprs_not_set_p (XEXP (x, i), insn);
2293
2294 if (! oprs_not_set_p (XEXP (x, i), insn))
2295 return 0;
2296 }
2297 else if (fmt[i] == 'E')
2298 for (j = 0; j < XVECLEN (x, i); j++)
2299 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2300 return 0;
2301 }
2302
2303 return 1;
2304 }
2305
2306 /* Mark things set by a CALL. */
2307
2308 static void
2309 mark_call (rtx insn)
2310 {
2311 if (! CONST_OR_PURE_CALL_P (insn))
2312 record_last_mem_set_info (insn);
2313 }
2314
2315 /* Mark things set by a SET. */
2316
2317 static void
2318 mark_set (rtx pat, rtx insn)
2319 {
2320 rtx dest = SET_DEST (pat);
2321
2322 while (GET_CODE (dest) == SUBREG
2323 || GET_CODE (dest) == ZERO_EXTRACT
2324 || GET_CODE (dest) == STRICT_LOW_PART)
2325 dest = XEXP (dest, 0);
2326
2327 if (REG_P (dest))
2328 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2329 else if (MEM_P (dest))
2330 record_last_mem_set_info (insn);
2331
2332 if (GET_CODE (SET_SRC (pat)) == CALL)
2333 mark_call (insn);
2334 }
2335
2336 /* Record things set by a CLOBBER. */
2337
2338 static void
2339 mark_clobber (rtx pat, rtx insn)
2340 {
2341 rtx clob = XEXP (pat, 0);
2342
2343 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2344 clob = XEXP (clob, 0);
2345
2346 if (REG_P (clob))
2347 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2348 else
2349 record_last_mem_set_info (insn);
2350 }
2351
2352 /* Record things set by INSN.
2353 This data is used by oprs_not_set_p. */
2354
2355 static void
2356 mark_oprs_set (rtx insn)
2357 {
2358 rtx pat = PATTERN (insn);
2359 int i;
2360
2361 if (GET_CODE (pat) == SET)
2362 mark_set (pat, insn);
2363 else if (GET_CODE (pat) == PARALLEL)
2364 for (i = 0; i < XVECLEN (pat, 0); i++)
2365 {
2366 rtx x = XVECEXP (pat, 0, i);
2367
2368 if (GET_CODE (x) == SET)
2369 mark_set (x, insn);
2370 else if (GET_CODE (x) == CLOBBER)
2371 mark_clobber (x, insn);
2372 else if (GET_CODE (x) == CALL)
2373 mark_call (insn);
2374 }
2375
2376 else if (GET_CODE (pat) == CLOBBER)
2377 mark_clobber (pat, insn);
2378 else if (GET_CODE (pat) == CALL)
2379 mark_call (insn);
2380 }
2381
2382 \f
2383 /* Compute copy/constant propagation working variables. */
2384
2385 /* Local properties of assignments. */
2386 static sbitmap *cprop_pavloc;
2387 static sbitmap *cprop_absaltered;
2388
2389 /* Global properties of assignments (computed from the local properties). */
2390 static sbitmap *cprop_avin;
2391 static sbitmap *cprop_avout;
2392
2393 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
2394 basic blocks. N_SETS is the number of sets. */
2395
2396 static void
2397 alloc_cprop_mem (int n_blocks, int n_sets)
2398 {
2399 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
2400 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
2401
2402 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
2403 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
2404 }
2405
2406 /* Free vars used by copy/const propagation. */
2407
2408 static void
2409 free_cprop_mem (void)
2410 {
2411 sbitmap_vector_free (cprop_pavloc);
2412 sbitmap_vector_free (cprop_absaltered);
2413 sbitmap_vector_free (cprop_avin);
2414 sbitmap_vector_free (cprop_avout);
2415 }
2416
2417 /* For each block, compute whether X is transparent. X is either an
2418 expression or an assignment [though we don't care which, for this context
2419 an assignment is treated as an expression]. For each block where an
2420 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
2421 bit in BMAP. */
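/* For example, if the expression at index 3 mentions reg 70, then
every block recorded as setting reg 70 (via reg_set_in_block for
hard regs, or reg_set_table for pseudos) gets bit 3 of its BMAP
set when SET_P == 1, or reset when SET_P == 0. MEM operands are
handled analogously using the per-block memory-modification lists. */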
2422
2423 static void
2424 compute_transp (rtx x, int indx, sbitmap *bmap, int set_p)
2425 {
2426 int i, j;
2427 basic_block bb;
2428 enum rtx_code code;
2429 reg_set *r;
2430 const char *fmt;
2431
2432 /* repeat is used to turn tail-recursion into iteration since GCC
2433 can't do it when there's no return value. */
2434 repeat:
2435
2436 if (x == 0)
2437 return;
2438
2439 code = GET_CODE (x);
2440 switch (code)
2441 {
2442 case REG:
2443 if (set_p)
2444 {
2445 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
2446 {
2447 FOR_EACH_BB (bb)
2448 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
2449 SET_BIT (bmap[bb->index], indx);
2450 }
2451 else
2452 {
2453 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
2454 SET_BIT (bmap[r->bb_index], indx);
2455 }
2456 }
2457 else
2458 {
2459 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
2460 {
2461 FOR_EACH_BB (bb)
2462 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
2463 RESET_BIT (bmap[bb->index], indx);
2464 }
2465 else
2466 {
2467 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
2468 RESET_BIT (bmap[r->bb_index], indx);
2469 }
2470 }
2471
2472 return;
2473
2474 case MEM:
2475 if (! MEM_READONLY_P (x))
2476 {
2477 bitmap_iterator bi;
2478 unsigned bb_index;
2479
2480 /* First handle all the blocks with calls. We don't need to
2481 do any list walking for them. */
2482 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
2483 {
2484 if (set_p)
2485 SET_BIT (bmap[bb_index], indx);
2486 else
2487 RESET_BIT (bmap[bb_index], indx);
2488 }
2489
2490 /* Now iterate over the blocks which have memory modifications
2491 but which do not have any calls. */
2492 EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
2493 blocks_with_calls,
2494 0, bb_index, bi)
2495 {
2496 rtx list_entry = canon_modify_mem_list[bb_index];
2497
2498 while (list_entry)
2499 {
2500 rtx dest, dest_addr;
2501
2502 /* LIST_ENTRY is a (MEM, canonicalized address) pair recorded by
2503 canon_list_insert. Examine each hunk of memory that is modified. */
2504
2505 dest = XEXP (list_entry, 0);
2506 list_entry = XEXP (list_entry, 1);
2507 dest_addr = XEXP (list_entry, 0);
2508
2509 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
2510 x, rtx_addr_varies_p))
2511 {
2512 if (set_p)
2513 SET_BIT (bmap[bb_index], indx);
2514 else
2515 RESET_BIT (bmap[bb_index], indx);
2516 break;
2517 }
2518 list_entry = XEXP (list_entry, 1);
2519 }
2520 }
2521 }
2522
2523 x = XEXP (x, 0);
2524 goto repeat;
2525
2526 case PC:
2527 case CC0: /*FIXME*/
2528 case CONST:
2529 case CONST_INT:
2530 case CONST_DOUBLE:
2531 case CONST_VECTOR:
2532 case SYMBOL_REF:
2533 case LABEL_REF:
2534 case ADDR_VEC:
2535 case ADDR_DIFF_VEC:
2536 return;
2537
2538 default:
2539 break;
2540 }
2541
2542 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2543 {
2544 if (fmt[i] == 'e')
2545 {
2546 /* If we are about to do the last recursive call
2547 needed at this level, change it into iteration.
2548 This function is called enough to be worth it. */
2549 if (i == 0)
2550 {
2551 x = XEXP (x, i);
2552 goto repeat;
2553 }
2554
2555 compute_transp (XEXP (x, i), indx, bmap, set_p);
2556 }
2557 else if (fmt[i] == 'E')
2558 for (j = 0; j < XVECLEN (x, i); j++)
2559 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
2560 }
2561 }
2562
2563 /* Top level routine to do the dataflow analysis needed by copy/const
2564 propagation. */
2565
2566 static void
2567 compute_cprop_data (void)
2568 {
2569 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
2570 compute_available (cprop_pavloc, cprop_absaltered,
2571 cprop_avout, cprop_avin);
2572 }
2573 \f
2574 /* Copy/constant propagation. */
2575
2576 /* Maximum number of register uses in an insn that we handle. */
2577 #define MAX_USES 8
2578
2579 /* Table of uses found in an insn.
2580 Allocated statically to avoid alloc/free complexity and overhead. */
2581 static struct reg_use reg_use_table[MAX_USES];
2582
2583 /* Index into `reg_use_table' while building it. */
2584 static int reg_use_count;
2585
2586 /* Set up a list of register numbers used in INSN. The found uses are stored
2587 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
2588 and contains the number of uses in the table upon exit.
2589
2590 ??? If a register appears multiple times we will record it multiple times.
2591 This doesn't hurt anything but it will slow things down. */
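/* For example, scanning (plus:SI (reg 70) (reg 70)) records reg 70
twice, as noted above; at most MAX_USES entries are kept and any
further uses are silently dropped. */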
2592
2593 static void
2594 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
2595 {
2596 int i, j;
2597 enum rtx_code code;
2598 const char *fmt;
2599 rtx x = *xptr;
2600
2601 /* repeat is used to turn tail-recursion into iteration since GCC
2602 can't do it when there's no return value. */
2603 repeat:
2604 if (x == 0)
2605 return;
2606
2607 code = GET_CODE (x);
2608 if (REG_P (x))
2609 {
2610 if (reg_use_count == MAX_USES)
2611 return;
2612
2613 reg_use_table[reg_use_count].reg_rtx = x;
2614 reg_use_count++;
2615 }
2616
2617 /* Recursively scan the operands of this expression. */
2618
2619 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2620 {
2621 if (fmt[i] == 'e')
2622 {
2623 /* If we are about to do the last recursive call
2624 needed at this level, change it into iteration.
2625 This function is called enough to be worth it. */
2626 if (i == 0)
2627 {
2628 x = XEXP (x, 0);
2629 goto repeat;
2630 }
2631
2632 find_used_regs (&XEXP (x, i), data);
2633 }
2634 else if (fmt[i] == 'E')
2635 for (j = 0; j < XVECLEN (x, i); j++)
2636 find_used_regs (&XVECEXP (x, i, j), data);
2637 }
2638 }
2639
2640 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
2641 Returns nonzero if successful. */
2642
2643 static int
2644 try_replace_reg (rtx from, rtx to, rtx insn)
2645 {
2646 rtx note = find_reg_equal_equiv_note (insn);
2647 rtx src = 0;
2648 int success = 0;
2649 rtx set = single_set (insn);
2650
2651 validate_replace_src_group (from, to, insn);
2652 if (num_changes_pending () && apply_change_group ())
2653 success = 1;
2654
2655 /* Try to simplify SET_SRC if we have substituted a constant. */
2656 if (success && set && CONSTANT_P (to))
2657 {
2658 src = simplify_rtx (SET_SRC (set));
2659
2660 if (src)
2661 validate_change (insn, &SET_SRC (set), src, 0);
2662 }
2663
2664 /* If there is already a NOTE, update the expression in it with our
2665 replacement. */
2666 if (note != 0)
2667 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
2668
2669 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
2670 {
2671 /* If the above failed and this is a single set, try to simplify the source
2672 of the set given our substitution. We could perhaps try this for multiple
2673 SETs, but it probably won't buy us anything. */
2674 src = simplify_replace_rtx (SET_SRC (set), from, to);
2675
2676 if (!rtx_equal_p (src, SET_SRC (set))
2677 && validate_change (insn, &SET_SRC (set), src, 0))
2678 success = 1;
2679
2680 /* If we've failed to do replacement, have a single SET, don't already
2681 have a note, and have no special SET, add a REG_EQUAL note so as
2682 not to lose information. */
2683 if (!success && note == 0 && set != 0
2684 && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
2685 && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
2686 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2687 }
2688
2689 /* The REG_EQUAL note may get simplified into a register. We don't
2690 allow that; remove the note in that case. This situation ought not
2691 to arise, because the previous code ought to have synthesized a
2692 reg-reg move, but be on the safe side. */
2693 if (note && REG_P (XEXP (note, 0)))
2694 remove_note (insn, note);
2695
2696 return success;
2697 }
2698
2699 /* Find a set of REGNO that is available on entry to INSN's block. Returns
2700 NULL if no such set is found. */
2701
2702 static struct expr *
2703 find_avail_set (int regno, rtx insn)
2704 {
2705 /* SET1 contains the last set found that can be returned to the caller for
2706 use in a substitution. */
2707 struct expr *set1 = 0;
2708
2709 /* Loops are not possible here. To get a loop we would need two sets
2710 available at the start of the block containing INSN, i.e. two sets
2711 like these:
2712
2713 (set (reg X) (reg Y))
2714 (set (reg Y) (reg X))
2715
2716 This cannot happen since the set of (reg Y) would have killed the
2717 set of (reg X), making it unavailable at the start of this block. */
2718 while (1)
2719 {
2720 rtx src;
2721 struct expr *set = lookup_set (regno, &set_hash_table);
2722
2723 /* Find a set that is available at the start of the block
2724 which contains INSN. */
2725 while (set)
2726 {
2727 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
2728 break;
2729 set = next_set (regno, set);
2730 }
2731
2732 /* If no available set was found we've reached the end of the
2733 (possibly empty) copy chain. */
2734 if (set == 0)
2735 break;
2736
2737 gcc_assert (GET_CODE (set->expr) == SET);
2738
2739 src = SET_SRC (set->expr);
2740
2741 /* We know the set is available.
2742 Now check that SRC is ANTLOC (i.e. none of the source operands
2743 have changed since the start of the block).
2744
2745 If the source operand changed, we may still use it for the next
2746 iteration of this loop, but we may not use it for substitutions. */
2747
2748 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
2749 set1 = set;
2750
2751 /* If the source of the set is anything except a register, then
2752 we have reached the end of the copy chain. */
2753 if (! REG_P (src))
2754 break;
2755
2756 /* Follow the copy chain, i.e. start another iteration of the loop
2757 and see if we have an available copy into SRC. */
2758 regno = REGNO (src);
2759 }
2760
2761 /* SET1 holds the last set that was available and anticipatable at
2762 INSN. */
2763 return set1;
2764 }
2765
2766 /* Subroutine of cprop_insn that tries to propagate constants into
2767 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
2768 it is the instruction that immediately precedes JUMP, and must be a
2769 single SET of a register. FROM is what we will try to replace,
2770 SRC is the constant we will try to substitute for it. Returns nonzero
2771 if a change was made. */
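/* Illustrative example: given
(set (pc) (if_then_else (eq (reg 100) (const_int 0))
(label_ref 23) (pc)))
and a known value (reg 100) = (const_int 0), the condition folds to
true, SET_SRC becomes (label_ref 23), and the jump is validated as
an unconditional jump. Had the condition folded to false, the
result would be pc_rtx and the jump would be deleted outright. */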
2772
2773 static int
2774 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
2775 {
2776 rtx new, set_src, note_src;
2777 rtx set = pc_set (jump);
2778 rtx note = find_reg_equal_equiv_note (jump);
2779
2780 if (note)
2781 {
2782 note_src = XEXP (note, 0);
2783 if (GET_CODE (note_src) == EXPR_LIST)
2784 note_src = NULL_RTX;
2785 }
2786 else note_src = NULL_RTX;
2787
2788 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
2789 set_src = note_src ? note_src : SET_SRC (set);
2790
2791 /* First substitute the SETCC condition into the JUMP instruction,
2792 then substitute the given values into this expanded JUMP. */
2793 if (setcc != NULL_RTX
2794 && !modified_between_p (from, setcc, jump)
2795 && !modified_between_p (src, setcc, jump))
2796 {
2797 rtx setcc_src;
2798 rtx setcc_set = single_set (setcc);
2799 rtx setcc_note = find_reg_equal_equiv_note (setcc);
2800 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
2801 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
2802 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
2803 setcc_src);
2804 }
2805 else
2806 setcc = NULL_RTX;
2807
2808 new = simplify_replace_rtx (set_src, from, src);
2809
2810 /* If no simplification can be made, then try the next register. */
2811 if (rtx_equal_p (new, SET_SRC (set)))
2812 return 0;
2813
2814 /* If this is now a no-op, delete it; otherwise it must be a valid insn. */
2815 if (new == pc_rtx)
2816 delete_insn (jump);
2817 else
2818 {
2819 /* Ensure that the value computed inside the jump insn is equivalent
2820 to the one computed by setcc. */
2821 if (setcc && modified_in_p (new, setcc))
2822 return 0;
2823 if (! validate_change (jump, &SET_SRC (set), new, 0))
2824 {
2825 /* When (some) constants are not valid in a comparison, and there
2826 are two registers to be replaced by constants before the entire
2827 comparison can be folded into a constant, we need to keep
2828 intermediate information in REG_EQUAL notes. For targets with
2829 separate compare insns, such notes are added by try_replace_reg.
2830 When we have a combined compare-and-branch instruction, however,
2831 we need to attach a note to the branch itself to make this
2832 optimization work. */
2833
2834 if (!rtx_equal_p (new, note_src))
2835 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new));
2836 return 0;
2837 }
2838
2839 /* Remove REG_EQUAL note after simplification. */
2840 if (note_src)
2841 remove_note (jump, note);
2842
2843 /* If this has turned into an unconditional jump,
2844 then put a barrier after it so that the unreachable
2845 code will be deleted. */
2846 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
2847 emit_barrier_after (jump);
2848 }
2849
2850 #ifdef HAVE_cc0
2851 /* Delete the cc0 setter. */
2852 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
2853 delete_insn (setcc);
2854 #endif
2855
2856 run_jump_opt_after_gcse = 1;
2857
2858 global_const_prop_count++;
2859 if (gcse_file != NULL)
2860 {
2861 fprintf (gcse_file,
2862 "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
2863 REGNO (from), INSN_UID (jump));
2864 print_rtl (gcse_file, src);
2865 fprintf (gcse_file, "\n");
2866 }
2867 purge_dead_edges (bb);
2868
2869 return 1;
2870 }
2871
2872 static bool
2873 constprop_register (rtx insn, rtx from, rtx to, bool alter_jumps)
2874 {
2875 rtx sset;
2876
2877 /* Check for reg or cc0 setting instructions followed by
2878 conditional branch instructions first. */
2879 if (alter_jumps
2880 && (sset = single_set (insn)) != NULL
2881 && NEXT_INSN (insn)
2882 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
2883 {
2884 rtx dest = SET_DEST (sset);
2885 if ((REG_P (dest) || CC0_P (dest))
2886 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
2887 return 1;
2888 }
2889
2890 /* Handle normal insns next. */
2891 if (NONJUMP_INSN_P (insn)
2892 && try_replace_reg (from, to, insn))
2893 return 1;
2894
2895 /* Try to propagate a CONST_INT into a conditional jump.
2896 We're pretty specific about what we will handle in this
2897 code; we can extend this as necessary over time.
2898
2899 Right now the insn in question must look like
2900 (set (pc) (if_then_else ...)) */
2901 else if (alter_jumps && any_condjump_p (insn) && onlyjump_p (insn))
2902 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
2903 return 0;
2904 }
2905
2906 /* Perform constant and copy propagation on INSN.
2907 The result is nonzero if a change was made. */
2908
2909 static int
2910 cprop_insn (rtx insn, int alter_jumps)
2911 {
2912 struct reg_use *reg_used;
2913 int changed = 0;
2914 rtx note;
2915
2916 if (!INSN_P (insn))
2917 return 0;
2918
2919 reg_use_count = 0;
2920 note_uses (&PATTERN (insn), find_used_regs, NULL);
2921
2922 note = find_reg_equal_equiv_note (insn);
2923
2924 /* We may win even when propagating constants into notes. */
2925 if (note)
2926 find_used_regs (&XEXP (note, 0), NULL);
2927
2928 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
2929 reg_used++, reg_use_count--)
2930 {
2931 unsigned int regno = REGNO (reg_used->reg_rtx);
2932 rtx pat, src;
2933 struct expr *set;
2934
2935 /* Ignore registers created by GCSE; our tables were sized using
2936 max_gcse_regno before these registers existed. */
2937 if (regno >= max_gcse_regno)
2938 continue;
2939
2940 /* If the register has already been set in this block, there's
2941 nothing we can do. */
2942 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
2943 continue;
2944
2945 /* Find an assignment that sets reg_used and is available
2946 at the start of the block. */
2947 set = find_avail_set (regno, insn);
2948 if (! set)
2949 continue;
2950
2951 pat = set->expr;
2952 /* ??? We might be able to handle PARALLELs. Later. */
2953 gcc_assert (GET_CODE (pat) == SET);
2954
2955 src = SET_SRC (pat);
2956
2957 /* Constant propagation. */
2958 if (gcse_constant_p (src))
2959 {
2960 if (constprop_register (insn, reg_used->reg_rtx, src, alter_jumps))
2961 {
2962 changed = 1;
2963 global_const_prop_count++;
2964 if (gcse_file != NULL)
2965 {
2966 fprintf (gcse_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
2967 fprintf (gcse_file, "insn %d with constant ", INSN_UID (insn));
2968 print_rtl (gcse_file, src);
2969 fprintf (gcse_file, "\n");
2970 }
2971 if (INSN_DELETED_P (insn))
2972 return 1;
2973 }
2974 }
2975 else if (REG_P (src)
2976 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2977 && REGNO (src) != regno)
2978 {
2979 if (try_replace_reg (reg_used->reg_rtx, src, insn))
2980 {
2981 changed = 1;
2982 global_copy_prop_count++;
2983 if (gcse_file != NULL)
2984 {
2985 fprintf (gcse_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
2986 regno, INSN_UID (insn));
2987 fprintf (gcse_file, " with reg %d\n", REGNO (src));
2988 }
2989
2990 /* The original insn setting reg_used may or may not now be
2991 deletable. We leave the deletion to flow. */
2992 /* FIXME: If it turns out that the insn isn't deletable,
2993 then we may have unnecessarily extended register lifetimes
2994 and made things worse. */
2995 }
2996 }
2997 }
2998
2999 return changed;
3000 }
3001
3002 /* Like find_used_regs, but avoid recording uses that appear in
3003 input-output contexts such as zero_extract or pre_dec. This
3004 restricts the cases we consider to those for which local cprop
3005 can legitimately make replacements. */
3006
3007 static void
3008 local_cprop_find_used_regs (rtx *xptr, void *data)
3009 {
3010 rtx x = *xptr;
3011
3012 if (x == 0)
3013 return;
3014
3015 switch (GET_CODE (x))
3016 {
3017 case ZERO_EXTRACT:
3018 case SIGN_EXTRACT:
3019 case STRICT_LOW_PART:
3020 return;
3021
3022 case PRE_DEC:
3023 case PRE_INC:
3024 case POST_DEC:
3025 case POST_INC:
3026 case PRE_MODIFY:
3027 case POST_MODIFY:
3028 /* Can only legitimately appear this early in the context of
3029 stack pushes for function arguments, but handle all of the
3030 codes nonetheless. */
3031 return;
3032
3033 case SUBREG:
3034 /* Setting a subreg of a register larger than word_mode leaves
3035 the non-written words unchanged. */
3036 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
3037 return;
3038 break;
3039
3040 default:
3041 break;
3042 }
3043
3044 find_used_regs (xptr, data);
3045 }
3046
3047 /* Try local const/copy propagation on X in INSN. LIBCALL_SP is a
3048 zero-terminated array of insns at the end of a libcall; their REG_EQUAL notes need updating. */
3049
3050 static bool
3051 do_local_cprop (rtx x, rtx insn, bool alter_jumps, rtx *libcall_sp)
3052 {
3053 rtx newreg = NULL, newcnst = NULL;
3054
3055 /* Rule out USE instructions and ASM statements as we don't want to
3056 change the hard registers mentioned. */
3057 if (REG_P (x)
3058 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
3059 || (GET_CODE (PATTERN (insn)) != USE
3060 && asm_noperands (PATTERN (insn)) < 0)))
3061 {
3062 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
3063 struct elt_loc_list *l;
3064
3065 if (!val)
3066 return false;
3067 for (l = val->locs; l; l = l->next)
3068 {
3069 rtx this_rtx = l->loc;
3070 rtx note;
3071
3072 /* Don't CSE non-constant values out of libcall blocks. */
3073 if (l->in_libcall && ! CONSTANT_P (this_rtx))
3074 continue;
3075
3076 if (gcse_constant_p (this_rtx))
3077 newcnst = this_rtx;
3078 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
3079 /* Don't copy propagate if it has an attached REG_EQUIV note.
3080 At this point only function parameters should have
3081 REG_EQUIV notes and if the argument slot is used somewhere
3082 explicitly, it means the address of the parameter has been taken,
3083 so we should not extend the lifetime of the pseudo. */
3084 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
3085 || ! MEM_P (XEXP (note, 0))))
3086 newreg = this_rtx;
3087 }
3088 if (newcnst && constprop_register (insn, x, newcnst, alter_jumps))
3089 {
3090 /* If we find a case where we can't fix the retval REG_EQUAL notes
3091 to match the new register, we either have to abandon this
3092 replacement or fix delete_trivially_dead_insns to preserve the
3093 setting insn, or make it delete the REG_EQUAL note, and fix up all
3094 passes that require the REG_EQUAL note there. */
3095 bool adjusted;
3096
3097 adjusted = adjust_libcall_notes (x, newcnst, insn, libcall_sp);
3098 gcc_assert (adjusted);
3099
3100 if (gcse_file != NULL)
3101 {
3102 fprintf (gcse_file, "LOCAL CONST-PROP: Replacing reg %d in ",
3103 REGNO (x));
3104 fprintf (gcse_file, "insn %d with constant ",
3105 INSN_UID (insn));
3106 print_rtl (gcse_file, newcnst);
3107 fprintf (gcse_file, "\n");
3108 }
3109 local_const_prop_count++;
3110 return true;
3111 }
3112 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
3113 {
3114 adjust_libcall_notes (x, newreg, insn, libcall_sp);
3115 if (gcse_file != NULL)
3116 {
3117 fprintf (gcse_file,
3118 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
3119 REGNO (x), INSN_UID (insn));
3120 fprintf (gcse_file, " with reg %d\n", REGNO (newreg));
3121 }
3122 local_copy_prop_count++;
3123 return true;
3124 }
3125 }
3126 return false;
3127 }
3128
3129 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
3130 their REG_EQUAL notes need updating to reflect that OLDREG has been
3131 replaced with NEWVAL in INSN. Return true if all substitutions could
3132 be made. */
3133 static bool
3134 adjust_libcall_notes (rtx oldreg, rtx newval, rtx insn, rtx *libcall_sp)
3135 {
3136 rtx end;
3137
3138 while ((end = *libcall_sp++))
3139 {
3140 rtx note = find_reg_equal_equiv_note (end);
3141
3142 if (! note)
3143 continue;
3144
3145 if (REG_P (newval))
3146 {
3147 if (reg_set_between_p (newval, PREV_INSN (insn), end))
3148 {
3149 do
3150 {
3151 note = find_reg_equal_equiv_note (end);
3152 if (! note)
3153 continue;
3154 if (reg_mentioned_p (newval, XEXP (note, 0)))
3155 return false;
3156 }
3157 while ((end = *libcall_sp++));
3158 return true;
3159 }
3160 }
3161 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), oldreg, newval);
3162 insn = end;
3163 }
3164 return true;
3165 }
3166
3167 #define MAX_NESTED_LIBCALLS 9
3168
3169 /* Do local const/copy propagation (i.e. within each basic block).
3170 If ALTER_JUMPS is true, allow propagating into jump insns, which
3171 could modify the CFG. */
3172
3173 static void
3174 local_cprop_pass (bool alter_jumps)
3175 {
3176 basic_block bb;
3177 rtx insn;
3178 struct reg_use *reg_used;
3179 rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp;
3180 bool changed = false;
3181
3182 cselib_init (false);
3183 libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS];
3184 *libcall_sp = 0;
3185 FOR_EACH_BB (bb)
3186 {
3187 FOR_BB_INSNS (bb, insn)
3188 {
3189 if (INSN_P (insn))
3190 {
3191 rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
3192
3193 if (note)
3194 {
3195 gcc_assert (libcall_sp != libcall_stack);
3196 *--libcall_sp = XEXP (note, 0);
3197 }
3198 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
3199 if (note)
3200 libcall_sp++;
3201 note = find_reg_equal_equiv_note (insn);
3202 do
3203 {
3204 reg_use_count = 0;
3205 note_uses (&PATTERN (insn), local_cprop_find_used_regs,
3206 NULL);
3207 if (note)
3208 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
3209
3210 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
3211 reg_used++, reg_use_count--)
3212 if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
3213 libcall_sp))
3214 {
3215 changed = true;
3216 break;
3217 }
3218 if (INSN_DELETED_P (insn))
3219 break;
3220 }
3221 while (reg_use_count);
3222 }
3223 cselib_process_insn (insn);
3224 }
3225
3226 /* Forget everything at the end of a basic block. Make sure we are
3227 not inside a libcall; libcalls should never cross basic blocks. */
3228 cselib_clear_table ();
3229 gcc_assert (libcall_sp == &libcall_stack[MAX_NESTED_LIBCALLS]);
3230 }
3231
3232 cselib_finish ();
3233
3234 /* Global analysis may get into infinite loops for unreachable blocks. */
3235 if (changed && alter_jumps)
3236 {
3237 delete_unreachable_blocks ();
3238 free_reg_set_mem ();
3239 alloc_reg_set_mem (max_reg_num ());
3240 compute_sets ();
3241 }
3242 }
3243
3244 /* Forward propagate copies. This includes copies and constants. Return
3245 nonzero if a change was made. */
3246
3247 static int
3248 cprop (int alter_jumps)
3249 {
3250 int changed;
3251 basic_block bb;
3252 rtx insn;
3253
3254 /* Note we start at block 1. */
3255 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3256 {
3257 if (gcse_file != NULL)
3258 fprintf (gcse_file, "\n");
3259 return 0;
3260 }
3261
3262 changed = 0;
3263 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
3264 {
3265 /* Reset tables used to keep track of what's still valid [since the
3266 start of the block]. */
3267 reset_opr_set_tables ();
3268
3269 FOR_BB_INSNS (bb, insn)
3270 if (INSN_P (insn))
3271 {
3272 changed |= cprop_insn (insn, alter_jumps);
3273
3274 /* Keep track of everything modified by this insn. */
3275 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
3276 call mark_oprs_set if we turned the insn into a NOTE. */
3277 if (! NOTE_P (insn))
3278 mark_oprs_set (insn);
3279 }
3280 }
3281
3282 if (gcse_file != NULL)
3283 fprintf (gcse_file, "\n");
3284
3285 return changed;
3286 }
3287
3288 /* Similar to get_condition, only the resulting condition must be
3289 valid at JUMP, instead of at EARLIEST.
3290
3291 This differs from noce_get_condition in ifcvt.c in that we prefer not to
3292 settle for the condition variable in the jump instruction being integral.
3293 We prefer to be able to record the value of a user variable, rather than
3294 the value of a temporary used in a condition. This could be solved by
3295 recording the value of *every* register scanned by canonicalize_condition,
3296 but this would require some code reorganization. */
3297
3298 rtx
3299 fis_get_condition (rtx jump)
3300 {
3301 return get_condition (jump, NULL, false, true);
3302 }
3303
3304 /* Check the comparison COND to see if we can safely form an implicit set from
3305 it. COND is either an EQ or NE comparison. */
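/* For example, "if (x == 0.0)" must not give rise to the implicit
set x = 0.0 when signed zeros are honored: x could be -0.0, which
compares equal to 0.0 but is a different value. */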
3306
3307 static bool
3308 implicit_set_cond_p (rtx cond)
3309 {
3310 enum machine_mode mode = GET_MODE (XEXP (cond, 0));
3311 rtx cst = XEXP (cond, 1);
3312
3313 /* We can't perform this optimization if either operand might be or might
3314 contain a signed zero. */
3315 if (HONOR_SIGNED_ZEROS (mode))
3316 {
3317 /* It is sufficient to check if CST is or contains a zero. We must
3318 handle float, complex, and vector. If any subpart is a zero, then
3319 the optimization can't be performed. */
3320 /* ??? The complex and vector checks are not implemented yet. We just
3321 always return zero for them. */
3322 if (GET_CODE (cst) == CONST_DOUBLE)
3323 {
3324 REAL_VALUE_TYPE d;
3325 REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
3326 if (REAL_VALUES_EQUAL (d, dconst0))
3327 return 0;
3328 }
3329 else
3330 return 0;
3331 }
3332
3333 return gcse_constant_p (cst);
3334 }
3335
3336 /* Find the implicit sets of a function. An "implicit set" is a constraint
3337 on the value of a variable, implied by a conditional jump. For example,
3338 following "if (x == 2)", the then branch may be optimized as though the
3339 conditional performed an "explicit set", in this example, "x = 2". This
3340 function records the set patterns that are implicit at the start of each
3341 basic block. */
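/* Continuing the example above, if a block ends in
(set (pc) (if_then_else (eq (reg 100) (const_int 2))
(label_ref 42) (pc)))
and the branch target has exactly one predecessor, the pattern
(set (reg 100) (const_int 2)) is recorded as implicit at the head
of the branch target; for an NE condition it would instead be
recorded at the head of the fallthru block. */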
3342
3343 static void
3344 find_implicit_sets (void)
3345 {
3346 basic_block bb, dest;
3347 unsigned int count;
3348 rtx cond, new;
3349
3350 count = 0;
3351 FOR_EACH_BB (bb)
3352 /* Check for more than one successor. */
3353 if (EDGE_COUNT (bb->succs) > 1)
3354 {
3355 cond = fis_get_condition (BB_END (bb));
3356
3357 if (cond
3358 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
3359 && REG_P (XEXP (cond, 0))
3360 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
3361 && implicit_set_cond_p (cond))
3362 {
3363 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
3364 : FALLTHRU_EDGE (bb)->dest;
3365
3366 if (dest && single_pred_p (dest)
3367 && dest != EXIT_BLOCK_PTR)
3368 {
3369 new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
3370 XEXP (cond, 1));
3371 implicit_sets[dest->index] = new;
3372 if (gcse_file)
3373 {
3374 fprintf (gcse_file, "Implicit set of reg %d in ",
3375 REGNO (XEXP (cond, 0)));
3376 fprintf (gcse_file, "basic block %d\n", dest->index);
3377 }
3378 count++;
3379 }
3380 }
3381 }
3382
3383 if (gcse_file)
3384 fprintf (gcse_file, "Found %d implicit sets\n", count);
3385 }
3386
3387 /* Perform one copy/constant propagation pass.
3388 PASS is the pass count. If CPROP_JUMPS is true, perform constant
3389 propagation into conditional jumps. If BYPASS_JUMPS is true,
3390 perform conditional jump bypassing optimizations. */
3391
3392 static int
3393 one_cprop_pass (int pass, bool cprop_jumps, bool bypass_jumps)
3394 {
3395 int changed = 0;
3396
3397 global_const_prop_count = local_const_prop_count = 0;
3398 global_copy_prop_count = local_copy_prop_count = 0;
3399
3400 local_cprop_pass (cprop_jumps);
3401
3402 /* Determine implicit sets. */
3403 implicit_sets = xcalloc (last_basic_block, sizeof (rtx));
3404 find_implicit_sets ();
3405
3406 alloc_hash_table (max_cuid, &set_hash_table, 1);
3407 compute_hash_table (&set_hash_table);
3408
3409 /* Free implicit_sets before peak usage. */
3410 free (implicit_sets);
3411 implicit_sets = NULL;
3412
3413 if (gcse_file)
3414 dump_hash_table (gcse_file, "SET", &set_hash_table);
3415 if (set_hash_table.n_elems > 0)
3416 {
3417 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
3418 compute_cprop_data ();
3419 changed = cprop (cprop_jumps);
3420 if (bypass_jumps)
3421 changed |= bypass_conditional_jumps ();
3422 free_cprop_mem ();
3423 }
3424
3425 free_hash_table (&set_hash_table);
3426
3427 if (gcse_file)
3428 {
3429 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
3430 current_function_name (), pass, bytes_used);
3431 fprintf (gcse_file, "%d local const props, %d local copy props, ",
3432 local_const_prop_count, local_copy_prop_count);
3433 fprintf (gcse_file, "%d global const props, %d global copy props\n\n",
3434 global_const_prop_count, global_copy_prop_count);
3435 }
3436 /* Global analysis may get into infinite loops for unreachable blocks. */
3437 if (changed && cprop_jumps)
3438 delete_unreachable_blocks ();
3439
3440 return changed;
3441 }
3442 \f
3443 /* Bypass conditional jumps. */
3444
3445 /* The value of last_basic_block at the beginning of the jump_bypass
3446 pass. The use of redirect_edge_and_branch_force may introduce new
3447 basic blocks, but the data flow analysis is only valid for basic
3448 block indices less than bypass_last_basic_block. */
3449
3450 static int bypass_last_basic_block;
3451
3452 /* Find a set of REGNO to a constant that is available at the end of basic
3453 block BB. Returns NULL if no such set is found. Based heavily upon
3454 find_avail_set. */
3455
3456 static struct expr *
3457 find_bypass_set (int regno, int bb)
3458 {
3459 struct expr *result = 0;
3460
3461 for (;;)
3462 {
3463 rtx src;
3464 struct expr *set = lookup_set (regno, &set_hash_table);
3465
3466 while (set)
3467 {
3468 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
3469 break;
3470 set = next_set (regno, set);
3471 }
3472
3473 if (set == 0)
3474 break;
3475
3476 gcc_assert (GET_CODE (set->expr) == SET);
3477
3478 src = SET_SRC (set->expr);
3479 if (gcse_constant_p (src))
3480 result = set;
3481
3482 if (! REG_P (src))
3483 break;
3484
3485 regno = REGNO (src);
3486 }
3487 return result;
3488 }
3489
3490
3491 /* Subroutine of bypass_block that checks whether a pseudo is killed by
3492 any of the instructions inserted on an edge. Jump bypassing places
3493 condition code setters on CFG edges using insert_insn_on_edge. This
3494 function is required to check that our data flow analysis is still
3495 valid prior to commit_edge_insertions. */
3496
3497 static bool
3498 reg_killed_on_edge (rtx reg, edge e)
3499 {
3500 rtx insn;
3501
3502 for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
3503 if (INSN_P (insn) && reg_set_p (reg, insn))
3504 return true;
3505
3506 return false;
3507 }
3508
3509 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
3510 basic block BB which has more than one predecessor. If not NULL, SETCC
3511 is the first instruction of BB, which is immediately followed by JUMP_INSN
3512 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
3513 Returns nonzero if a change was made.
3514
3515 During the jump bypassing pass, we may place copies of SETCC instructions
3516 on CFG edges. The following routine must be careful to pay attention to
3517 these inserted insns when performing its transformations. */
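/* Illustrative example: suppose BB consists of
(set (reg 99) (eq:SI (reg 100) (const_int 0)))
followed by a conditional jump on reg 99. If on one incoming edge
reg 100 is known to be (const_int 0), the condition folds to true
for that edge, so the edge is redirected straight to the jump's
target and a copy of the setcc pattern is inserted on the
redirected edge. */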
3518
3519 static int
3520 bypass_block (basic_block bb, rtx setcc, rtx jump)
3521 {
3522 rtx insn, note;
3523 edge e, edest;
3524 int i, change;
3525 int may_be_loop_header;
3526 unsigned removed_p;
3527 edge_iterator ei;
3528
3529 insn = (setcc != NULL) ? setcc : jump;
3530
3531 /* Determine set of register uses in INSN. */
3532 reg_use_count = 0;
3533 note_uses (&PATTERN (insn), find_used_regs, NULL);
3534 note = find_reg_equal_equiv_note (insn);
3535 if (note)
3536 find_used_regs (&XEXP (note, 0), NULL);
3537
3538 may_be_loop_header = false;
3539 FOR_EACH_EDGE (e, ei, bb->preds)
3540 if (e->flags & EDGE_DFS_BACK)
3541 {
3542 may_be_loop_header = true;
3543 break;
3544 }
3545
3546 change = 0;
3547 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3548 {
3549 removed_p = 0;
3550
3551 if (e->flags & EDGE_COMPLEX)
3552 {
3553 ei_next (&ei);
3554 continue;
3555 }
3556
3557 /* We can't redirect edges from new basic blocks. */
3558 if (e->src->index >= bypass_last_basic_block)
3559 {
3560 ei_next (&ei);
3561 continue;
3562 }
3563
3564 /* The irreducible loops created by redirecting edges entering the
3565 loop from outside would decrease the effectiveness of some of the
3566 following optimizations, so prevent this. */
3567 if (may_be_loop_header
3568 && !(e->flags & EDGE_DFS_BACK))
3569 {
3570 ei_next (&ei);
3571 continue;
3572 }
3573
3574 for (i = 0; i < reg_use_count; i++)
3575 {
3576 struct reg_use *reg_used = &reg_use_table[i];
3577 unsigned int regno = REGNO (reg_used->reg_rtx);
3578 basic_block dest, old_dest;
3579 struct expr *set;
3580 rtx src, new;
3581
3582 if (regno >= max_gcse_regno)
3583 continue;
3584
3585 set = find_bypass_set (regno, e->src->index);
3586
3587 if (! set)
3588 continue;
3589
3590 /* Check the data flow is valid after edge insertions. */
3591 if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
3592 continue;
3593
3594 src = SET_SRC (pc_set (jump));
3595
3596 if (setcc != NULL)
3597 src = simplify_replace_rtx (src,
3598 SET_DEST (PATTERN (setcc)),
3599 SET_SRC (PATTERN (setcc)));
3600
3601 new = simplify_replace_rtx (src, reg_used->reg_rtx,
3602 SET_SRC (set->expr));
3603
3604 /* Jump bypassing may have already placed instructions on
3605 edges of the CFG. We can't bypass an outgoing edge that
3606 has instructions associated with it, as these insns won't
3607 get executed if the incoming edge is redirected. */
3608
3609 if (new == pc_rtx)
3610 {
3611 edest = FALLTHRU_EDGE (bb);
3612 dest = edest->insns.r ? NULL : edest->dest;
3613 }
3614 else if (GET_CODE (new) == LABEL_REF)
3615 {
3616 dest = BLOCK_FOR_INSN (XEXP (new, 0));
3617 /* Don't bypass edges containing instructions. */
3618 edest = find_edge (bb, dest);
3619 if (edest && edest->insns.r)
3620 dest = NULL;
3621 }
3622 else
3623 dest = NULL;
3624
3625 /* Avoid unification of the edge with other edges from the original
3626 branch. We would end up emitting the instruction on "both"
3627 edges. */
3628
3629 if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
3630 && find_edge (e->src, dest))
3631 dest = NULL;
3632
3633 old_dest = e->dest;
3634 if (dest != NULL
3635 && dest != old_dest
3636 && dest != EXIT_BLOCK_PTR)
3637 {
3638 redirect_edge_and_branch_force (e, dest);
3639
3640 /* Copy the register setter to the redirected edge.
3641 Don't copy CC0 setters, as CC0 is dead after jump. */
3642 if (setcc)
3643 {
3644 rtx pat = PATTERN (setcc);
3645 if (!CC0_P (SET_DEST (pat)))
3646 insert_insn_on_edge (copy_insn (pat), e);
3647 }
3648
3649 if (gcse_file != NULL)
3650 {
3651 fprintf (gcse_file, "JUMP-BYPASS: Proved reg %d "
3652 "in jump_insn %d equals constant ",
3653 regno, INSN_UID (jump));
3654 print_rtl (gcse_file, SET_SRC (set->expr));
3655 fprintf (gcse_file, "\nBypass edge from %d->%d to %d\n",
3656 e->src->index, old_dest->index, dest->index);
3657 }
3658 change = 1;
3659 removed_p = 1;
3660 break;
3661 }
3662 }
3663 if (!removed_p)
3664 ei_next (&ei);
3665 }
3666 return change;
3667 }
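
/* For illustration only (a hypothetical CFG, not compiler code): suppose
   BB contains just "if (reg 1 == 0) goto L1;" and has two predecessors,
   B1 ending with an available set "reg 1 <- 0" and B2 with "reg 1 <- 1".
   Substituting each constant into the branch condition simplifies it to
   a LABEL_REF or to pc_rtx, so the edge B1->BB is redirected straight
   to L1 and the edge B2->BB to the fall-through destination, making
   BB's test dead on both paths.  */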
3668
3669 /* Find basic blocks with more than one predecessor that only contain a
3670 single conditional jump. If the result of the comparison is known at
3671 compile-time from any incoming edge, redirect that edge to the
3672 appropriate target. Returns nonzero if a change was made.
3673
3674 This function is now mis-named, because we also handle indirect jumps. */
3675
3676 static int
3677 bypass_conditional_jumps (void)
3678 {
3679 basic_block bb;
3680 int changed;
3681 rtx setcc;
3682 rtx insn;
3683 rtx dest;
3684
3685 /* Note we start at block 1. */
3686 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3687 return 0;
3688
3689 bypass_last_basic_block = last_basic_block;
3690 mark_dfs_back_edges ();
3691
3692 changed = 0;
3693 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
3694 EXIT_BLOCK_PTR, next_bb)
3695 {
3696 /* Check for more than one predecessor. */
3697 if (!single_pred_p (bb))
3698 {
3699 setcc = NULL_RTX;
3700 FOR_BB_INSNS (bb, insn)
3701 if (NONJUMP_INSN_P (insn))
3702 {
3703 if (setcc)
3704 break;
3705 if (GET_CODE (PATTERN (insn)) != SET)
3706 break;
3707
3708 dest = SET_DEST (PATTERN (insn));
3709 if (REG_P (dest) || CC0_P (dest))
3710 setcc = insn;
3711 else
3712 break;
3713 }
3714 else if (JUMP_P (insn))
3715 {
3716 if ((any_condjump_p (insn) || computed_jump_p (insn))
3717 && onlyjump_p (insn))
3718 changed |= bypass_block (bb, setcc, insn);
3719 break;
3720 }
3721 else if (INSN_P (insn))
3722 break;
3723 }
3724 }
3725
3726 /* If we bypassed any register setting insns, we inserted a
3727 copy on the redirected edge. These need to be committed. */
3728 if (changed)
3729 commit_edge_insertions();
3730
3731 return changed;
3732 }
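
/* For illustration only (a hypothetical block shape, not compiler code):
   the scan above accepts a candidate block of the form

       (set (reg 100) (eq (reg 1) (const_int 0)))   ; SETCC
       (jump_insn ...)                              ; conditional JUMP

   or a block whose only real insn is the jump itself (SETCC == NULL);
   any other insn in the block disqualifies it.  */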
3733 \f
3734 /* Compute PRE+LCM working variables. */
3735
3736 /* Local properties of expressions. */
3737 /* Nonzero for expressions that are transparent in the block. */
3738 static sbitmap *transp;
3739
3740 /* Nonzero for expressions that are transparent at the end of the block.
3741 This is only zero for expressions killed by an abnormal critical edge
3742 created by a call. */
3743 static sbitmap *transpout;
3744
3745 /* Nonzero for expressions that are computed (available) in the block. */
3746 static sbitmap *comp;
3747
3748 /* Nonzero for expressions that are locally anticipatable in the block. */
3749 static sbitmap *antloc;
3750
3751 /* Nonzero for expressions where this block is an optimal computation
3752 point. */
3753 static sbitmap *pre_optimal;
3754
3755 /* Nonzero for expressions which are redundant in a particular block. */
3756 static sbitmap *pre_redundant;
3757
3758 /* Nonzero for expressions which should be inserted on a specific edge. */
3759 static sbitmap *pre_insert_map;
3760
3761 /* Nonzero for expressions which should be deleted in a specific block. */
3762 static sbitmap *pre_delete_map;
3763
3764 /* Contains the edge_list returned by pre_edge_lcm. */
3765 static struct edge_list *edge_list;
3766
3767 /* Redundant insns. */
3768 static sbitmap pre_redundant_insns;
3769
3770 /* Allocate vars used for PRE analysis. */
3771
3772 static void
3773 alloc_pre_mem (int n_blocks, int n_exprs)
3774 {
3775 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
3776 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
3777 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
3778
3779 pre_optimal = NULL;
3780 pre_redundant = NULL;
3781 pre_insert_map = NULL;
3782 pre_delete_map = NULL;
3783 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3784
3785 /* pre_insert and pre_delete are allocated later. */
3786 }
3787
3788 /* Free vars used for PRE analysis. */
3789
3790 static void
3791 free_pre_mem (void)
3792 {
3793 sbitmap_vector_free (transp);
3794 sbitmap_vector_free (comp);
3795
3796 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
3797
3798 if (pre_optimal)
3799 sbitmap_vector_free (pre_optimal);
3800 if (pre_redundant)
3801 sbitmap_vector_free (pre_redundant);
3802 if (pre_insert_map)
3803 sbitmap_vector_free (pre_insert_map);
3804 if (pre_delete_map)
3805 sbitmap_vector_free (pre_delete_map);
3806
3807 transp = comp = NULL;
3808 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
3809 }
3810
3811 /* Top level routine to do the dataflow analysis needed by PRE. */
3812
3813 static void
3814 compute_pre_data (void)
3815 {
3816 sbitmap trapping_expr;
3817 basic_block bb;
3818 unsigned int ui;
3819
3820 compute_local_properties (transp, comp, antloc, &expr_hash_table);
3821 sbitmap_vector_zero (ae_kill, last_basic_block);
3822
3823 /* Collect expressions which might trap. */
3824 trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
3825 sbitmap_zero (trapping_expr);
3826 for (ui = 0; ui < expr_hash_table.size; ui++)
3827 {
3828 struct expr *e;
3829 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
3830 if (may_trap_p (e->expr))
3831 SET_BIT (trapping_expr, e->bitmap_index);
3832 }
3833
3834 /* Compute ae_kill for each basic block using:
3835
3836 ~(TRANSP | COMP)
3837 */
3838
3839 FOR_EACH_BB (bb)
3840 {
3841 edge e;
3842 edge_iterator ei;
3843
3844 /* If the current block is the destination of an abnormal edge, we
3845 kill all trapping expressions because we won't be able to properly
3846 place the instruction on the edge. So make them neither
3847 anticipatable nor transparent. This is fairly conservative. */
3848 FOR_EACH_EDGE (e, ei, bb->preds)
3849 if (e->flags & EDGE_ABNORMAL)
3850 {
3851 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
3852 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
3853 break;
3854 }
3855
3856 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
3857 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
3858 }
3859
3860 edge_list = pre_edge_lcm (gcse_file, expr_hash_table.n_elems, transp, comp, antloc,
3861 ae_kill, &pre_insert_map, &pre_delete_map);
3862 sbitmap_vector_free (antloc);
3863 antloc = NULL;
3864 sbitmap_vector_free (ae_kill);
3865 ae_kill = NULL;
3866 sbitmap_free (trapping_expr);
3867 }
3868 \f
3869 /* PRE utilities */
3870
3871 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
3872 block BB.
3873
3874 VISITED is a pointer to a working buffer for tracking which BB's have
3875 been visited. It is NULL for the top-level call.
3876
3877 We treat reaching expressions that go through blocks containing the same
3878 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3879 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3880 2 as not reaching. The intent is to improve the probability of finding
3881 only one reaching expression and to reduce register lifetimes by picking
3882 the closest such expression. */
3883
3884 static int
3885 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
3886 {
3887 edge pred;
3888 edge_iterator ei;
3889
3890 FOR_EACH_EDGE (pred, ei, bb->preds)
3891 {
3892 basic_block pred_bb = pred->src;
3893
3894 if (pred->src == ENTRY_BLOCK_PTR
3895 /* Has this predecessor already been visited? */
3896 || visited[pred_bb->index])
3897 ;/* Nothing to do. */
3898
3899 /* Does this predecessor generate this expression? */
3900 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
3901 {
3902 /* Is this the occurrence we're looking for?
3903 Note that there's only one generating occurrence per block
3904 so we just need to check the block number. */
3905 if (occr_bb == pred_bb)
3906 return 1;
3907
3908 visited[pred_bb->index] = 1;
3909 }
3910 /* Ignore this predecessor if it kills the expression. */
3911 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
3912 visited[pred_bb->index] = 1;
3913
3914 /* Neither gen nor kill. */
3915 else
3916 {
3917 visited[pred_bb->index] = 1;
3918 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
3919 return 1;
3920 }
3921 }
3922
3923 /* All paths have been checked. */
3924 return 0;
3925 }
3926
3927 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
3928 memory allocated for that function is returned. */
3929
3930 static int
3931 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
3932 {
3933 int rval;
3934 char *visited = xcalloc (last_basic_block, 1);
3935
3936 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
3937
3938 free (visited);
3939 return rval;
3940 }
3941 \f
3942
3943 /* Given an expr, generate RTL which we can insert at the end of a BB,
3944 or on an edge. Set the block number of any insns generated to
3945 the value of BB. */
3946
3947 static rtx
3948 process_insert_insn (struct expr *expr)
3949 {
3950 rtx reg = expr->reaching_reg;
3951 rtx exp = copy_rtx (expr->expr);
3952 rtx pat;
3953
3954 start_sequence ();
3955
3956 /* If the expression is something that's an operand, like a constant,
3957 just copy it to a register. */
3958 if (general_operand (exp, GET_MODE (reg)))
3959 emit_move_insn (reg, exp);
3960
3961 /* Otherwise, make a new insn to compute this expression and make sure the
3962 insn will be recognized (this also adds any needed CLOBBERs). Copy the
3963 expression to make sure we don't have any sharing issues. */
3964 else
3965 {
3966 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
3967
3968 if (insn_invalid_p (insn))
3969 gcc_unreachable ();
3970 }
3971
3972
3973 pat = get_insns ();
3974 end_sequence ();
3975
3976 return pat;
3977 }
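
/* For illustration only (hypothetical RTL, not compiler code): for a
   constant expression, process_insert_insn yields a plain move such as

       (set (reg 110) (const_int 42))

   while for a computed expression such as (plus (reg 1) (reg 2)) it
   emits and validates

       (set (reg 110) (plus (reg 1) (reg 2)))

   so that recognition can add any CLOBBERs the target pattern requires.  */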
3978
3979 /* Add EXPR to the end of basic block BB.
3980
3981 This is used by both PRE and code hoisting.
3982
3983 For PRE, we want to verify that the expr is either transparent
3984 or locally anticipatable in the target block. This check makes
3985 no sense for code hoisting. */
3986
3987 static void
3988 insert_insn_end_bb (struct expr *expr, basic_block bb, int pre)
3989 {
3990 rtx insn = BB_END (bb);
3991 rtx new_insn;
3992 rtx reg = expr->reaching_reg;
3993 int regno = REGNO (reg);
3994 rtx pat, pat_end;
3995
3996 pat = process_insert_insn (expr);
3997 gcc_assert (pat && INSN_P (pat));
3998
3999 pat_end = pat;
4000 while (NEXT_INSN (pat_end) != NULL_RTX)
4001 pat_end = NEXT_INSN (pat_end);
4002
4003 /* If the last insn is a jump, insert EXPR in front [taking care to
4004 handle cc0, etc. properly]. Similarly we need to take care of
4005 trapping instructions in the presence of non-call exceptions. */
4006
4007 if (JUMP_P (insn)
4008 || (NONJUMP_INSN_P (insn)
4009 && (!single_succ_p (bb)
4010 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
4011 {
4012 #ifdef HAVE_cc0
4013 rtx note;
4014 #endif
4015 /* It should always be the case that we can put these instructions
4016 anywhere in the basic block when performing PRE optimizations.
4017 Check this. */
4018 gcc_assert (!NONJUMP_INSN_P (insn) || !pre
4019 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
4020 || TEST_BIT (transp[bb->index], expr->bitmap_index));
4021
4022 /* If this is a jump table, then we can't insert stuff here. Since
4023 we know the previous real insn must be the tablejump, we insert
4024 the new instruction just before the tablejump. */
4025 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
4026 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4027 insn = prev_real_insn (insn);
4028
4029 #ifdef HAVE_cc0
4030 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4031 if cc0 isn't set. */
4032 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
4033 if (note)
4034 insn = XEXP (note, 0);
4035 else
4036 {
4037 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
4038 if (maybe_cc0_setter
4039 && INSN_P (maybe_cc0_setter)
4040 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
4041 insn = maybe_cc0_setter;
4042 }
4043 #endif
4044 /* FIXME: What if something in cc0/jump uses value set in new insn? */
4045 new_insn = emit_insn_before_noloc (pat, insn);
4046 }
4047
4048 /* Likewise if the last insn is a call, as will happen in the presence
4049 of exception handling. */
4050 else if (CALL_P (insn)
4051 && (!single_succ_p (bb)
4052 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
4053 {
4054 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
4055 we search backward and place the instructions before the first
4056 parameter is loaded. Do this for everyone for consistency and a
4057 presumption that we'll get better code elsewhere as well.
4058
4059 It should always be the case that we can put these instructions
4060 anywhere in the basic block when performing PRE optimizations.
4061 Check this. */
4062
4063 gcc_assert (!pre
4064 || TEST_BIT (antloc[bb->index], expr->bitmap_index)
4065 || TEST_BIT (transp[bb->index], expr->bitmap_index));
4066
4067 /* Since different machines initialize their parameter registers
4068 in different orders, assume nothing. Collect the set of all
4069 parameter registers. */
4070 insn = find_first_parameter_load (insn, BB_HEAD (bb));
4071
4072 /* If we found all the parameter loads, then we want to insert
4073 before the first parameter load.
4074
4075 If we did not find all the parameter loads, then we might have
4076 stopped on the head of the block, which could be a CODE_LABEL.
4077 If we inserted before the CODE_LABEL, then we would be putting
4078 the insn in the wrong basic block. In that case, put the insn
4079 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
4080 while (LABEL_P (insn)
4081 || NOTE_INSN_BASIC_BLOCK_P (insn))
4082 insn = NEXT_INSN (insn);
4083
4084 new_insn = emit_insn_before_noloc (pat, insn);
4085 }
4086 else
4087 new_insn = emit_insn_after_noloc (pat, insn);
4088
4089 while (1)
4090 {
4091 if (INSN_P (pat))
4092 {
4093 add_label_notes (PATTERN (pat), new_insn);
4094 note_stores (PATTERN (pat), record_set_info, pat);
4095 }
4096 if (pat == pat_end)
4097 break;
4098 pat = NEXT_INSN (pat);
4099 }
4100
4101 gcse_create_count++;
4102
4103 if (gcse_file)
4104 {
4105 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
4106 bb->index, INSN_UID (new_insn));
4107 fprintf (gcse_file, "copying expression %d to reg %d\n",
4108 expr->bitmap_index, regno);
4109 }
4110 }
4111
4112 /* Insert partially redundant expressions on edges in the CFG to make
4113 the expressions fully redundant. */
4114
4115 static int
4116 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
4117 {
4118 int e, i, j, num_edges, set_size, did_insert = 0;
4119 sbitmap *inserted;
4120
4121 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4122 if it reaches any of the deleted expressions. */
4123
4124 set_size = pre_insert_map[0]->size;
4125 num_edges = NUM_EDGES (edge_list);
4126 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
4127 sbitmap_vector_zero (inserted, num_edges);
4128
4129 for (e = 0; e < num_edges; e++)
4130 {
4131 int indx;
4132 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
4133
4134 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
4135 {
4136 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
4137
4138 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
4139 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
4140 {
4141 struct expr *expr = index_map[j];
4142 struct occr *occr;
4143
4144 /* Now look at each deleted occurrence of this expression. */
4145 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4146 {
4147 if (! occr->deleted_p)
4148 continue;
4149
4150 /* Insert this expression on this edge if it would
4151 reach the deleted occurrence in BB. */
4152 if (!TEST_BIT (inserted[e], j))
4153 {
4154 rtx insn;
4155 edge eg = INDEX_EDGE (edge_list, e);
4156
4157 /* We can't insert anything on an abnormal and
4158 critical edge, so we insert the insn at the end of
4159 the previous block. There are several alternatives
4160 detailed in Morgan's book, p. 277 (sec. 10.5), for
4161 handling this situation. This one is easiest for
4162 now. */
4163
4164 if (eg->flags & EDGE_ABNORMAL)
4165 insert_insn_end_bb (index_map[j], bb, 0);
4166 else
4167 {
4168 insn = process_insert_insn (index_map[j]);
4169 insert_insn_on_edge (insn, eg);
4170 }
4171
4172 if (gcse_file)
4173 {
4174 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
4175 bb->index,
4176 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
4177 fprintf (gcse_file, "copy expression %d\n",
4178 expr->bitmap_index);
4179 }
4180
4181 update_ld_motion_stores (expr);
4182 SET_BIT (inserted[e], j);
4183 did_insert = 1;
4184 gcse_create_count++;
4185 }
4186 }
4187 }
4188 }
4189 }
4190
4191 sbitmap_vector_free (inserted);
4192 return did_insert;
4193 }
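
/* For illustration: the scan above decodes pre_insert_map[e] one
   SBITMAP_ELT_TYPE word at a time; bit J set means "insert expression J
   on edge E", and the INSERTED vector guarantees that each (edge,
   expression) pair is materialized at most once.  */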
4194
4195 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
4196 Given "old_reg <- expr" (INSN), instead of adding after it
4197 reaching_reg <- old_reg
4198 it's better to do the following:
4199 reaching_reg <- expr
4200 old_reg <- reaching_reg
4201 because this way copy propagation can discover additional PRE
4202 opportunities. But if this fails, we try the old way.
4203 When "expr" is a store, i.e.
4204 given "MEM <- old_reg", instead of adding after it
4205 reaching_reg <- old_reg
4206 it's better to add it before as follows:
4207 reaching_reg <- old_reg
4208 MEM <- reaching_reg. */
4209
4210 static void
4211 pre_insert_copy_insn (struct expr *expr, rtx insn)
4212 {
4213 rtx reg = expr->reaching_reg;
4214 int regno = REGNO (reg);
4215 int indx = expr->bitmap_index;
4216 rtx pat = PATTERN (insn);
4217 rtx set, new_insn;
4218 rtx old_reg;
4219 int i;
4220
4221 /* This block matches the logic in hash_scan_insn. */
4222 switch (GET_CODE (pat))
4223 {
4224 case SET:
4225 set = pat;
4226 break;
4227
4228 case PARALLEL:
4229 /* Search through the parallel looking for the set whose
4230 source was the expression that we're interested in. */
4231 set = NULL_RTX;
4232 for (i = 0; i < XVECLEN (pat, 0); i++)
4233 {
4234 rtx x = XVECEXP (pat, 0, i);
4235 if (GET_CODE (x) == SET
4236 && expr_equiv_p (SET_SRC (x), expr->expr))
4237 {
4238 set = x;
4239 break;
4240 }
4241 }
4242 break;
4243
4244 default:
4245 gcc_unreachable ();
4246 }
4247
4248 if (REG_P (SET_DEST (set)))
4249 {
4250 old_reg = SET_DEST (set);
4251 /* Check if we can modify the set destination in the original insn. */
4252 if (validate_change (insn, &SET_DEST (set), reg, 0))
4253 {
4254 new_insn = gen_move_insn (old_reg, reg);
4255 new_insn = emit_insn_after (new_insn, insn);
4256
4257 /* Keep register set table up to date. */
4258 record_one_set (regno, insn);
4259 }
4260 else
4261 {
4262 new_insn = gen_move_insn (reg, old_reg);
4263 new_insn = emit_insn_after (new_insn, insn);
4264
4265 /* Keep register set table up to date. */
4266 record_one_set (regno, new_insn);
4267 }
4268 }
4269 else /* This is possible only in case of a store to memory. */
4270 {
4271 old_reg = SET_SRC (set);
4272 new_insn = gen_move_insn (reg, old_reg);
4273
4274 /* Check if we can modify the set source in the original insn. */
4275 if (validate_change (insn, &SET_SRC (set), reg, 0))
4276 new_insn = emit_insn_before (new_insn, insn);
4277 else
4278 new_insn = emit_insn_after (new_insn, insn);
4279
4280 /* Keep register set table up to date. */
4281 record_one_set (regno, new_insn);
4282 }
4283
4284 gcse_create_count++;
4285
4286 if (gcse_file)
4287 fprintf (gcse_file,
4288 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
4289 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
4290 INSN_UID (insn), regno);
4291 }
4292
4293 /* Copy available expressions that reach the redundant expression
4294 to `reaching_reg'. */
4295
4296 static void
4297 pre_insert_copies (void)
4298 {
4299 unsigned int i, added_copy;
4300 struct expr *expr;
4301 struct occr *occr;
4302 struct occr *avail;
4303
4304 /* For each available expression in the table, copy the result to
4305 `reaching_reg' if the expression reaches a deleted one.
4306
4307 ??? The current algorithm is rather brute force.
4308 Need to do some profiling. */
4309
4310 for (i = 0; i < expr_hash_table.size; i++)
4311 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4312 {
4313 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
4314 we don't want to insert a copy here because the expression may not
4315 really be redundant. So only insert an insn if the expression was
4316 deleted. This test also avoids further processing if the
4317 expression wasn't deleted anywhere. */
4318 if (expr->reaching_reg == NULL)
4319 continue;
4320
4321 /* Set when we add a copy for that expression. */
4322 added_copy = 0;
4323
4324 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4325 {
4326 if (! occr->deleted_p)
4327 continue;
4328
4329 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
4330 {
4331 rtx insn = avail->insn;
4332
4333 /* No need to handle this one if handled already. */
4334 if (avail->copied_p)
4335 continue;
4336
4337 /* Don't handle this one if it's a redundant one. */
4338 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
4339 continue;
4340
4341 /* Or if the expression doesn't reach the deleted one. */
4342 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
4343 expr,
4344 BLOCK_FOR_INSN (occr->insn)))
4345 continue;
4346
4347 added_copy = 1;
4348
4349 /* Copy the result of avail to reaching_reg. */
4350 pre_insert_copy_insn (expr, insn);
4351 avail->copied_p = 1;
4352 }
4353 }
4354
4355 if (added_copy)
4356 update_ld_motion_stores (expr);
4357 }
4358 }
4359
4360 /* Emit a move from SRC to DEST, noting the equivalence with the expression
4361 computed in INSN. */
4362 static rtx
4363 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
4364 {
4365 rtx new;
4366 rtx set = single_set (insn), set2;
4367 rtx note;
4368 rtx eqv;
4369
4370 /* This should never fail since we're creating a reg->reg copy
4371 we've verified to be valid. */
4372
4373 new = emit_insn_after (gen_move_insn (dest, src), insn);
4374
4375 /* Note the equivalence for local CSE pass. */
4376 set2 = single_set (new);
4377 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
4378 return new;
4379 if ((note = find_reg_equal_equiv_note (insn)))
4380 eqv = XEXP (note, 0);
4381 else
4382 eqv = SET_SRC (set);
4383
4384 set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
4385
4386 return new;
4387 }
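
/* For illustration only (hypothetical RTL and register numbers, not
   compiler code): with INSN "(set (reg 3) (plus (reg 1) (reg 2)))",
   calling gcse_emit_move_after on a new pseudo reg 110 and reg 3 emits
   "(set (reg 3) (reg 110))" after INSN and attaches a REG_EQUAL note
   for (plus (reg 1) (reg 2)) (or INSN's own REG_EQUAL note, if any),
   so a later local CSE pass can still see the equivalence.  */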
4388
4389 /* Delete redundant computations.
4390 Deletion is done by changing the insn to copy the `reaching_reg' of
4391 the expression into the result of the SET. It is left to later passes
4392 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
4393
4394 Returns nonzero if a change is made. */
4395
4396 static int
4397 pre_delete (void)
4398 {
4399 unsigned int i;
4400 int changed;
4401 struct expr *expr;
4402 struct occr *occr;
4403
4404 changed = 0;
4405 for (i = 0; i < expr_hash_table.size; i++)
4406 for (expr = expr_hash_table.table[i];
4407 expr != NULL;
4408 expr = expr->next_same_hash)
4409 {
4410 int indx = expr->bitmap_index;
4411
4412 /* We only need to search antic_occr since we require
4413 ANTLOC != 0. */
4414
4415 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4416 {
4417 rtx insn = occr->insn;
4418 rtx set;
4419 basic_block bb = BLOCK_FOR_INSN (insn);
4420
4421 /* We only delete insns that have a single_set. */
4422 if (TEST_BIT (pre_delete_map[bb->index], indx)
4423 && (set = single_set (insn)) != 0)
4424 {
4425 /* Create a pseudo-reg to store the result of reaching
4426 expressions into. Get the mode for the new pseudo from
4427 the mode of the original destination pseudo. */
4428 if (expr->reaching_reg == NULL)
4429 expr->reaching_reg
4430 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4431
4432 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4433 delete_insn (insn);
4434 occr->deleted_p = 1;
4435 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
4436 changed = 1;
4437 gcse_subst_count++;
4438
4439 if (gcse_file)
4440 {
4441 fprintf (gcse_file,
4442 "PRE: redundant insn %d (expression %d) in ",
4443 INSN_UID (insn), indx);
4444 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
4445 bb->index, REGNO (expr->reaching_reg));
4446 }
4447 }
4448 }
4449 }
4450
4451 return changed;
4452 }
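
/* For illustration only (a hypothetical transformation, not compiler
   code): if "(set (reg 3) (plus (reg 1) (reg 2)))" is marked in
   pre_delete_map for its block, pre_delete allocates a reaching
   register (say reg 110), emits the copy "(set (reg 3) (reg 110))"
   via gcse_emit_move_after, and deletes the original computation;
   pre_edge_insert and pre_insert_copies later ensure that reg 110
   holds (plus (reg 1) (reg 2)) on every incoming path.  */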
4453
4454 /* Perform GCSE optimizations using PRE.
4455 This is called by one_pre_gcse_pass after all the dataflow analysis
4456 has been done.
4457
4458 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
4459 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4460 Compiler Design and Implementation.
4461
4462 ??? A new pseudo reg is created to hold the reaching expression. The nice
4463 thing about the classical approach is that it would try to use an existing
4464 reg. If the register can't be adequately optimized [i.e. we introduce
4465 reload problems], one could add a pass here to propagate the new register
4466 through the block.
4467
4468 ??? We don't handle single sets in PARALLELs because we're [currently] not
4469 able to copy the rest of the parallel when we insert copies to create full
4470 redundancies from partial redundancies. However, there's no reason why we
4471 can't handle PARALLELs in the cases where there are no partial
4472 redundancies. */
4473
4474 static int
4475 pre_gcse (void)
4476 {
4477 unsigned int i;
4478 int did_insert, changed;
4479 struct expr **index_map;
4480 struct expr *expr;
4481
4482 /* Compute a mapping from expression number (`bitmap_index') to
4483 hash table entry. */
4484
4485 index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
4486 for (i = 0; i < expr_hash_table.size; i++)
4487 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4488 index_map[expr->bitmap_index] = expr;
4489
4490 /* Reset bitmap used to track which insns are redundant. */
4491 pre_redundant_insns = sbitmap_alloc (max_cuid);
4492 sbitmap_zero (pre_redundant_insns);
4493
4494 /* Delete the redundant insns first so that
4495 - we know what register to use for the new insns and for the other
4496 ones with reaching expressions
4497 - we know which insns are redundant when we go to create copies */
4498
4499 changed = pre_delete ();
4500
4501 did_insert = pre_edge_insert (edge_list, index_map);
4502
4503 /* In other places with reaching expressions, copy the expression to the
4504 specially allocated pseudo-reg that reaches the redundant expr. */
4505 pre_insert_copies ();
4506 if (did_insert)
4507 {
4508 commit_edge_insertions ();
4509 changed = 1;
4510 }
4511
4512 free (index_map);
4513 sbitmap_free (pre_redundant_insns);
4514 return changed;
4515 }
4516
4517 /* Top level routine to perform one PRE GCSE pass.
4518
4519 Return nonzero if a change was made. */
4520
4521 static int
4522 one_pre_gcse_pass (int pass)
4523 {
4524 int changed = 0;
4525
4526 gcse_subst_count = 0;
4527 gcse_create_count = 0;
4528
4529 alloc_hash_table (max_cuid, &expr_hash_table, 0);
4530 add_noreturn_fake_exit_edges ();
4531 if (flag_gcse_lm)
4532 compute_ld_motion_mems ();
4533
4534 compute_hash_table (&expr_hash_table);
4535 trim_ld_motion_mems ();
4536 if (gcse_file)
4537 dump_hash_table (gcse_file, "Expression", &expr_hash_table);
4538
4539 if (expr_hash_table.n_elems > 0)
4540 {
4541 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
4542 compute_pre_data ();
4543 changed |= pre_gcse ();
4544 free_edge_list (edge_list);
4545 free_pre_mem ();
4546 }
4547
4548 free_ldst_mems ();
4549 remove_fake_exit_edges ();
4550 free_hash_table (&expr_hash_table);
4551
4552 if (gcse_file)
4553 {
4554 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
4555 current_function_name (), pass, bytes_used);
4556 fprintf (gcse_file, "%d substs, %d insns created\n",
4557 gcse_subst_count, gcse_create_count);
4558 }
4559
4560 return changed;
4561 }
4562 \f
4563 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
4564 If notes are added to an insn which references a CODE_LABEL, the
4565 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
4566 because the following loop optimization pass requires them. */
4567
4568 /* ??? This is very similar to the loop.c add_label_notes function. We
4569 could probably share code here. */
4570
4571 /* ??? If there was a jump optimization pass after gcse and before loop,
4572 then we would not need to do this here, because jump would add the
4573 necessary REG_LABEL notes. */
4574
4575 static void
4576 add_label_notes (rtx x, rtx insn)
4577 {
4578 enum rtx_code code = GET_CODE (x);
4579 int i, j;
4580 const char *fmt;
4581
4582 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4583 {
4584 /* This code used to ignore labels that referred to dispatch tables to
4585 avoid flow generating (slightly) worse code.
4586
4587 We no longer ignore such label references (see LABEL_REF handling in
4588 mark_jump_label for additional information). */
4589
4590 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
4591 REG_NOTES (insn));
4592 if (LABEL_P (XEXP (x, 0)))
4593 LABEL_NUSES (XEXP (x, 0))++;
4594 return;
4595 }
4596
4597 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4598 {
4599 if (fmt[i] == 'e')
4600 add_label_notes (XEXP (x, i), insn);
4601 else if (fmt[i] == 'E')
4602 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4603 add_label_notes (XVECEXP (x, i, j), insn);
4604 }
4605 }
4606
4607 /* Compute transparent outgoing information for each block.
4608
4609 An expression is transparent to an edge unless it is killed by
4610 the edge itself. This can only happen with abnormal control flow,
4611 when the edge is traversed through a call. This happens with
4612 non-local labels and exceptions.
4613
4614 This would not be necessary if we split the edge. While this is
4615 normally impossible for abnormal critical edges, with some effort
4616 it should be possible with exception handling, since we still have
4617 control over which handler should be invoked. But due to increased
4618 EH table sizes, this may not be worthwhile. */
4619
4620 static void
4621 compute_transpout (void)
4622 {
4623 basic_block bb;
4624 unsigned int i;
4625 struct expr *expr;
4626
4627 sbitmap_vector_ones (transpout, last_basic_block);
4628
4629 FOR_EACH_BB (bb)
4630 {
4631 /* Note that flow inserted a nop at the end of basic blocks that
4632 end in call instructions for reasons other than abnormal
4633 control flow. */
4634 if (! CALL_P (BB_END (bb)))
4635 continue;
4636
4637 for (i = 0; i < expr_hash_table.size; i++)
4638 for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
4639 if (MEM_P (expr->expr))
4640 {
4641 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
4642 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
4643 continue;
4644
4645 /* ??? Optimally, we would use interprocedural alias
4646 analysis to determine if this mem is actually killed
4647 by this call. */
4648 RESET_BIT (transpout[bb->index], expr->bitmap_index);
4649 }
4650 }
4651 }
4652
4653 /* Code Hoisting variables and subroutines. */
4654
4655 /* Very busy expressions. */
4656 static sbitmap *hoist_vbein;
4657 static sbitmap *hoist_vbeout;
4658
4659 /* Hoistable expressions. */
4660 static sbitmap *hoist_exprs;
4661
4662 /* ??? We could compute post dominators and run this algorithm in
4663 reverse to perform tail merging; doing so would probably be
4664 more effective than the tail merging code in jump.c.
4665
4666 It's unclear if tail merging could be run in parallel with
4667 code hoisting. It would be nice. */
4668
4669 /* Allocate vars used for code hoisting analysis. */
4670
4671 static void
4672 alloc_code_hoist_mem (int n_blocks, int n_exprs)
4673 {
4674 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4675 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4676 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4677
4678 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
4679 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
4680 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
4681 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
4682 }
4683
4684 /* Free vars used for code hoisting analysis. */
4685
4686 static void
4687 free_code_hoist_mem (void)
4688 {
4689 sbitmap_vector_free (antloc);
4690 sbitmap_vector_free (transp);
4691 sbitmap_vector_free (comp);
4692
4693 sbitmap_vector_free (hoist_vbein);
4694 sbitmap_vector_free (hoist_vbeout);
4695 sbitmap_vector_free (hoist_exprs);
4696 sbitmap_vector_free (transpout);
4697
4698 free_dominance_info (CDI_DOMINATORS);
4699 }
4700
4701 /* Compute the very busy expressions at entry/exit from each block.
4702
4703 An expression is very busy if all paths from a given point
4704 compute the expression. */
4705
4706 static void
4707 compute_code_hoist_vbeinout (void)
4708 {
4709 int changed, passes;
4710 basic_block bb;
4711
4712 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
4713 sbitmap_vector_zero (hoist_vbein, last_basic_block);
4714
4715 passes = 0;
4716 changed = 1;
4717
4718 while (changed)
4719 {
4720 changed = 0;
4721
4722 /* We scan the blocks in the reverse order to speed up
4723 the convergence. */
4724 FOR_EACH_BB_REVERSE (bb)
4725 {
4726 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index],
4727 hoist_vbeout[bb->index], transp[bb->index]);
4728 if (bb->next_bb != EXIT_BLOCK_PTR)
4729 sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index);
4730 }
4731
4732 passes++;
4733 }
4734
4735 if (gcse_file)
4736 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
4737 }
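
/* For illustration: the fixpoint above solves the backward dataflow
   equations

       vbein[bb]  = antloc[bb] | (vbeout[bb] & transp[bb])
       vbeout[bb] = intersection of vbein[succ] over bb's successors

   so an expression is very busy at a point iff it is evaluated on every
   path leaving that point before any of its operands change.  */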
4738
4739 /* Top level routine to do the dataflow analysis needed by code hoisting. */
4740
4741 static void
4742 compute_code_hoist_data (void)
4743 {
4744 compute_local_properties (transp, comp, antloc, &expr_hash_table);
4745 compute_transpout ();
4746 compute_code_hoist_vbeinout ();
4747 calculate_dominance_info (CDI_DOMINATORS);
4748 if (gcse_file)
4749 fprintf (gcse_file, "\n");
4750 }
4751
4752 /* Determine if the expression identified by EXPR_INDEX would
4753 reach BB unimpaired if it was placed at the end of EXPR_BB.
4754
4755 It's unclear exactly what Muchnick meant by "unimpaired". It seems
4756 to me that the expression must either be computed or transparent in
4757 *every* block in the path(s) from EXPR_BB to BB. Any other definition
4758 would allow the expression to be hoisted out of loops, even if
4759 the expression wasn't a loop invariant.
4760
4761 Contrast this to reachability for PRE where an expression is
4762 considered reachable if *any* path reaches instead of *all*
4763 paths. */
4764
4765 static int
4766 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited)
4767 {
4768 edge pred;
4769 edge_iterator ei;
4770 int visited_allocated_locally = 0;
4771
4772
4773 if (visited == NULL)
4774 {
4775 visited_allocated_locally = 1;
4776 visited = xcalloc (last_basic_block, 1);
4777 }
4778
4779 FOR_EACH_EDGE (pred, ei, bb->preds)
4780 {
4781 basic_block pred_bb = pred->src;
4782
4783 if (pred->src == ENTRY_BLOCK_PTR)
4784 break;
4785 else if (pred_bb == expr_bb)
4786 continue;
4787 else if (visited[pred_bb->index])
4788 continue;
4789
4790 /* Does this predecessor generate this expression? */
4791 else if (TEST_BIT (comp[pred_bb->index], expr_index))
4792 break;
4793 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
4794 break;
4795
4796 /* Not killed. */
4797 else
4798 {
4799 visited[pred_bb->index] = 1;
4800 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
4801 pred_bb, visited))
4802 break;
4803 }
4804 }
4805 if (visited_allocated_locally)
4806 free (visited);
4807
4808 return (pred == NULL);
4809 }
4810 \f
4811 /* Actually perform code hoisting. */
4812
4813 static void
4814 hoist_code (void)
4815 {
4816 basic_block bb, dominated;
4817 basic_block *domby;
4818 unsigned int domby_len;
4819 unsigned int i,j;
4820 struct expr **index_map;
4821 struct expr *expr;
4822
4823 sbitmap_vector_zero (hoist_exprs, last_basic_block);
4824
4825 /* Compute a mapping from expression number (`bitmap_index') to
4826 hash table entry. */
4827
4828 index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
4829 for (i = 0; i < expr_hash_table.size; i++)
4830 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4831 index_map[expr->bitmap_index] = expr;
4832
4833 /* Walk over each basic block looking for potentially hoistable
4834 expressions; nothing gets hoisted from the entry block. */
4835 FOR_EACH_BB (bb)
4836 {
4837 int found = 0;
4838 int insn_inserted_p;
4839
4840 domby_len = get_dominated_by (CDI_DOMINATORS, bb, &domby);
4841 /* Examine each expression that is very busy at the exit of this
4842 block. These are the potentially hoistable expressions. */
4843 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
4844 {
4845 int hoistable = 0;
4846
4847 if (TEST_BIT (hoist_vbeout[bb->index], i)
4848 && TEST_BIT (transpout[bb->index], i))
4849 {
4850 /* We've found a potentially hoistable expression, now
4851 we look at every block BB dominates to see if it
4852 computes the expression. */
4853 for (j = 0; j < domby_len; j++)
4854 {
4855 dominated = domby[j];
4856 /* Ignore self dominance. */
4857 if (bb == dominated)
4858 continue;
4859 /* We've found a dominated block, now see if it computes
4860 the busy expression and whether or not moving that
4861 expression to the "beginning" of that block is safe. */
4862 if (!TEST_BIT (antloc[dominated->index], i))
4863 continue;
4864
4865 /* Note if the expression would reach the dominated block
4866 unimpaired if it was placed at the end of BB.
4867
4868 Keep track of how many times this expression is hoistable
4869 from a dominated block into BB. */
4870 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4871 hoistable++;
4872 }
4873
4874 /* If we found more than one hoistable occurrence of this
4875 expression, then note it in the bitmap of expressions to
4876 hoist. It makes no sense to hoist things which are computed
4877 in only one BB, and doing so tends to pessimize register
4878 allocation. One could increase this value to try harder
4879 to avoid any possible code expansion due to register
4880 allocation issues; however experiments have shown that
4881 the vast majority of hoistable expressions are only movable
4882 from two successors, so raising this threshold is likely
4883 to nullify any benefit we get from code hoisting. */
4884 if (hoistable > 1)
4885 {
4886 SET_BIT (hoist_exprs[bb->index], i);
4887 found = 1;
4888 }
4889 }
4890 }
4891 /* If we found nothing to hoist, then quit now. */
4892 if (! found)
4893 {
4894 free (domby);
4895 continue;
4896 }
4897
4898 /* Loop over all the hoistable expressions. */
4899 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
4900 {
4901 /* We want to insert the expression into BB only once, so
4902 note when we've inserted it. */
4903 insn_inserted_p = 0;
4904
4905 /* These tests should be the same as the tests above. */
4906 if (TEST_BIT (hoist_exprs[bb->index], i))
4907 {
4908 /* We've found a potentially hoistable expression, now
4909 we look at every block BB dominates to see if it
4910 computes the expression. */
4911 for (j = 0; j < domby_len; j++)
4912 {
4913 dominated = domby[j];
4914 /* Ignore self dominance. */
4915 if (bb == dominated)
4916 continue;
4917
4918 /* We've found a dominated block, now see if it computes
4919 the busy expression and whether or not moving that
4920 expression to the "beginning" of that block is safe. */
4921 if (!TEST_BIT (antloc[dominated->index], i))
4922 continue;
4923
4924 /* The expression is computed in the dominated block and
4925 it would be safe to compute it at the start of the
4926 dominated block. Now we have to determine if the
4927 expression would reach the dominated block if it was
4928 placed at the end of BB. */
4929 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4930 {
4931 struct expr *expr = index_map[i];
4932 struct occr *occr = expr->antic_occr;
4933 rtx insn;
4934 rtx set;
4935
4936 /* Find the right occurrence of this expression. */
4937 while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
4938 occr = occr->next;
4939
4940 gcc_assert (occr);
4941 insn = occr->insn;
4942 set = single_set (insn);
4943 gcc_assert (set);
4944
4945 /* Create a pseudo-reg to store the result of reaching
4946 expressions into. Get the mode for the new pseudo
4947 from the mode of the original destination pseudo. */
4948 if (expr->reaching_reg == NULL)
4949 expr->reaching_reg
4950 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4951
4952 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4953 delete_insn (insn);
4954 occr->deleted_p = 1;
4955 if (!insn_inserted_p)
4956 {
4957 insert_insn_end_bb (index_map[i], bb, 0);
4958 insn_inserted_p = 1;
4959 }
4960 }
4961 }
4962 }
4963 }
4964 free (domby);
4965 }
4966
4967 free (index_map);
4968 }
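
/* For illustration only (a hypothetical example, not compiler code):
   given "if (c) x = a + b; else y = a + b;", where both arms are
   dominated by the test block and "a + b" is very busy and transparent
   at its exit, hoist_code computes "reg 110 <- a + b" once at the end
   of the test block and rewrites each arm into a copy from reg 110.  */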
4969
4970 /* Top level routine to perform one code hoisting (aka unification) pass
4971
4972 Return nonzero if a change was made. */
4973
4974 static int
4975 one_code_hoisting_pass (void)
4976 {
4977 int changed = 0;
4978
4979 alloc_hash_table (max_cuid, &expr_hash_table, 0);
4980 compute_hash_table (&expr_hash_table);
4981 if (gcse_file)
4982 dump_hash_table (gcse_file, "Code Hoisting Expressions", &expr_hash_table);
4983
4984 if (expr_hash_table.n_elems > 0)
4985 {
4986 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
4987 compute_code_hoist_data ();
4988 hoist_code ();
4989 free_code_hoist_mem ();
4990 }
4991
4992 free_hash_table (&expr_hash_table);
4993
4994 return changed;
4995 }
4996 \f
4997 /* Here we provide the things required to do store motion towards
4998 the exit. In order for this to be effective, gcse also needed to
4999 be taught how to move a load when it is killed only by a store to itself.
5000
5001 int i;
5002 float a[10];
5003
5004 void foo(float scale)
5005 {
5006 for (i=0; i<10; i++)
5007 a[i] *= scale;
5008 }
5009
5010 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
5011 the load out since it's live around the loop, and stored at the bottom
5012 of the loop.
5013
5014 The 'Load Motion' referred to and implemented in this file is
5015 an enhancement to gcse which when using edge based lcm, recognizes
5016 this situation and allows gcse to move the load out of the loop.
5017
5018 Once gcse has hoisted the load, store motion can then push this
5019 load towards the exit, and we end up with no loads or stores of 'i'
5020 in the loop. */
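
/* For illustration only (a sketch of the intended result, not compiler
   code): after load motion hoists the load of 'i' and store motion
   sinks the store, the loop above behaves roughly like

       int reg = 0;
       for (; reg < 10; reg++)
         a[reg] *= scale;
       i = reg;

   with no loads or stores of 'i' left inside the loop body.  */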
5021
5022 static hashval_t
5023 pre_ldst_expr_hash (const void *p)
5024 {
5025 int do_not_record_p = 0;
5026 const struct ls_expr *x = p;
5027 return hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
5028 }
5029
5030 static int
5031 pre_ldst_expr_eq (const void *p1, const void *p2)
5032 {
5033 const struct ls_expr *ptr1 = p1, *ptr2 = p2;
5034 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
5035 }
5036
5037 /* This will search the ldst list for a matching expression. If it
5038 doesn't find one, we create one and initialize it. */
5039
5040 static struct ls_expr *
5041 ldst_entry (rtx x)
5042 {
5043 int do_not_record_p = 0;
5044 struct ls_expr * ptr;
5045 unsigned int hash;
5046 void **slot;
5047 struct ls_expr e;
5048
5049 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
5050 NULL, /*have_reg_qty=*/false);
5051
5052 e.pattern = x;
5053 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
5054 if (*slot)
5055 return (struct ls_expr *)*slot;
5056
5057 ptr = xmalloc (sizeof (struct ls_expr));
5058
5059 ptr->next = pre_ldst_mems;
5060 ptr->expr = NULL;
5061 ptr->pattern = x;
5062 ptr->pattern_regs = NULL_RTX;
5063 ptr->loads = NULL_RTX;
5064 ptr->stores = NULL_RTX;
5065 ptr->reaching_reg = NULL_RTX;
5066 ptr->invalid = 0;
5067 ptr->index = 0;
5068 ptr->hash_index = hash;
5069 pre_ldst_mems = ptr;
5070 *slot = ptr;
5071
5072 return ptr;
5073 }
5074
5075 /* Free up an individual ldst entry. */
5076
5077 static void
5078 free_ldst_entry (struct ls_expr * ptr)
5079 {
5080 free_INSN_LIST_list (& ptr->loads);
5081 free_INSN_LIST_list (& ptr->stores);
5082
5083 free (ptr);
5084 }
5085
5086 /* Free up all memory associated with the ldst list. */
5087
5088 static void
5089 free_ldst_mems (void)
5090 {
5091 if (pre_ldst_table)
5092 htab_delete (pre_ldst_table);
5093 pre_ldst_table = NULL;
5094
5095 while (pre_ldst_mems)
5096 {
5097 struct ls_expr * tmp = pre_ldst_mems;
5098
5099 pre_ldst_mems = pre_ldst_mems->next;
5100
5101 free_ldst_entry (tmp);
5102 }
5103
5104 pre_ldst_mems = NULL;
5105 }
5106
5107 /* Dump debugging info about the ldst list. */
5108
5109 static void
5110 print_ldst_list (FILE * file)
5111 {
5112 struct ls_expr * ptr;
5113
5114 fprintf (file, "LDST list: \n");
5115
5116 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5117 {
5118 fprintf (file, " Pattern (%3d): ", ptr->index);
5119
5120 print_rtl (file, ptr->pattern);
5121
5122 fprintf (file, "\n Loads : ");
5123
5124 if (ptr->loads)
5125 print_rtl (file, ptr->loads);
5126 else
5127 fprintf (file, "(nil)");
5128
5129 fprintf (file, "\n Stores : ");
5130
5131 if (ptr->stores)
5132 print_rtl (file, ptr->stores);
5133 else
5134 fprintf (file, "(nil)");
5135
5136 fprintf (file, "\n\n");
5137 }
5138
5139 fprintf (file, "\n");
5140 }
5141
5142 /* Return X's entry in the ldst list, or NULL if it has none or is invalid. */
5143
5144 static struct ls_expr *
5145 find_rtx_in_ldst (rtx x)
5146 {
5147 struct ls_expr e;
5148 void **slot;
5149 if (!pre_ldst_table)
5150 return NULL;
5151 e.pattern = x;
5152 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
5153 if (!slot || ((struct ls_expr *)*slot)->invalid)
5154 return NULL;
5155 return *slot;
5156 }
5157
5158 /* Assign each element of the list of mems a monotonically increasing value. */
5159
5160 static int
5161 enumerate_ldsts (void)
5162 {
5163 struct ls_expr * ptr;
5164 int n = 0;
5165
5166 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
5167 ptr->index = n++;
5168
5169 return n;
5170 }
5171
5172 /* Return first item in the list. */
5173
5174 static inline struct ls_expr *
5175 first_ls_expr (void)
5176 {
5177 return pre_ldst_mems;
5178 }
5179
5180 /* Return the next item in the list after the specified one. */
5181
5182 static inline struct ls_expr *
5183 next_ls_expr (struct ls_expr * ptr)
5184 {
5185 return ptr->next;
5186 }
5187 \f
5188 /* Load Motion for loads which only kill themselves. */
5189
5190 /* Return true if x is a simple MEM operation, with no registers or
5191 side effects. These are the types of loads we consider for the
5192 ld_motion list, otherwise we let the usual aliasing take care of it. */
5193
5194 static int
5195 simple_mem (rtx x)
5196 {
5197 if (! MEM_P (x))
5198 return 0;
5199
5200 if (MEM_VOLATILE_P (x))
5201 return 0;
5202
5203 if (GET_MODE (x) == BLKmode)
5204 return 0;
5205
5206 /* If we are handling exceptions, we must be careful with memory references
5207 that may trap. If we are not, the behavior is undefined, so we may just
5208 continue. */
5209 if (flag_non_call_exceptions && may_trap_p (x))
5210 return 0;
5211
5212 if (side_effects_p (x))
5213 return 0;
5214
5215 /* Do not consider function arguments passed on the stack. */
5216 if (reg_mentioned_p (stack_pointer_rtx, x))
5217 return 0;
5218
5219 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
5220 return 0;
5221
5222 return 1;
5223 }
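
/* For illustration only (hypothetical RTL, not compiler code):
   (mem:SI (symbol_ref "a")) passes the tests above and is a candidate
   for the ld_motion list, while a volatile MEM, a BLKmode MEM, a MEM
   that may trap under -fnon-call-exceptions, or one mentioning the
   stack pointer (e.g. an argument slot) is rejected.  */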
5224
5225 /* Make sure there isn't a buried reference in this pattern anywhere.
5226 If there is, invalidate the entry for it since we're not capable
5227 of fixing it up just yet. We have to be sure we know about ALL
5228 loads since the aliasing code will allow all entries in the
5229 ld_motion list to not-alias themselves. If we miss a load, we will get
5230 the wrong value since gcse might common it and we won't know to
5231 fix it up. */
5232
5233 static void
5234 invalidate_any_buried_refs (rtx x)
5235 {
5236 const char * fmt;
5237 int i, j;
5238 struct ls_expr * ptr;
5239
5240 /* Invalidate it in the list. */
5241 if (MEM_P (x) && simple_mem (x))
5242 {
5243 ptr = ldst_entry (x);
5244 ptr->invalid = 1;
5245 }
5246
5247 /* Recursively process the insn. */
5248 fmt = GET_RTX_FORMAT (GET_CODE (x));
5249
5250 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5251 {
5252 if (fmt[i] == 'e')
5253 invalidate_any_buried_refs (XEXP (x, i));
5254 else if (fmt[i] == 'E')
5255 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5256 invalidate_any_buried_refs (XVECEXP (x, i, j));
5257 }
5258 }
5259
5260 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
5261 being defined as MEM loads and stores to symbols, with no side effects
5262 and no registers in the expression. For a MEM destination, we also
5263 check that the insn is still valid if we replace the destination with a
5264 REG, as is done in update_ld_motion_stores. If there are any uses/defs
5265 which don't match these criteria, they are invalidated and trimmed out
5266 later. */
5267
5268 static void
5269 compute_ld_motion_mems (void)
5270 {
5271 struct ls_expr * ptr;
5272 basic_block bb;
5273 rtx insn;
5274
5275 pre_ldst_mems = NULL;
5276 pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
5277 pre_ldst_expr_eq, NULL);
5278
5279 FOR_EACH_BB (bb)
5280 {
5281 FOR_BB_INSNS (bb, insn)
5282 {
5283 if (INSN_P (insn))
5284 {
5285 if (GET_CODE (PATTERN (insn)) == SET)
5286 {
5287 rtx src = SET_SRC (PATTERN (insn));
5288 rtx dest = SET_DEST (PATTERN (insn));
5289
5290 /* Check for a simple LOAD... */
5291 if (MEM_P (src) && simple_mem (src))
5292 {
5293 ptr = ldst_entry (src);
5294 if (REG_P (dest))
5295 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
5296 else
5297 ptr->invalid = 1;
5298 }
5299 else
5300 {
5301 /* Make sure there isn't a buried load somewhere. */
5302 invalidate_any_buried_refs (src);
5303 }
5304
5305 /* Check for stores. Don't worry about aliased ones, they
5306 will block any movement we might do later. We only care
5307 about this exact pattern since this is the only
5308 circumstance in which we will ignore the aliasing info. */
5309 if (MEM_P (dest) && simple_mem (dest))
5310 {
5311 ptr = ldst_entry (dest);
5312
5313 if (! MEM_P (src)
5314 && GET_CODE (src) != ASM_OPERANDS
5315 /* Check for REG manually since want_to_gcse_p
5316 returns 0 for all REGs. */
5317 && can_assign_to_reg_p (src))
5318 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
5319 else
5320 ptr->invalid = 1;
5321 }
5322 }
5323 else
5324 invalidate_any_buried_refs (PATTERN (insn));
5325 }
5326 }
5327 }
5328 }
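
/* For illustration only (hypothetical RTL, not compiler code): scanning
   "(set (reg 5) (mem (symbol_ref "i")))" records the insn on its ldst
   entry's loads list, "(set (mem (symbol_ref "i")) (reg 5))" goes on
   the entry's stores list, and any other pattern containing a buried
   simple MEM marks the corresponding entry invalid.  */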
5329
5330 /* Remove any references that have been either invalidated or are not in the
5331 expression list for pre gcse. */
5332
5333 static void
5334 trim_ld_motion_mems (void)
5335 {
5336 struct ls_expr * * last = & pre_ldst_mems;
5337 struct ls_expr * ptr = pre_ldst_mems;
5338
5339 while (ptr != NULL)
5340 {
5341 struct expr * expr;
5342
5343 /* Delete if entry has been made invalid. */
5344 if (! ptr->invalid)
5345 {
5346 /* Delete if we cannot find this mem in the expression list. */
5347 unsigned int hash = ptr->hash_index % expr_hash_table.size;
5348
5349 for (expr = expr_hash_table.table[hash];
5350 expr != NULL;
5351 expr = expr->next_same_hash)
5352 if (expr_equiv_p (expr->expr, ptr->pattern))
5353 break;
5354 }
5355 else
5356 expr = (struct expr *) 0;
5357
5358 if (expr)
5359 {
5360 /* Set the expression field if we are keeping it. */
5361 ptr->expr = expr;
5362 last = & ptr->next;
5363 ptr = ptr->next;
5364 }
5365 else
5366 {
5367 *last = ptr->next;
5368 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
5369 free_ldst_entry (ptr);
5370 ptr = * last;
5371 }
5372 }
5373
5374 /* Show the world what we've found. */
5375 if (gcse_file && pre_ldst_mems != NULL)
5376 print_ldst_list (gcse_file);
5377 }
5378
5379 /* This routine will take an expression which we are replacing with
5380 a reaching register, and update any stores that are needed if
5381 that expression is in the ld_motion list. Stores are updated by
5382 copying their SRC to the reaching register, and then storing
5383 the reaching register into the store location. This keeps the
5384 correct value in the reaching register for the loads. */
5385
5386 static void
5387 update_ld_motion_stores (struct expr * expr)
5388 {
5389 struct ls_expr * mem_ptr;
5390
5391 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
5392 {
5393 /* We can try to find just the REACHED stores, but it shouldn't
5394 matter to set the reaching reg everywhere... some might be
5395 dead and should be eliminated later. */
5396
5397 /* We replace (set mem expr) with (set reg expr) (set mem reg)
5398 where reg is the reaching reg used in the load. We checked in
5399 compute_ld_motion_mems that we can replace (set mem expr) with
5400 (set reg expr) in that insn. */
5401 rtx list = mem_ptr->stores;
5402
5403 for ( ; list != NULL_RTX; list = XEXP (list, 1))
5404 {
5405 rtx insn = XEXP (list, 0);
5406 rtx pat = PATTERN (insn);
5407 rtx src = SET_SRC (pat);
5408 rtx reg = expr->reaching_reg;
5409 rtx copy, new;
5410
5411 /* If we've already copied it, continue. */
5412 if (expr->reaching_reg == src)
5413 continue;
5414
5415 if (gcse_file)
5416 {
5417 fprintf (gcse_file, "PRE: store updated with reaching reg ");
5418 print_rtl (gcse_file, expr->reaching_reg);
5419 fprintf (gcse_file, ":\n ");
5420 print_inline_rtx (gcse_file, insn, 8);
5421 fprintf (gcse_file, "\n");
5422 }
5423
5424 copy = gen_move_insn ( reg, copy_rtx (SET_SRC (pat)));
5425 new = emit_insn_before (copy, insn);
5426 record_one_set (REGNO (reg), new);
5427 SET_SRC (pat) = reg;
5428
5429 /* Un-recognize this pattern since it's probably different now. */
5430 INSN_CODE (insn) = -1;
5431 gcse_create_count++;
5432 }
5433 }
5434 }
5435 \f
5436 /* Store motion code. */
5437
5438 #define ANTIC_STORE_LIST(x) ((x)->loads)
5439 #define AVAIL_STORE_LIST(x) ((x)->stores)
5440 #define LAST_AVAIL_CHECK_FAILURE(x) ((x)->reaching_reg)
5441
5442 /* This is used to communicate the target array we want to use in the
5443 reg_set_info routine when called via the note_stores mechanism. */
5444 static int * regvec;
5445
5446 /* And current insn, for the same routine. */
5447 static rtx compute_store_table_current_insn;
5448
5449 /* Used in computing the reverse edge graph bit vectors. */
5450 static sbitmap * st_antloc;
5451
5452 /* Global holding the number of store expressions we are dealing with. */
5453 static int num_stores;
5454
5455 /* Checks to see if we need to mark a register set.  Called from
5456    note_stores.  */
5457
5458 static void
5459 reg_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED,
5460 void *data)
5461 {
5462 sbitmap bb_reg = data;
5463
5464 if (GET_CODE (dest) == SUBREG)
5465 dest = SUBREG_REG (dest);
5466
5467 if (REG_P (dest))
5468 {
5469 regvec[REGNO (dest)] = INSN_UID (compute_store_table_current_insn);
5470 if (bb_reg)
5471 SET_BIT (bb_reg, REGNO (dest));
5472 }
5473 }
5474
5475 /* Clear any mark that says that this insn sets dest. Called from
5476 note_stores. */
5477
5478 static void
5479 reg_clear_last_set (rtx dest, rtx setter ATTRIBUTE_UNUSED,
5480 void *data)
5481 {
5482 int *dead_vec = data;
5483
5484 if (GET_CODE (dest) == SUBREG)
5485 dest = SUBREG_REG (dest);
5486
5487 if (REG_P (dest) &&
5488 dead_vec[REGNO (dest)] == INSN_UID (compute_store_table_current_insn))
5489 dead_vec[REGNO (dest)] = 0;
5490 }
5491
5492 /* Return false if some of the registers in list X are killed due to the
5493    registers marked as set in REGS_SET.  */
5494
5495 static bool
5496 store_ops_ok (rtx x, int *regs_set)
5497 {
5498 rtx reg;
5499
5500 for (; x; x = XEXP (x, 1))
5501 {
5502 reg = XEXP (x, 0);
5503       if (regs_set[REGNO (reg)])
5504 return false;
5505 }
5506
5507 return true;
5508 }
5509
5510 /* Returns a list of registers mentioned in X. */
5511 static rtx
5512 extract_mentioned_regs (rtx x)
5513 {
5514 return extract_mentioned_regs_helper (x, NULL_RTX);
5515 }
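
/* For instance (purely illustrative): given (mem (plus (reg 1) (reg 2))),
   the returned EXPR_LIST mentions reg 1 and reg 2; the helper traverses
   MEM addresses, while constants, labels and similar leaves contribute
   nothing.  */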
5516
5517 /* Helper for extract_mentioned_regs; ACCUM is used to accumulate used
5518 registers. */
5519 static rtx
5520 extract_mentioned_regs_helper (rtx x, rtx accum)
5521 {
5522 int i;
5523 enum rtx_code code;
5524 const char * fmt;
5525
5526 /* Repeat is used to turn tail-recursion into iteration. */
5527 repeat:
5528
5529 if (x == 0)
5530 return accum;
5531
5532 code = GET_CODE (x);
5533 switch (code)
5534 {
5535 case REG:
5536 return alloc_EXPR_LIST (0, x, accum);
5537
5538 case MEM:
5539 x = XEXP (x, 0);
5540 goto repeat;
5541
5542 case PRE_DEC:
5543 case PRE_INC:
5544 case POST_DEC:
5545 case POST_INC:
5546 /* We do not run this function with arguments having side effects. */
5547 gcc_unreachable ();
5548
5549 case PC:
5550 case CC0: /*FIXME*/
5551 case CONST:
5552 case CONST_INT:
5553 case CONST_DOUBLE:
5554 case CONST_VECTOR:
5555 case SYMBOL_REF:
5556 case LABEL_REF:
5557 case ADDR_VEC:
5558 case ADDR_DIFF_VEC:
5559 return accum;
5560
5561 default:
5562 break;
5563 }
5564
5565 i = GET_RTX_LENGTH (code) - 1;
5566 fmt = GET_RTX_FORMAT (code);
5567
5568 for (; i >= 0; i--)
5569 {
5570 if (fmt[i] == 'e')
5571 {
5572 rtx tem = XEXP (x, i);
5573
5574 /* If we are about to do the last recursive call
5575 needed at this level, change it into iteration. */
5576 if (i == 0)
5577 {
5578 x = tem;
5579 goto repeat;
5580 }
5581
5582 accum = extract_mentioned_regs_helper (tem, accum);
5583 }
5584 else if (fmt[i] == 'E')
5585 {
5586 int j;
5587
5588 for (j = 0; j < XVECLEN (x, i); j++)
5589 accum = extract_mentioned_regs_helper (XVECEXP (x, i, j), accum);
5590 }
5591 }
5592
5593 return accum;
5594 }
5595
5596 /* Determine whether INSN is a MEM store pattern that we will consider moving.
5597 REGS_SET_BEFORE is bitmap of registers set before (and including) the
5598 current insn, REGS_SET_AFTER is bitmap of registers set after (and
5599 including) the insn in this basic block. We must be passing through BB from
5600 head to end, as we are using this fact to speed things up.
5601
5602 The results are stored this way:
5603
5604 -- the first anticipatable expression is added into ANTIC_STORE_LIST
5605 -- if the processed expression is not anticipatable, NULL_RTX is added
5606       there instead, so that we can use it as an indicator that no further
5607 expression of this type may be anticipatable
5608    -- if the expression is available, it is added at the head of AVAIL_STORE_LIST;
5609       consequently, all entries but this head are dead and may be deleted.
5610    -- if the expression is not available, the insn that causes it to fail to be
5611       available is stored in reaching_reg.
5612
5613    Things are complicated a bit by the fact that there may already be stores
5614    to the same MEM from other blocks; also, the caller must take care of the
5615    necessary cleanup of the temporary markers after the end of the basic block.
5616 */
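
/* As a concrete illustration (not an original comment): if a block contains
   two moveable stores S1 and then S2 to the same MEM, and neither is killed,
   then after processing the block S1 sits at the head of ANTIC_STORE_LIST
   (the first anticipatable store) while S2 sits at the head of
   AVAIL_STORE_LIST (the last available one); the earlier entry S1 on the
   available list is then redundant, as described above.  */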
5617
5618 static void
5619 find_moveable_store (rtx insn, int *regs_set_before, int *regs_set_after)
5620 {
5621 struct ls_expr * ptr;
5622 rtx dest, set, tmp;
5623 int check_anticipatable, check_available;
5624 basic_block bb = BLOCK_FOR_INSN (insn);
5625
5626 set = single_set (insn);
5627 if (!set)
5628 return;
5629
5630 dest = SET_DEST (set);
5631
5632 if (! MEM_P (dest) || MEM_VOLATILE_P (dest)
5633 || GET_MODE (dest) == BLKmode)
5634 return;
5635
5636 if (side_effects_p (dest))
5637 return;
5638
5639 /* If we are handling exceptions, we must be careful with memory references
5640 that may trap. If we are not, the behavior is undefined, so we may just
5641 continue. */
5642 if (flag_non_call_exceptions && may_trap_p (dest))
5643 return;
5644
5645 /* Even if the destination cannot trap, the source may. In this case we'd
5646 need to handle updating the REG_EH_REGION note. */
5647 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
5648 return;
5649
5650 ptr = ldst_entry (dest);
5651 if (!ptr->pattern_regs)
5652 ptr->pattern_regs = extract_mentioned_regs (dest);
5653
5654 /* Do not check for anticipatability if we either found one anticipatable
5655 store already, or tested for one and found out that it was killed. */
5656 check_anticipatable = 0;
5657 if (!ANTIC_STORE_LIST (ptr))
5658 check_anticipatable = 1;
5659 else
5660 {
5661 tmp = XEXP (ANTIC_STORE_LIST (ptr), 0);
5662 if (tmp != NULL_RTX
5663 && BLOCK_FOR_INSN (tmp) != bb)
5664 check_anticipatable = 1;
5665 }
5666 if (check_anticipatable)
5667 {
5668 if (store_killed_before (dest, ptr->pattern_regs, insn, bb, regs_set_before))
5669 tmp = NULL_RTX;
5670 else
5671 tmp = insn;
5672 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (tmp,
5673 ANTIC_STORE_LIST (ptr));
5674 }
5675
5676   /* It is not necessary to check whether the store is available if we did
5677 it successfully before; if we failed before, do not bother to check
5678 until we reach the insn that caused us to fail. */
5679 check_available = 0;
5680 if (!AVAIL_STORE_LIST (ptr))
5681 check_available = 1;
5682 else
5683 {
5684 tmp = XEXP (AVAIL_STORE_LIST (ptr), 0);
5685 if (BLOCK_FOR_INSN (tmp) != bb)
5686 check_available = 1;
5687 }
5688 if (check_available)
5689 {
5690       /* Check whether we have already reached the insn at which the check
5691          failed last time.  */
5692 if (LAST_AVAIL_CHECK_FAILURE (ptr))
5693 {
5694 for (tmp = BB_END (bb);
5695 tmp != insn && tmp != LAST_AVAIL_CHECK_FAILURE (ptr);
5696 tmp = PREV_INSN (tmp))
5697 continue;
5698 if (tmp == insn)
5699 check_available = 0;
5700 }
5701 else
5702 check_available = store_killed_after (dest, ptr->pattern_regs, insn,
5703 bb, regs_set_after,
5704 &LAST_AVAIL_CHECK_FAILURE (ptr));
5705 }
5706 if (!check_available)
5707 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn, AVAIL_STORE_LIST (ptr));
5708 }
5709
5710 /* Find available and anticipatable stores. */
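
/* Implementation note: each block is scanned twice.  The first scan records
   in last_set_in (and in the reg_set_in_block bitmaps) the UID of the last
   insn in the block setting each register.  The second scan maintains
   already_set (registers set at or before the current insn) and prunes
   last_set_in down to registers set at or after it, so find_moveable_store
   can answer its kill queries with simple array lookups.  */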
5711
5712 static int
5713 compute_store_table (void)
5714 {
5715 int ret;
5716 basic_block bb;
5717 unsigned regno;
5718 rtx insn, pat, tmp;
5719 int *last_set_in, *already_set;
5720 struct ls_expr * ptr, **prev_next_ptr_ptr;
5721
5722 max_gcse_regno = max_reg_num ();
5723
5724 reg_set_in_block = sbitmap_vector_alloc (last_basic_block,
5725 max_gcse_regno);
5726 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
5727 pre_ldst_mems = 0;
5728 pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
5729 pre_ldst_expr_eq, NULL);
5730 last_set_in = xcalloc (max_gcse_regno, sizeof (int));
5731 already_set = xmalloc (sizeof (int) * max_gcse_regno);
5732
5733 /* Find all the stores we care about. */
5734 FOR_EACH_BB (bb)
5735 {
5736 /* First compute the registers set in this block. */
5737 regvec = last_set_in;
5738
5739 FOR_BB_INSNS (bb, insn)
5740 {
5741 if (! INSN_P (insn))
5742 continue;
5743
5744 if (CALL_P (insn))
5745 {
5746 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5747 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
5748 {
5749 last_set_in[regno] = INSN_UID (insn);
5750 SET_BIT (reg_set_in_block[bb->index], regno);
5751 }
5752 }
5753
5754 pat = PATTERN (insn);
5755 compute_store_table_current_insn = insn;
5756 note_stores (pat, reg_set_info, reg_set_in_block[bb->index]);
5757 }
5758
5759 /* Now find the stores. */
5760 memset (already_set, 0, sizeof (int) * max_gcse_regno);
5761 regvec = already_set;
5762 FOR_BB_INSNS (bb, insn)
5763 {
5764 if (! INSN_P (insn))
5765 continue;
5766
5767 if (CALL_P (insn))
5768 {
5769 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5770 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
5771 already_set[regno] = 1;
5772 }
5773
5774 pat = PATTERN (insn);
5775 note_stores (pat, reg_set_info, NULL);
5776
5777 /* Now that we've marked regs, look for stores. */
5778 find_moveable_store (insn, already_set, last_set_in);
5779
5780 /* Unmark regs that are no longer set. */
5781 compute_store_table_current_insn = insn;
5782 note_stores (pat, reg_clear_last_set, last_set_in);
5783 if (CALL_P (insn))
5784 {
5785 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5786 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)
5787 && last_set_in[regno] == INSN_UID (insn))
5788 last_set_in[regno] = 0;
5789 }
5790 }
5791
5792 #ifdef ENABLE_CHECKING
5793 /* last_set_in should now be all-zero. */
5794 for (regno = 0; regno < max_gcse_regno; regno++)
5795 gcc_assert (!last_set_in[regno]);
5796 #endif
5797
5798 /* Clear temporary marks. */
5799 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
5800 {
5801          LAST_AVAIL_CHECK_FAILURE (ptr) = NULL_RTX;
5802 if (ANTIC_STORE_LIST (ptr)
5803 && (tmp = XEXP (ANTIC_STORE_LIST (ptr), 0)) == NULL_RTX)
5804 ANTIC_STORE_LIST (ptr) = XEXP (ANTIC_STORE_LIST (ptr), 1);
5805 }
5806 }
5807
5808 /* Remove the stores that are not available anywhere, as there will
5809 be no opportunity to optimize them. */
5810 for (ptr = pre_ldst_mems, prev_next_ptr_ptr = &pre_ldst_mems;
5811 ptr != NULL;
5812 ptr = *prev_next_ptr_ptr)
5813 {
5814 if (!AVAIL_STORE_LIST (ptr))
5815 {
5816 *prev_next_ptr_ptr = ptr->next;
5817 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
5818 free_ldst_entry (ptr);
5819 }
5820 else
5821 prev_next_ptr_ptr = &ptr->next;
5822 }
5823
5824 ret = enumerate_ldsts ();
5825
5826 if (gcse_file)
5827 {
5828 fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
5829 print_ldst_list (gcse_file);
5830 }
5831
5832 free (last_set_in);
5833 free (already_set);
5834 return ret;
5835 }
5836
5837 /* Check to see if the load X is aliased with STORE_PATTERN.
5838 AFTER is true if we are checking the case when STORE_PATTERN occurs
5839    after X.  */
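
/* Dependence terminology for the test below: if the store occurs after the
   load, the hazard is a write following a read of the same location (an
   anti dependence); if the store occurs before the load, the hazard is the
   load reading what the store wrote (a true dependence).  */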
5840
5841 static bool
5842 load_kills_store (rtx x, rtx store_pattern, int after)
5843 {
5844 if (after)
5845 return anti_dependence (x, store_pattern);
5846 else
5847 return true_dependence (store_pattern, GET_MODE (store_pattern), x,
5848 rtx_addr_varies_p);
5849 }
5850
5851 /* Go through the entire insn X, looking for any loads which might alias
5852 STORE_PATTERN. Return true if found.
5853 AFTER is true if we are checking the case when STORE_PATTERN occurs
5854 after the insn X. */
5855
5856 static bool
5857 find_loads (rtx x, rtx store_pattern, int after)
5858 {
5859 const char * fmt;
5860 int i, j;
5861 int ret = false;
5862
5863 if (!x)
5864 return false;
5865
5866 if (GET_CODE (x) == SET)
5867 x = SET_SRC (x);
5868
5869 if (MEM_P (x))
5870 {
5871 if (load_kills_store (x, store_pattern, after))
5872 return true;
5873 }
5874
5875 /* Recursively process the insn. */
5876 fmt = GET_RTX_FORMAT (GET_CODE (x));
5877
5878 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
5879 {
5880 if (fmt[i] == 'e')
5881 ret |= find_loads (XEXP (x, i), store_pattern, after);
5882 else if (fmt[i] == 'E')
5883 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5884 ret |= find_loads (XVECEXP (x, i, j), store_pattern, after);
5885 }
5886 return ret;
5887 }
5888
5889 /* Check if INSN kills the store pattern X (is aliased with it).
5890 AFTER is true if we are checking the case when store X occurs
5891 after the insn. Return true if it does. */
5892
5893 static bool
5894 store_killed_in_insn (rtx x, rtx x_regs, rtx insn, int after)
5895 {
5896 rtx reg, base, note;
5897
5898 if (!INSN_P (insn))
5899 return false;
5900
5901 if (CALL_P (insn))
5902 {
5903       /* A normal or pure call might read from the pattern,
5904 but a const call will not. */
5905 if (! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn))
5906 return true;
5907
5908       /* But even a const call reads its parameters.  Check whether the
5909          base of some of the registers used in the mem is the stack pointer.  */
5910 for (reg = x_regs; reg; reg = XEXP (reg, 1))
5911 {
5912 base = find_base_term (XEXP (reg, 0));
5913 if (!base
5914 || (GET_CODE (base) == ADDRESS
5915 && GET_MODE (base) == Pmode
5916 && XEXP (base, 0) == stack_pointer_rtx))
5917 return true;
5918 }
5919
5920 return false;
5921 }
5922
5923 if (GET_CODE (PATTERN (insn)) == SET)
5924 {
5925 rtx pat = PATTERN (insn);
5926 rtx dest = SET_DEST (pat);
5927
5928 if (GET_CODE (dest) == ZERO_EXTRACT)
5929 dest = XEXP (dest, 0);
5930
5931 /* Check for memory stores to aliased objects. */
5932 if (MEM_P (dest)
5933 && !expr_equiv_p (dest, x))
5934 {
5935 if (after)
5936 {
5937 if (output_dependence (dest, x))
5938 return true;
5939 }
5940 else
5941 {
5942 if (output_dependence (x, dest))
5943 return true;
5944 }
5945 }
5946 if (find_loads (SET_SRC (pat), x, after))
5947 return true;
5948 }
5949 else if (find_loads (PATTERN (insn), x, after))
5950 return true;
5951
5952 /* If this insn has a REG_EQUAL or REG_EQUIV note referencing a memory
5953 location aliased with X, then this insn kills X. */
5954 note = find_reg_equal_equiv_note (insn);
5955 if (! note)
5956 return false;
5957 note = XEXP (note, 0);
5958
5959 /* However, if the note represents a must alias rather than a may
5960 alias relationship, then it does not kill X. */
5961 if (expr_equiv_p (note, x))
5962 return false;
5963
5964 /* See if there are any aliased loads in the note. */
5965 return find_loads (note, x, after);
5966 }
5967
5968 /* Returns true if the expression X is loaded or clobbered on or after INSN
5969 within basic block BB. REGS_SET_AFTER is bitmap of registers set in
5970    or after the insn.  X_REGS is the list of registers mentioned in X.  If the
5971    store is killed, return in FAIL_INSN the last insn in which that occurs.  */
5972
5973 static bool
5974 store_killed_after (rtx x, rtx x_regs, rtx insn, basic_block bb,
5975 int *regs_set_after, rtx *fail_insn)
5976 {
5977 rtx last = BB_END (bb), act;
5978
5979 if (!store_ops_ok (x_regs, regs_set_after))
5980 {
5981 /* We do not know where it will happen. */
5982 if (fail_insn)
5983 *fail_insn = NULL_RTX;
5984 return true;
5985 }
5986
5987 /* Scan from the end, so that fail_insn is determined correctly. */
5988 for (act = last; act != PREV_INSN (insn); act = PREV_INSN (act))
5989 if (store_killed_in_insn (x, x_regs, act, false))
5990 {
5991 if (fail_insn)
5992 *fail_insn = act;
5993 return true;
5994 }
5995
5996 return false;
5997 }
5998
5999 /* Returns true if the expression X is loaded or clobbered on or before INSN
6000    within basic block BB.  X_REGS is the list of registers mentioned in X.
6001 REGS_SET_BEFORE is bitmap of registers set before or in this insn. */
6002 static bool
6003 store_killed_before (rtx x, rtx x_regs, rtx insn, basic_block bb,
6004 int *regs_set_before)
6005 {
6006 rtx first = BB_HEAD (bb);
6007
6008 if (!store_ops_ok (x_regs, regs_set_before))
6009 return true;
6010
6011 for ( ; insn != PREV_INSN (first); insn = PREV_INSN (insn))
6012 if (store_killed_in_insn (x, x_regs, insn, true))
6013 return true;
6014
6015 return false;
6016 }
6017
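/* A brief map from these vectors to the LCM problem solved later (see
   store_motion, which hands them to pre_edge_rev_lcm): ae_gen marks stores
   still available at the end of a block, st_antloc marks stores
   anticipatable at the start of a block, ae_kill marks blocks that kill a
   store, and transp marks blocks the store pattern passes through
   untouched.  */
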
6018 /* Fill in the global available, anticipatable, transparent and kill
6019    vectors, based on the lists of available and anticipatable stores.  */
6020 static void
6021 build_store_vectors (void)
6022 {
6023 basic_block bb;
6024 int *regs_set_in_block;
6025 rtx insn, st;
6026 struct ls_expr * ptr;
6027 unsigned regno;
6028
6029 /* Build the gen_vector. This is any store in the table which is not killed
6030 by aliasing later in its block. */
6031 ae_gen = sbitmap_vector_alloc (last_basic_block, num_stores);
6032 sbitmap_vector_zero (ae_gen, last_basic_block);
6033
6034 st_antloc = sbitmap_vector_alloc (last_basic_block, num_stores);
6035 sbitmap_vector_zero (st_antloc, last_basic_block);
6036
6037 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6038 {
6039 for (st = AVAIL_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
6040 {
6041 insn = XEXP (st, 0);
6042 bb = BLOCK_FOR_INSN (insn);
6043
6044 /* If we've already seen an available expression in this block,
6045             we can delete this one (it occurs earlier in the block).  We'll
6046 copy the SRC expression to an unused register in case there
6047 are any side effects. */
6048 if (TEST_BIT (ae_gen[bb->index], ptr->index))
6049 {
6050 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
6051 if (gcse_file)
6052 fprintf (gcse_file, "Removing redundant store:\n");
6053 replace_store_insn (r, XEXP (st, 0), bb, ptr);
6054 continue;
6055 }
6056 SET_BIT (ae_gen[bb->index], ptr->index);
6057 }
6058
6059 for (st = ANTIC_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
6060 {
6061 insn = XEXP (st, 0);
6062 bb = BLOCK_FOR_INSN (insn);
6063 SET_BIT (st_antloc[bb->index], ptr->index);
6064 }
6065 }
6066
6067 ae_kill = sbitmap_vector_alloc (last_basic_block, num_stores);
6068 sbitmap_vector_zero (ae_kill, last_basic_block);
6069
6070 transp = sbitmap_vector_alloc (last_basic_block, num_stores);
6071 sbitmap_vector_zero (transp, last_basic_block);
6072 regs_set_in_block = xmalloc (sizeof (int) * max_gcse_regno);
6073
6074 FOR_EACH_BB (bb)
6075 {
6076 for (regno = 0; regno < max_gcse_regno; regno++)
6077 regs_set_in_block[regno] = TEST_BIT (reg_set_in_block[bb->index], regno);
6078
6079 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6080 {
6081 if (store_killed_after (ptr->pattern, ptr->pattern_regs, BB_HEAD (bb),
6082 bb, regs_set_in_block, NULL))
6083 {
6084 /* It should not be necessary to consider the expression
6085 killed if it is both anticipatable and available. */
6086 if (!TEST_BIT (st_antloc[bb->index], ptr->index)
6087 || !TEST_BIT (ae_gen[bb->index], ptr->index))
6088 SET_BIT (ae_kill[bb->index], ptr->index);
6089 }
6090 else
6091 SET_BIT (transp[bb->index], ptr->index);
6092 }
6093 }
6094
6095 free (regs_set_in_block);
6096
6097 if (gcse_file)
6098 {
6099 dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
6100 dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
6101 dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
6102 dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
6103 }
6104 }
6105
6106 /* Insert an instruction at the beginning of a basic block, and update
6107 the BB_HEAD if needed. */
6108
6109 static void
6110 insert_insn_start_bb (rtx insn, basic_block bb)
6111 {
6112 /* Insert at start of successor block. */
6113 rtx prev = PREV_INSN (BB_HEAD (bb));
6114 rtx before = BB_HEAD (bb);
6115 while (before != 0)
6116 {
6117 if (! LABEL_P (before)
6118 && (! NOTE_P (before)
6119 || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
6120 break;
6121 prev = before;
6122 if (prev == BB_END (bb))
6123 break;
6124 before = NEXT_INSN (before);
6125 }
6126
6127 insn = emit_insn_after_noloc (insn, prev);
6128
6129 if (gcse_file)
6130 {
6131 fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
6132 bb->index);
6133 print_inline_rtx (gcse_file, insn, 6);
6134 fprintf (gcse_file, "\n");
6135 }
6136 }
6137
6138 /* This routine will insert a store on an edge. EXPR is the ldst entry for
6139 the memory reference, and E is the edge to insert it on. Returns nonzero
6140 if an edge insertion was performed. */
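
/* A note inferred from the code below (not an original comment): when LCM
   requests an insertion on every incoming edge of a block, performing a
   single insertion at the start of that block is equivalent, and avoids
   splitting each edge with insert_insn_on_edge.  */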
6141
6142 static int
6143 insert_store (struct ls_expr * expr, edge e)
6144 {
6145 rtx reg, insn;
6146 basic_block bb;
6147 edge tmp;
6148 edge_iterator ei;
6149
6150   /* We did all the deletes before this insert, so if we didn't delete a
6151 store, then we haven't set the reaching reg yet either. */
6152 if (expr->reaching_reg == NULL_RTX)
6153 return 0;
6154
6155 if (e->flags & EDGE_FAKE)
6156 return 0;
6157
6158 reg = expr->reaching_reg;
6159 insn = gen_move_insn (copy_rtx (expr->pattern), reg);
6160
6161 /* If we are inserting this expression on ALL predecessor edges of a BB,
6162      insert it at the start of the BB instead, and reset the insert bits on
6163      those edges so we don't try to insert it on each of them separately.  */
6164 bb = e->dest;
6165 FOR_EACH_EDGE (tmp, ei, e->dest->preds)
6166 if (!(tmp->flags & EDGE_FAKE))
6167 {
6168 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6169
6170 gcc_assert (index != EDGE_INDEX_NO_EDGE);
6171 if (! TEST_BIT (pre_insert_map[index], expr->index))
6172 break;
6173 }
6174
6175 /* If tmp is NULL, we found an insertion on every edge, blank the
6176 insertion vector for these edges, and insert at the start of the BB. */
6177 if (!tmp && bb != EXIT_BLOCK_PTR)
6178 {
6179 FOR_EACH_EDGE (tmp, ei, e->dest->preds)
6180 {
6181 int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6182 RESET_BIT (pre_insert_map[index], expr->index);
6183 }
6184 insert_insn_start_bb (insn, bb);
6185 return 0;
6186 }
6187
6188 /* We can't put stores in the front of blocks pointed to by abnormal
6189      edges since that may put a store where there wasn't one before.  */
6190 gcc_assert (!(e->flags & EDGE_ABNORMAL));
6191
6192 insert_insn_on_edge (insn, e);
6193
6194 if (gcse_file)
6195 {
6196 fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
6197 e->src->index, e->dest->index);
6198 print_inline_rtx (gcse_file, insn, 6);
6199 fprintf (gcse_file, "\n");
6200 }
6201
6202 return 1;
6203 }
6204
6205 /* Remove any REG_EQUAL or REG_EQUIV notes containing a reference to the
6206 memory location in SMEXPR set in basic block BB.
6207
6208 This could be rather expensive. */
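
/* The traversal below is an iterative depth-first search over the CFG,
   driven by an explicit stack of edge iterators and bounded by the
   "visited" bitmap.  In blocks where the store is anticipatable, only the
   insns before the store on the ANTIC_STORE_LIST are scanned, presumably
   because that store makes the noted equivalence accurate again.  */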
6209
6210 static void
6211 remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr)
6212 {
6213 edge_iterator *stack, ei;
6214 int sp;
6215 edge act;
6216 sbitmap visited = sbitmap_alloc (last_basic_block);
6217 rtx last, insn, note;
6218 rtx mem = smexpr->pattern;
6219
6220 stack = xmalloc (sizeof (edge_iterator) * n_basic_blocks);
6221 sp = 0;
6222 ei = ei_start (bb->succs);
6223
6224 sbitmap_zero (visited);
6225
6226 act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
6227 while (1)
6228 {
6229 if (!act)
6230 {
6231 if (!sp)
6232 {
6233 free (stack);
6234 sbitmap_free (visited);
6235 return;
6236 }
6237 act = ei_edge (stack[--sp]);
6238 }
6239 bb = act->dest;
6240
6241 if (bb == EXIT_BLOCK_PTR
6242 || TEST_BIT (visited, bb->index))
6243 {
6244 if (!ei_end_p (ei))
6245 ei_next (&ei);
6246 act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
6247 continue;
6248 }
6249 SET_BIT (visited, bb->index);
6250
6251 if (TEST_BIT (st_antloc[bb->index], smexpr->index))
6252 {
6253 for (last = ANTIC_STORE_LIST (smexpr);
6254 BLOCK_FOR_INSN (XEXP (last, 0)) != bb;
6255 last = XEXP (last, 1))
6256 continue;
6257 last = XEXP (last, 0);
6258 }
6259 else
6260 last = NEXT_INSN (BB_END (bb));
6261
6262 for (insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
6263 if (INSN_P (insn))
6264 {
6265 note = find_reg_equal_equiv_note (insn);
6266 if (!note || !expr_equiv_p (XEXP (note, 0), mem))
6267 continue;
6268
6269 if (gcse_file)
6270 fprintf (gcse_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
6271 INSN_UID (insn));
6272 remove_note (insn, note);
6273 }
6274
6275 if (!ei_end_p (ei))
6276 ei_next (&ei);
6277 act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
6278
6279 if (EDGE_COUNT (bb->succs) > 0)
6280 {
6281 if (act)
6282 stack[sp++] = ei;
6283 ei = ei_start (bb->succs);
6284 act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
6285 }
6286 }
6287 }
6288
6289 /* This routine will replace a store with a SET to a specified register. */
6290
6291 static void
6292 replace_store_insn (rtx reg, rtx del, basic_block bb, struct ls_expr *smexpr)
6293 {
6294 rtx insn, mem, note, set, ptr, pair;
6295
6296 mem = smexpr->pattern;
6297 insn = gen_move_insn (reg, SET_SRC (single_set (del)));
6298 insn = emit_insn_after (insn, del);
6299
6300 if (gcse_file)
6301 {
6302 fprintf (gcse_file,
6303 "STORE_MOTION delete insn in BB %d:\n ", bb->index);
6304 print_inline_rtx (gcse_file, del, 6);
6305 fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
6306 print_inline_rtx (gcse_file, insn, 6);
6307 fprintf (gcse_file, "\n");
6308 }
6309
6310 for (ptr = ANTIC_STORE_LIST (smexpr); ptr; ptr = XEXP (ptr, 1))
6311 if (XEXP (ptr, 0) == del)
6312 {
6313 XEXP (ptr, 0) = insn;
6314 break;
6315 }
6316
6317 /* Move the notes from the deleted insn to its replacement, and patch
6318 up the LIBCALL notes. */
6319 REG_NOTES (insn) = REG_NOTES (del);
6320
6321 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
6322 if (note)
6323 {
6324 pair = XEXP (note, 0);
6325 note = find_reg_note (pair, REG_LIBCALL, NULL_RTX);
6326 XEXP (note, 0) = insn;
6327 }
6328 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
6329 if (note)
6330 {
6331 pair = XEXP (note, 0);
6332 note = find_reg_note (pair, REG_RETVAL, NULL_RTX);
6333 XEXP (note, 0) = insn;
6334 }
6335
6336 delete_insn (del);
6337
6338   /* Now we must handle REG_EQUAL notes whose contents are equal to the mem;
6339      they are no longer accurate if they are reached by this
6340      definition, so drop them.  */
6341 for (; insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn))
6342 if (INSN_P (insn))
6343 {
6344 set = single_set (insn);
6345 if (!set)
6346 continue;
6347 if (expr_equiv_p (SET_DEST (set), mem))
6348 return;
6349 note = find_reg_equal_equiv_note (insn);
6350 if (!note || !expr_equiv_p (XEXP (note, 0), mem))
6351 continue;
6352
6353 if (gcse_file)
6354 fprintf (gcse_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
6355 INSN_UID (insn));
6356 remove_note (insn, note);
6357 }
6358 remove_reachable_equiv_notes (bb, smexpr);
6359 }
6360
6361
6362 /* Delete a store, but copy the value that would have been stored into
6363 the reaching_reg for later storing. */
6364
6365 static void
6366 delete_store (struct ls_expr * expr, basic_block bb)
6367 {
6368 rtx reg, i, del;
6369
6370 if (expr->reaching_reg == NULL_RTX)
6371 expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));
6372
6373 reg = expr->reaching_reg;
6374
6375 for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
6376 {
6377 del = XEXP (i, 0);
6378 if (BLOCK_FOR_INSN (del) == bb)
6379 {
6380 /* We know there is only one since we deleted redundant
6381 ones during the available computation. */
6382 replace_store_insn (reg, del, bb, expr);
6383 break;
6384 }
6385 }
6386 }
6387
6388 /* Free memory used by store motion. */
6389
6390 static void
6391 free_store_memory (void)
6392 {
6393 free_ldst_mems ();
6394
6395 if (ae_gen)
6396 sbitmap_vector_free (ae_gen);
6397 if (ae_kill)
6398 sbitmap_vector_free (ae_kill);
6399 if (transp)
6400 sbitmap_vector_free (transp);
6401 if (st_antloc)
6402 sbitmap_vector_free (st_antloc);
6403 if (pre_insert_map)
6404 sbitmap_vector_free (pre_insert_map);
6405 if (pre_delete_map)
6406 sbitmap_vector_free (pre_delete_map);
6407 if (reg_set_in_block)
6408 sbitmap_vector_free (reg_set_in_block);
6409
6410 ae_gen = ae_kill = transp = st_antloc = NULL;
6411 pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
6412 }
6413
6414 /* Perform store motion. Much like gcse, except we move expressions the
6415 other way by looking at the flowgraph in reverse. */
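
/* In outline: compute_store_table collects the candidate stores,
   build_store_vectors prepares the local dataflow properties, the LCM
   solver pre_edge_rev_lcm decides where stores may be deleted and where
   they must be re-inserted, and delete_store/insert_store carry out the
   edits before commit_edge_insertions materializes the edge insertions.  */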
6416
6417 static void
6418 store_motion (void)
6419 {
6420 basic_block bb;
6421 int x;
6422 struct ls_expr * ptr;
6423 int update_flow = 0;
6424
6425 if (gcse_file)
6426 {
6427 fprintf (gcse_file, "before store motion\n");
6428 print_rtl (gcse_file, get_insns ());
6429 }
6430
6431 init_alias_analysis ();
6432
6433 /* Find all the available and anticipatable stores. */
6434 num_stores = compute_store_table ();
6435 if (num_stores == 0)
6436 {
6437 htab_delete (pre_ldst_table);
6438 pre_ldst_table = NULL;
6439 sbitmap_vector_free (reg_set_in_block);
6440 end_alias_analysis ();
6441 return;
6442 }
6443
6444 /* Now compute kill & transp vectors. */
6445 build_store_vectors ();
6446 add_noreturn_fake_exit_edges ();
6447 connect_infinite_loops_to_exit ();
6448
6449 edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
6450 st_antloc, ae_kill, &pre_insert_map,
6451 &pre_delete_map);
6452
6453 /* Now we want to insert the new stores which are going to be needed. */
6454 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6455 {
6456 /* If any of the edges we have above are abnormal, we can't move this
6457 store. */
6458 for (x = NUM_EDGES (edge_list) - 1; x >= 0; x--)
6459 if (TEST_BIT (pre_insert_map[x], ptr->index)
6460 && (INDEX_EDGE (edge_list, x)->flags & EDGE_ABNORMAL))
6461 break;
6462
6463 if (x >= 0)
6464 {
6465 if (gcse_file != NULL)
6466 fprintf (gcse_file,
6467 "Can't replace store %d: abnormal edge from %d to %d\n",
6468 ptr->index, INDEX_EDGE (edge_list, x)->src->index,
6469 INDEX_EDGE (edge_list, x)->dest->index);
6470 continue;
6471 }
6472
6473       /* Delete the stores that this store motion makes redundant, and
6474          insert the new stores on the edges where they are needed.  */
6475 FOR_EACH_BB (bb)
6476 if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
6477 delete_store (ptr, bb);
6478
6479 for (x = 0; x < NUM_EDGES (edge_list); x++)
6480 if (TEST_BIT (pre_insert_map[x], ptr->index))
6481 update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
6482 }
6483
6484 if (update_flow)
6485 commit_edge_insertions ();
6486
6487 free_store_memory ();
6488 free_edge_list (edge_list);
6489 remove_fake_exit_edges ();
6490 end_alias_analysis ();
6491 }
6492
6493 \f
6494 /* Entry point for jump bypassing optimization pass. */
6495
6496 int
6497 bypass_jumps (FILE *file)
6498 {
6499 int changed;
6500
6501 /* We do not construct an accurate cfg in functions which call
6502 setjmp, so just punt to be safe. */
6503 if (current_function_calls_setjmp)
6504 return 0;
6505
6506 /* For calling dump_foo fns from gdb. */
6507 debug_stderr = stderr;
6508 gcse_file = file;
6509
6510 /* Identify the basic block information for this function, including
6511 successors and predecessors. */
6512 max_gcse_regno = max_reg_num ();
6513
6514 if (file)
6515 dump_flow_info (file);
6516
6517 /* Return if there's nothing to do, or it is too expensive. */
6518 if (n_basic_blocks <= 1 || is_too_expensive (_ ("jump bypassing disabled")))
6519 return 0;
6520
6521 gcc_obstack_init (&gcse_obstack);
6522 bytes_used = 0;
6523
6524 /* We need alias. */
6525 init_alias_analysis ();
6526
6527 /* Record where pseudo-registers are set. This data is kept accurate
6528 during each pass. ??? We could also record hard-reg information here
6529 [since it's unchanging], however it is currently done during hash table
6530 computation.
6531
6532 It may be tempting to compute MEM set information here too, but MEM sets
6533 will be subject to code motion one day and thus we need to compute
6534 information about memory sets when we build the hash tables. */
6535
6536 alloc_reg_set_mem (max_gcse_regno);
6537 compute_sets ();
6538
6539 max_gcse_regno = max_reg_num ();
6540 alloc_gcse_mem ();
6541 changed = one_cprop_pass (MAX_GCSE_PASSES + 2, true, true);
6542 free_gcse_mem ();
6543
6544 if (file)
6545 {
6546 fprintf (file, "BYPASS of %s: %d basic blocks, ",
6547 current_function_name (), n_basic_blocks);
6548 fprintf (file, "%d bytes\n\n", bytes_used);
6549 }
6550
6551 obstack_free (&gcse_obstack, NULL);
6552 free_reg_set_mem ();
6553
6554 /* We are finished with alias. */
6555 end_alias_analysis ();
6556 allocate_reg_info (max_reg_num (), FALSE, FALSE);
6557
6558 return changed;
6559 }
6560
6561 /* Return true if the graph is too expensive to optimize. PASS is the
6562 optimization about to be performed. */
6563
6564 static bool
6565 is_too_expensive (const char *pass)
6566 {
6567 /* Trying to perform global optimizations on flow graphs which have
6568 a high connectivity will take a long time and is unlikely to be
6569 particularly useful.
6570
6571 In normal circumstances a cfg should have about twice as many
6572 edges as blocks. But we do not want to punish small functions
6573      which have a couple of switch statements.  Rather than simply
6574      thresholding the number of blocks, use something with more
6575      graceful degradation.  */
6576 if (n_edges > 20000 + n_basic_blocks * 4)
6577 {
6578 warning (OPT_Wdisabled_optimization,
6579 "%s: %d basic blocks and %d edges/basic block",
6580 pass, n_basic_blocks, n_edges / n_basic_blocks);
6581
6582 return true;
6583 }
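
  /* Worked example (not from the original comment): with 1000 basic blocks
     the threshold above is 20000 + 4 * 1000 = 24000 edges, i.e. an average
     of 24 edges per block, well beyond the roughly two edges per block of a
     normal CFG.  */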
6584
6585 /* If allocating memory for the cprop bitmap would take up too much
6586 storage it's better just to disable the optimization. */
6587 if ((n_basic_blocks
6588 * SBITMAP_SET_SIZE (max_reg_num ())
6589 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
6590 {
6591 warning (OPT_Wdisabled_optimization,
6592 "%s: %d basic blocks and %d registers",
6593 pass, n_basic_blocks, max_reg_num ());
6594
6595 return true;
6596 }
6597
6598 return false;
6599 }
6600 \f
6601 static bool
6602 gate_handle_jump_bypass (void)
6603 {
6604 return optimize > 0 && flag_gcse;
6605 }
6606
6607 /* Perform jump bypassing and control flow optimizations. */
6608 static void
6609 rest_of_handle_jump_bypass (void)
6610 {
6611 cleanup_cfg (CLEANUP_EXPENSIVE);
6612 reg_scan (get_insns (), max_reg_num ());
6613
6614 if (bypass_jumps (dump_file))
6615 {
6616 rebuild_jump_labels (get_insns ());
6617 cleanup_cfg (CLEANUP_EXPENSIVE);
6618 delete_trivially_dead_insns (get_insns (), max_reg_num ());
6619 }
6620 }
6621
6622 struct tree_opt_pass pass_jump_bypass =
6623 {
6624 "bypass", /* name */
6625 gate_handle_jump_bypass, /* gate */
6626 rest_of_handle_jump_bypass, /* execute */
6627 NULL, /* sub */
6628 NULL, /* next */
6629 0, /* static_pass_number */
6630 TV_BYPASS, /* tv_id */
6631 0, /* properties_required */
6632 0, /* properties_provided */
6633 0, /* properties_destroyed */
6634 0, /* todo_flags_start */
6635 TODO_dump_func |
6636 TODO_ggc_collect | TODO_verify_flow, /* todo_flags_finish */
6637 'G' /* letter */
6638 };
6639
6640
6641 static bool
6642 gate_handle_gcse (void)
6643 {
6644 return optimize > 0 && flag_gcse;
6645 }
6646
6647
6648 static void
6649 rest_of_handle_gcse (void)
6650 {
6651 int save_csb, save_cfj;
6652 int tem2 = 0, tem;
6653
6654 tem = gcse_main (get_insns (), dump_file);
6655 rebuild_jump_labels (get_insns ());
6656 delete_trivially_dead_insns (get_insns (), max_reg_num ());
6657
6658 save_csb = flag_cse_skip_blocks;
6659 save_cfj = flag_cse_follow_jumps;
6660 flag_cse_skip_blocks = flag_cse_follow_jumps = 0;
6661
6662 /* If -fexpensive-optimizations, re-run CSE to clean up things done
6663 by gcse. */
6664 if (flag_expensive_optimizations)
6665 {
6666 timevar_push (TV_CSE);
6667 reg_scan (get_insns (), max_reg_num ());
6668 tem2 = cse_main (get_insns (), max_reg_num (), dump_file);
6669 purge_all_dead_edges ();
6670 delete_trivially_dead_insns (get_insns (), max_reg_num ());
6671 timevar_pop (TV_CSE);
6672 cse_not_expected = !flag_rerun_cse_after_loop;
6673 }
6674
6675 /* If gcse or cse altered any jumps, rerun jump optimizations to clean
6676 things up. */
6677 if (tem || tem2)
6678 {
6679 timevar_push (TV_JUMP);
6680 rebuild_jump_labels (get_insns ());
6681 delete_dead_jumptables ();
6682 cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);
6683 timevar_pop (TV_JUMP);
6684 }
6685
6686 flag_cse_skip_blocks = save_csb;
6687 flag_cse_follow_jumps = save_cfj;
6688 }
6689
6690 struct tree_opt_pass pass_gcse =
6691 {
6692 "gcse1", /* name */
6693 gate_handle_gcse, /* gate */
6694 rest_of_handle_gcse, /* execute */
6695 NULL, /* sub */
6696 NULL, /* next */
6697 0, /* static_pass_number */
6698 TV_GCSE, /* tv_id */
6699 0, /* properties_required */
6700 0, /* properties_provided */
6701 0, /* properties_destroyed */
6702 0, /* todo_flags_start */
6703 TODO_dump_func |
6704 TODO_verify_flow | TODO_ggc_collect, /* todo_flags_finish */
6705 'G' /* letter */
6706 };
6707
6708
6709 #include "gt-gcse.h"