1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 /* TODO
23 - reordering of memory allocation and freeing to be more space efficient
24 - do rough calc of how many regs are needed in each block, and a rough
25 calc of how many regs are available in each class and use that to
26 throttle back the code in cases where RTX_COST is minimal.
27 - dead store elimination
28 - a store to the same address as a load does not kill the load if the
29 source of the store is also the destination of the load. Handling this
30 allows more load motion, particularly out of loops.
31 - ability to realloc sbitmap vectors would allow one initial computation
32 of reg_set_in_block with only subsequent additions, rather than
33 recomputing it for each pass
34
35 */
36
37 /* References searched while implementing this.
38
39 Compilers Principles, Techniques and Tools
40 Aho, Sethi, Ullman
41 Addison-Wesley, 1988
42
43 Global Optimization by Suppression of Partial Redundancies
44 E. Morel, C. Renvoise
45 Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
46
47 A Portable Machine-Independent Global Optimizer - Design and Measurements
48 Frederick Chow
49 Stanford Ph.D. thesis, Dec. 1983
50
51 A Fast Algorithm for Code Movement Optimization
52 D.M. Dhamdhere
53 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
54
55 A Solution to a Problem with Morel and Renvoise's
56 Global Optimization by Suppression of Partial Redundancies
57 K-H Drechsler, M.P. Stadel
58 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
59
60 Practical Adaptation of the Global Optimization
61 Algorithm of Morel and Renvoise
62 D.M. Dhamdhere
63 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
64
65 Efficiently Computing Static Single Assignment Form and the Control
66 Dependence Graph
67 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
68 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
69
70 Lazy Code Motion
71 J. Knoop, O. Ruthing, B. Steffen
72 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
73
74 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
75 Time for Reducible Control Flow
76 Thomas Ball
77 ACM Letters on Programming Languages and Systems,
78 Vol. 2, Num. 1-4, Mar-Dec 1993
79
80 An Efficient Representation for Sparse Sets
81 Preston Briggs, Linda Torczon
82 ACM Letters on Programming Languages and Systems,
83 Vol. 2, Num. 1-4, Mar-Dec 1993
84
85 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
86 K-H Drechsler, M.P. Stadel
87 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
88
89 Partial Dead Code Elimination
90 J. Knoop, O. Ruthing, B. Steffen
91 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
92
93 Effective Partial Redundancy Elimination
94 P. Briggs, K.D. Cooper
95 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
96
97 The Program Structure Tree: Computing Control Regions in Linear Time
98 R. Johnson, D. Pearson, K. Pingali
99 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
100
101 Optimal Code Motion: Theory and Practice
102 J. Knoop, O. Ruthing, B. Steffen
103 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
104
105 The power of assignment motion
106 J. Knoop, O. Ruthing, B. Steffen
107 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
108
109 Global code motion / global value numbering
110 C. Click
111 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
112
113 Value Driven Redundancy Elimination
114 L.T. Simpson
115 Rice University Ph.D. thesis, Apr. 1996
116
117 Value Numbering
118 L.T. Simpson
119 Massively Scalar Compiler Project, Rice University, Sep. 1996
120
121 High Performance Compilers for Parallel Computing
122 Michael Wolfe
123 Addison-Wesley, 1996
124
125 Advanced Compiler Design and Implementation
126 Steven Muchnick
127 Morgan Kaufmann, 1997
128
129 Building an Optimizing Compiler
130 Robert Morgan
131 Digital Press, 1998
132
133 People wishing to speed up the code here should read:
134 Elimination Algorithms for Data Flow Analysis
135 B.G. Ryder, M.C. Paull
136 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
137
138 How to Analyze Large Programs Efficiently and Informatively
139 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
140 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
141
142 People wishing to do something different can find various possibilities
143 in the above papers and elsewhere.
144 */
145
146 #include "config.h"
147 #include "system.h"
148 #include "toplev.h"
149
150 #include "rtl.h"
151 #include "tm_p.h"
152 #include "regs.h"
153 #include "hard-reg-set.h"
154 #include "flags.h"
155 #include "real.h"
156 #include "insn-config.h"
157 #include "recog.h"
158 #include "basic-block.h"
159 #include "output.h"
160 #include "function.h"
161 #include "expr.h"
162
163 #include "obstack.h"
164 #define obstack_chunk_alloc gmalloc
165 #define obstack_chunk_free free
166
167 /* Maximum number of passes to perform. */
168 #define MAX_PASSES 1
169
170 /* Propagate flow information through back edges and thus enable PRE's
171 moving loop invariant calculations out of loops.
172
173 Originally this tended to create worse overall code, but several
174 improvements during the development of PRE seem to have made following
175 back edges generally a win.
176
177 Note much of the loop invariant code motion done here would normally
178 be done by loop.c, which has more heuristics for when to move invariants
179 out of loops. At some point we might need to move some of those
180 heuristics into gcse.c. */
181 #define FOLLOW_BACK_EDGES 1
182
183 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
184 are a superset of those done by GCSE.
185
186 We perform the following steps:
187
188 1) Compute basic block information.
189
190 2) Compute table of places where registers are set.
191
192 3) Perform copy/constant propagation.
193
194 4) Perform global cse.
195
196 5) Perform another pass of copy/constant propagation.
197
198 Two passes of copy/constant propagation are done because the first one
199 enables more GCSE and the second one helps to clean up the copies that
200 GCSE creates. This is needed more for PRE than for Classic because Classic
201 GCSE will try to use an existing register containing the common
202 subexpression rather than create a new one. This is harder to do for PRE
203 because of the code motion (which Classic GCSE doesn't do).
204
205 Expressions we are interested in GCSE-ing are of the form
206 (set (pseudo-reg) (expression)).
207 Function want_to_gcse_p says what these are.
208
209 PRE handles moving invariant expressions out of loops (by treating them as
210 partially redundant).
211
212 Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
213 assignment) based GVN (global value numbering). L. T. Simpson's paper
214 (Rice University) on value numbering is a useful reference for this.
215
216 **********************
217
218 We used to support multiple passes but there are diminishing returns in
219 doing so. The first pass usually makes 90% of the changes that are doable.
220 A second pass can make a few more changes made possible by the first pass.
221 Experiments show any further passes don't make enough changes to justify
222 the expense.
223
224 A study of spec92 using an unlimited number of passes:
225 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
226 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
227 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
228
229 It was found that doing copy propagation between each pass enables
230 further substitutions.
231
232 PRE is quite expensive in complicated functions because the DFA can take
233 a while to converge. Hence we only perform one pass. Macro MAX_PASSES can
234 be modified if one wants to experiment.
235
236 **********************
237
238 The steps for PRE are:
239
240 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
241
242 2) Perform the data flow analysis for PRE.
243
244 3) Delete the redundant instructions
245
246 4) Insert the required copies [if any] that make the partially
247 redundant instructions fully redundant.
248
249 5) For other reaching expressions, insert an instruction to copy the value
250 to a newly created pseudo that will reach the redundant instruction.
251
252 The deletion is done first so that when we do insertions we
253 know which pseudo reg to use.
254
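    For example (schematically; the names are made up and T denotes the newly
    created pseudo, i.e. the expression's reaching_reg):

        if (c)                            if (c)
          x = a + b;                        { x = a + b; t = x; }
        else              becomes         else
          ...                               t = a + b;
        y = a + b;                        y = t;

    The computation of a+b reaching the join block is partially redundant:
    it is deleted (replaced by a copy from T), a copy into T is added after
    the occurrence that already computes it, and a full computation is
    inserted on the path that had none, making the remaining use fully
    redundant.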
255 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
256 argue it is not. The number of iterations for the algorithm to converge
257 is typically 2-4 so I don't view it as that expensive (relatively speaking).
258
259 PRE GCSE depends heavily on the second CSE pass to clean up the copies
260 we create. To make an expression reach the place where it's redundant,
261 the result of the expression is copied to a new register, and the redundant
262 expression is deleted by replacing it with this new register. Classic GCSE
263 doesn't have this problem as much as it computes the reaching defs of
264 each register in each block and thus can try to use an existing register.
265
266 **********************
267
268 A fair bit of simplicity is gained by using small functions for simple
269 tasks, even when the function is only called in one place. This may
270 measurably slow things down [or may not] by creating more function call
271 overhead than is necessary. The source is laid out so that it's trivial
272 to make the affected functions inline so that one can measure what speed
273 up, if any, can be achieved, and maybe later, when things settle, things can
274 be rearranged.
275
276 Help stamp out big monolithic functions! */
277 \f
278 /* GCSE global vars. */
279
280 /* -dG dump file. */
281 static FILE *gcse_file;
282
283 /* Note whether or not we should run jump optimization after gcse. We
284 want to do this for two cases.
285
286 * If we changed any jumps via cprop.
287
288 * If we added any labels via edge splitting. */
289
290 static int run_jump_opt_after_gcse;
291
292 /* Bitmaps are normally not included in debugging dumps.
293 However it's useful to be able to print them from GDB.
294 We could create special functions for this, but it's simpler to
295 just allow passing stderr to the dump_foo fns. Since stderr can
296 be a macro, we store a copy here. */
297 static FILE *debug_stderr;
298
299 /* An obstack for our working variables. */
300 static struct obstack gcse_obstack;
301
302 /* Non-zero for each mode that supports (set (reg) (reg)).
303 This is trivially true for integer and floating point values.
304 It may or may not be true for condition codes. */
305 static char can_copy_p[(int) NUM_MACHINE_MODES];
306
307 /* Non-zero if can_copy_p has been initialized. */
308 static int can_copy_init_p;
309
310 struct reg_use {rtx reg_rtx; };
311
312 /* Hash table of expressions. */
313
314 struct expr
315 {
316 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
317 rtx expr;
318 /* Index in the available expression bitmaps. */
319 int bitmap_index;
320 /* Next entry with the same hash. */
321 struct expr *next_same_hash;
322 /* List of anticipatable occurrences in basic blocks in the function.
323 An "anticipatable occurrence" is one that is the first occurrence in the
324 basic block, the operands are not modified in the basic block prior
325 to the occurrence and the output is not used between the start of
326 the block and the occurrence. */
327 struct occr *antic_occr;
328 /* List of available occurrences in basic blocks in the function.
329 An "available occurrence" is one that is the last occurrence in the
330 basic block and the operands are not modified by following statements in
331 the basic block [including this insn]. */
332 struct occr *avail_occr;
333 /* Non-null if the computation is PRE redundant.
334 The value is the newly created pseudo-reg to record a copy of the
335 expression in all the places that reach the redundant copy. */
336 rtx reaching_reg;
337 };
338
339 /* Occurrence of an expression.
340 There is one per basic block. If a pattern appears more than once the
341 last appearance is used [or first for anticipatable expressions]. */
342
343 struct occr
344 {
345 /* Next occurrence of this expression. */
346 struct occr *next;
347 /* The insn that computes the expression. */
348 rtx insn;
349 /* Non-zero if this [anticipatable] occurrence has been deleted. */
350 char deleted_p;
351 /* Non-zero if this [available] occurrence has been copied to
352 reaching_reg. */
353 /* ??? This is mutually exclusive with deleted_p, so they could share
354 the same byte. */
355 char copied_p;
356 };
357
358 /* Expression and copy propagation hash tables.
359 Each hash table is an array of buckets.
360 ??? It is known that if it were an array of entries, structure elements
361 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
362 not clear whether in the final analysis a sufficient amount of memory would
363 be saved as the size of the available expression bitmaps would be larger
364 [one could build a mapping table without holes afterwards though].
365 Someday I'll perform the computation and figure it out. */
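
/* Schematically (with made-up indices): expr_hash_table[h] points to a chain
   of `struct expr' that merely collided on hash value H, e.g.
   A (bitmap_index 3) -> B (bitmap_index 7) -> NULL.  The bitmap_index of an
   expression is the column used for that expression in the per-block sbitmaps
   (antloc, comp, transp, ae_kill, ...).  */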
366
367 /* Total size of the expression hash table, in elements. */
368 static unsigned int expr_hash_table_size;
369
370 /* The table itself.
371 This is an array of `expr_hash_table_size' elements. */
372 static struct expr **expr_hash_table;
373
374 /* Total size of the copy propagation hash table, in elements. */
375 static int set_hash_table_size;
376
377 /* The table itself.
378 This is an array of `set_hash_table_size' elements. */
379 static struct expr **set_hash_table;
380
381 /* Mapping of uids to cuids.
382 Only real insns get cuids. */
383 static int *uid_cuid;
384
385 /* Highest UID in UID_CUID. */
386 static int max_uid;
387
388 /* Get the cuid of an insn. */
389 #ifdef ENABLE_CHECKING
390 #define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
391 #else
392 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
393 #endif
394
395 /* Number of cuids. */
396 static int max_cuid;
397
398 /* Mapping of cuids to insns. */
399 static rtx *cuid_insn;
400
401 /* Get insn from cuid. */
402 #define CUID_INSN(CUID) (cuid_insn[CUID])
403
404 /* Maximum register number in function prior to doing gcse + 1.
405 Registers created during this pass have regno >= max_gcse_regno.
406 This is named with "gcse" to avoid colliding with a global of the same name. */
407 static unsigned int max_gcse_regno;
408
409 /* Maximum number of cse-able expressions found. */
410 static int n_exprs;
411
412 /* Maximum number of assignments for copy propagation found. */
413 static int n_sets;
414
415 /* Table of registers that are modified.
416
417 For each register, each element is a list of places where the pseudo-reg
418 is set.
419
420 For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only
421 requires knowledge of which blocks kill which regs [and thus could use
422 a bitmap instead of the lists `reg_set_table' uses].
423
424 `reg_set_table' could be turned into an array of bitmaps (num-bbs x
425 num-regs) [however perhaps it may be useful to keep the data as is]. One
426 advantage of recording things this way is that `reg_set_table' is fairly
427 sparse with respect to pseudo regs but for hard regs could be fairly dense
428 [relatively speaking]. And recording sets of pseudo-regs in lists speeds
429 up functions like compute_transp since in the case of pseudo-regs we only
430 need to iterate over the number of times a pseudo-reg is set, not over the
431 number of basic blocks [clearly there is a bit of a slow down in the cases
432 where a pseudo is set more than once in a block, however it is believed
433 that the net effect is to speed things up]. This isn't done for hard-regs
434 because recording call-clobbered hard-regs in `reg_set_table' at each
435 function call can consume a fair bit of memory, and iterating over
436 hard-regs stored this way in compute_transp will be more expensive. */
437
438 typedef struct reg_set
439 {
440 /* The next setting of this register. */
441 struct reg_set *next;
442 /* The insn where it was set. */
443 rtx insn;
444 } reg_set;
445
446 static reg_set **reg_set_table;
447
448 /* Size of `reg_set_table'.
449 The table starts out at max_gcse_regno + slop, and is enlarged as
450 necessary. */
451 static int reg_set_table_size;
452
453 /* Amount to grow `reg_set_table' by when it's full. */
454 #define REG_SET_TABLE_SLOP 100
455
456 /* Bitmap containing one bit for each register in the program.
457 Used when performing GCSE to track which registers have been set since
458 the start of the basic block. */
459 static sbitmap reg_set_bitmap;
460
461 /* For each block, a bitmap of registers set in the block.
462 This is used by expr_killed_p and compute_transp.
463 It is computed during hash table computation and not by compute_sets
464 as it includes registers added since the last pass (or between cprop and
465 gcse) and it's currently not easy to realloc sbitmap vectors. */
466 static sbitmap *reg_set_in_block;
467
468 /* For each block, non-zero if memory is set in that block.
469 This is computed during hash table computation and is used by
470 expr_killed_p and compute_transp.
471 ??? Handling of memory is very simple; we don't make any attempt
472 to optimize things (later).
473 ??? This can be computed by compute_sets since the information
474 doesn't change. */
475 static char *mem_set_in_block;
476
477 /* Various variables for statistics gathering. */
478
479 /* Memory used in a pass.
480 This isn't intended to be absolutely precise. Its intent is only
481 to keep an eye on memory usage. */
482 static int bytes_used;
483
484 /* GCSE substitutions made. */
485 static int gcse_subst_count;
486 /* Number of copy instructions created. */
487 static int gcse_create_count;
488 /* Number of constants propagated. */
489 static int const_prop_count;
490 /* Number of copies propagated. */
491 static int copy_prop_count;
492 \f
493 /* These variables are used by classic GCSE.
494 Normally they'd be defined a bit later, but `rd_gen' needs to
495 be declared sooner. */
496
497 /* A bitmap of all ones for implementing the algorithm for available
498 expressions and reaching definitions. */
499 /* ??? Available expression bitmaps have a different size than reaching
500 definition bitmaps. This should be the larger of the two, however, it
501 is not currently used for reaching definitions. */
502 static sbitmap u_bitmap;
503
504 /* Each block has a bitmap of each type.
505 The length of each block's bitmap is:
506
507 max_cuid - for reaching definitions
508 n_exprs - for available expressions
509
510 Thus we view the bitmaps as two-dimensional arrays, i.e.
511 rd_kill[block_num][cuid_num]
512 ae_kill[block_num][expr_num] */
513
514 /* For reaching defs */
515 static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;
516
517 /* for available exprs */
518 static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
519
520 /* Objects of this type are passed around by the null-pointer check
521 removal routines. */
522 struct null_pointer_info
523 {
524 /* The basic block being processed. */
525 int current_block;
526 /* The first register to be handled in this pass. */
527 unsigned int min_reg;
528 /* One greater than the last register to be handled in this pass. */
529 unsigned int max_reg;
530 sbitmap *nonnull_local;
531 sbitmap *nonnull_killed;
532 };
533 \f
534 static void compute_can_copy PARAMS ((void));
535 static char *gmalloc PARAMS ((unsigned int));
536 static char *grealloc PARAMS ((char *, unsigned int));
537 static char *gcse_alloc PARAMS ((unsigned long));
538 static void alloc_gcse_mem PARAMS ((rtx));
539 static void free_gcse_mem PARAMS ((void));
540 static void alloc_reg_set_mem PARAMS ((int));
541 static void free_reg_set_mem PARAMS ((void));
542 static int get_bitmap_width PARAMS ((int, int, int));
543 static void record_one_set PARAMS ((int, rtx));
544 static void record_set_info PARAMS ((rtx, rtx, void *));
545 static void compute_sets PARAMS ((rtx));
546 static void hash_scan_insn PARAMS ((rtx, int, int));
547 static void hash_scan_set PARAMS ((rtx, rtx, int));
548 static void hash_scan_clobber PARAMS ((rtx, rtx));
549 static void hash_scan_call PARAMS ((rtx, rtx));
550 static int want_to_gcse_p PARAMS ((rtx));
551 static int oprs_unchanged_p PARAMS ((rtx, rtx, int));
552 static int oprs_anticipatable_p PARAMS ((rtx, rtx));
553 static int oprs_available_p PARAMS ((rtx, rtx));
554 static void insert_expr_in_table PARAMS ((rtx, enum machine_mode, rtx,
555 int, int));
556 static void insert_set_in_table PARAMS ((rtx, rtx));
557 static unsigned int hash_expr PARAMS ((rtx, enum machine_mode, int *, int));
558 static unsigned int hash_expr_1 PARAMS ((rtx, enum machine_mode, int *));
559 static unsigned int hash_set PARAMS ((int, int));
560 static int expr_equiv_p PARAMS ((rtx, rtx));
561 static void record_last_reg_set_info PARAMS ((rtx, int));
562 static void record_last_mem_set_info PARAMS ((rtx));
563 static void record_last_set_info PARAMS ((rtx, rtx, void *));
564 static void compute_hash_table PARAMS ((int));
565 static void alloc_set_hash_table PARAMS ((int));
566 static void free_set_hash_table PARAMS ((void));
567 static void compute_set_hash_table PARAMS ((void));
568 static void alloc_expr_hash_table PARAMS ((unsigned int));
569 static void free_expr_hash_table PARAMS ((void));
570 static void compute_expr_hash_table PARAMS ((void));
571 static void dump_hash_table PARAMS ((FILE *, const char *, struct expr **,
572 int, int));
573 static struct expr *lookup_expr PARAMS ((rtx));
574 static struct expr *lookup_set PARAMS ((unsigned int, rtx));
575 static struct expr *next_set PARAMS ((unsigned int, struct expr *));
576 static void reset_opr_set_tables PARAMS ((void));
577 static int oprs_not_set_p PARAMS ((rtx, rtx));
578 static void mark_call PARAMS ((rtx));
579 static void mark_set PARAMS ((rtx, rtx));
580 static void mark_clobber PARAMS ((rtx, rtx));
581 static void mark_oprs_set PARAMS ((rtx));
582 static void alloc_cprop_mem PARAMS ((int, int));
583 static void free_cprop_mem PARAMS ((void));
584 static void compute_transp PARAMS ((rtx, int, sbitmap *, int));
585 static void compute_transpout PARAMS ((void));
586 static void compute_local_properties PARAMS ((sbitmap *, sbitmap *, sbitmap *,
587 int));
588 static void compute_cprop_data PARAMS ((void));
589 static void find_used_regs PARAMS ((rtx));
590 static int try_replace_reg PARAMS ((rtx, rtx, rtx));
591 static struct expr *find_avail_set PARAMS ((int, rtx));
592 static int cprop_jump PARAMS ((rtx, rtx, struct reg_use *, rtx));
593 #ifdef HAVE_cc0
594 static int cprop_cc0_jump PARAMS ((rtx, struct reg_use *, rtx));
595 #endif
596 static int cprop_insn PARAMS ((rtx, int));
597 static int cprop PARAMS ((int));
598 static int one_cprop_pass PARAMS ((int, int));
599 static void alloc_pre_mem PARAMS ((int, int));
600 static void free_pre_mem PARAMS ((void));
601 static void compute_pre_data PARAMS ((void));
602 static int pre_expr_reaches_here_p PARAMS ((int, struct expr *, int));
603 static void insert_insn_end_bb PARAMS ((struct expr *, int, int));
604 static void pre_insert_copy_insn PARAMS ((struct expr *, rtx));
605 static void pre_insert_copies PARAMS ((void));
606 static int pre_delete PARAMS ((void));
607 static int pre_gcse PARAMS ((void));
608 static int one_pre_gcse_pass PARAMS ((int));
609 static void add_label_notes PARAMS ((rtx, rtx));
610 static void alloc_code_hoist_mem PARAMS ((int, int));
611 static void free_code_hoist_mem PARAMS ((void));
612 static void compute_code_hoist_vbeinout PARAMS ((void));
613 static void compute_code_hoist_data PARAMS ((void));
614 static int hoist_expr_reaches_here_p PARAMS ((int, int, int, char *));
615 static void hoist_code PARAMS ((void));
616 static int one_code_hoisting_pass PARAMS ((void));
617 static void alloc_rd_mem PARAMS ((int, int));
618 static void free_rd_mem PARAMS ((void));
619 static void handle_rd_kill_set PARAMS ((rtx, int, int));
620 static void compute_kill_rd PARAMS ((void));
621 static void compute_rd PARAMS ((void));
622 static void alloc_avail_expr_mem PARAMS ((int, int));
623 static void free_avail_expr_mem PARAMS ((void));
624 static void compute_ae_gen PARAMS ((void));
625 static int expr_killed_p PARAMS ((rtx, int));
626 static void compute_ae_kill PARAMS ((sbitmap *, sbitmap *));
627 static int expr_reaches_here_p PARAMS ((struct occr *, struct expr *,
628 int, int));
629 static rtx computing_insn PARAMS ((struct expr *, rtx));
630 static int def_reaches_here_p PARAMS ((rtx, rtx));
631 static int can_disregard_other_sets PARAMS ((struct reg_set **, rtx, int));
632 static int handle_avail_expr PARAMS ((rtx, struct expr *));
633 static int classic_gcse PARAMS ((void));
634 static int one_classic_gcse_pass PARAMS ((int));
635 static void invalidate_nonnull_info PARAMS ((rtx, rtx, void *));
636 static void delete_null_pointer_checks_1 PARAMS ((unsigned int *, sbitmap *,
637 sbitmap *,
638 struct null_pointer_info *));
639 static rtx process_insert_insn PARAMS ((struct expr *));
640 static int pre_edge_insert PARAMS ((struct edge_list *, struct expr **));
641 static int expr_reaches_here_p_work PARAMS ((struct occr *, struct expr *,
642 int, int, char *));
643 static int pre_expr_reaches_here_p_work PARAMS ((int, struct expr *,
644 int, char *));
645 \f
646 /* Entry point for global common subexpression elimination.
647 F is the first instruction in the function. */
648
649 int
650 gcse_main (f, file)
651 rtx f;
652 FILE *file;
653 {
654 int changed, pass;
655 /* Bytes used at start of pass. */
656 int initial_bytes_used;
657 /* Maximum number of bytes used by a pass. */
658 int max_pass_bytes;
659 /* Point to release obstack data from for each pass. */
660 char *gcse_obstack_bottom;
661
662 /* We do not construct an accurate cfg in functions which call
663 setjmp, so just punt to be safe. */
664 if (current_function_calls_setjmp)
665 return 0;
666
667 /* Assume that we do not need to run jump optimizations after gcse. */
668 run_jump_opt_after_gcse = 0;
669
670 /* For calling dump_foo fns from gdb. */
671 debug_stderr = stderr;
672 gcse_file = file;
673
674 /* Identify the basic block information for this function, including
675 successors and predecessors. */
676 max_gcse_regno = max_reg_num ();
677
678 if (file)
679 dump_flow_info (file);
680
681 /* Return if there's nothing to do. */
682 if (n_basic_blocks <= 1)
683 return 0;
684
685 /* Trying to perform global optimizations on flow graphs which have
686 a high connectivity will take a long time and is unlikely to be
687 particularly useful.
688
689 In normal circumstances a cfg should have about twice as many edges
690 as blocks. But we do not want to punish small functions which have
691 a couple of switch statements. So we require a relatively large number
692 of basic blocks and the ratio of edges to blocks to be high. */
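  /* For example (made-up numbers), a function with 1200 blocks and 25000
     edges has an edge/block ratio of 20 and is skipped here, while one with
     800 blocks is never skipped by this test, however many edges it has.  */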
693 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
694 return 0;
695
696 /* See what modes support reg/reg copy operations. */
697 if (! can_copy_init_p)
698 {
699 compute_can_copy ();
700 can_copy_init_p = 1;
701 }
702
703 gcc_obstack_init (&gcse_obstack);
704 bytes_used = 0;
705
706 /* Record where pseudo-registers are set. This data is kept accurate
707 during each pass. ??? We could also record hard-reg information here
708 [since it's unchanging], however it is currently done during hash table
709 computation.
710
711 It may be tempting to compute MEM set information here too, but MEM sets
712 will be subject to code motion one day and thus we need to compute
713 information about memory sets when we build the hash tables. */
714
715 alloc_reg_set_mem (max_gcse_regno);
716 compute_sets (f);
717
718 pass = 0;
719 initial_bytes_used = bytes_used;
720 max_pass_bytes = 0;
721 gcse_obstack_bottom = gcse_alloc (1);
722 changed = 1;
723 while (changed && pass < MAX_PASSES)
724 {
725 changed = 0;
726 if (file)
727 fprintf (file, "GCSE pass %d\n\n", pass + 1);
728
729 /* Initialize bytes_used to the space for the pred/succ lists,
730 and the reg_set_table data. */
731 bytes_used = initial_bytes_used;
732
733 /* Each pass may create new registers, so recalculate each time. */
734 max_gcse_regno = max_reg_num ();
735
736 alloc_gcse_mem (f);
737
738 /* Don't allow constant propagation to modify jumps
739 during this pass. */
740 changed = one_cprop_pass (pass + 1, 0);
741
742 if (optimize_size)
743 changed |= one_classic_gcse_pass (pass + 1);
744 else
745 {
746 changed |= one_pre_gcse_pass (pass + 1);
747 free_reg_set_mem ();
748 alloc_reg_set_mem (max_reg_num ());
749 compute_sets (f);
750 run_jump_opt_after_gcse = 1;
751 }
752
753 if (max_pass_bytes < bytes_used)
754 max_pass_bytes = bytes_used;
755
756 /* Free up memory, then reallocate for code hoisting. We can
757 not re-use the existing allocated memory because the tables
758 will not have info for the insns or registers created by
759 partial redundancy elimination. */
760 free_gcse_mem ();
761
762 /* It does not make sense to run code hoisting unless we are optimizing
763 for code size -- it rarely makes programs faster, and can make
764 them bigger if we did partial redundancy elimination (when optimizing
765 for space, we use a classic gcse algorithm instead of partial
766 redundancy algorithms). */
767 if (optimize_size)
768 {
769 max_gcse_regno = max_reg_num ();
770 alloc_gcse_mem (f);
771 changed |= one_code_hoisting_pass ();
772 free_gcse_mem ();
773
774 if (max_pass_bytes < bytes_used)
775 max_pass_bytes = bytes_used;
776 }
777
778 if (file)
779 {
780 fprintf (file, "\n");
781 fflush (file);
782 }
783
784 obstack_free (&gcse_obstack, gcse_obstack_bottom);
785 pass++;
786 }
787
788 /* Do one last pass of copy propagation, including cprop into
789 conditional jumps. */
790
791 max_gcse_regno = max_reg_num ();
792 alloc_gcse_mem (f);
793 /* This time, go ahead and allow cprop to alter jumps. */
794 one_cprop_pass (pass + 1, 1);
795 free_gcse_mem ();
796
797 if (file)
798 {
799 fprintf (file, "GCSE of %s: %d basic blocks, ",
800 current_function_name, n_basic_blocks);
801 fprintf (file, "%d pass%s, %d bytes\n\n",
802 pass, pass > 1 ? "es" : "", max_pass_bytes);
803 }
804
805 obstack_free (&gcse_obstack, NULL_PTR);
806 free_reg_set_mem ();
807 return run_jump_opt_after_gcse;
808 }
809 \f
810 /* Misc. utilities. */
811
812 /* Compute which modes support reg/reg copy operations. */
813
814 static void
815 compute_can_copy ()
816 {
817 int i;
818 #ifndef AVOID_CCMODE_COPIES
819 rtx reg,insn;
820 #endif
821 char *free_point = (char *) oballoc (1);
822
823 bzero (can_copy_p, NUM_MACHINE_MODES);
824
825 start_sequence ();
826 for (i = 0; i < NUM_MACHINE_MODES; i++)
827 if (GET_MODE_CLASS (i) == MODE_CC)
828 {
829 #ifdef AVOID_CCMODE_COPIES
830 can_copy_p[i] = 0;
831 #else
832 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
833 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
834 if (recog (PATTERN (insn), insn, NULL_PTR) >= 0)
835 can_copy_p[i] = 1;
836 #endif
837 }
838 else
839 can_copy_p[i] = 1;
840
841 end_sequence ();
842
843 /* Free the objects we just allocated. */
844 obfree (free_point);
845 }
846 \f
847 /* Cover function to xmalloc to record bytes allocated. */
848
849 static char *
850 gmalloc (size)
851 unsigned int size;
852 {
853 bytes_used += size;
854 return xmalloc (size);
855 }
856
857 /* Cover function to xrealloc.
858 We don't record the additional size since we don't know it.
859 It won't affect memory usage stats much anyway. */
860
861 static char *
862 grealloc (ptr, size)
863 char *ptr;
864 unsigned int size;
865 {
866 return xrealloc (ptr, size);
867 }
868
869 /* Cover function to obstack_alloc.
870 We don't need to record the bytes allocated here since
871 obstack_chunk_alloc is set to gmalloc. */
872
873 static char *
874 gcse_alloc (size)
875 unsigned long size;
876 {
877 return (char *) obstack_alloc (&gcse_obstack, size);
878 }
879
880 /* Allocate memory for the cuid mapping array,
881 and reg/memory set tracking tables.
882
883 This is called at the start of each pass. */
884
885 static void
886 alloc_gcse_mem (f)
887 rtx f;
888 {
889 int i,n;
890 rtx insn;
891
892 /* Find the largest UID and create a mapping from UIDs to CUIDs.
893 CUIDs are like UIDs except they increase monotonically, have no gaps,
894 and only apply to real insns. */
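  /* For instance (schematic, made-up uids): for the stream
     NOTE (uid 5), INSN (uid 7), NOTE (uid 8), INSN (uid 12)
     the real insns get cuids 0 and 1, and each note picks up the cuid that
     the next real insn will get, so uid_cuid[5] == 0, uid_cuid[7] == 0,
     uid_cuid[8] == 1, uid_cuid[12] == 1.  */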
895
896 max_uid = get_max_uid ();
897 n = (max_uid + 1) * sizeof (int);
898 uid_cuid = (int *) gmalloc (n);
899 bzero ((char *) uid_cuid, n);
900 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
901 {
902 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
903 uid_cuid[INSN_UID (insn)] = i++;
904 else
905 uid_cuid[INSN_UID (insn)] = i;
906 }
907
908 /* Create a table mapping cuids to insns. */
909
910 max_cuid = i;
911 n = (max_cuid + 1) * sizeof (rtx);
912 cuid_insn = (rtx *) gmalloc (n);
913 bzero ((char *) cuid_insn, n);
914 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
915 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
916 CUID_INSN (i++) = insn;
917
918 /* Allocate vars to track sets of regs. */
919 reg_set_bitmap = (sbitmap) sbitmap_alloc (max_gcse_regno);
920
921 /* Allocate vars to track sets of regs, memory per block. */
922 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
923 max_gcse_regno);
924 mem_set_in_block = (char *) gmalloc (n_basic_blocks);
925 }
926
927 /* Free memory allocated by alloc_gcse_mem. */
928
929 static void
930 free_gcse_mem ()
931 {
932 free (uid_cuid);
933 free (cuid_insn);
934
935 free (reg_set_bitmap);
936
937 free (reg_set_in_block);
938 free (mem_set_in_block);
939 }
940
941 /* Many of the global optimization algorithms work by solving dataflow
942 equations for various expressions. Initially, some local value is
943 computed for each expression in each block. Then, the values across the
944 various blocks are combined (by following flow graph edges) to arrive at
945 global values. Conceptually, each set of equations is independent. We
946 may therefore solve all the equations in parallel, solve them one at a
947 time, or pick any intermediate approach.
948
949 When you're going to need N two-dimensional bitmaps, each X (say, the
950 number of blocks) by Y (say, the number of expressions), call this
951 function. It's not important what X and Y represent; only that Y
952 correspond to the things that can be done in parallel. This function will
953 return an appropriate chunking factor C; you should solve C sets of
954 equations in parallel. By going through this function, we can easily
955 trade space against time; by solving fewer equations in parallel we use
956 less space. */
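
/* A rough worked example (made-up sizes, assuming 64-bit sbitmap elements):
   with N = 2, X = 4000 blocks and Y = 20000 expressions, a minimum-width
   column costs 2 * 4000 * 8 = 64000 bytes; the full bitmaps would need
   roughly 64000 * 313 bytes (about 20MB), which exceeds the 10MB budget,
   so the function returns 64 * 164 = 10496 and the caller solves about
   10000 equations at a time.  */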
957
958 static int
959 get_bitmap_width (n, x, y)
960 int n;
961 int x;
962 int y;
963 {
964 /* It's not really worth figuring out *exactly* how much memory will
965 be used by a particular choice. The important thing is to get
966 something approximately right. */
967 size_t max_bitmap_memory = 10 * 1024 * 1024;
968
969 /* The number of bytes we'd use for a single column of minimum
970 width. */
971 size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);
972
973 /* Often, it's reasonable just to solve all the equations in
974 parallel. */
975 if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
976 return y;
977
978 /* Otherwise, pick the largest width we can, without going over the
979 limit. */
980 return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
981 / column_size);
982 }
983 \f
984 /* Compute the local properties of each recorded expression.
985
986 Local properties are those that are defined by the block, irrespective of
987 other blocks.
988
989 An expression is transparent in a block if its operands are not modified
990 in the block.
991
992 An expression is computed (locally available) in a block if it is computed
993 at least once and the expression would contain the same value if the
994 computation was moved to the end of the block.
995
996 An expression is locally anticipatable in a block if it is computed at
997 least once and the expression would contain the same value if the computation
998 was moved to the beginning of the block.
999
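    For example (schematic), in a block containing

        r1 = r2 + r3
        r2 = ...
        r4 = r2 + r3

    the expression r2+r3 is locally anticipatable (the first occurrence
    precedes any change to its operands), locally available (no operand is
    changed after the last occurrence), but not transparent (r2 is set
    within the block).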
1000 We call this routine for cprop, pre and code hoisting. They all compute
1001 basically the same information and thus can easily share this code.
1002
1003 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
1004 properties. If NULL, then it is not necessary to compute or record that
1005 particular property.
1006
1007 SETP controls which hash table to look at. If zero, this routine looks at
1008 the expr hash table; if nonzero this routine looks at the set hash table.
1009 Additionally, TRANSP is computed as ~TRANSP, since this is really cprop's
1010 ABSALTERED. */
1011
1012 static void
1013 compute_local_properties (transp, comp, antloc, setp)
1014 sbitmap *transp;
1015 sbitmap *comp;
1016 sbitmap *antloc;
1017 int setp;
1018 {
1019 unsigned int i, hash_table_size;
1020 struct expr **hash_table;
1021
1022 /* Initialize any bitmaps that were passed in. */
1023 if (transp)
1024 {
1025 if (setp)
1026 sbitmap_vector_zero (transp, n_basic_blocks);
1027 else
1028 sbitmap_vector_ones (transp, n_basic_blocks);
1029 }
1030
1031 if (comp)
1032 sbitmap_vector_zero (comp, n_basic_blocks);
1033 if (antloc)
1034 sbitmap_vector_zero (antloc, n_basic_blocks);
1035
1036 /* We use the same code for cprop, pre and hoisting. For cprop
1037 we care about the set hash table, for pre and hoisting we
1038 care about the expr hash table. */
1039 hash_table_size = setp ? set_hash_table_size : expr_hash_table_size;
1040 hash_table = setp ? set_hash_table : expr_hash_table;
1041
1042 for (i = 0; i < hash_table_size; i++)
1043 {
1044 struct expr *expr;
1045
1046 for (expr = hash_table[i]; expr != NULL; expr = expr->next_same_hash)
1047 {
1048 int indx = expr->bitmap_index;
1049 struct occr *occr;
1050
1051 /* The expression is transparent in this block if it is not killed.
1052 We start by assuming all are transparent [none are killed], and
1053 then reset the bits for those that are. */
1054 if (transp)
1055 compute_transp (expr->expr, indx, transp, setp);
1056
1057 /* The occurrences recorded in antic_occr are exactly those that
1058 we want to set to non-zero in ANTLOC. */
1059 if (antloc)
1060 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
1061 {
1062 SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
1063
1064 /* While we're scanning the table, this is a good place to
1065 initialize this. */
1066 occr->deleted_p = 0;
1067 }
1068
1069 /* The occurrences recorded in avail_occr are exactly those that
1070 we want to set to non-zero in COMP. */
1071 if (comp)
1072 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
1073 {
1074 SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
1075
1076 /* While we're scanning the table, this is a good place to
1077 initialize this. */
1078 occr->copied_p = 0;
1079 }
1080
1081 /* While we're scanning the table, this is a good place to
1082 initialize this. */
1083 expr->reaching_reg = 0;
1084 }
1085 }
1086 }
1087 \f
1088 /* Register set information.
1089
1090 `reg_set_table' records where each register is set or otherwise
1091 modified. */
1092
1093 static struct obstack reg_set_obstack;
1094
1095 static void
1096 alloc_reg_set_mem (n_regs)
1097 int n_regs;
1098 {
1099 unsigned int n;
1100
1101 reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
1102 n = reg_set_table_size * sizeof (struct reg_set *);
1103 reg_set_table = (struct reg_set **) gmalloc (n);
1104 bzero ((char *) reg_set_table, n);
1105
1106 gcc_obstack_init (&reg_set_obstack);
1107 }
1108
1109 static void
1110 free_reg_set_mem ()
1111 {
1112 free (reg_set_table);
1113 obstack_free (&reg_set_obstack, NULL_PTR);
1114 }
1115
1116 /* Record REGNO in the reg_set table. */
1117
1118 static void
1119 record_one_set (regno, insn)
1120 int regno;
1121 rtx insn;
1122 {
1123 /* Allocate a new reg_set element and link it onto the list. */
1124 struct reg_set *new_reg_info, *reg_info_ptr1, *reg_info_ptr2;
1125
1126 /* If the table isn't big enough, enlarge it. */
1127 if (regno >= reg_set_table_size)
1128 {
1129 int new_size = regno + REG_SET_TABLE_SLOP;
1130
1131 reg_set_table
1132 = (struct reg_set **) grealloc ((char *) reg_set_table,
1133 new_size * sizeof (struct reg_set *));
1134 bzero ((char *) (reg_set_table + reg_set_table_size),
1135 (new_size - reg_set_table_size) * sizeof (struct reg_set *));
1136 reg_set_table_size = new_size;
1137 }
1138
1139 new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
1140 sizeof (struct reg_set));
1141 bytes_used += sizeof (struct reg_set);
1142 new_reg_info->insn = insn;
1143 new_reg_info->next = reg_set_table[regno];
1144 reg_set_table[regno] = new_reg_info;
1145 }
1146
1147 /* Called from compute_sets via note_stores to handle one SET or CLOBBER in
1148 an insn. The DATA is really the instruction in which the SET is
1149 occurring. */
1150
1151 static void
1152 record_set_info (dest, setter, data)
1153 rtx dest, setter ATTRIBUTE_UNUSED;
1154 void *data;
1155 {
1156 rtx record_set_insn = (rtx) data;
1157
1158 if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
1159 record_one_set (REGNO (dest), record_set_insn);
1160 }
1161
1162 /* Scan the function and record each set of each pseudo-register.
1163
1164 This is called once, at the start of the gcse pass. See the comments for
1165 `reg_set_table' for further documentation. */
1166
1167 static void
1168 compute_sets (f)
1169 rtx f;
1170 {
1171 rtx insn;
1172
1173 for (insn = f; insn != 0; insn = NEXT_INSN (insn))
1174 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
1175 note_stores (PATTERN (insn), record_set_info, insn);
1176 }
1177 \f
1178 /* Hash table support. */
1179
1180 /* For each register, the cuid of the first/last insn in the block to set it,
1181 or -1 if not set. */
1182 #define NEVER_SET -1
1183 static int *reg_first_set;
1184 static int *reg_last_set;
1185
1186 /* While computing "first/last set" info, this is the CUID of first/last insn
1187 to set memory or -1 if not set. `mem_last_set' is also used when
1188 performing GCSE to record whether memory has been set since the beginning
1189 of the block.
1190
1191 Note that handling of memory is very simple; we don't make any attempt
1192 to optimize things (later). */
1193 static int mem_first_set;
1194 static int mem_last_set;
1195
1196 /* Perform a quick check whether X, the source of a set, is something
1197 we want to consider for GCSE. */
1198
1199 static int
1200 want_to_gcse_p (x)
1201 rtx x;
1202 {
1203 switch (GET_CODE (x))
1204 {
1205 case REG:
1206 case SUBREG:
1207 case CONST_INT:
1208 case CONST_DOUBLE:
1209 case CALL:
1210 return 0;
1211
1212 default:
1213 break;
1214 }
1215
1216 return 1;
1217 }
1218
1219 /* Return non-zero if the operands of expression X are unchanged from the
1220 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
1221 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
1222
1223 static int
1224 oprs_unchanged_p (x, insn, avail_p)
1225 rtx x, insn;
1226 int avail_p;
1227 {
1228 int i, j;
1229 enum rtx_code code;
1230 const char *fmt;
1231
1232 if (x == 0)
1233 return 1;
1234
1235 code = GET_CODE (x);
1236 switch (code)
1237 {
1238 case REG:
1239 if (avail_p)
1240 return (reg_last_set[REGNO (x)] == NEVER_SET
1241 || reg_last_set[REGNO (x)] < INSN_CUID (insn));
1242 else
1243 return (reg_first_set[REGNO (x)] == NEVER_SET
1244 || reg_first_set[REGNO (x)] >= INSN_CUID (insn));
1245
1246 case MEM:
1247 if (avail_p && mem_last_set != NEVER_SET
1248 && mem_last_set >= INSN_CUID (insn))
1249 return 0;
1250 else if (! avail_p && mem_first_set != NEVER_SET
1251 && mem_first_set < INSN_CUID (insn))
1252 return 0;
1253 else
1254 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
1255
1256 case PRE_DEC:
1257 case PRE_INC:
1258 case POST_DEC:
1259 case POST_INC:
1260 return 0;
1261
1262 case PC:
1263 case CC0: /*FIXME*/
1264 case CONST:
1265 case CONST_INT:
1266 case CONST_DOUBLE:
1267 case SYMBOL_REF:
1268 case LABEL_REF:
1269 case ADDR_VEC:
1270 case ADDR_DIFF_VEC:
1271 return 1;
1272
1273 default:
1274 break;
1275 }
1276
1277 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1278 {
1279 if (fmt[i] == 'e')
1280 {
1281 /* If we are about to do the last recursive call needed at this
1282 level, change it into iteration. This function is called enough
1283 to be worth it. */
1284 if (i == 0)
1285 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
1286
1287 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
1288 return 0;
1289 }
1290 else if (fmt[i] == 'E')
1291 for (j = 0; j < XVECLEN (x, i); j++)
1292 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
1293 return 0;
1294 }
1295
1296 return 1;
1297 }
1298
1299 /* Return non-zero if the operands of expression X are unchanged from
1300 the start of INSN's basic block up to but not including INSN. */
1301
1302 static int
1303 oprs_anticipatable_p (x, insn)
1304 rtx x, insn;
1305 {
1306 return oprs_unchanged_p (x, insn, 0);
1307 }
1308
1309 /* Return non-zero if the operands of expression X are unchanged from
1310 INSN to the end of INSN's basic block. */
1311
1312 static int
1313 oprs_available_p (x, insn)
1314 rtx x, insn;
1315 {
1316 return oprs_unchanged_p (x, insn, 1);
1317 }
1318
1319 /* Hash expression X.
1320
1321 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1322 indicating if a volatile operand is found or if the expression contains
1323 something we don't want to insert in the table.
1324
1325 ??? One might want to merge this with canon_hash. Later. */
1326
1327 static unsigned int
1328 hash_expr (x, mode, do_not_record_p, hash_table_size)
1329 rtx x;
1330 enum machine_mode mode;
1331 int *do_not_record_p;
1332 int hash_table_size;
1333 {
1334 unsigned int hash;
1335
1336 *do_not_record_p = 0;
1337
1338 hash = hash_expr_1 (x, mode, do_not_record_p);
1339 return hash % hash_table_size;
1340 }
1341
1342 /* Subroutine of hash_expr to do the actual work. */
1343
1344 static unsigned int
1345 hash_expr_1 (x, mode, do_not_record_p)
1346 rtx x;
1347 enum machine_mode mode;
1348 int *do_not_record_p;
1349 {
1350 int i, j;
1351 unsigned hash = 0;
1352 enum rtx_code code;
1353 const char *fmt;
1354
1355 /* Used to turn recursion into iteration. We can't rely on GCC's
1356 tail-recursion elimination since we need to keep accumulating values
1357 in HASH. */
1358
1359 if (x == 0)
1360 return hash;
1361
1362 repeat:
1363 code = GET_CODE (x);
1364 switch (code)
1365 {
1366 case REG:
1367 hash += ((unsigned int) REG << 7) + REGNO (x);
1368 return hash;
1369
1370 case CONST_INT:
1371 hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
1372 + (unsigned int) INTVAL (x));
1373 return hash;
1374
1375 case CONST_DOUBLE:
1376 /* This is like the general case, except that it only counts
1377 the integers representing the constant. */
1378 hash += (unsigned int) code + (unsigned int) GET_MODE (x);
1379 if (GET_MODE (x) != VOIDmode)
1380 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1381 hash += (unsigned int) XWINT (x, i);
1382 else
1383 hash += ((unsigned int) CONST_DOUBLE_LOW (x)
1384 + (unsigned int) CONST_DOUBLE_HIGH (x));
1385 return hash;
1386
1387 /* Assume there is only one rtx object for any given label. */
1388 case LABEL_REF:
1389 /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
1390 differences and differences between each stage's debugging dumps. */
1391 hash += (((unsigned int) LABEL_REF << 7)
1392 + CODE_LABEL_NUMBER (XEXP (x, 0)));
1393 return hash;
1394
1395 case SYMBOL_REF:
1396 {
1397 /* Don't hash on the symbol's address to avoid bootstrap differences.
1398 Different hash values may cause expressions to be recorded in
1399 different orders and thus different registers to be used in the
1400 final assembler. This also avoids differences in the dump files
1401 between various stages. */
1402 unsigned int h = 0;
1403 const unsigned char *p = (const unsigned char *) XSTR (x, 0);
1404
1405 while (*p)
1406 h += (h << 7) + *p++; /* ??? revisit */
1407
1408 hash += ((unsigned int) SYMBOL_REF << 7) + h;
1409 return hash;
1410 }
1411
1412 case MEM:
1413 if (MEM_VOLATILE_P (x))
1414 {
1415 *do_not_record_p = 1;
1416 return 0;
1417 }
1418
1419 hash += (unsigned int) MEM;
1420 hash += MEM_ALIAS_SET (x);
1421 x = XEXP (x, 0);
1422 goto repeat;
1423
1424 case PRE_DEC:
1425 case PRE_INC:
1426 case POST_DEC:
1427 case POST_INC:
1428 case PC:
1429 case CC0:
1430 case CALL:
1431 case UNSPEC_VOLATILE:
1432 *do_not_record_p = 1;
1433 return 0;
1434
1435 case ASM_OPERANDS:
1436 if (MEM_VOLATILE_P (x))
1437 {
1438 *do_not_record_p = 1;
1439 return 0;
1440 }
1441
1442 default:
1443 break;
1444 }
1445
1446 hash += (unsigned) code + (unsigned) GET_MODE (x);
1447 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1448 {
1449 if (fmt[i] == 'e')
1450 {
1451 /* If we are about to do the last recursive call
1452 needed at this level, change it into iteration.
1453 This function is called enough to be worth it. */
1454 if (i == 0)
1455 {
1456 x = XEXP (x, i);
1457 goto repeat;
1458 }
1459
1460 hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
1461 if (*do_not_record_p)
1462 return 0;
1463 }
1464
1465 else if (fmt[i] == 'E')
1466 for (j = 0; j < XVECLEN (x, i); j++)
1467 {
1468 hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
1469 if (*do_not_record_p)
1470 return 0;
1471 }
1472
1473 else if (fmt[i] == 's')
1474 {
1475 register const unsigned char *p =
1476 (const unsigned char *) XSTR (x, i);
1477
1478 if (p)
1479 while (*p)
1480 hash += *p++;
1481 }
1482 else if (fmt[i] == 'i')
1483 hash += (unsigned int) XINT (x, i);
1484 else
1485 abort ();
1486 }
1487
1488 return hash;
1489 }
1490
1491 /* Hash a set of register REGNO.
1492
1493 Sets are hashed on the register that is set. This simplifies the PRE copy
1494 propagation code.
1495
1496 ??? May need to make things more elaborate. Later, as necessary. */
1497
1498 static unsigned int
1499 hash_set (regno, hash_table_size)
1500 int regno;
1501 int hash_table_size;
1502 {
1503 unsigned int hash;
1504
1505 hash = regno;
1506 return hash % hash_table_size;
1507 }
1508
1509 /* Return non-zero if exp1 is equivalent to exp2.
1510 ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
1511
1512 static int
1513 expr_equiv_p (x, y)
1514 rtx x, y;
1515 {
1516 register int i, j;
1517 register enum rtx_code code;
1518 register const char *fmt;
1519
1520 if (x == y)
1521 return 1;
1522
1523 if (x == 0 || y == 0)
1524 return x == y;
1525
1526 code = GET_CODE (x);
1527 if (code != GET_CODE (y))
1528 return 0;
1529
1530 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
1531 if (GET_MODE (x) != GET_MODE (y))
1532 return 0;
1533
1534 switch (code)
1535 {
1536 case PC:
1537 case CC0:
1538 return x == y;
1539
1540 case CONST_INT:
1541 return INTVAL (x) == INTVAL (y);
1542
1543 case LABEL_REF:
1544 return XEXP (x, 0) == XEXP (y, 0);
1545
1546 case SYMBOL_REF:
1547 return XSTR (x, 0) == XSTR (y, 0);
1548
1549 case REG:
1550 return REGNO (x) == REGNO (y);
1551
1552 case MEM:
1553 /* Can't merge two expressions in different alias sets, since we can
1554 decide that the expression is transparent in a block when it isn't,
1555 due to it being set with a different alias set. */
1556 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
1557 return 0;
1558 break;
1559
1560 /* For commutative operations, check both orders. */
1561 case PLUS:
1562 case MULT:
1563 case AND:
1564 case IOR:
1565 case XOR:
1566 case NE:
1567 case EQ:
1568 return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
1569 && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
1570 || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
1571 && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
1572
1573 default:
1574 break;
1575 }
1576
1577 /* Compare the elements. If any pair of corresponding elements
1578 fail to match, return 0 for the whole thing. */
1579
1580 fmt = GET_RTX_FORMAT (code);
1581 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1582 {
1583 switch (fmt[i])
1584 {
1585 case 'e':
1586 if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
1587 return 0;
1588 break;
1589
1590 case 'E':
1591 if (XVECLEN (x, i) != XVECLEN (y, i))
1592 return 0;
1593 for (j = 0; j < XVECLEN (x, i); j++)
1594 if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
1595 return 0;
1596 break;
1597
1598 case 's':
1599 if (strcmp (XSTR (x, i), XSTR (y, i)))
1600 return 0;
1601 break;
1602
1603 case 'i':
1604 if (XINT (x, i) != XINT (y, i))
1605 return 0;
1606 break;
1607
1608 case 'w':
1609 if (XWINT (x, i) != XWINT (y, i))
1610 return 0;
1611 break;
1612
1613 case '0':
1614 break;
1615
1616 default:
1617 abort ();
1618 }
1619 }
1620
1621 return 1;
1622 }
1623
1624 /* Insert expression X in INSN in the hash table.
1625 If it is already present, record it as the last occurrence in INSN's
1626 basic block.
1627
1628 MODE is the mode of the value X is being stored into.
1629 It is only used if X is a CONST_INT.
1630
1631 ANTIC_P is non-zero if X is an anticipatable expression.
1632 AVAIL_P is non-zero if X is an available expression. */
1633
1634 static void
1635 insert_expr_in_table (x, mode, insn, antic_p, avail_p)
1636 rtx x;
1637 enum machine_mode mode;
1638 rtx insn;
1639 int antic_p, avail_p;
1640 {
1641 int found, do_not_record_p;
1642 unsigned int hash;
1643 struct expr *cur_expr, *last_expr = NULL;
1644 struct occr *antic_occr, *avail_occr;
1645 struct occr *last_occr = NULL;
1646
1647 hash = hash_expr (x, mode, &do_not_record_p, expr_hash_table_size);
1648
1649 /* Do not insert expression in table if it contains volatile operands,
1650 or if hash_expr determines the expression is something we don't want
1651 to or can't handle. */
1652 if (do_not_record_p)
1653 return;
1654
1655 cur_expr = expr_hash_table[hash];
1656 found = 0;
1657
1658 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1659 {
1660 /* If the expression isn't found, save a pointer to the end of
1661 the list. */
1662 last_expr = cur_expr;
1663 cur_expr = cur_expr->next_same_hash;
1664 }
1665
1666 if (! found)
1667 {
1668 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1669 bytes_used += sizeof (struct expr);
1670 if (expr_hash_table[hash] == NULL)
1671 /* This is the first pattern that hashed to this index. */
1672 expr_hash_table[hash] = cur_expr;
1673 else
1674 /* Add EXPR to end of this hash chain. */
1675 last_expr->next_same_hash = cur_expr;
1676
1677 /* Set the fields of the expr element. */
1678 cur_expr->expr = x;
1679 cur_expr->bitmap_index = n_exprs++;
1680 cur_expr->next_same_hash = NULL;
1681 cur_expr->antic_occr = NULL;
1682 cur_expr->avail_occr = NULL;
1683 }
1684
1685 /* Now record the occurrence(s). */
1686 if (antic_p)
1687 {
1688 antic_occr = cur_expr->antic_occr;
1689
1690 /* Search for another occurrence in the same basic block. */
1691 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
1692 {
1693 /* If an occurrence isn't found, save a pointer to the end of
1694 the list. */
1695 last_occr = antic_occr;
1696 antic_occr = antic_occr->next;
1697 }
1698
1699 if (antic_occr)
1700 /* Found another instance of the expression in the same basic block.
1701 Prefer the currently recorded one. We want the first one in the
1702 block and the block is scanned from start to end. */
1703 ; /* nothing to do */
1704 else
1705 {
1706 /* First occurrence of this expression in this basic block. */
1707 antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
1708 bytes_used += sizeof (struct occr);
1709 /* First occurrence of this expression in any block? */
1710 if (cur_expr->antic_occr == NULL)
1711 cur_expr->antic_occr = antic_occr;
1712 else
1713 last_occr->next = antic_occr;
1714
1715 antic_occr->insn = insn;
1716 antic_occr->next = NULL;
1717 }
1718 }
1719
1720 if (avail_p)
1721 {
1722 avail_occr = cur_expr->avail_occr;
1723
1724 /* Search for another occurrence in the same basic block. */
1725 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
1726 {
1727 /* If an occurrence isn't found, save a pointer to the end of
1728 the list. */
1729 last_occr = avail_occr;
1730 avail_occr = avail_occr->next;
1731 }
1732
1733 if (avail_occr)
1734 /* Found another instance of the expression in the same basic block.
1735 Prefer this occurrence to the currently recorded one. We want
1736 the last one in the block and the block is scanned from start
1737 to end. */
1738 avail_occr->insn = insn;
1739 else
1740 {
1741 /* First occurrence of this expression in this basic block. */
1742 avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
1743 bytes_used += sizeof (struct occr);
1744
1745 /* First occurrence of this expression in any block? */
1746 if (cur_expr->avail_occr == NULL)
1747 cur_expr->avail_occr = avail_occr;
1748 else
1749 last_occr->next = avail_occr;
1750
1751 avail_occr->insn = insn;
1752 avail_occr->next = NULL;
1753 }
1754 }
1755 }
1756
1757 /* Insert pattern X in INSN in the hash table.
1758 X is a SET of a reg to either another reg or a constant.
1759 If it is already present, record it as the last occurrence in INSN's
1760 basic block. */
1761
1762 static void
1763 insert_set_in_table (x, insn)
1764 rtx x;
1765 rtx insn;
1766 {
1767 int found;
1768 unsigned int hash;
1769 struct expr *cur_expr, *last_expr = NULL;
1770 struct occr *cur_occr, *last_occr = NULL;
1771
1772 if (GET_CODE (x) != SET
1773 || GET_CODE (SET_DEST (x)) != REG)
1774 abort ();
1775
1776 hash = hash_set (REGNO (SET_DEST (x)), set_hash_table_size);
1777
1778 cur_expr = set_hash_table[hash];
1779 found = 0;
1780
1781 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1782 {
1783 /* If the expression isn't found, save a pointer to the end of
1784 the list. */
1785 last_expr = cur_expr;
1786 cur_expr = cur_expr->next_same_hash;
1787 }
1788
1789 if (! found)
1790 {
1791 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1792 bytes_used += sizeof (struct expr);
1793 if (set_hash_table[hash] == NULL)
1794 /* This is the first pattern that hashed to this index. */
1795 set_hash_table[hash] = cur_expr;
1796 else
1797 /* Add EXPR to end of this hash chain. */
1798 last_expr->next_same_hash = cur_expr;
1799
1800 /* Set the fields of the expr element.
1801 We must copy X because it can be modified when copy propagation is
1802 performed on its operands. */
1803 /* ??? Should this go in a different obstack? */
1804 cur_expr->expr = copy_rtx (x);
1805 cur_expr->bitmap_index = n_sets++;
1806 cur_expr->next_same_hash = NULL;
1807 cur_expr->antic_occr = NULL;
1808 cur_expr->avail_occr = NULL;
1809 }
1810
1811 /* Now record the occurrence. */
1812 cur_occr = cur_expr->avail_occr;
1813
1814 /* Search for another occurrence in the same basic block. */
1815 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
1816 {
1817 /* If an occurrence isn't found, save a pointer to the end of
1818 the list. */
1819 last_occr = cur_occr;
1820 cur_occr = cur_occr->next;
1821 }
1822
1823 if (cur_occr)
1824 /* Found another instance of the expression in the same basic block.
1825 Prefer this occurrence to the currently recorded one. We want the
1826 last one in the block and the block is scanned from start to end. */
1827 cur_occr->insn = insn;
1828 else
1829 {
1830 /* First occurrence of this expression in this basic block. */
1831 cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
1832 bytes_used += sizeof (struct occr);
1833
1834 /* First occurrence of this expression in any block? */
1835 if (cur_expr->avail_occr == NULL)
1836 cur_expr->avail_occr = cur_occr;
1837 else
1838 last_occr->next = cur_occr;
1839
1840 cur_occr->insn = insn;
1841 cur_occr->next = NULL;
1842 }
1843 }
1844
1845 /* Scan pattern PAT of INSN and add an entry to the hash table. If SET_P is
1846 non-zero, this is for the assignment hash table, otherwise it is for the
1847 expression hash table. */
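
/* For example (sketch only, register numbers are illustrative): an insn
whose pattern is
  (set (reg 105) (plus (reg 100) (const_int 4)))
is a candidate for the expression hash table, keyed on the PLUS, whereas
  (set (reg 106) (reg 100))  or  (set (reg 106) (const_int 7))
is a candidate for the assignment hash table used by const/copy
propagation, keyed on the destination register number. */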
1848
1849 static void
1850 hash_scan_set (pat, insn, set_p)
1851 rtx pat, insn;
1852 int set_p;
1853 {
1854 rtx src = SET_SRC (pat);
1855 rtx dest = SET_DEST (pat);
1856
1857 if (GET_CODE (src) == CALL)
1858 hash_scan_call (src, insn);
1859
1860 if (GET_CODE (dest) == REG)
1861 {
1862 int regno = REGNO (dest);
1863 rtx tmp;
1864
1865 /* Only record sets of pseudo-regs in the hash table. */
1866 if (! set_p
1867 && regno >= FIRST_PSEUDO_REGISTER
1868 /* Don't GCSE something if we can't do a reg/reg copy. */
1869 && can_copy_p [GET_MODE (dest)]
1870 /* Is SET_SRC something we want to gcse? */
1871 && want_to_gcse_p (src))
1872 {
1873 /* An expression is not anticipatable if its operands are
1874 modified before this insn. */
1875 int antic_p = oprs_anticipatable_p (src, insn);
1876 /* An expression is not available if its operands are
1877 subsequently modified, including this insn. */
1878 int avail_p = oprs_available_p (src, insn);
1879
1880 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p);
1881 }
1882
1883 /* Record sets for constant/copy propagation. */
1884 else if (set_p
1885 && regno >= FIRST_PSEUDO_REGISTER
1886 && ((GET_CODE (src) == REG
1887 && REGNO (src) >= FIRST_PSEUDO_REGISTER
1888 && can_copy_p [GET_MODE (dest)])
1889 || GET_CODE (src) == CONST_INT
1890 || GET_CODE (src) == SYMBOL_REF
1891 || GET_CODE (src) == CONST_DOUBLE)
1892 /* A copy is not available if its src or dest is subsequently
1893 modified. Here we want to search from INSN+1 on, but
1894 oprs_available_p searches from INSN on. */
1895 && (insn == BLOCK_END (BLOCK_NUM (insn))
1896 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
1897 && oprs_available_p (pat, tmp))))
1898 insert_set_in_table (pat, insn);
1899 }
1900 }
1901
1902 static void
1903 hash_scan_clobber (x, insn)
1904 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
1905 {
1906 /* Currently nothing to do. */
1907 }
1908
1909 static void
1910 hash_scan_call (x, insn)
1911 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
1912 {
1913 /* Currently nothing to do. */
1914 }
1915
1916 /* Process INSN and add hash table entries as appropriate.
1917
1918 Only available expressions that set a single pseudo-reg are recorded.
1919
1920 Single sets in a PARALLEL could be handled, but it's an extra complication
1921 that isn't dealt with right now. The trick is handling the CLOBBERs that
1922 are also in the PARALLEL. Later.
1923
1924 If SET_P is non-zero, this is for the assignment hash table,
1925 otherwise it is for the expression hash table.
1926 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block, and should
1927 not record any expressions. */
1928
1929 static void
1930 hash_scan_insn (insn, set_p, in_libcall_block)
1931 rtx insn;
1932 int set_p;
1933 int in_libcall_block;
1934 {
1935 rtx pat = PATTERN (insn);
1936 int i;
1937
1938 /* Pick out the sets of INSN and for other forms of instructions record
1939 what's been modified. */
1940
1941 if (GET_CODE (pat) == SET && ! in_libcall_block)
1942 {
1943 /* Ignore obvious no-ops. */
1944 if (SET_SRC (pat) != SET_DEST (pat))
1945 hash_scan_set (pat, insn, set_p);
1946 }
1947 else if (GET_CODE (pat) == PARALLEL)
1948 for (i = 0; i < XVECLEN (pat, 0); i++)
1949 {
1950 rtx x = XVECEXP (pat, 0, i);
1951
1952 if (GET_CODE (x) == SET)
1953 {
1954 if (GET_CODE (SET_SRC (x)) == CALL)
1955 hash_scan_call (SET_SRC (x), insn);
1956 }
1957 else if (GET_CODE (x) == CLOBBER)
1958 hash_scan_clobber (x, insn);
1959 else if (GET_CODE (x) == CALL)
1960 hash_scan_call (x, insn);
1961 }
1962
1963 else if (GET_CODE (pat) == CLOBBER)
1964 hash_scan_clobber (pat, insn);
1965 else if (GET_CODE (pat) == CALL)
1966 hash_scan_call (pat, insn);
1967 }
1968
1969 static void
1970 dump_hash_table (file, name, table, table_size, total_size)
1971 FILE *file;
1972 const char *name;
1973 struct expr **table;
1974 int table_size, total_size;
1975 {
1976 int i;
1977 /* Flattened out table, so it's printed in proper order. */
1978 struct expr **flat_table;
1979 unsigned int *hash_val;
1980 struct expr *expr;
1981
1982 flat_table
1983 = (struct expr **) xcalloc (total_size, sizeof (struct expr *));
1984 hash_val = (unsigned int *) xmalloc (total_size * sizeof (unsigned int));
1985
1986 for (i = 0; i < table_size; i++)
1987 for (expr = table[i]; expr != NULL; expr = expr->next_same_hash)
1988 {
1989 flat_table[expr->bitmap_index] = expr;
1990 hash_val[expr->bitmap_index] = i;
1991 }
1992
1993 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
1994 name, table_size, total_size);
1995
1996 for (i = 0; i < total_size; i++)
1997 if (flat_table[i] != 0)
1998 {
1999 expr = flat_table[i];
2000 fprintf (file, "Index %d (hash value %d)\n ",
2001 expr->bitmap_index, hash_val[i]);
2002 print_rtl (file, expr->expr);
2003 fprintf (file, "\n");
2004 }
2005
2006 fprintf (file, "\n");
2007
2008 free (flat_table);
2009 free (hash_val);
2010 }
2011
2012 /* Record register first/last/block set information for REGNO in INSN.
2013
2014 reg_first_set records the first place in the block where the register
2015 is set and is used to compute "anticipatability".
2016
2017 reg_last_set records the last place in the block where the register
2018 is set and is used to compute "availability".
2019
2020 reg_set_in_block records whether the register is set in the block
2021 and is used to compute "transparency". */
2022
2023 static void
2024 record_last_reg_set_info (insn, regno)
2025 rtx insn;
2026 int regno;
2027 {
2028 if (reg_first_set[regno] == NEVER_SET)
2029 reg_first_set[regno] = INSN_CUID (insn);
2030
2031 reg_last_set[regno] = INSN_CUID (insn);
2032 SET_BIT (reg_set_in_block[BLOCK_NUM (insn)], regno);
2033 }
2034
2035 /* Record memory first/last/block set information for INSN. */
2036
2037 static void
2038 record_last_mem_set_info (insn)
2039 rtx insn;
2040 {
2041 if (mem_first_set == NEVER_SET)
2042 mem_first_set = INSN_CUID (insn);
2043
2044 mem_last_set = INSN_CUID (insn);
2045 mem_set_in_block[BLOCK_NUM (insn)] = 1;
2046 }
2047
2048 /* Called from compute_hash_table via note_stores to handle one
2049 SET or CLOBBER in an insn. DATA is really the instruction in which
2050 the SET is taking place. */
2051
2052 static void
2053 record_last_set_info (dest, setter, data)
2054 rtx dest, setter ATTRIBUTE_UNUSED;
2055 void *data;
2056 {
2057 rtx last_set_insn = (rtx) data;
2058
2059 if (GET_CODE (dest) == SUBREG)
2060 dest = SUBREG_REG (dest);
2061
2062 if (GET_CODE (dest) == REG)
2063 record_last_reg_set_info (last_set_insn, REGNO (dest));
2064 else if (GET_CODE (dest) == MEM
2065 /* Ignore pushes, they clobber nothing. */
2066 && ! push_operand (dest, GET_MODE (dest)))
2067 record_last_mem_set_info (last_set_insn);
2068 }
2069
2070 /* Top level function to create an expression or assignment hash table.
2071
2072 Expression entries are placed in the hash table if
2073 - they are of the form (set (pseudo-reg) src),
2074 - src is something we want to perform GCSE on,
2075 - none of the operands are subsequently modified in the block
2076
2077 Assignment entries are placed in the hash table if
2078 - they are of the form (set (pseudo-reg) src),
2079 - src is something we want to perform const/copy propagation on,
2080 - none of the operands or target are subsequently modified in the block
2081
2082 Currently src must be a pseudo-reg or a constant (CONST_INT, SYMBOL_REF or CONST_DOUBLE).
2083
2084 F is the first insn.
2085 SET_P is non-zero for computing the assignment hash table. */
2086
2087 static void
2088 compute_hash_table (set_p)
2089 int set_p;
2090 {
2091 int bb;
2092
2093 /* While we compute the hash table we also compute a bit array of which
2094 registers are set in which blocks.
2095 We also compute which blocks set memory, in the absence of aliasing
2096 support [which is TODO].
2097 ??? This isn't needed during const/copy propagation, but it's cheap to
2098 compute. Later. */
2099 sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
2100 bzero ((char *) mem_set_in_block, n_basic_blocks);
2101
2102 /* Some working arrays used to track first and last set in each block. */
2103 /* ??? One could use alloca here, but at some size a threshold is crossed
2104 beyond which one should use malloc. Are we at that threshold here? */
2105 reg_first_set = (int *) gmalloc (max_gcse_regno * sizeof (int));
2106 reg_last_set = (int *) gmalloc (max_gcse_regno * sizeof (int));
2107
2108 for (bb = 0; bb < n_basic_blocks; bb++)
2109 {
2110 rtx insn;
2111 unsigned int regno;
2112 int in_libcall_block;
2113 unsigned int i;
2114
2115 /* First pass over the instructions records information used to
2116 determine when registers and memory are first and last set.
2117 ??? The mem_set_in_block and hard-reg reg_set_in_block computation
2118 could be moved to compute_sets since they currently don't change. */
2119
2120 for (i = 0; i < max_gcse_regno; i++)
2121 reg_first_set[i] = reg_last_set[i] = NEVER_SET;
2122
2123 mem_first_set = NEVER_SET;
2124 mem_last_set = NEVER_SET;
2125
2126 for (insn = BLOCK_HEAD (bb);
2127 insn && insn != NEXT_INSN (BLOCK_END (bb));
2128 insn = NEXT_INSN (insn))
2129 {
2130 #ifdef NON_SAVING_SETJMP
2131 if (NON_SAVING_SETJMP && GET_CODE (insn) == NOTE
2132 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
2133 {
2134 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2135 record_last_reg_set_info (insn, regno);
2136 continue;
2137 }
2138 #endif
2139
2140 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
2141 continue;
2142
2143 if (GET_CODE (insn) == CALL_INSN)
2144 {
2145 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2146 if ((call_used_regs[regno]
2147 && regno != STACK_POINTER_REGNUM
2148 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2149 && regno != HARD_FRAME_POINTER_REGNUM
2150 #endif
2151 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2152 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2153 #endif
2154 #if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
2155 && ! (regno == PIC_OFFSET_TABLE_REGNUM && flag_pic)
2156 #endif
2157
2158 && regno != FRAME_POINTER_REGNUM)
2159 || global_regs[regno])
2160 record_last_reg_set_info (insn, regno);
2161
2162 if (! CONST_CALL_P (insn))
2163 record_last_mem_set_info (insn);
2164 }
2165
2166 note_stores (PATTERN (insn), record_last_set_info, insn);
2167 }
2168
2169 /* The next pass builds the hash table. */
2170
2171 for (insn = BLOCK_HEAD (bb), in_libcall_block = 0;
2172 insn && insn != NEXT_INSN (BLOCK_END (bb));
2173 insn = NEXT_INSN (insn))
2174 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2175 {
2176 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2177 in_libcall_block = 1;
2178 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
2179 in_libcall_block = 0;
2180 hash_scan_insn (insn, set_p, in_libcall_block);
2181 }
2182 }
2183
2184 free (reg_first_set);
2185 free (reg_last_set);
2186
2187 /* Catch bugs early. */
2188 reg_first_set = reg_last_set = 0;
2189 }
2190
2191 /* Allocate space for the set hash table.
2192 N_INSNS is the number of instructions in the function.
2193 It is used to determine the number of buckets to use. */
2194
2195 static void
2196 alloc_set_hash_table (n_insns)
2197 int n_insns;
2198 {
2199 int n;
2200
2201 set_hash_table_size = n_insns / 4;
2202 if (set_hash_table_size < 11)
2203 set_hash_table_size = 11;
2204
2205 /* Attempt to maintain efficient use of hash table.
2206 Making it an odd number is simplest for now.
2207 ??? Later take some measurements. */
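/* For example, a function of 1000 insns gets 1000/4 = 250 buckets, made
   odd to give 251; anything under 44 insns gets the 11 bucket minimum. */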
2208 set_hash_table_size |= 1;
2209 n = set_hash_table_size * sizeof (struct expr *);
2210 set_hash_table = (struct expr **) gmalloc (n);
2211 }
2212
2213 /* Free things allocated by alloc_set_hash_table. */
2214
2215 static void
2216 free_set_hash_table ()
2217 {
2218 free (set_hash_table);
2219 }
2220
2221 /* Compute the hash table for doing copy/const propagation. */
2222
2223 static void
2224 compute_set_hash_table ()
2225 {
2226 /* Initialize count of number of entries in hash table. */
2227 n_sets = 0;
2228 bzero ((char *) set_hash_table,
2229 set_hash_table_size * sizeof (struct expr *));
2230
2231 compute_hash_table (1);
2232 }
2233
2234 /* Allocate space for the expression hash table.
2235 N_INSNS is the number of instructions in the function.
2236 It is used to determine the number of buckets to use. */
2237
2238 static void
2239 alloc_expr_hash_table (n_insns)
2240 unsigned int n_insns;
2241 {
2242 int n;
2243
2244 expr_hash_table_size = n_insns / 2;
2245 /* Make sure the amount is usable. */
2246 if (expr_hash_table_size < 11)
2247 expr_hash_table_size = 11;
2248
2249 /* Attempt to maintain efficient use of hash table.
2250 Making it an odd number is simplest for now.
2251 ??? Later take some measurements. */
2252 expr_hash_table_size |= 1;
2253 n = expr_hash_table_size * sizeof (struct expr *);
2254 expr_hash_table = (struct expr **) gmalloc (n);
2255 }
2256
2257 /* Free things allocated by alloc_expr_hash_table. */
2258
2259 static void
2260 free_expr_hash_table ()
2261 {
2262 free (expr_hash_table);
2263 }
2264
2265 /* Compute the hash table for doing GCSE. */
2266
2267 static void
2268 compute_expr_hash_table ()
2269 {
2270 /* Initialize count of number of entries in hash table. */
2271 n_exprs = 0;
2272 bzero ((char *) expr_hash_table,
2273 expr_hash_table_size * sizeof (struct expr *));
2274
2275 compute_hash_table (0);
2276 }
2277 \f
2278 /* Expression tracking support. */
2279
2280 /* Lookup pattern PAT in the expression table.
2281 The result is a pointer to the table entry, or NULL if not found. */
2282
2283 static struct expr *
2284 lookup_expr (pat)
2285 rtx pat;
2286 {
2287 int do_not_record_p;
2288 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2289 expr_hash_table_size);
2290 struct expr *expr;
2291
2292 if (do_not_record_p)
2293 return NULL;
2294
2295 expr = expr_hash_table[hash];
2296
2297 while (expr && ! expr_equiv_p (expr->expr, pat))
2298 expr = expr->next_same_hash;
2299
2300 return expr;
2301 }
2302
2303 /* Lookup REGNO in the set table. If PAT is non-NULL look for the entry that
2304 matches it, otherwise return the first entry for REGNO. The result is a
2305 pointer to the table entry, or NULL if not found. */
2306
2307 static struct expr *
2308 lookup_set (regno, pat)
2309 unsigned int regno;
2310 rtx pat;
2311 {
2312 unsigned int hash = hash_set (regno, set_hash_table_size);
2313 struct expr *expr;
2314
2315 expr = set_hash_table[hash];
2316
2317 if (pat)
2318 {
2319 while (expr && ! expr_equiv_p (expr->expr, pat))
2320 expr = expr->next_same_hash;
2321 }
2322 else
2323 {
2324 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2325 expr = expr->next_same_hash;
2326 }
2327
2328 return expr;
2329 }
2330
2331 /* Return the next entry for REGNO in list EXPR. */
2332
2333 static struct expr *
2334 next_set (regno, expr)
2335 unsigned int regno;
2336 struct expr *expr;
2337 {
2338 do
2339 expr = expr->next_same_hash;
2340 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2341
2342 return expr;
2343 }
2344
2345 /* Reset tables used to keep track of what's still available [since the
2346 start of the block]. */
2347
2348 static void
2349 reset_opr_set_tables ()
2350 {
2351 /* Maintain a bitmap of which regs have been set since beginning of
2352 the block. */
2353 sbitmap_zero (reg_set_bitmap);
2354
2355 /* Also keep a record of the last instruction to modify memory.
2356 For now this is very trivial, we only record whether any memory
2357 location has been modified. */
2358 mem_last_set = 0;
2359 }
2360
2361 /* Return non-zero if the operands of X are not set before INSN in
2362 INSN's basic block. */
2363
2364 static int
2365 oprs_not_set_p (x, insn)
2366 rtx x, insn;
2367 {
2368 int i, j;
2369 enum rtx_code code;
2370 const char *fmt;
2371
2372 if (x == 0)
2373 return 1;
2374
2375 code = GET_CODE (x);
2376 switch (code)
2377 {
2378 case PC:
2379 case CC0:
2380 case CONST:
2381 case CONST_INT:
2382 case CONST_DOUBLE:
2383 case SYMBOL_REF:
2384 case LABEL_REF:
2385 case ADDR_VEC:
2386 case ADDR_DIFF_VEC:
2387 return 1;
2388
2389 case MEM:
2390 if (mem_last_set != 0)
2391 return 0;
2392 else
2393 return oprs_not_set_p (XEXP (x, 0), insn);
2394
2395 case REG:
2396 return ! TEST_BIT (reg_set_bitmap, REGNO (x));
2397
2398 default:
2399 break;
2400 }
2401
2402 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2403 {
2404 if (fmt[i] == 'e')
2405 {
2406 /* If we are about to do the last recursive call
2407 needed at this level, change it into iteration.
2408 This function is called enough to be worth it. */
2409 if (i == 0)
2410 return oprs_not_set_p (XEXP (x, i), insn);
2411
2412 if (! oprs_not_set_p (XEXP (x, i), insn))
2413 return 0;
2414 }
2415 else if (fmt[i] == 'E')
2416 for (j = 0; j < XVECLEN (x, i); j++)
2417 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2418 return 0;
2419 }
2420
2421 return 1;
2422 }
2423
2424 /* Mark things set by a CALL. */
2425
2426 static void
2427 mark_call (insn)
2428 rtx insn;
2429 {
2430 mem_last_set = INSN_CUID (insn);
2431 }
2432
2433 /* Mark things set by a SET. */
2434
2435 static void
2436 mark_set (pat, insn)
2437 rtx pat, insn;
2438 {
2439 rtx dest = SET_DEST (pat);
2440
2441 while (GET_CODE (dest) == SUBREG
2442 || GET_CODE (dest) == ZERO_EXTRACT
2443 || GET_CODE (dest) == SIGN_EXTRACT
2444 || GET_CODE (dest) == STRICT_LOW_PART)
2445 dest = XEXP (dest, 0);
2446
2447 if (GET_CODE (dest) == REG)
2448 SET_BIT (reg_set_bitmap, REGNO (dest));
2449 else if (GET_CODE (dest) == MEM)
2450 mem_last_set = INSN_CUID (insn);
2451
2452 if (GET_CODE (SET_SRC (pat)) == CALL)
2453 mark_call (insn);
2454 }
2455
2456 /* Record things set by a CLOBBER. */
2457
2458 static void
2459 mark_clobber (pat, insn)
2460 rtx pat, insn;
2461 {
2462 rtx clob = XEXP (pat, 0);
2463
2464 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2465 clob = XEXP (clob, 0);
2466
2467 if (GET_CODE (clob) == REG)
2468 SET_BIT (reg_set_bitmap, REGNO (clob));
2469 else
2470 mem_last_set = INSN_CUID (insn);
2471 }
2472
2473 /* Record things set by INSN.
2474 This data is used by oprs_not_set_p. */
2475
2476 static void
2477 mark_oprs_set (insn)
2478 rtx insn;
2479 {
2480 rtx pat = PATTERN (insn);
2481 int i;
2482
2483 if (GET_CODE (pat) == SET)
2484 mark_set (pat, insn);
2485 else if (GET_CODE (pat) == PARALLEL)
2486 for (i = 0; i < XVECLEN (pat, 0); i++)
2487 {
2488 rtx x = XVECEXP (pat, 0, i);
2489
2490 if (GET_CODE (x) == SET)
2491 mark_set (x, insn);
2492 else if (GET_CODE (x) == CLOBBER)
2493 mark_clobber (x, insn);
2494 else if (GET_CODE (x) == CALL)
2495 mark_call (insn);
2496 }
2497
2498 else if (GET_CODE (pat) == CLOBBER)
2499 mark_clobber (pat, insn);
2500 else if (GET_CODE (pat) == CALL)
2501 mark_call (insn);
2502 }
2503
2504 \f
2505 /* Classic GCSE reaching definition support. */
2506
2507 /* Allocate reaching def variables. */
2508
2509 static void
2510 alloc_rd_mem (n_blocks, n_insns)
2511 int n_blocks, n_insns;
2512 {
2513 rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2514 sbitmap_vector_zero (rd_kill, n_basic_blocks);
2515
2516 rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2517 sbitmap_vector_zero (rd_gen, n_basic_blocks);
2518
2519 reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2520 sbitmap_vector_zero (reaching_defs, n_basic_blocks);
2521
2522 rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2523 sbitmap_vector_zero (rd_out, n_basic_blocks);
2524 }
2525
2526 /* Free reaching def variables. */
2527
2528 static void
2529 free_rd_mem ()
2530 {
2531 free (rd_kill);
2532 free (rd_gen);
2533 free (reaching_defs);
2534 free (rd_out);
2535 }
2536
2537 /* Add INSN to the kills of BB. REGNO, set in BB, is killed by INSN. */
2538
2539 static void
2540 handle_rd_kill_set (insn, regno, bb)
2541 rtx insn;
2542 int regno, bb;
2543 {
2544 struct reg_set *this_reg;
2545
2546 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
2547 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2548 SET_BIT (rd_kill[bb], INSN_CUID (this_reg->insn));
2549 }
2550
2551 /* Compute the set of kills for reaching definitions. */
2552
2553 static void
2554 compute_kill_rd ()
2555 {
2556 int bb, cuid;
2557 int regno, i;
2558
2559 /* For each block
2560 For each set bit in `gen' of the block (i.e. each insn which
2561 generates a definition in the block)
2562 Call the reg set by the insn corresponding to that bit regx
2563 Look at the linked list starting at reg_set_table[regx]
2564 For each setting of regx in the linked list, which is not in
2565 this block
2566 Set the bit in `kill' corresponding to that insn. */
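
/* Sketch: if reg 100 is set by insn 7 in block 1 and by insn 42 in block 3,
   and both settings appear as gen bits, then the bit for insn 42 is set in
   rd_kill[1] and the bit for insn 7 is set in rd_kill[3]. (Insn numbers
   here are purely illustrative.) */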
2567 for (bb = 0; bb < n_basic_blocks; bb++)
2568 for (cuid = 0; cuid < max_cuid; cuid++)
2569 if (TEST_BIT (rd_gen[bb], cuid))
2570 {
2571 rtx insn = CUID_INSN (cuid);
2572 rtx pat = PATTERN (insn);
2573
2574 if (GET_CODE (insn) == CALL_INSN)
2575 {
2576 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2577 {
2578 if ((call_used_regs[regno]
2579 && regno != STACK_POINTER_REGNUM
2580 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2581 && regno != HARD_FRAME_POINTER_REGNUM
2582 #endif
2583 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2584 && ! (regno == ARG_POINTER_REGNUM
2585 && fixed_regs[regno])
2586 #endif
2587 #if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
2588 && ! (regno == PIC_OFFSET_TABLE_REGNUM && flag_pic)
2589 #endif
2590 && regno != FRAME_POINTER_REGNUM)
2591 || global_regs[regno])
2592 handle_rd_kill_set (insn, regno, bb);
2593 }
2594 }
2595
2596 if (GET_CODE (pat) == PARALLEL)
2597 {
2598 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2599 {
2600 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2601
2602 if ((code == SET || code == CLOBBER)
2603 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2604 handle_rd_kill_set (insn,
2605 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2606 bb);
2607 }
2608 }
2609 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2610 /* Each setting of this register outside of this block
2611 must be marked in the set of kills in this block. */
2612 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
2613 }
2614 }
2615
2616 /* Compute the reaching definitions as in
2617 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
2618 Chapter 10. It is the same algorithm as used for computing available
2619 expressions but applied to the gens and kills of reaching definitions. */
2620
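/* Roughly, the fixed point iterated to below is, for each basic block BB:

     reaching_defs[BB] = union over predecessors P of rd_out[P]
     rd_out[BB]        = rd_gen[BB] | (reaching_defs[BB] & ~rd_kill[BB])

   with each rd_out[BB] initialized to rd_gen[BB]. */
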
2621 static void
2622 compute_rd ()
2623 {
2624 int bb, changed, passes;
2625
2626 for (bb = 0; bb < n_basic_blocks; bb++)
2627 sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
2628
2629 passes = 0;
2630 changed = 1;
2631 while (changed)
2632 {
2633 changed = 0;
2634 for (bb = 0; bb < n_basic_blocks; bb++)
2635 {
2636 sbitmap_union_of_preds (reaching_defs[bb], rd_out, bb);
2637 changed |= sbitmap_union_of_diff (rd_out[bb], rd_gen[bb],
2638 reaching_defs[bb], rd_kill[bb]);
2639 }
2640 passes++;
2641 }
2642
2643 if (gcse_file)
2644 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
2645 }
2646 \f
2647 /* Classic GCSE available expression support. */
2648
2649 /* Allocate memory for available expression computation. */
2650
2651 static void
2652 alloc_avail_expr_mem (n_blocks, n_exprs)
2653 int n_blocks, n_exprs;
2654 {
2655 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
2656 sbitmap_vector_zero (ae_kill, n_basic_blocks);
2657
2658 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
2659 sbitmap_vector_zero (ae_gen, n_basic_blocks);
2660
2661 ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
2662 sbitmap_vector_zero (ae_in, n_basic_blocks);
2663
2664 ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
2665 sbitmap_vector_zero (ae_out, n_basic_blocks);
2666
2667 u_bitmap = (sbitmap) sbitmap_alloc (n_exprs);
2668 sbitmap_ones (u_bitmap);
2669 }
2670
2671 static void
2672 free_avail_expr_mem ()
2673 {
2674 free (ae_kill);
2675 free (ae_gen);
2676 free (ae_in);
2677 free (ae_out);
2678 free (u_bitmap);
2679 }
2680
2681 /* Compute the set of available expressions generated in each basic block. */
2682
2683 static void
2684 compute_ae_gen ()
2685 {
2686 unsigned int i;
2687 struct expr *expr;
2688 struct occr *occr;
2689
2690 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
2691 This is all we have to do because an expression is not recorded if it
2692 is not available, and the only expressions we want to work with are the
2693 ones that are recorded. */
2694 for (i = 0; i < expr_hash_table_size; i++)
2695 for (expr = expr_hash_table[i]; expr != 0; expr = expr->next_same_hash)
2696 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
2697 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
2698 }
2699
2700 /* Return non-zero if expression X is killed in BB. */
2701
2702 static int
2703 expr_killed_p (x, bb)
2704 rtx x;
2705 int bb;
2706 {
2707 int i, j;
2708 enum rtx_code code;
2709 const char *fmt;
2710
2711 if (x == 0)
2712 return 1;
2713
2714 code = GET_CODE (x);
2715 switch (code)
2716 {
2717 case REG:
2718 return TEST_BIT (reg_set_in_block[bb], REGNO (x));
2719
2720 case MEM:
2721 if (mem_set_in_block[bb])
2722 return 1;
2723 else
2724 return expr_killed_p (XEXP (x, 0), bb);
2725
2726 case PC:
2727 case CC0: /*FIXME*/
2728 case CONST:
2729 case CONST_INT:
2730 case CONST_DOUBLE:
2731 case SYMBOL_REF:
2732 case LABEL_REF:
2733 case ADDR_VEC:
2734 case ADDR_DIFF_VEC:
2735 return 0;
2736
2737 default:
2738 break;
2739 }
2740
2741 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2742 {
2743 if (fmt[i] == 'e')
2744 {
2745 /* If we are about to do the last recursive call
2746 needed at this level, change it into iteration.
2747 This function is called enough to be worth it. */
2748 if (i == 0)
2749 return expr_killed_p (XEXP (x, i), bb);
2750 else if (expr_killed_p (XEXP (x, i), bb))
2751 return 1;
2752 }
2753 else if (fmt[i] == 'E')
2754 for (j = 0; j < XVECLEN (x, i); j++)
2755 if (expr_killed_p (XVECEXP (x, i, j), bb))
2756 return 1;
2757 }
2758
2759 return 0;
2760 }
2761
2762 /* Compute the set of available expressions killed in each basic block. */
2763
2764 static void
2765 compute_ae_kill (ae_gen, ae_kill)
2766 sbitmap *ae_gen, *ae_kill;
2767 {
2768 int bb;
2769 unsigned int i;
2770 struct expr *expr;
2771
2772 for (bb = 0; bb < n_basic_blocks; bb++)
2773 for (i = 0; i < expr_hash_table_size; i++)
2774 for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
2775 {
2776 /* Skip EXPR if generated in this block. */
2777 if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
2778 continue;
2779
2780 if (expr_killed_p (expr->expr, bb))
2781 SET_BIT (ae_kill[bb], expr->bitmap_index);
2782 }
2783 }
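
/* Given ae_gen and ae_kill, compute_available then solves the usual
   forward available-expressions problem over these bitmaps; roughly:

     ae_in[BB]  = intersection over predecessors P of ae_out[P]
     ae_out[BB] = ae_gen[BB] | (ae_in[BB] & ~ae_kill[BB])  */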
2784 \f
2785 /* Actually perform the Classic GCSE optimizations. */
2786
2787 /* Return non-zero if occurrence OCCR of expression EXPR reaches block BB.
2788
2789 CHECK_SELF_LOOP is non-zero if we should consider a block reaching itself
2790 as a positive reach. We want to do this when there are two computations
2791 of the expression in the block.
2792
2793 VISITED is a pointer to a working buffer for tracking which BB's have
2794 been visited; it is allocated and zeroed by the expr_reaches_here_p wrapper.
2795
2796 We treat reaching expressions that go through blocks containing the same
2797 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
2798 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
2799 2 as not reaching. The intent is to improve the probability of finding
2800 only one reaching expression and to reduce register lifetimes by picking
2801 the closest such expression. */
2802
2803 static int
2804 expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
2805 struct occr *occr;
2806 struct expr *expr;
2807 int bb;
2808 int check_self_loop;
2809 char *visited;
2810 {
2811 edge pred;
2812
2813 for (pred = BASIC_BLOCK(bb)->pred; pred != NULL; pred = pred->pred_next)
2814 {
2815 int pred_bb = pred->src->index;
2816
2817 if (visited[pred_bb])
2818 /* This predecessor has already been visited. Nothing to do. */
2819 ;
2820 else if (pred_bb == bb)
2821 {
2822 /* BB loops on itself. */
2823 if (check_self_loop
2824 && TEST_BIT (ae_gen[pred_bb], expr->bitmap_index)
2825 && BLOCK_NUM (occr->insn) == pred_bb)
2826 return 1;
2827
2828 visited[pred_bb] = 1;
2829 }
2830
2831 /* Ignore this predecessor if it kills the expression. */
2832 else if (TEST_BIT (ae_kill[pred_bb], expr->bitmap_index))
2833 visited[pred_bb] = 1;
2834
2835 /* Does this predecessor generate this expression? */
2836 else if (TEST_BIT (ae_gen[pred_bb], expr->bitmap_index))
2837 {
2838 /* Is this the occurrence we're looking for?
2839 Note that there's only one generating occurrence per block
2840 so we just need to check the block number. */
2841 if (BLOCK_NUM (occr->insn) == pred_bb)
2842 return 1;
2843
2844 visited[pred_bb] = 1;
2845 }
2846
2847 /* Neither gen nor kill. */
2848 else
2849 {
2850 visited[pred_bb] = 1;
2851 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
2852 visited))
2853
2854 return 1;
2855 }
2856 }
2857
2858 /* All paths have been checked. */
2859 return 0;
2860 }
2861
2862 /* This wrapper for expr_reaches_here_p_work() is to ensure that any
2863 memory allocated for that function is returned. */
2864
2865 static int
2866 expr_reaches_here_p (occr, expr, bb, check_self_loop)
2867 struct occr *occr;
2868 struct expr *expr;
2869 int bb;
2870 int check_self_loop;
2871 {
2872 int rval;
2873 char *visited = (char *) xcalloc (n_basic_blocks, 1);
2874
2875 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
2876
2877 free (visited);
2878 return rval;
2879 }
2880
2881 /* Return the instruction that computes EXPR that reaches INSN's basic block.
2882 If there is more than one such instruction, return NULL.
2883
2884 Called only by handle_avail_expr. */
2885
2886 static rtx
2887 computing_insn (expr, insn)
2888 struct expr *expr;
2889 rtx insn;
2890 {
2891 int bb = BLOCK_NUM (insn);
2892
2893 if (expr->avail_occr->next == NULL)
2894 {
2895 if (BLOCK_NUM (expr->avail_occr->insn) == bb)
2896 /* The available expression is actually itself
2897 (i.e. a loop in the flow graph) so do nothing. */
2898 return NULL;
2899
2900 /* (FIXME) This handles the case where we found a pattern that was
2901 created by a substitution that took place. */
2902 return expr->avail_occr->insn;
2903 }
2904 else
2905 {
2906 /* Pattern is computed more than once.
2907 Search backwards from this insn to see how many of these
2908 computations actually reach this insn. */
2909 struct occr *occr;
2910 rtx insn_computes_expr = NULL;
2911 int can_reach = 0;
2912
2913 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
2914 {
2915 if (BLOCK_NUM (occr->insn) == bb)
2916 {
2917 /* The expression is generated in this block.
2918 The only time we care about this is when the expression
2919 is generated later in the block [and thus there's a loop].
2920 We let the normal cse pass handle the other cases. */
2921 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
2922 && expr_reaches_here_p (occr, expr, bb, 1))
2923 {
2924 can_reach++;
2925 if (can_reach > 1)
2926 return NULL;
2927
2928 insn_computes_expr = occr->insn;
2929 }
2930 }
2931 else if (expr_reaches_here_p (occr, expr, bb, 0))
2932 {
2933 can_reach++;
2934 if (can_reach > 1)
2935 return NULL;
2936
2937 insn_computes_expr = occr->insn;
2938 }
2939 }
2940
2941 if (insn_computes_expr == NULL)
2942 abort ();
2943
2944 return insn_computes_expr;
2945 }
2946 }
2947
2948 /* Return non-zero if the definition in DEF_INSN can reach INSN.
2949 Only called by can_disregard_other_sets. */
2950
2951 static int
2952 def_reaches_here_p (insn, def_insn)
2953 rtx insn, def_insn;
2954 {
2955 rtx reg;
2956
2957 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
2958 return 1;
2959
2960 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
2961 {
2962 if (INSN_CUID (def_insn) < INSN_CUID (insn))
2963 {
2964 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
2965 return 1;
2966 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
2967 reg = XEXP (PATTERN (def_insn), 0);
2968 else if (GET_CODE (PATTERN (def_insn)) == SET)
2969 reg = SET_DEST (PATTERN (def_insn));
2970 else
2971 abort ();
2972
2973 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
2974 }
2975 else
2976 return 0;
2977 }
2978
2979 return 0;
2980 }
2981
2982 /* Return non-zero if *ADDR_THIS_REG can only have one value at INSN. The
2983 value returned is the number of definitions that reach INSN. Returning a
2984 value of zero means that [maybe] more than one definition reaches INSN and
2985 the caller can't perform whatever optimization it is trying to do; i.e. it is
2986 always safe to return zero. */
2987
2988 static int
2989 can_disregard_other_sets (addr_this_reg, insn, for_combine)
2990 struct reg_set **addr_this_reg;
2991 rtx insn;
2992 int for_combine;
2993 {
2994 int number_of_reaching_defs = 0;
2995 struct reg_set *this_reg;
2996
2997 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
2998 if (def_reaches_here_p (insn, this_reg->insn))
2999 {
3000 number_of_reaching_defs++;
3001 /* Ignore parallels for now. */
3002 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3003 return 0;
3004
3005 if (!for_combine
3006 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3007 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3008 SET_SRC (PATTERN (insn)))))
3009 /* A setting of the reg to a different value reaches INSN. */
3010 return 0;
3011
3012 if (number_of_reaching_defs > 1)
3013 {
3014 /* If in this setting the value the register is being set to is
3015 equal to the previous value the register was set to, and this
3016 setting reaches the insn we are trying to do the substitution
3017 on, then we are ok. */
3018 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3019 return 0;
3020 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3021 SET_SRC (PATTERN (insn))))
3022 return 0;
3023 }
3024
3025 *addr_this_reg = this_reg;
3026 }
3027
3028 return number_of_reaching_defs;
3029 }
3030
3031 /* Expression computed by insn is available and the substitution is legal,
3032 so try to perform the substitution.
3033
3034 The result is non-zero if any changes were made. */
3035
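/* In outline (register numbers are purely illustrative): given
     insn_computes_expr:  (set (reg 100) (plus (reg 101) (reg 102)))
     insn:                (set (reg 103) (plus (reg 101) (reg 102)))
   and exactly one reaching computation, we either rewrite INSN's source to
   use reg 100 directly, or, if reg 100 is set more than once, emit a copy
   of reg 100 into a fresh pseudo right after insn_computes_expr and rewrite
   INSN's source to use that pseudo instead. */
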
3036 static int
3037 handle_avail_expr (insn, expr)
3038 rtx insn;
3039 struct expr *expr;
3040 {
3041 rtx pat, insn_computes_expr;
3042 rtx to;
3043 struct reg_set *this_reg;
3044 int found_setting, use_src;
3045 int changed = 0;
3046
3047 /* We only handle the case where one computation of the expression
3048 reaches this instruction. */
3049 insn_computes_expr = computing_insn (expr, insn);
3050 if (insn_computes_expr == NULL)
3051 return 0;
3052
3053 found_setting = 0;
3054 use_src = 0;
3055
3056 /* At this point we know only one computation of EXPR outside of this
3057 block reaches this insn. Now try to find a register that the
3058 expression is computed into. */
3059 if (GET_CODE (SET_SRC (PATTERN (insn_computes_expr))) == REG)
3060 {
3061 /* This is the case when the available expression that reaches
3062 here has already been handled as an available expression. */
3063 unsigned int regnum_for_replacing
3064 = REGNO (SET_SRC (PATTERN (insn_computes_expr)));
3065
3066 /* If the register was created by GCSE we can't use `reg_set_table',
3067 however we know it's set only once. */
3068 if (regnum_for_replacing >= max_gcse_regno
3069 /* If the register the expression is computed into is set only once,
3070 or only one set reaches this insn, we can use it. */
3071 || (((this_reg = reg_set_table[regnum_for_replacing]),
3072 this_reg->next == NULL)
3073 || can_disregard_other_sets (&this_reg, insn, 0)))
3074 {
3075 use_src = 1;
3076 found_setting = 1;
3077 }
3078 }
3079
3080 if (!found_setting)
3081 {
3082 unsigned int regnum_for_replacing
3083 = REGNO (SET_DEST (PATTERN (insn_computes_expr)));
3084
3085 /* This shouldn't happen. */
3086 if (regnum_for_replacing >= max_gcse_regno)
3087 abort ();
3088
3089 this_reg = reg_set_table[regnum_for_replacing];
3090
3091 /* If the register the expression is computed into is set only once,
3092 or only one set reaches this insn, use it. */
3093 if (this_reg->next == NULL
3094 || can_disregard_other_sets (&this_reg, insn, 0))
3095 found_setting = 1;
3096 }
3097
3098 if (found_setting)
3099 {
3100 pat = PATTERN (insn);
3101 if (use_src)
3102 to = SET_SRC (PATTERN (insn_computes_expr));
3103 else
3104 to = SET_DEST (PATTERN (insn_computes_expr));
3105 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3106
3107 /* We should be able to ignore the return code from validate_change but
3108 to play it safe we check. */
3109 if (changed)
3110 {
3111 gcse_subst_count++;
3112 if (gcse_file != NULL)
3113 {
3114 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3115 INSN_UID (insn));
3116 fprintf (gcse_file, " reg %d %s insn %d\n",
3117 REGNO (to), use_src ? "from" : "set in",
3118 INSN_UID (insn_computes_expr));
3119 }
3120 }
3121 }
3122
3123 /* The register that the expr is computed into is set more than once. */
3124 else if (1 /*expensive_op(this_pattrn->op) && do_expensive_gcse)*/)
3125 {
3126 /* Insert an insn after insnx that copies the reg set in insnx
3127 into a new pseudo register; call this new register REGN.
3128 From insnx until the end of the basic block, or until REGB is set,
3129 replace all uses of REGB with REGN. */
3130 rtx new_insn;
3131
3132 to = gen_reg_rtx (GET_MODE (SET_DEST (PATTERN (insn_computes_expr))));
3133
3134 /* Generate the new insn. */
3135 /* ??? If the change fails, we return 0, even though we created
3136 an insn. I think this is ok. */
3137 new_insn
3138 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3139 SET_DEST (PATTERN
3140 (insn_computes_expr))),
3141 insn_computes_expr);
3142
3143 /* Keep block number table up to date. */
3144 set_block_num (new_insn, BLOCK_NUM (insn_computes_expr));
3145
3146 /* Keep register set table up to date. */
3147 record_one_set (REGNO (to), new_insn);
3148
3149 gcse_create_count++;
3150 if (gcse_file != NULL)
3151 {
3152 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3153 INSN_UID (NEXT_INSN (insn_computes_expr)),
3154 REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3155 fprintf (gcse_file, ", computed in insn %d,\n",
3156 INSN_UID (insn_computes_expr));
3157 fprintf (gcse_file, " into newly allocated reg %d\n",
3158 REGNO (to));
3159 }
3160
3161 pat = PATTERN (insn);
3162
3163 /* Do register replacement for INSN. */
3164 changed = validate_change (insn, &SET_SRC (pat),
3165 SET_DEST (PATTERN
3166 (NEXT_INSN (insn_computes_expr))),
3167 0);
3168
3169 /* We should be able to ignore the return code from validate_change but
3170 to play it safe we check. */
3171 if (changed)
3172 {
3173 gcse_subst_count++;
3174 if (gcse_file != NULL)
3175 {
3176 fprintf (gcse_file,
3177 "GCSE: Replacing the source in insn %d with reg %d ",
3178 INSN_UID (insn),
3179 REGNO (SET_DEST (PATTERN (NEXT_INSN
3180 (insn_computes_expr)))));
3181 fprintf (gcse_file, "set in insn %d\n",
3182 INSN_UID (insn_computes_expr));
3183 }
3184 }
3185 }
3186
3187 return changed;
3188 }
3189
3190 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3191 the dataflow analysis has been done.
3192
3193 The result is non-zero if a change was made. */
3194
3195 static int
3196 classic_gcse ()
3197 {
3198 int bb, changed;
3199 rtx insn;
3200
3201 /* Note we start at block 1. */
3202
3203 changed = 0;
3204 for (bb = 1; bb < n_basic_blocks; bb++)
3205 {
3206 /* Reset tables used to keep track of what's still valid [since the
3207 start of the block]. */
3208 reset_opr_set_tables ();
3209
3210 for (insn = BLOCK_HEAD (bb);
3211 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
3212 insn = NEXT_INSN (insn))
3213 {
3214 /* Is insn of form (set (pseudo-reg) ...)? */
3215 if (GET_CODE (insn) == INSN
3216 && GET_CODE (PATTERN (insn)) == SET
3217 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3218 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3219 {
3220 rtx pat = PATTERN (insn);
3221 rtx src = SET_SRC (pat);
3222 struct expr *expr;
3223
3224 if (want_to_gcse_p (src)
3225 /* Is the expression recorded? */
3226 && ((expr = lookup_expr (src)) != NULL)
3227 /* Is the expression available [at the start of the
3228 block]? */
3229 && TEST_BIT (ae_in[bb], expr->bitmap_index)
3230 /* Are the operands unchanged since the start of the
3231 block? */
3232 && oprs_not_set_p (src, insn))
3233 changed |= handle_avail_expr (insn, expr);
3234 }
3235
3236 /* Keep track of everything modified by this insn. */
3237 /* ??? Need to be careful w.r.t. mods done to INSN. */
3238 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3239 mark_oprs_set (insn);
3240 }
3241 }
3242
3243 return changed;
3244 }
3245
3246 /* Top level routine to perform one classic GCSE pass.
3247
3248 Return non-zero if a change was made. */
3249
3250 static int
3251 one_classic_gcse_pass (pass)
3252 int pass;
3253 {
3254 int changed = 0;
3255
3256 gcse_subst_count = 0;
3257 gcse_create_count = 0;
3258
3259 alloc_expr_hash_table (max_cuid);
3260 alloc_rd_mem (n_basic_blocks, max_cuid);
3261 compute_expr_hash_table ();
3262 if (gcse_file)
3263 dump_hash_table (gcse_file, "Expression", expr_hash_table,
3264 expr_hash_table_size, n_exprs);
3265
3266 if (n_exprs > 0)
3267 {
3268 compute_kill_rd ();
3269 compute_rd ();
3270 alloc_avail_expr_mem (n_basic_blocks, n_exprs);
3271 compute_ae_gen ();
3272 compute_ae_kill (ae_gen, ae_kill);
3273 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3274 changed = classic_gcse ();
3275 free_avail_expr_mem ();
3276 }
3277
3278 free_rd_mem ();
3279 free_expr_hash_table ();
3280
3281 if (gcse_file)
3282 {
3283 fprintf (gcse_file, "\n");
3284 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3285 current_function_name, pass, bytes_used, gcse_subst_count);
3286 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3287 }
3288
3289 return changed;
3290 }
3291 \f
3292 /* Compute copy/constant propagation working variables. */
3293
3294 /* Local properties of assignments. */
3295 static sbitmap *cprop_pavloc;
3296 static sbitmap *cprop_absaltered;
3297
3298 /* Global properties of assignments (computed from the local properties). */
3299 static sbitmap *cprop_avin;
3300 static sbitmap *cprop_avout;
3301
3302 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3303 basic blocks. N_SETS is the number of sets. */
3304
3305 static void
3306 alloc_cprop_mem (n_blocks, n_sets)
3307 int n_blocks, n_sets;
3308 {
3309 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3310 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3311
3312 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3313 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3314 }
3315
3316 /* Free vars used by copy/const propagation. */
3317
3318 static void
3319 free_cprop_mem ()
3320 {
3321 free (cprop_pavloc);
3322 free (cprop_absaltered);
3323 free (cprop_avin);
3324 free (cprop_avout);
3325 }
3326
3327 /* For each block, compute whether X is transparent. X is either an
3328 expression or an assignment [though we don't care which, for this context
3329 an assignment is treated as an expression]. For each block where an
3330 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3331 bit in BMAP. */
3332
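/* For instance, the expression (plus (reg 100) (reg 101)) is not
   transparent in any block that sets reg 100 or reg 101, and a MEM is
   (roughly) not transparent in any block that stores to memory, since no
   aliasing information is consulted here. */
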
3333 static void
3334 compute_transp (x, indx, bmap, set_p)
3335 rtx x;
3336 int indx;
3337 sbitmap *bmap;
3338 int set_p;
3339 {
3340 int bb, i, j;
3341 enum rtx_code code;
3342 reg_set *r;
3343 const char *fmt;
3344
3345 /* repeat is used to turn tail-recursion into iteration since GCC
3346 can't do it when there's no return value. */
3347 repeat:
3348
3349 if (x == 0)
3350 return;
3351
3352 code = GET_CODE (x);
3353 switch (code)
3354 {
3355 case REG:
3356 if (set_p)
3357 {
3358 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3359 {
3360 for (bb = 0; bb < n_basic_blocks; bb++)
3361 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3362 SET_BIT (bmap[bb], indx);
3363 }
3364 else
3365 {
3366 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3367 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3368 }
3369 }
3370 else
3371 {
3372 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3373 {
3374 for (bb = 0; bb < n_basic_blocks; bb++)
3375 if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
3376 RESET_BIT (bmap[bb], indx);
3377 }
3378 else
3379 {
3380 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3381 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3382 }
3383 }
3384
3385 return;
3386
3387 case MEM:
3388 if (set_p)
3389 {
3390 for (bb = 0; bb < n_basic_blocks; bb++)
3391 if (mem_set_in_block[bb])
3392 SET_BIT (bmap[bb], indx);
3393 }
3394 else
3395 {
3396 for (bb = 0; bb < n_basic_blocks; bb++)
3397 if (mem_set_in_block[bb])
3398 RESET_BIT (bmap[bb], indx);
3399 }
3400
3401 x = XEXP (x, 0);
3402 goto repeat;
3403
3404 case PC:
3405 case CC0: /*FIXME*/
3406 case CONST:
3407 case CONST_INT:
3408 case CONST_DOUBLE:
3409 case SYMBOL_REF:
3410 case LABEL_REF:
3411 case ADDR_VEC:
3412 case ADDR_DIFF_VEC:
3413 return;
3414
3415 default:
3416 break;
3417 }
3418
3419 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3420 {
3421 if (fmt[i] == 'e')
3422 {
3423 /* If we are about to do the last recursive call
3424 needed at this level, change it into iteration.
3425 This function is called enough to be worth it. */
3426 if (i == 0)
3427 {
3428 x = XEXP (x, i);
3429 goto repeat;
3430 }
3431
3432 compute_transp (XEXP (x, i), indx, bmap, set_p);
3433 }
3434 else if (fmt[i] == 'E')
3435 for (j = 0; j < XVECLEN (x, i); j++)
3436 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
3437 }
3438 }
3439
3440 /* Top level routine to do the dataflow analysis needed by copy/const
3441 propagation. */
3442
3443 static void
3444 compute_cprop_data ()
3445 {
3446 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, 1);
3447 compute_available (cprop_pavloc, cprop_absaltered,
3448 cprop_avout, cprop_avin);
3449 }
3450 \f
3451 /* Copy/constant propagation. */
3452
3453 /* Maximum number of register uses in an insn that we handle. */
3454 #define MAX_USES 8
3455
3456 /* Table of uses found in an insn.
3457 Allocated statically to avoid alloc/free complexity and overhead. */
3458 static struct reg_use reg_use_table[MAX_USES];
3459
3460 /* Index into `reg_use_table' while building it. */
3461 static int reg_use_count;
3462
3463 /* Set up a list of register numbers used in INSN. The found uses are stored
3464 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3465 and contains the number of uses in the table upon exit.
3466
3467 ??? If a register appears multiple times we will record it multiple times.
3468 This doesn't hurt anything but it will slow things down. */
3469
3470 static void
3471 find_used_regs (x)
3472 rtx x;
3473 {
3474 int i, j;
3475 enum rtx_code code;
3476 const char *fmt;
3477
3478 /* repeat is used to turn tail-recursion into iteration since GCC
3479 can't do it when there's no return value. */
3480 repeat:
3481
3482 if (x == 0)
3483 return;
3484
3485 code = GET_CODE (x);
3486 switch (code)
3487 {
3488 case REG:
3489 if (reg_use_count == MAX_USES)
3490 return;
3491
3492 reg_use_table[reg_use_count].reg_rtx = x;
3493 reg_use_count++;
3494 return;
3495
3496 case MEM:
3497 x = XEXP (x, 0);
3498 goto repeat;
3499
3500 case PC:
3501 case CC0:
3502 case CONST:
3503 case CONST_INT:
3504 case CONST_DOUBLE:
3505 case SYMBOL_REF:
3506 case LABEL_REF:
3507 case CLOBBER:
3508 case ADDR_VEC:
3509 case ADDR_DIFF_VEC:
3510 case ASM_INPUT: /*FIXME*/
3511 return;
3512
3513 case SET:
3514 if (GET_CODE (SET_DEST (x)) == MEM)
3515 find_used_regs (SET_DEST (x));
3516 x = SET_SRC (x);
3517 goto repeat;
3518
3519 default:
3520 break;
3521 }
3522
3523 /* Recursively scan the operands of this expression. */
3524
3525 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3526 {
3527 if (fmt[i] == 'e')
3528 {
3529 /* If we are about to do the last recursive call
3530 needed at this level, change it into iteration.
3531 This function is called enough to be worth it. */
3532 if (i == 0)
3533 {
3534 x = XEXP (x, 0);
3535 goto repeat;
3536 }
3537
3538 find_used_regs (XEXP (x, i));
3539 }
3540 else if (fmt[i] == 'E')
3541 for (j = 0; j < XVECLEN (x, i); j++)
3542 find_used_regs (XVECEXP (x, i, j));
3543 }
3544 }
3545
3546 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3547 Returns non-zero if successful. */
3548
3549 static int
3550 try_replace_reg (from, to, insn)
3551 rtx from, to, insn;
3552 {
3553 rtx note;
3554 rtx src;
3555 int success;
3556 rtx set;
3557
3558 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
3559
3560 if (!note)
3561 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
3562
3563 /* If this fails we could try to simplify the result of the
3564 replacement and attempt to recognize the simplified insn.
3565
3566 But we need a general simplify_rtx that doesn't have pass
3567 specific state variables. I'm not aware of one at the moment. */
3568
3569 success = validate_replace_src (from, to, insn);
3570 set = single_set (insn);
3571
3572 /* We've failed to do the replacement. Try to add a REG_EQUAL note so we
3573 don't lose the information. */
3574 if (!success && !note)
3575 {
3576 if (!set)
3577 return 0;
3578
3579 note = REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL,
3580 copy_rtx (SET_SRC (set)),
3581 REG_NOTES (insn));
3582 }
3583
3584 /* Always do the replacement in REG_EQUAL and REG_EQUIV notes. Also
3585 try to simplify them. */
3586 if (note)
3587 {
3588 rtx simplified;
3589
3590 src = XEXP (note, 0);
3591 replace_rtx (src, from, to);
3592
3593 /* Try to simplify resulting note. */
3594 simplified = simplify_rtx (src);
3595 if (simplified)
3596 {
3597 src = simplified;
3598 XEXP (note, 0) = src;
3599 }
3600
3601 /* REG_EQUAL may get simplified into a register.
3602 We don't allow that. Remove that note. This code ought
3603 not to happen, because the previous code ought to synthesize a
3604 reg-reg move, but be on the safe side. */
3605 else if (REG_P (src))
3606 remove_note (insn, note);
3607 }
3608 return success;
3609 }
3610
3611 /* Find a set of REGNO that is available on entry to INSN's block.
3612 Returns NULL if no such set is found. */
3613
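/* A sketch of the copy chain this follows (register numbers are
   illustrative): if both
     (set (reg 101) (const_int 5))
     (set (reg 102) (reg 101))
   are available at the start of INSN's block, then a lookup of reg 102
   first finds the copy from reg 101, and the next iteration follows
   reg 101 to the constant 5, which is the set we prefer to return for
   substitution. */
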
3614 static struct expr *
3615 find_avail_set (regno, insn)
3616 int regno;
3617 rtx insn;
3618 {
3619 /* SET1 contains the last set found that can be returned to the caller for
3620 use in a substitution. */
3621 struct expr *set1 = 0;
3622
3623 /* Loops are not possible here. To get a loop we would need two sets
3624 available at the start of the block containing INSN, i.e. we would
3625 need two sets like this available at the start of the block:
3626
3627 (set (reg X) (reg Y))
3628 (set (reg Y) (reg X))
3629
3630 This can not happen since the set of (reg Y) would have killed the
3631 set of (reg X) making it unavailable at the start of this block. */
3632 while (1)
3633 {
3634 rtx src;
3635 struct expr *set = lookup_set (regno, NULL_RTX);
3636
3637 /* Find a set that is available at the start of the block
3638 which contains INSN. */
3639 while (set)
3640 {
3641 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
3642 break;
3643 set = next_set (regno, set);
3644 }
3645
3646 /* If no available set was found we've reached the end of the
3647 (possibly empty) copy chain. */
3648 if (set == 0)
3649 break;
3650
3651 if (GET_CODE (set->expr) != SET)
3652 abort ();
3653
3654 src = SET_SRC (set->expr);
3655
3656 /* We know the set is available.
3657 Now check that SRC is ANTLOC (i.e. none of the source operands
3658 have changed since the start of the block).
3659
3660 If the source operand changed, we may still use it for the next
3661 iteration of this loop, but we may not use it for substitutions. */
3662
3663 if (CONSTANT_P (src) || oprs_not_set_p (src, insn))
3664 set1 = set;
3665
3666 /* If the source of the set is anything except a register, then
3667 we have reached the end of the copy chain. */
3668 if (GET_CODE (src) != REG)
3669 break;
3670
3671 /* Follow the copy chain, i.e. start another iteration of the loop
3672 and see if we have an available copy into SRC. */
3673 regno = REGNO (src);
3674 }
3675
3676 /* SET1 holds the last set that was available and anticipatable at
3677 INSN. */
3678 return set1;
3679 }
3680
3681 /* Subroutine of cprop_insn that tries to propagate constants into
3682 JUMP_INSNS. INSN must be a conditional jump; COPY is a copy of it
3683 that we can use for substitutions.
3684 REG_USED is the use we will try to replace, SRC is the constant we
3685 will try to substitute for it.
3686 Returns nonzero if a change was made. */
3687
3688 static int
3689 cprop_jump (insn, copy, reg_used, src)
3690 rtx insn, copy;
3691 struct reg_use *reg_used;
3692 rtx src;
3693 {
3694 rtx set = PATTERN (copy);
3695 rtx temp;
3696
3697 /* Replace the register with the appropriate constant. */
3698 replace_rtx (SET_SRC (set), reg_used->reg_rtx, src);
3699
3700 temp = simplify_ternary_operation (GET_CODE (SET_SRC (set)),
3701 GET_MODE (SET_SRC (set)),
3702 GET_MODE (XEXP (SET_SRC (set), 0)),
3703 XEXP (SET_SRC (set), 0),
3704 XEXP (SET_SRC (set), 1),
3705 XEXP (SET_SRC (set), 2));
3706
3707 /* If no simplification can be made, then try the next
3708 register. */
3709 if (temp == 0)
3710 return 0;
3711
3712 SET_SRC (set) = temp;
3713
3714 /* That may have changed the structure of TEMP, so
3715 force it to be rerecognized if it has not turned
3716 into a nop or unconditional jump. */
3717
3718 INSN_CODE (copy) = -1;
3719 if ((SET_DEST (set) == pc_rtx
3720 && (SET_SRC (set) == pc_rtx
3721 || GET_CODE (SET_SRC (set)) == LABEL_REF))
3722 || recog (PATTERN (copy), copy, NULL) >= 0)
3723 {
3724 /* This has either become an unconditional jump
3725 or a nop-jump. We'd like to delete nop jumps
3726 here, but doing so confuses gcse. So we just
3727 make the replacement and let later passes
3728 sort things out. */
3729 PATTERN (insn) = set;
3730 INSN_CODE (insn) = -1;
3731
3732 /* One less use of the label this insn used to jump to
3733 if we turned this into a NOP jump. */
3734 if (SET_SRC (set) == pc_rtx && JUMP_LABEL (insn) != 0)
3735 --LABEL_NUSES (JUMP_LABEL (insn));
3736
3737 /* If this has turned into an unconditional jump,
3738 then put a barrier after it so that the unreachable
3739 code will be deleted. */
3740 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
3741 emit_barrier_after (insn);
3742
3743 run_jump_opt_after_gcse = 1;
3744
3745 const_prop_count++;
3746 if (gcse_file != NULL)
3747 {
3748 fprintf (gcse_file,
3749 "CONST-PROP: Replacing reg %d in insn %d with constant ",
3750 REGNO (reg_used->reg_rtx), INSN_UID (insn));
3751 print_rtl (gcse_file, src);
3752 fprintf (gcse_file, "\n");
3753 }
3754
3755 return 1;
3756 }
3757 return 0;
3758 }
3759
3760 #ifdef HAVE_cc0
3761
3762 /* Subroutine of cprop_insn that tries to propagate constants into JUMP_INSNS
3763 for machines that have CC0. INSN is a single set that stores into CC0;
3764 the insn following it is a conditional jump. REG_USED is the use we will
3765 try to replace, SRC is the constant we will try to substitute for it.
3766 Returns nonzero if a change was made. */
3767
3768 static int
3769 cprop_cc0_jump (insn, reg_used, src)
3770 rtx insn;
3771 struct reg_use *reg_used;
3772 rtx src;
3773 {
3774 rtx jump = NEXT_INSN (insn);
3775 rtx copy = copy_rtx (jump);
3776 rtx set = PATTERN (copy);
3777
3778 /* We need to copy the source of the cc0 setter, as cprop_jump is going to
3779 substitute into it. */
3780 replace_rtx (SET_SRC (set), cc0_rtx, copy_rtx (SET_SRC (PATTERN (insn))));
3781 if (! cprop_jump (jump, copy, reg_used, src))
3782 return 0;
3783
3784 /* If we succeeded, delete the cc0 setter. */
3785 PUT_CODE (insn, NOTE);
3786 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
3787 NOTE_SOURCE_FILE (insn) = 0;
3788 return 1;
3789 }
3790 #endif
3791
3792 /* Perform constant and copy propagation on INSN.
3793 The result is non-zero if a change was made. */
3794
3795 static int
3796 cprop_insn (insn, alter_jumps)
3797 rtx insn;
3798 int alter_jumps;
3799 {
3800 struct reg_use *reg_used;
3801 int changed = 0;
3802 rtx note;
3803
3804 /* Only propagate into SETs. Note that a conditional jump is a
3805 SET with pc_rtx as the destination. */
3806 if ((GET_CODE (insn) != INSN
3807 && GET_CODE (insn) != JUMP_INSN)
3808 || GET_CODE (PATTERN (insn)) != SET)
3809 return 0;
3810
3811 reg_use_count = 0;
3812 find_used_regs (PATTERN (insn));
3813
3814 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
3815 if (!note)
3816 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
3817
3818 /* We may win even when propagating constants into notes. */
3819 if (note)
3820 find_used_regs (XEXP (note, 0));
3821
3822 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
3823 reg_used++, reg_use_count--)
3824 {
3825 unsigned int regno = REGNO (reg_used->reg_rtx);
3826 rtx pat, src;
3827 struct expr *set;
3828
3829 /* Ignore registers created by GCSE.
3830 We do this because ... */
3831 if (regno >= max_gcse_regno)
3832 continue;
3833
3834 /* If the register has already been set in this block, there's
3835 nothing we can do. */
3836 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
3837 continue;
3838
3839 /* Find an assignment that sets reg_used and is available
3840 at the start of the block. */
3841 set = find_avail_set (regno, insn);
3842 if (! set)
3843 continue;
3844
3845 pat = set->expr;
3846 /* ??? We might be able to handle PARALLELs. Later. */
3847 if (GET_CODE (pat) != SET)
3848 abort ();
3849
3850 src = SET_SRC (pat);
3851
3852 /* Constant propagation. */
3853 if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE
3854 || GET_CODE (src) == SYMBOL_REF)
3855 {
3856 /* Handle normal insns first. */
3857 if (GET_CODE (insn) == INSN
3858 && try_replace_reg (reg_used->reg_rtx, src, insn))
3859 {
3860 changed = 1;
3861 const_prop_count++;
3862 if (gcse_file != NULL)
3863 {
3864 fprintf (gcse_file, "CONST-PROP: Replacing reg %d in ",
3865 regno);
3866 fprintf (gcse_file, "insn %d with constant ",
3867 INSN_UID (insn));
3868 print_rtl (gcse_file, src);
3869 fprintf (gcse_file, "\n");
3870 }
3871
3872 /* The original insn setting reg_used may or may not now be
3873 deletable. We leave the deletion to flow. */
3874 }
3875
3876 /* Try to propagate a CONST_INT into a conditional jump.
3877 We're pretty specific about what we will handle in this
3878 code, we can extend this as necessary over time.
3879
3880 Right now the insn in question must look like
3881 (set (pc) (if_then_else ...)) */
3882 else if (alter_jumps
3883 && GET_CODE (insn) == JUMP_INSN
3884 && condjump_p (insn)
3885 && ! simplejump_p (insn))
3886 changed |= cprop_jump (insn, copy_rtx (insn), reg_used, src);
3887 #ifdef HAVE_cc0
3888 /* Similar code for machines that use a pair of CC0 setter and
3889 conditional jump insn. */
3890 else if (alter_jumps
3891 && GET_CODE (PATTERN (insn)) == SET
3892 && SET_DEST (PATTERN (insn)) == cc0_rtx
3893 && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
3894 && condjump_p (NEXT_INSN (insn))
3895 && ! simplejump_p (NEXT_INSN (insn)))
3896 changed |= cprop_cc0_jump (insn, reg_used, src);
3897 #endif
3898 }
3899 else if (GET_CODE (src) == REG
3900 && REGNO (src) >= FIRST_PSEUDO_REGISTER
3901 && REGNO (src) != regno)
3902 {
3903 if (try_replace_reg (reg_used->reg_rtx, src, insn))
3904 {
3905 changed = 1;
3906 copy_prop_count++;
3907 if (gcse_file != NULL)
3908 {
3909 fprintf (gcse_file, "COPY-PROP: Replacing reg %d in insn %d",
3910 regno, INSN_UID (insn));
3911 fprintf (gcse_file, " with reg %d\n", REGNO (src));
3912 }
3913
3914 /* The original insn setting reg_used may or may not now be
3915 deletable. We leave the deletion to flow. */
3916 /* FIXME: If it turns out that the insn isn't deletable,
3917 then we may have unnecessarily extended register lifetimes
3918 and made things worse. */
3919 }
3920 }
3921 }
3922
3923 return changed;
3924 }
3925
3926 /* Forward propagate copies. This includes copies and constants. Return
3927 non-zero if a change was made. */
3928
3929 static int
3930 cprop (alter_jumps)
3931 int alter_jumps;
3932 {
3933 int bb, changed;
3934 rtx insn;
3935
3936 /* Note we start at block 1. */
3937
3938 changed = 0;
3939 for (bb = 1; bb < n_basic_blocks; bb++)
3940 {
3941 /* Reset tables used to keep track of what's still valid [since the
3942 start of the block]. */
3943 reset_opr_set_tables ();
3944
3945 for (insn = BLOCK_HEAD (bb);
3946 insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
3947 insn = NEXT_INSN (insn))
3948 {
3949 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3950 {
3951 changed |= cprop_insn (insn, alter_jumps);
3952
3953 /* Keep track of everything modified by this insn. */
3954 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
3955 call mark_oprs_set if we turned the insn into a NOTE. */
3956 if (GET_CODE (insn) != NOTE)
3957 mark_oprs_set (insn);
3958 }
3959 }
3960 }
3961
3962 if (gcse_file != NULL)
3963 fprintf (gcse_file, "\n");
3964
3965 return changed;
3966 }
3967
3968 /* Perform one copy/constant propagation pass.
3969 F is the first insn in the function.
3970 PASS is the pass count. */
3971
3972 static int
3973 one_cprop_pass (pass, alter_jumps)
3974 int pass;
3975 int alter_jumps;
3976 {
3977 int changed = 0;
3978
3979 const_prop_count = 0;
3980 copy_prop_count = 0;
3981
3982 alloc_set_hash_table (max_cuid);
3983 compute_set_hash_table ();
3984 if (gcse_file)
3985 dump_hash_table (gcse_file, "SET", set_hash_table, set_hash_table_size,
3986 n_sets);
3987 if (n_sets > 0)
3988 {
3989 alloc_cprop_mem (n_basic_blocks, n_sets);
3990 compute_cprop_data ();
3991 changed = cprop (alter_jumps);
3992 free_cprop_mem ();
3993 }
3994
3995 free_set_hash_table ();
3996
3997 if (gcse_file)
3998 {
3999 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4000 current_function_name, pass, bytes_used);
4001 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4002 const_prop_count, copy_prop_count);
4003 }
4004
4005 return changed;
4006 }
4007 \f
4008 /* Compute PRE+LCM working variables. */
4009
4010 /* Local properties of expressions. */
4011 /* Nonzero for expressions that are transparent in the block. */
4012 static sbitmap *transp;
4013
4014 /* Nonzero for expressions that are transparent at the end of the block.
4015 This is only zero for expressions killed by an abnormal critical edge
4016 created by a call. */
4017 static sbitmap *transpout;
4018
4019 /* Nonzero for expressions that are computed (available) in the block. */
4020 static sbitmap *comp;
4021
4022 /* Nonzero for expressions that are locally anticipatable in the block. */
4023 static sbitmap *antloc;
4024
4025 /* Nonzero for expressions where this block is an optimal computation
4026 point. */
4027 static sbitmap *pre_optimal;
4028
4029 /* Nonzero for expressions which are redundant in a particular block. */
4030 static sbitmap *pre_redundant;
4031
4032 /* Nonzero for expressions which should be inserted on a specific edge. */
4033 static sbitmap *pre_insert_map;
4034
4035 /* Nonzero for expressions which should be deleted in a specific block. */
4036 static sbitmap *pre_delete_map;
4037
4038 /* Contains the edge_list returned by pre_edge_lcm. */
4039 static struct edge_list *edge_list;
4040
4041 /* Redundant insns. */
4042 static sbitmap pre_redundant_insns;
4043
4044 /* Allocate vars used for PRE analysis. */
4045
4046 static void
4047 alloc_pre_mem (n_blocks, n_exprs)
4048 int n_blocks, n_exprs;
4049 {
4050 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4051 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4052 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4053
4054 pre_optimal = NULL;
4055 pre_redundant = NULL;
4056 pre_insert_map = NULL;
4057 pre_delete_map = NULL;
4058 ae_in = NULL;
4059 ae_out = NULL;
4060 u_bitmap = NULL;
4061 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
4062 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
4063
4064 /* pre_insert and pre_delete are allocated later. */
4065 }
4066
4067 /* Free vars used for PRE analysis. */
4068
4069 static void
4070 free_pre_mem ()
4071 {
4072 free (transp);
4073 free (comp);
4074 free (antloc);
4075
4076 if (pre_optimal)
4077 free (pre_optimal);
4078 if (pre_redundant)
4079 free (pre_redundant);
4080 if (pre_insert_map)
4081 free (pre_insert_map);
4082 if (pre_delete_map)
4083 free (pre_delete_map);
4084 if (transpout)
4085 free (transpout);
4086
4087 if (ae_in)
4088 free (ae_in);
4089 if (ae_out)
4090 free (ae_out);
4091 if (ae_kill)
4092 free (ae_kill);
4093 if (u_bitmap)
4094 free (u_bitmap);
4095
4096 transp = comp = antloc = NULL;
4097 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
4098 transpout = ae_in = ae_out = ae_kill = NULL;
4099 u_bitmap = NULL;
4100
4101 }
4102
4103 /* Top level routine to do the dataflow analysis needed by PRE. */
4104
4105 static void
4106 compute_pre_data ()
4107 {
4108 int i;
4109
4110 compute_local_properties (transp, comp, antloc, 0);
4111 compute_transpout ();
4112 sbitmap_vector_zero (ae_kill, n_basic_blocks);
4113
4114 /* Compute ae_kill for each basic block using:
4115
4116 ~(TRANSP | COMP)
4117
4118 This is significantly faster than compute_ae_kill. */
4119
4120 for (i = 0; i < n_basic_blocks; i++)
4121 {
4122 sbitmap_a_or_b (ae_kill[i], transp[i], comp[i]);
4123 sbitmap_not (ae_kill[i], ae_kill[i]);
4124 }
4125
4126 edge_list = pre_edge_lcm (gcse_file, n_exprs, transp, comp, antloc,
4127 ae_kill, &pre_insert_map, &pre_delete_map);
4128 }
4129 \f
4130 /* PRE utilities */
4131
4132 /* Return non-zero if an occurrence of expression EXPR in OCCR_BB would reach
4133 block BB.
4134
4135 VISITED is a pointer to a working buffer for tracking which BB's have
4136 been visited. It is NULL for the top-level call.
4137
4138 We treat reaching expressions that go through blocks containing the same
4139 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
4140 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
4141 2 as not reaching. The intent is to improve the probability of finding
4142 only one reaching expression and to reduce register lifetimes by picking
4143 the closest such expression. */
4144
4145 static int
4146 pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
4147 int occr_bb;
4148 struct expr *expr;
4149 int bb;
4150 char *visited;
4151 {
4152 edge pred;
4153
4154 for (pred = BASIC_BLOCK (bb)->pred; pred != NULL; pred = pred->pred_next)
4155 {
4156 int pred_bb = pred->src->index;
4157
4158 if (pred->src == ENTRY_BLOCK_PTR
4159 /* Has this predecessor already been visited? */
4160 || visited[pred_bb])
4161 ;/* Nothing to do. */
4162
4163 /* Does this predecessor generate this expression? */
4164 else if (TEST_BIT (comp[pred_bb], expr->bitmap_index))
4165 {
4166 /* Is this the occurrence we're looking for?
4167 Note that there's only one generating occurrence per block
4168 so we just need to check the block number. */
4169 if (occr_bb == pred_bb)
4170 return 1;
4171
4172 visited[pred_bb] = 1;
4173 }
4174 /* Ignore this predecessor if it kills the expression. */
4175 else if (! TEST_BIT (transp[pred_bb], expr->bitmap_index))
4176 visited[pred_bb] = 1;
4177
4178 /* Neither gen nor kill. */
4179 else
4180 {
4181 visited[pred_bb] = 1;
4182 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
4183 return 1;
4184 }
4185 }
4186
4187 /* All paths have been checked. */
4188 return 0;
4189 }
4190
4191 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
4192 memory allocated for that function is returned. */
4193
4194 static int
4195 pre_expr_reaches_here_p (occr_bb, expr, bb)
4196 int occr_bb;
4197 struct expr *expr;
4198 int bb;
4199 {
4200 int rval;
4201 char *visited = (char *) xcalloc (n_basic_blocks, 1);
4202
4203 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
4204
4205 free (visited);
4206 return rval;
4207 }
4208 \f
4209
4210 /* Given an expr, generate RTL which we can insert at the end of a BB,
4211 or on an edge. Set the block number of any insns generated to
4212 the value of BB. */
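/* Added clarification (our reading of the code, not from the original
   comment): emit_move_insn may expand the copy into more than one
   instruction on some targets, so the whole result is wrapped with
   gen_sequence; this is why insert_insn_end_bb below must be prepared to
   handle a SEQUENCE pattern.  */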
4213
4214 static rtx
4215 process_insert_insn (expr)
4216 struct expr *expr;
4217 {
4218 rtx reg = expr->reaching_reg;
4219 rtx pat, copied_expr;
4220 rtx first_new_insn;
4221
4222 start_sequence ();
4223 copied_expr = copy_rtx (expr->expr);
4224 emit_move_insn (reg, copied_expr);
4225 first_new_insn = get_insns ();
4226 pat = gen_sequence ();
4227 end_sequence ();
4228
4229 return pat;
4230 }
4231
4232 /* Add EXPR to the end of basic block BB.
4233
4234 This is used by both the PRE and code hoisting.
4235
4236 For PRE, we want to verify that the expr is either transparent
4237 or locally anticipatable in the target block. This check makes
4238 no sense for code hoisting. */
4239
4240 static void
4241 insert_insn_end_bb (expr, bb, pre)
4242 struct expr *expr;
4243 int bb;
4244 int pre;
4245 {
4246 rtx insn = BLOCK_END (bb);
4247 rtx new_insn;
4248 rtx reg = expr->reaching_reg;
4249 int regno = REGNO (reg);
4250 rtx pat;
4251 int i;
4252
4253 pat = process_insert_insn (expr);
4254
4255 /* If the last insn is a jump, insert EXPR in front [taking care to
4256 handle cc0, etc. properly]. */
4257
4258 if (GET_CODE (insn) == JUMP_INSN)
4259 {
4260 #ifdef HAVE_cc0
4261 rtx note;
4262 #endif
4263
4264 /* If this is a jump table, then we can't insert stuff here. Since
4265 we know the previous real insn must be the tablejump, we insert
4266 the new instruction just before the tablejump. */
4267 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
4268 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4269 insn = prev_real_insn (insn);
4270
4271 #ifdef HAVE_cc0
4272 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4273 if cc0 isn't set. */
4274 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
4275 if (note)
4276 insn = XEXP (note, 0);
4277 else
4278 {
4279 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
4280 if (maybe_cc0_setter
4281 && GET_RTX_CLASS (GET_CODE (maybe_cc0_setter)) == 'i'
4282 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
4283 insn = maybe_cc0_setter;
4284 }
4285 #endif
4286 /* FIXME: What if something in cc0/jump uses value set in new insn? */
4287 new_insn = emit_block_insn_before (pat, insn, BASIC_BLOCK (bb));
4288 }
4289
4290 /* Likewise if the last insn is a call, as will happen in the presence
4291 of exception handling. */
4292 else if (GET_CODE (insn) == CALL_INSN)
4293 {
4294 HARD_REG_SET parm_regs;
4295 int nparm_regs;
4296 rtx p;
4297
4298 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
4299 we search backward and place the instructions before the first
4300 parameter is loaded. Do this for everyone for consistency and a
4301 presumption that we'll get better code elsewhere as well.
4302
4303 It should always be the case that we can put these instructions
4304 anywhere in the basic block when performing PRE optimizations.
4305 Check this. */
4306
4307 if (pre
4308 && !TEST_BIT (antloc[bb], expr->bitmap_index)
4309 && !TEST_BIT (transp[bb], expr->bitmap_index))
4310 abort ();
4311
4312 /* Since different machines initialize their parameter registers
4313 in different orders, assume nothing. Collect the set of all
4314 parameter registers. */
4315 CLEAR_HARD_REG_SET (parm_regs);
4316 nparm_regs = 0;
4317 for (p = CALL_INSN_FUNCTION_USAGE (insn); p ; p = XEXP (p, 1))
4318 if (GET_CODE (XEXP (p, 0)) == USE
4319 && GET_CODE (XEXP (XEXP (p, 0), 0)) == REG)
4320 {
4321 if (REGNO (XEXP (XEXP (p, 0), 0)) >= FIRST_PSEUDO_REGISTER)
4322 abort ();
4323
4324 SET_HARD_REG_BIT (parm_regs, REGNO (XEXP (XEXP (p, 0), 0)));
4325 nparm_regs++;
4326 }
4327
4328 /* Search backward for the first set of a register in this set. */
4329 while (nparm_regs && BLOCK_HEAD (bb) != insn)
4330 {
4331 insn = PREV_INSN (insn);
4332 p = single_set (insn);
4333 if (p && GET_CODE (SET_DEST (p)) == REG
4334 && REGNO (SET_DEST (p)) < FIRST_PSEUDO_REGISTER
4335 && TEST_HARD_REG_BIT (parm_regs, REGNO (SET_DEST (p))))
4336 {
4337 CLEAR_HARD_REG_BIT (parm_regs, REGNO (SET_DEST (p)));
4338 nparm_regs--;
4339 }
4340 }
4341
4342 /* If we found all the parameter loads, then we want to insert
4343 before the first parameter load.
4344
4345 If we did not find all the parameter loads, then we might have
4346 stopped on the head of the block, which could be a CODE_LABEL.
4347 If we inserted before the CODE_LABEL, then we would be putting
4348 the insn in the wrong basic block. In that case, put the insn
4349 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
4350 while (GET_CODE (insn) == CODE_LABEL
4351 || (GET_CODE (insn) == NOTE
4352 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK))
4353 insn = NEXT_INSN (insn);
4354
4355 new_insn = emit_block_insn_before (pat, insn, BASIC_BLOCK (bb));
4356 }
4357 else
4358 {
4359 new_insn = emit_insn_after (pat, insn);
4360 BLOCK_END (bb) = new_insn;
4361 }
4362
4363 /* Keep block number table up to date.
4364 Note that PAT could be a multiple-insn sequence; we have to make
4365 sure that each insn in the sequence is handled. */
4366 if (GET_CODE (pat) == SEQUENCE)
4367 {
4368 for (i = 0; i < XVECLEN (pat, 0); i++)
4369 {
4370 rtx insn = XVECEXP (pat, 0, i);
4371
4372 set_block_num (insn, bb);
4373 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
4374 add_label_notes (PATTERN (insn), new_insn);
4375
4376 note_stores (PATTERN (insn), record_set_info, insn);
4377 }
4378 }
4379 else
4380 {
4381 add_label_notes (SET_SRC (pat), new_insn);
4382 set_block_num (new_insn, bb);
4383
4384 /* Keep register set table up to date. */
4385 record_one_set (regno, new_insn);
4386 }
4387
4388 gcse_create_count++;
4389
4390 if (gcse_file)
4391 {
4392 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
4393 bb, INSN_UID (new_insn));
4394 fprintf (gcse_file, "copying expression %d to reg %d\n",
4395 expr->bitmap_index, regno);
4396 }
4397 }
4398
4399 /* Insert partially redundant expressions on edges in the CFG to make
4400 the expressions fully redundant. */
4401
4402 static int
4403 pre_edge_insert (edge_list, index_map)
4404 struct edge_list *edge_list;
4405 struct expr **index_map;
4406 {
4407 int e, i, j, num_edges, set_size, did_insert = 0;
4408 sbitmap *inserted;
4409
4410 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4411 if it reaches any of the deleted expressions. */
4412
4413 set_size = pre_insert_map[0]->size;
4414 num_edges = NUM_EDGES (edge_list);
4415 inserted = sbitmap_vector_alloc (num_edges, n_exprs);
4416 sbitmap_vector_zero (inserted, num_edges);
4417
4418 for (e = 0; e < num_edges; e++)
4419 {
4420 int indx;
4421 basic_block pred = INDEX_EDGE_PRED_BB (edge_list, e);
4422 int bb = pred->index;
4423
4424 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
4425 {
4426 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
4427
4428 for (j = indx; insert && j < n_exprs; j++, insert >>= 1)
4429 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
4430 {
4431 struct expr *expr = index_map[j];
4432 struct occr *occr;
4433
4434 /* Now look at each deleted occurrence of this expression. */
4435 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4436 {
4437 if (! occr->deleted_p)
4438 continue;
4439
4440 /* Insert this expression on this edge if it would
4441 reach the deleted occurrence in BB. */
4442 if (!TEST_BIT (inserted[e], j))
4443 {
4444 rtx insn;
4445 edge eg = INDEX_EDGE (edge_list, e);
4446
4447 /* We can't insert anything on an abnormal and
4448 critical edge, so we insert the insn at the end of
4449 the previous block. There are several alternatives
4450 detailed in Morgan's book, p. 277 (sec. 10.5), for
4451 handling this situation. This one is easiest for
4452 now. */
4453
4454 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
4455 insert_insn_end_bb (index_map[j], bb, 0);
4456 else
4457 {
4458 insn = process_insert_insn (index_map[j]);
4459 insert_insn_on_edge (insn, eg);
4460 }
4461
4462 if (gcse_file)
4463 {
4464 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
4465 bb,
4466 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
4467 fprintf (gcse_file, "copy expression %d\n",
4468 expr->bitmap_index);
4469 }
4470
4471 SET_BIT (inserted[e], j);
4472 did_insert = 1;
4473 gcse_create_count++;
4474 }
4475 }
4476 }
4477 }
4478 }
4479
4480 free (inserted);
4481 return did_insert;
4482 }
4483
4484 /* Copy the result of INSN to REG. INDX is the expression number. */
4485
4486 static void
4487 pre_insert_copy_insn (expr, insn)
4488 struct expr *expr;
4489 rtx insn;
4490 {
4491 rtx reg = expr->reaching_reg;
4492 int regno = REGNO (reg);
4493 int indx = expr->bitmap_index;
4494 rtx set = single_set (insn);
4495 rtx new_insn;
4496 int bb = BLOCK_NUM (insn);
4497
4498 if (!set)
4499 abort ();
4500
4501 new_insn = emit_insn_after (gen_rtx_SET (VOIDmode, reg, SET_DEST (set)),
4502 insn);
4503
4504 /* Keep block number table up to date. */
4505 set_block_num (new_insn, bb);
4506
4507 /* Keep register set table up to date. */
4508 record_one_set (regno, new_insn);
4509 if (insn == BLOCK_END (bb))
4510 BLOCK_END (bb) = new_insn;
4511
4512 gcse_create_count++;
4513
4514 if (gcse_file)
4515 fprintf (gcse_file,
4516 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
4517 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
4518 INSN_UID (insn), regno);
4519 }
4520
4521 /* Copy available expressions that reach the redundant expression
4522 to `reaching_reg'. */
4523
4524 static void
4525 pre_insert_copies ()
4526 {
4527 unsigned int i;
4528 struct expr *expr;
4529 struct occr *occr;
4530 struct occr *avail;
4531
4532 /* For each available expression in the table, copy the result to
4533 `reaching_reg' if the expression reaches a deleted one.
4534
4535 ??? The current algorithm is rather brute force.
4536 Need to do some profiling. */
4537
4538 for (i = 0; i < expr_hash_table_size; i++)
4539 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4540 {
4541 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
4542 we don't want to insert a copy here because the expression may not
4543 really be redundant. So only insert an insn if the expression was
4544 deleted. This test also avoids further processing if the
4545 expression wasn't deleted anywhere. */
4546 if (expr->reaching_reg == NULL)
4547 continue;
4548
4549 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4550 {
4551 if (! occr->deleted_p)
4552 continue;
4553
4554 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
4555 {
4556 rtx insn = avail->insn;
4557
4558 /* No need to handle this one if handled already. */
4559 if (avail->copied_p)
4560 continue;
4561
4562 /* Don't handle this one if it's a redundant one. */
4563 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
4564 continue;
4565
4566 /* Or if the expression doesn't reach the deleted one. */
4567 if (! pre_expr_reaches_here_p (BLOCK_NUM (avail->insn), expr,
4568 BLOCK_NUM (occr->insn)))
4569 continue;
4570
4571 /* Copy the result of avail to reaching_reg. */
4572 pre_insert_copy_insn (expr, insn);
4573 avail->copied_p = 1;
4574 }
4575 }
4576 }
4577 }
4578
4579 /* Delete redundant computations.
4580 Deletion is done by changing the insn to copy the `reaching_reg' of
4581 the expression into the result of the SET. It is left to later passes
4582 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
4583
4584 Returns non-zero if a change is made. */
4585
4586 static int
4587 pre_delete ()
4588 {
4589 unsigned int i;
4590 int bb, changed;
4591 struct expr *expr;
4592 struct occr *occr;
4593
4594 changed = 0;
4595 for (i = 0; i < expr_hash_table_size; i++)
4596 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4597 {
4598 int indx = expr->bitmap_index;
4599
4600 /* We only need to search antic_occr since we require
4601 ANTLOC != 0. */
4602
4603 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4604 {
4605 rtx insn = occr->insn;
4606 rtx set;
4607 int bb = BLOCK_NUM (insn);
4608
4609 if (TEST_BIT (pre_delete_map[bb], indx))
4610 {
4611 set = single_set (insn);
4612 if (! set)
4613 abort ();
4614
4615 /* Create a pseudo-reg to store the result of reaching
4616 expressions into. Get the mode for the new pseudo from
4617 the mode of the original destination pseudo. */
4618 if (expr->reaching_reg == NULL)
4619 expr->reaching_reg
4620 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4621
4622 /* In theory this should never fail since we're creating
4623 a reg->reg copy.
4624
4625 However, on the x86 some of the movXX patterns actually
4626 contain clobbers of scratch regs. This may cause the
4627 insn created by validate_change to not match any pattern
4628 and thus cause validate_change to fail. */
4629 if (validate_change (insn, &SET_SRC (set),
4630 expr->reaching_reg, 0))
4631 {
4632 occr->deleted_p = 1;
4633 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
4634 changed = 1;
4635 gcse_subst_count++;
4636 }
4637
4638 if (gcse_file)
4639 {
4640 fprintf (gcse_file,
4641 "PRE: redundant insn %d (expression %d) in ",
4642 INSN_UID (insn), indx);
4643 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
4644 bb, REGNO (expr->reaching_reg));
4645 }
4646 }
4647 }
4648 }
4649
4650 return changed;
4651 }
4652
4653 /* Perform GCSE optimizations using PRE.
4654 This is called by one_pre_gcse_pass after all the dataflow analysis
4655 has been done.
4656
4657 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
4658 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4659 Compiler Design and Implementation.
4660
4661 ??? A new pseudo reg is created to hold the reaching expression. The nice
4662 thing about the classical approach is that it would try to use an existing
4663 reg. If the register can't be adequately optimized [i.e. we introduce
4664 reload problems], one could add a pass here to propagate the new register
4665 through the block.
4666
4667 ??? We don't handle single sets in PARALLELs because we're [currently] not
4668 able to copy the rest of the parallel when we insert copies to create full
4669 redundancies from partial redundancies. However, there's no reason why we
4670 can't handle PARALLELs in the cases where there are no partial
4671 redundancies. */
4672
4673 static int
4674 pre_gcse ()
4675 {
4676 unsigned int i;
4677 int did_insert, changed;
4678 struct expr **index_map;
4679 struct expr *expr;
4680
4681 /* Compute a mapping from expression number (`bitmap_index') to
4682 hash table entry. */
4683
4684 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
4685 for (i = 0; i < expr_hash_table_size; i++)
4686 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
4687 index_map[expr->bitmap_index] = expr;
4688
4689 /* Reset bitmap used to track which insns are redundant. */
4690 pre_redundant_insns = sbitmap_alloc (max_cuid);
4691 sbitmap_zero (pre_redundant_insns);
4692
4693 /* Delete the redundant insns first so that
4694 - we know what register to use for the new insns and for the other
4695 ones with reaching expressions
4696 - we know which insns are redundant when we go to create copies */
4697
4698 changed = pre_delete ();
4699
4700 did_insert = pre_edge_insert (edge_list, index_map);
4701
4702 /* In other places with reaching expressions, copy the expression to the
4703 specially allocated pseudo-reg that reaches the redundant expr. */
4704 pre_insert_copies ();
4705 if (did_insert)
4706 {
4707 commit_edge_insertions ();
4708 changed = 1;
4709 }
4710
4711 free (index_map);
4712 free (pre_redundant_insns);
4713 return changed;
4714 }
4715
4716 /* Top level routine to perform one PRE GCSE pass.
4717
4718 Return non-zero if a change was made. */
4719
4720 static int
4721 one_pre_gcse_pass (pass)
4722 int pass;
4723 {
4724 int changed = 0;
4725
4726 gcse_subst_count = 0;
4727 gcse_create_count = 0;
4728
4729 alloc_expr_hash_table (max_cuid);
4730 add_noreturn_fake_exit_edges ();
4731 compute_expr_hash_table ();
4732 if (gcse_file)
4733 dump_hash_table (gcse_file, "Expression", expr_hash_table,
4734 expr_hash_table_size, n_exprs);
4735
4736 if (n_exprs > 0)
4737 {
4738 alloc_pre_mem (n_basic_blocks, n_exprs);
4739 compute_pre_data ();
4740 changed |= pre_gcse ();
4741 free_edge_list (edge_list);
4742 free_pre_mem ();
4743 }
4744
4745 remove_fake_edges ();
4746 free_expr_hash_table ();
4747
4748 if (gcse_file)
4749 {
4750 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
4751 current_function_name, pass, bytes_used);
4752 fprintf (gcse_file, "%d substs, %d insns created\n",
4753 gcse_subst_count, gcse_create_count);
4754 }
4755
4756 return changed;
4757 }
4758 \f
4759 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
4760 We have to add REG_LABEL notes, because the following loop optimization
4761 pass requires them. */
4762
4763 /* ??? This is very similar to the loop.c add_label_notes function. We
4764 could probably share code here. */
4765
4766 /* ??? If there was a jump optimization pass after gcse and before loop,
4767 then we would not need to do this here, because jump would add the
4768 necessary REG_LABEL notes. */
4769
4770 static void
4771 add_label_notes (x, insn)
4772 rtx x;
4773 rtx insn;
4774 {
4775 enum rtx_code code = GET_CODE (x);
4776 int i, j;
4777 const char *fmt;
4778
4779 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4780 {
4781 /* This code used to ignore labels that referred to dispatch tables to
4782 avoid flow generating (slightly) worse code.
4783
4784 We no longer ignore such label references (see LABEL_REF handling in
4785 mark_jump_label for additional information). */
4786
4787 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
4788 REG_NOTES (insn));
4789 return;
4790 }
4791
4792 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4793 {
4794 if (fmt[i] == 'e')
4795 add_label_notes (XEXP (x, i), insn);
4796 else if (fmt[i] == 'E')
4797 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4798 add_label_notes (XVECEXP (x, i, j), insn);
4799 }
4800 }
4801
4802 /* Compute transparent outgoing information for each block.
4803
4804 An expression is transparent to an edge unless it is killed by
4805 the edge itself. This can only happen with abnormal control flow,
4806 when the edge is traversed through a call. This happens with
4807 non-local labels and exceptions.
4808
4809 This would not be necessary if we split the edge. While this is
4810 normally impossible for abnormal critical edges, with some effort
4811 it should be possible with exception handling, since we still have
4812 control over which handler should be invoked. But due to increased
4813 EH table sizes, this may not be worthwhile. */
4814
4815 static void
4816 compute_transpout ()
4817 {
4818 int bb;
4819 unsigned int i;
4820 struct expr *expr;
4821
4822 sbitmap_vector_ones (transpout, n_basic_blocks);
4823
4824 for (bb = 0; bb < n_basic_blocks; ++bb)
4825 {
4826 /* Note that flow inserted a nop at the end of basic blocks that
4827 end in call instructions for reasons other than abnormal
4828 control flow. */
4829 if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
4830 continue;
4831
4832 for (i = 0; i < expr_hash_table_size; i++)
4833 for (expr = expr_hash_table[i]; expr ; expr = expr->next_same_hash)
4834 if (GET_CODE (expr->expr) == MEM)
4835 {
4836 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
4837 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
4838 continue;
4839
4840 /* ??? Optimally, we would use interprocedural alias
4841 analysis to determine if this mem is actually killed
4842 by this call. */
4843 RESET_BIT (transpout[bb], expr->bitmap_index);
4844 }
4845 }
4846 }
4847
4848 /* Removal of useless null pointer checks */
4849
4850 /* Called via note_stores. X is set by SETTER. If X is a register we must
4851 invalidate nonnull_local and set nonnull_killed. DATA is really a
4852 `null_pointer_info *'.
4853
4854 We ignore hard registers. */
4855
4856 static void
4857 invalidate_nonnull_info (x, setter, data)
4858 rtx x;
4859 rtx setter ATTRIBUTE_UNUSED;
4860 void *data;
4861 {
4862 unsigned int regno;
4863 struct null_pointer_info *npi = (struct null_pointer_info *) data;
4864
4865 while (GET_CODE (x) == SUBREG)
4866 x = SUBREG_REG (x);
4867
4868 /* Ignore anything that is not a register or is a hard register. */
4869 if (GET_CODE (x) != REG
4870 || REGNO (x) < npi->min_reg
4871 || REGNO (x) >= npi->max_reg)
4872 return;
4873
4874 regno = REGNO (x) - npi->min_reg;
4875
4876 RESET_BIT (npi->nonnull_local[npi->current_block], regno);
4877 SET_BIT (npi->nonnull_killed[npi->current_block], regno);
4878 }
4879
4880 /* Do null-pointer check elimination for the registers indicated in
4881 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
4882 they are not our responsibility to free. */
4883
4884 static void
4885 delete_null_pointer_checks_1 (block_reg, nonnull_avin, nonnull_avout, npi)
4886 unsigned int *block_reg;
4887 sbitmap *nonnull_avin;
4888 sbitmap *nonnull_avout;
4889 struct null_pointer_info *npi;
4890 {
4891 int bb;
4892 int current_block;
4893 sbitmap *nonnull_local = npi->nonnull_local;
4894 sbitmap *nonnull_killed = npi->nonnull_killed;
4895
4896 /* Compute local properties, nonnull and killed. A register will have
4897 the nonnull property if at the end of the current block its value is
4898 known to be nonnull. The killed property indicates that somewhere in
4899 the block any information we had about the register is killed.
4900
4901 Note that a register can have both properties in a single block. That
4902 indicates that it's killed, then later in the block a new value is
4903 computed. */
4904 sbitmap_vector_zero (nonnull_local, n_basic_blocks);
4905 sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
4906
4907 for (current_block = 0; current_block < n_basic_blocks; current_block++)
4908 {
4909 rtx insn, stop_insn;
4910
4911 /* Set the current block for invalidate_nonnull_info. */
4912 npi->current_block = current_block;
4913
4914 /* Scan each insn in the basic block looking for memory references and
4915 register sets. */
4916 stop_insn = NEXT_INSN (BLOCK_END (current_block));
4917 for (insn = BLOCK_HEAD (current_block);
4918 insn != stop_insn;
4919 insn = NEXT_INSN (insn))
4920 {
4921 rtx set;
4922 rtx reg;
4923
4924 /* Ignore anything that is not a normal insn. */
4925 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
4926 continue;
4927
4928 /* Basically ignore anything that is not a simple SET. We do have
4929 to make sure to invalidate nonnull_local and set nonnull_killed
4930 for such insns though. */
4931 set = single_set (insn);
4932 if (!set)
4933 {
4934 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
4935 continue;
4936 }
4937
4938 /* See if we've got a usable memory load. We handle it first
4939 in case it uses its address register as a dest (which kills
4940 the nonnull property). */
4941 if (GET_CODE (SET_SRC (set)) == MEM
4942 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
4943 && REGNO (reg) >= npi->min_reg
4944 && REGNO (reg) < npi->max_reg)
4945 SET_BIT (nonnull_local[current_block],
4946 REGNO (reg) - npi->min_reg);
4947
4948 /* Now invalidate stuff clobbered by this insn. */
4949 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
4950
4951 /* And handle stores; we do these last since any sets in INSN cannot
4952 kill the nonnull property if it is derived from a MEM
4953 appearing in a SET_DEST. */
4954 if (GET_CODE (SET_DEST (set)) == MEM
4955 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
4956 && REGNO (reg) >= npi->min_reg
4957 && REGNO (reg) < npi->max_reg)
4958 SET_BIT (nonnull_local[current_block],
4959 REGNO (reg) - npi->min_reg);
4960 }
4961 }
4962
4963 /* Now compute global properties based on the local properties. This
4964 is a classic global availability algorithm. */
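/* Sketch of the dataflow being solved (our summary, assuming the usual
   definition of compute_available used elsewhere in this file):

       AVIN[bb]  = intersection of AVOUT over all predecessors of bb
       AVOUT[bb] = NONNULL_LOCAL[bb] | (AVIN[bb] & ~NONNULL_KILLED[bb])  */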
4965 compute_available (nonnull_local, nonnull_killed,
4966 nonnull_avout, nonnull_avin);
4967
4968 /* Now look at each bb and see if it ends with a compare of a value
4969 against zero. */
4970 for (bb = 0; bb < n_basic_blocks; bb++)
4971 {
4972 rtx last_insn = BLOCK_END (bb);
4973 rtx condition, earliest;
4974 int compare_and_branch;
4975
4976 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
4977 since BLOCK_REG[BB] is zero if this block did not end with a
4978 comparison against zero, this condition works. */
4979 if (block_reg[bb] < npi->min_reg
4980 || block_reg[bb] >= npi->max_reg)
4981 continue;
4982
4983 /* LAST_INSN is a conditional jump. Get its condition. */
4984 condition = get_condition (last_insn, &earliest);
4985
4986 /* If we can't determine the condition then skip. */
4987 if (! condition)
4988 continue;
4989
4990 /* Is the register known to have a nonzero value? */
4991 if (!TEST_BIT (nonnull_avout[bb], block_reg[bb] - npi->min_reg))
4992 continue;
4993
4994 /* Try to compute whether the compare/branch at the loop end is one or
4995 two instructions. */
4996 if (earliest == last_insn)
4997 compare_and_branch = 1;
4998 else if (earliest == prev_nonnote_insn (last_insn))
4999 compare_and_branch = 2;
5000 else
5001 continue;
5002
5003 /* We know the register in this comparison is nonnull at exit from
5004 this block. We can optimize this comparison. */
5005 if (GET_CODE (condition) == NE)
5006 {
5007 rtx new_jump;
5008
5009 new_jump = emit_jump_insn_before (gen_jump (JUMP_LABEL (last_insn)),
5010 last_insn);
5011 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
5012 LABEL_NUSES (JUMP_LABEL (new_jump))++;
5013 emit_barrier_after (new_jump);
5014 }
5015 delete_insn (last_insn);
5016 if (compare_and_branch == 2)
5017 delete_insn (earliest);
5018
5019 /* Don't check this block again. (Note that BLOCK_END is
5020 invalid here; we deleted the last instruction in the
5021 block.) */
5022 block_reg[bb] = 0;
5023 }
5024 }
5025
5026 /* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
5027 at compile time.
5028
5029 This is conceptually similar to global constant/copy propagation and
5030 classic global CSE (it even uses the same dataflow equations as cprop).
5031
5032 If a register is used as a memory address in the form (mem (reg)), then we
5033 know that REG cannot be zero at that point in the program. Any instruction
5034 which sets REG "kills" this property.
5035
5036 So, if every path leading to a conditional branch has an available memory
5037 reference of that form, then we know the register can not have the value
5038 zero at the conditional branch.
5039
5040 So we merely need to compute the local properties and propagate that data
5041 around the cfg, then optimize where possible.
5042
5043 We run this pass twice: once before CSE, then again after CSE. This
5044 has proven to be the most profitable approach. It is rare for new
5045 optimization opportunities of this nature to appear after the first CSE
5046 pass.
5047
5048 This could probably be integrated with global cprop with a little work. */
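/* A hand-written example of the transformation (illustrative only, not taken
   from the sources):

       int f (int *p)
       {
         int x = *p;          <- the load (mem (reg P)) shows P is nonnull
         if (p == 0)          <- so this comparison can never be true
           return -1;
         return x;
       }

   Every path to the comparison contains a load through P, so the EQ/NE
   comparison against zero can be resolved at compile time and the dead
   branch removed.  */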
5049
5050 void
5051 delete_null_pointer_checks (f)
5052 rtx f ATTRIBUTE_UNUSED;
5053 {
5054 sbitmap *nonnull_avin, *nonnull_avout;
5055 unsigned int *block_reg;
5056 int bb;
5057 int reg;
5058 int regs_per_pass;
5059 int max_reg;
5060 struct null_pointer_info npi;
5061
5062 /* If we have only a single block, then there's nothing to do. */
5063 if (n_basic_blocks <= 1)
5064 return;
5065
5066 /* Trying to perform global optimizations on flow graphs which have
5067 a high connectivity will take a long time and is unlikely to be
5068 particularly useful.
5069
5070 In normal circumstances a cfg should have about twice as many edges
5071 as blocks. But we do not want to punish small functions which have
5072 a couple switch statements. So we require a relatively large number
5073 of basic blocks and the ratio of edges to blocks to be high. */
5074 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
5075 return;
5076
5077 /* We need four bitmaps, each with a bit for each register in each
5078 basic block. */
5079 max_reg = max_reg_num ();
5080 regs_per_pass = get_bitmap_width (4, n_basic_blocks, max_reg);
5081
5082 /* Allocate bitmaps to hold local and global properties. */
5083 npi.nonnull_local = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5084 npi.nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5085 nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5086 nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
5087
5088 /* Go through the basic blocks, seeing whether or not each block
5089 ends with a conditional branch whose condition is a comparison
5090 against zero. Record the register compared in BLOCK_REG. */
5091 block_reg = (unsigned int *) xcalloc (n_basic_blocks, sizeof (int));
5092 for (bb = 0; bb < n_basic_blocks; bb++)
5093 {
5094 rtx last_insn = BLOCK_END (bb);
5095 rtx condition, earliest, reg;
5096
5097 /* We only want conditional branches. */
5098 if (GET_CODE (last_insn) != JUMP_INSN
5099 || !any_condjump_p (last_insn)
5100 || !onlyjump_p (last_insn))
5101 continue;
5102
5103 /* LAST_INSN is a conditional jump. Get its condition. */
5104 condition = get_condition (last_insn, &earliest);
5105
5106 /* If we were unable to get the condition, or it is not an equality
5107 comparison against zero then there's nothing we can do. */
5108 if (!condition
5109 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
5110 || GET_CODE (XEXP (condition, 1)) != CONST_INT
5111 || (XEXP (condition, 1)
5112 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
5113 continue;
5114
5115 /* We must be checking a register against zero. */
5116 reg = XEXP (condition, 0);
5117 if (GET_CODE (reg) != REG)
5118 continue;
5119
5120 block_reg[bb] = REGNO (reg);
5121 }
5122
5123 /* Go through the algorithm for each block of registers. */
5124 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
5125 {
5126 npi.min_reg = reg;
5127 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
5128 delete_null_pointer_checks_1 (block_reg, nonnull_avin,
5129 nonnull_avout, &npi);
5130 }
5131
5132 /* Free the table of registers compared at the end of every block. */
5133 free (block_reg);
5134
5135 /* Free bitmaps. */
5136 free (npi.nonnull_local);
5137 free (npi.nonnull_killed);
5138 free (nonnull_avin);
5139 free (nonnull_avout);
5140 }
5141
5142 /* Code Hoisting variables and subroutines. */
5143
5144 /* Very busy expressions. */
5145 static sbitmap *hoist_vbein;
5146 static sbitmap *hoist_vbeout;
5147
5148 /* Hoistable expressions. */
5149 static sbitmap *hoist_exprs;
5150
5151 /* Dominator bitmaps. */
5152 static sbitmap *dominators;
5153
5154 /* ??? We could compute post dominators and run this algorithm in
5155 reverse to perform tail merging; doing so would probably be
5156 more effective than the tail merging code in jump.c.
5157
5158 It's unclear if tail merging could be run in parallel with
5159 code hoisting. It would be nice. */
5160
5161 /* Allocate vars used for code hoisting analysis. */
5162
5163 static void
5164 alloc_code_hoist_mem (n_blocks, n_exprs)
5165 int n_blocks, n_exprs;
5166 {
5167 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
5168 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
5169 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
5170
5171 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
5172 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
5173 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
5174 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
5175
5176 dominators = sbitmap_vector_alloc (n_blocks, n_blocks);
5177 }
5178
5179 /* Free vars used for code hoisting analysis. */
5180
5181 static void
5182 free_code_hoist_mem ()
5183 {
5184 free (antloc);
5185 free (transp);
5186 free (comp);
5187
5188 free (hoist_vbein);
5189 free (hoist_vbeout);
5190 free (hoist_exprs);
5191 free (transpout);
5192
5193 free (dominators);
5194 }
5195
5196 /* Compute the very busy expressions at entry/exit from each block.
5197
5198 An expression is very busy if all paths from a given point
5199 compute the expression. */
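/* Sketch of the equations the loop below iterates to a fixed point
   (reconstructed from the code, not part of the original comment):

       VBEIN[bb]  = ANTLOC[bb] | (VBEOUT[bb] & TRANSP[bb])
       VBEOUT[bb] = intersection of VBEIN over the successors of bb
                    (the last block's VBEOUT is left empty)

   Blocks are scanned in reverse order to speed convergence.  */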
5200
5201 static void
5202 compute_code_hoist_vbeinout ()
5203 {
5204 int bb, changed, passes;
5205
5206 sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
5207 sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
5208
5209 passes = 0;
5210 changed = 1;
5211
5212 while (changed)
5213 {
5214 changed = 0;
5215
5216 /* We scan the blocks in reverse order to speed up
5217 the convergence. */
5218 for (bb = n_basic_blocks - 1; bb >= 0; bb--)
5219 {
5220 changed |= sbitmap_a_or_b_and_c (hoist_vbein[bb], antloc[bb],
5221 hoist_vbeout[bb], transp[bb]);
5222 if (bb != n_basic_blocks - 1)
5223 sbitmap_intersection_of_succs (hoist_vbeout[bb], hoist_vbein, bb);
5224 }
5225
5226 passes++;
5227 }
5228
5229 if (gcse_file)
5230 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
5231 }
5232
5233 /* Top level routine to do the dataflow analysis needed by code hoisting. */
5234
5235 static void
5236 compute_code_hoist_data ()
5237 {
5238 compute_local_properties (transp, comp, antloc, 0);
5239 compute_transpout ();
5240 compute_code_hoist_vbeinout ();
5241 compute_flow_dominators (dominators, NULL);
5242 if (gcse_file)
5243 fprintf (gcse_file, "\n");
5244 }
5245
5246 /* Determine if the expression identified by EXPR_INDEX would
5247 reach BB unimpaired if it was placed at the end of EXPR_BB.
5248
5249 It's unclear exactly what Muchnick meant by "unimpaired". It seems
5250 to me that the expression must either be computed or transparent in
5251 *every* block in the path(s) from EXPR_BB to BB. Any other definition
5252 would allow the expression to be hoisted out of loops, even if
5253 the expression wasn't a loop invariant.
5254
5255 Contrast this to reachability for PRE where an expression is
5256 considered reachable if *any* path reaches instead of *all*
5257 paths. */
5258
5259 static int
5260 hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
5261 int expr_bb;
5262 int expr_index;
5263 int bb;
5264 char *visited;
5265 {
5266 edge pred;
5267 int visited_allocated_locally = 0;
5268
5269
5270 if (visited == NULL)
5271 {
5272 visited_allocated_locally = 1;
5273 visited = xcalloc (n_basic_blocks, 1);
5274 }
5275
5276 visited[expr_bb] = 1;
5277 for (pred = BASIC_BLOCK (bb)->pred; pred != NULL; pred = pred->pred_next)
5278 {
5279 int pred_bb = pred->src->index;
5280
5281 if (pred->src == ENTRY_BLOCK_PTR)
5282 break;
5283 else if (visited[pred_bb])
5284 continue;
5285
5286 /* Does this predecessor generate this expression? */
5287 else if (TEST_BIT (comp[pred_bb], expr_index))
5288 break;
5289 else if (! TEST_BIT (transp[pred_bb], expr_index))
5290 break;
5291
5292 /* Not killed. */
5293 else
5294 {
5295 visited[pred_bb] = 1;
5296 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
5297 pred_bb, visited))
5298 break;
5299 }
5300 }
5301 if (visited_allocated_locally)
5302 free (visited);
5303
5304 return (pred == NULL);
5305 }
5306 \f
5307 /* Actually perform code hoisting. */
5308
5309 static void
5310 hoist_code ()
5311 {
5312 int bb, dominated;
5313 unsigned int i;
5314 struct expr **index_map;
5315 struct expr *expr;
5316
5317 sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
5318
5319 /* Compute a mapping from expression number (`bitmap_index') to
5320 hash table entry. */
5321
5322 index_map = (struct expr **) xcalloc (n_exprs, sizeof (struct expr *));
5323 for (i = 0; i < expr_hash_table_size; i++)
5324 for (expr = expr_hash_table[i]; expr != NULL; expr = expr->next_same_hash)
5325 index_map[expr->bitmap_index] = expr;
5326
5327 /* Walk over each basic block looking for potentially hoistable
5328 expressions; nothing gets hoisted from the entry block. */
5329 for (bb = 0; bb < n_basic_blocks; bb++)
5330 {
5331 int found = 0;
5332 int insn_inserted_p;
5333
5334 /* Examine each expression that is very busy at the exit of this
5335 block. These are the potentially hoistable expressions. */
5336 for (i = 0; i < hoist_vbeout[bb]->n_bits; i++)
5337 {
5338 int hoistable = 0;
5339
5340 if (TEST_BIT (hoist_vbeout[bb], i) && TEST_BIT (transpout[bb], i))
5341 {
5342 /* We've found a potentially hoistable expression, now
5343 we look at every block BB dominates to see if it
5344 computes the expression. */
5345 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5346 {
5347 /* Ignore self dominance. */
5348 if (bb == dominated
5349 || ! TEST_BIT (dominators[dominated], bb))
5350 continue;
5351
5352 /* We've found a dominated block, now see if it computes
5353 the busy expression and whether or not moving that
5354 expression to the "beginning" of that block is safe. */
5355 if (!TEST_BIT (antloc[dominated], i))
5356 continue;
5357
5358 /* Note if the expression would reach the dominated block
5359 unimpaired if it was placed at the end of BB.
5360
5361 Keep track of how many times this expression is hoistable
5362 from a dominated block into BB. */
5363 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
5364 hoistable++;
5365 }
5366
5367 /* If we found more than one hoistable occurrence of this
5368 expression, then note it in the bitmap of expressions to
5369 hoist. It makes no sense to hoist things which are computed
5370 in only one BB, and doing so tends to pessimize register
5371 allocation. One could increase this value to try harder
5372 to avoid any possible code expansion due to register
5373 allocation issues; however experiments have shown that
5374 the vast majority of hoistable expressions are only movable
5375 from two successors, so raising this threshold is likely
5376 to nullify any benefit we get from code hoisting. */
5377 if (hoistable > 1)
5378 {
5379 SET_BIT (hoist_exprs[bb], i);
5380 found = 1;
5381 }
5382 }
5383 }
5384
5385 /* If we found nothing to hoist, then quit now. */
5386 if (! found)
5387 continue;
5388
5389 /* Loop over all the hoistable expressions. */
5390 for (i = 0; i < hoist_exprs[bb]->n_bits; i++)
5391 {
5392 /* We want to insert the expression into BB only once, so
5393 note when we've inserted it. */
5394 insn_inserted_p = 0;
5395
5396 /* These tests should be the same as the tests above. */
5397 if (TEST_BIT (hoist_vbeout[bb], i))
5398 {
5399 /* We've found a potentially hoistable expression, now
5400 we look at every block BB dominates to see if it
5401 computes the expression. */
5402 for (dominated = 0; dominated < n_basic_blocks; dominated++)
5403 {
5404 /* Ignore self dominance. */
5405 if (bb == dominated
5406 || ! TEST_BIT (dominators[dominated], bb))
5407 continue;
5408
5409 /* We've found a dominated block, now see if it computes
5410 the busy expression and whether or not moving that
5411 expression to the "beginning" of that block is safe. */
5412 if (!TEST_BIT (antloc[dominated], i))
5413 continue;
5414
5415 /* The expression is computed in the dominated block and
5416 it would be safe to compute it at the start of the
5417 dominated block. Now we have to determine if the
5418 expression would reach the dominated block if it was
5419 placed at the end of BB. */
5420 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
5421 {
5422 struct expr *expr = index_map[i];
5423 struct occr *occr = expr->antic_occr;
5424 rtx insn;
5425 rtx set;
5426
5427 /* Find the right occurrence of this expression. */
5428 while (occr && BLOCK_NUM (occr->insn) != dominated)
5429 occr = occr->next;
5430
5431 /* Should never happen. */
5432 if (!occr)
5433 abort ();
5434
5435 insn = occr->insn;
5436
5437 set = single_set (insn);
5438 if (! set)
5439 abort ();
5440
5441 /* Create a pseudo-reg to store the result of reaching
5442 expressions into. Get the mode for the new pseudo
5443 from the mode of the original destination pseudo. */
5444 if (expr->reaching_reg == NULL)
5445 expr->reaching_reg
5446 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
5447
5448 /* In theory this should never fail since we're creating
5449 a reg->reg copy.
5450
5451 However, on the x86 some of the movXX patterns
5452 actually contain clobbers of scratch regs. This may
5453 cause the insn created by validate_change to not
5454 match any pattern and thus cause validate_change to
5455 fail. */
5456 if (validate_change (insn, &SET_SRC (set),
5457 expr->reaching_reg, 0))
5458 {
5459 occr->deleted_p = 1;
5460 if (!insn_inserted_p)
5461 {
5462 insert_insn_end_bb (index_map[i], bb, 0);
5463 insn_inserted_p = 1;
5464 }
5465 }
5466 }
5467 }
5468 }
5469 }
5470 }
5471
5472 free (index_map);
5473 }
5474
5475 /* Top level routine to perform one code hoisting (aka unification) pass
5476
5477 Return non-zero if a change was made. */
5478
5479 static int
5480 one_code_hoisting_pass ()
5481 {
5482 int changed = 0;
5483
5484 alloc_expr_hash_table (max_cuid);
5485 compute_expr_hash_table ();
5486 if (gcse_file)
5487 dump_hash_table (gcse_file, "Code Hoisting Expressions", expr_hash_table,
5488 expr_hash_table_size, n_exprs);
5489
5490 if (n_exprs > 0)
5491 {
5492 alloc_code_hoist_mem (n_basic_blocks, n_exprs);
5493 compute_code_hoist_data ();
5494 hoist_code ();
5495 free_code_hoist_mem ();
5496 }
5497
5498 free_expr_hash_table ();
5499
5500 return changed;
5501 }