/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hash-table.h"
#include "pointer-set.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-low.h"
/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This is a union to do this.  */
typedef union {tree *tp; tree t; gimple g;} treemple;
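
/* For example, later code in this file keys hash lookups either by a
   gimple statement or by a LABEL_DECL tree through the same union
   (the names below are illustrative only):

	treemple key;
	key.g = some_goto_stmt;     - keyed by the gimple statement
	key.t = some_label_decl;    - keyed by the label tree  */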
/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */
/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple t, int num)
{
  struct throw_stmt_node *n;
  void **slot;

  gcc_assert (num != 0);

  n = ggc_alloc<throw_stmt_node> ();
  n->stmt = t;
  n->lp_nr = num;

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, htab_create_ggc (31, struct_ptr_hash,
						    struct_ptr_eq,
						    ggc_free));

  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}
/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

static void
add_stmt_to_eh_lp (gimple t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}
/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
	lp = gen_eh_landing_pad (region);
      else
	gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}
/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node dummy;
  void **slot;

  if (!get_eh_throw_stmt_table (ifun))
    return false;

  dummy.stmt = t;
  slot = htab_find_slot (get_eh_throw_stmt_table (ifun), &dummy,
			 NO_INSERT);
  if (slot)
    {
      htab_clear_slot (get_eh_throw_stmt_table (ifun), slot);
      return true;
    }
  else
    return false;
}
/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}
/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple t)
{
  struct throw_stmt_node *p, n;

  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  n.stmt = t;
  p = (struct throw_stmt_node *) htab_find (ifun->eh->throw_stmt_table, &n);
  return p ? p->lp_nr : 0;
}
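
/* For example, a statement recorded against landing pad 2 yields 2
   from this lookup; a statement recorded inside a MUST_NOT_THROW
   region with index 3 yields -3; a statement never recorded at all
   yields 0.  */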
/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}
/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gimple parent;
};
/* Hashtable helpers.  */

struct finally_tree_hasher : typed_free_remove <finally_tree_node>
{
  typedef finally_tree_node value_type;
  typedef finally_tree_node compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

inline hashval_t
finally_tree_hasher::hash (const value_type *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const value_type *v, const compare_type *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table <finally_tree_hasher> *finally_tree;
static void
record_in_finally_tree (treemple child, gimple parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}
static void
collect_finally_tree (gimple stmt, gimple region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gimple region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}
static void
collect_finally_tree (gimple stmt, gimple region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (stmt);
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
	{
	  temp.g = stmt;
	  record_in_finally_tree (temp, region);
	  collect_finally_tree_1 (gimple_try_eval (stmt), stmt);
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
	}
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
	{
	  collect_finally_tree_1 (gimple_try_eval (stmt), region);
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
	}
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (stmt), region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
      collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }
}
/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
	return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
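
/* As an illustrative example, in

	try {
	  goto done;
	} finally {
	  cleanup ();
	}
	done:;

   the label DONE is recorded against the context enclosing the
   GIMPLE_TRY_FINALLY, so the parent walk from DONE terminates without
   reaching the try_finally node and the goto is "outside".  */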
/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straight-forward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;
/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  bool is_label;
};
/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};
struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gimple try_finally_expr;
  gimple top_p;

  /* While lowering, a top_p is usually expanded into multiple statements;
     the following field stores them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  struct pointer_map_t *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};
static gimple_seq lower_eh_must_not_throw (struct leh_state *, gimple);
/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;
  void **slot;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
	if (tf->goto_queue[i].stmt.g == stmt.g)
	  return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = pointer_map_create ();
      for (i = 0; i < tf->goto_queue_active; i++)
	{
	  slot = pointer_map_insert (tf->goto_queue_map,
				     tf->goto_queue[i].stmt.g);
	  gcc_assert (*slot == NULL);
	  *slot = &tf->goto_queue[i];
	}
    }

  slot = pointer_map_contains (tf->goto_queue_map, stmt.g);
  if (slot != NULL)
    return (((struct goto_queue_node *) *slot)->repl_stmt);

  return NULL;
}
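
/* A note on the threshold above: with fewer than LARGE_GOTO_QUEUE (20)
   entries the linear scan touches only a handful of entries per lookup,
   whereas once the queue is large the one-time cost of building
   goto_queue_map is amortized over many O(1) lookups.  */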
/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
				gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}
/* The real work of replace_goto_queue.  Returns with TSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
		      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
	{
	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
	  gsi_remove (gsi, false);
	  return;
	}
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (stmt), tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (stmt), tf);
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}
/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}
/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}
/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
	= XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}
/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
			    location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
	if (tf->dest_array[index] == label)
	  break;
      if (index == n)
	tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}
/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      new_stmt.tp = gimple_op_ptr (stmt, 2);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_true_label (stmt),
				  EXPR_LOCATION (*new_stmt.tp));
      new_stmt.tp = gimple_op_ptr (stmt, 3);
      record_in_goto_queue_label (tf, new_stmt, gimple_cond_false_label (stmt),
				  EXPR_LOCATION (*new_stmt.tp));
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
				  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}
#ifdef ENABLE_CHECKING
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state, gimple switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif
/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
		     struct leh_tf_state *tf)
{
  gimple x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}
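
/* To illustrate the two redirection helpers: a "goto dest;" that
   escapes the try block is rewritten so that REPL_STMT branches to
   FINLAB (the head of the finally code), while CONT_STMT keeps a
   "goto dest;" to be re-emitted after the finally code has run.  */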
/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  gimple x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}
/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}
/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  gimple x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}
/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
	break;
      region = region->outer;
      if (region == NULL)
	break;
    }
}
/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}
/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gimple tp, eh_region region, tree over)
{
  gimple x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
	over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}
/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
			     location_t loc)
{
  gimple region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, loc);
	  gimple_set_block (stmt, block);
	}
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}
/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
	{
	  temp.t = label;
	  record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
	}
    }
  return label;
}
/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static gimple
get_eh_else (gimple_seq finally)
{
  gimple x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return x;
    }
  return NULL;
}
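
/* For reference, a GIMPLE_EH_ELSE cleanup has the rough shape

	try { body; } finally {
	  EH_ELSE { normal_path_cleanup; } { exception_path_cleanup; }
	}

   i.e. the normal and exceptional exits get distinct cleanup code,
   which is why it must be the sole statement of the FINALLY block.  */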
/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
			       struct leh_state *this_state,
			       struct leh_tf_state *tf)
{
  tree protect_cleanup_actions;
  gimple_stmt_iterator gsi;
  bool finally_may_fallthru;
  gimple_seq finally;
  gimple x, eh_else;

  /* First check for nothing to do.  */
  if (lang_hooks.eh_protect_cleanup_actions == NULL)
    return;
  protect_cleanup_actions = lang_hooks.eh_protect_cleanup_actions ();
  if (protect_cleanup_actions == NULL)
    return;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Duplicate the FINALLY block.  Only need to do this for try-finally,
     and not for cleanups.  If we've got an EH_ELSE, extract it now.  */
  if (eh_else)
    {
      finally = gimple_eh_else_e_body (eh_else);
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
    }
  else if (this_state)
    finally = lower_try_finally_dup_block (finally, outer_state,
	gimple_location (tf->try_finally_expr));
  finally_may_fallthru = gimple_seq_may_fallthru (finally);

  /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
     set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
     to be in an enclosing scope, but needs to be implemented at this level
     to avoid a nesting violation (see wrap_temporary_cleanups in
     cp/decl.c).  Since it's logically at an outer level, we should call
     terminate before we get to it, so strip it away before adding the
     MUST_NOT_THROW filter.  */
  gsi = gsi_start (finally);
  x = gsi_stmt (gsi);
  if (gimple_code (x) == GIMPLE_TRY
      && gimple_try_kind (x) == GIMPLE_TRY_CATCH
      && gimple_try_catch_is_cleanup (x))
    {
      gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
      gsi_remove (&gsi, false);
    }

  /* Wrap the block with protect_cleanup_actions as the action.  */
  x = gimple_build_eh_must_not_throw (protect_cleanup_actions);
  x = gimple_build_try (finally, gimple_seq_alloc_with_stmt (x),
			GIMPLE_TRY_CATCH);
  finally = lower_eh_must_not_throw (outer_state, x);

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (finally_may_fallthru)
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}
/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
			      struct leh_tf_state *tf)
{
  tree lab;
  gimple x, eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	}
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  emit_post_landing_pad (&eh_seq, tf->region);

	  x = gimple_build_goto (lab);
	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
	  gimple_seq_add_stmt (&eh_seq, x);
	}
    }
}
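
/* For example, a finally block that ends in a call to a noreturn
   function never falls through; every goto or return escaping the try
   block is then simply redirected to the label at the head of the
   single emitted copy of the finally block.  */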
/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  gimple x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  x = get_eh_else (finally);
  if (x)
    {
      if (tf->may_throw)
	finally = gimple_eh_else_e_body (x);
      else
	finally = gimple_eh_else_n_body (x);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
	  gimple_set_block (stmt, block);
	}
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
	 the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
	{
	  /* Reachable by goto to fallthru label only.  Redirect it
	     to the new label (already created, sadly), and do not
	     emit the final branch out, or the fallthru label.  */
	  tf->fallthru_label = NULL;
	  return;
	}
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple x, eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
	 since it is only emitted once.  */
      if (eh_else)
	seq = gimple_eh_else_e_body (eh_else);
      else
	seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
	struct goto_queue_node *q;
	tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
	{
	  index = q->index < 0 ? return_index : q->index;

	  if (!labels[index].q)
	    labels[index].q = q;
	}

      for (index = 0; index < return_index + 1; index++)
	{
	  tree lab;

	  q = labels[index].q;
	  if (! q)
	    continue;

	  lab = labels[index].label
	    = create_artificial_label (tf_loc);

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);

	  x = gimple_build_label (lab);
	  gimple_seq_add_stmt (&new_stmt, x);

	  seq = lower_try_finally_dup_block (finally, state, q->location);
	  lower_eh_constructs_1 (state, &seq);
	  gimple_seq_add_seq (&new_stmt, seq);

	  gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
	  maybe_record_in_goto_queue (state, q->cont_stmt);
	}

      for (q = tf->goto_queue; q < qe; q++)
	{
	  tree lab;

	  index = q->index < 0 ? return_index : q->index;

	  if (labels[index].q == q)
	    continue;

	  lab = labels[index].label;

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);
	}

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple x, eh_else;
  tree tmp;
  gimple switch_stmt;
  gimple_seq finally;
  struct pointer_map_t *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node,
					      fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	  emit_resx (&eh_seq, tf->region);
	}

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node,
						  return_index));
	  gimple_seq_add_stmt (&mod, x);
	  do_return_redirection (q, finally_label, mod);
	  switch_id = return_index;
	}
      else
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node, q->index));
	  gimple_seq_add_stmt (&mod, x);
	  do_goto_redirection (q, finally_label, mod, tf);
	  switch_id = q->index;
	}

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
	{
	  tree case_lab;
	  void **slot;
	  tmp = build_int_cst (integer_type_node, switch_id);
	  case_lab = build_case_label (tmp, NULL,
				       create_artificial_label (tf_loc));
	  /* We store the cont_stmt in the pointer map, so that we can recover
	     it in the loop below.  */
	  if (!cont_map)
	    cont_map = pointer_map_create ();
	  slot = pointer_map_insert (cont_map, case_lab);
	  *slot = q->cont_stmt;
	  case_label_vec.quick_push (case_lab);
	}
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple cont_stmt;
      void **slot;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      slot = pointer_map_contains (cont_map, last_case);
      gcc_assert (slot);
      cont_stmt = *(gimple *) slot;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    pointer_map_destroy (cont_map);

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
				     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
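
/* As an illustrative sketch (the actual indices depend on which
   destinations are present), a try-finally with a fallthru exit and one
   escaping goto lowers roughly to

	<try-body>
	finally_tmp = fallthru_index;
	finally_label:
	<finally-body>
	switch (finally_tmp)
	  {
	  case fallthru_index: goto fallthru_label;
	  default: goto original_destination;
	  }

   with each incoming edge assigning its own index to finally_tmp
   before branching to finally_label.  */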
/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  gimple eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
	return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (!is_gimple_debug (stmt) && !gimple_clobber_p (stmt))
	    return false;
	}
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = count_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (ndests == 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}
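
/* Worked example of the estimates above: with ndests == 3 and a
   finally block counted at 5 insns, f_estimate = (5 + 1) * 3 = 18
   while sw_estimate = 10 + 2 * 3 = 16, so when optimizing for size
   the switch form wins (18 < 16 is false); at -O2 the copy form is
   still taken since f_estimate < 40.  */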
/* REG is the enclosing region for a possible cleanup region, or the region
   itself.  Returns TRUE if such a region would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */

static bool
cleanup_is_dead_in (eh_region reg)
{
  while (reg && reg->type == ERT_CLEANUP)
    reg = reg->outer;
  return (reg && reg->type == ERT_MUST_NOT_THROW);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */

static gimple_seq
lower_try_finally (struct leh_state *state, gimple tp)
{
  struct leh_tf_state this_tf;
  struct leh_state this_state;
  int ndests;
  gimple_seq old_eh_seq;

  /* Process the try block.  */

  memset (&this_tf, 0, sizeof (this_tf));
  this_tf.try_finally_expr = tp;
  this_tf.top_p = tp;
  this_tf.outer = state;
  if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
    {
      this_tf.region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_tf.region;
    }
  else
    {
      this_tf.region = NULL;
      this_state.cur_region = state->cur_region;
    }

  this_state.ehp_region = state->ehp_region;
  this_state.tf = &this_tf;

  old_eh_seq = eh_seq;
  eh_seq = NULL;

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  /* Determine if the try block is escaped through the bottom.  */
  this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));

  /* Determine if any exceptions are possible within the try block.  */
  if (this_tf.region)
    this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
  if (this_tf.may_throw)
    honor_protect_cleanup_actions (state, &this_state, &this_tf);

  /* Determine how many edges (still) reach the finally block.  Or rather,
     how many destinations are reached by the finally block.  Use this to
     determine how we process the finally block itself.  */

  ndests = this_tf.dest_array.length ();
  ndests += this_tf.may_fallthru;
  ndests += this_tf.may_return;
  ndests += this_tf.may_throw;

  /* If the FINALLY block is not reachable, dike it out.  */
  if (ndests == 0)
    {
      gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
      gimple_try_set_cleanup (tp, NULL);
    }
  /* If the finally block doesn't fall through, then any destination
     we might try to impose there isn't reached either.  There may be
     some minor amount of cleanup and redirection still needed.  */
  else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
    lower_try_finally_nofallthru (state, &this_tf);

  /* We can easily special-case redirection to a single destination.  */
  else if (ndests == 1)
    lower_try_finally_onedest (state, &this_tf);
  else if (decide_copy_try_finally (ndests, this_tf.may_throw,
				    gimple_try_cleanup (tp)))
    lower_try_finally_copy (state, &this_tf);
  else
    lower_try_finally_switch (state, &this_tf);

  /* If someone requested we add a label at the end of the transformed
     sequence, do so.  */
  if (this_tf.fallthru_label)
    {
      /* This must be reached only if ndests == 0.  */
      gimple x = gimple_build_label (this_tf.fallthru_label);
      gimple_seq_add_stmt (&this_tf.top_p_seq, x);
    }

  this_tf.dest_array.release ();
  free (this_tf.goto_queue);
  if (this_tf.goto_queue_map)
    pointer_map_destroy (this_tf.goto_queue_map);

  /* If there was an old (aka outer) eh_seq, append the current eh_seq.
     If there was no old eh_seq, then the append is trivially already done.  */
  if (old_eh_seq)
    {
      if (eh_seq == NULL)
	eh_seq = old_eh_seq;
      else
	{
	  gimple_seq new_eh_seq = eh_seq;
	  eh_seq = old_eh_seq;
	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
	}
    }

  return this_tf.top_p_seq;
}
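
/* In summary, the dispatch above picks between four strategies:
   ndests == 0 drops the dead finally block entirely; a finally block
   that cannot fall through gets the nofallthru form; exactly one
   destination gets the onedest form; otherwise decide_copy_try_finally
   arbitrates between duplicating the finally block per destination and
   the switch-based single copy.  */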
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */

static gimple_seq
lower_catch (struct leh_state *state, gimple tp)
{
  eh_region try_region = NULL;
  struct leh_state this_state = *state;
  gimple_stmt_iterator gsi;
  tree out_label;
  gimple_seq new_seq, cleanup;
  gimple x;
  location_t try_catch_loc = gimple_location (tp);

  if (flag_exceptions)
    {
      try_region = gen_eh_region_try (state->cur_region);
      this_state.cur_region = try_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (try_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  emit_eh_dispatch (&new_seq, try_region);
  emit_resx (&new_seq, try_region);

  this_state.cur_region = state->cur_region;
  this_state.ehp_region = try_region;

  out_label = NULL;
  cleanup = gimple_try_cleanup (tp);
  for (gsi = gsi_start (cleanup);
       !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      eh_catch c;
      gimple gcatch;
      gimple_seq handler;

      gcatch = gsi_stmt (gsi);
      c = gen_eh_region_catch (try_region, gimple_catch_types (gcatch));

      handler = gimple_catch_handler (gcatch);
      lower_eh_constructs_1 (&this_state, &handler);

      c->label = create_artificial_label (UNKNOWN_LOCATION);
      x = gimple_build_label (c->label);
      gimple_seq_add_stmt (&new_seq, x);

      gimple_seq_add_seq (&new_seq, handler);

      if (gimple_seq_may_fallthru (new_seq))
	{
	  if (!out_label)
	    out_label = create_artificial_label (try_catch_loc);

	  x = gimple_build_goto (out_label);
	  gimple_seq_add_stmt (&new_seq, x);
	}
      if (!c->type_list)
	break;
    }

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, try_region, out_label);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
   GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
   region trees that record all the magic.  */

static gimple_seq
lower_eh_filter (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  gimple inner, x;
  gimple_seq new_seq;

  inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));

  if (flag_exceptions)
    {
      this_region = gen_eh_region_allowed (state->cur_region,
				           gimple_eh_filter_types (inner));
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (!eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  new_seq = NULL;
  this_state.cur_region = state->cur_region;
  this_state.ehp_region = this_region;

  emit_eh_dispatch (&new_seq, this_region);
  emit_resx (&new_seq, this_region);

  this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
  x = gimple_build_label (this_region->u.allowed.label);
  gimple_seq_add_stmt (&new_seq, x);

  lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
  gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));

  gimple_try_set_cleanup (tp, new_seq);

  return frob_into_branch_around (tp, this_region, NULL);
}
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */

static gimple_seq
lower_eh_must_not_throw (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;

  if (flag_exceptions)
    {
      gimple inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
      eh_region this_region;

      this_region = gen_eh_region_must_not_throw (state->cur_region);
      this_region->u.must_not_throw.failure_decl
	= gimple_eh_must_not_throw_fndecl (inner);
      this_region->u.must_not_throw.failure_loc
	= LOCATION_LOCUS (gimple_location (tp));

      /* In order to get mangling applied to this decl, we must mark it
	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
	 needs to happen.  */
      TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;

      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  return gimple_try_eval (tp);
}
/* Implement a cleanup expression.  This is similar to try-finally,
   except that we only execute the cleanup block for exception edges.  */

static gimple_seq
lower_cleanup (struct leh_state *state, gimple tp)
{
  struct leh_state this_state = *state;
  eh_region this_region = NULL;
  struct leh_tf_state fake_tf;
  gimple_seq result;
  bool cleanup_dead = cleanup_is_dead_in (state->cur_region);

  if (flag_exceptions && !cleanup_dead)
    {
      this_region = gen_eh_region_cleanup (state->cur_region);
      this_state.cur_region = this_region;
    }

  lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));

  if (cleanup_dead || !eh_region_may_contain_throw (this_region))
    return gimple_try_eval (tp);

  /* Build enough of a try-finally state so that we can reuse
     honor_protect_cleanup_actions.  */
  memset (&fake_tf, 0, sizeof (fake_tf));
  fake_tf.top_p = fake_tf.try_finally_expr = tp;
  fake_tf.outer = state;
  fake_tf.region = this_region;
  fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
  fake_tf.may_throw = true;

  honor_protect_cleanup_actions (state, NULL, &fake_tf);

  if (fake_tf.may_throw)
    {
      /* In this case honor_protect_cleanup_actions had nothing to do,
	 and we should process this normally.  */
      lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
      result = frob_into_branch_around (tp, this_region,
                                        fake_tf.fallthru_label);
    }
  else
    {
      /* In this case honor_protect_cleanup_actions did nearly all of
	 the work.  All we have left is to append the fallthru_label.  */

      result = gimple_try_eval (tp);
      if (fake_tf.fallthru_label)
	{
	  gimple x = gimple_build_label (fake_tf.fallthru_label);
	  gimple_seq_add_stmt (&result, x);
	}
    }
  return result;
}
/* Main loop for lowering eh constructs.  Also moves gsi to the next
   statement.  */

static void
lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
{
  gimple_seq replace;
  gimple x;
  gimple stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	tree rhs, lhs;

	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	  switch (DECL_FUNCTION_CODE (fndecl))
	    {
	    case BUILT_IN_EH_POINTER:
	      /* The front end may have generated a call to
		 __builtin_eh_pointer (0) within a catch region.  Replace
		 this zero argument with the current catch region number.  */
	      if (state->ehp_region)
		{
		  tree nr = build_int_cst (integer_type_node,
					   state->ehp_region->index);
		  gimple_call_set_arg (stmt, 0, nr);
		}
	      else
		{
		  /* The user has done something silly.  Remove it.  */
		  rhs = null_pointer_node;
		  goto do_replace;
		}
	      break;

	    case BUILT_IN_EH_FILTER:
	      /* ??? This should never appear, but since it's a builtin it
		 is accessible to abuse by users.  Just remove it and
		 replace the use with the arbitrary value zero.  */
	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
	    do_replace:
	      lhs = gimple_call_lhs (stmt);
	      x = gimple_build_assign (lhs, rhs);
	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
	      /* FALLTHRU */

	    case BUILT_IN_EH_COPY_VALUES:
	      /* Likewise this should not appear.  Remove it.  */
	      gsi_remove (gsi, true);
	      return;

	    default:
	      break;
	    }
      }
      /* FALLTHRU */

    case GIMPLE_ASSIGN:
      /* If the stmt can throw use a new temporary for the assignment
         to a LHS.  This makes sure the old value of the LHS is
	 available on the EH edge.  Only do so for statements that
	 potentially fall through (no noreturn calls e.g.), otherwise
	 this new assignment might create fake fallthru regions.  */
      if (stmt_could_throw_p (stmt)
	  && gimple_has_lhs (stmt)
	  && gimple_stmt_may_fallthru (stmt)
	  && !tree_could_throw_p (gimple_get_lhs (stmt))
	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
	{
	  tree lhs = gimple_get_lhs (stmt);
	  tree tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
	  gimple s = gimple_build_assign (lhs, tmp);
	  gimple_set_location (s, gimple_location (stmt));
	  gimple_set_block (s, gimple_block (stmt));
	  gimple_set_lhs (stmt, tmp);
	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
	    DECL_GIMPLE_REG_P (tmp) = 1;
	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
	}
      /* Look for things that can throw exceptions, and record them.  */
      if (state->cur_region && stmt_could_throw_p (stmt))
	{
	  record_stmt_eh_region (state->cur_region, stmt);
	  note_eh_region_may_contain_throw (state->cur_region);
	}
      break;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      maybe_record_in_goto_queue (state, stmt);
      break;

    case GIMPLE_SWITCH:
      verify_norecord_switch_expr (state, stmt);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
	replace = lower_try_finally (state, stmt);
      else
	{
	  x = gimple_seq_first_stmt (gimple_try_cleanup (stmt));
	  if (!x)
	    {
	      replace = gimple_try_eval (stmt);
	      lower_eh_constructs_1 (state, &replace);
	    }
	  else
	    switch (gimple_code (x))
	      {
	      case GIMPLE_CATCH:
		replace = lower_catch (state, stmt);
		break;
	      case GIMPLE_EH_FILTER:
		replace = lower_eh_filter (state, stmt);
		break;
	      case GIMPLE_EH_MUST_NOT_THROW:
		replace = lower_eh_must_not_throw (state, stmt);
		break;
	      case GIMPLE_EH_ELSE:
		/* This code is only valid with GIMPLE_TRY_FINALLY.  */
		gcc_unreachable ();
	      default:
		replace = lower_cleanup (state, stmt);
		break;
	      }
	}

      /* Remove the old stmt and insert the transformed sequence
	 instead.  */
      gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
      gsi_remove (gsi, true);

      /* Return since we don't want gsi_next ().  */
      return;

    case GIMPLE_EH_ELSE:
      /* We should be eliminating this in lower_try_finally et al.  */
      gcc_unreachable ();

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }

  gsi_next (gsi);
}
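
/* To illustrate the temporary introduced for throwing assignments in
   lower_eh_constructs_2 above: a statement "x = foo ();" that may throw
   becomes

	tmp = foo ();	(may throw; x unchanged along the EH edge)
	x = tmp;	(executed only on fallthru)

   so the old value of X remains visible to the landing pad.  */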
/* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2.  */

static void
lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
{
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
    lower_eh_constructs_2 (state, &gsi);
}
namespace {

const pass_data pass_data_lower_eh =
{
  GIMPLE_PASS, /* type */
  "eh", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  PROP_gimple_leh, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh : public gimple_opt_pass
{
public:
  pass_lower_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_lower_eh
unsigned int
pass_lower_eh::execute (function *fun)
{
  struct leh_state null_state;
  gimple_seq bodyp;

  bodyp = gimple_body (current_function_decl);
  if (bodyp == NULL)
    return 0;

  finally_tree = new hash_table<finally_tree_hasher> (31);
  eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
  memset (&null_state, 0, sizeof (null_state));

  collect_finally_tree_1 (bodyp, NULL);
  lower_eh_constructs_1 (&null_state, &bodyp);
  gimple_set_body (current_function_decl, bodyp);

  /* We assume there's a return statement, or something, at the end of
     the function, and thus plopping the EH sequence afterward won't
     change anything.  */
  gcc_assert (!gimple_seq_may_fallthru (bodyp));
  gimple_seq_add_seq (&bodyp, eh_seq);

  /* We assume that since BODYP already existed, adding EH_SEQ to it
     didn't change its value, and we don't have to re-set the function.  */
  gcc_assert (bodyp == gimple_body (current_function_decl));

  delete finally_tree;
  finally_tree = NULL;
  BITMAP_FREE (eh_region_may_contain_throw_map);
  eh_seq = NULL;

  /* If this function needs a language specific EH personality routine
     and the frontend didn't already set one do so now.  */
  if (function_needs_eh_personality (fun) == eh_personality_lang
      && !DECL_FUNCTION_PERSONALITY (current_function_decl))
    DECL_FUNCTION_PERSONALITY (current_function_decl)
      = lang_hooks.eh_personality ();

  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_lower_eh (gcc::context *ctxt)
{
  return new pass_lower_eh (ctxt);
}
/* Create the multiple edges from an EH_DISPATCH statement to all of
   the possible handlers for its EH region.  Return true if a fallthru
   edge is still required; false if there is none (e.g. when a
   catch-all handler swallows every exception).  */

bool
make_eh_dispatch_edges (gimple stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  make_edge (src, dst, 0);

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    return false;
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      make_edge (src, dst, 0);
      break;

    default:
      gcc_unreachable ();
    }

  return true;
}
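/* A rough picture of the edges created above (labels invented for the
   sketch): for a region like

       try { body; }
       catch (A)   -> L_A
       catch (...) -> L_ALL

   the EH_DISPATCH block gets one edge to L_A and one to L_ALL, and no
   fallthru edge, because the catch-all matches every exception.  Without
   the catch-all, a fallthru edge remains for re-raising to the outer
   region.  */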
/* Create the single EH edge from STMT to its nearest landing pad,
   if there is such a landing pad within the current function.  */

void
make_eh_edges (gimple stmt)
{
  basic_block src, dst;
  eh_landing_pad lp;
  int lp_nr;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr <= 0)
    return;

  lp = get_eh_landing_pad_from_number (lp_nr);
  gcc_assert (lp != NULL);

  src = gimple_bb (stmt);
  dst = label_to_block (lp->post_landing_pad);
  make_edge (src, dst, EDGE_EH);
}
/* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
   do not actually perform the final edge redirection.

   CHANGE_REGION is true when we're being called from cleanup_empty_eh and
   we intend to change the destination EH region as well; this means
   EH_LANDING_PAD_NR must already be set on the destination block label.
   If false, we're being called from generic cfg manipulation code and we
   should preserve our place within the region tree.  */

static void
redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
{
  eh_landing_pad old_lp, new_lp;
  basic_block old_bb;
  gimple throw_stmt;
  int old_lp_nr, new_lp_nr;
  tree old_label, new_label;
  edge_iterator ei;
  edge e;

  old_bb = edge_in->dest;
  old_label = gimple_block_label (old_bb);
  old_lp_nr = EH_LANDING_PAD_NR (old_label);
  gcc_assert (old_lp_nr > 0);
  old_lp = get_eh_landing_pad_from_number (old_lp_nr);

  throw_stmt = last_stmt (edge_in->src);
  gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);

  new_label = gimple_block_label (new_bb);

  /* Look for an existing region that might be using NEW_BB already.  */
  new_lp_nr = EH_LANDING_PAD_NR (new_label);
  if (new_lp_nr)
    {
      new_lp = get_eh_landing_pad_from_number (new_lp_nr);
      gcc_assert (new_lp);

      /* Unless CHANGE_REGION is true, the new and old landing pad
	 had better be associated with the same EH region.  */
      gcc_assert (change_region || new_lp->region == old_lp->region);
    }
  else
    {
      new_lp = NULL;
      gcc_assert (!change_region);
    }

  /* Notice when we redirect the last EH edge away from OLD_BB.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (e != edge_in && (e->flags & EDGE_EH))
      break;

  if (new_lp)
    {
      /* NEW_LP already exists.  If there are still edges into OLD_LP,
	 there's nothing to do with the EH tree.  If there are no more
	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
	 If CHANGE_REGION is true, then our caller is expecting to remove
	 the landing pad.  */
      if (e == NULL && !change_region)
	remove_eh_landing_pad (old_lp);
    }
  else
    {
      /* No correct landing pad exists.  If there are no more edges
	 into OLD_LP, then we can simply re-use the existing landing pad.
	 Otherwise, we have to create a new landing pad.  */
      if (e == NULL)
	{
	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
	  new_lp = old_lp;
	}
      else
	new_lp = gen_eh_landing_pad (old_lp->region);
      new_lp->post_landing_pad = new_label;
      EH_LANDING_PAD_NR (new_label) = new_lp->index;
    }

  /* Maybe move the throwing statement to the new region.  */
  if (old_lp != new_lp)
    {
      remove_stmt_from_eh_lp (throw_stmt);
      add_stmt_to_eh_lp (throw_stmt, new_lp->index);
    }
}
/* Redirect EH edge E to NEW_BB.  */

edge
redirect_eh_edge (edge edge_in, basic_block new_bb)
{
  redirect_eh_edge_1 (edge_in, new_bb, false);
  return ssa_redirect_edge (edge_in, new_bb);
}
/* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
   labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
   The actual edge update will happen in the caller.  */

void
redirect_eh_dispatch_edge (gimple stmt, edge e, basic_block new_bb)
{
  tree new_lab = gimple_block_label (new_bb);
  bool any_changed = false;
  basic_block old_bb;
  eh_region r;
  eh_catch c;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  old_bb = label_to_block (c->label);
	  if (old_bb == e->dest)
	    {
	      c->label = new_lab;
	      any_changed = true;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      old_bb = label_to_block (r->u.allowed.label);
      gcc_assert (old_bb == e->dest);
      r->u.allowed.label = new_lab;
      any_changed = true;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (any_changed);
}
/* Helper function for operation_could_trap_p and stmt_could_throw_p.  */

static bool
operation_could_trap_helper_p (enum tree_code op,
			       bool fp_operation,
			       bool honor_trapv,
			       bool honor_nans,
			       bool honor_snans,
			       tree divisor,
			       bool *handled)
{
  *handled = true;
  switch (op)
    {
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      if (honor_snans || honor_trapv)
	return true;
      if (fp_operation)
	return flag_trapping_math;
      if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
	return true;
      return false;

    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LTGT_EXPR:
      /* Some floating point comparisons may trap.  */
      return honor_nans;

    case EQ_EXPR:
    case NE_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
      return honor_snans;

    case CONVERT_EXPR:
    case FIX_TRUNC_EXPR:
      /* Conversion of floating point might trap.  */
      return honor_nans;

    case NEGATE_EXPR:
    case ABS_EXPR:
    case CONJ_EXPR:
      /* These operations don't trap with floating point.  */
      if (honor_trapv)
	return true;
      return false;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;
      if (honor_trapv)
	return true;
      return false;

    case COMPLEX_EXPR:
    case CONSTRUCTOR:
      /* Constructing an object cannot trap.  */
      return false;

    default:
      /* Any floating arithmetic may trap.  */
      if (fp_operation && flag_trapping_math)
	return true;

      *handled = false;
      return false;
    }
}
/* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
   on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
   type operands that may trap.  If OP is a division operator, DIVISOR contains
   the value of the divisor.  */

bool
operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
			tree divisor)
{
  bool honor_nans = (fp_operation && flag_trapping_math
		     && !flag_finite_math_only);
  bool honor_snans = fp_operation && flag_signaling_nans != 0;
  bool handled;

  if (TREE_CODE_CLASS (op) != tcc_comparison
      && TREE_CODE_CLASS (op) != tcc_unary
      && TREE_CODE_CLASS (op) != tcc_binary)
    return false;

  return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
					honor_nans, honor_snans, divisor,
					&handled);
}
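/* Some illustrative queries (a sketch; the exact answers depend on the
   trapping-math and overflow flags in effect):

     operation_could_trap_p (TRUNC_DIV_EXPR, false, false, integer_zero_node)
       -> true: integer division by a constant zero divisor traps;
     operation_could_trap_p (PLUS_EXPR, true, false, NULL_TREE)
       -> flag_trapping_math: FP addition may trap only under trapping math;
     operation_could_trap_p (PLUS_EXPR, false, true, NULL_TREE)
       -> true: with trapping overflow (-ftrapv), addition may trap.  */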
/* Returns true if it is possible to prove that the index of
   an array access REF (an ARRAY_REF expression) falls into the
   array bounds.  */

static bool
in_array_bounds_p (tree ref)
{
  tree idx = TREE_OPERAND (ref, 1);
  tree min, max;

  if (TREE_CODE (idx) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (idx, min)
      || tree_int_cst_lt (max, idx))
    return false;

  return true;
}
/* Returns true if it is possible to prove that the range of
   an array access REF (an ARRAY_RANGE_REF expression) falls
   into the array bounds.  */

static bool
range_in_array_bounds_p (tree ref)
{
  tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
  tree range_min, range_max, min, max;

  range_min = TYPE_MIN_VALUE (domain_type);
  range_max = TYPE_MAX_VALUE (domain_type);
  if (!range_min
      || !range_max
      || TREE_CODE (range_min) != INTEGER_CST
      || TREE_CODE (range_max) != INTEGER_CST)
    return false;

  min = array_ref_low_bound (ref);
  max = array_ref_up_bound (ref);
  if (!min
      || !max
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    return false;

  if (tree_int_cst_lt (range_min, min)
      || tree_int_cst_lt (max, range_max))
    return false;

  return true;
}
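/* For example (a sketch, not part of the pass): given

     int a[10];

   the access a[4] has a constant index inside [0, 9], so
   in_array_bounds_p can prove it in bounds, whereas a[i] for a
   variable I, or the out-of-range a[12], fails the test and is
   treated as possibly trapping by tree_could_trap_p below.  */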
/* Return true if EXPR can trap, as in dereferencing an invalid pointer
   location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
   This routine expects only GIMPLE lhs or rhs input.  */

bool
tree_could_trap_p (tree expr)
{
  enum tree_code code;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t, base, div = NULL_TREE;

  if (!expr)
    return false;

  code = TREE_CODE (expr);
  t = TREE_TYPE (expr);

  if (t)
    {
      if (COMPARISON_CLASS_P (expr))
	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
      else
	fp_operation = FLOAT_TYPE_P (t);
      honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
    }

  if (TREE_CODE_CLASS (code) == tcc_binary)
    div = TREE_OPERAND (expr, 1);
  if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
    return true;

 restart:
  switch (code)
    {
    case COMPONENT_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case BIT_FIELD_REF:
    case VIEW_CONVERT_EXPR:
    case WITH_SIZE_EXPR:
      expr = TREE_OPERAND (expr, 0);
      code = TREE_CODE (expr);
      goto restart;

    case ARRAY_RANGE_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !range_in_array_bounds_p (expr);

    case ARRAY_REF:
      base = TREE_OPERAND (expr, 0);
      if (tree_could_trap_p (base))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      return !in_array_bounds_p (expr);

    case TARGET_MEM_REF:
    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
	return true;
      if (TREE_THIS_NOTRAP (expr))
	return false;
      /* We cannot prove that the access is in-bounds when we have
	 variable-index TARGET_MEM_REFs.  */
      if (code == TARGET_MEM_REF
	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
	return true;
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	{
	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
	  offset_int off = mem_ref_offset (expr);
	  if (wi::neg_p (off, SIGNED))
	    return true;
	  if (TREE_CODE (base) == STRING_CST)
	    return wi::leu_p (TREE_STRING_LENGTH (base), off);
	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
		   || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
	    return true;
	  /* Now we are sure the first byte of the access is inside
	     the object.  */
	  return false;
	}
      return true;

    case INDIRECT_REF:
      return !TREE_THIS_NOTRAP (expr);

    case ASM_EXPR:
      return TREE_THIS_VOLATILE (expr);

    case CALL_EXPR:
      t = get_callee_fndecl (expr);
      /* Assume that calls to weak functions may trap.  */
      if (!t || !DECL_P (t))
	return true;
      if (DECL_WEAK (t))
	return tree_could_trap_p (t);
      return false;

    case FUNCTION_DECL:
      /* Assume that accesses to weak functions may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
	{
	  struct cgraph_node *node;
	  if (!DECL_EXTERNAL (expr))
	    return false;
	  node = cgraph_node::get (expr)->function_symbol ();
	  if (node && node->in_other_partition)
	    return false;
	  return true;
	}
      return false;

    case VAR_DECL:
      /* Assume that accesses to weak vars may trap, unless we know
	 they are certainly defined in current TU or in some other
	 LTO partition.  */
      if (DECL_WEAK (expr) && !DECL_COMDAT (expr))
	{
	  varpool_node *node;
	  if (!DECL_EXTERNAL (expr))
	    return false;
	  node = varpool_node::get (expr)->ultimate_alias_target ();
	  if (node && node->in_other_partition)
	    return false;
	  return true;
	}
      return false;

    default:
      return false;
    }
}
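/* A few illustrative cases (schematic; P, B and GLOB are invented names
   for the sketch):

     *p          -> could trap, unless TREE_THIS_NOTRAP is set on the ref;
     b.field     -> trap status of the base B, via the restart loop above;
     glob        -> trapping only if GLOB is weak and possibly undefined;
     x + y (int) -> false, unless the type honors trapping overflow.  */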
/* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be an
   assignment or a conditional) may throw.  */

static bool
stmt_could_throw_1_p (gimple stmt)
{
  enum tree_code code = gimple_expr_code (stmt);
  bool honor_nans = false;
  bool honor_snans = false;
  bool fp_operation = false;
  bool honor_trapv = false;
  tree t;
  size_t i;
  bool handled, ret;

  if (TREE_CODE_CLASS (code) == tcc_comparison
      || TREE_CODE_CLASS (code) == tcc_unary
      || TREE_CODE_CLASS (code) == tcc_binary)
    {
      if (is_gimple_assign (stmt)
	  && TREE_CODE_CLASS (code) == tcc_comparison)
	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
      else if (gimple_code (stmt) == GIMPLE_COND)
	t = TREE_TYPE (gimple_cond_lhs (stmt));
      else
	t = gimple_expr_type (stmt);
      fp_operation = FLOAT_TYPE_P (t);
      if (fp_operation)
	{
	  honor_nans = flag_trapping_math && !flag_finite_math_only;
	  honor_snans = flag_signaling_nans != 0;
	}
      else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
	honor_trapv = true;
    }

  /* Check if the main expression may trap.  */
  t = is_gimple_assign (stmt) ? gimple_assign_rhs2 (stmt) : NULL;
  ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
				       honor_nans, honor_snans, t,
				       &handled);
  if (handled)
    return ret;

  /* If the expression does not trap, see if any of the individual operands may
     trap.  */
  for (i = 0; i < gimple_num_ops (stmt); i++)
    if (tree_could_trap_p (gimple_op (stmt, i)))
      return true;

  return false;
}
/* Return true if statement STMT could throw an exception.  */

bool
stmt_could_throw_p (gimple stmt)
{
  if (!flag_exceptions)
    return false;

  /* The only statements that can throw an exception are assignments,
     conditionals, calls, resx, and asms.  */
  switch (gimple_code (stmt))
    {
    case GIMPLE_RESX:
      return true;

    case GIMPLE_CALL:
      return !gimple_call_nothrow_p (stmt);

    case GIMPLE_ASSIGN:
    case GIMPLE_COND:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return stmt_could_throw_1_p (stmt);

    case GIMPLE_ASM:
      if (!cfun->can_throw_non_call_exceptions)
	return false;
      return gimple_asm_volatile_p (stmt);

    default:
      return false;
    }
}
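/* Concretely (a sketch): with -fexceptions alone, only calls and resx
   statements are considered throwing, so an assignment like "x = y / z"
   is not.  With -fnon-call-exceptions, i.e. when
   cfun->can_throw_non_call_exceptions is set, the same division is
   handed to stmt_could_throw_1_p above, which finds the potentially
   trapping TRUNC_DIV_EXPR and answers true.  */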
/* Return true if expression T could throw an exception.  */

bool
tree_could_throw_p (tree t)
{
  if (!flag_exceptions)
    return false;
  if (TREE_CODE (t) == MODIFY_EXPR)
    {
      if (cfun->can_throw_non_call_exceptions
	  && tree_could_trap_p (TREE_OPERAND (t, 0)))
	return true;
      t = TREE_OPERAND (t, 1);
    }

  if (TREE_CODE (t) == WITH_SIZE_EXPR)
    t = TREE_OPERAND (t, 0);
  if (TREE_CODE (t) == CALL_EXPR)
    return (call_expr_flags (t) & ECF_NOTHROW) == 0;
  if (cfun->can_throw_non_call_exceptions)
    return tree_could_trap_p (t);
  return false;
}
/* Return true if STMT can throw an exception that is not caught within
   the current function (CFUN).  */

bool
stmt_can_throw_external (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr == 0;
}

/* Return true if STMT can throw an exception that is caught within
   the current function (CFUN).  */

bool
stmt_can_throw_internal (gimple stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  return lp_nr > 0;
}
/* Given a statement STMT in IFUN, if STMT can no longer throw, then
   remove any entry it might have from the EH table.  Return true if
   any change was made.  */

bool
maybe_clean_eh_stmt_fn (struct function *ifun, gimple stmt)
{
  if (stmt_could_throw_p (stmt))
    return false;
  return remove_stmt_from_eh_lp_fn (ifun, stmt);
}

/* Likewise, but always use the current function.  */

bool
maybe_clean_eh_stmt (gimple stmt)
{
  return maybe_clean_eh_stmt_fn (cfun, stmt);
}
/* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
   OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
   in the table if it should be in there.  Return TRUE if a replacement was
   done that may require an EH edge purge.  */

bool
maybe_clean_or_replace_eh_stmt (gimple old_stmt, gimple new_stmt)
{
  int lp_nr = lookup_stmt_eh_lp (old_stmt);

  if (lp_nr != 0)
    {
      bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);

      if (new_stmt == old_stmt && new_stmt_could_throw)
	return false;

      remove_stmt_from_eh_lp (old_stmt);
      if (new_stmt_could_throw)
	{
	  add_stmt_to_eh_lp (new_stmt, lp_nr);
	  return false;
	}
      else
	return true;
    }

  return false;
}
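/* Typical use by callers that fold or replace statements in place
   (a sketch, assuming GSI points at the statement being folded):

     gimple old_stmt = gsi_stmt (gsi);
     fold_stmt (&gsi);
     gimple new_stmt = gsi_stmt (gsi);
     if (maybe_clean_or_replace_eh_stmt (old_stmt, new_stmt))
       gimple_purge_dead_eh_edges (gimple_bb (new_stmt));

   That is, a TRUE return means the statement no longer throws and any
   now-dead EH edges out of its block must be purged.  */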
/* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
   in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
   operand is the return value of duplicate_eh_regions.  */

bool
maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple new_stmt,
			    struct function *old_fun, gimple old_stmt,
			    struct pointer_map_t *map, int default_lp_nr)
{
  int old_lp_nr, new_lp_nr;
  void **slot;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
  if (old_lp_nr == 0)
    {
      if (default_lp_nr == 0)
	return false;
      new_lp_nr = default_lp_nr;
    }
  else if (old_lp_nr > 0)
    {
      eh_landing_pad old_lp, new_lp;

      old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
      slot = pointer_map_contains (map, old_lp);
      new_lp = (eh_landing_pad) *slot;
      new_lp_nr = new_lp->index;
    }
  else
    {
      eh_region old_r, new_r;

      old_r = (*old_fun->eh->region_array)[-old_lp_nr];
      slot = pointer_map_contains (map, old_r);
      new_r = (eh_region) *slot;
      new_lp_nr = -new_r->index;
    }

  add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
  return true;
}
/* Similar, but both OLD_STMT and NEW_STMT are within the current function,
   and thus no remapping is required.  */

bool
maybe_duplicate_eh_stmt (gimple new_stmt, gimple old_stmt)
{
  int lp_nr;

  if (!stmt_could_throw_p (new_stmt))
    return false;

  lp_nr = lookup_stmt_eh_lp (old_stmt);
  if (lp_nr == 0)
    return false;

  add_stmt_to_eh_lp (new_stmt, lp_nr);
  return true;
}
/* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
   this only handles handlers consisting of a single call, as that's the
   important case for C++: a destructor call for a particular object showing
   up in multiple handlers.  */

static bool
same_handler_p (gimple_seq oneh, gimple_seq twoh)
{
  gimple_stmt_iterator gsi;
  gimple ones, twos;
  unsigned int ai;

  gsi = gsi_start (oneh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  ones = gsi_stmt (gsi);

  gsi = gsi_start (twoh);
  if (!gsi_one_before_end_p (gsi))
    return false;
  twos = gsi_stmt (gsi);

  if (!is_gimple_call (ones)
      || !is_gimple_call (twos)
      || gimple_call_lhs (ones)
      || gimple_call_lhs (twos)
      || gimple_call_chain (ones)
      || gimple_call_chain (twos)
      || !gimple_call_same_target_p (ones, twos)
      || gimple_call_num_args (ones) != gimple_call_num_args (twos))
    return false;

  for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
    if (!operand_equal_p (gimple_call_arg (ones, ai),
			  gimple_call_arg (twos, ai), 0))
      return false;

  return true;
}
/* Optimize
    try { A() } finally { try { ~B() } catch { ~A() } }
    try { ... } finally { ~A() }
   to
    try { A() } catch { ~B() }
    try { ~B() ... } finally { ~A() }

   This occurs frequently in C++, where A is a local variable and B is a
   temporary used in the initializer for A.  */

static void
optimize_double_finally (gimple one, gimple two)
{
  gimple oneh;
  gimple_stmt_iterator gsi;
  gimple_seq cleanup;

  cleanup = gimple_try_cleanup (one);
  gsi = gsi_start (cleanup);
  if (!gsi_one_before_end_p (gsi))
    return;

  oneh = gsi_stmt (gsi);
  if (gimple_code (oneh) != GIMPLE_TRY
      || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
    return;

  if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
    {
      gimple_seq seq = gimple_try_eval (oneh);

      gimple_try_set_cleanup (one, seq);
      gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
      seq = copy_gimple_seq_and_replace_locals (seq);
      gimple_seq_add_seq (&seq, gimple_try_eval (two));
      gimple_try_set_eval (two, seq);
    }
}
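/* A concrete instance of the pattern above (illustrative C++ only,
   with invented class names):

     struct B { B (); ~B (); };
     struct A { A (B); ~A (); };
     void f () { A a = A (B ()); ... }

   gimplifies to two adjacent TRY_FINALLYs, one protecting the
   temporary B and one protecting A itself, which this function merges
   so that the ~B() cleanup is not nested inside both the normal and
   the EH path.  */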
/* Perform EH refactoring optimizations that are simpler to do when code
   flow has been lowered but EH structures haven't.  */

static void
refactor_eh_r (gimple_seq seq)
{
  gimple_stmt_iterator gsi;
  gimple one, two;

  one = NULL;
  two = NULL;
  gsi = gsi_start (seq);
  while (1)
    {
      one = two;
      if (gsi_end_p (gsi))
	two = NULL;
      else
	two = gsi_stmt (gsi);
      if (one
	  && two
	  && gimple_code (one) == GIMPLE_TRY
	  && gimple_code (two) == GIMPLE_TRY
	  && gimple_try_kind (one) == GIMPLE_TRY_FINALLY
	  && gimple_try_kind (two) == GIMPLE_TRY_FINALLY)
	optimize_double_finally (one, two);
      if (one)
	switch (gimple_code (one))
	  {
	  case GIMPLE_TRY:
	    refactor_eh_r (gimple_try_eval (one));
	    refactor_eh_r (gimple_try_cleanup (one));
	    break;
	  case GIMPLE_CATCH:
	    refactor_eh_r (gimple_catch_handler (one));
	    break;
	  case GIMPLE_EH_FILTER:
	    refactor_eh_r (gimple_eh_filter_failure (one));
	    break;
	  case GIMPLE_EH_ELSE:
	    refactor_eh_r (gimple_eh_else_n_body (one));
	    refactor_eh_r (gimple_eh_else_e_body (one));
	    break;
	  default:
	    break;
	  }
      if (two)
	gsi_next (&gsi);
      else
	break;
    }
}
namespace {

const pass_data pass_data_refactor_eh =
{
  GIMPLE_PASS, /* type */
  "ehopt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_refactor_eh : public gimple_opt_pass
{
public:
  pass_refactor_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_refactor_eh, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *)
    {
      refactor_eh_r (gimple_body (current_function_decl));
      return 0;
    }

}; // class pass_refactor_eh
} // anon namespace

gimple_opt_pass *
make_pass_refactor_eh (gcc::context *ctxt)
{
  return new pass_refactor_eh (ctxt);
}
/* At the end of gimple optimization, we can lower RESX.  */

static bool
lower_resx (basic_block bb, gimple stmt, struct pointer_map_t *mnt_map)
{
  int lp_nr;
  eh_region src_r, dst_r;
  gimple_stmt_iterator gsi;
  gimple x;
  tree fn, src_nr;
  bool ret = false;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0)
    dst_r = get_eh_region_from_lp_number (lp_nr);
  else
    dst_r = NULL;

  src_r = get_eh_region_from_number (gimple_resx_region (stmt));
  gsi = gsi_last_bb (bb);

  if (src_r == NULL)
    {
      /* We can wind up with no source region when pass_cleanup_eh shows
	 that there are no entries into an eh region and deletes it, but
	 then the block that contains the resx isn't removed.  This can
	 happen without optimization when the switch statement created by
	 lower_try_finally_switch isn't simplified to remove the eh case.

	 Resolve this by expanding the resx node to an abort.  */

      fn = builtin_decl_implicit (BUILT_IN_TRAP);
      x = gimple_build_call (fn, 0);
      gsi_insert_before (&gsi, x, GSI_SAME_STMT);

      while (EDGE_COUNT (bb->succs) > 0)
	remove_edge (EDGE_SUCC (bb, 0));
    }
  else if (dst_r)
    {
      /* When we have a destination region, we resolve this by copying
	 the excptr and filter values into place, and changing the edge
	 to immediately after the landing pad.  */
      edge e;

      if (lp_nr < 0)
	{
	  basic_block new_bb;
	  void **slot;
	  tree lab;

	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
	     the failure decl into a new block, if needed.  */
	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);

	  slot = pointer_map_contains (mnt_map, dst_r);
	  if (slot == NULL)
	    {
	      gimple_stmt_iterator gsi2;

	      new_bb = create_empty_bb (bb);
	      add_bb_to_loop (new_bb, bb->loop_father);
	      lab = gimple_block_label (new_bb);
	      gsi2 = gsi_start_bb (new_bb);

	      fn = dst_r->u.must_not_throw.failure_decl;
	      x = gimple_build_call (fn, 0);
	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);

	      slot = pointer_map_insert (mnt_map, dst_r);
	      *slot = lab;
	    }
	  else
	    {
	      lab = (tree) *slot;
	      new_bb = label_to_block (lab);
	    }

	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
	  e->count = bb->count;
	  e->probability = REG_BR_PROB_BASE;
	}
      else
	{
	  edge_iterator ei;
	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);

	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  /* Update the flags for the outgoing edge.  */
	  e = single_succ_edge (bb);
	  gcc_assert (e->flags & EDGE_EH);
	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;

	  /* If there are no more EH users of the landing pad, delete it.  */
	  FOR_EACH_EDGE (e, ei, e->dest->preds)
	    if (e->flags & EDGE_EH)
	      break;
	  if (e == NULL)
	    {
	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
	      remove_eh_landing_pad (lp);
	    }
	}

      ret = true;
    }
  else
    {
      tree var;

      /* When we don't have a destination region, this exception escapes
	 up the call chain.  We resolve this by generating a call to the
	 _Unwind_Resume library function.  */

      /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
	 with no arguments for C++ and Java.  Check for that.  */
      if (src_r->use_cxa_end_cleanup)
	{
	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
	  x = gimple_build_call (fn, 0);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}
      else
	{
	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
	  src_nr = build_int_cst (integer_type_node, src_r->index);
	  x = gimple_build_call (fn, 1, src_nr);
	  var = create_tmp_var (ptr_type_node, NULL);
	  var = make_ssa_name (var, x);
	  gimple_call_set_lhs (x, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
	  x = gimple_build_call (fn, 1, var);
	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	}

      gcc_assert (EDGE_COUNT (bb->succs) == 0);
    }

  gsi_remove (&gsi, true);

  return ret;
}
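/* Schematically, an escaping "resx 1" ends up lowered to (GIMPLE-like
   pseudocode; the SSA name is invented for the sketch):

     _1 = __builtin_eh_pointer (1);
     _Unwind_Resume (_1);

   while a resx resuming into a local destination region 2 becomes

     __builtin_eh_copy_values (2, 1);

   followed by a fallthru edge to just past the destination landing
   pad.  */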
namespace {

const pass_data pass_data_lower_resx =
{
  GIMPLE_PASS, /* type */
  "resx", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_resx : public gimple_opt_pass
{
public:
  pass_lower_resx (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_resx, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_exceptions != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lower_resx
unsigned int
pass_lower_resx::execute (function *fun)
{
  basic_block bb;
  struct pointer_map_t *mnt_map;
  bool dominance_invalidated = false;
  bool any_rewritten = false;

  mnt_map = pointer_map_create ();

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last && is_gimple_resx (last))
	{
	  dominance_invalidated |= lower_resx (bb, last, mnt_map);
	  any_rewritten = true;
	}
    }

  pointer_map_destroy (mnt_map);

  if (dominance_invalidated)
    {
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
    }

  return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_lower_resx (gcc::context *ctxt)
{
  return new pass_lower_resx (ctxt);
}
/* Try to optimize var = {v} {CLOBBER} stmts followed just by
   external throw.  */

static void
optimize_clobbers (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  bool any_clobbers = false;
  bool seen_stack_restore = false;
  edge_iterator ei;
  edge e;

  /* Only optimize anything if the bb contains at least one clobber,
     ends with resx (checked by caller), optionally contains some
     debug stmts or labels, or at most one __builtin_stack_restore
     call, and has an incoming EH edge.  */
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_clobber_p (stmt))
	{
	  any_clobbers = true;
	  continue;
	}
      if (!seen_stack_restore
	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	{
	  seen_stack_restore = true;
	  continue;
	}
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      return;
    }
  if (!any_clobbers)
    return;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_EH)
      break;
  if (e == NULL)
    return;
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!gimple_clobber_p (stmt))
	continue;
      unlink_stmt_vdef (stmt);
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
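/* The shape this routine looks for, sketched as a GIMPLE dump fragment
   (names invented):

     <L1>:
       obj ={v} {CLOBBER};
       tmp ={v} {CLOBBER};
       resx 1

   When the resx throws externally the clobbers serve no purpose and are
   deleted; when it resumes locally, sink_clobbers below instead tries to
   move them past the landing pad.  */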
/* Try to sink var = {v} {CLOBBER} stmts followed just by
   internal throw to successor BB.  */

static int
sink_clobbers (basic_block bb)
{
  edge e;
  edge_iterator ei;
  gimple_stmt_iterator gsi, dgsi;
  basic_block succbb;
  bool any_clobbers = false;
  unsigned todo = 0;

  /* Only optimize if BB has a single EH successor and
     all predecessor edges are EH too.  */
  if (!single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
    return 0;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if ((e->flags & EDGE_EH) == 0)
	return 0;
    }

  /* And BB contains only CLOBBER stmts before the final
     RESX.  */
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      if (!gimple_clobber_p (stmt))
	return 0;
      any_clobbers = true;
    }
  if (!any_clobbers)
    return 0;

  edge succe = single_succ_edge (bb);
  succbb = succe->dest;

  /* See if there is a virtual PHI node to take an updated virtual
     operand from.  */
  gimple vphi = NULL;
  tree vuse = NULL_TREE;
  for (gsi = gsi_start_phis (succbb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree res = gimple_phi_result (gsi_stmt (gsi));
      if (virtual_operand_p (res))
	{
	  vphi = gsi_stmt (gsi);
	  vuse = res;
	  break;
	}
    }

  dgsi = gsi_after_labels (succbb);
  gsi = gsi_last_bb (bb);
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;
      if (is_gimple_debug (stmt))
	continue;
      if (gimple_code (stmt) == GIMPLE_LABEL)
	break;
      lhs = gimple_assign_lhs (stmt);
      /* Unfortunately we don't have dominance info updated at this
	 point, so checking if
	 dominated_by_p (CDI_DOMINATORS, succbb,
			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
	 would be too costly.  Thus, avoid sinking any clobbers that
	 refer to non-(D) SSA_NAMEs.  */
      if (TREE_CODE (lhs) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	  continue;
	}

      /* As we do not change stmt order when sinking across a
	 forwarder edge we can keep virtual operands in place.  */
      gsi_remove (&gsi, false);
      gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);

      /* But adjust virtual operands if we sunk across a PHI node.  */
      if (vuse)
	{
	  gimple use_stmt;
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	      SET_USE (use_p, gimple_vdef (stmt));
	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
	    {
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
	    }
	  /* Adjust the incoming virtual operand.  */
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
	  SET_USE (gimple_vuse_op (stmt), vuse);
	}
      /* If there isn't a single predecessor but no virtual PHI node
	 arrange for virtual operands to be renamed.  */
      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
	       && !single_pred_p (succbb))
	{
	  /* In this case there will be no use of the VDEF of this stmt.
	     ??? Unless this is a secondary opportunity and we have not
	     removed unreachable blocks yet, so we cannot assert this.
	     Which also means we will end up renaming too many times.  */
	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
	  mark_virtual_operands_for_renaming (cfun);
	  todo |= TODO_update_ssa_only_virtuals;
	}
    }

  return todo;
}
/* At the end of inlining, we can lower EH_DISPATCH.  Return true when
   we have found some duplicate labels and removed some edges.  */

static bool
lower_eh_dispatch (basic_block src, gimple stmt)
{
  gimple_stmt_iterator gsi;
  int region_nr;
  eh_region r;
  tree filter, fn;
  gimple x;
  bool redirected = false;

  region_nr = gimple_eh_dispatch_region (stmt);
  r = get_eh_region_from_number (region_nr);

  gsi = gsi_last_bb (src);

  switch (r->type)
    {
    case ERT_TRY:
      {
	auto_vec<tree> labels;
	tree default_label = NULL;
	eh_catch c;
	edge_iterator ei;
	edge e;
	hash_set<tree> seen_values;

	/* Collect the labels for a switch.  Zero the post_landing_pad
	   field because we'll no longer have anything keeping these labels
	   in existence and the optimizer will be free to merge these
	   blocks at will.  */
	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	  {
	    tree tp_node, flt_node, lab = c->label;
	    bool have_label = false;

	    c->label = NULL;
	    tp_node = c->type_list;
	    flt_node = c->filter_list;

	    if (tp_node == NULL)
	      {
		default_label = lab;
		break;
	      }
	    do
	      {
		/* Filter out duplicate labels that arise when this handler
		   is shadowed by an earlier one.  When no labels are
		   attached to the handler anymore, we remove
		   the corresponding edge and then we delete unreachable
		   blocks at the end of this pass.  */
		if (! seen_values.contains (TREE_VALUE (flt_node)))
		  {
		    tree t = build_case_label (TREE_VALUE (flt_node),
					       NULL, lab);
		    labels.safe_push (t);
		    seen_values.add (TREE_VALUE (flt_node));
		    have_label = true;
		  }

		tp_node = TREE_CHAIN (tp_node);
		flt_node = TREE_CHAIN (flt_node);
	      }
	    while (tp_node);
	    if (! have_label)
	      {
		remove_edge (find_edge (src, label_to_block (lab)));
		redirected = true;
	      }
	  }

	/* Clean up the edge flags.  */
	FOR_EACH_EDGE (e, ei, src->succs)
	  {
	    if (e->flags & EDGE_FALLTHRU)
	      {
		/* If there was no catch-all, use the fallthru edge.  */
		if (default_label == NULL)
		  default_label = gimple_block_label (e->dest);
		e->flags &= ~EDGE_FALLTHRU;
	      }
	  }
	gcc_assert (default_label != NULL);

	/* Don't generate a switch if there's only a default case.
	   This is common in the form of try { A; } catch (...) { B; }.  */
	if (!labels.exists ())
	  {
	    e = single_succ_edge (src);
	    e->flags |= EDGE_FALLTHRU;
	  }
	else
	  {
	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
							 region_nr));
	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
	    filter = make_ssa_name (filter, x);
	    gimple_call_set_lhs (x, filter);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	    /* Turn the default label into a default case.  */
	    default_label = build_case_label (NULL, NULL, default_label);
	    sort_case_labels (labels);

	    x = gimple_build_switch (filter, default_label, labels);
	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
	  }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      {
	edge b_e = BRANCH_EDGE (src);
	edge f_e = FALLTHRU_EDGE (src);

	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
						     region_nr));
	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)), NULL);
	filter = make_ssa_name (filter, x);
	gimple_call_set_lhs (x, filter);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	r->u.allowed.label = NULL;
	x = gimple_build_cond (EQ_EXPR, filter,
			       build_int_cst (TREE_TYPE (filter),
					      r->u.allowed.filter),
			       NULL_TREE, NULL_TREE);
	gsi_insert_before (&gsi, x, GSI_SAME_STMT);

	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
	f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
  gsi_remove (&gsi, true);
  return redirected;
}
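/* The generated replacement, schematically: for two catch handlers with
   filter values 1 and 2 (labels and SSA name invented for the sketch),

     eh_dispatch 1

   becomes

     _1 = __builtin_eh_filter (1);
     switch (_1) <default: L_FALLTHRU, case 1: L_CATCH_A, case 2: L_CATCH_B>

   while an ERT_ALLOWED_EXCEPTIONS region becomes a two-way conditional
   comparing the filter value instead of a switch.  */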
namespace {

const pass_data pass_data_lower_eh_dispatch =
{
  GIMPLE_PASS, /* type */
  "ehdisp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_eh_dispatch : public gimple_opt_pass
{
public:
  pass_lower_eh_dispatch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
  virtual unsigned int execute (function *);

}; // class pass_lower_eh_dispatch
unsigned int
pass_lower_eh_dispatch::execute (function *fun)
{
  basic_block bb;
  int flags = 0;
  bool redirected = false;

  assign_filter_values ();

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple last = last_stmt (bb);
      if (last == NULL)
	continue;
      if (gimple_code (last) == GIMPLE_EH_DISPATCH)
	{
	  redirected |= lower_eh_dispatch (bb, last);
	  flags |= TODO_update_ssa_only_virtuals;
	}
      else if (gimple_code (last) == GIMPLE_RESX)
	{
	  if (stmt_can_throw_external (last))
	    optimize_clobbers (bb);
	  else
	    flags |= sink_clobbers (bb);
	}
    }

  if (redirected)
    delete_unreachable_blocks ();
  return flags;
}
} // anon namespace

gimple_opt_pass *
make_pass_lower_eh_dispatch (gcc::context *ctxt)
{
  return new pass_lower_eh_dispatch (ctxt);
}
/* Walk statements, see what regions and, optionally, landing pads
   are really referenced.

   Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
   and in LP_REACHABLE an sbitmap with bits set for reachable landing pads.

   Passing NULL for LP_REACHABLE is valid, in this case only reachable
   regions are marked.

   The caller is responsible for freeing the returned sbitmaps.  */

static void
mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
{
  sbitmap r_reachable, lp_reachable;
  basic_block bb;
  bool mark_landing_pads = (lp_reachablep != NULL);
  gcc_checking_assert (r_reachablep != NULL);

  r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
  bitmap_clear (r_reachable);
  *r_reachablep = r_reachable;

  if (mark_landing_pads)
    {
      lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
      bitmap_clear (lp_reachable);
      *lp_reachablep = lp_reachable;
    }
  else
    lp_reachable = NULL;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (mark_landing_pads)
	    {
	      int lp_nr = lookup_stmt_eh_lp (stmt);

	      /* Negative LP numbers are MUST_NOT_THROW regions which
		 are not considered BB enders.  */
	      if (lp_nr < 0)
		bitmap_set_bit (r_reachable, -lp_nr);

	      /* Positive LP numbers are real landing pads, and BB enders.  */
	      else if (lp_nr > 0)
		{
		  gcc_assert (gsi_one_before_end_p (gsi));
		  eh_region region = get_eh_region_from_lp_number (lp_nr);
		  bitmap_set_bit (r_reachable, region->index);
		  bitmap_set_bit (lp_reachable, lp_nr);
		}
	    }

	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
	  switch (gimple_code (stmt))
	    {
	    case GIMPLE_RESX:
	      bitmap_set_bit (r_reachable, gimple_resx_region (stmt));
	      break;
	    case GIMPLE_EH_DISPATCH:
	      bitmap_set_bit (r_reachable, gimple_eh_dispatch_region (stmt));
	      break;
	    default:
	      break;
	    }
	}
    }
}
/* Remove unreachable handlers and unreachable landing pads.  */

static void
remove_unreachable_handlers (void)
{
  sbitmap r_reachable, lp_reachable;
  eh_region region;
  eh_landing_pad lp;
  unsigned i;

  mark_reachable_handlers (&r_reachable, &lp_reachable);

  if (dump_file)
    {
      fprintf (dump_file, "Before removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "Reachable regions: ");
      dump_bitmap_file (dump_file, r_reachable);
      fprintf (dump_file, "Reachable landing pads: ");
      dump_bitmap_file (dump_file, lp_reachable);
    }

  if (dump_file)
    {
      FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
	if (region && !bitmap_bit_p (r_reachable, region->index))
	  fprintf (dump_file,
		   "Removing unreachable region %d\n",
		   region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && !bitmap_bit_p (lp_reachable, lp->index))
      {
	if (dump_file)
	  fprintf (dump_file,
		   "Removing unreachable landing pad %d\n",
		   lp->index);
	remove_eh_landing_pad (lp);
      }

  if (dump_file)
    {
      fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
      dump_eh_tree (dump_file, cfun);
      fprintf (dump_file, "\n\n");
    }

  sbitmap_free (r_reachable);
  sbitmap_free (lp_reachable);

#ifdef ENABLE_CHECKING
  verify_eh_tree (cfun);
#endif
}
/* Remove unreachable handlers if any landing pads have been removed after
   last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */

void
maybe_remove_unreachable_handlers (void)
{
  eh_landing_pad lp;
  unsigned i;

  if (cfun->eh == NULL)
    return;

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
    if (lp && lp->post_landing_pad)
      {
	if (label_to_block (lp->post_landing_pad) == NULL)
	  {
	    remove_unreachable_handlers ();
	    return;
	  }
      }
}
/* Remove regions that do not have landing pads.  This assumes
   that remove_unreachable_handlers has already been run, and
   that we've just manipulated the landing pads since then.

   Preserve regions with landing pads and regions that prevent
   exceptions from propagating further, even if these regions
   are not reachable.  */

static void
remove_unreachable_handlers_no_lp (void)
{
  eh_region region;
  sbitmap r_reachable;
  unsigned i;

  mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);

  FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
    {
      if (! region)
	continue;

      if (region->landing_pads != NULL
	  || region->type == ERT_MUST_NOT_THROW)
	bitmap_set_bit (r_reachable, region->index);

      if (dump_file
	  && !bitmap_bit_p (r_reachable, region->index))
	fprintf (dump_file,
		 "Removing unreachable region %d\n",
		 region->index);
    }

  remove_unreachable_eh_regions (r_reachable);

  sbitmap_free (r_reachable);
}
/* Undo critical edge splitting on an EH landing pad.  Earlier, we
   optimistically split all sorts of edges, including EH edges.  The
   optimization passes in between may not have needed them; if not,
   we should undo the split.

   Recognize this case by having one EH edge incoming to the BB and
   one normal edge outgoing; BB should be empty apart from the
   post_landing_pad label.

   Note that this is slightly different from the empty handler case
   handled by cleanup_empty_eh, in that the actual handler may yet
   have actual code but the landing pad has been separated from the
   handler.  As such, cleanup_empty_eh relies on this transformation
   having been done first.  */

static bool
unsplit_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  edge e_in, e_out;

  /* Quickly check the edge counts on BB for singularity.  */
  if (!single_pred_p (bb) || !single_succ_p (bb))
    return false;
  e_in = single_pred_edge (bb);
  e_out = single_succ_edge (bb);

  /* Input edge must be EH and output edge must be normal.  */
  if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
    return false;

  /* The block must be empty except for the labels and debug insns.  */
  gsi = gsi_after_labels (bb);
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);
  if (!gsi_end_p (gsi))
    return false;

  /* The destination block must not already have a landing pad
     for a different region.  */
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree lab;
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* The new destination block must not already be a destination of
     the source block, lest we merge fallthru and eh edges and get
     all sorts of confused.  */
  if (find_edge (e_in->src, e_out->dest))
    return false;

  /* ??? We can get degenerate phis due to cfg cleanups.  I would have
     thought this should have been cleaned up by a phicprop pass, but
     that doesn't appear to handle virtuals.  Propagate by hand.  */
  if (!gimple_seq_empty_p (phi_nodes (bb)))
    {
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
	{
	  gimple use_stmt, phi = gsi_stmt (gsi);
	  tree lhs = gimple_phi_result (phi);
	  tree rhs = gimple_phi_arg_def (phi, 0);
	  use_operand_p use_p;
	  imm_use_iterator iter;

	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, rhs);
	    }

	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;

	  remove_phi_node (&gsi, true);
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
	     lp->index, e_out->dest->index);

  /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
     a successor edge, humor it.  But do the real CFG change with the
     predecessor of E_OUT in order to preserve the ordering of arguments
     to the PHI nodes in E_OUT->DEST.  */
  redirect_eh_edge_1 (e_in, e_out->dest, false);
  redirect_edge_pred (e_out, e_in->src);
  e_out->flags = e_in->flags;
  e_out->probability = e_in->probability;
  e_out->count = e_in->count;
  remove_edge (e_in);

  return true;
}
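/* Before and after, schematically (block numbers invented):

     throw-site --EH--> BB5 (only the post_landing_pad label) --> BB7

   becomes

     throw-site --EH--> BB7

   with the landing pad number moved onto BB7's label, undoing the
   earlier critical-edge split.  */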
/* Examine each landing pad block and see if it matches unsplit_eh.  */

static bool
unsplit_all_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= unsplit_eh (lp);

  return changed;
}
/* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
   to OLD_BB to NEW_BB; return true on success, false on failure.

   OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
   PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
   Virtual PHIs may be deleted and marked for renaming.  */

static bool
cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
			     edge old_bb_out, bool change_region)
{
  gimple_stmt_iterator ngsi, ogsi;
  edge_iterator ei;
  edge e;
  bitmap ophi_handled;

  /* The destination block must not be a regular successor for any
     of the preds of the landing pad.  Thus, avoid turning a diamond
     where one path goes through the landing pad into a block with a
     parallel normal and EH edge to the same destination, which CFG
     verification would choke on.  See PR45172 and PR51089.  */
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    if (find_edge (e->src, new_bb))
      return false;

  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);

  ophi_handled = BITMAP_ALLOC (NULL);

  /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
     for the edges we're going to move.  */
  for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
    {
      gimple ophi, nphi = gsi_stmt (ngsi);
      tree nresult, nop;

      nresult = gimple_phi_result (nphi);
      nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);

      /* Find the corresponding PHI in OLD_BB so we can forward-propagate
	 the source ssa_name.  */
      ophi = NULL;
      for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
	{
	  ophi = gsi_stmt (ogsi);
	  if (gimple_phi_result (ophi) == nop)
	    break;
	  ophi = NULL;
	}

      /* If we did find the corresponding PHI, copy those inputs.  */
      if (ophi)
	{
	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
	  if (!has_single_use (nop))
	    {
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;

	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
		{
		  if (!gimple_debug_bind_p (USE_STMT (use_p))
		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
			  || gimple_bb (USE_STMT (use_p)) != new_bb))
		    goto fail;
		}
	    }
	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    {
	      location_t oloc;
	      tree oop;

	      if ((e->flags & EDGE_EH) == 0)
		continue;
	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
	      redirect_edge_var_map_add (e, nresult, oop, oloc);
	    }
	}
      /* If we didn't find the PHI, if it's a real variable or a VOP, we know
	 from the fact that OLD_BB is tree_empty_eh_handler_p that the
	 variable is unchanged from input to the block and we can simply
	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
      else
	{
	  location_t nloc
	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
	  FOR_EACH_EDGE (e, ei, old_bb->preds)
	    redirect_edge_var_map_add (e, nresult, nop, nloc);
	}
    }

  /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
     we don't know what values from the other edges into NEW_BB to use.  */
  for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
    {
      gimple ophi = gsi_stmt (ogsi);
      tree oresult = gimple_phi_result (ophi);
      if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
	goto fail;
    }

  /* Finally, move the edges and update the PHIs.  */
  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_EH)
      {
	/* ??? CFG manipulation routines do not try to update loop
	   form on edge redirection.  Do so manually here for now.  */
	/* If we redirect a loop entry or latch edge that will either create
	   a multiple entry loop or rotate the loop.  If the loops merge
	   we may have created a loop with multiple latches.
	   All of this isn't easily fixed thus cancel the affected loop
	   and mark the other loop as possibly having multiple latches.  */
	if (e->dest == e->dest->loop_father->header)
	  {
	    e->dest->loop_father->header = NULL;
	    e->dest->loop_father->latch = NULL;
	    new_bb->loop_father->latch = NULL;
	    loops_state_set (LOOPS_NEED_FIXUP|LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
	  }
	redirect_eh_edge_1 (e, new_bb, change_region);
	redirect_edge_succ (e, new_bb);
	flush_pending_stmts (e);
      }
    else
      ei_next (&ei);

  BITMAP_FREE (ophi_handled);
  return true;

 fail:
  FOR_EACH_EDGE (e, ei, old_bb->preds)
    redirect_edge_var_map_clear (e);
  BITMAP_FREE (ophi_handled);
  return false;
}
/* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
   old region to NEW_REGION at BB.  */

static void
cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
			  eh_landing_pad lp, eh_region new_region)
{
  gimple_stmt_iterator gsi;
  eh_landing_pad *pp;

  for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
    continue;
  *pp = lp->next_lp;

  lp->region = new_region;
  lp->next_lp = new_region->landing_pads;
  new_region->landing_pads = lp;

  /* Delete the RESX that was matched within the empty handler block.  */
  gsi = gsi_last_bb (bb);
  unlink_stmt_vdef (gsi_stmt (gsi));
  gsi_remove (&gsi, true);

  /* Clean up E_OUT for the fallthru.  */
  e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
  e_out->probability = REG_BR_PROB_BASE;
}
/* A subroutine of cleanup_empty_eh.  Handle more complex cases of
   unsplitting than unsplit_eh was prepared to handle, e.g. when
   multiple incoming edges and phis are involved.  */

static bool
cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
{
  gimple_stmt_iterator gsi;
  tree lab;

  /* We really ought not have totally lost everything following
     a landing pad label.  Given that BB is empty, there had better
     be a successor.  */
  gcc_assert (e_out != NULL);

  /* The destination block must not already have a landing pad
     for a different region.  */
  lab = NULL;
  for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      int lp_nr;

      if (gimple_code (stmt) != GIMPLE_LABEL)
	break;
      lab = gimple_label_label (stmt);
      lp_nr = EH_LANDING_PAD_NR (lab);
      if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
	return false;
    }

  /* Attempt to move the PHIs into the successor block.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Unsplit EH landing pad %d to block %i "
		 "(via cleanup_empty_eh).\n",
		 lp->index, e_out->dest->index);
      return true;
    }

  return false;
}
/* Return true if edge E_FIRST is part of an empty infinite loop
   or leads to such a loop through a series of single successor
   empty bbs.  */

static bool
infinite_empty_loop_p (edge e_first)
{
  bool inf_loop = false;
  edge e;

  if (e_first->dest == e_first->src)
    return true;

  e_first->src->aux = (void *) 1;
  for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
    {
      gimple_stmt_iterator gsi;
      if (e->dest->aux)
	{
	  inf_loop = true;
	  break;
	}
      e->dest->aux = (void *) 1;
      gsi = gsi_after_labels (e->dest);
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (!gsi_end_p (gsi))
	break;
    }
  e_first->src->aux = NULL;
  for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
    e->dest->aux = NULL;

  return inf_loop;
}
/* Examine the block associated with LP to determine if it's an empty
   handler for its EH region.  If so, attempt to redirect EH edges to
   an outer region.  Return true if the CFG was updated in any way.  This
   is similar to jump forwarding, just across EH edges.  */

static bool
cleanup_empty_eh (eh_landing_pad lp)
{
  basic_block bb = label_to_block (lp->post_landing_pad);
  gimple_stmt_iterator gsi;
  gimple resx;
  eh_region new_region;
  edge_iterator ei;
  edge e, e_out;
  bool has_non_eh_pred;
  bool ret = false;
  int new_lp_nr;

  /* There can be zero or one edges out of BB.  This is the quickest test.  */
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      e_out = NULL;
      break;
    case 1:
      e_out = single_succ_edge (bb);
      break;
    default:
      return false;
    }

  resx = last_stmt (bb);
  if (resx && is_gimple_resx (resx))
    {
      if (stmt_can_throw_external (resx))
	optimize_clobbers (bb);
      else if (sink_clobbers (bb))
	ret = true;
    }

  gsi = gsi_after_labels (bb);

  /* Make sure to skip debug statements.  */
  if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
    gsi_next_nondebug (&gsi);

  /* If the block is totally empty, look for more unsplitting cases.  */
  if (gsi_end_p (gsi))
    {
      /* For the degenerate case of an infinite loop bail out.
	 If bb has no successors and is totally empty, which can happen e.g.
	 because of incorrect noreturn attribute, bail out too.  */
      if (e_out == NULL
	  || infinite_empty_loop_p (e_out))
	return ret;

      return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
    }

  /* The block should consist only of a single RESX statement, modulo a
     preceding call to __builtin_stack_restore if there is no outgoing
     edge, since the call can be eliminated in this case.  */
  resx = gsi_stmt (gsi);
  if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
    {
      gsi_next (&gsi);
      resx = gsi_stmt (gsi);
    }
  if (!is_gimple_resx (resx))
    return ret;
  gcc_assert (gsi_one_before_end_p (gsi));

  /* Determine if there are non-EH edges, or resx edges into the handler.  */
  has_non_eh_pred = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!(e->flags & EDGE_EH))
      has_non_eh_pred = true;

  /* Find the handler that's outer of the empty handler by looking at
     where the RESX instruction was vectored.  */
  new_lp_nr = lookup_stmt_eh_lp (resx);
  new_region = get_eh_region_from_lp_number (new_lp_nr);

  /* If there's no destination region within the current function,
     redirection is trivial via removing the throwing statements from
     the EH region, removing the EH edges, and allowing the block
     to go unreachable.  */
  if (new_region == NULL)
    {
      gcc_assert (e_out == NULL);
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* If the destination region is a MUST_NOT_THROW, allow the runtime
     to handle the abort and allow the blocks to go unreachable.  */
  if (new_region->type == ERT_MUST_NOT_THROW)
    {
      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
	if (e->flags & EDGE_EH)
	  {
	    gimple stmt = last_stmt (e->src);
	    remove_stmt_from_eh_lp (stmt);
	    add_stmt_to_eh_lp (stmt, new_lp_nr);
	    remove_edge (e);
	  }
	else
	  ei_next (&ei);
      goto succeed;
    }

  /* Try to redirect the EH edges and merge the PHIs into the destination
     landing pad block.  If the merge succeeds, we'll already have redirected
     all the EH edges.  The handler itself will go unreachable if there were
     no normal edges.  */
  if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
    goto succeed;

  /* Finally, if all input edges are EH edges, then we can (potentially)
     reduce the number of transfers from the runtime by moving the landing
     pad from the original region to the new region.  This is a win when
     we remove the last CLEANUP region along a particular exception
     propagation path.  Since nothing changes except for the region with
     which the landing pad is associated, the PHI nodes do not need to be
     adjusted at all.  */
  if (!has_non_eh_pred)
    {
      cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
		 lp->index, new_region->index);

      /* ??? The CFG didn't change, but we may have rendered the
	 old EH region unreachable.  Trigger a cleanup there.  */
      return true;
    }

  return ret;

 succeed:
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
  remove_eh_landing_pad (lp);
  return true;
}
/* Do a post-order traversal of the EH region tree.  Examine each
   post_landing_pad block and see if we can eliminate it as empty.  */

static bool
cleanup_all_empty_eh (void)
{
  bool changed = false;
  eh_landing_pad lp;
  int i;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp)
      changed |= cleanup_empty_eh (lp);

  return changed;
}
/* Perform cleanups and lowering of exception handling
    1) cleanups regions with handlers doing nothing are optimized out
    2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
    3) Info about regions that are containing instructions, and regions
       reachable via local EH edges is collected
    4) Eh tree is pruned for regions no longer necessary.

   TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
	 Unify those that have the same failure decl and locus.  */

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing due to all unreachable.  */
  if (cfun->eh->region_tree)
    {
      bool changed = false;

      changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
	{
	  free_dominance_info (CDI_DOMINATORS);
	  free_dominance_info (CDI_POST_DOMINATORS);

	  /* We delayed all basic block deletion, as we may have performed
	     cleanups on EH edges while non-EH edges were still present.  */
	  delete_unreachable_blocks ();

	  /* We manipulated the landing pads.  Remove any region that no
	     longer has a landing pad.  */
	  remove_unreachable_handlers_no_lp ();

	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	}
    }

  return 0;
}
namespace {

const pass_data pass_data_cleanup_eh =
{
  GIMPLE_PASS, /* type */
  "ehcleanup", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cleanup_eh : public gimple_opt_pass
{
public:
  pass_cleanup_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
  virtual bool gate (function *fun)
    {
      return fun->eh != NULL && fun->eh->region_tree != NULL;
    }

  virtual unsigned int execute (function *);

}; // class pass_cleanup_eh
unsigned int
pass_cleanup_eh::execute (function *fun)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (fun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}
} // anon namespace

gimple_opt_pass *
make_pass_cleanup_eh (gcc::context *ctxt)
{
  return new pass_cleanup_eh (ctxt);
}
/* Verify that BB containing STMT as the last statement, has precisely the
   edge that make_eh_edges would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
	{
	  if (eh_edge)
	    {
	      error ("BB %i has multiple EH edges", bb->index);
	      return true;
	    }
	  else
	    eh_edge = e;
	}
    }

  if (lp == NULL)
    {
      if (eh_edge)
	{
	  error ("BB %i can not throw but has an EH edge", bb->index);
	  return true;
	}
      return false;
    }

  if (!stmt_could_throw_p (stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  if (eh_edge->dest != label_to_block (lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}
/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (gimple stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (c->label);
	  e = find_edge (src, dst);
	  if (e == NULL)
	    {
	      error ("BB %i is missing an edge", src->index);
	      return true;
	    }
	  e->aux = (void *) e;

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    {
	      want_fallthru = false;
	      break;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
	{
	  error ("BB %i is missing an edge", src->index);
	  return true;
	}
      e->aux = (void *) e;
      break;

    default:
      gcc_unreachable ();
    }

  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  if (fall_edge != NULL)
	    {
	      error ("BB %i too many fallthru edges", src->index);
	      return true;
	    }
	  fall_edge = e;
	}
      else if (e->aux)
	e->aux = NULL;
      else
	{
	  error ("BB %i has incorrect edge", src->index);
	  return true;
	}
    }
  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}