[PATCH 11/13] Eliminate FOR_EACH_BB macro.

David Malcolm dmalcolm@redhat.com
Fri Dec 6 15:08:00 GMT 2013


gcc/
	* basic-block.h (FOR_EACH_BB): Eliminate macro.

	* asan.c (transform_statements, execute_sanopt): Eliminate
	use of FOR_EACH_BB in favor of FOR_EACH_BB_FN, to make use of cfun
	explicit.
	* auto-inc-dec.c (rest_of_handle_auto_inc_dec): Likewise.
	* bb-reorder.c (find_rarely_executed_basic_blocks_and_crossing_edges,
	set_edge_can_fallthru_flag, fix_up_fall_thru_edges,
	fix_crossing_conditional_branches, fix_crossing_unconditional_branches,
	add_reg_crossing_jump_notes, insert_section_boundary_note,
	rest_of_handle_reorder_blocks, duplicate_computed_gotos): Likewise.
	* cfg.c (clear_edges, compact_blocks, brief_dump_cfg): Likewise.
	* cfganal.c (find_unreachable_blocks, add_noreturn_fake_exit_edges,
	compute_dominance_frontiers_1, single_pred_before_succ_order): Likewise.
	* cfgbuild.c (find_many_sub_basic_blocks): Likewise.
	* cfgcleanup.c (try_optimize_cfg, delete_dead_jumptables): Likewise.
	* cfgexpand.c (add_scope_conflicts, discover_nonconstant_array_refs):
	Likewise.
	* cfgloop.c (flow_loops_cfg_dump, get_loop_body, record_loop_exits,
	verify_loop_structure): Likewise.
	* cfgloopanal.c (mark_loop_exit_edges): Likewise.
	* cfgrtl.c (compute_bb_for_insn, find_partition_fixes,
	verify_hot_cold_block_grouping, purge_all_dead_edges,
	fixup_abnormal_edges, record_effective_endpoints,
	outof_cfg_layout_mode, fixup_reorder_chain, force_one_exit_fallthru,
	break_superblocks): Likewise.
	* cgraphbuild.c (build_cgraph_edges, rebuild_cgraph_edges,
	cgraph_rebuild_references): Likewise.
	* combine-stack-adj.c (combine_stack_adjustments): Likewise.
	* combine.c (delete_noop_moves, create_log_links,
	combine_instructions): Likewise.
	* config/arm/arm.c (thumb1_reorg, thumb2_reorg): Likewise.
	* config/bfin/bfin.c (bfin_gen_bundles, reorder_var_tracking_notes):
	Likewise.
	* config/c6x/c6x.c (c6x_gen_bundles, conditionalize_after_sched,
	c6x_reorg): Likewise.
	* config/epiphany/resolve-sw-modes.c (resolve_sw_modes): Likewise.
	* config/frv/frv.c (frv_optimize_membar): Likewise.
	* config/i386/i386.c (ix86_finalize_stack_realign_flags): Likewise.
	* config/ia64/ia64.c (ia64_reorg): Likewise.
	* config/mips/mips.c (mips_annotate_pic_calls): Likewise.
	* config/picochip/picochip.c (reorder_var_tracking_notes): Likewise.
	* config/rs6000/rs6000.c (rs6000_alloc_sdmode_stack_slot): Likewise.
	* config/s390/s390.c (s390_regs_ever_clobbered): Likewise.
	* config/spu/spu.c (spu_machine_dependent_reorg): Likewise.
	* config/tilegx/tilegx.c (tilegx_gen_bundles,
	reorder_var_tracking_notes): Likewise.
	* config/tilepro/tilepro.c (tilepro_gen_bundles,
	reorder_var_tracking_notes): Likewise.
	* coverage.c (coverage_compute_cfg_checksum): Likewise.
	* cprop.c (compute_hash_table_work, compute_cprop_data,
	local_cprop_pass, find_implicit_sets): Likewise.
	* cse.c (cse_condition_code_reg): Likewise.
	* dce.c (prescan_insns_for_dce): Likewise.
	* df-core.c (df_compact_blocks): Likewise.
	* df-problems.c (df_word_lr_alloc): Likewise.
	* df-scan.c (df_scan_start_dump, df_scan_blocks, df_insn_rescan_all,
	df_update_entry_exit_and_calls): Likewise.
	* dominance.c (calculate_dominance_info, verify_dominators,
	debug_dominance_info): Likewise.
	* dse.c (dse_step5_nospill): Likewise.
	* except.c (finish_eh_generation): Likewise.
	* final.c (compute_alignments): Likewise.
	* function.c (thread_prologue_and_epilogue_insns,
	rest_of_match_asm_constraints): Likewise.
	* gcse.c (compute_hash_table_work, prune_expressions,
	compute_pre_data, compute_code_hoist_vbeinout, hoist_code,
	calculate_bb_reg_pressure, compute_ld_motion_mems): Likewise.
	* gimple-iterator.c (gsi_commit_edge_inserts): Likewise.
	* gimple-ssa-isolate-paths.c (find_implicit_erroneous_behaviour,
	find_explicit_erroneous_behaviour): Likewise.
	* graphite-sese-to-poly.c (rewrite_reductions_out_of_ssa,
	rewrite_cross_bb_scalar_deps_out_of_ssa): Likewise.
	* haifa-sched.c (haifa_sched_init): Likewise.
	* hw-doloop.c (discover_loops, set_bb_indices, reorder_loops):
	Likewise.
	* ifcvt.c (if_convert): Likewise.
	* init-regs.c (initialize_uninitialized_regs): Likewise.
	* ipa-prop.c (ipcp_transform_function): Likewise.
	* ipa-pure-const.c (analyze_function): Likewise.
	* ipa-split.c (find_split_points, execute_split_functions): Likewise.
	* ira-build.c (form_loop_tree): Likewise.
	* ira-costs.c (find_costs_and_classes): Likewise.
	* ira-emit.c (emit_moves, add_ranges_and_copies, ira_emit): Likewise.
	* ira.c (decrease_live_ranges_number, compute_regs_asm_clobbered,
	mark_elimination, update_equiv_regs, find_moveable_pseudos,
	split_live_ranges_for_shrink_wrap, allocate_initial_values): Likewise.
	* jump.c (mark_all_labels): Likewise.
	* lcm.c (compute_laterin, compute_insert_delete, compute_available,
	compute_nearerout, compute_rev_insert_delete): Likewise.
	* loop-init.c (fix_loop_structure): Likewise.
	* loop-invariant.c (calculate_loop_reg_pressure): Likewise.
	* lower-subreg.c (decompose_multiword_subregs): Likewise.
	* lra-assigns.c (assign_by_spills): Likewise.
	* lra-coalesce.c (lra_coalesce): Likewise.
	* lra-constraints.c (lra_inheritance, remove_inheritance_pseudos):
	Likewise.
	* lra-eliminations.c (lra_init_elimination): Likewise.
	* lra-spills.c (assign_spill_hard_regs, spill_pseudos,
	lra_final_code_change): Likewise.
	* lra.c (remove_scratches, check_rtl, has_nonexceptional_receiver,
	update_inc_notes): Likewise.
	* mcf.c (adjust_cfg_counts): Likewise.
	* mode-switching.c (optimize_mode_switching): Likewise.
	* modulo-sched.c (rest_of_handle_sms): Likewise.
	* omp-low.c (optimize_omp_library_calls, expand_omp_taskreg,
	expand_omp_target): Likewise.
	* postreload-gcse.c (alloc_mem, compute_hash_table): Likewise.
	* postreload.c (reload_cse_regs_1): Likewise.
	* predict.c (strip_predict_hints, tree_bb_level_predictions,
	tree_estimate_probability, expensive_function_p,
	estimate_bb_frequencies, compute_function_frequency): Likewise.
	* profile.c (is_inconsistent, compute_branch_probabilities,
	branch_prob): Likewise.
	* ree.c (find_removable_extensions): Likewise.
	* reg-stack.c (compensate_edges, convert_regs, reg_to_stack): Likewise.
	* regcprop.c (copyprop_hardreg_forward): Likewise.
	* reginfo.c (init_subregs_of_mode): Likewise.
	* regrename.c (regrename_analyze): Likewise.
	* regstat.c (regstat_compute_ri, regstat_compute_calls_crossed):
	Likewise.
	* reload1.c (has_nonexceptional_receiver, reload,
	calculate_elim_costs_all_insns): Likewise.
	* resource.c (init_resource_info, free_resource_info): Likewise.
	* sched-ebb.c (schedule_ebbs): Likewise.
	* sched-rgn.c (is_cfg_nonregular, find_single_block_region,
	haifa_find_rgns, sched_rgn_local_init): Likewise.
	* sel-sched-dump.c (sel_dump_cfg_2): Likewise.
	* sel-sched-ir.c (init_lv_sets, free_lv_sets,
	make_regions_from_the_rest): Likewise.
	* sese.c (build_sese_loop_nests, sese_build_liveouts): Likewise.
	* stack-ptr-mod.c (notice_stack_pointer_modification): Likewise.
	* store-motion.c (compute_store_table, build_store_vectors,
	one_store_motion_pass): Likewise.
	* tracer.c (tail_duplicate): Likewise.
	* trans-mem.c (compute_transaction_bits): Likewise.
	* tree-call-cdce.c (tree_call_cdce): Likewise.
	* tree-cfg.c (replace_loop_annotate, factor_computed_gotos,
	fold_cond_expr_cond, make_edges, assign_discriminators,
	make_abnormal_goto_edges, cleanup_dead_labels, group_case_labels,
	dump_cfg_stats, gimple_verify_flow_info, print_loop,
	execute_fixup_cfg): Likewise.
	* tree-cfgcleanup.c (cleanup_tree_cfg_1, merge_phi_nodes): Likewise.
	* tree-complex.c (init_dont_simulate_again, tree_lower_complex):
	Likewise.
	* tree-dfa.c (collect_dfa_stats, dump_enumerated_decls): Likewise.
	* tree-eh.c (execute_lower_resx, execute_lower_eh_dispatch,
	mark_reachable_handlers): Likewise.
	* tree-emutls.c (lower_emutls_function_body): Likewise.
	* tree-if-conv.c (main_tree_if_conversion): Likewise.
	* tree-inline.c (optimize_inline_calls): Likewise.
	* tree-into-ssa.c (rewrite_into_ssa, update_ssa): Likewise.
	* tree-nrv.c (tree_nrv, execute_return_slot_opt): Likewise.
	* tree-object-size.c (compute_object_sizes): Likewise.
	* tree-outof-ssa.c (eliminate_useless_phis, rewrite_trees,
	insert_backedge_copies): Likewise.
	* tree-profile.c (tree_profiling): Likewise.
	* tree-scalar-evolution.c (scev_const_prop): Likewise.
	* tree-sra.c (scan_function, sra_modify_function_body,
	propagate_dereference_distances, ipa_sra_modify_function_body,
	convert_callers): Likewise.
	* tree-ssa-ccp.c (ccp_initialize, execute_fold_all_builtins): Likewise.
	* tree-ssa-coalesce.c (build_ssa_conflict_graph,
	create_outofssa_var_map, coalesce_partitions): Likewise.
	* tree-ssa-copy.c (init_copy_prop): Likewise.
	* tree-ssa-copyrename.c (rename_ssa_copies): Likewise.
	* tree-ssa-dce.c (find_obviously_necessary_stmts,
	eliminate_unnecessary_stmts): Likewise.
	* tree-ssa-dom.c (free_all_edge_infos, tree_ssa_dominator_optimize):
	Likewise.
	* tree-ssa-forwprop.c (ssa_forward_propagate_and_combine): Likewise.
	* tree-ssa-live.c (clear_unused_block_pointer, remove_unused_locals,
	new_tree_live_info, calculate_live_on_exit, dump_live_info): Likewise.
	* tree-ssa-loop-im.c (analyze_memory_references,
	fill_always_executed_in, tree_ssa_lim_finalize): Likewise.
	* tree-ssa-loop-manip.c (find_uses_to_rename, verify_loop_closed_ssa):
	Likewise.
	* tree-ssa-math-opts.c (execute_cse_reciprocals, execute_cse_sincos,
	execute_optimize_bswap, execute_optimize_widening_mul): Likewise.
	* tree-ssa-propagate.c (substitute_and_fold): Likewise.
	* tree-ssa-structalias.c (compute_points_to_sets): Likewise.
	* tree-ssa-tail-merge.c (find_same_succ, reset_cluster_vectors):
	Likewise.
	* tree-ssa-ter.c (find_replaceable_exprs): Likewise.
	* tree-ssa-threadupdate.c (thread_through_all_blocks): Likewise.
	* tree-ssa-uncprop.c (associate_equivalences_with_edges,
	tree_ssa_uncprop): Likewise.
	* tree-ssa-uninit.c (warn_uninitialized_vars,
	execute_late_warn_uninitialized): Likewise.
	* tree-ssa.c (verify_ssa, execute_update_addresses_taken): Likewise.
	* tree-stdarg.c (check_all_va_list_escapes, execute_optimize_stdarg):
	Likewise.
	* tree-switch-conversion.c (do_switchconv): Likewise.
	* tree-vect-generic.c (expand_vector_operations): Likewise.
	* tree-vectorizer.c (adjust_simduid_builtins, note_simd_array_uses,
	execute_vect_slp): Likewise.
	* tree-vrp.c (check_all_array_refs, remove_range_assertions,
	vrp_initialize, identify_jump_threads): Likewise.
	* tsan.c (instrument_memory_accesses): Likewise.
	* ubsan.c (ubsan_pass): Likewise.
	* value-prof.c (verify_histograms, gimple_value_profile_transformations,
	gimple_find_values_to_profile): Likewise.
	* var-tracking.c (vt_find_locations, dump_dataflow_sets, vt_emit_notes,
	vt_initialize, delete_debug_insns, vt_finalize): Likewise.

gcc/testsuite/
	* g++.dg/plugin/selfassign.c (execute_warn_self_assign): Eliminate
	use of FOR_EACH_BB in favor of FOR_EACH_BB_FN, to make use of cfun
	explicit.
	* gcc.dg/plugin/selfassign.c (execute_warn_self_assign): Likewise.
---
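The conversion is purely mechanical.  A minimal before/after sketch, using a
hypothetical pass body (cleanup_block is an invented helper, not taken from
any of the hunks below):

    basic_block bb;

    /* Before: the macro implicitly walked the CFG of cfun.  */
    FOR_EACH_BB (bb)
      cleanup_block (bb);

    /* After: the function whose CFG is walked is named explicitly, so the
       same idiom works for functions other than cfun.  */
    FOR_EACH_BB_FN (bb, cfun)
      cleanup_block (bb);

FOR_EACH_BB_FN itself is unchanged: as the basic-block.h hunk shows, it
expands to a FOR_BB_BETWEEN walk from (FN)->cfg->x_entry_block_ptr->next_bb
to (FN)->cfg->x_exit_block_ptr.
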
 gcc/asan.c                               |  4 ++--
 gcc/auto-inc-dec.c                       |  2 +-
 gcc/basic-block.h                        |  2 --
 gcc/bb-reorder.c                         | 22 +++++++++++-----------
 gcc/cfg.c                                |  6 +++---
 gcc/cfganal.c                            |  8 ++++----
 gcc/cfgbuild.c                           |  8 ++++----
 gcc/cfgcleanup.c                         |  4 ++--
 gcc/cfgexpand.c                          |  4 ++--
 gcc/cfgloop.c                            | 14 +++++++-------
 gcc/cfgloopanal.c                        |  2 +-
 gcc/cfgrtl.c                             | 22 +++++++++++-----------
 gcc/cgraphbuild.c                        |  6 +++---
 gcc/combine-stack-adj.c                  |  2 +-
 gcc/combine.c                            |  8 ++++----
 gcc/config/arm/arm.c                     |  4 ++--
 gcc/config/bfin/bfin.c                   |  4 ++--
 gcc/config/c6x/c6x.c                     |  6 +++---
 gcc/config/epiphany/resolve-sw-modes.c   |  2 +-
 gcc/config/frv/frv.c                     |  4 ++--
 gcc/config/i386/i386.c                   |  2 +-
 gcc/config/ia64/ia64.c                   |  2 +-
 gcc/config/mips/mips.c                   |  2 +-
 gcc/config/picochip/picochip.c           |  2 +-
 gcc/config/rs6000/rs6000.c               |  2 +-
 gcc/config/s390/s390.c                   |  2 +-
 gcc/config/spu/spu.c                     |  2 +-
 gcc/config/tilegx/tilegx.c               |  4 ++--
 gcc/config/tilepro/tilepro.c             |  4 ++--
 gcc/coverage.c                           |  2 +-
 gcc/cprop.c                              |  8 ++++----
 gcc/cse.c                                |  2 +-
 gcc/dce.c                                |  2 +-
 gcc/df-core.c                            |  8 ++++----
 gcc/df-problems.c                        |  2 +-
 gcc/df-scan.c                            |  8 ++++----
 gcc/dominance.c                          |  6 +++---
 gcc/dse.c                                |  2 +-
 gcc/except.c                             |  2 +-
 gcc/final.c                              |  4 ++--
 gcc/function.c                           | 12 ++++++------
 gcc/gcse.c                               | 16 ++++++++--------
 gcc/gimple-iterator.c                    |  2 +-
 gcc/gimple-ssa-isolate-paths.c           |  4 ++--
 gcc/graphite-sese-to-poly.c              |  4 ++--
 gcc/haifa-sched.c                        |  2 +-
 gcc/hw-doloop.c                          |  6 +++---
 gcc/ifcvt.c                              |  2 +-
 gcc/init-regs.c                          |  2 +-
 gcc/ipa-prop.c                           |  2 +-
 gcc/ipa-pure-const.c                     |  2 +-
 gcc/ipa-split.c                          |  4 ++--
 gcc/ira-build.c                          |  2 +-
 gcc/ira-costs.c                          |  2 +-
 gcc/ira-emit.c                           | 14 +++++++-------
 gcc/ira.c                                | 22 +++++++++++-----------
 gcc/jump.c                               |  2 +-
 gcc/lcm.c                                | 10 +++++-----
 gcc/loop-init.c                          |  4 ++--
 gcc/loop-invariant.c                     |  2 +-
 gcc/lower-subreg.c                       |  4 ++--
 gcc/lra-assigns.c                        |  2 +-
 gcc/lra-coalesce.c                       |  4 ++--
 gcc/lra-constraints.c                    |  4 ++--
 gcc/lra-eliminations.c                   |  2 +-
 gcc/lra-spills.c                         |  6 +++---
 gcc/lra.c                                |  8 ++++----
 gcc/mcf.c                                |  2 +-
 gcc/mode-switching.c                     |  6 +++---
 gcc/modulo-sched.c                       |  2 +-
 gcc/omp-low.c                            |  6 +++---
 gcc/postreload-gcse.c                    |  4 ++--
 gcc/postreload.c                         |  2 +-
 gcc/predict.c                            | 14 +++++++-------
 gcc/profile.c                            |  8 ++++----
 gcc/ree.c                                |  2 +-
 gcc/reg-stack.c                          |  6 +++---
 gcc/regcprop.c                           |  4 ++--
 gcc/reginfo.c                            |  2 +-
 gcc/regrename.c                          |  8 ++++----
 gcc/regstat.c                            |  4 ++--
 gcc/reload1.c                            |  8 ++++----
 gcc/resource.c                           |  4 ++--
 gcc/sched-ebb.c                          |  2 +-
 gcc/sched-rgn.c                          | 26 +++++++++++++-------------
 gcc/sel-sched-dump.c                     |  2 +-
 gcc/sel-sched-ir.c                       | 10 +++++-----
 gcc/sese.c                               |  6 +++---
 gcc/stack-ptr-mod.c                      |  2 +-
 gcc/store-motion.c                       |  6 +++---
 gcc/testsuite/g++.dg/plugin/selfassign.c |  2 +-
 gcc/testsuite/gcc.dg/plugin/selfassign.c |  2 +-
 gcc/tracer.c                             |  2 +-
 gcc/trans-mem.c                          |  2 +-
 gcc/tree-call-cdce.c                     |  2 +-
 gcc/tree-cfg.c                           | 28 ++++++++++++++--------------
 gcc/tree-cfgcleanup.c                    |  4 ++--
 gcc/tree-complex.c                       |  4 ++--
 gcc/tree-dfa.c                           |  4 ++--
 gcc/tree-eh.c                            |  6 +++---
 gcc/tree-emutls.c                        |  2 +-
 gcc/tree-if-conv.c                       |  2 +-
 gcc/tree-inline.c                        |  2 +-
 gcc/tree-into-ssa.c                      |  8 ++++----
 gcc/tree-nrv.c                           |  6 +++---
 gcc/tree-object-size.c                   |  2 +-
 gcc/tree-outof-ssa.c                     |  6 +++---
 gcc/tree-profile.c                       |  2 +-
 gcc/tree-scalar-evolution.c              |  2 +-
 gcc/tree-sra.c                           | 10 +++++-----
 gcc/tree-ssa-ccp.c                       |  6 +++---
 gcc/tree-ssa-coalesce.c                  |  6 +++---
 gcc/tree-ssa-copy.c                      |  2 +-
 gcc/tree-ssa-copyrename.c                |  4 ++--
 gcc/tree-ssa-dce.c                       |  6 +++---
 gcc/tree-ssa-dom.c                       |  4 ++--
 gcc/tree-ssa-forwprop.c                  |  2 +-
 gcc/tree-ssa-live.c                      | 18 +++++++++---------
 gcc/tree-ssa-loop-im.c                   |  6 +++---
 gcc/tree-ssa-loop-manip.c                |  4 ++--
 gcc/tree-ssa-math-opts.c                 | 10 +++++-----
 gcc/tree-ssa-propagate.c                 |  2 +-
 gcc/tree-ssa-structalias.c               |  4 ++--
 gcc/tree-ssa-tail-merge.c                |  4 ++--
 gcc/tree-ssa-ter.c                       |  2 +-
 gcc/tree-ssa-threadupdate.c              |  2 +-
 gcc/tree-ssa-uncprop.c                   |  4 ++--
 gcc/tree-ssa-uninit.c                    |  4 ++--
 gcc/tree-ssa.c                           |  6 +++---
 gcc/tree-stdarg.c                        |  6 +++---
 gcc/tree-switch-conversion.c             |  2 +-
 gcc/tree-vect-generic.c                  |  2 +-
 gcc/tree-vectorizer.c                    |  6 +++---
 gcc/tree-vrp.c                           |  8 ++++----
 gcc/tsan.c                               |  2 +-
 gcc/ubsan.c                              |  2 +-
 gcc/value-prof.c                         |  6 +++---
 gcc/var-tracking.c                       | 16 ++++++++--------
 138 files changed, 363 insertions(+), 365 deletions(-)

diff --git a/gcc/asan.c b/gcc/asan.c
index 09c0667..a50186c 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -2043,7 +2043,7 @@ transform_statements (void)
   gimple_stmt_iterator i;
   int saved_last_basic_block = last_basic_block_for_fn (cfun);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       basic_block prev_bb = bb;
 
@@ -2557,7 +2557,7 @@ execute_sanopt (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/auto-inc-dec.c b/gcc/auto-inc-dec.c
index 6006b70..be7fdf8 100644
--- a/gcc/auto-inc-dec.c
+++ b/gcc/auto-inc-dec.c
@@ -1480,7 +1480,7 @@ rest_of_handle_auto_inc_dec (void)
   reg_next_use = XCNEWVEC (rtx, max_reg);
   reg_next_inc_use = XCNEWVEC (rtx, max_reg);
   reg_next_def = XCNEWVEC (rtx, max_reg);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     merge_in_block (max_reg, bb);
 
   free (reg_next_use);
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index 174b650..b378a5b 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -333,8 +333,6 @@ struct GTY(()) control_flow_graph {
 #define FOR_EACH_BB_FN(BB, FN) \
   FOR_BB_BETWEEN (BB, (FN)->cfg->x_entry_block_ptr->next_bb, (FN)->cfg->x_exit_block_ptr, next_bb)
 
-#define FOR_EACH_BB(BB) FOR_EACH_BB_FN (BB, cfun)
-
 #define FOR_EACH_BB_REVERSE_FN(BB, FN) \
   FOR_BB_BETWEEN (BB, (FN)->cfg->x_exit_block_ptr->prev_bb, (FN)->cfg->x_entry_block_ptr, prev_bb)
 
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index 363af2d..7f8ea07 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -1566,7 +1566,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (void)
   vec<basic_block> bbs_in_hot_partition = vNULL;
 
   /* Mark which partition (hot/cold) each basic block belongs in.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bool cold_bb = false;
 
@@ -1658,7 +1658,7 @@ find_rarely_executed_basic_blocks_and_crossing_edges (void)
 
   /* Mark every edge that crosses between sections.  */
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_EACH_EDGE (e, ei, bb->succs)
       {
 	unsigned int flags = e->flags;
@@ -1691,7 +1691,7 @@ set_edge_can_fallthru_flag (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge e;
       edge_iterator ei;
@@ -1792,7 +1792,7 @@ fix_up_fall_thru_edges (void)
   rtx old_jump;
   rtx fall_thru_label;
 
-  FOR_EACH_BB (cur_bb)
+  FOR_EACH_BB_FN (cur_bb, cfun)
     {
       fall_thru = NULL;
       if (EDGE_COUNT (cur_bb->succs) > 0)
@@ -1992,7 +1992,7 @@ fix_crossing_conditional_branches (void)
   rtx old_label = NULL_RTX;
   rtx new_label;
 
-  FOR_EACH_BB (cur_bb)
+  FOR_EACH_BB_FN (cur_bb, cfun)
     {
       crossing_edge = NULL;
       if (EDGE_COUNT (cur_bb->succs) > 0)
@@ -2123,7 +2123,7 @@ fix_crossing_unconditional_branches (void)
   rtx cur_insn;
   edge succ;
 
-  FOR_EACH_BB (cur_bb)
+  FOR_EACH_BB_FN (cur_bb, cfun)
     {
       last_insn = BB_END (cur_bb);
 
@@ -2201,7 +2201,7 @@ add_reg_crossing_jump_notes (void)
   edge e;
   edge_iterator ei;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_EACH_EDGE (e, ei, bb->succs)
       if ((e->flags & EDGE_CROSSING)
 	  && JUMP_P (BB_END (e->src))
@@ -2286,7 +2286,7 @@ insert_section_boundary_note (void)
   if (!crtl->has_bb_partition)
     return;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       if (!current_partition)
 	current_partition = BB_PARTITION (bb);
@@ -2321,7 +2321,7 @@ rest_of_handle_reorder_blocks (void)
   reorder_basic_blocks ();
   cleanup_cfg (CLEANUP_EXPENSIVE);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
       bb->aux = bb->next_bb;
   cfg_layout_finalize ();
@@ -2410,7 +2410,7 @@ duplicate_computed_gotos (void)
   /* Look for blocks that end in a computed jump, and see if such blocks
      are suitable for unfactoring.  If a block is a candidate for unfactoring,
      mark it in the candidates.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
       edge e;
@@ -2457,7 +2457,7 @@ duplicate_computed_gotos (void)
     goto done;
 
   /* Duplicate computed gotos.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       if (bb->flags & BB_VISITED)
 	continue;
diff --git a/gcc/cfg.c b/gcc/cfg.c
index 6c3181d..4f9d769 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -101,7 +101,7 @@ clear_edges (void)
   edge e;
   edge_iterator ei;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_EACH_EDGE (e, ei, bb->succs)
 	free_edge (e);
@@ -163,7 +163,7 @@ compact_blocks (void)
       basic_block bb;
 
       i = NUM_FIXED_BLOCKS;
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
 	  bb->index = i;
@@ -828,7 +828,7 @@ brief_dump_cfg (FILE *file, int flags)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       dump_bb_info (file, bb, 0,
 		    flags & (TDF_COMMENT | TDF_DETAILS),
diff --git a/gcc/cfganal.c b/gcc/cfganal.c
index 9900d82..3371b4a 100644
--- a/gcc/cfganal.c
+++ b/gcc/cfganal.c
@@ -159,7 +159,7 @@ find_unreachable_blocks (void)
 
   /* Clear all the reachability flags.  */
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bb->flags &= ~BB_REACHABLE;
 
   /* Add our starting points to the worklist.  Almost always there will
@@ -554,7 +554,7 @@ add_noreturn_fake_exit_edges (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (EDGE_COUNT (bb->succs) == 0)
       make_single_succ_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
 }
@@ -1236,7 +1236,7 @@ compute_dominance_frontiers_1 (bitmap_head *frontiers)
   edge p;
   edge_iterator ei;
   basic_block b;
-  FOR_EACH_BB (b)
+  FOR_EACH_BB_FN (b, cfun)
     {
       if (EDGE_COUNT (b->preds) >= 2)
 	{
@@ -1517,7 +1517,7 @@ single_pred_before_succ_order (void)
   bitmap_clear (visited);
 
   MARK_VISITED (ENTRY_BLOCK_PTR_FOR_FN (cfun));
-  FOR_EACH_BB (x)
+  FOR_EACH_BB_FN (x, cfun)
     {
       if (VISITED_P (x))
 	continue;
diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c
index f73bbc5..acfc73b 100644
--- a/gcc/cfgbuild.c
+++ b/gcc/cfgbuild.c
@@ -595,15 +595,15 @@ find_many_sub_basic_blocks (sbitmap blocks)
 {
   basic_block bb, min, max;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     SET_STATE (bb,
 	       bitmap_bit_p (blocks, bb->index) ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (STATE (bb) == BLOCK_TO_SPLIT)
       find_bb_boundaries (bb);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (STATE (bb) != BLOCK_ORIGINAL)
       break;
 
@@ -640,6 +640,6 @@ find_many_sub_basic_blocks (sbitmap blocks)
 	compute_outgoing_frequencies (bb);
       }
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     SET_STATE (bb, 0);
 }
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index 234e5b6..cf72c03 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -2613,7 +2613,7 @@ try_optimize_cfg (int mode)
 
   crossjumps_occured = false;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     update_forwarder_flag (bb);
 
   if (! targetm.cannot_modify_jumps_p ())
@@ -2955,7 +2955,7 @@ delete_dead_jumptables (void)
 
   /* A dead jump table does not belong to any basic block.  Scan insns
      between two adjacent basic blocks.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn, next;
 
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 014f78b..56bcd80 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -520,7 +520,7 @@ add_scope_conflicts (void)
 	}
     }
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     add_scope_conflicts_1 (bb, work, true);
 
   free (rpo);
@@ -5378,7 +5378,7 @@ discover_nonconstant_array_refs (void)
   basic_block bb;
   gimple_stmt_iterator gsi;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       {
 	gimple stmt = gsi_stmt (gsi);
diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c
index 9d28950..5639e7a 100644
--- a/gcc/cfgloop.c
+++ b/gcc/cfgloop.c
@@ -50,7 +50,7 @@ flow_loops_cfg_dump (FILE *file)
   if (!file)
     return;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge succ;
       edge_iterator ei;
@@ -834,7 +834,7 @@ get_loop_body (const struct loop *loop)
       gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
       body[tv++] = loop->header;
       body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	body[tv++] = bb;
     }
   else
@@ -1082,7 +1082,7 @@ record_loop_exits (void)
 					  loop_exit_hash, loop_exit_eq,
 					  loop_exit_free);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_EACH_EDGE (e, ei, bb->succs)
 	{
@@ -1343,7 +1343,7 @@ verify_loop_structure (void)
     verify_dominators (CDI_DOMINATORS);
 
   /* Check the headers.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb_loop_header_p (bb))
       {
 	if (bb->loop_father->header == NULL)
@@ -1479,7 +1479,7 @@ verify_loop_structure (void)
     {
       /* Record old info.  */
       irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  edge_iterator ei;
 	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
@@ -1495,7 +1495,7 @@ verify_loop_structure (void)
       mark_irreducible_loops ();
 
       /* Compare.  */
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  edge_iterator ei;
 
@@ -1578,7 +1578,7 @@ verify_loop_structure (void)
 
       sizes = XCNEWVEC (unsigned, num);
       memset (sizes, 0, sizeof (unsigned) * num);
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  edge_iterator ei;
 	  if (bb->loop_father == current_loops->tree_root)
diff --git a/gcc/cfgloopanal.c b/gcc/cfgloopanal.c
index 84b61c1..5e89cb1c 100644
--- a/gcc/cfgloopanal.c
+++ b/gcc/cfgloopanal.c
@@ -432,7 +432,7 @@ mark_loop_exit_edges (void)
   if (number_of_loops (cfun) <= 1)
     return;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge_iterator ei;
 
diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c
index 5dc52a6..daadd9b 100644
--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -416,7 +416,7 @@ compute_bb_for_insn (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx end = BB_END (bb);
       rtx insn;
@@ -2275,7 +2275,7 @@ find_partition_fixes (bool flag_only)
   /* Callers check this.  */
   gcc_checking_assert (crtl->has_bb_partition);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if ((BB_PARTITION (bb) == BB_COLD_PARTITION))
       bbs_in_cold_partition.safe_push (bb);
 
@@ -2372,7 +2372,7 @@ verify_hot_cold_block_grouping (void)
       || current_ir_type () != IR_RTL_CFGRTL)
     return err;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       if (current_partition != BB_UNPARTITIONED
           && BB_PARTITION (bb) != current_partition)
@@ -3201,7 +3201,7 @@ purge_all_dead_edges (void)
   int purged = false;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bool purged_here = purge_dead_edges (bb);
 
@@ -3226,7 +3226,7 @@ fixup_abnormal_edges (void)
   bool inserted = false;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge e;
       edge_iterator ei;
@@ -3449,7 +3449,7 @@ record_effective_endpoints (void)
     cfg_layout_function_header = NULL_RTX;
 
   next_insn = get_insns ();
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx end;
 
@@ -3479,7 +3479,7 @@ outof_cfg_layout_mode (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
       bb->aux = bb->next_bb;
 
@@ -3857,7 +3857,7 @@ fixup_reorder_chain (void)
   relink_block_chain (/*stay_in_cfglayout_mode=*/false);
 
   /* Annoying special case - jump around dead jumptables left in the code.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge e = find_fallthru_edge (bb->succs);
 
@@ -3868,7 +3868,7 @@ fixup_reorder_chain (void)
   /* Ensure goto_locus from edges has some instructions with that locus
      in RTL.  */
   if (!optimize)
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       {
         edge e;
         edge_iterator ei;
@@ -4047,7 +4047,7 @@ force_one_exit_fallthru (void)
 
   /* Fix up the chain of blocks -- make FORWARDER immediately precede the
      exit block.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       if (bb->aux == NULL && bb != forwarder)
 	{
@@ -4258,7 +4258,7 @@ break_superblocks (void)
   superblocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
   bitmap_clear (superblocks);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb->flags & BB_SUPERBLOCK)
       {
 	bb->flags &= ~BB_SUPERBLOCK;
diff --git a/gcc/cgraphbuild.c b/gcc/cgraphbuild.c
index 6c6698b..429dc8e 100644
--- a/gcc/cgraphbuild.c
+++ b/gcc/cgraphbuild.c
@@ -317,7 +317,7 @@ build_cgraph_edges (void)
 
   /* Create the callgraph edges and record the nodes referenced by the function.
      body.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	{
@@ -451,7 +451,7 @@ rebuild_cgraph_edges (void)
 
   node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	{
@@ -505,7 +505,7 @@ cgraph_rebuild_references (void)
 
   node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	ipa_record_stmt_references (node, gsi_stmt (gsi));
diff --git a/gcc/combine-stack-adj.c b/gcc/combine-stack-adj.c
index 5ca131f..5c897cf 100644
--- a/gcc/combine-stack-adj.c
+++ b/gcc/combine-stack-adj.c
@@ -95,7 +95,7 @@ combine_stack_adjustments (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     combine_stack_adjustments_for_block (bb);
 }
 
diff --git a/gcc/combine.c b/gcc/combine.c
index c7eb5e5..dea6c28 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -960,7 +960,7 @@ delete_noop_moves (void)
   rtx insn, next;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
 	{
@@ -997,7 +997,7 @@ create_log_links (void)
      usage -- these are taken from original flow.c did. Don't ask me why it is
      done this way; I don't know and if it works, I don't want to know.  */
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_BB_INSNS_REVERSE (bb, insn)
         {
@@ -1160,7 +1160,7 @@ combine_instructions (rtx f, unsigned int nregs)
   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
 
   create_log_links ();
-  FOR_EACH_BB (this_basic_block)
+  FOR_EACH_BB_FN (this_basic_block, cfun)
     {
       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
       last_call_luid = 0;
@@ -1211,7 +1211,7 @@ combine_instructions (rtx f, unsigned int nregs)
   setup_incoming_promotions (first);
   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
 
-  FOR_EACH_BB (this_basic_block)
+  FOR_EACH_BB_FN (this_basic_block, cfun)
     {
       rtx last_combined_insn = NULL_RTX;
       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index b3a81b0..268e560 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -16548,7 +16548,7 @@ thumb1_reorg (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx dest, src;
       rtx pat, op0, set = NULL;
@@ -16626,7 +16626,7 @@ thumb2_reorg (void)
   compute_bb_for_insn ();
   df_analyze ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
 
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
index a1adf80..c15451c 100644
--- a/gcc/config/bfin/bfin.c
+++ b/gcc/config/bfin/bfin.c
@@ -3957,7 +3957,7 @@ static void
 bfin_gen_bundles (void)
 {
   basic_block bb;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn, next;
       rtx slot[3];
@@ -4036,7 +4036,7 @@ static void
 reorder_var_tracking_notes (void)
 {
   basic_block bb;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn, next;
       rtx queue = NULL_RTX;
diff --git a/gcc/config/c6x/c6x.c b/gcc/config/c6x/c6x.c
index af310ba..6f80bc8 100644
--- a/gcc/config/c6x/c6x.c
+++ b/gcc/config/c6x/c6x.c
@@ -4629,7 +4629,7 @@ c6x_gen_bundles (void)
   basic_block bb;
   rtx insn, next, last_call;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn, next;
       /* The machine is eight insns wide.  We can have up to six shadow
@@ -5383,7 +5383,7 @@ conditionalize_after_sched (void)
 {
   basic_block bb;
   rtx insn;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       {
 	unsigned uid = INSN_UID (insn);
@@ -5959,7 +5959,7 @@ c6x_reorg (void)
 
   if (c6x_flag_schedule_insns2)
     {
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	if ((bb->flags & BB_DISABLE_SCHEDULE) == 0)
 	  assign_reservations (BB_HEAD (bb), BB_END (bb));
     }
diff --git a/gcc/config/epiphany/resolve-sw-modes.c b/gcc/config/epiphany/resolve-sw-modes.c
index a780254..30f6920 100644
--- a/gcc/config/epiphany/resolve-sw-modes.c
+++ b/gcc/config/epiphany/resolve-sw-modes.c
@@ -69,7 +69,7 @@ resolve_sw_modes (void)
       df_note_add_problem ();
       df_analyze ();
     }
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       {
 	enum attr_fp_mode selected_mode;
diff --git a/gcc/config/frv/frv.c b/gcc/config/frv/frv.c
index a5aeb75..3755e62 100644
--- a/gcc/config/frv/frv.c
+++ b/gcc/config/frv/frv.c
@@ -8070,11 +8070,11 @@ frv_optimize_membar (void)
   first_io = XCNEWVEC (struct frv_io, last_basic_block_for_fn (cfun));
   last_membar = XCNEWVEC (rtx, last_basic_block_for_fn (cfun));
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     frv_optimize_membar_local (bb, &first_io[bb->index],
 			       &last_membar[bb->index]);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (last_membar[bb->index] != 0)
       frv_optimize_membar_global (bb, first_io, last_membar[bb->index]);
 
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 0f6612d..aa9694f 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -10481,7 +10481,7 @@ ix86_finalize_stack_realign_flags (void)
       add_to_hard_reg_set (&set_up_by_prologue, Pmode, ARG_POINTER_REGNUM);
       add_to_hard_reg_set (&set_up_by_prologue, Pmode,
 			   HARD_FRAME_POINTER_REGNUM);
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
         {
           rtx insn;
 	  FOR_BB_INSNS (bb, insn)
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 8f305c1..a837974 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -9688,7 +9688,7 @@ ia64_reorg (void)
 
       /* We can't let modulo-sched prevent us from scheduling any bbs,
 	 since we need the final schedule to produce bundle information.  */
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	bb->flags &= ~BB_DISABLE_SCHEDULE;
 
       initiate_bundle_states ();
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index f19478c..e65dc6b 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -15332,7 +15332,7 @@ mips_annotate_pic_calls (void)
   basic_block bb;
   rtx insn;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
     {
       rtx call, reg, symbol, second_call;
diff --git a/gcc/config/picochip/picochip.c b/gcc/config/picochip/picochip.c
index 4756cb7..8861ffc 100644
--- a/gcc/config/picochip/picochip.c
+++ b/gcc/config/picochip/picochip.c
@@ -3174,7 +3174,7 @@ reorder_var_tracking_notes (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn, next, last_insn = NULL_RTX;
       rtx queue = NULL_RTX;
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 599cf49..1db97fa 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -16395,7 +16395,7 @@ rs6000_alloc_sdmode_stack_slot (void)
   if (TARGET_NO_SDMODE_STACK)
     return;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       {
 	tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index fcd7532..f9b7cd0 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -7458,7 +7458,7 @@ s390_regs_ever_clobbered (char regs_ever_clobbered[])
       if (!call_really_used_regs[i])
 	regs_ever_clobbered[i] = 1;
 
-  FOR_EACH_BB (cur_bb)
+  FOR_EACH_BB_FN (cur_bb, cfun)
     {
       FOR_BB_INSNS (cur_bb, cur_insn)
 	{
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index 1a9895e..66209b6 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -2645,7 +2645,7 @@ spu_machine_dependent_reorg (void)
     find_many_sub_basic_blocks (blocks);
 
   /* We have to schedule to make sure alignment is ok. */
-  FOR_EACH_BB (bb) bb->flags &= ~BB_DISABLE_SCHEDULE;
+  FOR_EACH_BB_FN (bb, cfun) bb->flags &= ~BB_DISABLE_SCHEDULE;
 
   /* The hints need to be scheduled, so call it again. */
   schedule_insns ();
diff --git a/gcc/config/tilegx/tilegx.c b/gcc/config/tilegx/tilegx.c
index c2f9e07..eecc9a9 100644
--- a/gcc/config/tilegx/tilegx.c
+++ b/gcc/config/tilegx/tilegx.c
@@ -4383,7 +4383,7 @@ static void
 tilegx_gen_bundles (void)
 {
   basic_block bb;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn, next;
       rtx end = NEXT_INSN (BB_END (bb));
@@ -4709,7 +4709,7 @@ static void
 reorder_var_tracking_notes (void)
 {
   basic_block bb;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
   {
     rtx insn, next;
     rtx queue = NULL_RTX;
diff --git a/gcc/config/tilepro/tilepro.c b/gcc/config/tilepro/tilepro.c
index 31bc490..b2bafb4 100644
--- a/gcc/config/tilepro/tilepro.c
+++ b/gcc/config/tilepro/tilepro.c
@@ -3988,7 +3988,7 @@ static void
 tilepro_gen_bundles (void)
 {
   basic_block bb;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
   {
     rtx insn, next;
     rtx end = NEXT_INSN (BB_END (bb));
@@ -4259,7 +4259,7 @@ static void
 reorder_var_tracking_notes (void)
 {
   basic_block bb;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
   {
     rtx insn, next;
     rtx queue = NULL_RTX;
diff --git a/gcc/coverage.c b/gcc/coverage.c
index f2ac5fc..f7a2924 100644
--- a/gcc/coverage.c
+++ b/gcc/coverage.c
@@ -588,7 +588,7 @@ coverage_compute_cfg_checksum (void)
   basic_block bb;
   unsigned chksum = n_basic_blocks_for_fn (cfun);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge e;
       edge_iterator ei;
diff --git a/gcc/cprop.c b/gcc/cprop.c
index 600c617..7d07246 100644
--- a/gcc/cprop.c
+++ b/gcc/cprop.c
@@ -400,7 +400,7 @@ compute_hash_table_work (struct hash_table_d *table)
   /* Allocate vars to track sets of regs.  */
   reg_set_bitmap = ALLOC_REG_SET (NULL);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
 
@@ -649,7 +649,7 @@ compute_cprop_data (void)
      aren't recorded for the local pass so they cannot be propagated within
      their basic block by this pass and 2) the global pass would otherwise
      propagate them only in the successors of their basic block.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       int index = implicit_set_indexes[bb->index];
       if (index != -1)
@@ -1234,7 +1234,7 @@ local_cprop_pass (void)
   unsigned i;
 
   cselib_init (0);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_BB_INSNS (bb, insn)
 	{
@@ -1359,7 +1359,7 @@ find_implicit_sets (void)
 
   implicit_sets = XCNEWVEC (rtx, implicit_sets_size);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* Check for more than one successor.  */
       if (EDGE_COUNT (bb->succs) <= 1)
diff --git a/gcc/cse.c b/gcc/cse.c
index 74ae8ba..0e28f48 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -7335,7 +7335,7 @@ cse_condition_code_reg (void)
   else
     cc_reg_2 = NULL_RTX;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx last_insn;
       rtx cc_reg;
diff --git a/gcc/dce.c b/gcc/dce.c
index 07d31f7..3101102 100644
--- a/gcc/dce.c
+++ b/gcc/dce.c
@@ -623,7 +623,7 @@ prescan_insns_for_dce (bool fast)
   if (!df_in_progress && ACCUMULATE_OUTGOING_ARGS)
     arg_stores = BITMAP_ALLOC (NULL);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_BB_INSNS_REVERSE_SAFE (bb, insn, prev)
 	if (NONDEBUG_INSN_P (insn))
diff --git a/gcc/df-core.c b/gcc/df-core.c
index d41fb72..ba57d39 100644
--- a/gcc/df-core.c
+++ b/gcc/df-core.c
@@ -1543,7 +1543,7 @@ df_compact_blocks (void)
 	    bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
 
 	  i = NUM_FIXED_BLOCKS;
-	  FOR_EACH_BB (bb)
+	  FOR_EACH_BB_FN (bb, cfun)
 	    {
 	      if (bitmap_bit_p (&tmp, bb->index))
 		bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
@@ -1564,7 +1564,7 @@ df_compact_blocks (void)
 	     place in the block_info vector.  Null out the copied
 	     item.  The entry and exit blocks never move.  */
 	  i = NUM_FIXED_BLOCKS;
-	  FOR_EACH_BB (bb)
+	  FOR_EACH_BB_FN (bb, cfun)
 	    {
 	      df_set_bb_info (dflow, i,
 			      (char *)problem_temps
@@ -1590,7 +1590,7 @@ df_compact_blocks (void)
       bitmap_copy (&tmp, df->blocks_to_analyze);
       bitmap_clear (df->blocks_to_analyze);
       i = NUM_FIXED_BLOCKS;
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  if (bitmap_bit_p (&tmp, bb->index))
 	    bitmap_set_bit (df->blocks_to_analyze, i);
@@ -1601,7 +1601,7 @@ df_compact_blocks (void)
   bitmap_clear (&tmp);
 
   i = NUM_FIXED_BLOCKS;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
       bb->index = i;
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index ab19372..70f7254 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -2427,7 +2427,7 @@ df_word_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
 
   bitmap_obstack_initialize (&problem_data->word_lr_bitmaps);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bitmap_set_bit (df_word_lr->out_of_date_transfer_functions, bb->index);
 
   bitmap_set_bit (df_word_lr->out_of_date_transfer_functions, ENTRY_BLOCK);
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index 5f0ba4a..9f6f67a 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -449,7 +449,7 @@ df_scan_start_dump (FILE *file ATTRIBUTE_UNUSED)
 	fprintf (file, "} ");
       }
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       if (INSN_P (insn))
 	{
@@ -673,7 +673,7 @@ df_scan_blocks (void)
   df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK));
 
   /* Regular blocks */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       unsigned int bb_index = bb->index;
       df_bb_refs_record (bb_index, true);
@@ -1415,7 +1415,7 @@ df_insn_rescan_all (void)
   bitmap_clear (&df->insns_to_rescan);
   bitmap_clear (&df->insns_to_notes_rescan);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
       FOR_BB_INSNS (bb, insn)
@@ -4154,7 +4154,7 @@ df_update_entry_exit_and_calls (void)
 
   /* The call insns need to be rescanned because there may be changes
      in the set of registers clobbered across the call.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
       FOR_BB_INSNS (bb, insn)
diff --git a/gcc/dominance.c b/gcc/dominance.c
index af73078..521b224 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -662,7 +662,7 @@ calculate_dominance_info (enum cdi_direction dir)
       calc_dfs_tree (&di, reverse);
       calc_idoms (&di, reverse);
 
-      FOR_EACH_BB (b)
+      FOR_EACH_BB_FN (b, cfun)
 	{
 	  TBB d = di.dom[di.dfs_order[b->index]];
 
@@ -1025,7 +1025,7 @@ verify_dominators (enum cdi_direction dir)
   calc_dfs_tree (&di, reverse);
   calc_idoms (&di, reverse);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       imm_bb = get_immediate_dominator (dir, bb);
       if (!imm_bb)
@@ -1492,7 +1492,7 @@ DEBUG_FUNCTION void
 debug_dominance_info (enum cdi_direction dir)
 {
   basic_block bb, bb2;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if ((bb2 = get_immediate_dominator (dir, bb)))
       fprintf (stderr, "%i %i\n", bb->index, bb2->index);
 }
diff --git a/gcc/dse.c b/gcc/dse.c
index a926cb8..e5b0850 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -3507,7 +3507,7 @@ static void
 dse_step5_nospill (void)
 {
   basic_block bb;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bb_info_t bb_info = bb_table[bb->index];
       insn_info_t insn_info = bb_info->last_insn;
diff --git a/gcc/except.c b/gcc/except.c
index e4b8cad..cf4fd14 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -1511,7 +1511,7 @@ finish_eh_generation (void)
     commit_edge_insertions ();
 
   /* Redirect all EH edges from the post_landing_pad to the landing pad.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       eh_landing_pad lp;
       edge_iterator ei;
diff --git a/gcc/final.c b/gcc/final.c
index 2ab6a4d..f475d27 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -700,14 +700,14 @@ compute_alignments (void)
       flow_loops_dump (dump_file, NULL, 1);
     }
   loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb->frequency > freq_max)
       freq_max = bb->frequency;
   freq_threshold = freq_max / PARAM_VALUE (PARAM_ALIGN_THRESHOLD);
 
   if (dump_file)
     fprintf (dump_file, "freq_max: %i\n",freq_max);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx label = BB_HEAD (bb);
       int fallthru_frequency = 0, branch_frequency = 0, has_fallthru = 0;
diff --git a/gcc/function.c b/gcc/function.c
index d257af4..e00f583 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -6043,7 +6043,7 @@ thread_prologue_and_epilogue_insns (void)
       max_grow_size = get_uncond_jump_length ();
       max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  rtx insn;
 	  unsigned size = 0;
@@ -6120,7 +6120,7 @@ thread_prologue_and_epilogue_insns (void)
 	 needing a prologue.  */
       bitmap_clear (&bb_on_list);
       bitmap_and_compl (&bb_antic_flags, &bb_flags, &bb_tail);
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  if (!bitmap_bit_p (&bb_antic_flags, bb->index))
 	    continue;
@@ -6154,7 +6154,7 @@ thread_prologue_and_epilogue_insns (void)
       /* Find exactly one edge that leads to a block in ANTIC from
 	 a block that isn't.  */
       if (!bitmap_bit_p (&bb_antic_flags, entry_edge->dest->index))
-	FOR_EACH_BB (bb)
+	FOR_EACH_BB_FN (bb, cfun)
 	  {
 	    if (!bitmap_bit_p (&bb_antic_flags, bb->index))
 	      continue;
@@ -6202,7 +6202,7 @@ thread_prologue_and_epilogue_insns (void)
 	  /* Find tail blocks reachable from both blocks needing a
 	     prologue and blocks not needing a prologue.  */
 	  if (!bitmap_empty_p (&bb_tail))
-	    FOR_EACH_BB (bb)
+	    FOR_EACH_BB_FN (bb, cfun)
 	      {
 		bool some_pro, some_no_pro;
 		if (!bitmap_bit_p (&bb_tail, bb->index))
@@ -6480,7 +6480,7 @@ thread_prologue_and_epilogue_insns (void)
 	 we take advantage of cfg_layout_finalize using
 	 fixup_fallthru_exit_predecessor.  */
       cfg_layout_initialize (0);
-      FOR_EACH_BB (cur_bb)
+      FOR_EACH_BB_FN (cur_bb, cfun)
 	if (cur_bb->index >= NUM_FIXED_BLOCKS
 	    && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
 	  cur_bb->aux = cur_bb->next_bb;
@@ -7192,7 +7192,7 @@ rest_of_match_asm_constraints (void)
     return 0;
 
   df_set_flags (DF_DEFER_INSN_RESCAN);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_BB_INSNS (bb, insn)
 	{
diff --git a/gcc/gcse.c b/gcc/gcse.c
index fa25a46..a6874ab 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -1559,7 +1559,7 @@ compute_hash_table_work (struct hash_table_d *table)
   for (i = 0; i < max_reg_num (); ++i)
     reg_avail_info[i].last_bb = NULL;
 
-  FOR_EACH_BB (current_bb)
+  FOR_EACH_BB_FN (current_bb, cfun)
     {
       rtx insn;
       unsigned int regno;
@@ -1899,7 +1899,7 @@ prune_expressions (bool pre_p)
 	}
     }
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge e;
       edge_iterator ei;
@@ -2020,7 +2020,7 @@ compute_pre_data (void)
      ~(TRANSP | COMP)
   */
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
       bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
@@ -2855,7 +2855,7 @@ compute_code_hoist_vbeinout (void)
     {
       fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
         {
 	  fprintf (dump_file, "vbein (%d): ", bb->index);
 	  dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
@@ -3169,7 +3169,7 @@ hoist_code (void)
   to_bb_head = XCNEWVEC (int, get_max_uid ());
   bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
       int to_head;
@@ -3512,7 +3512,7 @@ calculate_bb_reg_pressure (void)
 
   ira_setup_eliminable_regset ();
   curr_regs_live = BITMAP_ALLOC (&reg_obstack);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       curr_bb = bb;
       BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
@@ -3562,7 +3562,7 @@ calculate_bb_reg_pressure (void)
     return;
 
   fprintf (dump_file, "\nRegister Pressure: \n");
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       fprintf (dump_file, "  Basic block %d: \n", bb->index);
       for (i = 0; (int) i < ira_pressure_classes_num; i++)
@@ -3888,7 +3888,7 @@ compute_ld_motion_mems (void)
   pre_ldst_mems = NULL;
   pre_ldst_table.create (13);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_BB_INSNS (bb, insn)
 	{
diff --git a/gcc/gimple-iterator.c b/gcc/gimple-iterator.c
index 9f51e6c..2460c61 100644
--- a/gcc/gimple-iterator.c
+++ b/gcc/gimple-iterator.c
@@ -839,7 +839,7 @@ gsi_commit_edge_inserts (void)
   gsi_commit_one_edge_insert (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
 			      NULL);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_EACH_EDGE (e, ei, bb->succs)
       gsi_commit_one_edge_insert (e, NULL);
 }
diff --git a/gcc/gimple-ssa-isolate-paths.c b/gcc/gimple-ssa-isolate-paths.c
index 052bf3f..aaa7537 100644
--- a/gcc/gimple-ssa-isolate-paths.c
+++ b/gcc/gimple-ssa-isolate-paths.c
@@ -216,7 +216,7 @@ find_implicit_erroneous_behaviour (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator si;
 
@@ -304,7 +304,7 @@ find_explicit_erroneous_behaviour (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator si;
 
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 975db63..66c1b6e 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -2295,7 +2295,7 @@ rewrite_reductions_out_of_ssa (scop_p scop)
   gimple_stmt_iterator psi;
   sese region = SCOP_REGION (scop);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb_in_sese_p (bb, region))
       for (psi = gsi_start_phis (bb); !gsi_end_p (psi);)
 	{
@@ -2489,7 +2489,7 @@ rewrite_cross_bb_scalar_deps_out_of_ssa (scop_p scop)
   /* Create an extra empty BB after the scop.  */
   split_edge (SESE_EXIT (region));
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb_in_sese_p (bb, region))
       for (psi = gsi_start_bb (bb); !gsi_end_p (psi); gsi_next (&psi))
 	changed |= rewrite_cross_bb_scalar_deps (scop, &psi);
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index d5e3309..4f3b054 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -6709,7 +6709,7 @@ haifa_sched_init (void)
 
     sched_init_bbs ();
 
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       bbs.quick_push (bb);
     sched_init_luids (bbs);
     sched_deps_init (true);
diff --git a/gcc/hw-doloop.c b/gcc/hw-doloop.c
index 77c8149..b6184a2 100644
--- a/gcc/hw-doloop.c
+++ b/gcc/hw-doloop.c
@@ -357,7 +357,7 @@ discover_loops (bitmap_obstack *loop_stack, struct hw_doloop_hooks *hooks)
   /* Find all the possible loop tails.  This means searching for every
      loop_end instruction.  For each one found, create a hwloop_info
      structure and add the head block to the work list. */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx tail = BB_END (bb);
       rtx insn, reg;
@@ -480,7 +480,7 @@ set_bb_indices (void)
   intptr_t index;
 
   index = 0;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bb->aux = (void *) index++;
 }
 
@@ -537,7 +537,7 @@ reorder_loops (hwloop_info loops)
       loops = loops->next;
     }
   
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
 	bb->aux = bb->next_bb;
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index ac0276c..543a70d 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -4408,7 +4408,7 @@ if_convert (bool after_combine)
 	fprintf (dump_file, "\n\n========== Pass %d ==========\n", pass);
 #endif
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
           basic_block new_bb;
           while (!df_get_bb_dirty (bb)
diff --git a/gcc/init-regs.c b/gcc/init-regs.c
index 2a15b3e..d26ee9b 100644
--- a/gcc/init-regs.c
+++ b/gcc/init-regs.c
@@ -59,7 +59,7 @@ initialize_uninitialized_regs (void)
 
   df_analyze ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
       bitmap lr = DF_LR_IN (bb);
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 83dc53e..7b16b7e 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -4726,7 +4726,7 @@ ipcp_transform_function (struct cgraph_node *node)
   descriptors.safe_grow_cleared (param_count);
   ipa_populate_param_decls (node, descriptors);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       {
 	struct ipa_agg_replacement_value *v;
diff --git a/gcc/ipa-pure-const.c b/gcc/ipa-pure-const.c
index d84b35f..a60e078 100644
--- a/gcc/ipa-pure-const.c
+++ b/gcc/ipa-pure-const.c
@@ -754,7 +754,7 @@ analyze_function (struct cgraph_node *fn, bool ipa)
 
   push_cfun (DECL_STRUCT_FUNCTION (decl));
 
-  FOR_EACH_BB (this_block)
+  FOR_EACH_BB_FN (this_block, cfun)
     {
       gimple_stmt_iterator gsi;
       struct walk_stmt_info wi;
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index d5dfb8d..390adf1 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -1070,7 +1070,7 @@ find_split_points (int overall_time, int overall_size)
         stack.pop ();
     }
   ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = NULL;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bb->aux = NULL;
   stack.release ();
   BITMAP_FREE (current.ssa_names_to_pass);
@@ -1595,7 +1595,7 @@ execute_split_functions (void)
   /* Compute local info about basic blocks and determine function size/time.  */
   bb_info_vec.safe_grow_cleared (last_basic_block_for_fn (cfun) + 1);
   memset (&best_split_point, 0, sizeof (best_split_point));
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       int time = 0;
       int size = 0;
diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index f9258ee..660fb0d 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -341,7 +341,7 @@ form_loop_tree (void)
   /* We can not use loop/bb node access macros because of potential
      checking and because the nodes are not initialized enough
      yet.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bb_node = &ira_bb_nodes[bb->index];
       bb_node->bb = bb;
diff --git a/gcc/ira-costs.c b/gcc/ira-costs.c
index d7299e6..c8d64d5 100644
--- a/gcc/ira-costs.c
+++ b/gcc/ira-costs.c
@@ -1585,7 +1585,7 @@ find_costs_and_classes (FILE *dump_file)
 	{
 	  basic_block bb;
 
-	  FOR_EACH_BB (bb)
+	  FOR_EACH_BB_FN (bb, cfun)
 	    process_bb_for_costs (bb);
 	}
 
diff --git a/gcc/ira-emit.c b/gcc/ira-emit.c
index d59461b..196efa0 100644
--- a/gcc/ira-emit.c
+++ b/gcc/ira-emit.c
@@ -986,7 +986,7 @@ emit_moves (void)
   edge e;
   rtx insns, tmp;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       if (at_bb_start[bb->index] != NULL)
 	{
@@ -1203,7 +1203,7 @@ add_ranges_and_copies (void)
   bitmap live_through;
 
   live_through = ira_allocate_bitmap ();
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* It does not matter what loop_tree_node (of source or
 	 destination block) to use for searching allocnos by their
@@ -1260,7 +1260,7 @@ ira_emit (bool loops_p)
   ira_free_bitmap (renamed_regno_bitmap);
   ira_free_bitmap (local_allocno_bitmap);
   setup_entered_from_non_parent_p ();
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       at_bb_start[bb->index] = NULL;
       at_bb_end[bb->index] = NULL;
@@ -1275,15 +1275,15 @@ ira_emit (bool loops_p)
   memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
   memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
   curr_tick = 0;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     unify_moves (bb, true);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     unify_moves (bb, false);
   move_vec.create (ira_allocnos_num);
   emit_moves ();
   add_ranges_and_copies ();
   /* Clean up: */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       free_move_list (at_bb_start[bb->index]);
       free_move_list (at_bb_end[bb->index]);
@@ -1301,7 +1301,7 @@ ira_emit (bool loops_p)
      reload assumes initial insn codes defined.  The insn codes can be
      invalidated by CFG infrastructure for example in jump
      redirection.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS_REVERSE (bb, insn)
       if (INSN_P (insn))
 	recog_memoized (insn);
diff --git a/gcc/ira.c b/gcc/ira.c
index ae35035..b4ae0ca 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -2135,7 +2135,7 @@ decrease_live_ranges_number (void)
   if (ira_dump_file)
     fprintf (ira_dump_file, "Starting decreasing number of live ranges...\n");
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       {
 	set = single_set (insn);
@@ -2358,7 +2358,7 @@ compute_regs_asm_clobbered (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
       FOR_BB_INSNS_REVERSE (bb, insn)
@@ -2951,7 +2951,7 @@ mark_elimination (int from, int to)
   basic_block bb;
   bitmap r;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       r = DF_LR_IN (bb);
       if (bitmap_bit_p (r, from))
@@ -3473,7 +3473,7 @@ update_equiv_regs (void)
      paradoxical subreg. Don't set such reg sequivalent to a mem,
      because lra will not substitute such equiv memory in order to
      prevent access beyond allocated memory for paradoxical memory subreg.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       if (NONDEBUG_INSN_P (insn))
 	for_each_rtx (&insn, set_paradoxical_subreg, (void *) pdx_subregs);
@@ -3481,7 +3481,7 @@ update_equiv_regs (void)
   /* Scan the insns and find which registers have equivalences.  Do this
      in a separate scan of the insns because (due to -fcse-follow-jumps)
      a register can be set below its use.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       loop_depth = bb_loop_depth (bb);
 
@@ -3905,7 +3905,7 @@ update_equiv_regs (void)
 
   if (!bitmap_empty_p (cleared_regs))
     {
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
 	  bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
@@ -4532,7 +4532,7 @@ find_moveable_pseudos (void)
   bitmap_initialize (&used, 0);
   bitmap_initialize (&set, 0);
   bitmap_initialize (&unusable_as_input, 0);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
       bitmap transp = bb_transp_live + bb->index;
@@ -4595,7 +4595,7 @@ find_moveable_pseudos (void)
   bitmap_clear (&used);
   bitmap_clear (&set);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bitmap local = bb_local + bb->index;
       rtx insn;
@@ -4824,7 +4824,7 @@ find_moveable_pseudos (void)
 	}
     }
   
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bitmap_clear (bb_local + bb->index);
       bitmap_clear (bb_transp_live + bb->index);
@@ -4921,7 +4921,7 @@ split_live_ranges_for_shrink_wrap (void)
   bitmap_initialize (&reachable, 0);
   queue.create (n_basic_blocks_for_fn (cfun));
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       if (CALL_P (insn) && !SIBLING_CALL_P (insn))
 	{
@@ -5145,7 +5145,7 @@ allocate_initial_values (void)
 		     fixed regs are accepted.  */
 		  SET_REGNO (preg, new_regno);
 		  /* Update global register liveness information.  */
-		  FOR_EACH_BB (bb)
+		  FOR_EACH_BB_FN (bb, cfun)
 		    {
 		      if (REGNO_REG_SET_P (df_get_live_in (bb), regno))
 			SET_REGNO_REG_SET (df_get_live_in (bb), new_regno);
diff --git a/gcc/jump.c b/gcc/jump.c
index a27aaa9..5eefeef 100644
--- a/gcc/jump.c
+++ b/gcc/jump.c
@@ -275,7 +275,7 @@ mark_all_labels (rtx f)
   if (current_ir_type () == IR_RTL_CFGLAYOUT)
     {
       basic_block bb;
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  /* In cfglayout mode, we don't bother with trivial next-insn
 	     propagation of LABEL_REFs into JUMP_LABEL.  This will be
diff --git a/gcc/lcm.c b/gcc/lcm.c
index 1129d6c..0b528d9 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -281,7 +281,7 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
 
   /* Add all the blocks to the worklist.  This prevents an early exit from
      the loop given our optimistic initialization of LATER above.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       *qin++ = bb;
       bb->aux = bb;
@@ -350,7 +350,7 @@ compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
   int x;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bitmap_and_compl (del[bb->index], antloc[bb->index],
 			laterin[bb->index]);
 
@@ -497,7 +497,7 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
 
   /* Put every block on the worklist; this is necessary because of the
      optimistic initialization of AVOUT above.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       *qin++ = bb;
       bb->aux = bb;
@@ -638,7 +638,7 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
 
   /* Add all the blocks to the worklist.  This prevents an early exit
      from the loop given our optimistic initialization of NEARER.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       *tos++ = bb;
       bb->aux = bb;
@@ -695,7 +695,7 @@ compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
   int x;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bitmap_and_compl (del[bb->index], st_avloc[bb->index],
 			nearerout[bb->index]);
 
diff --git a/gcc/loop-init.c b/gcc/loop-init.c
index 664b1ac..3dc6953 100644
--- a/gcc/loop-init.c
+++ b/gcc/loop-init.c
@@ -213,7 +213,7 @@ fix_loop_structure (bitmap changed_bbs)
   /* Remember the depth of the blocks in the loop hierarchy, so that we can
      recognize blocks whose loop nesting relationship has changed.  */
   if (changed_bbs)
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       bb->aux = (void *) (size_t) loop_depth (bb->loop_father);
 
   /* Remove the dead loops from structures.  We start from the innermost
@@ -256,7 +256,7 @@ fix_loop_structure (bitmap changed_bbs)
   /* Mark the blocks whose loop has changed.  */
   if (changed_bbs)
     {
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  if ((void *) (size_t) loop_depth (bb->loop_father) != bb->aux)
 	    bitmap_set_bit (changed_bbs, bb->index);
diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c
index 9f1fc07..f47bd50 100644
--- a/gcc/loop-invariant.c
+++ b/gcc/loop-invariant.c
@@ -1825,7 +1825,7 @@ calculate_loop_reg_pressure (void)
       }
   ira_setup_eliminable_regset ();
   bitmap_initialize (&curr_regs_live, &reg_obstack);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       curr_loop = bb->loop_father;
       if (curr_loop == current_loops->tree_root)
diff --git a/gcc/lower-subreg.c b/gcc/lower-subreg.c
index 60c47b9..0b0e397 100644
--- a/gcc/lower-subreg.c
+++ b/gcc/lower-subreg.c
@@ -1463,7 +1463,7 @@ decompose_multiword_subregs (bool decompose_copies)
   memset (reg_copy_graph.address (), 0, sizeof (bitmap) * max);
 
   speed_p = optimize_function_for_speed_p (cfun);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
 
@@ -1543,7 +1543,7 @@ decompose_multiword_subregs (bool decompose_copies)
       EXECUTE_IF_SET_IN_BITMAP (decomposable_context, 0, regno, iter)
 	decompose_register (regno);
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  rtx insn;
 
diff --git a/gcc/lra-assigns.c b/gcc/lra-assigns.c
index 88fc693..41ee286 100644
--- a/gcc/lra-assigns.c
+++ b/gcc/lra-assigns.c
@@ -1302,7 +1302,7 @@ assign_by_spills (void)
 
       /* FIXME: Look up the changed insns in the cached LRA insn data using
 	 an EXECUTE_IF_SET_IN_BITMAP over changed_insns.  */
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	FOR_BB_INSNS (bb, insn)
 	if (bitmap_bit_p (&changed_insns, INSN_UID (insn)))
 	  {
diff --git a/gcc/lra-coalesce.c b/gcc/lra-coalesce.c
index 859e02f..94a21f0 100644
--- a/gcc/lra-coalesce.c
+++ b/gcc/lra-coalesce.c
@@ -239,7 +239,7 @@ lra_coalesce (void)
   mv_num = 0;
   /* Collect moves.  */
   coalesced_moves = 0;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_BB_INSNS_SAFE (bb, insn, next)
 	if (INSN_P (insn)
@@ -297,7 +297,7 @@ lra_coalesce (void)
 	}
     }
   bitmap_initialize (&used_pseudos_bitmap, &reg_obstack);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       update_live_info (df_get_live_in (bb));
       update_live_info (df_get_live_out (bb));
diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c
index bb5242a..f04166c 100644
--- a/gcc/lra-constraints.c
+++ b/gcc/lra-constraints.c
@@ -5300,7 +5300,7 @@ lra_inheritance (void)
   bitmap_initialize (&live_regs, &reg_obstack);
   bitmap_initialize (&temp_bitmap, &reg_obstack);
   bitmap_initialize (&ebb_global_regs, &reg_obstack);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       start_bb = bb;
       if (lra_dump_file != NULL)
@@ -5401,7 +5401,7 @@ remove_inheritance_pseudos (bitmap remove_pseudos)
      because we need to marks insns affected by previous
      inheritance/split pass for processing by the subsequent
      constraint pass.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       fix_bb_live_info (df_get_live_in (bb), remove_pseudos);
       fix_bb_live_info (df_get_live_out (bb), remove_pseudos);
diff --git a/gcc/lra-eliminations.c b/gcc/lra-eliminations.c
index 915e3a0..6c52bb3 100644
--- a/gcc/lra-eliminations.c
+++ b/gcc/lra-eliminations.c
@@ -1284,7 +1284,7 @@ init_elimination (void)
   struct elim_table *ep;
 
   init_elim_table ();
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       curr_sp_change = 0;
       stop_to_sp_elimination_p = false;
diff --git a/gcc/lra-spills.c b/gcc/lra-spills.c
index 6bebb92..1e5f52b 100644
--- a/gcc/lra-spills.c
+++ b/gcc/lra-spills.c
@@ -280,7 +280,7 @@ assign_spill_hard_regs (int *pseudo_regnos, int n)
 	  add_to_hard_reg_set (&reserved_hard_regs[p],
 			       lra_reg_info[i].biggest_mode, hard_regno);
   bitmap_initialize (&ok_insn_bitmap, &reg_obstack);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       if (DEBUG_INSN_P (insn)
 	  || ((set = single_set (insn)) != NULL_RTX
@@ -478,7 +478,7 @@ spill_pseudos (void)
 	  bitmap_ior_into (&changed_insns, &lra_reg_info[i].insn_bitmap);
 	}
     }
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_BB_INSNS (bb, insn)
 	if (bitmap_bit_p (&changed_insns, INSN_UID (insn)))
@@ -686,7 +686,7 @@ lra_final_code_change (void)
     if (lra_reg_info[i].nrefs != 0
 	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
       SET_REGNO (regno_reg_rtx[i], hard_regno);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS_SAFE (bb, insn, curr)
       if (INSN_P (insn))
 	{
diff --git a/gcc/lra.c b/gcc/lra.c
index 50a0786..21b8af1 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -1960,7 +1960,7 @@ remove_scratches (void)
   scratches.create (get_max_uid ());
   bitmap_initialize (&scratch_bitmap, &reg_obstack);
   bitmap_initialize (&scratch_operand_bitmap, &reg_obstack);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
     if (INSN_P (insn))
       {
@@ -2049,7 +2049,7 @@ check_rtl (bool final_p)
   rtx insn;
 
   lra_assert (! final_p || reload_completed);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
     if (NONDEBUG_INSN_P (insn)
 	&& GET_CODE (PATTERN (insn)) != USE
@@ -2090,7 +2090,7 @@ has_nonexceptional_receiver (void)
   /* First determine which blocks can reach exit via normal paths.  */
   tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bb->flags &= ~BB_REACHABLE;
 
   /* Place the exit block on our worklist.  */
@@ -2165,7 +2165,7 @@ update_inc_notes (void)
   basic_block bb;
   rtx insn;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
     if (NONDEBUG_INSN_P (insn))
       {
diff --git a/gcc/mcf.c b/gcc/mcf.c
index e709f2a..f9b5505 100644
--- a/gcc/mcf.c
+++ b/gcc/mcf.c
@@ -1281,7 +1281,7 @@ adjust_cfg_counts (fixup_graph_type *fixup_graph)
     {
       fprintf (dump_file, "\nCheck %s() CFG flow conservation:\n",
 	       current_function_name ());
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
         {
           if ((bb->count != sum_edge_counts (bb->preds))
                || (bb->count != sum_edge_counts (bb->succs)))
diff --git a/gcc/mode-switching.c b/gcc/mode-switching.c
index a9e5069..4e31d68 100644
--- a/gcc/mode-switching.c
+++ b/gcc/mode-switching.c
@@ -516,7 +516,7 @@ optimize_mode_switching (void)
       /* Determine what the first use (if any) need for a mode of entity E is.
 	 This will be the mode that is anticipatable for this block.
 	 Also compute the initial transparency settings.  */
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  struct seginfo *ptr;
 	  int last_mode = no_mode;
@@ -624,7 +624,7 @@ optimize_mode_switching (void)
 	  int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
 	  struct bb_info *info = bb_info[j];
 
-	  FOR_EACH_BB (bb)
+	  FOR_EACH_BB_FN (bb, cfun)
 	    {
 	      if (info[bb->index].seginfo->mode == m)
 		bitmap_set_bit (antic[bb->index], j);
@@ -637,7 +637,7 @@ optimize_mode_switching (void)
       /* Calculate the optimal locations for the
 	 placement mode switches to modes with priority I.  */
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	bitmap_not (kill[bb->index], transp[bb->index]);
       edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
 				kill, &insert, &del);
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index f313044..ba8d020 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -3343,7 +3343,7 @@ rest_of_handle_sms (void)
   max_regno = max_reg_num ();
 
   /* Finalize layout changes.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
       bb->aux = bb->next_bb;
   free_dominance_info (CDI_DOMINATORS);
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index c929157..05fca40 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -4545,7 +4545,7 @@ optimize_omp_library_calls (gimple entry_stmt)
 		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
 					  OMP_CLAUSE_UNTIED) != NULL);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       {
 	gimple call = gsi_stmt (gsi);
@@ -4849,7 +4849,7 @@ expand_omp_taskreg (struct omp_region *region)
 	  basic_block bb;
 	  bool changed = false;
 
-	  FOR_EACH_BB (bb)
+	  FOR_EACH_BB_FN (bb, cfun)
 	    changed |= gimple_purge_dead_eh_edges (bb);
 	  if (changed)
 	    cleanup_tree_cfg ();
@@ -7939,7 +7939,7 @@ expand_omp_target (struct omp_region *region)
 	  basic_block bb;
 	  bool changed = false;
 
-	  FOR_EACH_BB (bb)
+	  FOR_EACH_BB_FN (bb, cfun)
 	    changed |= gimple_purge_dead_eh_edges (bb);
 	  if (changed)
 	    cleanup_tree_cfg ();
diff --git a/gcc/postreload-gcse.c b/gcc/postreload-gcse.c
index 9ce17e5..a1204f9 100644
--- a/gcc/postreload-gcse.c
+++ b/gcc/postreload-gcse.c
@@ -266,7 +266,7 @@ alloc_mem (void)
   /* Find the largest UID and create a mapping from UIDs to CUIDs.  */
   uid_cuid = XCNEWVEC (int, get_max_uid () + 1);
   i = 1;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       {
         if (INSN_P (insn))
@@ -828,7 +828,7 @@ compute_hash_table (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
 
diff --git a/gcc/postreload.c b/gcc/postreload.c
index b0c6342..bfa5a38 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -213,7 +213,7 @@ reload_cse_regs_1 (void)
   cselib_init (CSELIB_RECORD_MEMORY);
   init_alias_analysis ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       {
 	if (INSN_P (insn))
diff --git a/gcc/predict.c b/gcc/predict.c
index 6bb1b2c..78efb72 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -1955,7 +1955,7 @@ strip_predict_hints (void)
   gimple ass_stmt;
   tree var;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator bi;
       for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
@@ -2226,7 +2226,7 @@ tree_bb_level_predictions (void)
 
   apply_return_prediction ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
@@ -2400,10 +2400,10 @@ tree_estimate_probability (void)
   if (number_of_loops (cfun) > 1)
     predict_loops ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     tree_estimate_probability_bb (bb);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     combine_predictions_for_bb (bb);
 
 #ifdef ENABLE_CHECKING
@@ -2928,7 +2928,7 @@ expensive_function_p (int threshold)
 
   /* Maximally BB_FREQ_MAX^2 so overflow won't happen.  */
   limit = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency * threshold;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
 
@@ -2997,7 +2997,7 @@ estimate_bb_frequencies (bool force)
       estimate_loops ();
 
       memcpy (&freq_max, &real_zero, sizeof (real_zero));
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	if (sreal_compare (&freq_max, &BLOCK_INFO (bb)->frequency) < 0)
 	  memcpy (&freq_max, &BLOCK_INFO (bb)->frequency, sizeof (freq_max));
 
@@ -3055,7 +3055,7 @@ compute_function_frequency (void)
      functions to unlikely and that is most of what we care about.  */
   if (!cfun->after_inlining)
     node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       if (maybe_hot_bb_p (cfun, bb))
 	{
diff --git a/gcc/profile.c b/gcc/profile.c
index 24c16aa..62b126c 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -354,7 +354,7 @@ is_inconsistent (void)
 {
   basic_block bb;
   bool inconsistent = false;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       inconsistent |= is_edge_inconsistent (bb->preds);
       if (!dump_file && inconsistent)
@@ -692,7 +692,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
 
   /* If the graph has been correctly solved, every block will have a
      succ and pred count of zero.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gcc_assert (!BB_INFO (bb)->succ_count && !BB_INFO (bb)->pred_count);
     }
@@ -1021,7 +1021,7 @@ branch_prob (void)
      We also add fake exit edges for each call and asm statement in the
      basic, since it may not return.  */
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       int need_exit_edge = 0, need_entry_edge = 0;
       int have_exit_edge = 0, have_entry_edge = 0;
@@ -1260,7 +1260,7 @@ branch_prob (void)
       /* Initialize the output.  */
       output_location (NULL, 0, NULL, NULL);
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  gimple_stmt_iterator gsi;
 	  gcov_position_t offset = 0;
diff --git a/gcc/ree.c b/gcc/ree.c
index 87427fd..9938e98 100644
--- a/gcc/ree.c
+++ b/gcc/ree.c
@@ -835,7 +835,7 @@ find_removable_extensions (void)
   rtx insn, set;
   unsigned *def_map = XCNEWVEC (unsigned, max_insn_uid);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       {
 	if (!NONDEBUG_INSN_P (insn))
diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c
index 6aad466..87b9821 100644
--- a/gcc/reg-stack.c
+++ b/gcc/reg-stack.c
@@ -2846,7 +2846,7 @@ compensate_edges (void)
 
   starting_stack_p = false;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
       {
         edge e;
@@ -3153,7 +3153,7 @@ convert_regs (void)
 
   /* ??? Process all unreachable blocks.  Though there's no excuse
      for keeping these even when not optimizing.  */
-  FOR_EACH_BB (b)
+  FOR_EACH_BB_FN (b, cfun)
     {
       block_info bi = BLOCK_INFO (b);
 
@@ -3212,7 +3212,7 @@ reg_to_stack (void)
 
   /* Set up block info for each basic block.  */
   alloc_aux_for_blocks (sizeof (struct block_info_def));
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       block_info bi = BLOCK_INFO (bb);
       edge_iterator ei;
diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index 0438875..3c9ef3d 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -1076,7 +1076,7 @@ copyprop_hardreg_forward (void)
       = create_alloc_pool ("debug insn changes pool",
 			   sizeof (struct queued_debug_insn_change), 256);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bitmap_set_bit (visited, bb->index);
 
@@ -1112,7 +1112,7 @@ copyprop_hardreg_forward (void)
 
   if (MAY_HAVE_DEBUG_INSNS)
     {
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	if (bitmap_bit_p (visited, bb->index)
 	    && all_vd[bb->index].n_debug_insn_changes)
 	  {
diff --git a/gcc/reginfo.c b/gcc/reginfo.c
index db66a09..46288eb 100644
--- a/gcc/reginfo.c
+++ b/gcc/reginfo.c
@@ -1266,7 +1266,7 @@ init_subregs_of_mode (void)
   bitmap_obstack_initialize (&srom_obstack);
   subregs_of_mode = BITMAP_ALLOC (&srom_obstack);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     FOR_BB_INSNS (bb, insn)
       if (NONDEBUG_INSN_P (insn))
         find_subregs_of_mode (PATTERN (insn), subregs_of_mode);
diff --git a/gcc/regrename.c b/gcc/regrename.c
index 3c242fb..9ff94d0 100644
--- a/gcc/regrename.c
+++ b/gcc/regrename.c
@@ -674,7 +674,7 @@ regrename_analyze (bitmap bb_mask)
   /* Gather some information about the blocks in this function.  */
   rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks_for_fn (cfun));
   i = 0;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       struct bb_rename_info *ri = rename_info + i;
       ri->bb = bb;
@@ -778,7 +778,7 @@ regrename_analyze (bitmap bb_mask)
      We perform the analysis for both incoming and outgoing edges, but we
      only need to merge once (in the second part, after verifying outgoing
      edges).  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
       unsigned j;
@@ -843,7 +843,7 @@ regrename_analyze (bitmap bb_mask)
 	    }
 	}
     }
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
       unsigned j;
@@ -920,7 +920,7 @@ regrename_analyze (bitmap bb_mask)
 
   free (rename_info);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bb->aux = NULL;
 }
 
diff --git a/gcc/regstat.c b/gcc/regstat.c
index 48d27c3..6a191d8 100644
--- a/gcc/regstat.c
+++ b/gcc/regstat.c
@@ -375,7 +375,7 @@ regstat_compute_ri (void)
   reg_info_p = XCNEWVEC (struct reg_info_t, max_regno);
   local_live_last_luid = XNEWVEC (int, max_regno);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       regstat_bb_compute_ri (bb->index, live, artificial_uses,
 			     local_live, local_processed,
@@ -522,7 +522,7 @@ regstat_compute_calls_crossed (void)
   reg_info_p_size = max_regno;
   reg_info_p = XCNEWVEC (struct reg_info_t, max_regno);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       regstat_bb_compute_calls_crossed (bb->index, live);
     }
diff --git a/gcc/reload1.c b/gcc/reload1.c
index 15c6db5..47439ce 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -613,7 +613,7 @@ has_nonexceptional_receiver (void)
   /* First determine which blocks can reach exit via normal paths.  */
   tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bb->flags &= ~BB_REACHABLE;
 
   /* Place the exit block on our worklist.  */
@@ -641,7 +641,7 @@ has_nonexceptional_receiver (void)
 
   /* Now see if there's a reachable block with an exceptional incoming
      edge.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb->flags & BB_REACHABLE && bb_has_abnormal_pred (bb))
       return true;
 
@@ -1048,7 +1048,7 @@ reload (rtx first, int global)
      pseudo.  */
 
   if (! frame_pointer_needed)
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       bitmap_clear_bit (df_get_live_in (bb), HARD_FRAME_POINTER_REGNUM);
 
   /* Come here (with failure set nonzero) if we can't get enough spill
@@ -1592,7 +1592,7 @@ calculate_elim_costs_all_insns (void)
   set_initial_elim_offsets ();
   set_initial_label_offsets ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
       elim_bb = bb;
diff --git a/gcc/resource.c b/gcc/resource.c
index 861d969..442c852 100644
--- a/gcc/resource.c
+++ b/gcc/resource.c
@@ -1219,7 +1219,7 @@ init_resource_info (rtx epilogue_insn)
   bb_ticks = XCNEWVEC (int, last_basic_block_for_fn (cfun));
 
   /* Set the BLOCK_FOR_INSN of each label that starts a basic block.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (LABEL_P (BB_HEAD (bb)))
       BLOCK_FOR_INSN (BB_HEAD (bb)) = bb;
 }
@@ -1258,7 +1258,7 @@ free_resource_info (void)
       bb_ticks = NULL;
     }
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (LABEL_P (BB_HEAD (bb)))
       BLOCK_FOR_INSN (BB_HEAD (bb)) = NULL;
 }
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index 73af0a7..d4baec5 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -637,7 +637,7 @@ schedule_ebbs (void)
   schedule_ebbs_init ();
 
   /* Schedule every region in the subroutine.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx head = BB_HEAD (bb);
 
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index a85ee5b..7fa9759 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -272,7 +272,7 @@ is_cfg_nonregular (void)
 
   /* If we have insns which refer to labels as non-jumped-to operands,
      then we consider the cfg not well structured.  */
-  FOR_EACH_BB (b)
+  FOR_EACH_BB_FN (b, cfun)
     FOR_BB_INSNS (b, insn)
       {
 	rtx note, next, set, dest;
@@ -317,7 +317,7 @@ is_cfg_nonregular (void)
      Unreachable loops with a single block are detected here.  This
      test is redundant with the one in find_rgns, but it's much
      cheaper to go ahead and catch the trivial case here.  */
-  FOR_EACH_BB (b)
+  FOR_EACH_BB_FN (b, cfun)
     {
       if (EDGE_COUNT (b->preds) == 0
 	  || (single_pred_p (b)
@@ -479,7 +479,7 @@ find_single_block_region (bool ebbs_p)
       probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
     probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
 
-    FOR_EACH_BB (ebb_start)
+    FOR_EACH_BB_FN (ebb_start, cfun)
       {
         RGN_NR_BLOCKS (nr_regions) = 0;
         RGN_BLOCKS (nr_regions) = i;
@@ -512,7 +512,7 @@ find_single_block_region (bool ebbs_p)
       }
   }
   else
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       {
         rgn_bb_table[nr_regions] = bb->index;
         RGN_NR_BLOCKS (nr_regions) = 1;
@@ -762,7 +762,7 @@ haifa_find_rgns (void)
      the entry node by placing a nonzero value in dfs_nr.  Thus if
      dfs_nr is zero for any block, then it must be unreachable.  */
   unreachable = 0;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (dfs_nr[bb->index] == 0)
       {
 	unreachable = 1;
@@ -773,7 +773,7 @@ haifa_find_rgns (void)
      to hold degree counts.  */
   degree = dfs_nr;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     degree[bb->index] = EDGE_COUNT (bb->preds);
 
   /* Do not perform region scheduling if there are any unreachable
@@ -807,7 +807,7 @@ haifa_find_rgns (void)
 
       /* Find blocks which are inner loop headers.  We still have non-reducible
 	 loops to consider at this point.  */
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
 	    {
@@ -826,7 +826,7 @@ haifa_find_rgns (void)
 		 If there exists a block that is not dominated by the loop
 		 header, then the block is reachable from outside the loop
 		 and thus the loop is not a natural loop.  */
-	      FOR_EACH_BB (jbb)
+	      FOR_EACH_BB_FN (jbb, cfun)
 		{
 		  /* First identify blocks in the loop, except for the loop
 		     entry block.  */
@@ -874,7 +874,7 @@ haifa_find_rgns (void)
 		 Place those blocks into the queue.  */
 	      if (no_loops)
 		{
-		  FOR_EACH_BB (jbb)
+		  FOR_EACH_BB_FN (jbb, cfun)
 		    /* Leaf nodes have only a single successor which must
 		       be EXIT_BLOCK.  */
 		    if (single_succ_p (jbb)
@@ -1052,7 +1052,7 @@ haifa_find_rgns (void)
 
   /* Any block that did not end up in a region is placed into a region
      by itself.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (degree[bb->index] >= 0)
       {
 	rgn_bb_table[idx] = bb->index;
@@ -3281,7 +3281,7 @@ sched_rgn_local_init (int rgn)
 
       /* Use ->aux to implement EDGE_TO_BIT mapping.  */
       rgn_nr_edges = 0;
-      FOR_EACH_BB (block)
+      FOR_EACH_BB_FN (block, cfun)
 	{
 	  if (CONTAINING_RGN (block->index) != rgn)
 	    continue;
@@ -3291,7 +3291,7 @@ sched_rgn_local_init (int rgn)
 
       rgn_edges = XNEWVEC (edge, rgn_nr_edges);
       rgn_nr_edges = 0;
-      FOR_EACH_BB (block)
+      FOR_EACH_BB_FN (block, cfun)
 	{
 	  if (CONTAINING_RGN (block->index) != rgn)
 	    continue;
@@ -3312,7 +3312,7 @@ sched_rgn_local_init (int rgn)
       /* Cleanup ->aux used for EDGE_TO_BIT mapping.  */
       /* We don't need them anymore.  But we want to avoid duplication of
 	 aux fields in the newly created edges.  */
-      FOR_EACH_BB (block)
+      FOR_EACH_BB_FN (block, cfun)
 	{
 	  if (CONTAINING_RGN (block->index) != rgn)
 	    continue;
diff --git a/gcc/sel-sched-dump.c b/gcc/sel-sched-dump.c
index 347b5eb..2e46770 100644
--- a/gcc/sel-sched-dump.c
+++ b/gcc/sel-sched-dump.c
@@ -750,7 +750,7 @@ sel_dump_cfg_2 (FILE *f, int flags)
   if (flags & SEL_DUMP_CFG_FUNCTION_NAME)
     fprintf (f, "function [label = \"%s\"];\n", current_function_name ());
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       insn_t insn = BB_HEAD (bb);
       insn_t next_tail = NEXT_INSN (BB_END (bb));
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index f7cc9ec..942d909 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -4321,7 +4321,7 @@ init_lv_sets (void)
   basic_block bb;
 
   /* Initialize of LV sets.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     init_lv_set (bb);
 
   /* Don't forget EXIT_BLOCK.  */
@@ -4349,7 +4349,7 @@ free_lv_sets (void)
   free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
 
   /* Free LV sets.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (BB_LV_SET (bb))
       free_lv_set (bb);
 }
@@ -6155,7 +6155,7 @@ make_regions_from_the_rest (void)
   for (i = 0; i < last_basic_block_for_fn (cfun); i++)
     loop_hdr[i] = -1;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       if (bb->loop_father && !bb->loop_father->num == 0
 	  && !(bb->flags & BB_IRREDUCIBLE_LOOP))
@@ -6165,7 +6165,7 @@ make_regions_from_the_rest (void)
   /* For each basic block degree is calculated as the number of incoming
      edges, that are going out of bbs that are not yet scheduled.
      The basic blocks that are scheduled have degree value of zero.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       degree[bb->index] = 0;
 
@@ -6183,7 +6183,7 @@ make_regions_from_the_rest (void)
 
   /* Any block that did not end up in a region is placed into a region
      by itself.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (degree[bb->index] >= 0)
       {
 	rgn_bb_table[cur_rgn_blocks] = bb->index;
diff --git a/gcc/sese.c b/gcc/sese.c
index 7e59ac8..5e47ef7 100644
--- a/gcc/sese.c
+++ b/gcc/sese.c
@@ -156,7 +156,7 @@ build_sese_loop_nests (sese region)
   basic_block bb;
   struct loop *loop0, *loop1;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb_in_sese_p (bb, region))
       {
 	struct loop *loop = bb->loop_father;
@@ -303,10 +303,10 @@ sese_build_liveouts (sese region, bitmap liveouts)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     sese_build_liveouts_bb (region, liveouts, bb);
   if (MAY_HAVE_DEBUG_STMTS)
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       sese_reset_debug_liveouts_bb (region, liveouts, bb);
 }
 
diff --git a/gcc/stack-ptr-mod.c b/gcc/stack-ptr-mod.c
index 68ccd16..acca801 100644
--- a/gcc/stack-ptr-mod.c
+++ b/gcc/stack-ptr-mod.c
@@ -58,7 +58,7 @@ notice_stack_pointer_modification (void)
      been used.  */
   crtl->sp_is_unchanging = !cfun->calls_alloca;
   if (crtl->sp_is_unchanging)
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       FOR_BB_INSNS (bb, insn)
         {
 	  if (INSN_P (insn))
diff --git a/gcc/store-motion.c b/gcc/store-motion.c
index 808b0a7..57c991a 100644
--- a/gcc/store-motion.c
+++ b/gcc/store-motion.c
@@ -656,7 +656,7 @@ compute_store_table (void)
   already_set = XNEWVEC (int, max_gcse_regno);
 
   /* Find all the stores we care about.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* First compute the registers set in this block.  */
       FOR_BB_INSNS (bb, insn)
@@ -1061,7 +1061,7 @@ build_store_vectors (void)
   bitmap_vector_clear (st_transp, last_basic_block_for_fn (cfun));
   regs_set_in_block = XNEWVEC (int, max_gcse_regno);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       memset (regs_set_in_block, 0, sizeof (int) * max_gcse_regno);
 
@@ -1188,7 +1188,7 @@ one_store_motion_pass (void)
 
       /* Now we want to insert the new stores which are going to be needed.  */
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	if (bitmap_bit_p (st_delete_map[bb->index], ptr->index))
 	  {
 	    delete_store (ptr, bb);
diff --git a/gcc/testsuite/g++.dg/plugin/selfassign.c b/gcc/testsuite/g++.dg/plugin/selfassign.c
index be5a204..041f25d 100644
--- a/gcc/testsuite/g++.dg/plugin/selfassign.c
+++ b/gcc/testsuite/g++.dg/plugin/selfassign.c
@@ -261,7 +261,7 @@ execute_warn_self_assign (void)
   gimple_stmt_iterator gsi;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
         warn_self_assign (gsi_stmt (gsi));
diff --git a/gcc/testsuite/gcc.dg/plugin/selfassign.c b/gcc/testsuite/gcc.dg/plugin/selfassign.c
index be5a204..041f25d 100644
--- a/gcc/testsuite/gcc.dg/plugin/selfassign.c
+++ b/gcc/testsuite/gcc.dg/plugin/selfassign.c
@@ -261,7 +261,7 @@ execute_warn_self_assign (void)
   gimple_stmt_iterator gsi;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
         warn_self_assign (gsi_stmt (gsi));
diff --git a/gcc/tracer.c b/gcc/tracer.c
index de6877a..a40cbeb 100644
--- a/gcc/tracer.c
+++ b/gcc/tracer.c
@@ -256,7 +256,7 @@ tail_duplicate (void)
   branch_ratio_cutoff =
     (REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO));
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       int n = count_insns (bb);
       if (!ignore_bb_p (bb))
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index 2a6597d..c9af680 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -2656,7 +2656,7 @@ compute_transaction_bits (void)
      certainly don't need it to calculate CDI_DOMINATOR info.  */
   gate_tm_init ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bb->flags &= ~BB_IN_TRANSACTION;
 
   for (region = all_tm_regions; region; region = region->next)
diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c
index 19402e3..32d0d5a 100644
--- a/gcc/tree-call-cdce.c
+++ b/gcc/tree-call-cdce.c
@@ -876,7 +876,7 @@ tree_call_cdce (void)
   gimple_stmt_iterator i;
   bool something_changed = false;
   auto_vec<gimple> cond_dead_built_in_calls;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* Collect dead call candidates.  */
       for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index ec365b5..98434ac 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -302,7 +302,7 @@ replace_loop_annotate ()
     }
 
   /* Remove IFN_ANNOTATE. Safeguard for the case loop->latch == NULL.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gsi = gsi_last_bb (bb);
       stmt = gsi_stmt (gsi);
@@ -456,7 +456,7 @@ factor_computed_gotos (void)
      Examine the last statement in each basic block to see if the block
      ends with a computed goto.  */
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi = gsi_last_bb (bb);
       gimple last;
@@ -635,7 +635,7 @@ fold_cond_expr_cond (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple stmt = last_stmt (bb);
 
@@ -682,7 +682,7 @@ make_edges (void)
 	     EDGE_FALLTHRU);
 
   /* Traverse the basic block array placing edges.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple last = last_stmt (bb);
       bool fallthru;
@@ -836,7 +836,7 @@ assign_discriminators (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge e;
       edge_iterator ei;
@@ -1055,7 +1055,7 @@ make_abnormal_goto_edges (basic_block bb, bool for_call)
   basic_block target_bb;
   gimple_stmt_iterator gsi;
 
-  FOR_EACH_BB (target_bb)
+  FOR_EACH_BB_FN (target_bb, cfun)
     {
       for (gsi = gsi_start_bb (target_bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	{
@@ -1235,7 +1235,7 @@ cleanup_dead_labels (void)
 
   /* Find a suitable label for each block.  We use the first user-defined
      label if there is one, or otherwise just the first label we see.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
 
@@ -1271,7 +1271,7 @@ cleanup_dead_labels (void)
 
   /* Now redirect all jumps/branches to the selected label.
      First do so for each block ending in a control statement.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple stmt = last_stmt (bb);
       tree label, new_label;
@@ -1363,7 +1363,7 @@ cleanup_dead_labels (void)
   /* Finally, purge dead labels.  All user-defined labels and labels that
      can be the target of non-local gotos and labels which have their
      address taken are preserved.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
       tree label_for_this_bb = label_for_bb[bb->index].label;
@@ -1487,7 +1487,7 @@ group_case_labels (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple stmt = last_stmt (bb);
       if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
@@ -2160,7 +2160,7 @@ dump_cfg_stats (FILE *file)
 	   SCALE (size), LABEL (size));
 
   num_edges = 0;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     num_edges += EDGE_COUNT (bb->succs);
   size = num_edges * sizeof (struct edge_def);
   total += size;
@@ -4894,7 +4894,7 @@ gimple_verify_flow_info (void)
 	err = 1;
       }
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bool found_ctrl_stmt = false;
 
@@ -7241,7 +7241,7 @@ print_loop (FILE *file, struct loop *loop, int indent, int verbosity)
   if (verbosity >= 1)
     {
       fprintf (file, "%s{\n", s_indent);
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	if (bb->loop_father == loop)
 	  print_loops_bb (file, bb, indent, verbosity);
 
@@ -8331,7 +8331,7 @@ execute_fixup_cfg (void)
   FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
     e->count = apply_scale (e->count, count_scale);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bb->count = apply_scale (bb->count, count_scale);
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/tree-cfgcleanup.c b/gcc/tree-cfgcleanup.c
index 50b4a68..949b21d 100644
--- a/gcc/tree-cfgcleanup.c
+++ b/gcc/tree-cfgcleanup.c
@@ -640,7 +640,7 @@ cleanup_tree_cfg_1 (void)
      recording of edge to CASE_LABEL_EXPR.  */
   start_recording_case_labels ();
 
-  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB,
+  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB_FN,
      since the basic blocks may get removed.  */
   n = last_basic_block_for_fn (cfun);
   for (i = NUM_FIXED_BLOCKS; i < n; i++)
@@ -918,7 +918,7 @@ merge_phi_nodes (void)
   calculate_dominance_info (CDI_DOMINATORS);
 
   /* Find all PHI nodes that we may be able to merge.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       basic_block dest;
 
diff --git a/gcc/tree-complex.c b/gcc/tree-complex.c
index ff5ccab..8c9a3aa 100644
--- a/gcc/tree-complex.c
+++ b/gcc/tree-complex.c
@@ -207,7 +207,7 @@ init_dont_simulate_again (void)
   gimple phi;
   bool saw_a_complex_op = false;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	{
@@ -1637,7 +1637,7 @@ tree_lower_complex (void)
 
   /* ??? Ideally we'd traverse the blocks in breadth-first order.  */
   old_last_basic_block = last_basic_block_for_fn (cfun);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       if (bb->index >= old_last_basic_block)
 	continue;
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index 27d6a71..2d964d5 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -279,7 +279,7 @@ collect_dfa_stats (struct dfa_stats_d *dfa_stats_p ATTRIBUTE_UNUSED)
   memset ((void *)dfa_stats_p, 0, sizeof (struct dfa_stats_d));
 
   /* Walk all the statements in the function counting references.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator si;
 
@@ -741,7 +741,7 @@ dump_enumerated_decls (FILE *file, int flags)
 
   memset (&wi, '\0', sizeof (wi));
   wi.info = (void *) &decl_list;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index 85dc79f..467eb20 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -3304,7 +3304,7 @@ execute_lower_resx (void)
 
   mnt_map = pointer_map_create ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple last = last_stmt (bb);
       if (last && is_gimple_resx (last))
@@ -3710,7 +3710,7 @@ execute_lower_eh_dispatch (void)
 
   assign_filter_values ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple last = last_stmt (bb);
       if (last == NULL)
@@ -3810,7 +3810,7 @@ mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
   else
     lp_reachable = NULL;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
diff --git a/gcc/tree-emutls.c b/gcc/tree-emutls.c
index 9ba25fc..32599eb 100644
--- a/gcc/tree-emutls.c
+++ b/gcc/tree-emutls.c
@@ -638,7 +638,7 @@ lower_emutls_function_body (struct cgraph_node *node)
      create a node for it.  */
   d.builtin_node = cgraph_get_create_node (d.builtin_decl);
 
-  FOR_EACH_BB (d.bb)
+  FOR_EACH_BB_FN (d.bb, cfun)
     {
       gimple_stmt_iterator gsi;
       unsigned int i, nedge;
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index 7f6a150..71a25f1 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -1815,7 +1815,7 @@ main_tree_if_conversion (void)
 #ifdef ENABLE_CHECKING
   {
     basic_block bb;
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       gcc_assert (!bb->aux);
   }
 #endif
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index ed06cb9..ab8e40b 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -4569,7 +4569,7 @@ optimize_inline_calls (tree fn)
      will split id->current_basic_block, and the new blocks will
      follow it; we'll trudge through them, processing their CALL_EXPRs
      along the way.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     inlined_p |= gimple_expand_calls_inline (bb, &id);
 
   pop_gimplify_context (NULL);
diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c
index b6d3dd7..8e539f2 100644
--- a/gcc/tree-into-ssa.c
+++ b/gcc/tree-into-ssa.c
@@ -2320,7 +2320,7 @@ rewrite_into_ssa (void)
 
   /* Initialize dominance frontier.  */
   dfs = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bitmap_initialize (&dfs[bb->index], &bitmap_default_obstack);
 
   /* 1- Compute dominance frontiers.  */
@@ -2337,7 +2337,7 @@ rewrite_into_ssa (void)
   rewrite_blocks (ENTRY_BLOCK_PTR_FOR_FN (cfun), REWRITE_ALL);
 
   /* Free allocated memory.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bitmap_clear (&dfs[bb->index]);
   free (dfs);
 
@@ -3270,7 +3270,7 @@ update_ssa (unsigned update_flags)
       /* If the caller requested PHI nodes to be added, compute
 	 dominance frontiers.  */
       dfs = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	bitmap_initialize (&dfs[bb->index], &bitmap_default_obstack);
       compute_dominance_frontiers (dfs);
 
@@ -3296,7 +3296,7 @@ update_ssa (unsigned update_flags)
 	insert_updated_phi_nodes_for (sym, dfs, blocks_to_update,
 	                              update_flags);
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	bitmap_clear (&dfs[bb->index]);
       free (dfs);
 
diff --git a/gcc/tree-nrv.c b/gcc/tree-nrv.c
index b42993d..e00463d 100644
--- a/gcc/tree-nrv.c
+++ b/gcc/tree-nrv.c
@@ -144,7 +144,7 @@ tree_nrv (void)
     return 0;
 
   /* Look through each block for assignments to the RESULT_DECL.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	{
@@ -238,7 +238,7 @@ tree_nrv (void)
      RESULT.  */
   data.var = found;
   data.result = result;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
 	{
@@ -358,7 +358,7 @@ execute_return_slot_opt (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index 6a587e1..c83345f 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -1211,7 +1211,7 @@ static unsigned int
 compute_object_sizes (void)
 {
   basic_block bb;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
       for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
diff --git a/gcc/tree-outof-ssa.c b/gcc/tree-outof-ssa.c
index 8df3026..c5bba789 100644
--- a/gcc/tree-outof-ssa.c
+++ b/gcc/tree-outof-ssa.c
@@ -835,7 +835,7 @@ eliminate_useless_phis (void)
   gimple_stmt_iterator gsi;
   tree result;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
         {
@@ -893,7 +893,7 @@ rewrite_trees (var_map map ATTRIBUTE_UNUSED)
   /* Search for PHIs where the destination has no partition, but one
      or more arguments has a partition.  This should not happen and can
      create incorrect code.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -1101,7 +1101,7 @@ insert_backedge_copies (void)
 
   mark_dfs_back_edges ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* Mark block as possibly needing calculation of UIDs.  */
       bb->aux = &bb->aux;
diff --git a/gcc/tree-profile.c b/gcc/tree-profile.c
index 537c246..51e997c 100644
--- a/gcc/tree-profile.c
+++ b/gcc/tree-profile.c
@@ -637,7 +637,7 @@ tree_profiling (void)
 
       push_cfun (DECL_STRUCT_FUNCTION (node->decl));
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  gimple_stmt_iterator gsi;
 	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index ada942d..59e44cb 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -3276,7 +3276,7 @@ scev_const_prop (void)
   if (number_of_loops (cfun) <= 1)
     return 0;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       loop = bb->loop_father;
 
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 9aa526f..ebd4218 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -1252,7 +1252,7 @@ scan_function (void)
   basic_block bb;
   bool ret = false;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -3311,7 +3311,7 @@ sra_modify_function_body (void)
   bool cfg_changed = false;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi = gsi_start_bb (bb);
       while (!gsi_end_p (gsi))
@@ -3795,7 +3795,7 @@ propagate_dereference_distances (void)
 
   auto_vec<basic_block> queue (last_basic_block_for_fn (cfun));
   queue.quick_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       queue.quick_push (bb);
       bb->aux = bb;
@@ -4572,7 +4572,7 @@ ipa_sra_modify_function_body (ipa_parm_adjustment_vec adjustments)
   bool cfg_changed = false;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
@@ -4811,7 +4811,7 @@ convert_callers (struct cgraph_node *node, tree old_decl,
   if (!encountered_recursive_call)
     return;
 
-  FOR_EACH_BB (this_block)
+  FOR_EACH_BB_FN (this_block, cfun)
     {
       gimple_stmt_iterator gsi;
 
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 3d05258..7e07771 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -774,7 +774,7 @@ ccp_initialize (void)
   const_val = XCNEWVEC (prop_value_t, n_const_val);
 
   /* Initialize simulation flags for PHI nodes and statements.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
 
@@ -808,7 +808,7 @@ ccp_initialize (void)
   /* Now process PHI nodes.  We never clear the simulate_again flag on
      phi nodes, since we do not know which edges are executable yet,
      except for phi nodes for virtual operands when we do not do store ccp.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
 
@@ -2508,7 +2508,7 @@ execute_fold_all_builtins (void)
   basic_block bb;
   unsigned int todoflags = 0;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
       for (i = gsi_start_bb (bb); !gsi_end_p (i); )
diff --git a/gcc/tree-ssa-coalesce.c b/gcc/tree-ssa-coalesce.c
index 70158d5..38a4078 100644
--- a/gcc/tree-ssa-coalesce.c
+++ b/gcc/tree-ssa-coalesce.c
@@ -821,7 +821,7 @@ build_ssa_conflict_graph (tree_live_info_p liveinfo)
 
   live = new_live_track (map);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
@@ -929,7 +929,7 @@ create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy)
 
   map = init_var_map (num_ssa_names);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       tree arg;
 
@@ -1183,7 +1183,7 @@ coalesce_partitions (var_map map, ssa_conflicts_p graph, coalesce_list_p cl,
      in the coalesce list because they do not need to be sorted, and simply
      consume extra memory/compilation time in large programs.  */
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_EACH_EDGE (e, ei, bb->preds)
 	if (e->flags & EDGE_ABNORMAL)
diff --git a/gcc/tree-ssa-copy.c b/gcc/tree-ssa-copy.c
index 0dd5e14..3da262b 100644
--- a/gcc/tree-ssa-copy.c
+++ b/gcc/tree-ssa-copy.c
@@ -469,7 +469,7 @@ init_copy_prop (void)
   n_copy_of = num_ssa_names;
   copy_of = XCNEWVEC (prop_value_t, n_copy_of);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator si;
       int depth = bb_loop_depth (bb);
diff --git a/gcc/tree-ssa-copyrename.c b/gcc/tree-ssa-copyrename.c
index 90e070f..c7d514f 100644
--- a/gcc/tree-ssa-copyrename.c
+++ b/gcc/tree-ssa-copyrename.c
@@ -325,7 +325,7 @@ rename_ssa_copies (void)
 
   map = init_var_map (num_ssa_names);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* Scan for real copies.  */
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -341,7 +341,7 @@ rename_ssa_copies (void)
 	}
     }
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* Treat PHI nodes as copies between the result and each argument.  */
       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c
index 701dd44..5abef5c 100644
--- a/gcc/tree-ssa-dce.c
+++ b/gcc/tree-ssa-dce.c
@@ -374,7 +374,7 @@ find_obviously_necessary_stmts (bool aggressive)
   gimple phi, stmt;
   int flags;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* PHI nodes are never inherently necessary.  */
       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -404,7 +404,7 @@ find_obviously_necessary_stmts (bool aggressive)
       struct loop *loop;
       scev_initialize ();
       if (mark_irreducible_loops ())
-	FOR_EACH_BB (bb)
+	FOR_EACH_BB_FN (bb, cfun)
 	  {
 	    edge_iterator ei;
 	    FOR_EACH_EDGE (e, ei, bb->succs)
@@ -1325,7 +1325,7 @@ eliminate_unnecessary_stmts (void)
 	    }
 	}
     }
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* Remove dead PHI nodes.  */
       something_changed |= remove_dead_phis (bb);
diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index 6cf60be..2bd2a86 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -795,7 +795,7 @@ free_all_edge_infos (void)
   edge_iterator ei;
   edge e;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_EACH_EDGE (e, ei, bb->preds)
         {
@@ -866,7 +866,7 @@ tree_ssa_dominator_optimize (void)
   {
     gimple_stmt_iterator gsi;
     basic_block bb;
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       {
 	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	  update_stmt_if_modified (gsi_stmt (gsi));
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 6e6d115..a77a639 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -3386,7 +3386,7 @@ ssa_forward_propagate_and_combine (void)
 
   cfg_changed = false;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c
index 6ccf2fb..da7198b 100644
--- a/gcc/tree-ssa-live.c
+++ b/gcc/tree-ssa-live.c
@@ -673,7 +673,7 @@ clear_unused_block_pointer (void)
   basic_block bb;
   gimple_stmt_iterator gsi;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       {
 	unsigned i;
@@ -791,7 +791,7 @@ remove_unused_locals (void)
   usedvars = BITMAP_ALLOC (NULL);
 
   /* Walk the CFG marking all referenced symbols.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
       size_t i;
@@ -856,7 +856,7 @@ remove_unused_locals (void)
      ignores them, and the second pass (if there were any) tries to remove
      them.  */
   if (have_local_clobbers)
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       {
 	gimple_stmt_iterator gsi;
 
@@ -963,11 +963,11 @@ new_tree_live_info (var_map map)
   live->num_blocks = last_basic_block_for_fn (cfun);
 
   live->livein = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bitmap_initialize (&live->livein[bb->index], &liveness_bitmap_obstack);
 
   live->liveout = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bitmap_initialize (&live->liveout[bb->index], &liveness_bitmap_obstack);
 
   live->work_stack = XNEWVEC (int, last_basic_block_for_fn (cfun));
@@ -1149,11 +1149,11 @@ calculate_live_on_exit (tree_live_info_p liveinfo)
   edge_iterator ei;
 
   /* live on entry calculations used liveout vectors for defs, clear them.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     bitmap_clear (&liveinfo->liveout[bb->index]);
 
   /* Set all the live-on-exit bits for uses in PHIs.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
       size_t i;
@@ -1294,7 +1294,7 @@ dump_live_info (FILE *f, tree_live_info_p live, int flag)
 
   if ((flag & LIVEDUMP_ENTRY) && live->livein)
     {
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  fprintf (f, "\nLive on entry to BB%d : ", bb->index);
 	  EXECUTE_IF_SET_IN_BITMAP (&live->livein[bb->index], 0, i, bi)
@@ -1308,7 +1308,7 @@ dump_live_info (FILE *f, tree_live_info_p live, int flag)
 
   if ((flag & LIVEDUMP_EXIT) && live->liveout)
     {
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	{
 	  fprintf (f, "\nLive on exit from BB%d : ", bb->index);
 	  EXECUTE_IF_SET_IN_BITMAP (&live->liveout[bb->index], 0, i, bi)
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 3aaf2b2..cbcdc37 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -1601,7 +1601,7 @@ analyze_memory_references (void)
      loops postorder.  */
   i = 0;
   bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     if (bb->loop_father != current_loops->tree_root)
       bbs[i++] = bb;
   n = i;
@@ -2406,7 +2406,7 @@ fill_always_executed_in (void)
   struct loop *loop;
 
   bitmap_clear (contains_call);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@@ -2478,7 +2478,7 @@ tree_ssa_lim_finalize (void)
 
   free_aux_for_edges ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     SET_ALWAYS_EXECUTED_IN (bb, NULL);
 
   bitmap_obstack_release (&lim_bitmap_obstack);
diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c
index 76d5958..ed30c7b0 100644
--- a/gcc/tree-ssa-loop-manip.c
+++ b/gcc/tree-ssa-loop-manip.c
@@ -463,7 +463,7 @@ find_uses_to_rename (bitmap changed_bbs, bitmap *use_blocks, bitmap need_phis)
     EXECUTE_IF_SET_IN_BITMAP (changed_bbs, 0, index, bi)
       find_uses_to_rename_bb (BASIC_BLOCK_FOR_FN (cfun, index), use_blocks, need_phis);
   else
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       find_uses_to_rename_bb (bb, use_blocks, need_phis);
 }
 
@@ -602,7 +602,7 @@ verify_loop_closed_ssa (bool verify_ssa_p)
 
   timevar_push (TV_VERIFY_LOOP_CLOSED);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
 	{
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index f77c016..1c89f45 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -527,7 +527,7 @@ execute_cse_reciprocals (void)
   calculate_dominance_info (CDI_POST_DOMINATORS);
 
 #ifdef ENABLE_CHECKING
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     gcc_assert (!bb->aux);
 #endif
 
@@ -540,7 +540,7 @@ execute_cse_reciprocals (void)
 	  execute_cse_reciprocals_1 (NULL, name);
       }
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
       gimple phi;
@@ -1419,7 +1419,7 @@ execute_cse_sincos (void)
   calculate_dominance_info (CDI_DOMINATORS);
   memset (&sincos_stats, 0, sizeof (sincos_stats));
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
       bool cleanup_eh = false;
@@ -1939,7 +1939,7 @@ execute_optimize_bswap (void)
 
   memset (&bswap_stats, 0, sizeof (bswap_stats));
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
@@ -2785,7 +2785,7 @@ execute_optimize_widening_mul (void)
 
   memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
diff --git a/gcc/tree-ssa-propagate.c b/gcc/tree-ssa-propagate.c
index 55ae68b..f9f084b 100644
--- a/gcc/tree-ssa-propagate.c
+++ b/gcc/tree-ssa-propagate.c
@@ -1097,7 +1097,7 @@ substitute_and_fold (ssa_prop_get_value_fn get_value_fn,
       }
 
   /* Propagate into all uses and fold.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
 
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 16679f4..9ec1512 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -6778,7 +6778,7 @@ compute_points_to_sets (void)
   intra_create_variable_infos ();
 
   /* Now walk all statements and build the constraint set.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
@@ -6825,7 +6825,7 @@ compute_points_to_sets (void)
     }
 
   /* Compute the call-used/clobbered sets.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi;
 
diff --git a/gcc/tree-ssa-tail-merge.c b/gcc/tree-ssa-tail-merge.c
index a0eac67..4e05246 100644
--- a/gcc/tree-ssa-tail-merge.c
+++ b/gcc/tree-ssa-tail-merge.c
@@ -754,7 +754,7 @@ find_same_succ (void)
   same_succ same = same_succ_alloc ();
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       find_same_succ_bb (bb, &same);
       if (same == NULL)
@@ -1015,7 +1015,7 @@ reset_cluster_vectors (void)
   for (i = 0; i < all_clusters.length (); ++i)
     delete_cluster (all_clusters[i]);
   all_clusters.truncate (0);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     BB_CLUSTER (bb) = NULL;
 }
 
diff --git a/gcc/tree-ssa-ter.c b/gcc/tree-ssa-ter.c
index fa6a248..22ae47b 100644
--- a/gcc/tree-ssa-ter.c
+++ b/gcc/tree-ssa-ter.c
@@ -683,7 +683,7 @@ find_replaceable_exprs (var_map map)
 
   bitmap_obstack_initialize (&ter_bitmap_obstack);
   table = new_temp_expr_table (map);
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       find_replaceable_in_bb (table, bb);
       gcc_checking_assert (bitmap_empty_p (table->partition_in_use));
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index 9289c11..6f978e2 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -1631,7 +1631,7 @@ thread_through_all_blocks (bool may_peel_loop_headers)
      ahead and thread it, else ignore it.  */
   basic_block bb;
   edge e;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* If we do end up threading here, we can remove elements from
 	 BB->preds.  Thus we can not use the FOR_EACH_EDGE iterator.  */
diff --git a/gcc/tree-ssa-uncprop.c b/gcc/tree-ssa-uncprop.c
index d38e0dd..63a2e10 100644
--- a/gcc/tree-ssa-uncprop.c
+++ b/gcc/tree-ssa-uncprop.c
@@ -65,7 +65,7 @@ associate_equivalences_with_edges (void)
 
   /* Walk over each block.  If the block ends with a control statement,
      then it might create a useful equivalence.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator gsi = gsi_last_bb (bb);
       gimple stmt;
@@ -406,7 +406,7 @@ tree_ssa_uncprop (void)
   /* we just need to empty elements out of the hash table, and cleanup the
     AUX field on the edges.  */
   val_ssa_equiv.dispose ();
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge e;
       edge_iterator ei;
diff --git a/gcc/tree-ssa-uninit.c b/gcc/tree-ssa-uninit.c
index 4fd5fb8..c6b0a90 100644
--- a/gcc/tree-ssa-uninit.c
+++ b/gcc/tree-ssa-uninit.c
@@ -176,7 +176,7 @@ warn_uninitialized_vars (bool warn_possibly_uninitialized)
   gimple_stmt_iterator gsi;
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
 					     single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), bb);
@@ -2130,7 +2130,7 @@ execute_late_warn_uninitialized (void)
   added_to_worklist = pointer_set_create ();
 
   /* Initialize worklist  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       {
         gimple phi = gsi_stmt (gsi);
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index f1025b2..8c1aaf2 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -999,7 +999,7 @@ verify_ssa (bool check_modified_stmt)
 
   /* Now verify all the uses and make sure they agree with the definitions
      found in the previous pass.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge e;
       gimple phi;
@@ -1456,7 +1456,7 @@ execute_update_addresses_taken (void)
 
   /* Collect into ADDRESSES_TAKEN all variables whose address is taken within
      the function body.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	{
@@ -1558,7 +1558,7 @@ execute_update_addresses_taken (void)
      variables and operands need to be rewritten to expose bare symbols.  */
   if (!bitmap_empty_p (suitable_for_renaming))
     {
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
 	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
 	  {
 	    gimple stmt = gsi_stmt (gsi);
diff --git a/gcc/tree-stdarg.c b/gcc/tree-stdarg.c
index 8b168e0..dc82340 100644
--- a/gcc/tree-stdarg.c
+++ b/gcc/tree-stdarg.c
@@ -536,7 +536,7 @@ check_all_va_list_escapes (struct stdarg_info *si)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
 
@@ -703,7 +703,7 @@ execute_optimize_stdarg (void)
 			   || TREE_TYPE (cfun_va_list) == char_type_node);
   gcc_assert (is_gimple_reg_type (cfun_va_list) == va_list_simple_ptr);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
 
@@ -813,7 +813,7 @@ execute_optimize_stdarg (void)
   memset (&wi, 0, sizeof (wi));
   wi.info = si.va_list_vars;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
 
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index f6b17b8..efcc94d 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -1420,7 +1420,7 @@ do_switchconv (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
   {
     const char *failure_reason;
     gimple stmt = last_stmt (bb);
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index d55485d..098012c 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -1541,7 +1541,7 @@ expand_vector_operations (void)
   basic_block bb;
   bool cfg_changed = false;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	{
diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c
index c11f8a8..e5d201f 100644
--- a/gcc/tree-vectorizer.c
+++ b/gcc/tree-vectorizer.c
@@ -157,7 +157,7 @@ adjust_simduid_builtins (hash_table <simduid_to_vf> &htab)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator i;
 
@@ -265,7 +265,7 @@ note_simd_array_uses (hash_table <simd_array_to_simduid> *htab)
   wi.info = &ns;
   ns.htab = htab;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       {
 	gimple stmt = gsi_stmt (gsi);
@@ -475,7 +475,7 @@ execute_vect_slp (void)
 
   init_stmt_vec_info_vec ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       vect_location = find_bb_location (bb);
 
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 06b6259..8ab6d76 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -6431,7 +6431,7 @@ check_all_array_refs (void)
   basic_block bb;
   gimple_stmt_iterator si;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       edge_iterator ei;
       edge e;
@@ -6593,7 +6593,7 @@ remove_range_assertions (void)
   /* Note that the BSI iterator bump happens at the bottom of the
      loop and no bump is necessary if we're removing the statement
      referenced by the current BSI.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
       {
 	gimple stmt = gsi_stmt (si);
@@ -6708,7 +6708,7 @@ vrp_initialize (void)
   vr_value = XCNEWVEC (value_range_t *, num_vr_values);
   vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple_stmt_iterator si;
 
@@ -9543,7 +9543,7 @@ identify_jump_threads (void)
      I doubt it's worth the effort for the classes of jump
      threading opportunities we are trying to identify at this
      point in compilation.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gimple last;
 
diff --git a/gcc/tsan.c b/gcc/tsan.c
index 4efcfe5..d12459f 100644
--- a/gcc/tsan.c
+++ b/gcc/tsan.c
@@ -640,7 +640,7 @@ instrument_memory_accesses (void)
   gimple_stmt_iterator gsi;
   bool fentry_exit_instrument = false;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       fentry_exit_instrument |= instrument_gimple (&gsi);
   return fentry_exit_instrument;
diff --git a/gcc/ubsan.c b/gcc/ubsan.c
index 846e884..51b4f8d 100644
--- a/gcc/ubsan.c
+++ b/gcc/ubsan.c
@@ -741,7 +741,7 @@ ubsan_pass (void)
   basic_block bb;
   gimple_stmt_iterator gsi;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
 	{
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index d509354..c684835 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -542,7 +542,7 @@ verify_histograms (void)
 
   error_found = false;
   visited_hists = pointer_set_create ();
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       {
 	gimple stmt = gsi_stmt (gsi);
@@ -648,7 +648,7 @@ gimple_value_profile_transformations (void)
   gimple_stmt_iterator gsi;
   bool changed = false;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
 	{
@@ -1944,7 +1944,7 @@ gimple_find_values_to_profile (histogram_values *values)
   histogram_value hist = NULL;
   values->create (0);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
       gimple_values_to_profile (gsi_stmt (gsi), values);
 
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index 5bd0799..175ec01 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -6941,7 +6941,7 @@ vt_find_locations (void)
   in_pending = sbitmap_alloc (last_basic_block_for_fn (cfun));
   bitmap_clear (in_worklist);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     fibheap_insert (pending, bb_order[bb->index], bb);
   bitmap_ones (in_pending);
 
@@ -7101,7 +7101,7 @@ vt_find_locations (void)
     }
 
   if (success && MAY_HAVE_DEBUG_INSNS)
-    FOR_EACH_BB (bb)
+    FOR_EACH_BB_FN (bb, cfun)
       gcc_assert (VTI (bb)->flooded);
 
   free (bb_order);
@@ -7229,7 +7229,7 @@ dump_dataflow_sets (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       fprintf (dump_file, "\nBasic block %d:\n", bb->index);
       fprintf (dump_file, "IN:\n");
@@ -9402,7 +9402,7 @@ vt_emit_notes (void)
 
   /* Free memory occupied by the out hash tables, as they aren't used
      anymore.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     dataflow_set_clear (&VTI (bb)->out);
 
   /* Enable emitting notes by functions (mainly by set_variable_part and
@@ -9418,7 +9418,7 @@ vt_emit_notes (void)
 
   dataflow_set_init (&cur);
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       /* Emit the notes for changes of variable locations between two
 	 subsequent basic blocks.  */
@@ -9995,7 +9995,7 @@ vt_initialize (void)
 
   vt_add_function_parameters ();
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       rtx insn;
       HOST_WIDE_INT pre, post = 0;
@@ -10138,7 +10138,7 @@ delete_debug_insns (void)
   if (!MAY_HAVE_DEBUG_INSNS)
     return;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       FOR_BB_INSNS_SAFE (bb, insn, next)
 	if (DEBUG_INSN_P (insn))
@@ -10181,7 +10181,7 @@ vt_finalize (void)
 {
   basic_block bb;
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       VTI (bb)->mos.release ();
     }
-- 
1.7.11.7
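
For readers skimming the archive, a minimal sketch of the conversion pattern applied throughout this patch (illustrative only, not part of the patch itself; process_block is a hypothetical placeholder for whatever a given pass does per block):

  basic_block bb;

  /* Before: the iterated function is implicit (always cfun).  */
  FOR_EACH_BB (bb)
    process_block (bb);

  /* After: the function whose CFG is walked is named explicitly.  */
  FOR_EACH_BB_FN (bb, cfun)
    process_block (bb);

Since FOR_EACH_BB (bb) was simply defined as FOR_EACH_BB_FN (bb, cfun), the substitution is behavior-preserving; it only makes the dependence on cfun explicit at each use site.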


