+2015-10-27 Mikhail Maltsev <maltsevm@gmail.com>
+
+ * attribs.c (check_attribute_tables): New function, broken out from...
+ (init_attributes): Use it.
+ * cfgcleanup.c (try_optimize_cfg): Use flag_checking, CHECKING_P,
+ gcc_checking_assert and checking_* functions to eliminate
+ ENABLE_CHECKING conditionals.
+ * cfgexpand.c (expand_goto, expand_debug_expr): Likewise.
+ (pass_expand::execute): Likewise.
+ * cgraphclones.c (symbol_table::materialize_all_clones): Likewise.
+ * cgraphunit.c (mark_functions_to_output): Likewise.
+ (cgraph_node::expand_thunk): Likewise.
+ (symbol_table::compile): Likewise.
+ * ddg.c (add_cross_iteration_register_deps): Likewise.
+ (create_ddg_all_sccs): Likewise.
+ * df-core.c (df_finish_pass, df_analyze): Likewise.
+ * diagnostic-core.h: Likewise.
+ * diagnostic.c (diagnostic_report_diagnostic): Likewise.
+ * dominance.c (calculate_dominance_info): Likewise.
+ * dwarf2out.c (add_AT_die_ref): Likewise.
+ (const_ok_for_output_1, mem_loc_descriptor): Likewise.
+ (loc_list_from_tree, gen_lexical_block_die): Likewise.
+ (gen_type_die_with_usage, gen_type_die): Likewise.
+ (dwarf2out_decl): Likewise.
+ * emit-rtl.c (verify_rtx_sharing, reorder_insns_nobb): Likewise.
+ * except.c (duplicate_eh_regions): Likewise.
+ * fwprop.c (register_active_defs, update_df_init): Likewise.
+ (fwprop_init, fwprop_done): Likewise.
+ (update_uses): Likewise.
+ * ggc-page.c (ggc_grow): Likewise.
+ * gimplify.c (gimplify_body): Likewise.
+ (gimplify_hasher::equal): Likewise.
+ * graphite-isl-ast-to-gimple.c (graphite_verify): Likewise.
+ * graphite-scop-detection.c (canonicalize_loop_closed_ssa_form):
+ Likewise.
+ * graphite-sese-to-poly.c (rewrite_reductions_out_of_ssa): Likewise.
+ (rewrite_cross_bb_scalar_deps_out_of_ssa): Likewise.
+ * hash-table.h (::find_empty_slot_for_expand): Likewise.
+ * ifcvt.c (if_convert): Likewise.
+ * ipa-cp.c (ipcp_propagate_stage): Likewise.
+ * ipa-devirt.c (type_in_anonymous_namespace_p): Likewise.
+ (odr_type_p, odr_types_equivalent_p): Likewise.
+ (add_type_duplicate, get_odr_type): Likewise.
+ * ipa-icf.c (sem_item_optimizer::execute): Likewise.
+ (sem_item_optimizer::subdivide_classes_by_equality): Likewise.
+ (sem_item_optimizer::verify_classes): Likewise.
+ (sem_item_optimizer::traverse_congruence_split): Likewise.
+ (sem_item_optimizer::checking_verify_classes): New.
+ * ipa-icf.h (sem_item_optimizer::checking_verify_classes): Add new
+ method.
+ * cfgrtl.c (commit_edge_insertions): Likewise.
+ (fixup_reorder_chain, cfg_layout_finalize): Likewise.
+ (rtl_flow_call_edges_add): Likewise.
+ * cgraph.c (symbol_table::create_edge): Likewise.
+ (cgraph_edge::redirect_call_stmt_to_callee): Likewise.
+ * cgraph.h (symtab_node): Likewise.
+ (symtab_node::checking_verify_symtab_nodes): Define.
+ (cgraph_node::checking_verify_cgraph_nodes): Define.
+ * cfghooks.h (checking_verify_flow_info): Define.
+ * cfgloop.h (checking_verify_loop_structure): Define.
+ * dominance.h (checking_verify_dominators): Define.
+ * et-forest.c: Fix comment.
+ * ipa-inline-analysis.c (compute_inline_parameters): Use flag_checking,
+ CHECKING_P, gcc_checking_assert and checking_* functions to eliminate
+ ENABLE_CHECKING conditionals.
+ * ipa-inline-transform.c (save_inline_function_body): Likewise.
+ * ipa-inline.c (inline_small_functions): Likewise.
+ (early_inliner): Likewise.
+ * ipa-inline.h (estimate_edge_growth): Likewise.
+ * ipa-visibility.c (function_and_variable_visibility): Likewise.
+ * ipa.c (symbol_table::remove_unreachable_nodes): Likewise.
+ (ipa_single_use): Likewise.
+ * ira-int.h: Likewise.
+ * ira.c (ira): Likewise.
+ * loop-doloop.c (doloop_optimize_loops): Likewise.
+ * loop-init.c (loop_optimizer_init, fix_loop_structure): Likewise.
+ * loop-invariant.c (move_loop_invariants): Likewise.
+ * lra-assigns.c (lra_assign): Likewise.
+ * lra-constraints.c (lra_constraints): Likewise.
+ * lra-eliminations.c (lra_eliminate): Likewise.
+ * lra-int.h (struct lra_reg): Likewise.
+ * lra-lives.c (check_pseudos_live_through_calls): Likewise.
+ (lra_create_live_ranges_1): Likewise.
+ * lra-remat.c (create_remat_bb_data): Likewise.
+ * lra.c (lra_update_insn_recog_data, restore_scratches): Likewise.
+ (lra): Likewise.
+ (check_rtl): Always define. Remove incorrect guard around
+ extract_constrain_insn call.
+ * lto-cgraph.c (input_cgraph_1): Use flag_checking,
+ CHECKING_P, gcc_checking_assert and checking_* functions to eliminate
+ ENABLE_CHECKING conditionals.
+ * lto-streamer-out.c (DFS::DFS): Likewise.
+ (lto_output): Likewise.
+ * lto-streamer.c (lto_streamer_init): Likewise.
+ * omp-low.c (scan_omp_target, expand_omp_taskreg): Likewise.
+ (expand_omp_target, execute_expand_omp): Likewise.
+ (lower_omp_target): Likewise.
+ * passes.c (execute_function_todo): Likewise.
+ (execute_todo, execute_one_pass): Likewise.
+ (verify_curr_properties): Always define.
+ * predict.c (tree_estimate_probability): Use flag_checking,
+ CHECKING_P, gcc_checking_assert and checking_* functions to eliminate
+ ENABLE_CHECKING conditionals.
+ (propagate_freq): Likewise.
+ * pretty-print.c (pp_format): Likewise.
+ * real.c (real_to_decimal_for_mode): Likewise.
+ * recog.c (split_all_insns): Likewise.
+ * regcprop.c (kill_value_one_regno): Likewise.
+ (copy_value): Likewise.
+ (validate_value_data): Define unconditionally.
+ * reload.c: Fix comment.
+ * timevar.c: Include options.h.
+ * tree-ssa.h (checking_verify_ssa): Define.
+ * tree-ssa-loop-manip.h (checking_verify_loop_closed_ssa): Define.
+ * sched-deps.c (CHECK): Remove unused macro.
+ (add_or_update_dep_1, sd_add_dep): Use flag_checking, CHECKING_P,
+ gcc_checking_assert and checking_* functions to eliminate
+ ENABLE_CHECKING conditionals.
+ * sel-sched-ir.c (free_regset_pool, tidy_control_flow): Likewise.
+ * sel-sched.c (struct moveop_static_params): Likewise.
+ (find_best_reg_for_expr, move_cond_jump): Likewise.
+ (move_op_orig_expr_not_found): Likewise.
+ (code_motion_process_successors, move_op): Likewise.
+ * ssa-iterators.h (first_readonly_imm_use): Likewise.
+ (next_readonly_imm_use): Likewise.
+ * store-motion.c (compute_store_table): Likewise.
+ * symbol-summary.h (function_summary::function_summary): Likewise.
+ * target.h (cumulative_args_t): Likewise.
+ (get_cumulative_args, pack_cumulative_args): Likewise.
+ * timevar.c (timer::print): Likewise.
+ * trans-mem.c (ipa_tm_execute): Likewise.
+ * tree-cfg.c (move_stmt_op): Likewise.
+ (move_sese_region_to_fn): Likewise.
+ (gimple_flow_call_edges_add): Likewise.
+ * tree-cfgcleanup.c (cleanup_tree_cfg_noloop, repair_loop_structures):
+ Likewise.
+ * tree-eh.c (remove_unreachable_handlers): Likewise.
+ * tree-if-conv.c (pass_if_conversion::execute): Likewise.
+ * tree-inline.c (expand_call_inline, optimize_inline_calls): Likewise.
+ * tree-into-ssa.c (update_ssa): Likewise.
+ * tree-loop-distribution.c (pass_loop_distribution::execute): Likewise.
+ * tree-outof-ssa.c (eliminate_useless_phis, rewrite_trees): Likewise.
+ * tree-parloops.c (pass_parallelize_loops::execute): Likewise.
+ * tree-predcom.c (suitable_component_p): Likewise.
+ * tree-profile.c (gimple_gen_const_delta_profiler): Likewise.
+ * tree-ssa-alias.c (refs_may_alias_p_1): Likewise.
+ * tree-ssa-live.c (verify_live_on_entry): Likewise.
+ * tree-ssa-live.h (register_ssa_partition): Likewise.
+ * tree-ssa-loop-ivcanon.c (tree_unroll_loops_completely): Likewise.
+ * tree-ssa-loop-manip.c (add_exit_phi): Likewise.
+ (tree_transform_and_unroll_loop): Likewise.
+ * tree-ssa-math-opts.c (pass_cse_reciprocals::execute): Likewise.
+ * tree-ssa-operands.c (get_expr_operands): Likewise.
+ * tree-ssa-propagate.c (replace_exp_1): Likewise.
+ * tree-ssa-structalias.c (rewrite_constraints): Likewise.
+ * tree-ssa-ter.c (free_temp_expr_table): Likewise.
+ * tree-ssa-threadupdate.c (duplicate_thread_path): Likewise.
+ * tree-ssanames.c (release_ssa_name_fn): Likewise.
+ * tree-stdarg.c (expand_ifn_va_arg): Likewise.
+ * tree-vect-loop-manip.c
+ (slpeel_tree_duplicate_loop_to_edge_cfg): Likewise.
+ (slpeel_checking_verify_cfg_after_peeling): Likewise.
+ (vect_do_peeling_for_loop_bound): Likewise.
+ (vect_do_peeling_for_alignment): Likewise.
+ * tree-vrp.c (supports_overflow_infinity): Likewise.
+ (set_value_range): Likewise.
+ * tree.c (free_lang_data_in_cgraph): Likewise.
+ * value-prof.c (gimple_remove_histogram_value): Likewise.
+ (free_hist): Likewise.
+ * var-tracking.c (canonicalize_values_star): Likewise.
+ (compute_bb_dataflow, vt_find_locations, vt_emit_notes): Likewise.
+
2015-10-27 Nathan Sidwell <nathan@codesourcery.com>
* internal-fn.def (IFN_GOACC_DIM_SIZE, IFN_GOACC_DIM_POS,
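
For review context, a minimal sketch of the three idioms the ChangeLog
refers to (the "widget" verifier below is illustrative, not from the
patch):

    /* Compile-time: CHECKING_P is always defined, to 0 or 1, so it can
       guard declarations that should only exist in checking builds.  */
    #if CHECKING_P
    static sparseset active_defs_check;
    #endif

    /* Run-time: flag_checking is the -fchecking option (declared via
       options.h), assumed to default to CHECKING_P.  Each expensive
       verifier gains a trivial inline wrapper.  */
    static inline void
    checking_verify_widget (void)
    {
      if (flag_checking)
        verify_widget ();
    }

    /* Assertions: gcc_checking_assert compiles away when !CHECKING_P,
       so it needs no #ifdef of its own.  */
    gcc_checking_assert (targ_die != NULL);
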
return NULL;
}
-/* Initialize attribute tables, and make some sanity checks
- if --enable-checking. */
+/* Make some sanity checks on the attribute tables. */
+
+static void
+check_attribute_tables (void)
+{
+ for (size_t i = 0; i < ARRAY_SIZE (attribute_tables); i++)
+ for (size_t j = 0; attribute_tables[i][j].name != NULL; j++)
+ {
+ /* The name must not begin and end with __. */
+ const char *name = attribute_tables[i][j].name;
+ int len = strlen (name);
+
+ gcc_assert (!(name[0] == '_' && name[1] == '_'
+ && name[len - 1] == '_' && name[len - 2] == '_'));
+
+ /* The minimum and maximum lengths must be consistent. */
+ gcc_assert (attribute_tables[i][j].min_length >= 0);
+
+ gcc_assert (attribute_tables[i][j].max_length == -1
+ || (attribute_tables[i][j].max_length
+ >= attribute_tables[i][j].min_length));
+
+ /* An attribute cannot require both a DECL and a TYPE. */
+ gcc_assert (!attribute_tables[i][j].decl_required
+ || !attribute_tables[i][j].type_required);
+
+ /* If an attribute requires a function type, in particular
+ it requires a type. */
+ gcc_assert (!attribute_tables[i][j].function_type_required
+ || attribute_tables[i][j].type_required);
+ }
+
+ /* Check that each name occurs just once in each table. */
+ for (size_t i = 0; i < ARRAY_SIZE (attribute_tables); i++)
+ for (size_t j = 0; attribute_tables[i][j].name != NULL; j++)
+ for (size_t k = j + 1; attribute_tables[i][k].name != NULL; k++)
+ gcc_assert (strcmp (attribute_tables[i][j].name,
+ attribute_tables[i][k].name));
+
+ /* Check that no name occurs in more than one table. Names that
+ begin with '*' are exempt, and may be overridden. */
+ for (size_t i = 0; i < ARRAY_SIZE (attribute_tables); i++)
+ for (size_t j = i + 1; j < ARRAY_SIZE (attribute_tables); j++)
+ for (size_t k = 0; attribute_tables[i][k].name != NULL; k++)
+ for (size_t l = 0; attribute_tables[j][l].name != NULL; l++)
+ gcc_assert (attribute_tables[i][k].name[0] == '*'
+ || strcmp (attribute_tables[i][k].name,
+ attribute_tables[j][l].name));
+}
+
+/* Initialize attribute tables, and make some sanity checks if checking is
+ enabled. */
void
init_attributes (void)
if (attribute_tables[i] == NULL)
attribute_tables[i] = empty_attribute_table;
-#ifdef ENABLE_CHECKING
- /* Make some sanity checks on the attribute tables. */
- for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
- {
- int j;
-
- for (j = 0; attribute_tables[i][j].name != NULL; j++)
- {
- /* The name must not begin and end with __. */
- const char *name = attribute_tables[i][j].name;
- int len = strlen (name);
-
- gcc_assert (!(name[0] == '_' && name[1] == '_'
- && name[len - 1] == '_' && name[len - 2] == '_'));
-
- /* The minimum and maximum lengths must be consistent. */
- gcc_assert (attribute_tables[i][j].min_length >= 0);
-
- gcc_assert (attribute_tables[i][j].max_length == -1
- || (attribute_tables[i][j].max_length
- >= attribute_tables[i][j].min_length));
-
- /* An attribute cannot require both a DECL and a TYPE. */
- gcc_assert (!attribute_tables[i][j].decl_required
- || !attribute_tables[i][j].type_required);
-
- /* If an attribute requires a function type, in particular
- it requires a type. */
- gcc_assert (!attribute_tables[i][j].function_type_required
- || attribute_tables[i][j].type_required);
- }
- }
-
- /* Check that each name occurs just once in each table. */
- for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
- {
- int j, k;
- for (j = 0; attribute_tables[i][j].name != NULL; j++)
- for (k = j + 1; attribute_tables[i][k].name != NULL; k++)
- gcc_assert (strcmp (attribute_tables[i][j].name,
- attribute_tables[i][k].name));
- }
- /* Check that no name occurs in more than one table. Names that
- begin with '*' are exempt, and may be overridden. */
- for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
- {
- size_t j, k, l;
-
- for (j = i + 1; j < ARRAY_SIZE (attribute_tables); j++)
- for (k = 0; attribute_tables[i][k].name != NULL; k++)
- for (l = 0; attribute_tables[j][l].name != NULL; l++)
- gcc_assert (attribute_tables[i][k].name[0] == '*'
- || strcmp (attribute_tables[i][k].name,
- attribute_tables[j][l].name));
- }
-#endif
+ if (flag_checking)
+ check_attribute_tables ();
for (i = 0; i < ARRAY_SIZE (attribute_tables); ++i)
/* Put all the GNU attributes into the "gnu" namespace. */
to detect and fix during edge forwarding, and in some cases
is only visible after newly unreachable blocks are deleted,
which will be done in fixup_partitions. */
- fixup_partitions ();
-
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ fixup_partitions ();
+ checking_verify_flow_info ();
}
changed_overall |= changed;
static void
expand_goto (tree label)
{
-#ifdef ENABLE_CHECKING
- /* Check for a nonlocal goto to a containing function. Should have
- gotten translated to __builtin_nonlocal_goto. */
- tree context = decl_function_context (label);
- gcc_assert (!context || context == current_function_decl);
-#endif
+ if (flag_checking)
+ {
+ /* Check for a nonlocal goto to a containing function. Should have
+ gotten translated to __builtin_nonlocal_goto. */
+ tree context = decl_function_context (label);
+ gcc_assert (!context || context == current_function_decl);
+ }
emit_jump (jump_target_rtx (label));
}
default:
flag_unsupported:
-#ifdef ENABLE_CHECKING
- debug_tree (exp);
- gcc_unreachable ();
-#else
+ if (flag_checking)
+ {
+ debug_tree (exp);
+ gcc_unreachable ();
+ }
return NULL;
-#endif
}
}
gcc.c-torture/execute/ipa-sra-2.c execution, -Os -m32 fails otherwise. */
cleanup_cfg (CLEANUP_NO_INSN_DEL);
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ checking_verify_flow_info ();
/* Initialize pseudos allocated for hard registers. */
emit_initial_value_sets ();
};
extern void verify_flow_info (void);
+
+/* Check control flow invariants, if internal consistency checks are
+ enabled. */
+
+static inline void
+checking_verify_flow_info (void)
+{
+ /* TODO: Add a separate option for -fchecking=cfg. */
+ if (flag_checking)
+ verify_flow_info ();
+}
+
extern void dump_bb (FILE *, basic_block, int, int);
extern void dump_bb_for_graph (pretty_printer *, basic_block);
extern void dump_flow_info (FILE *, int);
extern void verify_loop_structure (void);
+/* Check loop structure invariants, if internal consistency checks are
+ enabled. */
+
+static inline void
+checking_verify_loop_structure (void)
+{
+ if (flag_checking)
+ verify_loop_structure ();
+}
+
/* Loop analysis. */
extern bool just_once_each_iteration_p (const struct loop *, const_basic_block);
gcov_type expected_loop_iterations_unbounded (const struct loop *);
which will be done by fixup_partitions. */
fixup_partitions ();
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ checking_verify_flow_info ();
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
insn = NEXT_INSN (insn);
set_last_insn (insn);
-#ifdef ENABLE_CHECKING
- verify_insn_chain ();
-#endif
+ if (flag_checking)
+ verify_insn_chain ();
/* Now add jumps and labels as needed to match the blocks new
outgoing edges. */
void
cfg_layout_finalize (void)
{
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ checking_verify_flow_info ();
force_one_exit_fallthru ();
rtl_register_cfg_hooks ();
if (reload_completed && !targetm.have_epilogue ())
rebuild_jump_labels (get_insns ());
delete_dead_jumptables ();
-#ifdef ENABLE_CHECKING
- verify_insn_chain ();
- verify_flow_info ();
-#endif
+ if (flag_checking)
+ verify_insn_chain ();
+ checking_verify_flow_info ();
}
block in CFG already. Calling make_edge in such case would
cause us to mark that edge as fake and remove it later. */
-#ifdef ENABLE_CHECKING
- if (split_at_insn == BB_END (bb))
+ if (flag_checking && split_at_insn == BB_END (bb))
{
e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (e == NULL);
}
-#endif
/* Note that the following may create a new basic block
and renumber the existing basic blocks. */
{
/* This is a rather expensive check possibly triggering
construction of call stmt hashtable. */
-#ifdef ENABLE_CHECKING
cgraph_edge *e;
- gcc_checking_assert (
- !(e = caller->get_edge (call_stmt)) || e->speculative);
-#endif
+ gcc_checking_assert (!(e = caller->get_edge (call_stmt))
+ || e->speculative);
gcc_assert (is_gimple_call (call_stmt));
}
gcall *new_stmt;
gimple_stmt_iterator gsi;
bool skip_bounds = false;
-#ifdef ENABLE_CHECKING
- cgraph_node *node;
-#endif
if (e->speculative)
{
&& !skip_bounds))
return e->call_stmt;
-#ifdef ENABLE_CHECKING
- if (decl)
+ if (flag_checking && decl)
{
- node = cgraph_node::get (decl);
+ cgraph_node *node = cgraph_node::get (decl);
gcc_assert (!node || !node->clone.combined_args_to_skip);
}
-#endif
if (symtab->dump_file)
{
and NULL otherwise. */
static inline symtab_node *get (const_tree decl)
{
-#ifdef ENABLE_CHECKING
/* Check that we are called for sane type of object - functions
and static or external variables. */
gcc_checking_assert (TREE_CODE (decl) == FUNCTION_DECL
memcpy/memset on the tree nodes. */
gcc_checking_assert (!decl->decl_with_vis.symtab_node
|| decl->decl_with_vis.symtab_node->decl == decl);
-#endif
return decl->decl_with_vis.symtab_node;
}
/* Verify symbol table for internal consistency. */
static DEBUG_FUNCTION void verify_symtab_nodes (void);
+ /* Perform internal consistency checks, if they are enabled. */
+ static inline void checking_verify_symtab_nodes (void);
+
/* Type of the symbol. */
ENUM_BITFIELD (symtab_type) type : 8;
symtab_node *ultimate_alias_target_1 (enum availability *avail = NULL);
};
+inline void
+symtab_node::checking_verify_symtab_nodes (void)
+{
+ if (flag_checking)
+ symtab_node::verify_symtab_nodes ();
+}
+
/* Walk all aliases for NODE. */
#define FOR_EACH_ALIAS(node, alias) \
for (unsigned x_i = 0; node->iterate_direct_aliases (x_i, alias); x_i++)
/* Verify whole cgraph structure. */
static void DEBUG_FUNCTION verify_cgraph_nodes (void);
+ /* Verify cgraph, if consistency checking is enabled. */
+ static inline void checking_verify_cgraph_nodes (void);
+
/* Worker to bring NODE local. */
static bool make_local (cgraph_node *node, void *);
return true;
}
+/* Verify cgraph, if consistency checking is enabled. */
+
+inline void
+cgraph_node::checking_verify_cgraph_nodes (void)
+{
+ if (flag_checking)
+ cgraph_node::verify_cgraph_nodes ();
+}
+
/* Return true when variable can be removed from variable pool
if all direct calls are eliminated. */
if (symtab->dump_file)
fprintf (symtab->dump_file, "Materializing clones\n");
-#ifdef ENABLE_CHECKING
- cgraph_node::verify_cgraph_nodes ();
-#endif
+
+ cgraph_node::checking_verify_cgraph_nodes ();
/* We can also do topological order, but number of iterations should be
bounded by number of IPA passes since single IPA pass is probably not
node->clear_stmts_in_references ();
if (symtab->dump_file)
fprintf (symtab->dump_file, "Materialization Call site updates done.\n");
-#ifdef ENABLE_CHECKING
- cgraph_node::verify_cgraph_nodes ();
-#endif
+
+ cgraph_node::checking_verify_cgraph_nodes ();
+
symtab->remove_unreachable_nodes (symtab->dump_file);
}
static void
mark_functions_to_output (void)
{
- cgraph_node *node;
-#ifdef ENABLE_CHECKING
bool check_same_comdat_groups = false;
+ cgraph_node *node;
- FOR_EACH_FUNCTION (node)
- gcc_assert (!node->process);
-#endif
+ if (flag_checking)
+ FOR_EACH_FUNCTION (node)
+ gcc_assert (!node->process);
FOR_EACH_FUNCTION (node)
{
}
else if (node->same_comdat_group)
{
-#ifdef ENABLE_CHECKING
- check_same_comdat_groups = true;
-#endif
+ if (flag_checking)
+ check_same_comdat_groups = true;
}
else
{
/* We should've reclaimed all functions that are not needed. */
-#ifdef ENABLE_CHECKING
- if (!node->global.inlined_to
+ if (flag_checking
+ && !node->global.inlined_to
&& gimple_has_body_p (decl)
/* FIXME: in ltrans unit when offline copy is outside partition but inline copies
are inside partition, we can end up not removing the body since we no longer
node->debug ();
internal_error ("failed to reclaim unneeded function");
}
-#endif
gcc_assert (node->global.inlined_to
|| !gimple_has_body_p (decl)
|| node->in_other_partition
}
}
-#ifdef ENABLE_CHECKING
- if (check_same_comdat_groups)
+ if (flag_checking && check_same_comdat_groups)
FOR_EACH_FUNCTION (node)
if (node->same_comdat_group && !node->process)
{
"comdat group");
}
}
-#endif
}
/* DECL is FUNCTION_DECL. Initialize datastructures so DECL is a function
TREE_ASM_WRITTEN (thunk_fndecl) = false;
delete_unreachable_blocks ();
update_ssa (TODO_update_ssa);
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ checking_verify_flow_info ();
free_dominance_info (CDI_DOMINATORS);
/* Since we want to emit the thunk, we explicitly mark its name as
if (seen_error ())
return;
-#ifdef ENABLE_CHECKING
- symtab_node::verify_symtab_nodes ();
-#endif
+ symtab_node::checking_verify_symtab_nodes ();
timevar_push (TV_CGRAPHOPT);
if (pre_ipa_mem_report)
(*debug_hooks->assembly_start) ();
if (!quiet_flag)
fprintf (stderr, "Assembling functions:\n");
-#ifdef ENABLE_CHECKING
- symtab_node::verify_symtab_nodes ();
-#endif
+ symtab_node::checking_verify_symtab_nodes ();
materialize_all_clones ();
bitmap_obstack_initialize (NULL);
fprintf (dump_file, "\nFinal ");
symtab_node::dump_table (dump_file);
}
-#ifdef ENABLE_CHECKING
+ if (!flag_checking)
+ return;
symtab_node::verify_symtab_nodes ();
/* Double check that all inline clones are gone and that all
function bodies have been released from memory. */
if (error_found)
internal_error ("nodes with unreleased memory found");
}
-#endif
}
rtx_insn *def_insn = DF_REF_INSN (last_def);
ddg_node_ptr last_def_node = get_node_of_insn (g, def_insn);
ddg_node_ptr use_node;
-#ifdef ENABLE_CHECKING
- struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
-#endif
df_ref first_def = df_bb_regno_first_def_find (g->bb, regno);
gcc_assert (last_def_node);
gcc_assert (first_def);
-#ifdef ENABLE_CHECKING
- if (DF_REF_ID (last_def) != DF_REF_ID (first_def))
- gcc_assert (!bitmap_bit_p (&bb_info->gen,
- DF_REF_ID (first_def)));
-#endif
+ if (flag_checking && DF_REF_ID (last_def) != DF_REF_ID (first_def))
+ {
+ struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
+ gcc_assert (!bitmap_bit_p (&bb_info->gen, DF_REF_ID (first_def)));
+ }
/* Create inter-loop true dependences and anti dependences. */
for (r_use = DF_REF_CHAIN (last_def); r_use != NULL; r_use = r_use->next)
(int (*) (const void *, const void *)) compare_sccs);
}
-#ifdef ENABLE_CHECKING
/* Check that every node in SCCS belongs to exactly one strongly connected
component and that no element of SCCS is empty. */
static void
}
sbitmap_free (tmp);
}
-#endif
/* Perform the Strongly Connected Components decomposing algorithm on the
DDG and return DDG_ALL_SCCS structure that contains them. */
sbitmap_free (from);
sbitmap_free (to);
sbitmap_free (scc_nodes);
-#ifdef ENABLE_CHECKING
- check_sccs (sccs, num_nodes);
-#endif
+
+ if (flag_checking)
+ check_sccs (sccs, num_nodes);
+
return sccs;
}
#endif
#endif
-#ifdef ENABLE_CHECKING
- if (verify)
+ if (flag_checking && verify)
df->changeable_flags |= DF_VERIFY_SCHEDULED;
-#endif
}
for (i = 0; i < df->n_blocks; i++)
bitmap_set_bit (current_all_blocks, df->postorder[i]);
-#ifdef ENABLE_CHECKING
- /* Verify that POSTORDER_INVERTED only contains blocks reachable from
- the ENTRY block. */
- for (i = 0; i < df->n_blocks_inverted; i++)
- gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
-#endif
+ if (flag_checking)
+ {
+ /* Verify that POSTORDER_INVERTED only contains blocks reachable from
+ the ENTRY block. */
+ for (i = 0; i < df->n_blocks_inverted; i++)
+ gcc_assert (bitmap_bit_p (current_all_blocks,
+ df->postorder_inverted[i]));
+ }
/* Make sure that we have pruned any unreachable blocks from these
sets. */
/* None of these functions are suitable for ATTRIBUTE_PRINTF, because
each language front end can extend them with its own set of format
specifiers. We must use custom format checks. */
-#if (ENABLE_CHECKING && GCC_VERSION >= 4001) || GCC_VERSION == BUILDING_GCC_VERSION
+#if (CHECKING_P && GCC_VERSION >= 4001) || GCC_VERSION == BUILDING_GCC_VERSION
#define ATTRIBUTE_GCC_DIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m, n))) ATTRIBUTE_NONNULL(m)
#else
#define ATTRIBUTE_GCC_DIAG(m, n) ATTRIBUTE_NONNULL(m)
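
(Reviewer note: unlike ENABLE_CHECKING, which was merely defined or
undefined, CHECKING_P is always defined to 0 or 1, so it is valid both
in preprocessor expressions like the one above and as an ordinary C
expression, as in the diagnostic.c hunk below, where the !CHECKING_P
branch folds away at compile time in checking builds.)
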
if (diagnostic->kind == DK_ICE || diagnostic->kind == DK_ICE_NOBT)
{
-#ifndef ENABLE_CHECKING
/* When not checking, ICEs are converted to fatal errors when an
error has already occurred. This is counteracted by
abort_on_error. */
- if ((diagnostic_kind_count (context, DK_ERROR) > 0
- || diagnostic_kind_count (context, DK_SORRY) > 0)
+ if (!CHECKING_P
+ && (diagnostic_kind_count (context, DK_ERROR) > 0
+ || diagnostic_kind_count (context, DK_SORRY) > 0)
&& !context->abort_on_error)
{
expanded_location s
s.file, s.line);
exit (ICE_EXIT_CODE);
}
-#endif
if (context->internal_error)
(*context->internal_error) (context,
diagnostic->message.format_spec,
if (dom_computed[dir_index] == DOM_OK)
{
-#if ENABLE_CHECKING
- verify_dominators (dir);
-#endif
+ checking_verify_dominators (dir);
return;
}
dom_computed[dir_index] = DOM_NO_FAST_QUERY;
}
else
- {
-#if ENABLE_CHECKING
- verify_dominators (dir);
-#endif
- }
+ checking_verify_dominators (dir);
compute_dom_fast_query (dir);
unsigned bb_dom_dfs_in (enum cdi_direction, basic_block);
unsigned bb_dom_dfs_out (enum cdi_direction, basic_block);
extern void verify_dominators (enum cdi_direction);
+
+/* Verify invariants of computed dominance information, if internal consistency
+ checks are enabled. */
+
+static inline void
+checking_verify_dominators (cdi_direction dir)
+{
+ if (flag_checking)
+ verify_dominators (dir);
+}
+
basic_block recompute_dominator (enum cdi_direction, basic_block);
extern void iterate_fix_dominators (enum cdi_direction,
vec<basic_block> , bool);
add_AT_die_ref (dw_die_ref die, enum dwarf_attribute attr_kind, dw_die_ref targ_die)
{
dw_attr_node attr;
+ gcc_checking_assert (targ_die != NULL);
-#ifdef ENABLE_CHECKING
- gcc_assert (targ_die != NULL);
-#else
/* With LTO we can end up trying to reference something we didn't create
a DIE for. Avoid crashing later on a NULL referenced DIE. */
if (targ_die == NULL)
return;
-#endif
attr.dw_attr = attr_kind;
attr.dw_attr_val.val_class = dw_val_class_die_ref;
print_die (comp_unit_die (), stderr);
}
-#ifdef ENABLE_CHECKING
/* Sanity checks on DIEs. */
static void
&& a->dw_attr != DW_AT_GNU_all_call_sites);
}
}
-#endif
\f
/* Start a new compilation unit DIE for an include file. OLD_UNIT is the CU
for the enclosing include file, if any. BINCL_DIE is the DW_TAG_GNU_BINCL
{
/* If delegitimize_address couldn't do anything with the UNSPEC, assume
we can't express it in the debug info. */
-#ifdef ENABLE_CHECKING
/* Don't complain about TLS UNSPECs, those are just too hard to
delegitimize. Note this could be a non-decl SYMBOL_REF such as
one in a constant pool entry, so testing SYMBOL_REF_TLS_MODEL
rather than DECL_THREAD_LOCAL_P is not just an optimization. */
- if (XVECLEN (rtl, 0) == 0
- || GET_CODE (XVECEXP (rtl, 0, 0)) != SYMBOL_REF
- || SYMBOL_REF_TLS_MODEL (XVECEXP (rtl, 0, 0)) == TLS_MODEL_NONE)
+ if (flag_checking
+ && (XVECLEN (rtl, 0) == 0
+ || GET_CODE (XVECEXP (rtl, 0, 0)) != SYMBOL_REF
+ || SYMBOL_REF_TLS_MODEL (XVECEXP (rtl, 0, 0)) == TLS_MODEL_NONE))
inform (current_function_decl
? DECL_SOURCE_LOCATION (current_function_decl)
: UNKNOWN_LOCATION,
#else
"non-delegitimized UNSPEC %d found in variable location",
XINT (rtl, 1));
-#endif
#endif
expansion_failed (NULL_TREE, rtl,
"UNSPEC hasn't been delegitimized.\n");
goto symref;
default:
-#ifdef ENABLE_CHECKING
- print_rtl (stderr, rtl);
- gcc_unreachable ();
-#else
+ if (flag_checking)
+ {
+ print_rtl (stderr, rtl);
+ gcc_unreachable ();
+ }
break;
-#endif
}
if (mem_loc_result && initialized == VAR_INIT_STATUS_UNINITIALIZED)
return 0;
}
-#ifdef ENABLE_CHECKING
/* Otherwise this is a generic code; we should just lists all of
these explicitly. We forgot one. */
- gcc_unreachable ();
-#else
+ if (flag_checking)
+ gcc_unreachable ();
+
/* In a release build, we want to degrade gracefully: better to
generate incomplete debugging information than to crash. */
return NULL;
-#endif
}
if (!ret && !list_ret)
{
if (old_die)
{
-#ifdef ENABLE_CHECKING
/* This must have been generated early and it won't even
need location information since it's a DW_AT_inline
function. */
- for (dw_die_ref c = context_die; c; c = c->die_parent)
- if (c->die_tag == DW_TAG_inlined_subroutine
- || c->die_tag == DW_TAG_subprogram)
- {
- gcc_assert (get_AT (c, DW_AT_inline));
- break;
- }
-#endif
+ if (flag_checking)
+ for (dw_die_ref c = context_die; c; c = c->die_parent)
+ if (c->die_tag == DW_TAG_inlined_subroutine
+ || c->die_tag == DW_TAG_subprogram)
+ {
+ gcc_assert (get_AT (c, DW_AT_inline));
+ break;
+ }
return;
}
}
if (type == NULL_TREE || type == error_mark_node)
return;
-#ifdef ENABLE_CHECKING
- if (type)
+ if (flag_checking && type)
verify_type (type);
-#endif
if (TYPE_NAME (type) != NULL_TREE
&& TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
if (type != error_mark_node)
{
gen_type_die_with_usage (type, context_die, DINFO_USAGE_DIR_USE);
-#ifdef ENABLE_CHECKING
- dw_die_ref die = lookup_type_die (type);
- if (die)
- check_die (die);
-#endif
+ if (flag_checking)
+ {
+ dw_die_ref die = lookup_type_die (type);
+ if (die)
+ check_die (die);
+ }
}
}
gen_decl_die (decl, NULL, context_die);
-#ifdef ENABLE_CHECKING
- dw_die_ref die = lookup_decl_die (decl);
- if (die)
- check_die (die);
-#endif
+ if (flag_checking)
+ {
+ dw_die_ref die = lookup_decl_die (decl);
+ if (die)
+ check_die (die);
+ }
}
/* Write the debugging output for DECL. */
/* This rtx may not be shared. If it has already been seen,
replace it with a copy of itself. */
-#ifdef ENABLE_CHECKING
- if (RTX_FLAG (x, used))
+ if (flag_checking && RTX_FLAG (x, used))
{
error ("invalid rtl sharing found in the insn");
debug_rtx (insn);
debug_rtx (x);
internal_error ("internal consistency failure");
}
-#endif
gcc_assert (!RTX_FLAG (x, used));
RTX_FLAG (x, used) = 1;
void
reorder_insns_nobb (rtx_insn *from, rtx_insn *to, rtx_insn *after)
{
-#ifdef ENABLE_CHECKING
- rtx_insn *x;
- for (x = from; x != to; x = NEXT_INSN (x))
- gcc_assert (after != x);
- gcc_assert (after != to);
-#endif
+ if (flag_checking)
+ {
+ for (rtx_insn *x = from; x != to; x = NEXT_INSN (x))
+ gcc_assert (after != x);
+ gcc_assert (after != to);
+ }
/* Splice this bunch out of where it is now. */
if (PREV_INSN (from))
#include "alloc-pool.h"
#include "et-forest.h"
-/* We do not enable this with ENABLE_CHECKING, since it is awfully slow. */
+/* We do not enable this with CHECKING_P, since it is awfully slow. */
#undef DEBUG_ET
#ifdef DEBUG_ET
struct duplicate_eh_regions_data data;
eh_region outer_region;
-#ifdef ENABLE_CHECKING
- verify_eh_tree (ifun);
-#endif
+ if (flag_checking)
+ verify_eh_tree (ifun);
data.label_map = map;
data.label_map_data = map_data;
duplicate_eh_regions_1 (&data, r, outer_region);
}
-#ifdef ENABLE_CHECKING
- verify_eh_tree (cfun);
-#endif
+ if (flag_checking)
+ verify_eh_tree (cfun);
return data.eh_map;
}
\f
static df_ref *active_defs;
-#ifdef ENABLE_CHECKING
static sparseset active_defs_check;
-#endif
/* Fill the ACTIVE_DEFS array with the use->def link for the registers
mentioned in USE_REC. Register the valid entries in ACTIVE_DEFS_CHECK
df_ref def = get_def_for_use (use);
int regno = DF_REF_REGNO (use);
-#ifdef ENABLE_CHECKING
- sparseset_set_bit (active_defs_check, regno);
-#endif
+ if (flag_checking)
+ sparseset_set_bit (active_defs_check, regno);
active_defs[regno] = def;
}
}
static void
update_df_init (rtx_insn *def_insn, rtx_insn *insn)
{
-#ifdef ENABLE_CHECKING
- sparseset_clear (active_defs_check);
-#endif
+ if (flag_checking)
+ sparseset_clear (active_defs_check);
register_active_defs (DF_INSN_USES (def_insn));
register_active_defs (DF_INSN_USES (insn));
register_active_defs (DF_INSN_EQ_USES (insn));
if (DF_REF_ID (use) >= (int) use_def_ref.length ())
use_def_ref.safe_grow_cleared (DF_REF_ID (use) + 1);
-#ifdef ENABLE_CHECKING
- gcc_assert (sparseset_bit_p (active_defs_check, regno));
-#endif
+ gcc_checking_assert (sparseset_bit_p (active_defs_check, regno));
use_def_ref[DF_REF_ID (use)] = active_defs[regno];
}
}
df_set_flags (DF_DEFER_INSN_RESCAN);
active_defs = XNEWVEC (df_ref, max_reg_num ());
-#ifdef ENABLE_CHECKING
- active_defs_check = sparseset_alloc (max_reg_num ());
-#endif
+ if (flag_checking)
+ active_defs_check = sparseset_alloc (max_reg_num ());
}
static void
use_def_ref.release ();
free (active_defs);
-#ifdef ENABLE_CHECKING
- sparseset_free (active_defs_check);
-#endif
+ if (flag_checking)
+ sparseset_free (active_defs_check);
free_dominance_info (CDI_DOMINATORS);
cleanup_cfg (0);
void
ggc_grow (void)
{
-#ifndef ENABLE_CHECKING
- G.allocated_last_gc = MAX (G.allocated_last_gc,
- G.allocated);
-#else
- ggc_collect ();
-#endif
+ if (!flag_checking)
+ G.allocated_last_gc = MAX (G.allocated_last_gc,
+ G.allocated);
+ else
+ ggc_collect ();
if (!quiet_flag)
fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
}
pop_gimplify_context (outer_bind);
gcc_assert (gimplify_ctxp == NULL);
-#ifdef ENABLE_CHECKING
- if (!seen_error ())
+ if (flag_checking && !seen_error ())
verify_gimple_in_seq (gimple_bind_body (outer_bind));
-#endif
timevar_pop (TV_TREE_GIMPLIFY);
input_location = saved_location;
if (!operand_equal_p (t1, t2, 0))
return false;
-#ifdef ENABLE_CHECKING
/* Only allow them to compare equal if they also hash equal; otherwise
results are nondeterminate, and we fail bootstrap comparison. */
- gcc_assert (hash (p1) == hash (p2));
-#endif
+ gcc_checking_assert (hash (p1) == hash (p2));
return true;
}
static inline void
graphite_verify (void)
{
-#ifdef ENABLE_CHECKING
- verify_loop_structure ();
- verify_loop_closed_ssa (true);
-#endif
+ checking_verify_loop_structure ();
+ checking_verify_loop_closed_ssa (true);
}
/* IVS_PARAMS maps ISL's scattering and parameter identifiers
static void
canonicalize_loop_closed_ssa_form (void)
{
- loop_p loop;
-
-#ifdef ENABLE_CHECKING
- verify_loop_closed_ssa (true);
-#endif
+ checking_verify_loop_closed_ssa (true);
+ loop_p loop;
FOR_EACH_LOOP (loop, 0)
canonicalize_loop_closed_ssa (loop);
rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
update_ssa (TODO_update_ssa);
-#ifdef ENABLE_CHECKING
- verify_loop_closed_ssa (true);
-#endif
+ checking_verify_loop_closed_ssa (true);
}
/* Can all ivs be represented by a signed integer?
}
update_ssa (TODO_update_ssa);
-#ifdef ENABLE_CHECKING
- verify_loop_closed_ssa (true);
-#endif
+ checking_verify_loop_closed_ssa (true);
}
/* Rewrite the scalar dependence of DEF used in USE_STMT with a memory
{
scev_reset_htab ();
update_ssa (TODO_update_ssa);
-#ifdef ENABLE_CHECKING
- verify_loop_closed_ssa (true);
-#endif
+ checking_verify_loop_closed_ssa (true);
}
}
if (is_empty (*slot))
return slot;
-#ifdef ENABLE_CHECKING
gcc_checking_assert (!is_deleted (*slot));
-#endif
hash2 = hash_table_mod2 (hash, m_size_prime_index);
for (;;)
slot = m_entries + index;
if (is_empty (*slot))
return slot;
-#ifdef ENABLE_CHECKING
gcc_checking_assert (!is_deleted (*slot));
-#endif
}
}
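
(The #ifdefs dropped in this hunk were redundant: gcc_checking_assert
already expands to a no-op in non-checking builds.  Roughly, its
definition in system.h is:

    #if CHECKING_P
    #define gcc_checking_assert(EXPR) gcc_assert (EXPR)
    #else
    #define gcc_checking_assert(EXPR) ((void)(0 && (EXPR)))
    #endif

where the "0 &&" form keeps EXPR syntax-checked without evaluating it.)
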
if (optimize == 1)
df_remove_problem (df_live);
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ checking_verify_flow_info ();
}
\f
/* If-conversion and CFG cleanup. */
overall_size, max_new_size);
propagate_constants_topo (topo);
-#ifdef ENABLE_CHECKING
- ipcp_verify_propagated_values ();
-#endif
+ if (flag_checking)
+ ipcp_verify_propagated_values ();
topo->constants.propagate_effects ();
topo->contexts.propagate_effects ();
{
/* C++ FE uses magic <anon> as assembler names of anonymous types.
verify that this matches type_in_anonymous_namespace_p. */
-#ifdef ENABLE_CHECKING
if (in_lto_p)
- gcc_assert (!strcmp ("<anon>",
- IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (TYPE_NAME (t)))));
-#endif
+ gcc_checking_assert (!strcmp ("<anon>",
+ IDENTIFIER_POINTER
+ (DECL_ASSEMBLER_NAME (TYPE_NAME (t)))));
return true;
}
return false;
if (TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL
&& (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t))))
{
-#ifdef ENABLE_CHECKING
/* C++ FE uses magic <anon> as assembler names of anonymous types.
verify that this matches type_in_anonymous_namespace_p. */
- gcc_assert (!type_with_linkage_p (t)
- || strcmp ("<anon>",
- IDENTIFIER_POINTER
- (DECL_ASSEMBLER_NAME (TYPE_NAME (t))))
- || type_in_anonymous_namespace_p (t));
-#endif
+ gcc_checking_assert (!type_with_linkage_p (t)
+ || strcmp ("<anon>",
+ IDENTIFIER_POINTER
+ (DECL_ASSEMBLER_NAME (TYPE_NAME (t))))
+ || type_in_anonymous_namespace_p (t));
return true;
}
return false;
bool
odr_types_equivalent_p (tree type1, tree type2)
{
- hash_set<type_pair> visited;
+ gcc_checking_assert (odr_or_derived_type_p (type1)
+ && odr_or_derived_type_p (type2));
-#ifdef ENABLE_CHECKING
- gcc_assert (odr_or_derived_type_p (type1) && odr_or_derived_type_p (type2));
-#endif
+ hash_set<type_pair> visited;
return odr_types_equivalent_p (type1, type2, false, NULL,
&visited, UNKNOWN_LOCATION, UNKNOWN_LOCATION);
}
}
gcc_assert (val->odr_violated || !odr_must_violate);
/* Sanity check that all bases will be build same way again. */
-#ifdef ENABLE_CHECKING
- if (COMPLETE_TYPE_P (type) && COMPLETE_TYPE_P (val->type)
+ if (flag_checking
+ && COMPLETE_TYPE_P (type) && COMPLETE_TYPE_P (val->type)
&& TREE_CODE (val->type) == RECORD_TYPE
&& TREE_CODE (type) == RECORD_TYPE
&& TYPE_BINFO (val->type) && TYPE_BINFO (type)
j++;
}
}
-#endif
/* Regularize things a little. During LTO same types may come with
if (slot && *slot)
{
val = *slot;
-#ifdef ENABLE_CHECKING
- if (in_lto_p && can_be_vtable_hashed_p (type))
+ if (flag_checking
+ && in_lto_p && can_be_vtable_hashed_p (type))
{
hash = hash_odr_vtable (type);
vtable_slot = odr_vtable_hash->find_slot_with_hash (type, hash,
gcc_assert (!vtable_slot || *vtable_slot == *slot);
vtable_slot = NULL;
}
-#endif
}
else if (*vtable_slot)
val = *vtable_slot;
dump_cong_classes ();
process_cong_reduction ();
- verify_classes ();
+ checking_verify_classes ();
if (dump_file)
fprintf (dump_file, "Dump after callgraph-based congruence reduction\n");
process_cong_reduction ();
dump_cong_classes ();
- verify_classes ();
+ checking_verify_classes ();
bool merged_p = merge_classes (prev_class_count);
if (dump_file && (dump_flags & TDF_DETAILS))
}
}
- verify_classes ();
+ checking_verify_classes ();
}
/* Subdivide classes by address references that members of the class
return newly_created_classes;
}
-/* Verify congruence classes if checking is enabled. */
+/* Verify congruence classes, if checking is enabled. */
+
+void
+sem_item_optimizer::checking_verify_classes (void)
+{
+ if (flag_checking)
+ verify_classes ();
+}
+
+/* Verify congruence classes. */
void
sem_item_optimizer::verify_classes (void)
{
-#if ENABLE_CHECKING
for (hash_table <congruence_class_group_hash>::iterator it = m_classes.begin ();
it != m_classes.end (); ++it)
{
{
congruence_class *cls = (*it)->classes[i];
- gcc_checking_assert (cls);
- gcc_checking_assert (cls->members.length () > 0);
+ gcc_assert (cls);
+ gcc_assert (cls->members.length () > 0);
for (unsigned int j = 0; j < cls->members.length (); j++)
{
sem_item *item = cls->members[j];
- gcc_checking_assert (item);
- gcc_checking_assert (item->cls == cls);
+ gcc_assert (item);
+ gcc_assert (item->cls == cls);
for (unsigned k = 0; k < item->usages.length (); k++)
{
sem_usage_pair *usage = item->usages[k];
- gcc_checking_assert (usage->item->index_in_class <
- usage->item->cls->members.length ());
+ gcc_assert (usage->item->index_in_class <
+ usage->item->cls->members.length ());
}
}
}
}
-#endif
}
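
(Note that the asserts inside verify_classes switch from
gcc_checking_assert to plain gcc_assert: the function body is no longer
compiled out, and once it is reached, via checking_verify_classes under
flag_checking or called directly from a debugger, its checks should
always fire.)
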
/* Disposes split map traverse function. CLS_PTR is pointer to congruence
add_item_to_class (tc, cls->members[i]);
}
-#ifdef ENABLE_CHECKING
- for (unsigned int i = 0; i < 2; i++)
- gcc_checking_assert (newclasses[i]->members.length ());
-#endif
+ if (flag_checking)
+ {
+ for (unsigned int i = 0; i < 2; i++)
+ gcc_assert (newclasses[i]->members.length ());
+ }
if (splitter_cls == cls)
optimizer->splitter_class_removed = true;
else
b = *slot;
-#if ENABLE_CHECKING
gcc_checking_assert (usage->item->cls);
gcc_checking_assert (usage->item->index_in_class <
usage->item->cls->members.length ());
-#endif
bitmap_set_bit (b, usage->item->index_in_class);
}
void dump (void);
/* Verify congruence classes if checking is enabled. */
+ void checking_verify_classes (void);
+
+ /* Verify congruence classes. */
void verify_classes (void);
/* Write IPA ICF summary for symbols. */
info->size = info->self_size;
info->stack_frame_offset = 0;
info->estimated_stack_size = info->estimated_self_stack_size;
-#ifdef ENABLE_CHECKING
- inline_update_overall_summary (node);
- gcc_assert (info->time == info->self_time && info->size == info->self_size);
-#endif
+ if (flag_checking)
+ {
+ inline_update_overall_summary (node);
+ gcc_assert (info->time == info->self_time
+ && info->size == info->self_size);
+ }
pop_cfun ();
}
first_clone->remove_symbol_and_inline_clones ();
first_clone = NULL;
}
-#ifdef ENABLE_CHECKING
- else
+ else if (flag_checking)
first_clone->verify ();
-#endif
+
return first_clone;
}
if (!edge->inline_failed || !edge->callee->analyzed)
continue;
-#ifdef ENABLE_CHECKING
+#if CHECKING_P
/* Be sure that caches are maintained consistent. */
sreal cached_badness = edge_badness (edge, false);
if (ipa_node_params_sum)
return 0;
-#ifdef ENABLE_CHECKING
- node->verify ();
-#endif
+ if (flag_checking)
+ node->verify ();
node->remove_all_references ();
/* Rebuild this reference because it doesn't depend on
static inline int
estimate_edge_growth (struct cgraph_edge *edge)
{
-#ifdef ENABLE_CHECKING
gcc_checking_assert (inline_edge_summary (edge)->call_stmt_size
|| !edge->callee->analyzed);
-#endif
return (estimate_edge_size (edge)
- inline_edge_summary (edge)->call_stmt_size);
}
what comdat group they are in when they won't be emitted in this TU. */
if (node->same_comdat_group && DECL_EXTERNAL (node->decl))
{
-#ifdef ENABLE_CHECKING
- symtab_node *n;
-
- for (n = node->same_comdat_group;
- n != node;
- n = n->same_comdat_group)
- /* If at least one of same comdat group functions is external,
- all of them have to be, otherwise it is a front-end bug. */
- gcc_assert (DECL_EXTERNAL (n->decl));
-#endif
+ if (flag_checking)
+ {
+ for (symtab_node *n = node->same_comdat_group;
+ n != node;
+ n = n->same_comdat_group)
+ /* If at least one of same comdat group functions is external,
+ all of them have to be, otherwise it is a front-end bug. */
+ gcc_assert (DECL_EXTERNAL (n->decl));
+ }
node->dissolve_same_comdat_group_list ();
}
gcc_assert ((!DECL_WEAK (node->decl)
build_type_inheritance_graph ();
if (file)
fprintf (file, "\nReclaiming functions:");
-#ifdef ENABLE_CHECKING
- FOR_EACH_FUNCTION (node)
- gcc_assert (!node->aux);
- FOR_EACH_VARIABLE (vnode)
- gcc_assert (!vnode->aux);
-#endif
+ if (flag_checking)
+ {
+ FOR_EACH_FUNCTION (node)
+ gcc_assert (!node->aux);
+ FOR_EACH_VARIABLE (vnode)
+ gcc_assert (!vnode->aux);
+ }
/* Mark functions whose bodies are obviously needed.
This is mostly when they can be referenced externally. Inline clones
are special since their declarations are shared with master clone and thus
if (file)
fprintf (file, "\n");
-#ifdef ENABLE_CHECKING
- symtab_node::verify_symtab_nodes ();
-#endif
+ symtab_node::checking_verify_symtab_nodes ();
/* If we removed something, perhaps profile could be improved. */
if (changed && optimize && inline_edge_summary_vec.exists ())
{
if (var->aux != BOTTOM)
{
-#ifdef ENABLE_CHECKING
/* Not having the single user known means that the VAR is
unreachable. Either someone forgot to remove unreachable
variables or the reachability here is wrong. */
- gcc_assert (single_user_map.get (var));
-#endif
+ gcc_checking_assert (single_user_map.get (var));
+
if (dump_file)
{
fprintf (dump_file, "Variable %s/%i is used by single function\n",
/* To provide consistency in naming, all IRA external variables,
functions, common typedefs start with prefix ira_. */
-#ifdef ENABLE_CHECKING
+#if CHECKING_P
#define ENABLE_IRA_CHECKING
#endif
df_remove_problem (df_live);
gcc_checking_assert (df_live == NULL);
-#ifdef ENABLE_CHECKING
- df->changeable_flags |= DF_VERIFY_SCHEDULED;
-#endif
+ if (flag_checking)
+ df->changeable_flags |= DF_VERIFY_SCHEDULED;
+
df_analyze ();
init_reg_equiv ();
iv_analysis_done ();
-#ifdef ENABLE_CHECKING
- verify_loop_structure ();
-#endif
+ checking_verify_loop_structure ();
}
/* Ensure that the dominators are computed, like flow_loops_find does. */
calculate_dominance_info (CDI_DOMINATORS);
-#ifdef ENABLE_CHECKING
if (!needs_fixup)
- verify_loop_structure ();
-#endif
+ checking_verify_loop_structure ();
/* Clear all flags. */
if (recorded_exits)
/* Dump loops. */
flow_loops_dump (dump_file, NULL, 1);
-#ifdef ENABLE_CHECKING
- verify_loop_structure ();
-#endif
+ checking_verify_loop_structure ();
timevar_pop (TV_LOOP_INIT);
}
/* Apply flags to loops. */
apply_loop_flags (current_loops->state | record_exits);
-#ifdef ENABLE_CHECKING
- verify_loop_structure ();
-#endif
+ checking_verify_loop_structure ();
timevar_pop (TV_LOOP_INIT);
invariant_table = NULL;
invariant_table_size = 0;
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ checking_verify_flow_info ();
}
bitmap_initialize (&all_spilled_pseudos, ®_obstack);
create_live_range_start_chains ();
setup_live_pseudos_and_spill_after_risky_transforms (&all_spilled_pseudos);
-#ifdef ENABLE_CHECKING
- if (!flag_ipa_ra)
+ if (flag_checking && !flag_ipa_ra)
for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
if (lra_reg_info[i].nrefs != 0 && reg_renumber[i] >= 0
&& lra_reg_info[i].call_p
&& overlaps_hard_reg_set_p (call_used_reg_set,
PSEUDO_REGNO_MODE (i), reg_renumber[i]))
gcc_unreachable ();
-#endif
/* Setup insns to process on the next constraint pass. */
bitmap_initialize (&changed_pseudo_bitmap, ®_obstack);
init_live_reload_and_inheritance_pseudos ();
bitmap_clear (&equiv_insn_bitmap);
/* If we used a new hard regno, changed_p should be true because the
hard reg is assigned to a new pseudo. */
-#ifdef ENABLE_CHECKING
- if (! changed_p)
+ if (flag_checking && !changed_p)
{
for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
if (lra_reg_info[i].nrefs != 0
lra_assert (df_regs_ever_live_p (hard_regno + j));
}
}
-#endif
return changed_p;
}
bitmap_initialize (&insns_with_changed_offsets, ®_obstack);
if (final_p)
{
-#ifdef ENABLE_CHECKING
- update_reg_eliminate (&insns_with_changed_offsets);
- if (! bitmap_empty_p (&insns_with_changed_offsets))
- gcc_unreachable ();
-#endif
+ if (flag_checking)
+ {
+ update_reg_eliminate (&insns_with_changed_offsets);
+ gcc_assert (bitmap_empty_p (&insns_with_changed_offsets));
+ }
/* We change eliminable hard registers in insns so we should do
this for all insns containing any eliminable hard
register. */
/* True if the pseudo should not be assigned to a stack register. */
bool no_stack_p;
#endif
-#ifdef ENABLE_CHECKING
/* True if the pseudo crosses a call. It is setup in lra-lives.c
and used to check that the pseudo crossing a call did not get a
call used hard register. */
bool call_p;
-#endif
/* Number of references and execution frequencies of the register in
*non-debug* insns. */
int nrefs, freq;
for (hr = 0; hr < FIRST_PSEUDO_REGISTER; hr++)
if (HARD_REGNO_CALL_PART_CLOBBERED (hr, PSEUDO_REGNO_MODE (regno)))
SET_HARD_REG_BIT (lra_reg_info[regno].conflict_hard_regs, hr);
-#ifdef ENABLE_CHECKING
lra_reg_info[regno].call_p = true;
-#endif
if (! sparseset_bit_p (pseudos_live_through_setjumps, regno))
return;
sparseset_clear_bit (pseudos_live_through_setjumps, regno);
lra_reg_info[i].biggest_mode = GET_MODE (regno_reg_rtx[i]);
else
lra_reg_info[i].biggest_mode = VOIDmode;
-#ifdef ENABLE_CHECKING
lra_reg_info[i].call_p = false;
-#endif
if (i >= FIRST_PSEUDO_REGISTER
&& lra_reg_info[i].nrefs != 0)
{
last_basic_block_for_fn (cfun));
FOR_ALL_BB_FN (bb, cfun)
{
-#ifdef ENABLE_CHECKING
- if (bb->index < 0 || bb->index >= last_basic_block_for_fn (cfun))
- abort ();
-#endif
+ gcc_checking_assert (bb->index >= 0
+ && bb->index < last_basic_block_for_fn (cfun));
bb_info = get_remat_bb_data (bb);
bb_info->bb = bb;
bitmap_initialize (&bb_info->changed_regs, ®_obstack);
decode_asm_operands (PATTERN (insn), NULL,
data->operand_loc,
constraints, operand_mode, NULL);
-#ifdef ENABLE_CHECKING
- {
- int i;
- for (i = 0; i < nop; i++)
+ if (flag_checking)
+ for (int i = 0; i < nop; i++)
lra_assert
(insn_static_data->operand[i].mode == operand_mode[i]
&& insn_static_data->operand[i].constraint == constraints[i]
&& ! insn_static_data->operand[i].is_operator);
- }
-#endif
}
-#ifdef ENABLE_CHECKING
- {
- int i;
- for (i = 0; i < insn_static_data->n_operands; i++)
+ if (flag_checking)
+ for (int i = 0; i < insn_static_data->n_operands; i++)
lra_assert
(insn_static_data->operand[i].type
== (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT
: insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT
: OP_IN));
- }
-#endif
}
else
{
\f
-#ifdef ENABLE_CHECKING
-
/* Function checks RTL for correctness. If FINAL_P is true, it is
done at the end of LRA and the check is more rigorous. */
static void
{
if (final_p)
{
-#ifdef ENABLED_CHECKING
extract_constrain_insn (insn);
-#endif
continue;
}
/* LRA code is based on assumption that all addresses can be
fatal_insn_not_found (insn);
}
}
-#endif /* #ifdef ENABLE_CHECKING */
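
(Reviewer note: the guard removed above tested ENABLED_CHECKING, with a
stray "D", a macro that is never defined anywhere, so the
extract_constrain_insn call was silently dead code; this is the
"incorrect guard" the ChangeLog entry for check_rtl refers to.)
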
/* Determine if the current function has an exception receiver block
that reaches the exit block via non-exceptional edges */
init_insn_recog_data ();
-#ifdef ENABLE_CHECKING
/* Some quick check on RTL generated by previous passes. */
- check_rtl (false);
-#endif
+ if (flag_checking)
+ check_rtl (false);
lra_in_progress = 1;
by this, so unshare everything here. */
unshare_all_rtl_again (get_insns ());
-#ifdef ENABLE_CHECKING
- check_rtl (true);
-#endif
+ if (flag_checking)
+ check_rtl (true);
timevar_pop (TV_LRA);
}
lto_input_toplevel_asms (file_data, order_base);
/* AUX pointers should be all non-zero for function nodes read from the stream. */
-#ifdef ENABLE_CHECKING
- FOR_EACH_VEC_ELT (nodes, i, node)
- gcc_assert (node->aux || !is_a <cgraph_node *> (node));
-#endif
+ if (flag_checking)
+ {
+ FOR_EACH_VEC_ELT (nodes, i, node)
+ gcc_assert (node->aux || !is_a <cgraph_node *> (node));
+ }
FOR_EACH_VEC_ELT (nodes, i, node)
{
int ref;
std::swap (sccstack[first + i],
sccstack[first + entry_start + i]);
- if (scc_entry_len == 1)
- ; /* We already sorted SCC deterministically in hash_scc. */
- else
- /* Check that we have only one SCC.
- Naturally we may have conflicts if hash function is not
- strong enough. Lets see how far this gets. */
- {
-#ifdef ENABLE_CHECKING
- gcc_unreachable ();
-#endif
- }
+ /* We already sorted SCC deterministically in hash_scc. */
+
+ /* Check that we have only one SCC.
+ Naturally we may have conflicts if the hash function is not
+ strong enough. Let's see how far this gets. */
+ gcc_checking_assert (scc_entry_len == 1);
}
/* Write LTO_tree_scc. */
lto_output (void)
{
struct lto_out_decl_state *decl_state;
-#ifdef ENABLE_CHECKING
- bitmap output = lto_bitmap_alloc ();
-#endif
+ bitmap output = NULL;
int i, n_nodes;
lto_symtab_encoder_t encoder = lto_get_out_decl_state ()->symtab_node_encoder;
+ if (flag_checking)
+ output = lto_bitmap_alloc ();
+
/* Initialize the streamer. */
lto_streamer_init ();
if (lto_symtab_encoder_encode_body_p (encoder, node)
&& !node->alias)
{
-#ifdef ENABLE_CHECKING
- gcc_assert (!bitmap_bit_p (output, DECL_UID (node->decl)));
- bitmap_set_bit (output, DECL_UID (node->decl));
-#endif
+ if (flag_checking)
+ {
+ gcc_assert (!bitmap_bit_p (output, DECL_UID (node->decl)));
+ bitmap_set_bit (output, DECL_UID (node->decl));
+ }
decl_state = lto_new_out_decl_state ();
lto_push_out_decl_state (decl_state);
if (gimple_has_body_p (node->decl) || !flag_wpa
&& !node->alias)
{
timevar_push (TV_IPA_LTO_CTORS_OUT);
-#ifdef ENABLE_CHECKING
- gcc_assert (!bitmap_bit_p (output, DECL_UID (node->decl)));
- bitmap_set_bit (output, DECL_UID (node->decl));
-#endif
+ if (flag_checking)
+ {
+ gcc_assert (!bitmap_bit_p (output, DECL_UID (node->decl)));
+ bitmap_set_bit (output, DECL_UID (node->decl));
+ }
decl_state = lto_new_out_decl_state ();
lto_push_out_decl_state (decl_state);
if (DECL_INITIAL (node->decl) != error_mark_node
output_offload_tables ();
-#ifdef ENABLE_CHECKING
+#if CHECKING_P
lto_bitmap_free (output);
#endif
}
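
(Reviewer note: the allocation of OUTPUT is now guarded by the run-time
flag_checking, while this free remains under the compile-time
CHECKING_P; when GCC is built with checking but run with -fno-checking,
OUTPUT stays NULL, so this presumably relies on lto_bitmap_free
tolerating a NULL bitmap.  Guarding the free with flag_checking as well
would be more symmetric.)
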
void
lto_streamer_init (void)
{
-#ifdef ENABLE_CHECKING
/* Check that all the TS_* handled by the reader and writer routines
match exactly the structures defined in treestruct.def. When a
new TS_* structure is added, the streamer should be updated to
handle it. */
- streamer_check_handled_ts_structures ();
-#endif
+ if (flag_checking)
+ streamer_check_handled_ts_structures ();
#ifdef LTO_STREAMER_DEBUG
tree_htab = new hash_table<tree_hash_entry> (31);
+2015-10-27 Mikhail Maltsev <maltsevm@gmail.com>
+
+ * lto.c (unify_scc): Use flag_checking and remove ENABLE_CHECKING
+ conditionals.
+ (lto_fixup_state): Likewise.
+ (do_whole_program_analysis): Use
+ symtab_node::checking_verify_symtab_nodes and remove ENABLE_CHECKING
+ conditionals.
+
2015-10-13 Jakub Jelinek <jakub@redhat.com>
* lto-lang.c (DEF_FUNCTION_TYPE_9, DEF_FUNCTION_TYPE_10,
num_sccs_merged++;
total_scc_size_merged += len;
-#ifdef ENABLE_CHECKING
- for (unsigned i = 0; i < len; ++i)
- {
- tree t = map[2*i+1];
- enum tree_code code = TREE_CODE (t);
- /* IDENTIFIER_NODEs should be singletons and are merged by the
- streamer. The others should be singletons, too, and we
- should not merge them in any way. */
- gcc_assert (code != TRANSLATION_UNIT_DECL
- && code != IDENTIFIER_NODE
- && !streamer_handle_as_builtin_p (t));
- }
-#endif
+ if (flag_checking)
+ for (unsigned i = 0; i < len; ++i)
+ {
+ tree t = map[2*i+1];
+ enum tree_code code = TREE_CODE (t);
+ /* IDENTIFIER_NODEs should be singletons and are merged by the
+ streamer. The others should be singletons, too, and we
+ should not merge them in any way. */
+ gcc_assert (code != TRANSLATION_UNIT_DECL
+ && code != IDENTIFIER_NODE
+ && !streamer_handle_as_builtin_p (t));
+ }
/* Fixup the streamer cache with the prevailing nodes according
to the tree node mapping computed by compare_tree_sccs. */
for (i = 0; i < vec_safe_length (trees); i++)
{
tree t = (*trees)[i];
-#ifdef ENABLE_CHECKING
- if (TYPE_P (t))
+ if (flag_checking && TYPE_P (t))
verify_type (t);
-#endif
if (VAR_OR_FUNCTION_DECL_P (t)
&& (TREE_PUBLIC (t) || DECL_EXTERNAL (t)))
(*trees)[i] = lto_symtab_prevailing_decl (t);
fprintf (symtab->dump_file, "Optimized ");
symtab_node::dump_table (symtab->dump_file);
}
-#ifdef ENABLE_CHECKING
- symtab_node::verify_symtab_nodes ();
-#endif
+
+ symtab_node::checking_verify_symtab_nodes ();
bitmap_obstack_release (NULL);
/* We are about to launch the final LTRANS phase, stop the WPA timer. */
{
TYPE_FIELDS (ctx->record_type)
= nreverse (TYPE_FIELDS (ctx->record_type));
-#ifdef ENABLE_CHECKING
- tree field;
- unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
- for (field = TYPE_FIELDS (ctx->record_type);
- field;
- field = DECL_CHAIN (field))
- gcc_assert (DECL_ALIGN (field) == align);
-#endif
+ if (flag_checking)
+ {
+ unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
+ for (tree field = TYPE_FIELDS (ctx->record_type);
+ field;
+ field = DECL_CHAIN (field))
+ gcc_assert (DECL_ALIGN (field) == align);
+ }
layout_type (ctx->record_type);
if (offloaded)
fixup_child_record_type (ctx);
}
if (gimple_in_ssa_p (cfun))
update_ssa (TODO_update_ssa);
-#ifdef ENABLE_CHECKING
- if (!loops_state_satisfies_p (LOOPS_NEED_FIXUP))
+ if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
verify_loop_structure ();
-#endif
pop_cfun ();
}
if (changed)
cleanup_tree_cfg ();
}
-#ifdef ENABLE_CHECKING
- if (!loops_state_satisfies_p (LOOPS_NEED_FIXUP))
+ if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
verify_loop_structure ();
-#endif
pop_cfun ();
}
expand_omp (root_omp_region);
-#ifdef ENABLE_CHECKING
- if (!loops_state_satisfies_p (LOOPS_NEED_FIXUP))
+ if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
verify_loop_structure ();
-#endif
cleanup_tree_cfg ();
free_omp_regions ();
default:
break;
case OMP_CLAUSE_MAP:
-#ifdef ENABLE_CHECKING
+#if CHECKING_P
/* First check what we're prepared to handle in the following. */
switch (OMP_CLAUSE_MAP_KIND (c))
{
gcc_assert (dom_info_state (fn, CDI_POST_DOMINATORS) == DOM_NONE);
/* If we've seen errors do not bother running any verifiers. */
- if (!seen_error ())
+ if (flag_checking && !seen_error ())
{
-#if defined ENABLE_CHECKING
dom_state pre_verify_state = dom_info_state (fn, CDI_DOMINATORS);
dom_state pre_verify_pstate = dom_info_state (fn, CDI_POST_DOMINATORS);
/* Make sure verifiers don't change dominator state. */
gcc_assert (dom_info_state (fn, CDI_DOMINATORS) == pre_verify_state);
gcc_assert (dom_info_state (fn, CDI_POST_DOMINATORS) == pre_verify_pstate);
-#endif
}
fn->last_verified = flags & TODO_verify_all;
static void
execute_todo (unsigned int flags)
{
-#if defined ENABLE_CHECKING
- if (cfun
+ if (flag_checking
+ && cfun
&& need_ssa_update_p (cfun))
gcc_assert (flags & TODO_update_ssa_any);
-#endif
timevar_push (TV_TODO);
/* Helper function.  Verify that the properties have been turned into the
 properties expected by the pass. */
-#ifdef ENABLE_CHECKING
-static void
+static void DEBUG_FUNCTION
verify_curr_properties (function *fn, void *data)
{
unsigned int props = (size_t)data;
gcc_assert ((fn->curr_properties & props) == props);
}
-#endif
/* Initialize pass dump file. */
/* This is non-static so that the plugins can use it. */
/* Run pre-pass verification. */
execute_todo (pass->todo_flags_start);
-#ifdef ENABLE_CHECKING
- do_per_function (verify_curr_properties,
- (void *)(size_t)pass->properties_required);
-#endif
+ if (flag_checking)
+ do_per_function (verify_curr_properties,
+ (void *)(size_t)pass->properties_required);
/* If a timevar is present, start it. */
if (pass->tv_id != TV_NONE)
}
}
-#ifdef ENABLE_CHECKING
-
/* Callback for hash_map::traverse, asserts that the pointer map is
empty. */
gcc_assert (!value);
return false;
}
-#endif
/* Predict branch probabilities and estimate profile for basic block BB. */
FOR_EACH_BB_FN (bb, cfun)
combine_predictions_for_bb (bb);
-#ifdef ENABLE_CHECKING
- bb_predictions->traverse<void *, assert_is_empty> (NULL);
-#endif
+ if (flag_checking)
+ bb_predictions->traverse<void *, assert_is_empty> (NULL);
+
delete bb_predictions;
bb_predictions = NULL;
/* Compute frequency of basic block. */
if (bb != head)
{
-#ifdef ENABLE_CHECKING
- FOR_EACH_EDGE (e, ei, bb->preds)
- gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
- || (e->flags & EDGE_DFS_BACK));
-#endif
+ if (flag_checking)
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
+ || (e->flags & EDGE_DFS_BACK));
FOR_EACH_EDGE (e, ei, bb->preds)
if (EDGE_INFO (e)->back_edge)
*formatters[argno] = XOBFINISH (&buffer->chunk_obstack, const char *);
}
-#ifdef ENABLE_CHECKING
- for (; argno < PP_NL_ARGMAX; argno++)
- gcc_assert (!formatters[argno]);
-#endif
+ if (CHECKING_P)
+ for (; argno < PP_NL_ARGMAX; argno++)
+ gcc_assert (!formatters[argno]);
/* Revert to normal obstack and wrapping mode. */
buffer->obstack = &buffer->formatted_obstack;
/* Append the exponent. */
sprintf (last, "e%+d", dec_exp);
-#ifdef ENABLE_CHECKING
/* Verify that we can read the original value back in. */
- if (mode != VOIDmode)
+ if (flag_checking && mode != VOIDmode)
{
real_from_string (&r, str);
real_convert (&r, mode, &r);
gcc_assert (real_identical (&r, r_orig));
}
-#endif
}
/* Likewise, except always uses round-to-nearest. */
if (changed)
find_many_sub_basic_blocks (blocks);
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ checking_verify_flow_info ();
sbitmap_free (blocks);
}
static bool replace_oldest_value_mem (rtx, rtx_insn *, struct value_data *);
static bool copyprop_hardreg_forward_1 (basic_block, struct value_data *);
extern void debug_value_data (struct value_data *);
-#ifdef ENABLE_CHECKING
static void validate_value_data (struct value_data *);
-#endif
/* Free all queued updates for DEBUG_INSNs that change some reg to
register REGNO. */
if (vd->e[regno].debug_insn_changes)
free_debug_insn_changes (vd, regno);
-#ifdef ENABLE_CHECKING
- validate_value_data (vd);
-#endif
+ if (flag_checking)
+ validate_value_data (vd);
}
/* Kill the value in register REGNO for NREGS, and any other registers
continue;
vd->e[i].next_regno = dr;
-#ifdef ENABLE_CHECKING
- validate_value_data (vd);
-#endif
+ if (flag_checking)
+ validate_value_data (vd);
}
/* Return true if a mode change from ORIG to NEW is allowed for REGNO. */
skip_debug_insn_p = false;
}
-#ifdef ENABLE_CHECKING
static void
validate_value_data (struct value_data *vd)
{
i, GET_MODE_NAME (vd->e[i].mode), vd->e[i].oldest_regno,
vd->e[i].next_regno);
}
-#endif
+
\f
namespace {
#define REG_OK_STRICT
-/* We do not enable this with ENABLE_CHECKING, since it is awfully slow. */
+/* We do not enable this with CHECKING_P, since it is awfully slow. */
#undef DEBUG_RELOAD
#include "config.h"
#ifdef INSN_SCHEDULING
-#ifdef ENABLE_CHECKING
-#define CHECK (true)
-#else
-#define CHECK (false)
-#endif
-
/* Holds current parameters for the dependency analyzer. */
struct sched_deps_info_def *sched_deps_info;
rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
-#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
-#endif
+
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap. */
gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
&& DEP_PRO (new_dep) != DEP_CON (new_dep));
-#ifdef ENABLE_CHECKING
- check_dep (new_dep, mem1 != NULL);
-#endif
+ if (flag_checking)
+ check_dep (new_dep, mem1 != NULL);
if (true_dependency_cache != NULL)
{
add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
-#ifdef ENABLE_CHECKING
- check_dep (dep, false);
-#endif
+ if (flag_checking)
+ check_dep (dep, false);
add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
fprintf (stderr, "\n");
}
-#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
If RELAXED_P is true, then skip dep_weakness checks. */
static void
gcc_assert (ds & BEGIN_CONTROL);
}
}
-#endif /* ENABLE_CHECKING */
/* The following code discovers opportunities to switch a memory reference
and an increment by modifying the address. We ensure that this is done
regset_pool.v[regset_pool.n++] = rs;
}
-#ifdef ENABLE_CHECKING
/* This is used as a qsort callback for sorting regset pool stacks.
X and XX are addresses of two regsets. They are never equal. */
static int
return -1;
gcc_unreachable ();
}
-#endif
-/* Free the regset pool possibly checking for memory leaks. */
+/* Free the regset pool possibly checking for memory leaks. */
void
free_regset_pool (void)
{
-#ifdef ENABLE_CHECKING
- {
- regset *v = regset_pool.v;
- int i = 0;
- int n = regset_pool.n;
+ if (flag_checking)
+ {
+ regset *v = regset_pool.v;
+ int i = 0;
+ int n = regset_pool.n;
- regset *vv = regset_pool.vv;
- int ii = 0;
- int nn = regset_pool.nn;
+ regset *vv = regset_pool.vv;
+ int ii = 0;
+ int nn = regset_pool.nn;
- int diff = 0;
+ int diff = 0;
- gcc_assert (n <= nn);
+ gcc_assert (n <= nn);
- /* Sort both vectors so it will be possible to compare them. */
- qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
- qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
+ /* Sort both vectors so it will be possible to compare them. */
+ qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
+ qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
- while (ii < nn)
- {
- if (v[i] == vv[ii])
- i++;
- else
- /* VV[II] was lost. */
- diff++;
+ while (ii < nn)
+ {
+ if (v[i] == vv[ii])
+ i++;
+ else
+ /* VV[II] was lost. */
+ diff++;
- ii++;
- }
+ ii++;
+ }
- gcc_assert (diff == regset_pool.diff);
- }
-#endif
+ gcc_assert (diff == regset_pool.diff);
+ }
/* If not true - we have a memory leak. */
gcc_assert (regset_pool.diff == 0);
return sel_bb_head_p (insn) && sel_bb_end_p (insn);
}
-#ifdef ENABLE_CHECKING
/* Check that the region we're scheduling still has at most one
backedge. */
static void
gcc_assert (n <= 1);
}
}
-#endif
\f
/* Functions to work with control flow. */
sel_recompute_toporder ();
}
-#ifdef ENABLE_CHECKING
- verify_backedges ();
- verify_dominators (CDI_DOMINATORS);
-#endif
+ /* TODO: use separate flag for CFG checking. */
+ if (flag_checking)
+ {
+ verify_backedges ();
+ verify_dominators (CDI_DOMINATORS);
+ }
return changed;
}
they are to be removed. */
int uid;
-#ifdef ENABLE_CHECKING
/* This is initialized to the insn on which the driver stopped its traversal. */
insn_t failed_insn;
-#endif
/* True if we scheduled an insn with different register. */
bool was_renamed;
collect_unavailable_regs_from_bnds (expr, bnds, used_regs, ®_rename_data,
&original_insns);
-#ifdef ENABLE_CHECKING
/* If after reload, make sure we're working with hard regs here. */
- if (reload_completed)
+ if (flag_checking && reload_completed)
{
reg_set_iterator rsi;
unsigned i;
EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi)
gcc_unreachable ();
}
-#endif
if (EXPR_SEPARABLE_P (expr))
{
return false;
}
-#ifdef ENABLE_CHECKING
/* Return true if either of expressions from ORIG_OPS can be blocked
by previously created bookkeeping code. STATIC_PARAMS points to static
parameters of move_op. */
return false;
}
-#endif
/* Clear VINSN_VEC and detach vinsns. */
static void
block_bnd = BLOCK_FOR_INSN (BND_TO (bnd));
prev = BND_TO (bnd);
-#ifdef ENABLE_CHECKING
/* Moving of jump should not cross any other jumps or beginnings of new
basic blocks. The only exception is when we move a jump through
mutually exclusive insns along fallthru edges. */
- if (block_from != block_bnd)
+ if (flag_checking && block_from != block_bnd)
{
bb = block_from;
for (link = PREV_INSN (insn); link != PREV_INSN (prev);
}
}
}
-#endif
/* Jump is moved to the boundary. */
next = PREV_INSN (insn);
{
moveop_static_params_p sparams = (moveop_static_params_p) static_params;
-#ifdef ENABLE_CHECKING
sparams->failed_insn = insn;
-#endif
/* If we're scheduling separate expr, in order to generate correct code
we need to stop the search at bookkeeping code generated with the
}
}
-#ifdef ENABLE_CHECKING
/* Here, RES==1 if original expr was found at least for one of the
successors. After the loop, RES may happen to have zero value
only if at some point the expr searched is present in av_set, but is
The exception is when the original operation is blocked by
bookkeeping generated for another fence or for another path in current
move_op. */
- gcc_assert (res == 1
- || (res == 0
- && av_set_could_be_blocked_by_bookkeeping_p (orig_ops,
- static_params))
- || res == -1);
-#endif
+ gcc_checking_assert (res == 1
+ || (res == 0
+ && av_set_could_be_blocked_by_bookkeeping_p (orig_ops, static_params))
+ || res == -1);
/* Merge data, clean up, etc. */
if (res != -1 && code_motion_path_driver_info->after_merge_succs)
sparams.dest = dest;
sparams.c_expr = c_expr;
sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw));
-#ifdef ENABLE_CHECKING
sparams.failed_insn = NULL;
-#endif
sparams.was_renamed = false;
lparams.e1 = NULL;
{
imm->end_p = &(SSA_NAME_IMM_USE_NODE (var));
imm->imm_use = imm->end_p->next;
-#ifdef ENABLE_CHECKING
imm->iter_node.next = imm->imm_use->next;
-#endif
if (end_readonly_imm_use_p (imm))
return NULL_USE_OPERAND_P;
return imm->imm_use;
{
use_operand_p old = imm->imm_use;
-#ifdef ENABLE_CHECKING
/* If this assertion fails, it indicates the 'next' pointer has changed
since the last bump. This indicates that the list is being modified
via stmt changes, or SET_USE, or somesuch thing, and you need to be
using the SAFE version of the iterator. */
- gcc_assert (imm->iter_node.next == old->next);
- imm->iter_node.next = old->next->next;
-#endif
+ if (flag_checking)
+ {
+ gcc_assert (imm->iter_node.next == old->next);
+ imm->iter_node.next = old->next->next;
+ }
imm->imm_use = old->next;
if (end_readonly_imm_use_p (imm))
{
int ret;
basic_block bb;
-#ifdef ENABLE_CHECKING
- unsigned regno;
-#endif
rtx_insn *insn;
rtx_insn *tmp;
df_ref def;
last_set_in[DF_REF_REGNO (def)] = 0;
}
-#ifdef ENABLE_CHECKING
- /* last_set_in should now be all-zero. */
- for (regno = 0; regno < max_gcse_regno; regno++)
- gcc_assert (!last_set_in[regno]);
-#endif
+ if (flag_checking)
+ {
+ /* last_set_in should now be all-zero. */
+ for (unsigned regno = 0; regno < max_gcse_regno; regno++)
+ gcc_assert (!last_set_in[regno]);
+ }
/* Clear temporary marks. */
for (ptr = first_st_expr (); ptr != NULL; ptr = next_st_expr (ptr))
function_summary (symbol_table *symtab, bool ggc = false): m_ggc (ggc),
m_map (13, ggc), m_insertion_enabled (true), m_symtab (symtab)
{
-#ifdef ENABLE_CHECKING
- cgraph_node *node;
-
- FOR_EACH_FUNCTION (node)
- {
- gcc_checking_assert (node->summary_uid > 0);
- }
-#endif
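+    /* Each function should already have been assigned a summary uid.  */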
+ if (flag_checking)
+ {
+ cgraph_node *node;
+ FOR_EACH_FUNCTION (node)
+ gcc_assert (node->summary_uid > 0);
+ }
m_symtab_insertion_hook =
symtab->add_cgraph_insertion_hook
#include "tm.h"
#include "hard-reg-set.h"
-#ifdef ENABLE_CHECKING
+#if CHECKING_P
struct cumulative_args_t { void *magic; void *p; };
-#else /* !ENABLE_CHECKING */
+#else /* !CHECKING_P */
/* When using a GCC build compiler, we could use
__attribute__((transparent_union)) to get cumulative_args_t function
arguments passed like scalars where the ABI would mandate a less
efficient way of argument passing otherwise. However, that would come
- at the cost of less type-safe !ENABLE_CHECKING compilation. */
+ at the cost of less type-safe !CHECKING_P compilation. */
union cumulative_args_t { void *p; };
-#endif /* !ENABLE_CHECKING */
+#endif /* !CHECKING_P */
/* Types used by the record_gcc_switches() target function. */
enum print_switch_type
static inline CUMULATIVE_ARGS *
get_cumulative_args (cumulative_args_t arg)
{
-#ifdef ENABLE_CHECKING
+#if CHECKING_P
gcc_assert (arg.magic == CUMULATIVE_ARGS_MAGIC);
-#endif /* ENABLE_CHECKING */
+#endif /* CHECKING_P */
return (CUMULATIVE_ARGS *) arg.p;
}
{
cumulative_args_t ret;
-#ifdef ENABLE_CHECKING
+#if CHECKING_P
ret.magic = CUMULATIVE_ARGS_MAGIC;
-#endif /* ENABLE_CHECKING */
+#endif /* CHECKING_P */
ret.p = (void *) arg;
return ret;
}
#include "system.h"
#include "coretypes.h"
#include "timevar.h"
+#include "options.h"
#ifndef HAVE_CLOCK_T
typedef int clock_t;
#endif
fprintf (fp, "%8u kB\n", (unsigned) (total->ggc_mem >> 10));
-#ifdef ENABLE_CHECKING
- fprintf (fp, "Extra diagnostic checks enabled; compiler may run slowly.\n");
- fprintf (fp, "Configure with --enable-checking=release to disable checks.\n");
-#endif
+ if (flag_checking)
+ {
+ fprintf (fp, "Extra diagnostic checks enabled; "
+ "compiler may run slowly.\n");
+ fprintf (fp, "Configure with --enable-checking=release "
+ "to disable checks.\n");
+ }
#ifndef ENABLE_ASSERT_CHECKING
fprintf (fp, "Internal checks disabled; compiler is not suited for release.\n");
fprintf (fp, "Configure with --enable-checking=release to enable checks.\n");
enum availability a;
unsigned int i;
-#ifdef ENABLE_CHECKING
- cgraph_node::verify_cgraph_nodes ();
-#endif
+ cgraph_node::checking_verify_cgraph_nodes ();
bitmap_obstack_initialize (&tm_obstack);
initialize_original_copy_tables ();
FOR_EACH_FUNCTION (node)
node->aux = NULL;
-#ifdef ENABLE_CHECKING
- cgraph_node::verify_cgraph_nodes ();
-#endif
+ cgraph_node::checking_verify_cgraph_nodes ();
return 0;
}
|| (p->orig_block == NULL_TREE
&& block != NULL_TREE))
TREE_SET_BLOCK (t, p->new_block);
-#ifdef ENABLE_CHECKING
- else if (block != NULL_TREE)
+ else if (flag_checking && block != NULL_TREE)
{
while (block && TREE_CODE (block) == BLOCK && block != p->orig_block)
block = BLOCK_SUPERCONTEXT (block);
gcc_assert (block == p->orig_block);
}
-#endif
}
else if (DECL_P (t) || TREE_CODE (t) == SSA_NAME)
{
bbs.create (0);
bbs.safe_push (entry_bb);
gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
-#ifdef ENABLE_CHECKING
- verify_sese (entry_bb, exit_bb, &bbs);
-#endif
+
+ if (flag_checking)
+ verify_sese (entry_bb, exit_bb, &bbs);
/* The blocks that used to be dominated by something in BBS will now be
dominated by the new block. */
no edge to the exit block in CFG already.
Calling make_edge in such case would cause us to
mark that edge as fake and remove it later. */
-#ifdef ENABLE_CHECKING
- if (stmt == last_stmt)
+ if (flag_checking && stmt == last_stmt)
{
e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (e == NULL);
}
-#endif
/* Note that the following may create a new basic block
and renumber the existing basic blocks. */
}
else
{
-#ifdef ENABLE_CHECKING
- verify_dominators (CDI_DOMINATORS);
-#endif
+ checking_verify_dominators (CDI_DOMINATORS);
changed = false;
}
gcc_assert (dom_info_available_p (CDI_DOMINATORS));
compact_blocks ();
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ checking_verify_flow_info ();
timevar_pop (TV_TREE_CLEANUP_CFG);
BITMAP_FREE (changed_bbs);
-#ifdef ENABLE_CHECKING
- verify_loop_structure ();
-#endif
+ checking_verify_loop_structure ();
scev_reset ();
timevar_pop (TV_REPAIR_LOOPS);
}
-#ifdef ENABLE_CHECKING
+#if CHECKING_P
/* We do not process GIMPLE_SWITCHes for now. As long as the original source
was in fact structured, and we've not yet done jump threading, then none
of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */
sbitmap_free (r_reachable);
sbitmap_free (lp_reachable);
-#ifdef ENABLE_CHECKING
- verify_eh_tree (cfun);
-#endif
+ if (flag_checking)
+ verify_eh_tree (cfun);
}
/* Remove unreachable handlers if any landing pads have been removed after
&& !loop->dont_vectorize))
todo |= tree_if_conversion (loop);
-#ifdef ENABLE_CHECKING
- {
- basic_block bb;
- FOR_EACH_BB_FN (bb, fun)
- gcc_assert (!bb->aux);
- }
-#endif
+ if (flag_checking)
+ {
+ basic_block bb;
+ FOR_EACH_BB_FN (bb, fun)
+ gcc_assert (!bb->aux);
+ }
return todo;
}
fn = cg_edge->callee->decl;
cg_edge->callee->get_untransformed_body ();
-#ifdef ENABLE_CHECKING
- if (cg_edge->callee->decl != id->dst_node->decl)
+ if (flag_checking && cg_edge->callee->decl != id->dst_node->decl)
cg_edge->callee->verify ();
-#endif
/* We will be inlining this callee. */
id->eh_lp_nr = lookup_stmt_eh_lp (stmt);
pop_gimplify_context (NULL);
-#ifdef ENABLE_CHECKING
+ if (flag_checking)
{
struct cgraph_edge *e;
for (e = id.dst_node->callees; e; e = e->next_callee)
gcc_assert (e->inline_failed);
}
-#endif
/* Fold queued statements. */
fold_marked_statements (last, id.statements_to_fold);
number_blocks (fn);
delete_unreachable_blocks_update_callgraph (&id);
-#ifdef ENABLE_CHECKING
- id.dst_node->verify ();
-#endif
+ if (flag_checking)
+ id.dst_node->verify ();
/* It would be nice to check SSA/CFG/statement consistency here, but it is
not possible yet - the IPA passes might make various functions to not
if (!need_ssa_update_p (cfun))
return;
-#ifdef ENABLE_CHECKING
- timevar_push (TV_TREE_STMT_VERIFY);
+ if (flag_checking)
+ {
+ timevar_push (TV_TREE_STMT_VERIFY);
- bool err = false;
+ bool err = false;
- FOR_EACH_BB_FN (bb, cfun)
- {
- gimple_stmt_iterator gsi;
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ FOR_EACH_BB_FN (bb, cfun)
{
- gimple *stmt = gsi_stmt (gsi);
-
- ssa_op_iter i;
- use_operand_p use_p;
- FOR_EACH_SSA_USE_OPERAND (use_p, stmt, i, SSA_OP_ALL_USES)
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
- tree use = USE_FROM_PTR (use_p);
- if (TREE_CODE (use) != SSA_NAME)
- continue;
+ gimple *stmt = gsi_stmt (gsi);
- if (SSA_NAME_IN_FREE_LIST (use))
+ ssa_op_iter i;
+ use_operand_p use_p;
+ FOR_EACH_SSA_USE_OPERAND (use_p, stmt, i, SSA_OP_ALL_USES)
{
- error ("statement uses released SSA name:");
- debug_gimple_stmt (stmt);
- fprintf (stderr, "The use of ");
- print_generic_expr (stderr, use, 0);
- fprintf (stderr," should have been replaced\n");
- err = true;
+ tree use = USE_FROM_PTR (use_p);
+ if (TREE_CODE (use) != SSA_NAME)
+ continue;
+
+ if (SSA_NAME_IN_FREE_LIST (use))
+ {
+ error ("statement uses released SSA name:");
+ debug_gimple_stmt (stmt);
+ fprintf (stderr, "The use of ");
+ print_generic_expr (stderr, use, 0);
+ fprintf (stderr," should have been replaced\n");
+ err = true;
+ }
}
}
}
- }
- if (err)
- internal_error ("cannot update SSA form");
+ if (err)
+ internal_error ("cannot update SSA form");
- timevar_pop (TV_TREE_STMT_VERIFY);
-#endif
+ timevar_pop (TV_TREE_STMT_VERIFY);
+ }
timevar_push (TV_TREE_SSA_INCREMENTAL);
placement heuristics. */
prepare_block_for_update (start_bb, insert_phi_p);
-#ifdef ENABLE_CHECKING
- for (i = 1; i < num_ssa_names; ++i)
- {
- tree name = ssa_name (i);
- if (!name
- || virtual_operand_p (name))
- continue;
-
- /* For all but virtual operands, which do not have SSA names
- with overlapping life ranges, ensure that symbols marked
- for renaming do not have existing SSA names associated with
- them as we do not re-write them out-of-SSA before going
- into SSA for the remaining symbol uses. */
- if (marked_for_renaming (SSA_NAME_VAR (name)))
- {
- fprintf (stderr, "Existing SSA name for symbol marked for "
- "renaming: ");
- print_generic_expr (stderr, name, TDF_SLIM);
- fprintf (stderr, "\n");
- internal_error ("SSA corruption");
- }
- }
-#endif
+ if (flag_checking)
+ for (i = 1; i < num_ssa_names; ++i)
+ {
+ tree name = ssa_name (i);
+ if (!name
+ || virtual_operand_p (name))
+ continue;
+
+ /* For all but virtual operands, which do not have SSA names
+ with overlapping life ranges, ensure that symbols marked
+ for renaming do not have existing SSA names associated with
+ them as we do not re-write them out-of-SSA before going
+ into SSA for the remaining symbol uses. */
+ if (marked_for_renaming (SSA_NAME_VAR (name)))
+ {
+ fprintf (stderr, "Existing SSA name for symbol marked for "
+ "renaming: ");
+ print_generic_expr (stderr, name, TDF_SLIM);
+ fprintf (stderr, "\n");
+ internal_error ("SSA corruption");
+ }
+ }
}
else
{
rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
}
-#ifdef ENABLE_CHECKING
- verify_loop_structure ();
-#endif
+ checking_verify_loop_structure ();
return 0;
}
result = gimple_phi_result (phi);
if (virtual_operand_p (result))
{
-#ifdef ENABLE_CHECKING
- size_t i;
/* There should be no arguments which are not virtual, or the
results will be incorrect. */
- for (i = 0; i < gimple_phi_num_args (phi); i++)
- {
- tree arg = PHI_ARG_DEF (phi, i);
- if (TREE_CODE (arg) == SSA_NAME
- && !virtual_operand_p (arg))
- {
- fprintf (stderr, "Argument of PHI is not virtual (");
- print_generic_expr (stderr, arg, TDF_SLIM);
- fprintf (stderr, "), but the result is :");
- print_gimple_stmt (stderr, phi, 0, TDF_SLIM);
- internal_error ("SSA corruption");
- }
- }
-#endif
+ if (flag_checking)
+ for (size_t i = 0; i < gimple_phi_num_args (phi); i++)
+ {
+ tree arg = PHI_ARG_DEF (phi, i);
+ if (TREE_CODE (arg) == SSA_NAME
+ && !virtual_operand_p (arg))
+ {
+ fprintf (stderr, "Argument of PHI is not virtual (");
+ print_generic_expr (stderr, arg, TDF_SLIM);
+ fprintf (stderr, "), but the result is :");
+ print_gimple_stmt (stderr, phi, 0, TDF_SLIM);
+ internal_error ("SSA corruption");
+ }
+ }
+
remove_phi_node (&gsi, true);
}
else
variable. */
static void
-rewrite_trees (var_map map ATTRIBUTE_UNUSED)
+rewrite_trees (var_map map)
{
-#ifdef ENABLE_CHECKING
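+  /* Everything this function does is consistency checking, so skip it
+     entirely when checking is disabled.  */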
+ if (!flag_checking)
+ return;
+
basic_block bb;
/* Search for PHIs where the destination has no partition, but one
or more arguments has a partition. This should not happen and can
}
}
}
-#endif
}
/* Given the out-of-ssa info object SA (with prepared partitions)
{
fun->curr_properties &= ~(PROP_gimple_eomp);
-#ifdef ENABLE_CHECKING
- verify_loop_structure ();
-#endif
+ checking_verify_loop_structure ();
return TODO_update_ssa;
}
if (!determine_offset (first->ref, a->ref, &a->offset))
return false;
-#ifdef ENABLE_CHECKING
- {
- enum ref_step_type a_step;
- ok = suitable_reference_p (a->ref, &a_step);
- gcc_assert (ok && a_step == comp->comp_step);
- }
-#endif
+ enum ref_step_type a_step;
+ gcc_checking_assert (suitable_reference_p (a->ref, &a_step)
+ && a_step == comp->comp_step);
}
/* If there is a write inside the component, we must know whether the
unsigned base ATTRIBUTE_UNUSED)
{
/* FIXME implement this. */
-#ifdef ENABLE_CHECKING
- internal_error ("unimplemented functionality");
-#endif
+ if (flag_checking)
+ internal_error ("unimplemented functionality");
gcc_unreachable ();
}
ao_ref_alias_set (ref2), -1,
tbaa_p);
- /* We really do not want to end up here, but returning true is safe. */
-#ifdef ENABLE_CHECKING
gcc_unreachable ();
-#else
- return true;
-#endif
}
static bool
#include "ipa-utils.h"
#include "cfgloop.h"
-#ifdef ENABLE_CHECKING
-static void verify_live_on_entry (tree_live_info_p);
-#endif
+static void verify_live_on_entry (tree_live_info_p);
/* VARMAP maintains a mapping from SSA version number to real variables.
live_worklist (live);
-#ifdef ENABLE_CHECKING
- verify_live_on_entry (live);
-#endif
+ if (flag_checking)
+ verify_live_on_entry (live);
calculate_live_on_exit (live);
}
-#ifdef ENABLE_CHECKING
/* Verify that SSA_VAR is a non-virtual SSA_NAME. */
void
}
gcc_assert (num <= 0);
}
-#endif
extern void dump_var_map (FILE *, var_map);
extern void debug (_var_map &ref);
extern void debug (_var_map *ptr);
-#ifdef ENABLE_CHECKING
extern void register_ssa_partition_check (tree ssa_var);
-#endif
/* Return number of partitions in MAP. */
partitions may be filtered out by a view later. */
static inline void
-register_ssa_partition (var_map map ATTRIBUTE_UNUSED,
- tree ssa_var ATTRIBUTE_UNUSED)
+register_ssa_partition (var_map map ATTRIBUTE_UNUSED, tree ssa_var)
{
-#if defined ENABLE_CHECKING
- register_ssa_partition_check (ssa_var);
-#endif
+ if (flag_checking)
+ register_ssa_partition_check (ssa_var);
}
/* Clean up the information about numbers of iterations, since
complete unrolling might have invalidated it. */
scev_reset ();
-#ifdef ENABLE_CHECKING
- if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
+ if (flag_checking && loops_state_satisfies_p (LOOP_CLOSED_SSA))
verify_loop_closed_ssa (true);
-#endif
}
if (loop_closed_ssa_invalidated)
BITMAP_FREE (loop_closed_ssa_invalidated);
edge e;
edge_iterator ei;
-#ifdef ENABLE_CHECKING
/* Check that at least one of the edges entering the EXIT block exits
the loop, or a superloop of that loop, that VAR is defined in. */
- gimple *def_stmt = SSA_NAME_DEF_STMT (var);
- basic_block def_bb = gimple_bb (def_stmt);
- FOR_EACH_EDGE (e, ei, exit->preds)
+ if (flag_checking)
{
- struct loop *aloop = find_common_loop (def_bb->loop_father,
- e->src->loop_father);
- if (!flow_bb_inside_loop_p (aloop, e->dest))
- break;
+ gimple *def_stmt = SSA_NAME_DEF_STMT (var);
+ basic_block def_bb = gimple_bb (def_stmt);
+ FOR_EACH_EDGE (e, ei, exit->preds)
+ {
+ struct loop *aloop = find_common_loop (def_bb->loop_father,
+ e->src->loop_father);
+ if (!flow_bb_inside_loop_p (aloop, e->dest))
+ break;
+ }
+ gcc_assert (e);
}
- gcc_checking_assert (e);
-#endif
-
phi = create_phi_node (NULL_TREE, exit);
create_new_def_for (var, phi, gimple_phi_result_ptr (phi));
FOR_EACH_EDGE (e, ei, exit->preds)
gimple_cond_set_rhs (exit_if, exit_bound);
update_stmt (exit_if);
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
- verify_loop_structure ();
- verify_loop_closed_ssa (true);
-#endif
+ checking_verify_flow_info ();
+ checking_verify_loop_structure ();
+ checking_verify_loop_closed_ssa (true);
}
/* Wrapper over tree_transform_and_unroll_loop for case we do not
extern void rewrite_into_loop_closed_ssa (bitmap, unsigned);
extern void rewrite_virtuals_into_loop_closed_ssa (struct loop *);
extern void verify_loop_closed_ssa (bool);
+
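+/* Verify loop-closed SSA form, if internal consistency checks are enabled.  */
+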
+static inline void
+checking_verify_loop_closed_ssa (bool verify_ssa_p)
+{
+ if (flag_checking)
+ verify_loop_closed_ssa (verify_ssa_p);
+}
+
extern basic_block split_loop_exit_edge (edge);
extern basic_block ip_end_pos (struct loop *);
extern basic_block ip_normal_pos (struct loop *);
calculate_dominance_info (CDI_DOMINATORS);
calculate_dominance_info (CDI_POST_DOMINATORS);
-#ifdef ENABLE_CHECKING
- FOR_EACH_BB_FN (bb, fun)
- gcc_assert (!bb->aux);
-#endif
+ if (flag_checking)
+ FOR_EACH_BB_FN (bb, fun)
+ gcc_assert (!bb->aux);
for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
if (FLOAT_TYPE_P (TREE_TYPE (arg))
}
/* If we get here, something has gone wrong. */
-#ifdef ENABLE_CHECKING
- fprintf (stderr, "unhandled expression in get_expr_operands():\n");
- debug_tree (expr);
- fputs ("\n", stderr);
-#endif
- gcc_unreachable ();
+  if (flag_checking)
+    {
+      fprintf (stderr, "unhandled expression in get_expr_operands():\n");
+      debug_tree (expr);
+      fputs ("\n", stderr);
+    }
+  gcc_unreachable ();
}
replace_exp_1 (use_operand_p op_p, tree val,
bool for_propagation ATTRIBUTE_UNUSED)
{
-#if defined ENABLE_CHECKING
- tree op = USE_FROM_PTR (op_p);
-
- gcc_assert (!(for_propagation
- && TREE_CODE (op) == SSA_NAME
- && TREE_CODE (val) == SSA_NAME
- && !may_propagate_copy (op, val)));
-#endif
+ if (flag_checking)
+ {
+ tree op = USE_FROM_PTR (op_p);
+ gcc_assert (!(for_propagation
+ && TREE_CODE (op) == SSA_NAME
+ && TREE_CODE (val) == SSA_NAME
+ && !may_propagate_copy (op, val)));
+ }
if (TREE_CODE (val) == SSA_NAME)
SET_USE (op_p, val);
int i;
constraint_t c;
-#ifdef ENABLE_CHECKING
- for (unsigned int j = 0; j < graph->size; j++)
- gcc_assert (find (j) == j);
-#endif
+ if (flag_checking)
+ {
+ for (unsigned int j = 0; j < graph->size; j++)
+ gcc_assert (find (j) == j);
+ }
FOR_EACH_VEC_ELT (constraints, i, c)
{
/* A place for the many, many bitmaps we create. */
static bitmap_obstack ter_bitmap_obstack;
-#ifdef ENABLE_CHECKING
extern void debug_ter (FILE *, temp_expr_table *);
-#endif
/* Create a new TER table for MAP. */
{
bitmap ret = NULL;
-#ifdef ENABLE_CHECKING
- unsigned x;
- for (x = 0; x <= num_var_partitions (t->map); x++)
- gcc_assert (!t->kill_list[x]);
- for (x = 0; x < num_ssa_names; x++)
+ if (flag_checking)
{
- gcc_assert (t->expr_decl_uids[x] == NULL);
- gcc_assert (t->partition_dependencies[x] == NULL);
+ for (unsigned x = 0; x <= num_var_partitions (t->map); x++)
+ gcc_assert (!t->kill_list[x]);
+ for (unsigned x = 0; x < num_ssa_names; x++)
+ {
+ gcc_assert (t->expr_decl_uids[x] == NULL);
+ gcc_assert (t->partition_dependencies[x] == NULL);
+ }
}
-#endif
BITMAP_FREE (t->partition_in_use);
BITMAP_FREE (t->new_replaceable_dependencies);
}
-#ifdef ENABLE_CHECKING
/* Dump the status of the various tables in the expression table. This is used
exclusively to debug TER. F is the place to send debug info and T is the
table being debugged. */
fprintf (f, "\n----------\n");
}
-#endif
scale_bbs_frequencies_int (region_copy, n_region, entry_freq, total_freq);
}
-#ifdef ENABLE_CHECKING
- verify_jump_thread (region_copy, n_region);
-#endif
+ if (flag_checking)
+ verify_jump_thread (region_copy, n_region);
/* Remove the last branch in the jump thread path. */
remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
return v->locus;
}
+/* Verify SSA invariants, if internal consistency checks are enabled. */
+
+static inline void
+checking_verify_ssa (bool check_modified_stmt, bool check_ssa_operands)
+{
+ if (flag_checking)
+ verify_ssa (check_modified_stmt, check_ssa_operands);
+}
#endif /* GCC_TREE_SSA_H */
if (MAY_HAVE_DEBUG_STMTS)
insert_debug_temp_for_var_def (NULL, var);
-#ifdef ENABLE_CHECKING
- verify_imm_links (stderr, var);
-#endif
+ if (flag_checking)
+ verify_imm_links (stderr, var);
while (imm->next != imm)
delink_imm_use (imm->next);
if ((fun->curr_properties & PROP_gimple_lva) == 0)
expand_ifn_va_arg_1 (fun);
-#if ENABLE_CHECKING
- basic_block bb;
- gimple_stmt_iterator i;
- FOR_EACH_BB_FN (bb, fun)
- for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
- gcc_assert (!gimple_call_ifn_va_arg_p (gsi_stmt (i)));
-#endif
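+  /* Verify that no calls to IFN_VA_ARG remain.  */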
+ if (flag_checking)
+ {
+ basic_block bb;
+ gimple_stmt_iterator i;
+ FOR_EACH_BB_FN (bb, fun)
+ for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
+ gcc_assert (!gimple_call_ifn_va_arg_p (gsi_stmt (i)));
+ }
}
namespace {
free (new_bbs);
free (bbs);
-#ifdef ENABLE_CHECKING
- verify_dominators (CDI_DOMINATORS);
-#endif
+ checking_verify_dominators (CDI_DOMINATORS);
return new_loop;
}
return true;
}
-#ifdef ENABLE_CHECKING
static void
-slpeel_verify_cfg_after_peeling (struct loop *first_loop,
- struct loop *second_loop)
+slpeel_checking_verify_cfg_after_peeling (struct loop *first_loop,
+ struct loop *second_loop)
{
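+  /* All of this function's work is consistency checking.  */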
+ if (!flag_checking)
+ return;
+
basic_block loop1_exit_bb = single_exit (first_loop)->dest;
basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;
second_loop. */
/* TODO */
}
-#endif
/* If the run time cost model check determines that vectorization is
not profitable and hence scalar loop should be generated then set
0, LOOP_VINFO_VECT_FACTOR (loop_vinfo));
gcc_assert (new_loop);
gcc_assert (loop_num == loop->num);
-#ifdef ENABLE_CHECKING
- slpeel_verify_cfg_after_peeling (loop, new_loop);
-#endif
+ slpeel_checking_verify_cfg_after_peeling (loop, new_loop);
/* A guard that controls whether the new_loop is to be executed or skipped
is placed in LOOP->exit. LOOP->exit therefore has two successors - one
bound, 0);
gcc_assert (new_loop);
-#ifdef ENABLE_CHECKING
- slpeel_verify_cfg_after_peeling (new_loop, loop);
-#endif
+ slpeel_checking_verify_cfg_after_peeling (new_loop, loop);
/* For vectorization factor N, we need to copy at most N-1 values
for alignment and this means N-2 loopback edge executions. */
max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 2;
supports_overflow_infinity (const_tree type)
{
tree min = vrp_val_min (type), max = vrp_val_max (type);
-#ifdef ENABLE_CHECKING
- gcc_assert (needs_overflow_infinity (type));
-#endif
+ gcc_checking_assert (needs_overflow_infinity (type));
return (min != NULL_TREE
&& CONSTANT_CLASS_P (min)
&& max != NULL_TREE
set_value_range (value_range *vr, enum value_range_type t, tree min,
tree max, bitmap equiv)
{
-#if defined ENABLE_CHECKING
/* Check the validity of the range. */
- if (t == VR_RANGE || t == VR_ANTI_RANGE)
+ if (flag_checking
+ && (t == VR_RANGE || t == VR_ANTI_RANGE))
{
int cmp;
|| !is_overflow_infinity (max));
}
- if (t == VR_UNDEFINED || t == VR_VARYING)
- gcc_assert (min == NULL_TREE && max == NULL_TREE);
-
- if (t == VR_UNDEFINED || t == VR_VARYING)
- gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
-#endif
+ if (flag_checking
+ && (t == VR_UNDEFINED || t == VR_VARYING))
+ {
+ gcc_assert (min == NULL_TREE && max == NULL_TREE);
+ gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
+ }
vr->type = t;
vr->min = min;
/* Traverse every type found freeing its language data. */
FOR_EACH_VEC_ELT (fld.types, i, t)
free_lang_data_in_type (t);
-#ifdef ENABLE_CHECKING
- FOR_EACH_VEC_ELT (fld.types, i, t)
- verify_type (t);
-#endif
+ if (flag_checking)
+ {
+ FOR_EACH_VEC_ELT (fld.types, i, t)
+ verify_type (t);
+ }
delete fld.pset;
fld.worklist.release ();
hist2->hvalue.next = hist->hvalue.next;
}
free (hist->hvalue.counters);
-#ifdef ENABLE_CHECKING
- memset (hist, 0xab, sizeof (*hist));
-#endif
+ if (flag_checking)
+ memset (hist, 0xab, sizeof (*hist));
free (hist);
}
{
histogram_value hist = *(histogram_value *) slot;
free (hist->hvalue.counters);
-#ifdef ENABLE_CHECKING
- memset (hist, 0xab, sizeof (*hist));
-#endif
+ if (flag_checking)
+ memset (hist, 0xab, sizeof (*hist));
free (hist);
return 1;
}
/* Macro to access MEM_OFFSET as an HOST_WIDE_INT. Evaluates MEM twice. */
#define INT_MEM_OFFSET(mem) (MEM_OFFSET_KNOWN_P (mem) ? MEM_OFFSET (mem) : 0)
-#if ENABLE_CHECKING && (GCC_VERSION >= 2007)
+#if CHECKING_P && (GCC_VERSION >= 2007)
/* Access VAR's Ith part's offset, checking that it's not a one-part
variable. */
return 0;
}
-#if ENABLE_CHECKING
/* Check the order of entries in one-part variables. */
int
return 1;
}
-#endif
/* Mark with VALUE_RECURSED_INTO values that have neighbors that are
more likely to be chosen as canonical for an equivalence set.
else
gcc_unreachable ();
-#if ENABLE_CHECKING
- while (list)
- {
- if (list->offset == 0
- && (dv_as_opaque (list->dv) == dv_as_opaque (dv)
- || dv_as_opaque (list->dv) == dv_as_opaque (cdv)))
- gcc_unreachable ();
+ if (flag_checking)
+ while (list)
+ {
+ if (list->offset == 0
+ && (dv_as_opaque (list->dv) == dv_as_opaque (dv)
+ || dv_as_opaque (list->dv) == dv_as_opaque (cdv)))
+ gcc_unreachable ();
- list = list->next;
- }
-#endif
+ list = list->next;
+ }
}
}
->traverse <dataflow_set *, canonicalize_values_mark> (out);
shared_hash_htab (out->vars)
->traverse <dataflow_set *, canonicalize_values_star> (out);
-#if ENABLE_CHECKING
- shared_hash_htab (out->vars)
- ->traverse <dataflow_set *, canonicalize_loc_order_check> (out);
-#endif
+ if (flag_checking)
+ shared_hash_htab (out->vars)
+ ->traverse <dataflow_set *, canonicalize_loc_order_check> (out);
}
changed = dataflow_set_different (&old_out, out);
dataflow_set_destroy (&old_out);
if (adjust)
{
dataflow_post_merge_adjust (in, &VTI (bb)->permp);
-#if ENABLE_CHECKING
- /* Merge and merge_adjust should keep entries in
- canonical order. */
- shared_hash_htab (in->vars)
- ->traverse <dataflow_set *,
- canonicalize_loc_order_check> (in);
-#endif
+
+ if (flag_checking)
+ /* Merge and merge_adjust should keep entries in
+ canonical order. */
+ shared_hash_htab (in->vars)
+ ->traverse <dataflow_set *,
+ canonicalize_loc_order_check> (in);
+
if (dst_can_be_shared)
{
shared_hash_destroy (in->vars);
again. */
dataflow_set_clear (&VTI (bb)->in);
}
-#ifdef ENABLE_CHECKING
- shared_hash_htab (cur.vars)
- ->traverse <variable_table_type *, emit_notes_for_differences_1>
- (shared_hash_htab (empty_shared_hash));
-#endif
+
+ if (flag_checking)
+ shared_hash_htab (cur.vars)
+ ->traverse <variable_table_type *, emit_notes_for_differences_1>
+ (shared_hash_htab (empty_shared_hash));
+
dataflow_set_destroy (&cur);
if (MAY_HAVE_DEBUG_INSNS)