Improve handling of memory operands in ipa-icf 2/4
Richard Biener
rguenther@suse.de
Fri Nov 13 07:52:32 GMT 2020
On Thu, 12 Nov 2020, Jan Hubicka wrote:
> Hi,
> this is an updated patch. It fixes the comparison of bitfields where I now
> check that their bitsizes and bitoffsets match (and OEP_ADDRESSOF is not
> used for bitfield references).
> I also noticed problem with dependence clique in ao_refs_may_alias that
> I copied here. Instead of base rbase should be used.
>
> Finally I ran statistics on when access paths mismatches and noticed
> that I do not really need to check that component_refs and array_refs
> are semantically equivalent since this is implied from earlier tests.
> This is described in inline comment and simplifies the code.
>
> Bootstrapped/regtested x86_64-linux, OK?
OK.
Thanks,
Richard.
> Honza
>
>
> * ipa-icf-gimple.c: Include tree-ssa-alias-compare.h.
> (find_checker::func_checker): Initialize m_tbaa.
> (func_checker::hash_operand): Use hash_ao_ref for memory accesses.
> (func_checker::compare_operand): Use compare_ao_refs for memory
> accesses.
> (func_checker::compare_gimple_assign): Do not check LHS types
> of memory stores.
> * ipa-icf-gimple.h (func_checker): Derive from ao_compare;
> add m_tbaa.
> * ipa-icf.c: Include tree-ssa-alias-compare.h.
> (sem_function::equals_private): Update call of
> func_checker::func_checker.
> * ipa-utils.h (lto_streaming_expected_p): New inline
> predicate.
> * tree-ssa-alias-compare.h: New file.
> * tree-ssa-alias.c: Include tree-ssa-alias-compare.h
> and builtins.h
> (view_converted_memref_p): New function.
> (types_equal_for_same_type_for_tbaa_p): New function.
> (ao_compare::compare_ao_refs): New member function.
> (ao_compare::hash_ao_ref): New function
>
> * c-c++-common/Wstringop-overflow-2.c: Disable ICF.
> * g++.dg/warn/Warray-bounds-8.C: Disable ICF.
>
> index f75951f7c49..26337dd7384 100644
> --- a/gcc/ipa-icf-gimple.c
> +++ b/gcc/ipa-icf-gimple.c
> @@ -40,6 +40,7 @@ along with GCC; see the file COPYING3. If not see
> #include "attribs.h"
> #include "gimple-walk.h"
>
> +#include "tree-ssa-alias-compare.h"
> #include "ipa-icf-gimple.h"
>
> namespace ipa_icf_gimple {
> @@ -52,13 +53,13 @@ namespace ipa_icf_gimple {
> of declarations that can be skipped. */
>
> func_checker::func_checker (tree source_func_decl, tree target_func_decl,
> - bool ignore_labels,
> + bool ignore_labels, bool tbaa,
> hash_set<symtab_node *> *ignored_source_nodes,
> hash_set<symtab_node *> *ignored_target_nodes)
> : m_source_func_decl (source_func_decl), m_target_func_decl (target_func_decl),
> m_ignored_source_nodes (ignored_source_nodes),
> m_ignored_target_nodes (ignored_target_nodes),
> - m_ignore_labels (ignore_labels)
> + m_ignore_labels (ignore_labels), m_tbaa (tbaa)
> {
> function *source_func = DECL_STRUCT_FUNCTION (source_func_decl);
> function *target_func = DECL_STRUCT_FUNCTION (target_func_decl);
> @@ -252,9 +253,16 @@ func_checker::hash_operand (const_tree arg, inchash::hash &hstate,
>
> void
> func_checker::hash_operand (const_tree arg, inchash::hash &hstate,
> - unsigned int flags, operand_access_type)
> + unsigned int flags, operand_access_type access)
> {
> - return hash_operand (arg, hstate, flags);
> + if (access == OP_MEMORY)
> + {
> + ao_ref ref;
> + ao_ref_init (&ref, const_cast <tree> (arg));
> + return hash_ao_ref (&ref, lto_streaming_expected_p (), m_tbaa, hstate);
> + }
> + else
> + return hash_operand (arg, hstate, flags);
> }
>
> bool
> @@ -314,18 +322,40 @@ func_checker::compare_operand (tree t1, tree t2, operand_access_type access)
> return true;
> else if (!t1 || !t2)
> return false;
> - if (operand_equal_p (t1, t2, OEP_MATCH_SIDE_EFFECTS))
> - return true;
> - switch (access)
> + if (access == OP_MEMORY)
> {
> - case OP_MEMORY:
> - return return_false_with_msg
> - ("operand_equal_p failed (access == memory)");
> - case OP_NORMAL:
> + ao_ref ref1, ref2;
> + ao_ref_init (&ref1, const_cast <tree> (t1));
> + ao_ref_init (&ref2, const_cast <tree> (t2));
> + int flags = compare_ao_refs (&ref1, &ref2,
> + lto_streaming_expected_p (), m_tbaa);
> +
> + if (!flags)
> + return true;
> + if (flags & SEMANTICS)
> + return return_false_with_msg
> + ("compare_ao_refs failed (semantic difference)");
> + if (flags & BASE_ALIAS_SET)
> + return return_false_with_msg
> + ("compare_ao_refs failed (base alias set difference)");
> + if (flags & REF_ALIAS_SET)
> + return return_false_with_msg
> + ("compare_ao_refs failed (ref alias set difference)");
> + if (flags & ACCESS_PATH)
> + return return_false_with_msg
> + ("compare_ao_refs failed (access path difference)");
> + if (flags & DEPENDENCE_CLIQUE)
> + return return_false_with_msg
> + ("compare_ao_refs failed (dependence clique difference)");
> + gcc_unreachable ();
> + }
> + else
> + {
> + if (operand_equal_p (t1, t2, OEP_MATCH_SIDE_EFFECTS))
> + return true;
> return return_false_with_msg
> - ("operand_equal_p failed (access == normal)");
> + ("operand_equal_p failed");
> }
> - gcc_unreachable ();
> }
>
> bool
> @@ -593,10 +623,17 @@ func_checker::compare_gimple_call (gcall *s1, gcall *s2)
>
> tree fntype1 = gimple_call_fntype (s1);
> tree fntype2 = gimple_call_fntype (s2);
> - if ((fntype1 && !fntype2)
> - || (!fntype1 && fntype2)
> - || (fntype1 && !types_compatible_p (fntype1, fntype2)))
> - return return_false_with_msg ("call function types are not compatible");
> +
> + /* For direct calls we verify that types are compatible so if we matched
> + callees, callers must match, too. For indirect calls however verify
> + function type. */
> + if (!gimple_call_fndecl (s1))
> + {
> + if ((fntype1 && !fntype2)
> + || (!fntype1 && fntype2)
> + || (fntype1 && !types_compatible_p (fntype1, fntype2)))
> + return return_false_with_msg ("call function types are not compatible");
> + }
>
> if (fntype1 && fntype2 && comp_type_attributes (fntype1, fntype2) != 1)
> return return_false_with_msg ("different fntype attributes");
> @@ -658,10 +695,10 @@ func_checker::compare_gimple_assign (gimple *s1, gimple *s2)
> arg2 = gimple_op (s2, i);
>
> /* Compare types for LHS. */
> - if (i == 0)
> + if (i == 0 && !gimple_store_p (s1))
> {
> if (!compatible_types_p (TREE_TYPE (arg1), TREE_TYPE (arg2)))
> - return return_false_with_msg ("GIMPLE NOP LHS type mismatch");
> + return return_false_with_msg ("GIMPLE LHS type mismatch");
> }
>
> if (!compare_operand (arg1, arg2, get_operand_access_type (&map, arg1)))
> @@ -889,7 +926,7 @@ func_checker::compare_gimple_asm (const gasm *g1, const gasm *g2)
> /* Helper for func_checker::classify_operands. Record that T is a load. */
>
> static bool
> -visit_load_store (gimple *, tree t, tree, void *data)
> +visit_load_store (gimple *, tree, tree t, void *data)
> {
> func_checker::operand_access_type_map *map =
> (func_checker::operand_access_type_map *) data;
> diff --git a/gcc/ipa-icf-gimple.h b/gcc/ipa-icf-gimple.h
> index f251d08100f..dd4ef13c716 100644
> --- a/gcc/ipa-icf-gimple.h
> +++ b/gcc/ipa-icf-gimple.h
> @@ -118,14 +118,14 @@ public:
>
> /* A class aggregating all connections and semantic equivalents
> for a given pair of semantic function candidates. */
> -class func_checker : operand_compare
> +class func_checker : ao_compare
> {
> public:
> /* Default constructor. */
> func_checker ():
> m_source_func_decl (NULL_TREE), m_target_func_decl (NULL_TREE),
> m_ignored_source_nodes (NULL), m_ignored_target_nodes (NULL),
> - m_ignore_labels (false)
> + m_ignore_labels (false), m_tbaa (true)
> {
> m_source_ssa_names.create (0);
> m_target_ssa_names.create (0);
> @@ -139,6 +139,7 @@ public:
> of declarations that can be skipped. */
> func_checker (tree source_func_decl, tree target_func_decl,
> bool ignore_labels = false,
> + bool tbaa = true,
> hash_set<symtab_node *> *ignored_source_nodes = NULL,
> hash_set<symtab_node *> *ignored_target_nodes = NULL);
>
> @@ -275,6 +276,9 @@ private:
> /* Flag if ignore labels in comparison. */
> bool m_ignore_labels;
>
> + /* Flag if we should compare type based alias analysis info. */
> + bool m_tbaa;
> +
> public:
> /* Return true if two operands are equal. The flags fields can be used
> to specify OEP flags described above. */
> diff --git a/gcc/ipa-icf.c b/gcc/ipa-icf.c
> index 83f9786b4b2..a283195a487 100644
> --- a/gcc/ipa-icf.c
> +++ b/gcc/ipa-icf.c
> @@ -78,6 +78,7 @@ along with GCC; see the file COPYING3. If not see
> #include "attribs.h"
> #include "print-tree.h"
> #include "ipa-utils.h"
> +#include "tree-ssa-alias-compare.h"
> #include "ipa-icf-gimple.h"
> #include "fibonacci_heap.h"
> #include "ipa-icf.h"
> @@ -843,6 +844,8 @@ sem_function::equals_private (sem_item *item)
>
> m_checker = new func_checker (decl, m_compared_func->decl,
> false,
> + opt_for_fn (m_compared_func->decl,
> + flag_strict_aliasing),
> &refs_set,
> &m_compared_func->refs_set);
> arg1 = DECL_ARGUMENTS (decl);
> diff --git a/gcc/ipa-utils.h b/gcc/ipa-utils.h
> index 178c2cbe446..880e527c590 100644
> --- a/gcc/ipa-utils.h
> +++ b/gcc/ipa-utils.h
> @@ -265,4 +265,16 @@ get_odr_name_for_type (tree type)
> return IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (type_name));
> }
>
> +/* Return true if we are going to do LTO streaming. */
> +
> +inline bool
> +lto_streaming_expected_p ()
> +{
> + /* Compilation before LTO streaming. */
> + if (flag_lto && !in_lto_p && symtab->state < IPA_SSA_AFTER_INLINING)
> + return true;
> + /* WPA or incremental link. */
> + return (flag_wpa || flag_incremental_link == INCREMENTAL_LINK_LTO);
> +}
> +
> #endif /* GCC_IPA_UTILS_H */
> diff --git a/gcc/testsuite/c-c++-common/Wstringop-overflow-2.c b/gcc/testsuite/c-c++-common/Wstringop-overflow-2.c
> index 7c7932e3cf0..63b1a309564 100644
> --- a/gcc/testsuite/c-c++-common/Wstringop-overflow-2.c
> +++ b/gcc/testsuite/c-c++-common/Wstringop-overflow-2.c
> @@ -1,7 +1,7 @@
> /* PR middle-end/91458 - inconsistent warning for writing past the end
> of an array member
> { dg-do compile }
> - { dg-options "-O2 -Wall -Wno-array-bounds" } */
> + { dg-options "-O2 -Wall -Wno-array-bounds -fno-ipa-icf" } */
>
> void sink (void*);
>
> diff --git a/gcc/testsuite/g++.dg/warn/Warray-bounds-8.C b/gcc/testsuite/g++.dg/warn/Warray-bounds-8.C
> index 6db20135668..6e0d7f3ccc4 100644
> --- a/gcc/testsuite/g++.dg/warn/Warray-bounds-8.C
> +++ b/gcc/testsuite/g++.dg/warn/Warray-bounds-8.C
> @@ -3,7 +3,7 @@
> See Wstringop-overflow-3.C for the same test that exercises the other
> warning.
> { dg-do compile }
> - { dg-options "-O2 -Wall -Wno-stringop-overflow" }
> + { dg-options "-O2 -Wall -Wno-stringop-overflow -fno-ipa-icf" }
> { dg-skip-if "" { *-*-aix* } } */
>
> void sink (void*);
> diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
> index ae66bd1eafe..3b1c4b2c1f7 100644
> --- a/gcc/tree-ssa-alias.c
> +++ b/gcc/tree-ssa-alias.c
> @@ -45,6 +45,8 @@ along with GCC; see the file COPYING3. If not see
> #include "dbgcnt.h"
> #include "gimple-pretty-print.h"
> #include "print-tree.h"
> +#include "tree-ssa-alias-compare.h"
> +#include "builtins.h"
>
> /* Broad overview of how alias analysis on gimple works:
>
> @@ -1205,7 +1207,7 @@ aliasing_component_refs_p (tree ref1,
> struct a {int array1[0]; int array[];};
> Such struct has size 0 but accesses to a.array may have non-zero size.
> In this case the size of TREE_TYPE (base1) is smaller than
> - size of TREE_TYPE (TREE_OPERNAD (base1, 0)).
> + size of TREE_TYPE (TREE_OPERAND (base1, 0)).
>
> Because we compare sizes of arrays just by sizes of their elements,
> we only need to care about zero sized array fields here. */
> @@ -1980,6 +1982,20 @@ decl_refs_may_alias_p (tree ref1, tree base1,
> return true;
> }
>
> +/* Return true if access with BASE is view converted.
> + Base must not be stripped from inner MEM_REF (&decl)
> + which is done by ao_ref_base and thus one extra walk
> + of handled components is needed. */
> +
> +static bool
> +view_converted_memref_p (tree base)
> +{
> + if (TREE_CODE (base) != MEM_REF && TREE_CODE (base) != TARGET_MEM_REF)
> + return false;
> + return same_type_for_tbaa (TREE_TYPE (base),
> + TREE_TYPE (TREE_OPERAND (base, 1))) != 1;
> +}
> +
> /* Return true if an indirect reference based on *PTR1 constrained
> to [OFFSET1, OFFSET1 + MAX_SIZE1) may alias a variable based on BASE2
> constrained to [OFFSET2, OFFSET2 + MAX_SIZE2). *PTR1 and BASE2 have
> @@ -3870,3 +3886,329 @@ attr_fnspec::verify ()
> internal_error ("invalid fn spec attribute \"%s\" arg %i", str, i);
> }
> }
> +
> +/* Return true if TYPE1 and TYPE2 will always give the same answer
> + when compared with other types using same_type_for_tbaa_p. */
> +
> +static bool
> +types_equal_for_same_type_for_tbaa_p (tree type1, tree type2,
> + bool lto_streaming_safe)
> +{
> + /* We use same_type_for_tbaa_p to match types in the access path.
> + This check is overly conservative. */
> + type1 = TYPE_MAIN_VARIANT (type1);
> + type2 = TYPE_MAIN_VARIANT (type2);
> +
> + if (TYPE_STRUCTURAL_EQUALITY_P (type1)
> + != TYPE_STRUCTURAL_EQUALITY_P (type2))
> + return false;
> + if (TYPE_STRUCTURAL_EQUALITY_P (type1))
> + return true;
> +
> + if (lto_streaming_safe)
> + return type1 == type2;
> + else
> + return TYPE_CANONICAL (type1) == TYPE_CANONICAL (type2);
> +}
> +
> +/* Compare REF1 and REF2 and return flags specifying their differences.
> + If LTO_STREAMING_SAFE is true do not use alias sets and canonical
> + types that are going to be recomputed.
> + If TBAA is true also compare TBAA metadata. */
> +
> +int
> +ao_compare::compare_ao_refs (ao_ref *ref1, ao_ref *ref2,
> + bool lto_streaming_safe,
> + bool tbaa)
> +{
> + if (TREE_THIS_VOLATILE (ref1->ref) != TREE_THIS_VOLATILE (ref2->ref))
> + return SEMANTICS;
> + tree base1 = ao_ref_base (ref1);
> + tree base2 = ao_ref_base (ref2);
> +
> + if (!known_eq (ref1->offset, ref2->offset)
> + || !known_eq (ref1->size, ref2->size)
> + || !known_eq (ref1->max_size, ref2->max_size))
> + return SEMANTICS;
> +
> + /* For variable accesses we need to compare actual paths
> + to check that both refs are accessing same address and the access size. */
> + if (!known_eq (ref1->size, ref1->max_size))
> + {
> + if (!operand_equal_p (TYPE_SIZE (TREE_TYPE (ref1->ref)),
> + TYPE_SIZE (TREE_TYPE (ref2->ref)), 0))
> + return SEMANTICS;
> + tree r1 = ref1->ref;
> + tree r2 = ref2->ref;
> +
> + /* Handle toplevel COMPONENT_REFs of bitfields.
> + Those are special since they are not allowed in
> + ADDR_EXPR. */
> + if (TREE_CODE (r1) == COMPONENT_REF
> + && DECL_BIT_FIELD (TREE_OPERAND (r1, 1)))
> + {
> + if (TREE_CODE (r2) != COMPONENT_REF
> + || !DECL_BIT_FIELD (TREE_OPERAND (r2, 1)))
> + return SEMANTICS;
> + tree field1 = TREE_OPERAND (r1, 1);
> + tree field2 = TREE_OPERAND (r2, 1);
> + if (!operand_equal_p (DECL_FIELD_OFFSET (field1),
> + DECL_FIELD_OFFSET (field2), 0)
> + || !operand_equal_p (DECL_FIELD_BIT_OFFSET (field1),
> + DECL_FIELD_BIT_OFFSET (field2), 0)
> + || !operand_equal_p (DECL_SIZE (field1), DECL_SIZE (field2), 0)
> + || !types_compatible_p (TREE_TYPE (r1),
> + TREE_TYPE (r2)))
> + return SEMANTICS;
> + r1 = TREE_OPERAND (r1, 0);
> + r2 = TREE_OPERAND (r2, 0);
> + }
> + else if (TREE_CODE (r2) == COMPONENT_REF
> + && DECL_BIT_FIELD (TREE_OPERAND (r2, 1)))
> + return SEMANTICS;
> +
> + /* Similarly for bit field refs. */
> + if (TREE_CODE (r1) == BIT_FIELD_REF)
> + {
> + if (TREE_CODE (r2) != BIT_FIELD_REF
> + || !operand_equal_p (TREE_OPERAND (r1, 1),
> + TREE_OPERAND (r2, 1), 0)
> + || !operand_equal_p (TREE_OPERAND (r1, 2),
> + TREE_OPERAND (r2, 2), 0)
> + || !types_compatible_p (TREE_TYPE (r1),
> + TREE_TYPE (r2)))
> + return SEMANTICS;
> + r1 = TREE_OPERAND (r1, 0);
> + r2 = TREE_OPERAND (r2, 0);
> + }
> + else if (TREE_CODE (r2) == BIT_FIELD_REF)
> + return SEMANTICS;
> +
> + /* Now we can compare the address of actual memory access. */
> + if (!operand_equal_p (r1, r2, OEP_ADDRESS_OF))
> + return SEMANTICS;
> + }
> + /* For constant accesses we get more matches by comparing offset only. */
> + else if (!operand_equal_p (base1, base2, OEP_ADDRESS_OF))
> + return SEMANTICS;
> +
> + /* We can't simply use get_object_alignment_1 on the full
> + reference as for accesses with variable indexes this reports
> + too conservative alignment. */
> + unsigned int align1, align2;
> + unsigned HOST_WIDE_INT bitpos1, bitpos2;
> + bool known1 = get_object_alignment_1 (base1, &align1, &bitpos1);
> + bool known2 = get_object_alignment_1 (base2, &align2, &bitpos2);
> + /* ??? For MEMREF get_object_alignment_1 determines alignment from
> + TYPE_ALIGN but still returns false. This seems to contradict
> + its description. So compare even if alignment is unknown. */
> + if (known1 != known2
> + || (bitpos1 != bitpos2 || align1 != align2))
> + return SEMANTICS;
> +
> + /* Now we know that accesses are semantically same. */
> + int flags = 0;
> +
> + /* ao_ref_base strips inner MEM_REF [&decl], recover from that here. */
> + tree rbase1 = ref1->ref;
> + if (rbase1)
> + while (handled_component_p (rbase1))
> + rbase1 = TREE_OPERAND (rbase1, 0);
> + tree rbase2 = ref2->ref;
> + while (handled_component_p (rbase2))
> + rbase2 = TREE_OPERAND (rbase2, 0);
> +
> + /* MEM_REFs and TARGET_MEM_REFs record dependence cliques which are used to
> + implement restrict pointers. MR_DEPENDENCE_CLIQUE 0 means no information.
> + Otherwise we need to match bases and cliques. */
> + if ((((TREE_CODE (rbase1) == MEM_REF || TREE_CODE (rbase1) == TARGET_MEM_REF)
> + && MR_DEPENDENCE_CLIQUE (rbase1))
> + || ((TREE_CODE (rbase2) == MEM_REF || TREE_CODE (rbase2) == TARGET_MEM_REF)
> + && MR_DEPENDENCE_CLIQUE (rbase2)))
> + && (TREE_CODE (rbase1) != TREE_CODE (rbase2)
> + || MR_DEPENDENCE_CLIQUE (rbase1) != MR_DEPENDENCE_CLIQUE (rbase2)
> + || (MR_DEPENDENCE_BASE (rbase1) != MR_DEPENDENCE_BASE (rbase2))))
> + flags |= DEPENDENCE_CLIQUE;
> +
> + if (!tbaa)
> + return flags;
> +
> + /* Alias sets are not stable across LTO streaming; be conservative here
> + and compare types the alias sets are ultimately based on. */
> + if (lto_streaming_safe)
> + {
> + tree t1 = ao_ref_alias_ptr_type (ref1);
> + tree t2 = ao_ref_alias_ptr_type (ref2);
> + if (!alias_ptr_types_compatible_p (t1, t2))
> + flags |= REF_ALIAS_SET;
> +
> + t1 = ao_ref_base_alias_ptr_type (ref1);
> + t2 = ao_ref_base_alias_ptr_type (ref2);
> + if (!alias_ptr_types_compatible_p (t1, t2))
> + flags |= BASE_ALIAS_SET;
> + }
> + else
> + {
> + if (ao_ref_alias_set (ref1) != ao_ref_alias_set (ref2))
> + flags |= REF_ALIAS_SET;
> + if (ao_ref_base_alias_set (ref1) != ao_ref_base_alias_set (ref2))
> + flags |= BASE_ALIAS_SET;
> + }
> +
> + /* Access path is used only on non-view-converted references. */
> + bool view_converted = view_converted_memref_p (rbase1);
> + if (view_converted_memref_p (rbase2) != view_converted)
> + return flags | ACCESS_PATH;
> + else if (view_converted)
> + return flags;
> +
> +
> + /* Find start of access paths and look for trailing arrays. */
> + tree c1 = ref1->ref, c2 = ref2->ref;
> + tree end_struct_ref1 = NULL, end_struct_ref2 = NULL;
> + int nskipped1 = 0, nskipped2 = 0;
> + int i = 0;
> +
> + for (tree p1 = ref1->ref; handled_component_p (p1); p1 = TREE_OPERAND (p1, 0))
> + {
> + if (component_ref_to_zero_sized_trailing_array_p (p1))
> + end_struct_ref1 = p1;
> + if (ends_tbaa_access_path_p (p1))
> + c1 = p1, nskipped1 = i;
> + i++;
> + }
> + for (tree p2 = ref2->ref; handled_component_p (p2); p2 = TREE_OPERAND (p2, 0))
> + {
> + if (component_ref_to_zero_sized_trailing_array_p (p2))
> + end_struct_ref2 = p2;
> + if (ends_tbaa_access_path_p (p2))
> + c2 = p2, nskipped1 = i;
> + i++;
> + }
> +
> + /* For variable accesses we cannot rely on offset match below.
> + We know that paths are structurally the same, so only check that
> + starts of TBAA paths did not diverge. */
> + if (!known_eq (ref1->size, ref1->max_size)
> + && nskipped1 != nskipped2)
> + return flags | ACCESS_PATH;
> +
> + /* Information about trailing refs is used by
> + aliasing_component_refs_p that is applied only if paths
> + have handled components. */
> + if (!handled_component_p (c1) && !handled_component_p (c2))
> + ;
> + else if ((end_struct_ref1 != NULL) != (end_struct_ref2 != NULL))
> + return flags | ACCESS_PATH;
> + if (end_struct_ref1
> + && TYPE_MAIN_VARIANT (TREE_TYPE (end_struct_ref1))
> + != TYPE_MAIN_VARIANT (TREE_TYPE (end_struct_ref2)))
> + return flags | ACCESS_PATH;
> +
> + /* Now compare all handled components of the access path.
> + We have three oracles that cares about access paths:
> + - aliasing_component_refs_p
> + - nonoverlapping_refs_since_match_p
> + - nonoverlapping_component_refs_p
> + We need to match things these oracles compare.
> +
> + It is only necessary to check types for compatibility
> + and offsets. Rest of what oracles compares are actual
> + addresses. Those are already known to be same:
> + - for constant accesses we check offsets
> + - for variable accesses we already matched
> + the path lexically with operand_equal_p. */
> + while (true)
> + {
> + bool comp1 = handled_component_p (c1);
> + bool comp2 = handled_component_p (c2);
> +
> + if (comp1 != comp2)
> + return flags | ACCESS_PATH;
> + if (!comp1)
> + break;
> +
> + if (TREE_CODE (c1) != TREE_CODE (c2))
> + return flags | ACCESS_PATH;
> +
> + /* aliasing_component_refs_p attempts to find type match within
> + the paths. For that reason both types needs to be equal
> + with respect to same_type_for_tbaa_p. */
> + if (!types_equal_for_same_type_for_tbaa_p (TREE_TYPE (c1),
> + TREE_TYPE (c2),
> + lto_streaming_safe))
> + return flags | ACCESS_PATH;
> + if (component_ref_to_zero_sized_trailing_array_p (c1)
> + != component_ref_to_zero_sized_trailing_array_p (c2))
> + return flags | ACCESS_PATH;
> +
> + /* aliasing_matching_component_refs_p compares
> + offsets within the path. Other properties are ignored.
> + Do not bother to verify offsets in variable accesses. Here we
> + already compared them by operand_equal_p so they are
> + structurally same. */
> + if (!known_eq (ref1->size, ref1->max_size))
> + {
> + poly_int64 offadj1, sztmc1, msztmc1;
> + bool reverse1;
> + get_ref_base_and_extent (c1, &offadj1, &sztmc1, &msztmc1, &reverse1);
> + poly_int64 offadj2, sztmc2, msztmc2;
> + bool reverse2;
> + get_ref_base_and_extent (c2, &offadj2, &sztmc2, &msztmc2, &reverse2);
> + if (!known_eq (offadj1, offadj2))
> + return flags | ACCESS_PATH;
> + }
> + c1 = TREE_OPERAND (c1, 0);
> + c2 = TREE_OPERAND (c2, 0);
> + }
> + /* Finally test the access type. */
> + if (!types_equal_for_same_type_for_tbaa_p (TREE_TYPE (c1),
> + TREE_TYPE (c2),
> + lto_streaming_safe))
> + return flags | ACCESS_PATH;
> + return flags;
> +}
> +
> +/* Hash REF to HSTATE. If LTO_STREAMING_SAFE do not use alias sets
> + and canonical types. */
> +void
> +ao_compare::hash_ao_ref (ao_ref *ref, bool lto_streaming_safe, bool tbaa,
> + inchash::hash &hstate)
> +{
> + tree base = ao_ref_base (ref);
> + tree tbase = base;
> +
> + if (!known_eq (ref->size, ref->max_size))
> + {
> + tree r = ref->ref;
> + if (TREE_CODE (r) == COMPONENT_REF
> + && DECL_BIT_FIELD (TREE_OPERAND (r, 1)))
> + {
> + tree field = TREE_OPERAND (r, 1);
> + hash_operand (DECL_FIELD_OFFSET (field), hstate, 0);
> + hash_operand (DECL_FIELD_BIT_OFFSET (field), hstate, 0);
> + hash_operand (DECL_SIZE (field), hstate, 0);
> + r = TREE_OPERAND (r, 0);
> + }
> + if (TREE_CODE (r) == BIT_FIELD_REF)
> + {
> + hash_operand (TREE_OPERAND (r, 1), hstate, 0);
> + hash_operand (TREE_OPERAND (r, 2), hstate, 0);
> + r = TREE_OPERAND (r, 0);
> + }
> + hash_operand (TYPE_SIZE (TREE_TYPE (ref->ref)), hstate, 0);
> + hash_operand (r, hstate, OEP_ADDRESS_OF);
> + }
> + else
> + {
> + hash_operand (tbase, hstate, OEP_ADDRESS_OF);
> + hstate.add_poly_int (ref->offset);
> + hstate.add_poly_int (ref->size);
> + hstate.add_poly_int (ref->max_size);
> + }
> + if (!lto_streaming_safe && tbaa)
> + {
> + hstate.add_int (ao_ref_alias_set (ref));
> + hstate.add_int (ao_ref_base_alias_set (ref));
> + }
> +}
>
--
Richard Biener <rguenther@suse.de>
SUSE Software Solutions Germany GmbH, Maxfeldstrasse 5, 90409 Nuernberg,
Germany; GF: Felix Imendörffer
More information about the Gcc-patches
mailing list