This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



Re: [PATCH, ivopts] Handle type-converted IVs better


Hello,

> > 1) using the following patch (that enables you to use affine
> >    combinations even with types whose values do not fit in
> >    HOST_WIDE_INT)
> >    http://gcc.gnu.org/ml/gcc-patches/2006-03/msg00345.html,
> >    and
> > 
> > 2) making get_computation_aff work in double_ints instead of
> >    HOST_WIDE_INTs.
> > 
> > With this cleanup (and possibly a minor change similar to your
> > aff_combination_convert), you should be able to get the required folding
> > easily.
> 
> Why do you think that moving over to double_ints would help my case?
> The problem is not that we fail to use affine combinations but that we
> fail to fold the result when adding the affine combination for cbase
> (-1 * (unsigned int) data_offset_5) to ubase ((unsigned int)
> (data_offset_5 + 2)) in get_computation_aff.

I overestimated the complexity of the problem; OK, then you can ignore part
1) and just take care of 2) -- something like the following patch.
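
To make the failure and the fix concrete (an illustrative sketch based on
the example above, not compiler output): built directly in the narrow
unsigned type, the two elements of the combination carry the casts and do
not cancel,

  (unsigned int) (data_offset_5 + 2) + -1 * (unsigned int) data_offset_5

whereas built in the wider type of data_offset_5 and only then converted,
the difference folds to the constant 2:

  (data_offset_5 + 2) - data_offset_5   /* folds to 2, then cast to unsigned int */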

Zdenek

Index: tree-ssa-loop-ivopts.c
===================================================================
*** tree-ssa-loop-ivopts.c	(revision 117051)
--- tree-ssa-loop-ivopts.c	(working copy)
*************** aff_combination_add (struct affine_tree_
*** 2762,2767 ****
--- 2762,2798 ----
      aff_combination_add_elt (comb1, comb2->rest, 1);
  }
  
+ /* Convert COMB to TYPE.  */
+ 
+ static void
+ aff_combination_convert (tree type, struct affine_tree_combination *comb)
+ {
+   unsigned prec = TYPE_PRECISION (type);
+   unsigned i;
+ 
+   /* If the precision of both types is the same, it suffices to change the type
+      of the whole combination -- the elements are allowed to have another type
+      equivalent w.r.t. STRIP_NOPS.  */
+   if (prec == TYPE_PRECISION (comb->type))
+     {
+       comb->type = type;
+       return;
+     }
+ 
+   comb->mask = (((unsigned HOST_WIDE_INT) 2 << (prec - 1)) - 1);
+   comb->offset = comb->offset & comb->mask;
+ 
+   /* The type of the elements can be different from comb->type only as
+      much as what STRIP_NOPS would remove.  We can just directly cast
+      to TYPE.  */
+   for (i = 0; i < comb->n; i++)
+     comb->elts[i] = fold_convert (type, comb->elts[i]);
+   if (comb->rest)
+     comb->rest = fold_convert (type, comb->rest);
+ 
+   comb->type = type;
+ }
+ 
  /* Splits EXPR into an affine combination of parts.  */
  
  static void
*************** fold_affine_expr (tree expr)
*** 2951,2956 ****
--- 2982,3025 ----
    return aff_combination_to_tree (&comb);
  }
  
+ /* If A is (TYPE) BA and B is (TYPE) BB, and the types of BA and BB have the
+    same precision, which is at least as wide as the precision of TYPE, stores
+    BA to A and BB to B, and returns the type of BA.  Otherwise, returns the
+    type of A and B.  */
+ 
+ static tree
+ determine_common_wider_type (tree *a, tree *b)
+ {
+   tree wider_type = NULL;
+   tree suba, subb;
+   tree atype = TREE_TYPE (*a);
+ 
+   if ((TREE_CODE (*a) == NOP_EXPR
+        || TREE_CODE (*a) == CONVERT_EXPR))
+     {
+       suba = TREE_OPERAND (*a, 0);
+       wider_type = TREE_TYPE (suba);
+       if (TYPE_PRECISION (wider_type) < TYPE_PRECISION (atype))
+ 	return atype;
+     }
+   else
+     return atype;
+ 
+   if ((TREE_CODE (*b) == NOP_EXPR
+        || TREE_CODE (*b) == CONVERT_EXPR))
+     {
+       subb = TREE_OPERAND (*b, 0);
+       if (TYPE_PRECISION (wider_type) != TYPE_PRECISION (TREE_TYPE (subb)))
+ 	return atype;
+     }
+   else
+     return atype;
+ 
+   *a = suba;
+   *b = subb;
+   return wider_type;
+ }
+ 
/* Determines the expression by which USE is expressed from induction variable
     CAND at statement AT in LOOP.  The expression is stored in a decomposed
     form into AFF.  Returns false if USE cannot be expressed using CAND.  */
*************** get_computation_aff (struct loop *loop,
*** 2965,2970 ****
--- 3034,3040 ----
    tree cbase = cand->iv->base;
    tree cstep = cand->iv->step;
    tree utype = TREE_TYPE (ubase), ctype = TREE_TYPE (cbase);
+   tree common_type;
    tree uutype;
    tree expr, delta;
    tree ratio;
*************** get_computation_aff (struct loop *loop,
*** 3088,3099 ****
       possible to compute ratioi.  */
    gcc_assert (ratioi);
  
!   tree_to_aff_combination (ubase, uutype, aff);
!   tree_to_aff_combination (cbase, uutype, &cbase_aff);
    tree_to_aff_combination (expr, uutype, &expr_aff);
    aff_combination_scale (&cbase_aff, -ratioi);
    aff_combination_scale (&expr_aff, ratioi);
    aff_combination_add (aff, &cbase_aff);
    aff_combination_add (aff, &expr_aff);
  
    return true;
--- 3158,3177 ----
       possible to compute ratioi.  */
    gcc_assert (ratioi);
  
!   /* In case both UBASE and CBASE are shortened to UUTYPE from some common
!      type, we achieve better folding by computing their difference in this
!      wider type, and casting the result to UUTYPE.  We do not need to worry
!      about overflows, as all the arithmetic will in the end be performed in
!      UUTYPE anyway.  */
!   common_type = determine_common_wider_type (&ubase, &cbase);
!   tree_to_aff_combination (ubase, common_type, aff);
!   tree_to_aff_combination (cbase, common_type, &cbase_aff);
    tree_to_aff_combination (expr, uutype, &expr_aff);
    aff_combination_scale (&cbase_aff, -ratioi);
    aff_combination_scale (&expr_aff, ratioi);
    aff_combination_add (aff, &cbase_aff);
+   if (common_type != uutype)
+     aff_combination_convert (uutype, aff);
    aff_combination_add (aff, &expr_aff);
  
    return true;
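
As a standalone sanity check of the mask computation in
aff_combination_convert above (a hypothetical test harness, not part of the
patch; unsigned long long stands in for unsigned HOST_WIDE_INT): the
expression 2 << (prec - 1) builds a mask of the low PREC bits while avoiding
the undefined full-width shift that 1 << prec would perform when PREC equals
the width of the type:

#include <stdio.h>

typedef unsigned long long uhwi;	/* stand-in for unsigned HOST_WIDE_INT */

int
main (void)
{
  unsigned precs[] = { 8, 16, 32, 64 };
  unsigned i;

  for (i = 0; i < 4; i++)
    {
      unsigned prec = precs[i];
      /* Same form as in aff_combination_convert: shifting 2 by prec - 1
	 stays within the type's width even when prec == 64.  */
      uhwi mask = (((uhwi) 2 << (prec - 1)) - 1);
      printf ("prec %2u: mask = %#llx\n", prec, mask);
    }
  return 0;
}

For prec of 8, 16, 32 and 64 this prints 0xff, 0xffff, 0xffffffff and
0xffffffffffffffff, respectively.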

