
[PATCH, PR 41775] Unsharing within build_ref_for_offset


Hi,

Bug 41775 revealed that the argument passed to build_ref_for_offset in
ipa-prop.c was not being unshared.  I have found another such case in
ipa-cp.c; all other users were either passing decls or doing their own
unsharing.  So I decided to move the unsharing into build_ref_for_offset
itself.
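
To make the new contract explicit, callers can now pass the aggregate
directly and let build_ref_for_offset do the unsharing.  Roughly (just a
sketch of the call pattern; offset and type here stand in for whatever
piece the caller wants to reference):

    /* Before: each caller unshared the base on its own.  */
    tree expr = unshare_expr (agg);
    if (build_ref_for_offset (&expr, TREE_TYPE (agg), offset, type, false))
      ...

    /* After: build_ref_for_offset unshares *EXPR itself when EXPR is not
       NULL, so callers simply pass the base.  */
    tree expr = agg;
    if (build_ref_for_offset (&expr, TREE_TYPE (agg), offset, type, false))
      ...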

I also thought this was a good opportunity to correct the spelling of
"propagate_subacesses_accross_link."

Bootstrapped and tested on x86_64-linux. OK for trunk?

Thanks,

Martin



2009-10-26  Martin Jambor  <mjambor@suse.cz>

	PR tree-optimization/41775
	* tree-sra.c (build_ref_for_offset): Unshare *expr if not NULL.
	(generate_subtree_copies): Do not unshare agg.
	(load_assign_lhs_subreplacements): Do not unshare rhs.
	(sra_modify_assign): Do not unshare exprs.
	(propagate_subacesses_accross_link): Renamed to
	propagate_subaccesses_across_link.

	* testsuite/g++.dg/torture/pr41775.C: New testcase.

Index: mine/gcc/tree-sra.c
===================================================================
--- mine.orig/gcc/tree-sra.c
+++ mine/gcc/tree-sra.c
@@ -1304,7 +1304,8 @@ build_ref_for_offset_1 (tree *res, tree
 /* Construct an expression that would reference a part of aggregate *EXPR of
    type TYPE at the given OFFSET of the type EXP_TYPE.  If EXPR is NULL, the
    function only determines whether it can build such a reference without
-   actually doing it.
+   actually doing it; otherwise the tree it points to is unshared first and
+   then used as a base for further sub-references.
 
    FIXME: Eventually this should be replaced with
    maybe_fold_offset_to_reference() from tree-ssa-ccp.c but that requires a
@@ -1317,6 +1318,9 @@ build_ref_for_offset (tree *expr, tree t
 {
   location_t loc = expr ? EXPR_LOCATION (*expr) : UNKNOWN_LOCATION;
 
+  if (expr)
+    *expr = unshare_expr (*expr);
+
   if (allow_ptr && POINTER_TYPE_P (type))
     {
       type = TREE_TYPE (type);
@@ -1759,7 +1763,7 @@ create_artificial_child_access (struct a
    access but LACC is not, change the type of the latter, if possible.  */
 
 static bool
-propagate_subacesses_accross_link (struct access *lacc, struct access *racc)
+propagate_subaccesses_across_link (struct access *lacc, struct access *racc)
 {
   struct access *rchild;
   HOST_WIDE_INT norm_delta = lacc->offset - racc->offset;
@@ -1800,7 +1804,7 @@ propagate_subacesses_accross_link (struc
 	      rchild->grp_hint = 1;
 	      new_acc->grp_hint |= new_acc->grp_read;
 	      if (rchild->first_child)
-		ret |= propagate_subacesses_accross_link (new_acc, rchild);
+		ret |= propagate_subaccesses_across_link (new_acc, rchild);
 	    }
 	  continue;
 	}
@@ -1818,7 +1822,7 @@ propagate_subacesses_accross_link (struc
 	{
 	  ret = true;
 	  if (racc->first_child)
-	    propagate_subacesses_accross_link (new_acc, rchild);
+	    propagate_subaccesses_across_link (new_acc, rchild);
 	}
     }
 
@@ -1844,7 +1848,7 @@ propagate_all_subaccesses (void)
 	  if (!bitmap_bit_p (candidate_bitmap, DECL_UID (lacc->base)))
 	    continue;
 	  lacc = lacc->group_representative;
-	  if (propagate_subacesses_accross_link (lacc, racc)
+	  if (propagate_subaccesses_across_link (lacc, racc)
 	      && lacc->first_link)
 	    add_access_to_work_queue (lacc);
 	}
@@ -1960,7 +1964,7 @@ generate_subtree_copies (struct access *
 {
   do
     {
-      tree expr = unshare_expr (agg);
+      tree expr = agg;
 
       if (chunk_size && access->offset >= start_offset + chunk_size)
 	return;
@@ -2235,7 +2239,7 @@ load_assign_lhs_subreplacements (struct
 		rhs = unshare_expr (lacc->expr);
 	      else
 		{
-		  rhs = unshare_expr (top_racc->base);
+		  rhs = top_racc->base;
 		  repl_found = build_ref_for_offset (&rhs,
 						     TREE_TYPE (top_racc->base),
 						     offset, lacc->type, false);
@@ -2372,7 +2376,7 @@ sra_modify_assign (gimple *stmt, gimple_
 	  if (AGGREGATE_TYPE_P (TREE_TYPE (lhs))
 	      && !access_has_children_p (lacc))
 	    {
-	      tree expr = unshare_expr (lhs);
+	      tree expr = lhs;
 	      if (build_ref_for_offset (&expr, TREE_TYPE (lhs), 0,
 					TREE_TYPE (rhs), false))
 		{
@@ -2383,7 +2387,7 @@ sra_modify_assign (gimple *stmt, gimple_
 	  else if (AGGREGATE_TYPE_P (TREE_TYPE (rhs))
 		   && !access_has_children_p (racc))
 	    {
-	      tree expr = unshare_expr (rhs);
+	      tree expr = rhs;
 	      if (build_ref_for_offset (&expr, TREE_TYPE (rhs), 0,
 					TREE_TYPE (lhs), false))
 		rhs = expr;
Index: mine/gcc/testsuite/g++.dg/torture/pr41775.C
===================================================================
--- /dev/null
+++ mine/gcc/testsuite/g++.dg/torture/pr41775.C
@@ -0,0 +1,283 @@
+/* { dg-do compile } */
+
+typedef unsigned int size_t;
+namespace std __attribute__ ((__visibility__ ("default")))
+{
+  template < typename _Iterator > struct iterator_traits
+  {
+  };
+  template < typename _Tp > struct iterator_traits <_Tp * >
+  {
+    typedef _Tp & reference;
+  };
+}
+
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default")))
+{
+  using std::iterator_traits;
+  template < typename _Iterator, typename _Container > class __normal_iterator
+  {
+  public:typedef _Iterator iterator_type;
+    typedef typename iterator_traits < _Iterator >::reference reference;
+    reference operator* () const
+    {
+    }
+    __normal_iterator operator++ (int)
+    {
+    }
+  };
+  template < typename _IteratorL, typename _IteratorR,
+    typename _Container > inline bool operator!= (const __normal_iterator <
+						  _IteratorL,
+						  _Container > &__lhs,
+						  const __normal_iterator <
+						  _IteratorR,
+						  _Container > &__rhs)
+  {
+  }
+}
+
+extern "C"
+{
+  extern "C"
+  {
+    __extension__ typedef __SIZE_TYPE__ __intptr_t;
+  }
+}
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default")))
+{
+  template < typename _Tp > class new_allocator
+  {
+  public:typedef size_t size_type;
+    typedef _Tp *pointer;
+    template < typename _Tp1 > struct rebind
+    {
+      typedef new_allocator < _Tp1 > other;
+    };
+  };
+}
+
+namespace std __attribute__ ((__visibility__ ("default")))
+{
+template < typename _Tp > class allocator:public __gnu_cxx::new_allocator <
+    _Tp >
+  {
+  };
+}
+
+extern "C"
+{
+  typedef __intptr_t intptr_t;
+}
+namespace llvm
+{
+  template < typename NodeTy > class ilist_half_node
+  {
+  };
+template < typename NodeTy > class ilist_node:private ilist_half_node <
+    NodeTy >
+  {
+  };
+  class MachineBasicBlock;
+  class MachineOperand
+  {
+  public:enum MachineOperandType
+    {
+    }
+    Contents;
+    unsigned getReg () const
+    {
+    }
+  };
+  class TargetRegisterInfo;
+}
+
+namespace std __attribute__ ((__visibility__ ("default")))
+{
+  template < typename _Tp, typename _Alloc > struct _Vector_base
+  {
+    typedef typename _Alloc::template rebind < _Tp >::other _Tp_alloc_type;
+  };
+template < typename _Tp, typename _Alloc = std::allocator < _Tp > >class vector:protected _Vector_base < _Tp,
+    _Alloc
+    >
+  {
+    typedef _Vector_base < _Tp, _Alloc > _Base;
+    typedef typename _Base::_Tp_alloc_type _Tp_alloc_type;
+  public:typedef _Tp value_type;
+    typedef typename _Tp_alloc_type::pointer pointer;
+    typedef __gnu_cxx::__normal_iterator < pointer, vector > iterator;
+    iterator begin ()
+    {
+    }
+    iterator end ()
+    {
+    }
+  };
+}
+
+namespace llvm
+{
+  class MachineFunction;
+  class MachineInstr:public ilist_node < MachineInstr >
+  {
+  public:const MachineBasicBlock *getParent () const
+    {
+    }
+    const MachineOperand & getOperand (unsigned i) const
+    {
+    }
+    bool registerDefIsDead (unsigned Reg, const TargetRegisterInfo * TRI =
+			    __null) const
+    {
+    }
+  };
+  class AnalysisResolver;
+  class Pass
+  {
+    AnalysisResolver *Resolver;
+    intptr_t PassID;
+  public:  explicit Pass (intptr_t pid):Resolver (0), PassID (pid)
+    {
+    }
+    explicit Pass (const void *pid):Resolver (0), PassID ((intptr_t) pid)
+    {
+    }
+    template < typename AnalysisType > AnalysisType & getAnalysis () const;
+  };
+  class FunctionPass:public Pass
+  {
+  public:explicit FunctionPass (intptr_t pid):Pass (pid)
+    {
+    }
+    explicit FunctionPass (const void *pid):Pass (pid)
+    {
+    }
+  };
+  class PassInfo
+  {
+  public:typedef Pass *(*NormalCtor_t) ();
+  private:const char *const PassName;
+    const char *const PassArgument;
+    const intptr_t PassID;
+    const bool IsCFGOnlyPass;
+    const bool IsAnalysis;
+    const bool IsAnalysisGroup;
+    NormalCtor_t NormalCtor;
+  public:   PassInfo (const char *name, const char *arg, intptr_t pi, NormalCtor_t normal = 0, bool isCFGOnly = false, bool is_analysis = false):PassName (name), PassArgument (arg), PassID (pi),
+      IsCFGOnlyPass (isCFGOnly), IsAnalysis (is_analysis),
+      IsAnalysisGroup (false), NormalCtor (normal)
+    {
+    }
+  };
+  template < typename PassName > Pass * callDefaultCtor ()
+  {
+    return new PassName ();
+  }
+  template < typename passName > struct RegisterPass:public PassInfo
+  {
+  RegisterPass (const char *PassArg, const char *Name, bool CFGOnly = false, bool is_analysis = false):PassInfo (Name, PassArg, intptr_t (&passName::ID),
+	      PassInfo::NormalCtor_t (callDefaultCtor < passName >), CFGOnly,
+	      is_analysis)
+    {
+    }
+  };
+  template < typename T > class SmallVectorImpl
+  {
+  };
+  template < typename T,
+    unsigned N > class SmallVector:public SmallVectorImpl < T >
+  {
+  };
+  class MachineFunctionPass:public FunctionPass
+  {
+  protected:explicit MachineFunctionPass (intptr_t ID):FunctionPass (ID)
+    {
+    }
+    explicit MachineFunctionPass (void *ID):FunctionPass (ID)
+    {
+    }
+    virtual bool runOnMachineFunction (MachineFunction & MF) = 0;
+  };
+  class LiveIndex
+  {
+  private:unsigned index;
+  };
+  class VNInfo
+  {
+  };
+  struct LiveRange
+  {
+    LiveIndex start;
+    LiveIndex end;
+    VNInfo *valno;
+  };
+  class LiveInterval
+  {
+  public:typedef SmallVector < LiveRange, 4 > Ranges;
+    bool containsOneValue () const
+    {
+    }
+    LiveRange *getLiveRangeContaining (LiveIndex Idx)
+    {
+    }
+    void removeRange (LiveIndex Start, LiveIndex End, bool RemoveDeadValNo =
+		      false);
+    void removeRange (LiveRange LR, bool RemoveDeadValNo = false)
+    {
+      removeRange (LR.start, LR.end, RemoveDeadValNo);
+    }
+  };
+  class LiveIntervals:public MachineFunctionPass
+  {
+  public:static char ID;
+    LiveIndex getDefIndex (LiveIndex index)
+    {
+    }
+    LiveInterval & getInterval (unsigned reg)
+    {
+    }
+    LiveIndex getInstructionIndex (const MachineInstr * instr) const
+    {
+    }
+  };
+}
+
+using namespace llvm;
+namespace
+{
+struct __attribute__ ((visibility ("hidden"))) StrongPHIElimination:public
+    MachineFunctionPass
+  {
+    static char ID;
+  StrongPHIElimination ():MachineFunctionPass (&ID)
+    {
+    }
+    bool runOnMachineFunction (MachineFunction & Fn);
+  };
+}
+
+static RegisterPass < StrongPHIElimination > X ("strong-phi-node-elimination",
+						"Eliminate PHI nodes for register allocation, intelligently");
+bool
+StrongPHIElimination::runOnMachineFunction (MachineFunction & Fn)
+{
+  LiveIntervals & LI = getAnalysis < LiveIntervals > ();
+  std::vector < MachineInstr * >phis;
+  for (std::vector < MachineInstr * >::iterator I = phis.begin (), E =
+       phis.end (); I != E;)
+    {
+      MachineInstr *PInstr = *(I++);
+      unsigned DestReg = PInstr->getOperand (0).getReg ();
+      LiveInterval & PI = LI.getInterval (DestReg);
+      if (PInstr->registerDefIsDead (DestReg))
+	{
+	  if (PI.containsOneValue ())
+	    {
+	      LiveIndex idx =
+		LI.getDefIndex (LI.getInstructionIndex (PInstr));
+	      PI.removeRange (*PI.getLiveRangeContaining (idx), true);
+	    }
+	}
+    }
+}
