This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.

Refactor a bit of the backwards jump threading code



fsm_find_control_statement_thread_paths is getting unwieldy and I wanted to extend it further. So a bit of refactoring is warranted.

This pulls out the code that examines a thread path and determines whether it is profitable to thread. It shouldn't affect the generated code in any way.

The primary goal here was to make the management of the last entry on the jump threading path more visible (i.e., is BBI in PATH or not). I'm not at all happy with how that detail is managed, and it may well change in the future.
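
Roughly, the caller side now reduces to something like this (a simplified sketch of the new contract; the construction of the jump_thread_edge vector is elided -- see the patch below for the actual code):

  edge taken_edge = profitable_jump_thread_path (path, bbi, name, arg);
  if (taken_edge)
    {
      /* On success the helper has already pushed BBI onto PATH, so
         PATH plus TAKEN_EDGE describe the whole candidate thread.  */
      vec<jump_thread_edge *> *jump_thread_path
        = ...;   /* Built from PATH and TAKEN_EDGE, elided here.  */
      register_jump_thread (jump_thread_path);
      --max_threaded_paths;

      /* The caller, not the helper, removes BBI from PATH again.  */
      path->pop ();
    }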

The diffs look far more extensive than they really are.

Bootstrapped and regression tested on x86_64.  Installed on the trunk.

Jeff

commit e5c47bcb335caf84b6abdb6c6b075919465acf0b
Author: Jeff Law <law@torsion.usersys.redhat.com>
Date:   Mon May 23 12:46:27 2016 -0400

    	* tree-ssa-threadbackward.c (profitable_jump_thread_path): New function
    	extracted from ...
    	(fsm_find_control_statement_thread_paths): Call it.

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f7a012c..f487139 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2016-05-23  Jeff Law  <law@redhat.com>
+
+	* tree-ssa-threadbackward.c (profitable_jump_thread_path): New function
+	extracted from ...
+	(fsm_find_control_statement_thread_paths): Call it.
+
 2016-05-23  Martin Jambor  <mjambor@suse.cz>
 
 	PR ipa/71234
diff --git a/gcc/tree-ssa-threadbackward.c b/gcc/tree-ssa-threadbackward.c
index 44b1b47..73ab4ea 100644
--- a/gcc/tree-ssa-threadbackward.c
+++ b/gcc/tree-ssa-threadbackward.c
@@ -89,6 +89,273 @@ fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
   return false;
 }
 
+/* Examine jump threading path PATH to which we want to add BBI.
+
+   If the resulting path is profitable to thread, then return the
+   final taken edge from the path, NULL otherwise.
+
+   NAME is the SSA_NAME of the variable we found to have a constant
+   value on PATH.  ARG is the value of that SSA_NAME.
+
+   BBI will be appended to PATH when we have a profitable jump threading
+   path.  Callers are responsible for removing BBI from PATH in that case. */
+
+static edge
+profitable_jump_thread_path (vec<basic_block, va_gc> *&path,
+			     basic_block bbi, tree name, tree arg)
+{
+  /* Note BBI is not in the path yet, hence the +1 in the test below
+     to make sure BBI is accounted for in the path length test.  */
+  int path_length = path->length ();
+  if (path_length + 1 > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file, "FSM jump-thread path not considered: "
+		 "the number of basic blocks on the path "
+		 "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
+      return NULL;
+    }
+
+  if (max_threaded_paths <= 0)
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file, "FSM jump-thread path not considered: "
+		 "the number of previously recorded FSM paths to "
+		 "thread exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
+      return NULL;
+    }
+
+  /* Add BBI to the path.
+     From this point onward, if we decide the path is not profitable
+     to thread, we must remove BBI from the path.  */
+  vec_safe_push (path, bbi);
+  ++path_length;
+
+  int n_insns = 0;
+  gimple_stmt_iterator gsi;
+  int j;
+  loop_p loop = (*path)[0]->loop_father;
+  bool path_crosses_loops = false;
+  bool threaded_through_latch = false;
+  bool multiway_branch_in_path = false;
+  bool threaded_multiway_branch = false;
+
+  /* Count the number of instructions on the path: as these instructions
+     will have to be duplicated, we will not record the path if there
+     are too many instructions on the path.  Also check that all the
+     blocks in the path belong to a single loop.  */
+  for (j = 0; j < path_length; j++)
+    {
+      basic_block bb = (*path)[j];
+
+      /* Remember, blocks in the path are stored in opposite order
+	 in the PATH array.  The last entry in the array represents
+	 the block with an outgoing edge that we will redirect to the
+	 jump threading path.  Thus we don't care about that block's
+	 loop father, nor about how many statements are in that block
+	 (it will not be copied), nor about whether or not it ends in
+	 a multiway branch.  */
+      if (j < path_length - 1)
+	{
+	  if (bb->loop_father != loop)
+	    {
+	      path_crosses_loops = true;
+	      break;
+	    }
+
+	  /* PHIs in the path will create degenerate PHIs in the
+	     copied path which will then get propagated away, so
+	     looking at just the duplicate path the PHIs would
+	     seem unimportant.
+
+	     But those PHIs, because they're assignments to objects
+	     typically with lives that exist outside the thread path,
+	     will tend to generate PHIs (or at least new PHI arguments)
+	     at points where we leave the thread path and rejoin
+	     the original blocks.  So we do want to account for them.
+
+	     We ignore virtual PHIs.  We also ignore cases where BB
+	     has a single incoming edge.  That's the most common
+	     degenerate PHI we'll see here.  Finally we ignore PHIs
+	     that are associated with the value we're tracking as
+	     that object likely dies.  */
+	  if (EDGE_COUNT (bb->succs) > 1 && EDGE_COUNT (bb->preds) > 1)
+	    {
+	      for (gphi_iterator gsip = gsi_start_phis (bb);
+		   !gsi_end_p (gsip);
+		   gsi_next (&gsip))
+		{
+		  gphi *phi = gsip.phi ();
+		  tree dst = gimple_phi_result (phi);
+
+		  /* Note that if both NAME and DST are anonymous
+		     SSA_NAMEs, then we do not have enough information
+		     to consider them associated.  */
+		  if ((SSA_NAME_VAR (dst) != SSA_NAME_VAR (name)
+		       || !SSA_NAME_VAR (dst))
+		      && !virtual_operand_p (dst))
+		    ++n_insns;
+		}
+	    }
+
+	  for (gsi = gsi_after_labels (bb);
+	       !gsi_end_p (gsi);
+	       gsi_next_nondebug (&gsi))
+	    {
+	      gimple *stmt = gsi_stmt (gsi);
+	      /* Do not count empty statements and labels.  */
+	      if (gimple_code (stmt) != GIMPLE_NOP
+		  && !(gimple_code (stmt) == GIMPLE_ASSIGN
+		       && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
+		  && !is_gimple_debug (stmt))
+		++n_insns;
+	    }
+
+	  /* We do not look at the block with the threaded branch
+	     in this loop.  So if any block with a last statement that
+	     is a GIMPLE_SWITCH or GIMPLE_GOTO is seen, then we have a
+	     multiway branch on our path.
+
+	     The block in PATH[0] is special, it's the block where we're
+	     going to be able to eliminate its branch.  */
+	  gimple *last = last_stmt (bb);
+	  if (last && (gimple_code (last) == GIMPLE_SWITCH
+		       || gimple_code (last) == GIMPLE_GOTO))
+	    {
+	      if (j == 0)
+		threaded_multiway_branch = true;
+	      else
+		multiway_branch_in_path = true;
+	    }
+	}
+
+      /* Note that when determining if we thread through the loop
+	 latch, we want to include the last entry in the array, hence
+	 this check is not inside the test above.  */
+      if (loop->latch == bb)
+	threaded_through_latch = true;
+    }
+
+  /* We are going to remove the control statement at the end of the
+     last block in the threading path.  So don't count it against our
+     statement count.  */
+  n_insns--;
+
+  gimple *stmt = get_gimple_control_stmt ((*path)[0]);
+  gcc_assert (stmt);
+  /* We have found a constant value for ARG.  For GIMPLE_SWITCH
+     and GIMPLE_GOTO, we use it as-is.  However, for a GIMPLE_COND
+     we need to substitute, fold and simplify so we can determine
+     the edge taken out of the last block.  */
+  if (gimple_code (stmt) == GIMPLE_COND)
+    {
+      enum tree_code cond_code = gimple_cond_code (stmt);
+
+      /* We know the underlying format of the condition.  */
+      arg = fold_binary (cond_code, boolean_type_node,
+			 arg, gimple_cond_rhs (stmt));
+    }
+
+  /* If this path threaded through the loop latch back into the
+     same loop and the destination does not dominate the loop
+     latch, then this thread would create an irreducible loop.
+
+     We have to know the outgoing edge to figure this out.  */
+  edge taken_edge = find_taken_edge ((*path)[0], arg);
+
+  /* There are cases where we may not be able to extract the
+     taken edge.  For example, a computed goto to an absolute
+     address.  Handle those cases gracefully.  */
+  if (taken_edge == NULL)
+    {
+      path->pop ();
+      return NULL;
+    }
+
+  bool creates_irreducible_loop = false;
+  if (threaded_through_latch
+      && loop == taken_edge->dest->loop_father
+      && (determine_bb_domination_status (loop, taken_edge->dest)
+	  == DOMST_NONDOMINATING))
+    creates_irreducible_loop = true;
+
+  if (path_crosses_loops)
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file, "FSM jump-thread path not considered: "
+		 "the path crosses loops.\n");
+      path->pop ();
+      return NULL;
+    }
+
+  if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file, "FSM jump-thread path not considered: "
+		 "the number of instructions on the path "
+		 "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
+      path->pop ();
+      return NULL;
+    }
+
+  /* We avoid creating irreducible inner loops unless we thread through
+     a multiway branch, in which case we have deemed it worth losing
+     other loop optimizations later.
+
+     We also consider it worth creating an irreducible inner loop if
+     the number of copied statements is low relative to the length of
+     the path -- in that case there's little the traditional loop
+     optimizer would have done anyway, so an irreducible loop is not
+     so bad.  */
+  if (!threaded_multiway_branch && creates_irreducible_loop
+      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
+	  > path_length * PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS)))
+
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file,
+		 "FSM would create irreducible loop without threading "
+		 "multiway branch.\n");
+      path->pop ();
+      return NULL;
+    }
+
+
+  /* If this path does not thread through the loop latch, then we are
+     using the FSM threader to find old style jump threads.  This
+     is good, except the FSM threader does not re-use an existing
+     threading path to reduce code duplication.
+
+     So for that case, drastically reduce the number of statements
+     we are allowed to copy.  */
+  if (!(threaded_through_latch && threaded_multiway_branch)
+      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
+	  >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file,
+		 "FSM did not thread around loop and would copy too "
+		 "many statements.\n");
+      path->pop ();
+      return NULL;
+    }
+
+  /* When there is a multi-way branch on the path, then threading can
+     explode the CFG due to duplicating the edges for that multi-way
+     branch.  So like above, only allow a multi-way branch on the path
+     if we actually thread a multi-way branch.  */
+  if (!threaded_multiway_branch && multiway_branch_in_path)
+    {
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	fprintf (dump_file,
+		 "FSM Thread through multiway branch without threading "
+		 "a multiway branch.\n");
+      path->pop ();
+      return NULL;
+    }
+  return taken_edge;
+}
+
 /* We trace the value of the SSA_NAME NAME back through any phi nodes looking
    for places where it gets a constant value and save the path.  Stop after
    having recorded MAX_PATHS jump threading paths.  */
@@ -229,277 +496,36 @@ fsm_find_control_statement_thread_paths (tree name,
 	  if (TREE_CODE (arg) != INTEGER_CST)
 	    continue;
 
-	  /* Note BBI is not in the path yet, hence the +1 in the test below
-	     to make sure BBI is accounted for in the path length test.  */
-	  int path_length = path->length ();
-	  if (path_length + 1 > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
+	  /* If this is a profitable jump thread path, then convert it
+	     into the canonical form and register it.  */
+	  edge taken_edge = profitable_jump_thread_path (path, bbi, name, arg);
+	  if (taken_edge)
 	    {
-	      if (dump_file && (dump_flags & TDF_DETAILS))
-		fprintf (dump_file, "FSM jump-thread path not considered: "
-			 "the number of basic blocks on the path "
-			 "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
-	      continue;
-	    }
+	      vec<jump_thread_edge *> *jump_thread_path
+		= new vec<jump_thread_edge *> ();
 
-	  if (max_threaded_paths <= 0)
-	    {
-	      if (dump_file && (dump_flags & TDF_DETAILS))
-		fprintf (dump_file, "FSM jump-thread path not considered: "
-			 "the number of previously recorded FSM paths to "
-			 "thread exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
-	      continue;
-	    }
-
-	  /* Add BBI to the path.  */
-	  vec_safe_push (path, bbi);
-	  ++path_length;
-
-	  int n_insns = 0;
-	  gimple_stmt_iterator gsi;
-	  int j;
-	  loop_p loop = (*path)[0]->loop_father;
-	  bool path_crosses_loops = false;
-	  bool threaded_through_latch = false;
-	  bool multiway_branch_in_path = false;
-	  bool threaded_multiway_branch = false;
-
-	  /* Count the number of instructions on the path: as these instructions
-	     will have to be duplicated, we will not record the path if there
-	     are too many instructions on the path.  Also check that all the
-	     blocks in the path belong to a single loop.  */
-	  for (j = 0; j < path_length; j++)
-	    {
-	      basic_block bb = (*path)[j];
-
-	      /* Remember, blocks in the path are stored in opposite order
-		 in the PATH array.  The last entry in the array represents
-		 the block with an outgoing edge that we will redirect to the
-		 jump threading path.  Thus we don't care about that block's
-		 loop father, nor how many statements are in that block because
-		 it will not be copied or whether or not it ends in a multiway
-		 branch.  */
-	      if (j < path_length - 1)
+	      /* Record the edges between the blocks in PATH.  */
+	      for (unsigned int j = 0; j < path->length () - 1; j++)
 		{
-		  if (bb->loop_father != loop)
-		    {
-		      path_crosses_loops = true;
-		      break;
-		    }
-
-		  /* PHIs in the path will create degenerate PHIS in the
-		     copied path which will then get propagated away, so
-		     looking at just the duplicate path the PHIs would
-		     seem unimportant.
-
-		     But those PHIs, because they're assignments to objects
-		     typically with lives that exist outside the thread path,
-		     will tend to generate PHIs (or at least new PHI arguments)
-		     at points where we leave the thread path and rejoin
-		     the original blocks.  So we do want to account for them.
-
-		     We ignore virtual PHIs.  We also ignore cases where BB
-		     has a single incoming edge.  That's the most common
-		     degenerate PHI we'll see here.  Finally we ignore PHIs
-		     that are associated with the value we're tracking as
-		     that object likely dies.  */
-		  if (EDGE_COUNT (bb->succs) > 1 && EDGE_COUNT (bb->preds) > 1)
-		    {
-		      for (gphi_iterator gsip = gsi_start_phis (bb);
-			   !gsi_end_p (gsip);
-			   gsi_next (&gsip))
-			{
-			  gphi *phi = gsip.phi ();
-			  tree dst = gimple_phi_result (phi);
-
-			  /* Note that if both NAME and DST are anonymous
-			     SSA_NAMEs, then we do not have enough information
-			     to consider them associated.  */
-			  if ((SSA_NAME_VAR (dst) != SSA_NAME_VAR (name)
-			       || !SSA_NAME_VAR (dst))
-			      && !virtual_operand_p (dst))
-			    ++n_insns;
-			}
-		    }
-
-		  for (gsi = gsi_after_labels (bb);
-		       !gsi_end_p (gsi);
-		       gsi_next_nondebug (&gsi))
-		    {
-		      gimple *stmt = gsi_stmt (gsi);
-		      /* Do not count empty statements and labels.  */
-		      if (gimple_code (stmt) != GIMPLE_NOP
-			  && !(gimple_code (stmt) == GIMPLE_ASSIGN
-			       && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
-			  && !is_gimple_debug (stmt))
-			++n_insns;
-		    }
-
-		  /* We do not look at the block with the threaded branch
-		     in this loop.  So if any block with a last statement that
-		     is a GIMPLE_SWITCH or GIMPLE_GOTO is seen, then we have a
-		     multiway branch on our path.
-
-		     The block in PATH[0] is special, it's the block were we're
-		     going to be able to eliminate its branch.  */
-		  gimple *last = last_stmt (bb);
-		  if (last && (gimple_code (last) == GIMPLE_SWITCH
-			       || gimple_code (last) == GIMPLE_GOTO))
-		    {
-		      if (j == 0)
-			threaded_multiway_branch = true;
-		      else
-			multiway_branch_in_path = true;
-		    }
+		  edge e = find_edge ((*path)[path->length () - j - 1],
+				      (*path)[path->length () - j - 2]);
+		  gcc_assert (e);
+		  jump_thread_edge *x
+		    = new jump_thread_edge (e, EDGE_FSM_THREAD);
+		  jump_thread_path->safe_push (x);
 		}
 
-	      /* Note if we thread through the latch, we will want to include
-		 the last entry in the array when determining if we thread
-		 through the loop latch.  */
-	      if (loop->latch == bb)
-		threaded_through_latch = true;
-	    }
-
-	  /* We are going to remove the control statement at the end of the
-	     last block in the threading path.  So don't count it against our
-	     statement count.  */
-	  n_insns--;
-
-	  gimple *stmt = get_gimple_control_stmt ((*path)[0]);
-	  gcc_assert (stmt);
-	  /* We have found a constant value for ARG.  For GIMPLE_SWITCH
-	     and GIMPLE_GOTO, we use it as-is.  However, for a GIMPLE_COND
-	     we need to substitute, fold and simplify so we can determine
-	     the edge taken out of the last block.  */
-	  if (gimple_code (stmt) == GIMPLE_COND)
-	    {
-	      enum tree_code cond_code = gimple_cond_code (stmt);
-
-	      /* We know the underyling format of the condition.  */
-	      arg = fold_binary (cond_code, boolean_type_node,
-				 arg, gimple_cond_rhs (stmt));
-	    }
-
-	  /* If this path threaded through the loop latch back into the
-	     same loop and the destination does not dominate the loop
-	     latch, then this thread would create an irreducible loop.
-
-	     We have to know the outgoing edge to figure this out.  */
-	  edge taken_edge = find_taken_edge ((*path)[0], arg);
-
-	  /* There are cases where we may not be able to extract the
-	     taken edge.  For example, a computed goto to an absolute
-	     address.  Handle those cases gracefully.  */
-	  if (taken_edge == NULL)
-	    {
-	      path->pop ();
-	      continue;
-	    }
-
-	  bool creates_irreducible_loop = false;
-	  if (threaded_through_latch
-	      && loop == taken_edge->dest->loop_father
-	      && (determine_bb_domination_status (loop, taken_edge->dest)
-		  == DOMST_NONDOMINATING))
-	    creates_irreducible_loop = true;
-
-	  if (path_crosses_loops)
-	    {
-	      if (dump_file && (dump_flags & TDF_DETAILS))
-		fprintf (dump_file, "FSM jump-thread path not considered: "
-			 "the path crosses loops.\n");
-	      path->pop ();
-	      continue;
-	    }
-
-	  if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
-	    {
-	      if (dump_file && (dump_flags & TDF_DETAILS))
-		fprintf (dump_file, "FSM jump-thread path not considered: "
-			 "the number of instructions on the path "
-			 "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
-	      path->pop ();
-	      continue;
-	    }
-
-	  /* We avoid creating irreducible inner loops unless we thread through
-	     a multiway branch, in which case we have deemed it worth losing
-	     other loop optimizations later.
-
-	     We also consider it worth creating an irreducible inner loop if
-	     the number of copied statement is low relative to the length of
-	     the path -- in that case there's little the traditional loop
-	     optimizer would have done anyway, so an irreducible loop is not
-	     so bad.  */
-	  if (!threaded_multiway_branch && creates_irreducible_loop
-	      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
-		  > path_length * PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS)))
-
-	    {
-	      if (dump_file && (dump_flags & TDF_DETAILS))
-		fprintf (dump_file,
-			 "FSM would create irreducible loop without threading "
-			 "multiway branch.\n");
-	      path->pop ();
-	      continue;
-	    }
-
-
-	  /* If this path does not thread through the loop latch, then we are
-	     using the FSM threader to find old style jump threads.  This
-	     is good, except the FSM threader does not re-use an existing
-	     threading path to reduce code duplication.
+	      /* Add the edge taken when the control variable has value ARG.  */
+	      jump_thread_edge *x
+		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
+	      jump_thread_path->safe_push (x);
 
-	     So for that case, drastically reduce the number of statements
-	     we are allowed to copy.  */
-	  if (!(threaded_through_latch && threaded_multiway_branch)
-	      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
-		  >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
-	    {
-	      if (dump_file && (dump_flags & TDF_DETAILS))
-		fprintf (dump_file,
-			 "FSM did not thread around loop and would copy too "
-			 "many statements.\n");
-	      path->pop ();
-	      continue;
-	    }
+	      register_jump_thread (jump_thread_path);
+	      --max_threaded_paths;
 
-	  /* When there is a multi-way branch on the path, then threading can
-	     explode the CFG due to duplicating the edges for that multi-way
-	     branch.  So like above, only allow a multi-way branch on the path
-	     if we actually thread a multi-way branch.  */
-	  if (!threaded_multiway_branch && multiway_branch_in_path)
-	    {
-	      if (dump_file && (dump_flags & TDF_DETAILS))
-		fprintf (dump_file,
-			 "FSM Thread through multiway branch without threading "
-			 "a multiway branch.\n");
+	      /* Remove BBI from the path.  */
 	      path->pop ();
-	      continue;
-	    }
-
-	  vec<jump_thread_edge *> *jump_thread_path
-	    = new vec<jump_thread_edge *> ();
-
-	  /* Record the edges between the blocks in PATH.  */
-	  for (j = 0; j < path_length - 1; j++)
-	    {
-	      edge e = find_edge ((*path)[path_length - j - 1],
-				  (*path)[path_length - j - 2]);
-	      gcc_assert (e);
-	      jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
-	      jump_thread_path->safe_push (x);
 	    }
-
-	  /* Add the edge taken when the control variable has value ARG.  */
-	  jump_thread_edge *x
-	    = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
-	  jump_thread_path->safe_push (x);
-
-	  register_jump_thread (jump_thread_path);
-	  --max_threaded_paths;
-
-	  /* Remove BBI from the path.  */
-	  path->pop ();
 	}
     }
 
