This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[patch] gcc/*: Fix comment typos.


Hi,

Committed as obvious.

Kazu Hirata

2006-12-02  Kazu Hirata  <kazu@codesourcery.com>

	* builtins.c, cfgloop.h, cgraph.h, config/arm/arm.c,
	config/i386/i386.c, config/i386/i386.h, config/mips/mips.h,
	config/rs6000/cell.md, config/rs6000/rs6000.c, config/sh/sh.c,
	config/sh/sh4-300.md, config/spu/spu-builtins.def,
	config/spu/spu-c.c, config/spu/spu-modes.def,
	config/spu/spu.c, config/spu/spu.md,
	config/spu/spu_internals.h, config/spu/vmx2spu.h,
	fold-const.c, fwprop.c, predict.c, tree-data-ref.h,
	tree-flow.h, tree-ssa-loop-manip.c, tree-ssa-loop-niter.c,
	tree-ssa-pre.c, tree-vect-analyze.c, tree-vect-transform.c,
	tree-vectorizer.c, tree-vrp.c: Fix comment typos.  Follow
	spelling conventions.

Index: builtins.c
===================================================================
--- builtins.c	(revision 119436)
+++ builtins.c	(working copy)
@@ -554,7 +554,7 @@ expand_builtin_return_addr (enum built_i
      override us.  Therefore frame pointer elimination is OK, and using
      the soft frame pointer is OK.
 
-     For a non-zero count, or a zero count with __builtin_frame_address,
+     For a nonzero count, or a zero count with __builtin_frame_address,
      we require a stable offset from the current frame pointer to the
      previous one, so we must use the hard frame pointer, and
      we must disable frame pointer elimination.  */
@@ -11495,7 +11495,7 @@ init_target_chars (void)
 
 /* Helper function for do_mpfr_arg*().  Ensure M is a normal number
    and no overflow/underflow occurred.  INEXACT is true if M was not
-   exacly calculated.  TYPE is the tree type for the result.  This
+   exactly calculated.  TYPE is the tree type for the result.  This
    function assumes that you cleared the MPFR flags and then
    calculated M to see if anything subsequently set a flag prior to
    entering this function.  Return NULL_TREE if any checks fail.  */
Index: cfgloop.h
===================================================================
--- cfgloop.h	(revision 119436)
+++ cfgloop.h	(working copy)
@@ -143,7 +143,7 @@ struct loop
   struct nb_iter_bound *bounds;
 
   /* If not NULL, loop has just single exit edge stored here (edges to the
-     EXIT_BLOCK_PTR do not count.  Do not use direcly, this field should
+     EXIT_BLOCK_PTR do not count.  Do not use directly; this field should
      only be accessed via single_exit/set_single_exit functions.  */
   edge single_exit_;
 
Index: cgraph.h
===================================================================
--- cgraph.h	(revision 119436)
+++ cgraph.h	(working copy)
@@ -51,7 +51,7 @@ enum availability
 
 struct cgraph_local_info GTY(())
 {
-  /* Estiimated stack frame consumption by the function.  */
+  /* Estimated stack frame consumption by the function.  */
   HOST_WIDE_INT estimated_self_stack_size;
 
   /* Size of the function before inlining.  */
Index: config/arm/arm.c
===================================================================
--- config/arm/arm.c	(revision 119436)
+++ config/arm/arm.c	(working copy)
@@ -394,7 +394,7 @@ rtx arm_compare_op0, arm_compare_op1;
 /* The processor for which instructions should be scheduled.  */
 enum processor_type arm_tune = arm_none;
 
-/* The default processor used if not overriden by commandline.  */
+/* The default processor used if not overridden by commandline.  */
 static enum processor_type arm_default_cpu = arm_none;
 
 /* Which floating point model to use.  */
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c	(revision 119436)
+++ config/i386/i386.c	(working copy)
@@ -530,7 +530,7 @@ struct processor_costs athlon_cost = {
   COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
   COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
   /* For some reason, Athlon deals better with REP prefix (relative to loops)
-     comopared to K8. Alignment becomes important after 8 bytes for mempcy and
+     compared to K8. Alignment becomes important after 8 bytes for memcpy and
      128 bytes for memset.  */
   {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
    DUMMY_STRINGOP_ALGS},
@@ -13171,7 +13171,7 @@ expand_movmem_epilogue (rtx destmem, rtx
 
   /* When there are stringops, we can cheaply increase dest and src pointers.
      Otherwise we save code size by maintaining offset (zero is readily
-     available from preceeding rep operation) and using x86 addressing modes.
+     available from preceding rep operation) and using x86 addressing modes.
    */
   if (TARGET_SINGLE_STRINGOP)
     {
@@ -13621,7 +13621,7 @@ ix86_expand_movmem (rtx dst, rtx src, rt
 
   if (GET_CODE (align_exp) == CONST_INT)
     align = INTVAL (align_exp);
-  /* i386 can do missaligned access on resonably increased cost.  */
+  /* i386 can do misaligned access on reasonably increased cost.  */
   if (GET_CODE (expected_align_exp) == CONST_INT
       && INTVAL (expected_align_exp) > align)
     align = INTVAL (expected_align_exp);
@@ -13783,7 +13783,7 @@ ix86_expand_movmem (rtx dst, rtx src, rt
       dst = change_address (dst, BLKmode, destreg);
     }
 
-  /* Epologue to copy the remaining bytes.  */
+  /* Epilogue to copy the remaining bytes.  */
   if (label)
     {
       if (size_needed < desired_align - align)
@@ -13909,7 +13909,7 @@ ix86_expand_setmem (rtx dst, rtx count_e
 
   if (GET_CODE (align_exp) == CONST_INT)
     align = INTVAL (align_exp);
-  /* i386 can do missaligned access on resonably increased cost.  */
+  /* i386 can do misaligned access on reasonably increased cost.  */
   if (GET_CODE (expected_align_exp) == CONST_INT
       && INTVAL (expected_align_exp) > align)
     align = INTVAL (expected_align_exp);
Index: config/i386/i386.h
===================================================================
--- config/i386/i386.h	(revision 119436)
+++ config/i386/i386.h	(working copy)
@@ -1494,7 +1494,7 @@ typedef struct ix86_args {
   int warn_mmx;			/* True when we want to warn about MMX ABI.  */
   int maybe_vaarg;		/* true for calls to possibly vardic fncts.  */
   int float_in_x87;		/* 1 if floating point arguments should
-				   be passed in 80387 registere.  */
+				   be passed in 80387 registers.  */
   int float_in_sse;		/* 1 if in 32-bit mode SFmode (2 for DFmode) should
 				   be passed in SSE registers.  Otherwise 0.  */
 } CUMULATIVE_ARGS;
Index: config/mips/mips.h
===================================================================
--- config/mips/mips.h	(revision 119436)
+++ config/mips/mips.h	(working copy)
@@ -576,7 +576,7 @@ extern const struct mips_rtx_cost_data *
    been generated up to this point.  */
 #define ISA_HAS_BRANCHLIKELY	(!ISA_MIPS1)
 
-/* ISA has a three-operand multiplcation instruction (usually spelt "mul").  */
+/* ISA has a three-operand multiplication instruction (usually spelt "mul").  */
 #define ISA_HAS_MUL3		((TARGET_MIPS3900                       \
 				  || TARGET_MIPS5400			\
 				  || TARGET_MIPS5500			\
Index: config/rs6000/cell.md
===================================================================
--- config/rs6000/cell.md	(revision 119436)
+++ config/rs6000/cell.md	(working copy)
@@ -21,10 +21,10 @@
 
 ;; Sources: BE BOOK4 (/sfs/enc/doc/PPU_BookIV_DD3.0_latest.pdf)
 
-;; BE Architechture *DD3.0 and DD3.1*
+;; BE Architecture *DD3.0 and DD3.1*
 ;; This file simulate PPU processor unit backend of pipeline, maualP24. 
 ;; manual P27, stall and flush points
-;; IU, XU, VSU, dipatcher decodes and dispatch 2 insns per cycle in program
+;; IU, XU, VSU, dispatcher decodes and dispatch 2 insns per cycle in program
 ;;  order, the grouped adress are aligned by 8
 ;; This file only simulate one thread situation
 ;; XU executes all fixed point insns(3 units, a simple alu, a complex unit,
@@ -43,7 +43,7 @@
 ;;VMX(perm,vsu_ls, fp_ls)					X
 ;;    X are illegal combination.
 
-;; Dual issue exceptons: 
+;; Dual issue exceptions:
 ;;(1) nop-pipelined FXU instr in slot 0 
 ;;(2) non-pipelined FPU inst in slot 0
 ;; CSI instr(contex-synchronizing insn)
@@ -51,7 +51,7 @@
 
 ;; BRU unit: bru(none register stall), bru_cr(cr register stall)
 ;; VSU unit: vus(vmx simple), vup(vmx permute), vuc(vmx complex),
-;;  vuf(vmx float), fpu(floats). fpu_div is hypthetical, it is for 
+;;  vuf(vmx float), fpu(floats). fpu_div is hypothetical, it is for
 ;;  nonpipelined simulation
 ;; micr insns will stall at least 7 cycles to get the first instr from ROM,
 ;;  micro instructions are not dual issued. 
@@ -378,7 +378,7 @@ (define_bypass 3 "cell-vecfloat" "cell-v
 ; this is not correct, 
 ;;  this is a stall in general and not dependent on result
 (define_bypass 13 "cell-vecstore" "cell-fpstore")
-; this is not correct, this can never be true, not depent on result
+; this is not correct, this can never be true, not dependent on result
 (define_bypass 7 "cell-fp" "cell-fpload")
 ;; vsu1 should avoid writing to the same target register as vsu2 insn
 ;;   within 12 cycles. 
@@ -396,6 +396,6 @@ (define_bypass 10 "cell-mtjmpr" "cell-br
 
 ;;Things are not simulated:
 ;; update instruction, update address gpr are not simulated
-;; vrefp, vrsqrtefp have latency(14), currently simluated as 12 cycle float
+;; vrefp, vrsqrtefp have latency(14), currently simulated as 12 cycle float
 ;;  insns
 
Index: config/rs6000/rs6000.c
===================================================================
--- config/rs6000/rs6000.c	(revision 119436)
+++ config/rs6000/rs6000.c	(working copy)
@@ -17557,7 +17557,7 @@ rs6000_sched_reorder2 (FILE *dump, int s
          cycle and we attempt to locate another load in the ready list to
          issue with it.
 
-       - If the pedulum is -2, then two stores have already been
+       - If the pendulum is -2, then two stores have already been
          issued in this cycle, so we increase the priority of the first load
          in the ready list to increase it's likelihood of being chosen first
          in the next cycle.
Index: config/sh/sh.c
===================================================================
--- config/sh/sh.c	(revision 119436)
+++ config/sh/sh.c	(working copy)
@@ -1416,7 +1416,7 @@ prepare_cbranch_operands (rtx *operands,
      compare r0.  Hence, if operands[1] has to be loaded from somewhere else
      into a register, that register might as well be r0, and we allow the
      constant.  If it is already in a register, this is likely to be
-     allocatated to a different hard register, thus we load the constant into
+     allocated to a different hard register, thus we load the constant into
      a register unless it is zero.  */
   if (!REG_P (operands[2])
       && (GET_CODE (operands[2]) != CONST_INT
@@ -1468,7 +1468,7 @@ expand_cbranchsi4 (rtx *operands, enum r
      operation should be EQ or NE.
    - If items are searched in an ordered tree from the root, we can expect
      the highpart to be unequal about half of the time; operation should be
-     an unequality comparison, operands non-constant, and overall probability
+     an inequality comparison, operands non-constant, and overall probability
      about 50%.  Likewise for quicksort.
    - Range checks will be often made against constants.  Even if we assume for
      simplicity an even distribution of the non-constant operand over a
@@ -2413,7 +2413,7 @@ sh_rtx_costs (rtx x, int code, int outer
 	       && CONST_OK_FOR_K08 (INTVAL (x)))
         *total = 1;
       /* prepare_cmp_insn will force costly constants int registers before
-	 the cbrach[sd]i4 pattterns can see them, so preserve potentially
+	 the cbranch[sd]i4 patterns can see them, so preserve potentially
 	 interesting ones not covered by I08 above.  */
       else if (outer_code == COMPARE
 	       && ((unsigned HOST_WIDE_INT) INTVAL (x)
@@ -2440,7 +2440,7 @@ sh_rtx_costs (rtx x, int code, int outer
       if (TARGET_SHMEDIA)
         *total = COSTS_N_INSNS (4);
       /* prepare_cmp_insn will force costly constants int registers before
-	 the cbrachdi4 patttern can see them, so preserve potentially
+	 the cbranchdi4 pattern can see them, so preserve potentially
 	 interesting ones.  */
       else if (outer_code == COMPARE && GET_MODE (x) == DImode)
         *total = 1;
Index: config/sh/sh4-300.md
===================================================================
--- config/sh/sh4-300.md	(revision 119436)
+++ config/sh/sh4-300.md	(working copy)
@@ -189,7 +189,7 @@ (define_insn_reservation "sh4_300_ocbwb"
 ;; In most cases, the insn that loads the address of the call should have
 ;; a non-zero latency (mov rn,rm doesn't make sense since we could use rn
 ;; for the address then).  Thus, a preceding insn that can be paired with
-;; a call should be elegible for the delay slot.
+;; a call should be eligible for the delay slot.
 ;;
 ;; calls introduce a longisch delay that is likely to flush the pipelines
 ;; of the caller's instructions.  Ordinary functions tend to end with a
Index: config/spu/spu-builtins.def
===================================================================
--- config/spu/spu-builtins.def	(revision 119436)
+++ config/spu/spu-builtins.def	(working copy)
@@ -1,4 +1,4 @@
-/* Definitions of builtin fuctions for the Synergistic Processing Unit (SPU). */
+/* Definitions of builtin functions for the Synergistic Processing Unit (SPU).  */
 /* Copyright (C) 2006 Free Software Foundation, Inc.
 
    This file is free software; you can redistribute it and/or modify it under
@@ -24,8 +24,8 @@
 #define _A3(a,b,c)   {a, b, c, SPU_BTI_END_OF_PARAMS}
 #define _A4(a,b,c,d) {a, b, c, d, SPU_BTI_END_OF_PARAMS}
 
-/* definitions to support si intrinisic functions: (These and other builtin 
- * definitions must preceed definitions of the overloaded generic intrinsics */
+/* definitions to support si intrinsic functions: (These and other builtin
+ * definitions must precede definitions of the overloaded generic intrinsics */
 
 DEF_BUILTIN (SI_LQD,         CODE_FOR_spu_lqd,       "si_lqd",         B_INSN,   _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_S10_4))
 DEF_BUILTIN (SI_LQX,         CODE_FOR_spu_lqx,       "si_lqx",         B_INSN,   _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
@@ -701,10 +701,10 @@ DEF_BUILTIN (SPU_PROMOTE_7,        CODE_
 DEF_BUILTIN (SPU_PROMOTE_8,        CODE_FOR_spu_promote,   "spu_promote_8",        B_INTERNAL, _A3(SPU_BTI_V4SF,   SPU_BTI_FLOAT,  SPU_BTI_INTSI))
 DEF_BUILTIN (SPU_PROMOTE_9,        CODE_FOR_spu_promote,   "spu_promote_9",        B_INTERNAL, _A3(SPU_BTI_V2DF,   SPU_BTI_DOUBLE, SPU_BTI_INTSI))
 
-/* We need something that is not B_INTERNAL as a sentinal. */
+/* We need something that is not B_INTERNAL as a sentinel.  */
 
-/* These are for the convenience of imlpemnting fma() in the standard
-   libraries. */
+/* These are for the convenience of implementing fma() in the standard
+   libraries.  */
 DEF_BUILTIN (SCALAR_FMA,           CODE_FOR_fma_sf,        "fmas",                 B_INSN,     _A4(SPU_BTI_FLOAT,  SPU_BTI_FLOAT, SPU_BTI_FLOAT, SPU_BTI_FLOAT))
 DEF_BUILTIN (SCALAR_DFMA,          CODE_FOR_fma_df,        "dfmas",                B_INSN,     _A4(SPU_BTI_DOUBLE, SPU_BTI_DOUBLE, SPU_BTI_DOUBLE, SPU_BTI_DOUBLE))
 
Index: config/spu/spu-c.c
===================================================================
--- config/spu/spu-c.c	(revision 119436)
+++ config/spu/spu-c.c	(working copy)
@@ -72,7 +72,7 @@ spu_resolve_overloaded_builtin (tree fnd
   struct spu_builtin_description *desc;
   tree match = NULL_TREE;
 
-  /* The vector types are not available if the backend is not initalized */
+  /* The vector types are not available if the backend is not initialized.  */
   gcc_assert (!flag_preprocess_only);
 
   desc = &spu_builtins[fcode];
Index: config/spu/spu-modes.def
===================================================================
--- config/spu/spu-modes.def	(revision 119436)
+++ config/spu/spu-modes.def	(working copy)
@@ -25,8 +25,8 @@ VECTOR_MODES (INT, 16);       /* V16QI V
 VECTOR_MODES (FLOAT, 8);      /*            V4HF V2SF */ 
 VECTOR_MODES (FLOAT, 16);     /*       V8HF V4SF V2DF */ 
         
-/* A special mode for the intr regsister so we can treat it differently
-   for conditional moves. */
+/* A special mode for the intr register so we can treat it differently
+   for conditional moves.  */
 RANDOM_MODE (INTR);
 
 /* cse_insn needs an INT_MODE larger than WORD_MODE, otherwise some
Index: config/spu/spu.c
===================================================================
--- config/spu/spu.c	(revision 119436)
+++ config/spu/spu.c	(working copy)
@@ -322,7 +322,7 @@ valid_subreg (rtx op)
 }
 
 /* When insv and ext[sz]v ar passed a TI SUBREG, we want to strip it off
-   and ajust the start offset. */
+   and adjust the start offset.  */
 static rtx
 adjust_operand (rtx op, HOST_WIDE_INT * start)
 {
@@ -1651,8 +1651,8 @@ int spu_hint_dist = (8 * 4);
 /* An array of these is used to propagate hints to predecessor blocks. */
 struct spu_bb_info
 {
-  rtx prop_jump;		/* propogated from another block */
-  basic_block bb;		/* the orignal block. */
+  rtx prop_jump;		/* propagated from another block */
+  basic_block bb;		/* the original block. */
 };
 
 /* The special $hbr register is used to prevent the insn scheduler from
@@ -2455,7 +2455,7 @@ spu_legitimate_address (enum machine_mod
 }
 
 /* When the address is reg + const_int, force the const_int into a
-   regiser. */
+   register.  */
 rtx
 spu_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
 			enum machine_mode mode)
@@ -2697,7 +2697,7 @@ spu_pass_by_reference (CUMULATIVE_ARGS *
             
         } va_list[1];
 
-   wheare __args points to the arg that will be returned by the next
+   where __args points to the arg that will be returned by the next
    va_arg(), and __skip points to the previous stack frame such that
    when __args == __skip we should advance __args by 32 bytes. */
 static tree
@@ -2913,8 +2913,8 @@ spu_conditional_register_usage (void)
    aligned.  Taking into account that CSE might replace this reg with
    another one that has not been marked aligned.  
    So this is really only true for frame, stack and virtual registers,
-   which we know are always aligned and should not be adversly effected
-   by CSE. */
+   which we know are always aligned and should not be adversely affected
+   by CSE.  */
 static int
 regno_aligned_for_load (int regno)
 {
@@ -2981,7 +2981,7 @@ store_with_one_insn_p (rtx mem)
   if (GET_CODE (addr) == SYMBOL_REF)
     {
       /* We use the associated declaration to make sure the access is
-         refering to the whole object.
+         referring to the whole object.
          We check both MEM_EXPR and and SYMBOL_REF_DECL.  I'm not sure
          if it is necessary.  Will there be cases where one exists, and
          the other does not?  Will there be cases where both exist, but
@@ -3426,8 +3426,8 @@ mem_is_padded_component_ref (rtx x)
   if (GET_MODE (x) != TYPE_MODE (TREE_TYPE (t)))
     return 0;
   /* If there are no following fields then the field alignment assures
-     the structure is padded to the alignement which means this field is
-     padded too. */
+     the structure is padded to the alignment which means this field is
+     padded too.  */
   if (TREE_CHAIN (t) == 0)
     return 1;
   /* If the following field is also aligned then this field will be
Index: config/spu/spu.md
===================================================================
--- config/spu/spu.md	(revision 119436)
+++ config/spu/spu.md	(working copy)
@@ -1178,8 +1178,8 @@ (define_insn "mpyu_si"
   [(set_attr "type" "fp7")])
 
 ;; This isn't always profitable to use.  Consider r = a * b + c * d.
-;; It's faster to do  the multplies in parallel then add them.  If we
-;; merge a multply and add it prevents the multplies from happening in 
+;; It's faster to do the multiplies in parallel then add them.  If we
+;; merge a multiply and add it prevents the multiplies from happening in
 ;; parallel.
 (define_insn "mpya_si"
   [(set (match_operand:SI 0 "spu_reg_operand" "=r")
Index: config/spu/spu_internals.h
===================================================================
--- config/spu/spu_internals.h	(revision 119436)
+++ config/spu/spu_internals.h	(working copy)
@@ -256,7 +256,7 @@
 
 #define __align_hint(ptr,base,offset) __builtin_spu_align_hint(ptr,base,offset)
 
-/* generic spu_* intrinisics */ 
+/* generic spu_* intrinsics */
 
 #define spu_splats(scalar)        __builtin_spu_splats(scalar) 
 #define spu_convtf(ra,imm)        __builtin_spu_convtf(ra,imm)
Index: config/spu/vmx2spu.h
===================================================================
--- config/spu/vmx2spu.h	(revision 119436)
+++ config/spu/vmx2spu.h	(working copy)
@@ -2155,7 +2155,7 @@ static inline vec_int4 vec_subs(vec_int4
 }
 
 
-/* vec_sum4s (vector sum across partial (1/4) staturated)
+/* vec_sum4s (vector sum across partial (1/4) saturated)
  * =========
  */
 static inline vec_uint4 vec_sum4s(vec_uchar16 a, vec_uint4 b)
@@ -2187,7 +2187,7 @@ static inline vec_int4 vec_sum4s(vec_sho
 }
 
 
-/* vec_sum2s (vector sum across partial (1/2) staturated)
+/* vec_sum2s (vector sum across partial (1/2) saturated)
  * =========
  */
 static inline vec_int4 vec_sum2s(vec_int4 a, vec_int4 b)
@@ -2223,7 +2223,7 @@ static inline vec_int4 vec_sum2s(vec_int
 }
 
 
-/* vec_sums (vector sum staturated)
+/* vec_sums (vector sum saturated)
  * ========
  */
 static inline vec_int4 vec_sums(vec_int4 a, vec_int4 b)
@@ -2909,7 +2909,7 @@ static inline int vec_all_ne(vec_float4 
 }
 
 
-/* vec_all_nge (all elements not greater than or eqaul)
+/* vec_all_nge (all elements not greater than or equal)
  * ===========
  */
 static inline int vec_all_nge(vec_float4 a, vec_float4 b)
@@ -3385,7 +3385,7 @@ static inline int vec_any_ne(vec_float4 
 }
 
 
-/* vec_any_nge (any elements not greater than or eqaul)
+/* vec_any_nge (any elements not greater than or equal)
  * ===========
  */
 static inline int vec_any_nge(vec_float4 a, vec_float4 b)
Index: fold-const.c
===================================================================
--- fold-const.c	(revision 119436)
+++ fold-const.c	(working copy)
@@ -7818,7 +7818,7 @@ maybe_canonicalize_comparison_1 (enum tr
       || TREE_OVERFLOW (cst0))
     return NULL_TREE;
 
-  /* See if we can reduce the mangitude of the constant in
+  /* See if we can reduce the magnitude of the constant in
      arg0 by changing the comparison code.  */
   if (code0 == INTEGER_CST)
     {
@@ -7899,7 +7899,7 @@ maybe_canonicalize_comparison (enum tree
     return t;
 
   /* Try canonicalization by simplifying arg1 using the swapped
-     comparsion.  */
+     comparison.  */
   code = swap_tree_comparison (code);
   return maybe_canonicalize_comparison_1 (code, type, arg1, arg0);
 }
Index: fwprop.c
===================================================================
--- fwprop.c	(revision 119436)
+++ fwprop.c	(working copy)
@@ -389,7 +389,7 @@ propagate_rtx_1 (rtx *px, rtx old, rtx n
 }
 
 /* Replace all occurrences of OLD in X with NEW and try to simplify the
-   resulting expression (in mode MODE).  Return a new expresion if it is
+   resulting expression (in mode MODE).  Return a new expression if it is
    a constant, otherwise X.
 
    Simplifications where occurrences of NEW collapse to a constant are always
Index: predict.c
===================================================================
--- predict.c	(revision 119436)
+++ predict.c	(working copy)
@@ -1601,7 +1601,7 @@ estimate_loops_at_level (struct loop *fi
     }
 }
 
-/* Propates frequencies through structure of loops.  */
+/* Propagates frequencies through structure of loops.  */
 
 static void
 estimate_loops (void)
Index: tree-data-ref.h
===================================================================
--- tree-data-ref.h	(revision 119436)
+++ tree-data-ref.h	(working copy)
@@ -119,7 +119,7 @@ struct data_reference
           a[j].b[5][j] = 0; 
 
      Here the offset expression (j * C_j + C) will not contain variables after
-     subsitution of j=3 (3*C_j + C).
+     substitution of j=3 (3*C_j + C).
 
      Misalignment can be calculated only if all the variables can be 
      substituted with constants, otherwise, we record maximum possible alignment
Index: tree-flow.h
===================================================================
--- tree-flow.h	(revision 119436)
+++ tree-flow.h	(working copy)
@@ -39,8 +39,8 @@ struct basic_block_def;
 typedef struct basic_block_def *basic_block;
 #endif
 
-/* Gimple dataflow datastructure. All publically available fields shall have
-   gimple_ accessor defined in tree-flow-inline.h, all publically modifiable
+/* Gimple dataflow datastructure. All publicly available fields shall have
+   gimple_ accessor defined in tree-flow-inline.h, all publicly modifiable
    fields should have gimple_set accessor.  */
 struct gimple_df GTY(()) {
   /* Array of all variables referenced in the function.  */
Index: tree-ssa-loop-manip.c
===================================================================
--- tree-ssa-loop-manip.c	(revision 119436)
+++ tree-ssa-loop-manip.c	(working copy)
@@ -627,7 +627,7 @@ can_unroll_loop_p (struct loop *loop, un
       || niter->cmp == ERROR_MARK
       /* Scalar evolutions analysis might have copy propagated
 	 the abnormal ssa names into these expressions, hence
-	 emiting the computations based on them during loop
+	 emitting the computations based on them during loop
 	 unrolling might create overlapping life ranges for
 	 them, and failures in out-of-ssa.  */
       || contains_abnormal_ssa_name_p (niter->may_be_zero)
Index: tree-ssa-loop-niter.c
===================================================================
--- tree-ssa-loop-niter.c	(revision 119436)
+++ tree-ssa-loop-niter.c	(working copy)
@@ -1831,7 +1831,7 @@ idx_infer_loop_bounds (tree base, tree *
      unsigned char).
 
      To make things simpler, we require both bounds to fit into type, although
-     there are cases where this would not be strightly necessary.  */
+     there are cases where this would not be strictly necessary.  */
   if (!int_fits_type_p (high, type)
       || !int_fits_type_p (low, type))
     return true;
@@ -2086,7 +2086,7 @@ n_of_executions_at_most (tree stmt,
      
      -- if NITER_BOUND->is_exit is true, then everything before
         NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
-	times, and everyting after it at most NITER_BOUND->bound times.
+	times, and everything after it at most NITER_BOUND->bound times.
 
      -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
 	is executed, then NITER_BOUND->stmt is executed as well in the same
Index: tree-ssa-pre.c
===================================================================
--- tree-ssa-pre.c	(revision 119436)
+++ tree-ssa-pre.c	(working copy)
@@ -1668,7 +1668,7 @@ compute_antic_aux (basic_block block, bo
 	 (since the maximal set often has 300+ members, even when you
 	 have a small number of blocks).
 	 Basically, we defer the computation of ANTIC for this block
-	 until we have processed it's successor, which will inveitably
+	 until we have processed it's successor, which will inevitably
+	 until we have processed it's successor, which will inevitably
 	 have a *much* smaller set of values to phi translate once
 	 clean has been run on it.
 	 The cost of doing this is that we technically perform more
Index: tree-vect-analyze.c
===================================================================
--- tree-vect-analyze.c	(revision 119436)
+++ tree-vect-analyze.c	(working copy)
@@ -1428,7 +1428,7 @@ vect_enhance_data_refs_alignment (loop_v
 	    {
 	      /* For interleaved access we peel only if number of iterations in
 		 the prolog loop ({VF - misalignment}), is a multiple of the
-		 number of the interelaved accesses.  */
+		 number of the interleaved accesses.  */
 	      int elem_size, mis_in_elements;
 	      int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
 
@@ -2228,7 +2228,8 @@ vect_mark_stmts_to_be_vectorized (loop_v
              is not used inside the loop), it will be vectorized, and therefore
              the corresponding DEF_STMTs need to marked as relevant.
 	     We distinguish between two kinds of relevant stmts - those that are
-	     used by a reduction conputation, and those that are (also) used by 	     a regular computation. This allows us later on to identify stmts 
+	     used by a reduction computation, and those that are (also) used by
+ 	     a regular computation. This allows us later on to identify stmts
 	     that are used solely by a reduction, and therefore the order of 
 	     the results that they produce does not have to be kept.
        */
Index: tree-vect-transform.c
===================================================================
--- tree-vect-transform.c	(revision 119436)
+++ tree-vect-transform.c	(working copy)
@@ -368,7 +368,7 @@ vect_create_data_ref_ptr (tree stmt,
 /* Function bump_vector_ptr
 
    Increment a pointer (to a vector type) by vector-size. Connect the new 
-   increment stmt to the exising def-use update-chain of the pointer.
+   increment stmt to the existing def-use update-chain of the pointer.
 
    The pointer def-use update-chain before this function:
                         DATAREF_PTR = phi (p_0, p_2)
@@ -658,7 +658,7 @@ vect_get_vec_def_for_operand (tree op, t
    stmts operating on wider types we need to create 'VF/nunits' "copies" of the
    vector stmt (each computing a vector of 'nunits' results, and together
    computing 'VF' results in each iteration).  This function is called when 
-   vectorizing such a stmt (e.g. vectorizing S2 in the illusration below, in 
+   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
    which VF=16 and nuniti=4, so the number of copies required is 4):
 
    scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT
@@ -2495,13 +2495,13 @@ vect_strided_store_supported (tree vecty
 
 /* Function vect_permute_store_chain.
 
-   Given a chain of interleaved strores in DR_CHAIN of LENGTH that must be
+   Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
    a power of 2, generate interleave_high/low stmts to reorder the data 
    correctly for the stores. Return the final references for stores in
    RESULT_CHAIN.
 
    E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
-   The input is 4 vectors each containg 8 elements. We assign a number to each 
+   The input is 4 vectors each containing 8 elements. We assign a number to each
    element, the input sequence is:
 
    1st vec:   0  1  2  3  4  5  6  7
@@ -2529,7 +2529,7 @@ vect_strided_store_supported (tree vecty
    and of interleave_low:                   2 6 3 7
 
    
-   The permutaion is done in log LENGTH stages. In each stage interleave_high 
+   The permutation is done in log LENGTH stages. In each stage interleave_high
    and interleave_low stmts are created for each pair of vectors in DR_CHAIN, 
    where the first argument is taken from the first half of DR_CHAIN and the 
    second argument from it's second half. 
@@ -2758,7 +2758,7 @@ vectorizable_store (tree stmt, block_stm
      And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
      (the order of the data-refs in the output of vect_permute_store_chain
      corresponds to the order of scalar stmts in the interleaving chain - see
-     the documentaion of vect_permute_store_chain()).
+     the documentation of vect_permute_store_chain()).
 
      In case of both multiple types and interleaving, above vector stores and
      permutation stmts are created for every copy. The result vector stmts are
@@ -3050,7 +3050,7 @@ vect_strided_load_supported (tree vectyp
    correctly. Return the final references for loads in RESULT_CHAIN.
 
    E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
-   The input is 4 vectors each containg 8 elements. We assign a number to each 
+   The input is 4 vectors each containing 8 elements. We assign a number to each
    element, the input sequence is:
 
    1st vec:   0  1  2  3  4  5  6  7
@@ -3078,7 +3078,7 @@ vect_strided_load_supported (tree vectyp
    and of extract_odd:     1 3 5 7
 
    
-   The permutaion is done in log LENGTH stages. In each stage extract_even and 
+   The permutation is done in log LENGTH stages. In each stage extract_even and
    extract_odd stmts are created for each pair of vectors in DR_CHAIN in their 
    order. In our example, 
 
@@ -3443,7 +3443,7 @@ vectorizable_load (tree stmt, block_stmt
      And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
      (the order of the data-refs in the output of vect_permute_load_chain
      corresponds to the order of scalar stmts in the interleaving chain - see
-     the documentaion of vect_permute_load_chain()).
+     the documentation of vect_permute_load_chain()).
      The generation of permutation stmts and recording them in
      STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
 
@@ -4332,7 +4332,7 @@ vect_gen_niters_for_prolog_loop (loop_ve
 
   if (DR_GROUP_FIRST_DR (stmt_info))
     {
-      /* For interleaved access element size must be multipled by the size of
+      /* For interleaved access element size must be multiplied by the size of
 	 the interleaved group.  */
       group_size = DR_GROUP_SIZE (vinfo_for_stmt (
 					       DR_GROUP_FIRST_DR (stmt_info)));
Index: tree-vectorizer.c
===================================================================
--- tree-vectorizer.c	(revision 119436)
+++ tree-vectorizer.c	(working copy)
@@ -1762,7 +1762,7 @@ vect_is_simple_use (tree operand, loop_v
    vector form (i.e., when operating on arguments of type VECTYPE).
     
    The two kinds of widening operations we currently support are
-   NOP and WIDEN_MULT. This function checks if these oprations
+   NOP and WIDEN_MULT. This function checks if these operations
    are supported by the target platform either directly (via vector 
    tree-codes), or via target builtins.
 
@@ -1796,9 +1796,9 @@ supportable_widening_operation (enum tre
         vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8]. 
 
      However, in the special case that the result of the widening operation is 
-     used in a reduction copmutation only, the order doesn't matter (because 
+     used in a reduction computation only, the order doesn't matter (because
      when vectorizing a reduction we change the order of the computation). 
-     Some targets can take advatage of this and generate more efficient code. 
+     Some targets can take advantage of this and generate more efficient code.
      For example, targets like Altivec, that support widen_mult using a sequence
      of {mult_even,mult_odd} generate the following vectors:
         vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].  */
Index: tree-vrp.c
===================================================================
--- tree-vrp.c	(revision 119436)
+++ tree-vrp.c	(working copy)
@@ -2902,7 +2902,7 @@ register_edge_assert_for (tree name, edg
 
   /* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining
      statement of NAME we can assert both operands of the TRUTH_AND_EXPR
-     have non-zero value.  */
+     have nonzero value.  */
   if (((comp_code == EQ_EXPR && integer_onep (val))
        || (comp_code == NE_EXPR && integer_zerop (val))))
     {


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]