[patch] gcc/*: Fix comment typos.
Kazu Hirata
kazu@codesourcery.com
Sun Oct 14 02:16:00 GMT 2007
Hi,
Committed as obvious.
Kazu Hirata
2007-10-14 Kazu Hirata <kazu@codesourcery.com>
* config/fixed-bit.c, config/i386/cpuid.h, config/i386/i386.c,
config/i386/i386.md, config/i386/sse.md, function.c, jump.c,
modulo-sched.c, ra-conflict.c, toplev.c, tree-eh.c, tree-sra.c,
tree-ssa-dse.c, tree-vect-analyze.c, tree-vect-patterns.c,
tree-vect-transform.c: Fix comment typos.
* doc/extend.texi: Fix a typo.
Index: config/fixed-bit.c
===================================================================
--- config/fixed-bit.c (revision 129290)
+++ config/fixed-bit.c (working copy)
@@ -465,7 +465,7 @@ FIXED_DIVHELPER (FIXED_C_TYPE a, FIXED_C
r = pos_a >> (FIXED_WIDTH - FBITS);
#endif
- /* Unsigned divide r by pos_b to quo_r. The remanider is in mod. */
+ /* Unsigned divide r by pos_b to quo_r. The remainder is in mod. */
quo_r = (UINT_C_TYPE)r / (UINT_C_TYPE)pos_b;
mod = (UINT_C_TYPE)r % (UINT_C_TYPE)pos_b;
quo_s = 0;
Index: config/i386/cpuid.h
===================================================================
--- config/i386/cpuid.h (revision 129290)
+++ config/i386/cpuid.h (working copy)
@@ -117,7 +117,7 @@ __get_cpuid_max (unsigned int __ext, uns
/* Return cpuid data for requested cpuid level, as found in returned
eax, ebx, ecx and edx registers. The function checks if cpuid is
supported and returns 1 for valid cpuid information or 0 for
- unsupported cpuid level. All pointers are requred to be non-null. */
+ unsupported cpuid level. All pointers are required to be non-null. */
static __inline int
__get_cpuid (unsigned int __level,
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c (revision 129290)
+++ config/i386/i386.c (working copy)
@@ -1429,7 +1429,7 @@ unsigned int ix86_tune_features[X86_TUNE
replacement is long decoded, so this split helps here as well. */
m_K6,
- /* X86_TUNE_USE_VECTOR_CONVERTS: Preffer vector packed SSE conversion
+ /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
from integer to FP. */
m_AMDFAM10,
};
@@ -13442,8 +13442,8 @@ ix86_expand_sse4_unpack (rtx operands[2]
#define PPERM_REV_INV 0x60 /* bit reverse & invert src */
#define PPERM_ZERO 0x80 /* all 0's */
#define PPERM_ONES 0xa0 /* all 1's */
-#define PPERM_SIGN 0xc0 /* propigate sign bit */
-#define PPERM_INV_SIGN 0xe0 /* invert & propigate sign */
+#define PPERM_SIGN 0xc0 /* propagate sign bit */
+#define PPERM_INV_SIGN 0xe0 /* invert & propagate sign */
#define PPERM_SRC1 0x00 /* use first source byte */
#define PPERM_SRC2 0x10 /* use second source byte */
@@ -24879,7 +24879,7 @@ ix86_expand_round (rtx operand0, rtx ope
/* Validate whether a SSE5 instruction is valid or not.
OPERANDS is the array of operands.
NUM is the number of operands.
- USES_OC0 is true if the instruction uses OC0 and provides 4 varients.
+ USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
NUM_MEMORY is the maximum number of memory operands to accept. */
bool ix86_sse5_valid_op_p (rtx operands[], rtx insn, int num, bool uses_oc0, int num_memory)
{
@@ -24960,7 +24960,7 @@ bool ix86_sse5_valid_op_p (rtx operands[
else if (num == 4 && num_memory == 2)
{
/* If there are two memory operations, we can load one of the memory ops
- into the destination register. This is for optimizating the
+ into the destination register. This is for optimizing the
multiply/add ops, which the combiner has optimized both the multiply
and the add insns to have a memory operation. We have to be careful
that the destination doesn't overlap with the inputs. */
Index: config/i386/i386.md
===================================================================
--- config/i386/i386.md (revision 129290)
+++ config/i386/i386.md (working copy)
@@ -207,7 +207,7 @@ (define_constants
(UNSPECV_PROLOGUE_USE 14)
])
-;; Constants to represent pcomtrue/pcomfalse varients
+;; Constants to represent pcomtrue/pcomfalse variants
(define_constants
[(PCOM_FALSE 0)
(PCOM_TRUE 1)
@@ -4840,7 +4840,7 @@ (define_expand "floatsi<mode>2"
}
/* Offload operand of cvtsi2ss and cvtsi2sd into memory for
!TARGET_INTER_UNIT_CONVERSIONS
- It is neccesary for the patterns to not accept nonemmory operands
+ It is necessary for the patterns to not accept nonmemory operands
as we would optimize out later. */
else if (!TARGET_INTER_UNIT_CONVERSIONS
&& TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (operands[0]))
Index: config/i386/sse.md
===================================================================
--- config/i386/sse.md (revision 129290)
+++ config/i386/sse.md (working copy)
@@ -7749,7 +7749,7 @@ (define_insn "sse5_pmacsdqh"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-;; SSE5 parallel integer mutliply/add instructions for the intrinisics
+;; SSE5 parallel integer multiply/add instructions for the intrinsics
(define_insn "sse5_pmacsswd"
[(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
(ss_plus:V4SI
Index: doc/extend.texi
===================================================================
--- doc/extend.texi (revision 129290)
+++ doc/extend.texi (working copy)
@@ -8143,7 +8143,7 @@ v2di __builtin_ia32_pshlq (v2di, v2di)
v8hi __builtin_ia32_pshlw (v8hi, v8hi)
@end smallexample
-The following builtin-in functions are avaialble when @option{-msse5}
+The following built-in functions are available when @option{-msse5}
is used. The second argument must be an integer constant and generate
the machine instruction that is part of the name with the @samp{_imm}
suffix removed.
Index: function.c
===================================================================
--- function.c (revision 129290)
+++ function.c (working copy)
@@ -5702,7 +5702,7 @@ match_asm_constraints_1 (rtx insn, rtx *
asm ("" : "=r" (output), "=m" (input) : "0" (input))
- Here 'input' is used in two occurences as input (once for the
+ Here 'input' is used in two occurrences as input (once for the
input operand, once for the address in the second output operand).
If we would replace only the occurence of the input operand (to
make the matching) we would be left with this:
@@ -5714,7 +5714,7 @@ match_asm_constraints_1 (rtx insn, rtx *
value, but different pseudos) where we formerly had only one.
With more complicated asms this might lead to reload failures
which wouldn't have happen without this pass. So, iterate over
- all operands and replace all occurences of the register used. */
+ all operands and replace all occurrences of the register used. */
for (j = 0; j < noutputs; j++)
if (!rtx_equal_p (SET_DEST (p_sets[j]), input)
&& reg_overlap_mentioned_p (input, SET_DEST (p_sets[j])))
Index: jump.c
===================================================================
--- jump.c (revision 129290)
+++ jump.c (working copy)
@@ -975,7 +975,7 @@ mark_jump_label (rtx x, rtx insn, int in
(insn != NULL && x == PATTERN (insn) && JUMP_P (insn)));
}
-/* Worker function for mark_jump_label. IN_MEM is TRUE when X occurrs
+/* Worker function for mark_jump_label. IN_MEM is TRUE when X occurs
within a (MEM ...). IS_TARGET is TRUE when X is to be treated as a
jump-target; when the JUMP_LABEL field of INSN should be set or a
REG_LABEL_TARGET note should be added, not a REG_LABEL_OPERAND
Index: modulo-sched.c
===================================================================
--- modulo-sched.c (revision 129290)
+++ modulo-sched.c (working copy)
@@ -1760,7 +1760,7 @@ ps_insert_empty_row (partial_schedule_pt
/* Given U_NODE which is the node that failed to be scheduled; LOW and
UP which are the boundaries of it's scheduling window; compute using
- SCHED_NODES and II a row in the partial schedule that can be splitted
+ SCHED_NODES and II a row in the partial schedule that can be split
which will separate a critical predecessor from a critical successor
thereby expanding the window, and return it. */
static int
Index: ra-conflict.c
===================================================================
--- ra-conflict.c (revision 129290)
+++ ra-conflict.c (working copy)
@@ -1086,7 +1086,7 @@ global_conflicts (void)
}
/* Early clobbers, by definition, need to not only
- clobber the registers that are live accross the insn
+ clobber the registers that are live across the insn
but need to clobber the registers that die within the
insn. The clobbering for registers live across the
insn is handled above. */
Index: toplev.c
===================================================================
--- toplev.c (revision 129290)
+++ toplev.c (working copy)
@@ -2152,7 +2152,7 @@ lang_dependent_init (const char *name)
void
target_reinit (void)
{
- /* Reinitialise RTL backend. */
+ /* Reinitialize RTL backend. */
backend_init_target ();
/* Reinitialize lang-dependent parts. */
Index: tree-eh.c
===================================================================
--- tree-eh.c (revision 129290)
+++ tree-eh.c (working copy)
@@ -2173,7 +2173,7 @@ optimize_double_finally (tree one, tree
}
/* Perform EH refactoring optimizations that are simpler to do when code
- flow has been lowered but EH structurs haven't. */
+ flow has been lowered but EH structures haven't. */
static void
refactor_eh_r (tree t)
Index: tree-sra.c
===================================================================
--- tree-sra.c (revision 129290)
+++ tree-sra.c (working copy)
@@ -2876,7 +2876,7 @@ struct bitfield_overlap_info
};
/* Return true if a BIT_FIELD_REF<(FLD->parent), BLEN, BPOS>
- expression (refereced as BF below) accesses any of the bits in FLD,
+ expression (referenced as BF below) accesses any of the bits in FLD,
false if it doesn't. If DATA is non-null, its field_len and
field_pos are filled in such that BIT_FIELD_REF<(FLD->parent),
field_len, field_pos> (referenced as BFLD below) represents the
Index: tree-ssa-dse.c
===================================================================
--- tree-ssa-dse.c (revision 129290)
+++ tree-ssa-dse.c (working copy)
@@ -653,7 +653,7 @@ execute_simple_dse (void)
bitmap_ior_into (variables_loaded,
LOADED_SYMS (bsi_stmt (bsi)));
- /* Look for statements writting into the write only variables.
+ /* Look for statements writing into the write only variables.
And try to remove them. */
FOR_EACH_BB (bb)
Index: tree-vect-analyze.c
===================================================================
--- tree-vect-analyze.c (revision 129290)
+++ tree-vect-analyze.c (working copy)
@@ -2279,7 +2279,7 @@ vect_analyze_group_access (struct data_r
/* Analyze the access pattern of the data-reference DR.
- In case of non-consecutive accesse call vect_analyze_group_access() to
+ In case of non-consecutive accesses call vect_analyze_group_access() to
analyze groups of strided accesses. */
static bool
Index: tree-vect-patterns.c
===================================================================
--- tree-vect-patterns.c (revision 129290)
+++ tree-vect-patterns.c (working copy)
@@ -545,7 +545,7 @@ vect_recog_pow_pattern (tree last_stmt,
stmts that constitute the pattern. In this case it will be:
WIDEN_SUM <x_t, sum_0>
- Note: The widneing-sum idiom is a widening reduction pattern that is
+ Note: The widening-sum idiom is a widening reduction pattern that is
vectorized without preserving all the intermediate results. It
produces only N/2 (widened) results (by summing up pairs of
intermediate results) rather than all N results. Therefore, we
Index: tree-vect-transform.c
===================================================================
--- tree-vect-transform.c (revision 129290)
+++ tree-vect-transform.c (working copy)
@@ -1381,7 +1381,7 @@ vect_get_constant_vectors (slp_tree slp_
}
-/* Get vectorized defintions from SLP_NODE that contains corresponding
+/* Get vectorized definitions from SLP_NODE that contains corresponding
vectorized def-stmts. */
static void
More information about the Gcc-patches
mailing list