* doc/extend.texi, doc/invoke.texi, doc/md.texi: Fix typos.
+ * builtins.c, cfgloop.h, cgraph.h, config/arm/arm.c,
+ config/i386/i386.c, config/i386/i386.h, config/mips/mips.h,
+ config/rs6000/cell.md, config/rs6000/rs6000.c, config/sh/sh.c,
+ config/sh/sh4-300.md, config/spu/spu-builtins.def,
+ config/spu/spu-c.c, config/spu/spu-modes.def,
+ config/spu/spu.c, config/spu/spu.md,
+ config/spu/spu_internals.h, config/spu/vmx2spu.h,
+ fold-const.c, fwprop.c, predict.c, tree-data-ref.h,
+ tree-flow.h, tree-ssa-loop-manip.c, tree-ssa-loop-niter.c,
+ tree-ssa-pre.c, tree-vect-analyze.c, tree-vect-transform.c,
+ tree-vectorizer.c, tree-vrp.c: Fix comment typos. Follow
+ spelling conventions.
+
2006-12-01 Trevor Smigiel <trevor_smigiel@playstation.sony.com>
* config/spu/spu.c (spu_immediate): Remove trailing comma.
override us. Therefore frame pointer elimination is OK, and using
the soft frame pointer is OK.
- For a non-zero count, or a zero count with __builtin_frame_address,
+ For a nonzero count, or a zero count with __builtin_frame_address,
we require a stable offset from the current frame pointer to the
previous one, so we must use the hard frame pointer, and
we must disable frame pointer elimination. */
/* Helper function for do_mpfr_arg*(). Ensure M is a normal number
and no overflow/underflow occurred. INEXACT is true if M was not
- exacly calculated. TYPE is the tree type for the result. This
+ exactly calculated. TYPE is the tree type for the result. This
function assumes that you cleared the MPFR flags and then
calculated M to see if anything subsequently set a flag prior to
entering this function. Return NULL_TREE if any checks fail. */
struct nb_iter_bound *bounds;
/* If not NULL, loop has just single exit edge stored here (edges to the
- EXIT_BLOCK_PTR do not count. Do not use direcly, this field should
+ EXIT_BLOCK_PTR do not count. Do not use directly; this field should
only be accessed via single_exit/set_single_exit functions. */
edge single_exit_;
struct cgraph_local_info GTY(())
{
- /* Estiimated stack frame consumption by the function. */
+ /* Estimated stack frame consumption by the function. */
HOST_WIDE_INT estimated_self_stack_size;
/* Size of the function before inlining. */
/* The processor for which instructions should be scheduled. */
enum processor_type arm_tune = arm_none;
-/* The default processor used if not overriden by commandline. */
+/* The default processor used if not overridden by commandline. */
static enum processor_type arm_default_cpu = arm_none;
/* Which floating point model to use. */
COSTS_N_INSNS (2), /* cost of FCHS instruction. */
COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
/* For some reason, Athlon deals better with REP prefix (relative to loops)
- comopared to K8. Alignment becomes important after 8 bytes for mempcy and
+ compared to K8. Alignment becomes important after 8 bytes for mempcy and
128 bytes for memset. */
{{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
DUMMY_STRINGOP_ALGS},
/* When there are stringops, we can cheaply increase dest and src pointers.
Otherwise we save code size by maintaining offset (zero is readily
- available from preceeding rep operation) and using x86 addressing modes.
+ available from preceding rep operation) and using x86 addressing modes.
*/
if (TARGET_SINGLE_STRINGOP)
{
if (GET_CODE (align_exp) == CONST_INT)
align = INTVAL (align_exp);
- /* i386 can do missaligned access on resonably increased cost. */
+ /* i386 can do misaligned access on reasonably increased cost. */
if (GET_CODE (expected_align_exp) == CONST_INT
&& INTVAL (expected_align_exp) > align)
align = INTVAL (expected_align_exp);
dst = change_address (dst, BLKmode, destreg);
}
- /* Epologue to copy the remaining bytes. */
+ /* Epilogue to copy the remaining bytes. */
if (label)
{
if (size_needed < desired_align - align)
if (GET_CODE (align_exp) == CONST_INT)
align = INTVAL (align_exp);
- /* i386 can do missaligned access on resonably increased cost. */
+ /* i386 can do misaligned access on reasonably increased cost. */
if (GET_CODE (expected_align_exp) == CONST_INT
&& INTVAL (expected_align_exp) > align)
align = INTVAL (expected_align_exp);
int warn_mmx; /* True when we want to warn about MMX ABI. */
int maybe_vaarg; /* true for calls to possibly vardic fncts. */
int float_in_x87; /* 1 if floating point arguments should
- be passed in 80387 registere. */
+ be passed in 80387 registers. */
int float_in_sse; /* 1 if in 32-bit mode SFmode (2 for DFmode) should
be passed in SSE registers. Otherwise 0. */
} CUMULATIVE_ARGS;
been generated up to this point. */
#define ISA_HAS_BRANCHLIKELY (!ISA_MIPS1)
-/* ISA has a three-operand multiplcation instruction (usually spelt "mul"). */
+/* ISA has a three-operand multiplication instruction (usually spelt "mul"). */
#define ISA_HAS_MUL3 ((TARGET_MIPS3900 \
|| TARGET_MIPS5400 \
|| TARGET_MIPS5500 \
;; Sources: BE BOOK4 (/sfs/enc/doc/PPU_BookIV_DD3.0_latest.pdf)
-;; BE Architechture *DD3.0 and DD3.1*
+;; BE Architecture *DD3.0 and DD3.1*
;; This file simulates the PPU processor unit backend of the pipeline, manual P24.
;; manual P27, stall and flush points
-;; IU, XU, VSU, dipatcher decodes and dispatch 2 insns per cycle in program
+;; IU, XU, VSU, dispatcher decodes and dispatch 2 insns per cycle in program
;; order, the grouped addresses are aligned by 8
;; This file only simulates the one-thread situation
;; XU executes all fixed point insns(3 units, a simple alu, a complex unit,
;;VMX(perm,vsu_ls, fp_ls) X
;; X are illegal combination.
-;; Dual issue exceptons:
+;; Dual issue exceptions:
;;(1) nop-pipelined FXU instr in slot 0
;;(2) non-pipelined FPU inst in slot 0
;; CSI instr(context-synchronizing insn)
;; BRU unit: bru(none register stall), bru_cr(cr register stall)
;; VSU unit: vus(vmx simple), vup(vmx permute), vuc(vmx complex),
-;; vuf(vmx float), fpu(floats). fpu_div is hypthetical, it is for
+;; vuf(vmx float), fpu(floats). fpu_div is hypothetical, it is for
;; nonpipelined simulation
;; micro insns will stall at least 7 cycles to get the first instr from ROM,
;; micro instructions are not dual issued.
; this is not correct,
;; this is a stall in general and not dependent on result
(define_bypass 13 "cell-vecstore" "cell-fpstore")
-; this is not correct, this can never be true, not depent on result
+; this is not correct, this can never be true, not dependent on result
(define_bypass 7 "cell-fp" "cell-fpload")
;; vsu1 should avoid writing to the same target register as vsu2 insn
;; within 12 cycles.
;;Things are not simulated:
;; update instruction, update address gpr are not simulated
-;; vrefp, vrsqrtefp have latency(14), currently simluated as 12 cycle float
+;; vrefp, vrsqrtefp have latency(14), currently simulated as 12 cycle float
;; insns
cycle and we attempt to locate another load in the ready list to
issue with it.
- - If the pedulum is -2, then two stores have already been
+ - If the pendulum is -2, then two stores have already been
issued in this cycle, so we increase the priority of the first load
in the ready list to increase its likelihood of being chosen first
in the next cycle.
compare r0. Hence, if operands[1] has to be loaded from somewhere else
into a register, that register might as well be r0, and we allow the
constant. If it is already in a register, this is likely to be
- allocatated to a different hard register, thus we load the constant into
+ allocated to a different hard register, thus we load the constant into
a register unless it is zero. */
if (!REG_P (operands[2])
&& (GET_CODE (operands[2]) != CONST_INT
operation should be EQ or NE.
- If items are searched in an ordered tree from the root, we can expect
the highpart to be unequal about half of the time; operation should be
- an unequality comparison, operands non-constant, and overall probability
+ an inequality comparison, operands non-constant, and overall probability
about 50%. Likewise for quicksort.
- Range checks will be often made against constants. Even if we assume for
simplicity an even distribution of the non-constant operand over a
&& CONST_OK_FOR_K08 (INTVAL (x)))
*total = 1;
/* prepare_cmp_insn will force costly constants int registers before
- the cbrach[sd]i4 pattterns can see them, so preserve potentially
+ the cbrach[sd]i4 patterns can see them, so preserve potentially
interesting ones not covered by I08 above. */
else if (outer_code == COMPARE
&& ((unsigned HOST_WIDE_INT) INTVAL (x)
if (TARGET_SHMEDIA)
*total = COSTS_N_INSNS (4);
/* prepare_cmp_insn will force costly constants int registers before
- the cbrachdi4 patttern can see them, so preserve potentially
+ the cbrachdi4 pattern can see them, so preserve potentially
interesting ones. */
else if (outer_code == COMPARE && GET_MODE (x) == DImode)
*total = 1;
;; In most cases, the insn that loads the address of the call should have
;; a non-zero latency (mov rn,rm doesn't make sense since we could use rn
;; for the address then). Thus, a preceding insn that can be paired with
-;; a call should be elegible for the delay slot.
+;; a call should be eligible for the delay slot.
;;
;; calls introduce a longish delay that is likely to flush the pipelines
;; of the caller's instructions. Ordinary functions tend to end with a
-/* Definitions of builtin fuctions for the Synergistic Processing Unit (SPU). */
+/* Definitions of builtin functions for the Synergistic Processing Unit (SPU). */
/* Copyright (C) 2006 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it under
#define _A3(a,b,c) {a, b, c, SPU_BTI_END_OF_PARAMS}
#define _A4(a,b,c,d) {a, b, c, d, SPU_BTI_END_OF_PARAMS}
-/* definitions to support si intrinisic functions: (These and other builtin
- * definitions must preceed definitions of the overloaded generic intrinsics */
+/* definitions to support si intrinsic functions: (These and other builtin
+ * definitions must precede definitions of the overloaded generic intrinsics */
DEF_BUILTIN (SI_LQD, CODE_FOR_spu_lqd, "si_lqd", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_S10_4))
DEF_BUILTIN (SI_LQX, CODE_FOR_spu_lqx, "si_lqx", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
DEF_BUILTIN (SPU_PROMOTE_8, CODE_FOR_spu_promote, "spu_promote_8", B_INTERNAL, _A3(SPU_BTI_V4SF, SPU_BTI_FLOAT, SPU_BTI_INTSI))
DEF_BUILTIN (SPU_PROMOTE_9, CODE_FOR_spu_promote, "spu_promote_9", B_INTERNAL, _A3(SPU_BTI_V2DF, SPU_BTI_DOUBLE, SPU_BTI_INTSI))
-/* We need something that is not B_INTERNAL as a sentinal. */
+/* We need something that is not B_INTERNAL as a sentinel. */
-/* These are for the convenience of imlpemnting fma() in the standard
- libraries. */
+/* These are for the convenience of implementing fma() in the standard
+ libraries. */
DEF_BUILTIN (SCALAR_FMA, CODE_FOR_fma_sf, "fmas", B_INSN, _A4(SPU_BTI_FLOAT, SPU_BTI_FLOAT, SPU_BTI_FLOAT, SPU_BTI_FLOAT))
DEF_BUILTIN (SCALAR_DFMA, CODE_FOR_fma_df, "dfmas", B_INSN, _A4(SPU_BTI_DOUBLE, SPU_BTI_DOUBLE, SPU_BTI_DOUBLE, SPU_BTI_DOUBLE))
struct spu_builtin_description *desc;
tree match = NULL_TREE;
- /* The vector types are not available if the backend is not initalized */
+ /* The vector types are not available if the backend is not initialized. */
gcc_assert (!flag_preprocess_only);
desc = &spu_builtins[fcode];
VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
VECTOR_MODES (FLOAT, 16); /* V8HF V4SF V2DF */
-/* A special mode for the intr regsister so we can treat it differently
- for conditional moves. */
+/* A special mode for the intr register so we can treat it differently
+ for conditional moves. */
RANDOM_MODE (INTR);
/* cse_insn needs an INT_MODE larger than WORD_MODE, otherwise some
}
/* When insv and ext[sz]v ar passed a TI SUBREG, we want to strip it off
- and ajust the start offset. */
+ and adjust the start offset. */
static rtx
adjust_operand (rtx op, HOST_WIDE_INT * start)
{
/* An array of these is used to propagate hints to predecessor blocks. */
struct spu_bb_info
{
- rtx prop_jump; /* propogated from another block */
- basic_block bb; /* the orignal block. */
+ rtx prop_jump; /* propagated from another block */
+ basic_block bb; /* the original block. */
};
/* The special $hbr register is used to prevent the insn scheduler from
}
/* When the address is reg + const_int, force the const_int into a
- regiser. */
+ register. */
rtx
spu_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
enum machine_mode mode)
} va_list[1];
- wheare __args points to the arg that will be returned by the next
+ where __args points to the arg that will be returned by the next
va_arg(), and __skip points to the previous stack frame such that
when __args == __skip we should advance __args by 32 bytes. */
static tree
aligned. Taking into account that CSE might replace this reg with
another one that has not been marked aligned.
So this is really only true for frame, stack and virtual registers,
- which we know are always aligned and should not be adversly effected
- by CSE. */
+ which we know are always aligned and should not be adversely effected
+ by CSE. */
static int
regno_aligned_for_load (int regno)
{
if (GET_CODE (addr) == SYMBOL_REF)
{
/* We use the associated declaration to make sure the access is
- refering to the whole object.
+ referring to the whole object.
We check both MEM_EXPR and SYMBOL_REF_DECL. I'm not sure
if it is necessary. Will there be cases where one exists, and
the other does not? Will there be cases where both exist, but
if (GET_MODE (x) != TYPE_MODE (TREE_TYPE (t)))
return 0;
/* If there are no following fields then the field alignment assures
- the structure is padded to the alignement which means this field is
- padded too. */
+ the structure is padded to the alignment which means this field is
+ padded too. */
if (TREE_CHAIN (t) == 0)
return 1;
/* If the following field is also aligned then this field will be
[(set_attr "type" "fp7")])
;; This isn't always profitable to use. Consider r = a * b + c * d.
-;; It's faster to do the multplies in parallel then add them. If we
-;; merge a multply and add it prevents the multplies from happening in
+;; It's faster to do the multiplies in parallel then add them. If we
+;; merge a multiply and add it prevents the multiplies from happening in
;; parallel.
(define_insn "mpya_si"
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
#define __align_hint(ptr,base,offset) __builtin_spu_align_hint(ptr,base,offset)
-/* generic spu_* intrinisics */
+/* generic spu_* intrinsics */
#define spu_splats(scalar) __builtin_spu_splats(scalar)
#define spu_convtf(ra,imm) __builtin_spu_convtf(ra,imm)
}
-/* vec_sum4s (vector sum across partial (1/4) staturated)
+/* vec_sum4s (vector sum across partial (1/4) saturated)
* =========
*/
static inline vec_uint4 vec_sum4s(vec_uchar16 a, vec_uint4 b)
}
-/* vec_sum2s (vector sum across partial (1/2) staturated)
+/* vec_sum2s (vector sum across partial (1/2) saturated)
* =========
*/
static inline vec_int4 vec_sum2s(vec_int4 a, vec_int4 b)
}
-/* vec_sums (vector sum staturated)
+/* vec_sums (vector sum saturated)
* ========
*/
static inline vec_int4 vec_sums(vec_int4 a, vec_int4 b)
}
-/* vec_all_nge (all elements not greater than or eqaul)
+/* vec_all_nge (all elements not greater than or equal)
* ===========
*/
static inline int vec_all_nge(vec_float4 a, vec_float4 b)
}
-/* vec_any_nge (any elements not greater than or eqaul)
+/* vec_any_nge (any elements not greater than or equal)
* ===========
*/
static inline int vec_any_nge(vec_float4 a, vec_float4 b)
|| TREE_OVERFLOW (cst0))
return NULL_TREE;
- /* See if we can reduce the mangitude of the constant in
+ /* See if we can reduce the magnitude of the constant in
arg0 by changing the comparison code. */
if (code0 == INTEGER_CST)
{
return t;
/* Try canonicalization by simplifying arg1 using the swapped
- comparsion. */
+ comparison. */
code = swap_tree_comparison (code);
return maybe_canonicalize_comparison_1 (code, type, arg1, arg0);
}
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
- resulting expression (in mode MODE). Return a new expresion if it is
+ resulting expression (in mode MODE). Return a new expression if it is
a constant, otherwise X.
Simplifications where occurrences of NEW collapse to a constant are always
}
}
-/* Propates frequencies through structure of loops. */
+/* Propagates frequencies through structure of loops. */
static void
estimate_loops (void)
a[j].b[5][j] = 0;
Here the offset expression (j * C_j + C) will not contain variables after
- subsitution of j=3 (3*C_j + C).
+ substitution of j=3 (3*C_j + C).
Misalignment can be calculated only if all the variables can be
substituted with constants, otherwise, we record maximum possible alignment
typedef struct basic_block_def *basic_block;
#endif
-/* Gimple dataflow datastructure. All publically available fields shall have
- gimple_ accessor defined in tree-flow-inline.h, all publically modifiable
+/* Gimple dataflow datastructure. All publicly available fields shall have
+ gimple_ accessor defined in tree-flow-inline.h, all publicly modifiable
fields should have gimple_set accessor. */
struct gimple_df GTY(()) {
/* Array of all variables referenced in the function. */
|| niter->cmp == ERROR_MARK
/* Scalar evolutions analysis might have copy propagated
the abnormal ssa names into these expressions, hence
- emiting the computations based on them during loop
+ emitting the computations based on them during loop
unrolling might create overlapping life ranges for
them, and failures in out-of-ssa. */
|| contains_abnormal_ssa_name_p (niter->may_be_zero)
unsigned char).
To make things simpler, we require both bounds to fit into type, although
- there are cases where this would not be strightly necessary. */
+ there are cases where this would not be strictly necessary. */
if (!int_fits_type_p (high, type)
|| !int_fits_type_p (low, type))
return true;
-- if NITER_BOUND->is_exit is true, then everything before
NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
- times, and everyting after it at most NITER_BOUND->bound times.
+ times, and everything after it at most NITER_BOUND->bound times.
-- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
is executed, then NITER_BOUND->stmt is executed as well in the same
(since the maximal set often has 300+ members, even when you
have a small number of blocks).
Basically, we defer the computation of ANTIC for this block
until we have processed its successor, which will inevitably
+ until we have processed it's successor, which will inevitably
have a *much* smaller set of values to phi translate once
clean has been run on it.
The cost of doing this is that we technically perform more
{
/* For interleaved access we peel only if number of iterations in
the prolog loop ({VF - misalignment}), is a multiple of the
- number of the interelaved accesses. */
+ number of the interleaved accesses. */
int elem_size, mis_in_elements;
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
is not used inside the loop), it will be vectorized, and therefore
the corresponding DEF_STMTs need to marked as relevant.
We distinguish between two kinds of relevant stmts - those that are
- used by a reduction conputation, and those that are (also) used by a regular computation. This allows us later on to identify stmts
+ used by a reduction computation, and those that are (also) used by
+ a regular computation. This allows us later on to identify stmts
that are used solely by a reduction, and therefore the order of
the results that they produce does not have to be kept.
*/
/* Function bump_vector_ptr
Increment a pointer (to a vector type) by vector-size. Connect the new
- increment stmt to the exising def-use update-chain of the pointer.
+ increment stmt to the existing def-use update-chain of the pointer.
The pointer def-use update-chain before this function:
DATAREF_PTR = phi (p_0, p_2)
stmts operating on wider types we need to create 'VF/nunits' "copies" of the
vector stmt (each computing a vector of 'nunits' results, and together
computing 'VF' results in each iteration). This function is called when
- vectorizing such a stmt (e.g. vectorizing S2 in the illusration below, in
+ vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
which VF=16 and nuniti=4, so the number of copies required is 4):
scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
/* Function vect_permute_store_chain.
- Given a chain of interleaved strores in DR_CHAIN of LENGTH that must be
+ Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
a power of 2, generate interleave_high/low stmts to reorder the data
correctly for the stores. Return the final references for stores in
RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
- The input is 4 vectors each containg 8 elements. We assign a number to each
+ The input is 4 vectors each containing 8 elements. We assign a number to each
element, the input sequence is:
1st vec: 0 1 2 3 4 5 6 7
and of interleave_low: 2 6 3 7
- The permutaion is done in log LENGTH stages. In each stage interleave_high
+ The permutation is done in log LENGTH stages. In each stage interleave_high
and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
where the first argument is taken from the first half of DR_CHAIN and the
second argument from its second half.
And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
(the order of the data-refs in the output of vect_permute_store_chain
corresponds to the order of scalar stmts in the interleaving chain - see
- the documentaion of vect_permute_store_chain()).
+ the documentation of vect_permute_store_chain()).
In case of both multiple types and interleaving, above vector stores and
permutation stmts are created for every copy. The result vector stmts are
correctly. Return the final references for loads in RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
- The input is 4 vectors each containg 8 elements. We assign a number to each
+ The input is 4 vectors each containing 8 elements. We assign a number to each
element, the input sequence is:
1st vec: 0 1 2 3 4 5 6 7
and of extract_odd: 1 3 5 7
- The permutaion is done in log LENGTH stages. In each stage extract_even and
+ The permutation is done in log LENGTH stages. In each stage extract_even and
extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
order. In our example,
And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
(the order of the data-refs in the output of vect_permute_load_chain
corresponds to the order of scalar stmts in the interleaving chain - see
- the documentaion of vect_permute_load_chain()).
+ the documentation of vect_permute_load_chain()).
The generation of permutation stmts and recording them in
STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
if (DR_GROUP_FIRST_DR (stmt_info))
{
- /* For interleaved access element size must be multipled by the size of
+ /* For interleaved access element size must be multiplied by the size of
the interleaved group. */
group_size = DR_GROUP_SIZE (vinfo_for_stmt (
DR_GROUP_FIRST_DR (stmt_info)));
vector form (i.e., when operating on arguments of type VECTYPE).
The two kinds of widening operations we currently support are
- NOP and WIDEN_MULT. This function checks if these oprations
+ NOP and WIDEN_MULT. This function checks if these operations
are supported by the target platform either directly (via vector
tree-codes), or via target builtins.
vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
However, in the special case that the result of the widening operation is
- used in a reduction copmutation only, the order doesn't matter (because
+ used in a reduction computation only, the order doesn't matter (because
when vectorizing a reduction we change the order of the computation).
- Some targets can take advatage of this and generate more efficient code.
+ Some targets can take advantage of this and generate more efficient code.
For example, targets like Altivec, that support widen_mult using a sequence
of {mult_even,mult_odd} generate the following vectors:
vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8]. */
/* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining
statement of NAME we can assert both operands of the TRUTH_AND_EXPR
- have non-zero value. */
+ have nonzero value. */
if (((comp_code == EQ_EXPR && integer_onep (val))
|| (comp_code == NE_EXPR && integer_zerop (val))))
{