+2004-02-04 Kazu Hirata <kazu@cs.umass.edu>
+
+ * config/alpha/alpha.c, config/arc/arc.c,
+ config/arm/arm-cores.def, config/arm/arm.c, config/arm/arm.h,
+ config/arm/arm1026ejs.md, config/arm/arm1136jfs.md,
+ config/arm/arm926ejs.md, config/arm/vfp.md, config/avr/avr.c,
+ config/c4x/c4x.c, config/cris/cris.c, config/frv/frv.md,
+ config/i386/i386.c, config/i386/i386.h, config/i386/i386.md,
+ config/ia64/ia64.c, config/ia64/unwind-ia64.c,
+ config/iq2000/iq2000.c, config/m32r/m32r.c,
+ config/mips/mips.c, config/mmix/mmix.c, config/mmix/mmix.h,
+ config/ns32k/ns32k.c, config/pa/pa.c, config/pdp11/pdp11.c,
+ config/rs6000/darwin-ldouble.c, config/rs6000/rs6000.c,
+ config/rs6000/rs6000.h, config/sparc/sparc.c,
+ config/vax/vax.c: Fix comment typos. Follow spelling
+ conventions.
+
2004-02-04 Kazu Hirata <kazu@cs.umass.edu>
* alloc-pool.h, c-convert.c, c-lang.c, c-tree.h,
*total = 0;
return true;
}
- /* FALLTHRU */
+ /* Fall through. */
case CONST_DOUBLE:
if (x == CONST0_RTX (mode))
*total = COSTS_N_INSNS (1);
return false;
}
- /* FALLTHRU */
+ /* Fall through. */
case ASHIFTRT:
case LSHIFTRT:
*total = COSTS_N_INSNS (1);
return false;
}
- /* FALLTHRU */
+ /* Fall through. */
case ABS:
if (! float_mode_p)
*total = COSTS_N_INSNS (1) + alpha_rtx_cost_data[alpha_cpu].int_cmov;
return false;
}
- /* FALLTHRU */
+ /* Fall through. */
case FLOAT:
case UNSIGNED_FLOAT:
case NE:
if (!fp_p && op1 == const0_rtx)
break;
- /* FALLTHRU */
+ /* Fall through. */
case ORDERED:
cmp_code = reverse_condition (code);
case VOIDmode:
if (GET_CODE (operands[i]) != CONST_INT)
abort ();
- /* FALLTHRU */
+ /* Fall through. */
case DImode:
reg = gen_rtx_REG (DImode, regno);
regno += 1;
case MODE_INT:
/* Do the same thing as PROMOTE_MODE. */
mode = DImode;
- /* FALLTHRU */
+ /* Fall through. */
case MODE_COMPLEX_INT:
case MODE_VECTOR_INT:
*total = 0;
return true;
}
- /* FALLTHRU */
+ /* Fall through. */
case CONST:
case LABEL_REF:
rather than a string constant. The FLAGS are the bitwise-or of the
traits that apply to that core.
- If you update this table, you must update the "tune" attribue in
+ If you update this table, you must update the "tune" attribute in
arm.md. */
ARM_CORE(arm2, FL_CO_PROC | FL_MODE26, slowmul)
}
}
-/* RTX costs for cores with a slow MUL implimentation. */
+/* RTX costs for cores with a slow MUL implementation. */
static bool
arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
|| reg_mentioned_p (virtual_stack_vars_rtx, op)))
return FALSE;
- /* Constants are converted into offets from labels. */
+ /* Constants are converted into offsets from labels. */
if (GET_CODE (op) == MEM)
{
rtx ind;
not have base+offset addressing modes, so we use IP to
hold the address. Each block requires nregs*2+1 words. */
start_reg = FIRST_VFP_REGNUM;
- /* Cound how many blocks of registers need saving. */
+ /* Count how many blocks of registers need saving. */
for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
{
if ((!regs_ever_live[reg] || call_used_regs[reg])
/* Emit code to push or pop registers to or from the stack. F is the
assembly file. MASK is the registers to push or pop. PUSH is
- non-zero if we should push, and zero if we should pop. For debugging
+ nonzero if we should push, and zero if we should pop. For debugging
output, if pushing, adjust CFA_OFFSET by the amount of space added
to the stack. REAL_REGS should have the same number of bits set as
MASK, and will be used instead (in the same order) to describe which
*pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
-/* Return non-zero if the CONSUMER instruction (a store) does not need
+/* Return nonzero if the CONSUMER instruction (a store) does not need
PRODUCER's value to calculate the address. */
int
return !reg_overlap_mentioned_p (value, addr);
}
-/* Return non-zero if the CONSUMER instruction (an ALU op) does not
+/* Return nonzero if the CONSUMER instruction (an ALU op) does not
have an early register shift value or amount dependency on the
result of PRODUCER. */
return !reg_overlap_mentioned_p (value, early_op);
}
-/* Return non-zero if the CONSUMER instruction (an ALU op) does not
+/* Return nonzero if the CONSUMER instruction (an ALU op) does not
have an early register shift value dependency on the result of
PRODUCER. */
return !reg_overlap_mentioned_p (value, early_op);
}
-/* Return non-zero if the CONSUMER (a mul or mac op) does not
+/* Return nonzero if the CONSUMER (a mul or mac op) does not
have an early register mult dependency on the result of
PRODUCER. */
extern int target_flags;
/* The floating point mode. */
extern const char *target_fpu_name;
-/* For backwards compatability. */
+/* For backwards compatibility. */
extern const char *target_fpe_name;
/* Whether to use floating point hardware. */
extern const char *target_float_abi_name;
/* Default floating point architecture. Override in sub-target if
necessary.
- FIXME: Is this still neccessary/desirable? Do we want VFP chips to
+ FIXME: Is this still necessary/desirable? Do we want VFP chips to
default to VFP unless overridden by a subtarget? If so it would be best
to remove these definitions. It also assumes there is only one cpu model
with a Maverick fpu. */
#define REGNO_REG_CLASS(REGNO) arm_regno_class (REGNO)
/* FPA registers can't do subreg as all values are reformatted to internal
- precision. VFP registers may only be accesed in the mode they
+ precision. VFP registers may only be accessed in the mode they
were set. */
#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
(GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
;; The "umulls", "umlals", "smulls", and "smlals" instructions loop in
;; the execute stage for five iterations in order to set the flags.
-;; The value result is vailable after four iterations.
+;; The value result is available after four iterations.
(define_insn_reservation "mult6" 4
(and (eq_attr "tune" "arm1026ejs")
(eq_attr "insn" "umulls,umlals,smulls,smlals"))
;; base address is 64-bit aligned; if it is not, an additional cycle
;; is required. This model assumes that the address is always 64-bit
;; aligned. Because the processor can load two registers per cycle,
-;; that assumption means that we use the same instruction rservations
+;; that assumption means that we use the same instruction reservations
;; for loading 2k and 2k - 1 registers.
;;
;; The ALU pipeline is stalled until the completion of the last memory
"nothing")
;; The latency for a call is not predictable. Therefore, we use 32 as
-;; roughly equivalent to postive infinity.
+;; roughly equivalent to positive infinity.
(define_insn_reservation "call_op" 32
(and (eq_attr "tune" "arm1026ejs")
;;
;; - A 4-stage LSU pipeline. It has address generation, data cache (1),
;; data cache (2), and writeback stages. (Note that this pipeline,
-;; including the writeback stage, is independant from the ALU & LSU pipes.)
+;; including the writeback stage, is independent from the ALU & LSU pipes.)
(define_cpu_unit "e_1,e_2,e_3,e_wb" "arm1136jfs") ; ALU and MAC
; e_1 = Sh/Mac1, e_2 = ALU/Mac2, e_3 = SAT/Mac3
"arm_no_early_store_addr_dep")
;; An alu op can start sooner after a load, if that alu op does not
-;; have an early register dependancy on the load
+;; have an early register dependency on the load
(define_bypass 2 "11_load1"
"11_alu_op")
(define_bypass 2 "11_load1"
"nothing")
;; The latency for a call is not predictable. Therefore, we use 32 as
-;; roughly equivalent to postive infinity.
+;; roughly equivalent to positive infinity.
(define_insn_reservation "9_call_op" 32
(and (eq_attr "tune" "arm926ejs")
;; second memory stage for loads.
;; We do not model Write-After-Read hazards.
-;; We do not do write scheduling with the arm core, so it is only neccessary
-;; to model the first stage of each pieline
+;; We do not do write scheduling with the arm core, so it is only necessary
+;; to model the first stage of each pipeline
;; ??? Need to model LS pipeline properly for load/store multiple?
-;; We do not model fmstat properly. This could be done by modeiling pipelines
+;; We do not model fmstat properly. This could be done by modeling pipelines
;; properly and defining an absence set between a dummy fmstat unit and all
;; other vfp units.
;; The VFP "type" attributes differ from those used in the FPA model.
;; ffarith Fast floating point insns, eg. abs, neg, cpy, cmp.
;; farith Most arithmetic insns.
-;; fmul Double preision multiply.
+;; fmul Double precision multiply.
;; fdivs Single precision sqrt or division.
;; fdivd Double precision sqrt or division.
;; f_load Floating point load from memory.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;; Insn pattersn
+;; Insn pattern
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; SImode moves
;; ??? For now do not allow loading constants into vfp regs. This causes
-;; problems because small sonstants get converted into adds.
+;; problems because small constants get converted into adds.
(define_insn "*arm_movsi_vfp"
[(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r ,m,!w,r,!w,!w, U")
(match_operand:SI 1 "general_operand" "rI,K,mi,r,r,!w,!w,Ui,!w"))]
;; fldm*
;; fstm*
;; fmdhr et al (VFPv1)
-;; Support for xD (single precisio only) variants.
+;; Support for xD (single precision only) variants.
;; fmrrs, fmsrr
;; fuito*
;; ftoui*
*total = 2;
return true;
}
- /* FALLTHRU */
+ /* Fall through. */
case CONST:
case LABEL_REF:
return;
}
}
- /* Fallthrough. */
+ /* Fall through. */
default:
fatal_insn ("invalid indirect (S) memory address", op);
/* When the shift count is greater than 32 then the result
can be implementation dependent. We truncate the result to
fit in 5 bits so that we do not emit invalid code when
- optimising---such as trying to generate lhu2 with 20021124-1.c. */
+ optimizing---such as trying to generate lhu2 with 20021124-1.c. */
if (((code == ASHIFTRT || code == LSHIFTRT || code == ASHIFT)
&& (GET_CODE (operands[2]) == CONST_INT))
&& INTVAL (operands[2]) > (GET_MODE_BITSIZE (mode) - 1))
switch (code)
{
case 'b':
- /* Print the unsigned supplied integer as if it was signed
+ /* Print the unsigned supplied integer as if it were signed
and < 0, i.e. print 255 or 65535 as -1, 254, 65534 as -2, etc. */
if (GET_CODE (x) != CONST_INT
|| ! CONST_OK_FOR_LETTER_P (INTVAL (x), 'O'))
= regs_ever_live[CRIS_SRP_REGNUM]
|| cfun->machine->needs_return_address_on_stack != 0;
- /* Here we act as if the frame-pointer is needed. */
+ /* Here we act as if the frame-pointer were needed. */
int ap_fp_offset = 4 + (return_address_on_stack ? 4 : 0);
if (fromreg == ARG_POINTER_REGNUM
;; Type: the name of the define_attr type
;; Conditions: "yes" if conditional variants are available
-;; FR500: Fujitsu's categorisation for the FR500
-;; FR400: Fujitsu's categorisation for the FR400 (but see below).
+;; FR500: Fujitsu's categorization for the FR500
+;; FR400: Fujitsu's categorization for the FR400 (but see below).
;; On the FR400, media instructions are divided into 2 broad categories.
;; Category 1 instructions can execute in either the M0 or M1 unit and can
(define_cpu_unit "sl2_i1, sl2_fm1, sl2_b0, sl2_b1" "nodiv")
(define_cpu_unit "sl3_fm1, sl3_b0, sl3_b1" "nodiv")
-;; The following describes conlicts by slots
+;; The following describes conflicts by slots
;; slot0
(exclusion_set "sl0_i0" "sl0_fm0,sl0_b0,sl0_c")
(exclusion_set "sl0_fm0" "sl0_b0,sl0_c")
(exclusion_set "sl3_fm1" "sl3_b0,sl3_b1")
(exclusion_set "sl3_b0" "sl3_b1")
-;; The following describes conlicts by units
+;; The following describes conflicts by units
;; fm0
(exclusion_set "sl0_fm0" "sl1_fm0")
;; "iordi3 %0,%1,%2"
;; [(set_attr "length" "4")])
-;; Excludive OR, 64 bit integers
+;; Exclusive OR, 64 bit integers
;; (define_insn "xordi3"
;; [(set (match_operand:DI 0 "register_operand" "=r")
;; (xor:DI (match_operand:DI 1 "register_operand" "%r")
case BLKmode:
if (bytes < 0)
break;
- /* FALLTHRU */
+ /* Fall through. */
case DImode:
case SImode:
case HImode:
default:
return false;
}
- /* FALLTHRU */
+ /* Fall through. */
case SYMBOL_REF:
case LABEL_REF:
case LABEL_REF:
x = XEXP (x, 0);
- /* FALLTHRU */
+ /* Fall through. */
case CODE_LABEL:
ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
assemble_name (asm_out_file, buf);
fputs ("st(0)", file);
break;
}
- /* FALLTHRU */
+ /* Fall through. */
case 8:
case 4:
case 12:
if (! ANY_FP_REG_P (x))
putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
- /* FALLTHRU */
+ /* Fall through. */
case 16:
case 2:
normal:
case CCmode:
if (req_mode == CCGCmode)
return 0;
- /* FALLTHRU */
+ /* Fall through. */
case CCGCmode:
if (req_mode == CCGOCmode || req_mode == CCNOmode)
return 0;
- /* FALLTHRU */
+ /* Fall through. */
case CCGOCmode:
if (req_mode == CCZmode)
return 0;
- /* FALLTHRU */
+ /* Fall through. */
case CCZmode:
break;
case HImode:
case SImode:
operand = gen_lowpart (DImode, operand);
- /* FALLTHRU */
+ /* Fall through. */
case DImode:
emit_insn (
gen_rtx_SET (VOIDmode,
/* It is better to store HImodes as SImodes. */
if (!TARGET_PARTIAL_REG_STALL)
operand = gen_lowpart (SImode, operand);
- /* FALLTHRU */
+ /* Fall through. */
case SImode:
emit_insn (
gen_rtx_SET (VOIDmode,
return false;
}
}
- /* FALLTHRU */
+ /* Fall through. */
case ROTATE:
case ASHIFTRT:
return true;
}
}
- /* FALLTHRU */
+ /* Fall through. */
case MINUS:
if (FLOAT_MODE_P (mode))
*total = COSTS_N_INSNS (ix86_cost->fadd);
return false;
}
- /* FALLTHRU */
+ /* Fall through. */
case AND:
case IOR:
<< (GET_MODE (XEXP (x, 1)) != DImode)));
return true;
}
- /* FALLTHRU */
+ /* Fall through. */
case NEG:
if (FLOAT_MODE_P (mode))
*total = COSTS_N_INSNS (ix86_cost->fchs);
return false;
}
- /* FALLTHRU */
+ /* Fall through. */
case NOT:
if (!TARGET_64BIT && mode == DImode)
{ \
case '3': \
builtin_define ("__tune_pentium3__"); \
- /* FALLTHRU */ \
+ /* Fall through. */ \
case '2': \
builtin_define ("__tune_pentium2__"); \
break; \
case TYPE_SSEMOV:
if (get_attr_mode (insn) == MODE_TI)
return "movdqa\t{%1, %0|%0, %1}";
- /* FALLTHRU */
+ /* Fall through. */
case TYPE_MMXMOV:
/* Moves from and into integer register is done using movd opcode with
REX prefix. */
case TYPE_SSEMOV:
if (get_attr_mode (insn) == MODE_TI)
return "movdqa\t{%1, %0|%0, %1}";
- /* FALLTHRU */
+ /* Fall through. */
case TYPE_MMXMOV:
return "movq\t{%1, %0|%0, %1}";
case TYPE_MULTI:
|| GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF)
break;
op = XEXP (XEXP (op, 0), 0);
- /* FALLTHRU */
+ /* Fall through. */
case SYMBOL_REF:
if (CONSTANT_POOL_ADDRESS_P (op))
case SUBREG:
x = SUBREG_REG (x);
- /* FALLTHRU */
+ /* Fall through. */
case REG:
if (REGNO (x) == AR_UNAT_REGNUM)
{
need_barrier = 1;
break;
}
- /* FALLTHRU */
+ /* Fall through. */
case INSN:
if (GET_CODE (PATTERN (insn)) == USE
*nat = 1;
return;
}
- /* FALLTHRU */
+ /* Fall through. */
case UNW_NAT_NONE:
dummy_nat = 0;
char *c;
c = strchr (buffer, '\0');
- /* Generate the reversed comparision. This takes four
+ /* Generate the reversed comparison. This takes four
bytes. */
if (float_p)
sprintf (c, "b%s\t%%Z2%s",
*total = 0;
return true;
}
- /* FALLTHRU */
+ /* Fall through. */
case CONST:
case LABEL_REF:
stack pointer (which needs the restriction) or the hard frame
pointer (which doesn't).
- All in all, it seems more consitent to only enforce this restriction
+ All in all, it seems more consistent to only enforce this restriction
during and after reload. */
if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
|| (GET_CODE (XEXP (op, 1)) == CONST_DOUBLE
&& GET_MODE (XEXP (op, 1)) == VOIDmode)))
return 1;
- /* FALLTHROUGH */
+ /* Fall through. */
default:
return address_operand (op, mode);
}
address register) without having to know the specific register or the
specific offset. The setback is that there's a limited number of
registers, and you'll not find out until link time whether you
- should've compiled with -mno-base-addresses. */
+ should have compiled with -mno-base-addresses. */
#define TARGET_MASK_BASE_ADDRESSES 128
/* FIXME: Get rid of this one. */
case MULT:
cost += 2;
- /* FALLTHRU */
+ /* Fall through. */
case PLUS:
cost += ns32k_address_cost (XEXP (operand, 0));
cost += ns32k_address_cost (XEXP (operand, 1));
*total = COSTS_N_INSNS (14);
return true;
}
- /* FALLTHRU */
+ /* Fall through. */
case UDIV:
case MOD:
*total = 0;
return true;
}
- /* FALLTHRU */
+ /* Fall through. */
case CONST:
case LABEL_REF:
c = t;
}
- /* Thanks to commutivity, sum is invariant w.r.t. the next
+ /* Thanks to commutativity, sum is invariant w.r.t. the next
conditional exchange. */
tau = d + c;
case 'Q':
if (TARGET_MFCRF)
fputc (',',file);
- /* FALLTHRU */
+ /* Fall through. */
else
return;
*total = COSTS_N_INSNS (2);
return true;
}
- /* FALLTHRU */
+ /* Fall through. */
case UDIV:
case UMOD:
: (C) == 'Y' ? (word_offset_memref_operand (OP, GET_MODE (OP))) \
: 0)
-/* Defining, which contraints are memory contraints. Tells reload,
+/* Define which constraints are memory constraints. Tell reload
that any memory address can be reloaded by copying the
memory address into a base register if required. */
*total = 0;
return true;
}
- /* FALLTHRU */
+ /* Fall through. */
case HIGH:
*total = 2;
return 1;
if (outer_code == PLUS && (unsigned HOST_WIDE_INT) -INTVAL (x) <= 077)
return 1;
- /* FALLTHRU */
+ /* Fall through. */
case CONST:
case LABEL_REF: