}
}
- if (best_fit == NULL)
- abort ();
- else
- sel = best_fit;
+ gcc_assert (best_fit);
+ sel = best_fit;
}
insn_flags = sel->flags;
/* The processor for which we should tune should now have been
chosen. */
- if (arm_tune == arm_none)
- abort ();
+ gcc_assert (arm_tune != arm_none);
tune_flags = all_cores[(int)arm_tune].flags;
if (optimize_size)
else
arm_fpu_tune = arm_fpu_arch;
arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
- if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
- abort ();
+ gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
}
if (target_float_abi_name != NULL)
tree a;
tree attr;
- if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
- abort ();
+ gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
/* Decide if the current function is volatile. Such functions
never return, and many memory cycles can be saved by not storing
argument ... */
if (sibling)
{
- if (GET_CODE (sibling) != CALL_INSN)
- abort ();
+ gcc_assert (GET_CODE (sibling) == CALL_INSN);
if (find_regno_fusage (sibling, USE, 3))
return 0;
return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
default:
- abort ();
+ gcc_unreachable ();
}
}
gen_rtx_SET (VOIDmode, target, source));
return 1;
}
- if (remainder == 0xffffffff)
- {
- if (generate)
- emit_constant_insn (cond,
- gen_rtx_SET (VOIDmode, target,
- gen_rtx_NOT (mode, source)));
- return 1;
- }
-
- /* We don't know how to handle this yet below. */
- abort ();
+
+ /* We don't know how to handle other cases yet. */
+ gcc_assert (remainder == 0xffffffff);
+
+ if (generate)
+ emit_constant_insn (cond,
+ gen_rtx_SET (VOIDmode, target,
+ gen_rtx_NOT (mode, source)));
+ return 1;
case MINUS:
/* We treat MINUS as (val - source), since (source - val) is always
break;
default:
- abort ();
+ gcc_unreachable ();
}
/* If we can do it in one insn get out quickly. */
break;
default:
- abort ();
+ gcc_unreachable ();
}
return code;
if (reg == 0)
{
- if (no_new_pseudos)
- abort ();
- else
- reg = gen_reg_rtx (Pmode);
+ gcc_assert (!no_new_pseudos);
+ reg = gen_reg_rtx (Pmode);
subregs = 1;
}
if (reg == 0)
{
- if (no_new_pseudos)
- abort ();
- else
- reg = gen_reg_rtx (Pmode);
+ gcc_assert (!no_new_pseudos);
+ reg = gen_reg_rtx (Pmode);
}
- if (GET_CODE (XEXP (orig, 0)) == PLUS)
- {
- base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
- offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
- base == reg ? 0 : reg);
- }
- else
- abort ();
+ gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
+
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg);
if (GET_CODE (offset) == CONST_INT)
{
test the index for the appropriate mode. */
if (!arm_legitimate_index_p (mode, offset, SET, 0))
{
- if (!no_new_pseudos)
- offset = force_reg (Pmode, offset);
- else
- abort ();
+ gcc_assert (!no_new_pseudos);
+ offset = force_reg (Pmode, offset);
}
if (GET_CODE (offset) == CONST_INT)
/* Something went wrong - thumb_compute_save_reg_mask()
should have arranged for a suitable register to be pushed. */
- abort ();
+ gcc_unreachable ();
}
if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
return;
- if (!flag_pic)
- abort ();
+ gcc_assert (flag_pic);
l1 = gen_label_rtx ();
case MULT:
/* This should have been handled by the CPU specific routines. */
- abort ();
+ gcc_unreachable ();
case TRUNCATE:
if (arm_arch3m && mode == SImode
return 1;
default:
- break;
+ gcc_unreachable ();
}
- abort ();
+ gcc_unreachable ();
case CONST_INT:
if (const_ok_for_arm (INTVAL (x)))
{
enum attr_cirrus attr;
- /* get_attr aborts on USE and CLOBBER. */
+ /* get_attr cannot accept USE or CLOBBER. */
if (!insn
|| GET_CODE (insn) != INSN
|| GET_CODE (PATTERN (insn)) == USE
/* Get Arm register number for ldr insn. */
if (GET_CODE (lhs) == REG)
arm_regno = REGNO (lhs);
- else if (GET_CODE (rhs) == REG)
- arm_regno = REGNO (rhs);
else
- abort ();
+ {
+ gcc_assert (GET_CODE (rhs) == REG);
+ arm_regno = REGNO (rhs);
+ }
/* Next insn. */
first = next_nonnote_insn (first);
}
}
- /* get_attr aborts on USE and CLOBBER. */
+ /* get_attr cannot accept USE or CLOBBER. */
if (!first
|| GET_CODE (first) != INSN
|| GET_CODE (PATTERN (first)) == USE
{
enum rtx_code code = GET_CODE (x);
- if (code == SMAX)
- return GE;
- else if (code == SMIN)
- return LE;
- else if (code == UMIN)
- return LEU;
- else if (code == UMAX)
- return GEU;
-
- abort ();
+ switch (code)
+ {
+ case SMAX:
+ return GE;
+ case SMIN:
+ return LE;
+ case UMIN:
+ return LEU;
+ case UMAX:
+ return GEU;
+ default:
+ gcc_unreachable ();
+ }
}
/* Return 1 if memory locations are adjacent. */
/* Can only handle 2, 3, or 4 insns at present,
though could be easily extended if required. */
- if (nops < 2 || nops > 4)
- abort ();
+ gcc_assert (nops >= 2 && nops <= 4);
/* Loop over the operands and check that the memory references are
suitable (i.e. immediate offsets from the same base register). At
if (GET_CODE (operands[nops + i]) == SUBREG)
operands[nops + i] = alter_subreg (operands + (nops + i));
- if (GET_CODE (operands[nops + i]) != MEM)
- abort ();
+ gcc_assert (GET_CODE (operands[nops + i]) == MEM);
/* Don't reorder volatile memory references; it doesn't seem worth
looking for the case where the order is ok anyway. */
break;
default:
- abort ();
+ gcc_unreachable ();
}
sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
/* Can only handle 2, 3, or 4 insns at present, though could be easily
extended if required. */
- if (nops < 2 || nops > 4)
- abort ();
+ gcc_assert (nops >= 2 && nops <= 4);
/* Loop over the operands and check that the memory references are
suitable (i.e. immediate offsets from the same base register). At
if (GET_CODE (operands[nops + i]) == SUBREG)
operands[nops + i] = alter_subreg (operands + (nops + i));
- if (GET_CODE (operands[nops + i]) != MEM)
- abort ();
+ gcc_assert (GET_CODE (operands[nops + i]) == MEM);
/* Don't reorder volatile memory references; it doesn't seem worth
looking for the case where the order is ok anyway. */
break;
default:
- abort ();
+ gcc_unreachable ();
}
sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
emit_move_insn (mem, sreg);
in_words_to_go--;
- if (in_words_to_go) /* Sanity check */
- abort ();
+ gcc_assert (!in_words_to_go); /* Sanity check */
}
if (in_words_to_go)
{
- if (in_words_to_go < 0)
- abort ();
+ gcc_assert (in_words_to_go > 0);
mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
part_bytes_reg = copy_to_mode_reg (SImode, mem);
}
- if (last_bytes && part_bytes_reg == NULL)
- abort ();
+ gcc_assert (!last_bytes || part_bytes_reg);
if (BYTES_BIG_ENDIAN && last_bytes)
{
switch (cond1)
{
case EQ:
- if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
+ if (cond_or == DOM_CC_X_AND_Y)
return CC_DEQmode;
switch (cond2)
{
+ case EQ: return CC_DEQmode;
case LE: return CC_DLEmode;
case LEU: return CC_DLEUmode;
case GE: return CC_DGEmode;
case GEU: return CC_DGEUmode;
- default: break;
+ default: gcc_unreachable ();
}
- break;
-
case LT:
- if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
+ if (cond_or == DOM_CC_X_AND_Y)
return CC_DLTmode;
- if (cond2 == LE)
- return CC_DLEmode;
- if (cond2 == NE)
- return CC_DNEmode;
- break;
+
+ switch (cond2)
+ {
+ case LT:
+ return CC_DLTmode;
+ case LE:
+ return CC_DLEmode;
+ case NE:
+ return CC_DNEmode;
+ default:
+ gcc_unreachable ();
+ }
case GT:
- if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
+ if (cond_or == DOM_CC_X_AND_Y)
return CC_DGTmode;
- if (cond2 == GE)
- return CC_DGEmode;
- if (cond2 == NE)
- return CC_DNEmode;
- break;
+
+ switch (cond2)
+ {
+ case GT:
+ return CC_DGTmode;
+ case GE:
+ return CC_DGEmode;
+ case NE:
+ return CC_DNEmode;
+ default:
+ gcc_unreachable ();
+ }
case LTU:
- if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
+ if (cond_or == DOM_CC_X_AND_Y)
return CC_DLTUmode;
- if (cond2 == LEU)
- return CC_DLEUmode;
- if (cond2 == NE)
- return CC_DNEmode;
- break;
+
+ switch (cond2)
+ {
+ case LTU:
+ return CC_DLTUmode;
+ case LEU:
+ return CC_DLEUmode;
+ case NE:
+ return CC_DNEmode;
+ default:
+ gcc_unreachable ();
+ }
case GTU:
- if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
+ if (cond_or == DOM_CC_X_AND_Y)
return CC_DGTUmode;
- if (cond2 == GEU)
- return CC_DGEUmode;
- if (cond2 == NE)
- return CC_DNEmode;
- break;
+
+ switch (cond2)
+ {
+ case GTU:
+ return CC_DGTUmode;
+ case GEU:
+ return CC_DGEUmode;
+ case NE:
+ return CC_DNEmode;
+ default:
+ gcc_unreachable ();
+ }
/* The remaining cases only occur when both comparisons are the
same. */
case NE:
+ gcc_assert (cond1 == cond2);
return CC_DNEmode;
case LE:
+ gcc_assert (cond1 == cond2);
return CC_DLEmode;
case GE:
+ gcc_assert (cond1 == cond2);
return CC_DGEmode;
case LEU:
+ gcc_assert (cond1 == cond2);
return CC_DLEUmode;
case GEU:
+ gcc_assert (cond1 == cond2);
return CC_DGEUmode;
default:
- break;
+ gcc_unreachable ();
}
-
- abort ();
}
enum machine_mode
return CCFPEmode;
default:
- abort ();
+ gcc_unreachable ();
}
}
^ (HOST_WIDE_INT) 0x80000000)
- (HOST_WIDE_INT) 0x80000000);
- if (hi + lo != offset)
- abort ();
+ gcc_assert (hi + lo == offset);
if (hi != 0)
{
^ (HOST_WIDE_INT) 0x80000000)
- (HOST_WIDE_INT) 0x80000000);
- if (hi + lo != offset)
- abort ();
+ gcc_assert (hi + lo == offset);
if (hi != 0)
{
move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
HOST_WIDE_INT max_address)
{
- /* This should never be true and the code below assumes these are
- different. */
- if (mp == max_mp)
- abort ();
+ /* The code below assumes these are different. */
+ gcc_assert (mp != max_mp);
if (max_mp == NULL)
{
{
HOST_WIDE_INT offset;
- /* This should never be true, and the code below assumes these are
- different. */
- if (mp == min_mp)
- abort ();
+ /* The code below assumes these are different. */
+ gcc_assert (mp != min_mp);
if (min_mp == NULL)
{
#endif
default:
- abort ();
- break;
+ gcc_unreachable ();
}
}
/* This code shouldn't have been called if there was a natural barrier
within range. */
- if (GET_CODE (from) == BARRIER)
- abort ();
+ gcc_assert (GET_CODE (from) != BARRIER);
/* Count the length of this insn. */
count += get_attr_length (from);
fix->minipool = NULL;
/* If an insn doesn't have a range defined for it, then it isn't
- expecting to be reworked by this code. Better to abort now than
+ expecting to be reworked by this code. Better to stop now than
to generate duff assembly code. */
- if (fix->forwards == 0 && fix->backwards == 0)
- abort ();
+ gcc_assert (fix->forwards || fix->backwards);
/* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
So there might be an empty word before the start of the pool.
/* The first insn must always be a note, or the code below won't
scan it properly. */
insn = get_insns ();
- if (GET_CODE (insn) != NOTE)
- abort ();
+ gcc_assert (GET_CODE (insn) == NOTE);
/* Scan all the insns and record the operands that will need fixing. */
for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
/* The last item on the list of fixes must be a barrier, so
we can never run off the end of the list of fixes without
last_barrier being set. */
- if (ftmp == NULL)
- abort ();
+ gcc_assert (ftmp);
max_address = minipool_vector_head->max_address;
/* Check that there isn't another fix that is in range that
if (REAL_VALUES_EQUAL (r, values_fp[i]))
return strings_fp[i];
- abort ();
+ gcc_unreachable ();
}
/* As for fp_immediate_constant, but value is passed directly, not in rtx. */
if (REAL_VALUES_EQUAL (*r, values_fp[i]))
return strings_fp[i];
- abort ();
+ gcc_unreachable ();
}
/* Output the operands of a LDM/STM instruction to STREAM.
strcpy (pattern, "fstmfdx\t%m0!, {%P1");
p = strlen (pattern);
- if (GET_CODE (operands[1]) != REG)
- abort ();
+ gcc_assert (GET_CODE (operands[1]) == REG);
base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
for (i = 1; i < XVECLEN (operands[2], 0); i++)
const char *
output_call (rtx *operands)
{
- if (arm_arch5)
- abort (); /* Patterns should call blx <reg> directly. */
+ gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
/* Handle calls to lr using ip (which may be clobbered in subr anyway). */
if (REGNO (operands[0]) == LR_REGNUM)
int arm_reg0 = REGNO (operands[1]);
rtx ops[3];
- if (arm_reg0 == IP_REGNUM)
- abort ();
+ gcc_assert (arm_reg0 != IP_REGNUM);
ops[0] = gen_rtx_REG (SImode, arm_reg0);
ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
int arm_reg0 = REGNO (operands[0]);
rtx ops[3];
- if (arm_reg0 == IP_REGNUM)
- abort ();
+ gcc_assert (arm_reg0 != IP_REGNUM);
ops[0] = gen_rtx_REG (SImode, arm_reg0);
ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
int arm_reg0 = REGNO (operands[1]);
rtx ops[2];
- if (arm_reg0 == IP_REGNUM)
- abort ();
+ gcc_assert (arm_reg0 != IP_REGNUM);
ops[0] = gen_rtx_REG (SImode, arm_reg0);
ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
int arm_reg0 = REGNO (operands[0]);
rtx ops[2];
- if (arm_reg0 == IP_REGNUM)
- abort ();
+ gcc_assert (arm_reg0 != IP_REGNUM);
ops[0] = gen_rtx_REG (SImode, arm_reg0);
ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
- if (code1 == MEM)
+ gcc_assert (code1 == MEM); /* Constraints should ensure this. */
+
+ switch (GET_CODE (XEXP (operands[1], 0)))
{
- switch (GET_CODE (XEXP (operands[1], 0)))
+ case REG:
+ output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
+ break;
+
+ case PRE_INC:
+ gcc_assert (TARGET_LDRD);
+ output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
+ break;
+
+ case PRE_DEC:
+ output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
+ break;
+
+ case POST_INC:
+ output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
+ break;
+
+ case POST_DEC:
+ gcc_assert (TARGET_LDRD);
+ output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
+ break;
+
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ otherops[0] = operands[0];
+ otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
+ otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
{
- case REG:
- output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
- break;
-
- case PRE_INC:
- if (!TARGET_LDRD)
- abort (); /* Should never happen now. */
- output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
- break;
-
- case PRE_DEC:
- output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
- break;
-
- case POST_INC:
- output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
- break;
-
- case POST_DEC:
- if (!TARGET_LDRD)
- abort (); /* Should never happen now. */
- output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
- break;
-
- case PRE_MODIFY:
- case POST_MODIFY:
- otherops[0] = operands[0];
- otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
- otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
-
- if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
+ if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
{
- if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
- {
- /* Registers overlap so split out the increment. */
- output_asm_insn ("add%?\t%1, %1, %2", otherops);
- output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
- }
- else
- output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
+ /* Registers overlap so split out the increment. */
+ output_asm_insn ("add%?\t%1, %1, %2", otherops);
+ output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
}
else
+ output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
+ }
+ else
+ {
+ /* We only allow constant increments, so this is safe. */
+ output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
+ }
+ break;
+
+ case LABEL_REF:
+ case CONST:
+ output_asm_insn ("adr%?\t%0, %1", operands);
+ output_asm_insn ("ldm%?ia\t%0, %M0", operands);
+ break;
+
+ default:
+ if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
+ GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
+ {
+ otherops[0] = operands[0];
+ otherops[1] = XEXP (XEXP (operands[1], 0), 0);
+ otherops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
{
- /* We only allow constant increments, so this is safe. */
- output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
- }
- break;
-
- case LABEL_REF:
- case CONST:
- output_asm_insn ("adr%?\t%0, %1", operands);
- output_asm_insn ("ldm%?ia\t%0, %M0", operands);
- break;
-
- default:
- if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
- GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
- {
- otherops[0] = operands[0];
- otherops[1] = XEXP (XEXP (operands[1], 0), 0);
- otherops[2] = XEXP (XEXP (operands[1], 0), 1);
-
- if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ if (GET_CODE (otherops[2]) == CONST_INT)
{
- if (GET_CODE (otherops[2]) == CONST_INT)
+ switch ((int) INTVAL (otherops[2]))
{
- switch ((int) INTVAL (otherops[2]))
- {
- case -8:
- output_asm_insn ("ldm%?db\t%1, %M0", otherops);
- return "";
- case -4:
- output_asm_insn ("ldm%?da\t%1, %M0", otherops);
- return "";
- case 4:
- output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
- return "";
- }
+ case -8:
+ output_asm_insn ("ldm%?db\t%1, %M0", otherops);
+ return "";
+ case -4:
+ output_asm_insn ("ldm%?da\t%1, %M0", otherops);
+ return "";
+ case 4:
+ output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
+ return "";
}
- if (TARGET_LDRD
- && (GET_CODE (otherops[2]) == REG
- || (GET_CODE (otherops[2]) == CONST_INT
- && INTVAL (otherops[2]) > -256
- && INTVAL (otherops[2]) < 256)))
+ }
+ if (TARGET_LDRD
+ && (GET_CODE (otherops[2]) == REG
+ || (GET_CODE (otherops[2]) == CONST_INT
+ && INTVAL (otherops[2]) > -256
+ && INTVAL (otherops[2]) < 256)))
+ {
+ if (reg_overlap_mentioned_p (otherops[0],
+ otherops[2]))
{
- if (reg_overlap_mentioned_p (otherops[0],
- otherops[2]))
- {
- /* Swap base and index registers over to
- avoid a conflict. */
- otherops[1] = XEXP (XEXP (operands[1], 0), 1);
- otherops[2] = XEXP (XEXP (operands[1], 0), 0);
-
- }
- /* If both registers conflict, it will usually
- have been fixed by a splitter. */
- if (reg_overlap_mentioned_p (otherops[0],
- otherops[2]))
- {
- output_asm_insn ("add%?\t%1, %1, %2", otherops);
- output_asm_insn ("ldr%?d\t%0, [%1]",
- otherops);
- return "";
- }
- else
- {
- output_asm_insn ("ldr%?d\t%0, [%1, %2]",
- otherops);
- return "";
- }
+ /* Swap base and index registers over to
+ avoid a conflict. */
+ otherops[1] = XEXP (XEXP (operands[1], 0), 1);
+ otherops[2] = XEXP (XEXP (operands[1], 0), 0);
+
}
- if (GET_CODE (otherops[2]) == CONST_INT)
+ /* If both registers conflict, it will usually
+ have been fixed by a splitter. */
+ if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
{
- if (!(const_ok_for_arm (INTVAL (otherops[2]))))
- output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
- else
- output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ output_asm_insn ("add%?\t%1, %1, %2", otherops);
+ output_asm_insn ("ldr%?d\t%0, [%1]",
+ otherops);
}
else
- output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
+ return "";
}
- else
- output_asm_insn ("sub%?\t%0, %1, %2", otherops);
-
- return "ldm%?ia\t%0, %M0";
- }
- else
- {
- otherops[1] = adjust_address (operands[1], SImode, 4);
- /* Take care of overlapping base/data reg. */
- if (reg_mentioned_p (operands[0], operands[1]))
+
+ if (GET_CODE (otherops[2]) == CONST_INT)
{
- output_asm_insn ("ldr%?\t%0, %1", otherops);
- output_asm_insn ("ldr%?\t%0, %1", operands);
+ if (!(const_ok_for_arm (INTVAL (otherops[2]))))
+ output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
}
else
- {
- output_asm_insn ("ldr%?\t%0, %1", operands);
- output_asm_insn ("ldr%?\t%0, %1", otherops);
- }
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+
+ return "ldm%?ia\t%0, %M0";
+ }
+ else
+ {
+ otherops[1] = adjust_address (operands[1], SImode, 4);
+ /* Take care of overlapping base/data reg. */
+ if (reg_mentioned_p (operands[0], operands[1]))
+ {
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
}
}
}
- else
- abort (); /* Constraints should prevent this. */
}
- else if (code0 == MEM && code1 == REG)
+ else
{
- if (REGNO (operands[1]) == IP_REGNUM)
- abort ();
+ /* Constraints should ensure this. */
+ gcc_assert (code0 == MEM && code1 == REG);
+ gcc_assert (REGNO (operands[1]) != IP_REGNUM);
switch (GET_CODE (XEXP (operands[0], 0)))
{
break;
case PRE_INC:
- if (!TARGET_LDRD)
- abort (); /* Should never happen now. */
+ gcc_assert (TARGET_LDRD);
output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
break;
break;
case POST_DEC:
- if (!TARGET_LDRD)
- abort (); /* Should never happen now. */
+ gcc_assert (TARGET_LDRD);
output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
break;
output_asm_insn ("str%?\t%1, %0", otherops);
}
}
- else
- /* Constraints should prevent this. */
- abort ();
return "";
}
return "and";
default:
- abort ();
+ gcc_unreachable ();
}
}
const char * mnem;
enum rtx_code code = GET_CODE (op);
- if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
- *amountp = -1;
- else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
- *amountp = INTVAL (XEXP (op, 1));
- else
- abort ();
+ switch (GET_CODE (XEXP (op, 1)))
+ {
+ case REG:
+ case SUBREG:
+ *amountp = -1;
+ break;
+
+ case CONST_INT:
+ *amountp = INTVAL (XEXP (op, 1));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
switch (code)
{
break;
case ROTATE:
- if (*amountp == -1)
- abort ();
+ gcc_assert (*amountp != -1);
*amountp = 32 - *amountp;
/* Fall through. */
case MULT:
/* We never have to worry about the amount being other than a
power of 2, since this case can never be reloaded from a reg. */
- if (*amountp != -1)
- *amountp = int_log2 (*amountp);
- else
- abort ();
+ gcc_assert (*amountp != -1);
+ *amountp = int_log2 (*amountp);
return "asl";
default:
- abort ();
+ gcc_unreachable ();
}
if (*amountp != -1)
while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
{
- if (shift > 31)
- abort ();
+ gcc_assert (shift <= 31);
shift++;
}
return "";
}
- if (current_function_calls_alloca && !really_return)
- abort ();
+ gcc_assert (!current_function_calls_alloca || really_return);
sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
live_regs_mask |= (1 << SP_REGNUM);
}
else
- {
- if (! IS_INTERRUPT (func_type)
- && ! TARGET_REALLY_IWMMXT)
- abort ();
- }
+ gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
}
/* On some ARM architectures it is faster to use LDR rather than
offsets = arm_get_frame_offsets ();
stack_adjust = offsets->outgoing_args - offsets->saved_regs;
- if (stack_adjust != 0 && stack_adjust != 4)
- abort ();
+ gcc_assert (stack_adjust == 0 || stack_adjust == 4);
if (stack_adjust && arm_arch5)
sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
}
/* Sanity check. */
- if (arm_ccfsm_state || arm_target_insn)
- abort ();
+ gcc_assert (!arm_ccfsm_state && !arm_target_insn);
func_type = arm_current_func_type ();
return "";
}
- if (current_function_calls_eh_return
- && ! really_return)
- /* If we are throwing an exception, then we really must
- be doing a return, so we can't tail-call. */
- abort ();
+ /* If we are throwing an exception, then we really must be doing a
+ return, so we can't tail-call. */
+ gcc_assert (!current_function_calls_eh_return || really_return);
offsets = arm_get_frame_offsets ();
saved_regs_mask = arm_compute_save_reg_mask ();
frame generation actually contains the old stack pointer. So a
quick way to unwind the stack is just pop the IP register directly
into the stack pointer. */
- if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
- abort ();
+ gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
saved_regs_mask &= ~ (1 << IP_REGNUM);
saved_regs_mask |= (1 << SP_REGNUM);
/* We need to take into account any stack-frame rounding. */
offsets = arm_get_frame_offsets ();
- if (use_return_insn (FALSE, NULL)
- && return_used_this_function
- && offsets->saved_regs != offsets->outgoing_args
- && !frame_pointer_needed)
- abort ();
+ gcc_assert (!use_return_insn (FALSE, NULL)
+ || !return_used_this_function
+ || offsets->saved_regs == offsets->outgoing_args
+ || frame_pointer_needed);
/* Reset the ARM-specific per-function variables. */
after_arm_reorg = 0;
if (mask & (1 << i))
num_regs++;
- if (num_regs == 0 || num_regs > 16)
- abort ();
+ gcc_assert (num_regs && num_regs <= 16);
/* We don't record the PC in the dwarf frame information. */
num_dwarf_regs = num_regs;
/* Ensure SP remains doubleword aligned. */
if (offsets->outgoing_args & 7)
offsets->outgoing_args += 4;
- if (offsets->outgoing_args & 7)
- abort ();
+ gcc_assert (!(offsets->outgoing_args & 7));
}
return offsets;
return offsets->outgoing_args - (offsets->saved_args + 4);
default:
- abort ();
+ gcc_unreachable ();
}
- break;
+ gcc_unreachable ();
case FRAME_POINTER_REGNUM:
switch (to)
return offsets->outgoing_args - offsets->soft_frame;
default:
- abort ();
+ gcc_unreachable ();
}
- break;
+ gcc_unreachable ();
default:
/* You cannot eliminate from the stack pointer.
pointer to the stack pointer, but this will never
happen, since if a stack frame is not needed the
hard frame pointer will never be used. */
- abort ();
+ gcc_unreachable ();
}
}
stack decrement per function, and this is not it. If
this instruction is labeled as being part of the frame
creation sequence then dwarf2out_frame_debug_expr will
- abort when it encounters the assignment of IP to FP
+ die when it encounters the assignment of IP to FP
later on, since the use of SP here establishes SP as
the CFA register and not IP.
case 'X': /* Cirrus register in D mode. */
case 'Y': /* Cirrus register in FX mode. */
case 'Z': /* Cirrus register in DX mode. */
- if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
- abort ();
+ gcc_assert (GET_CODE (x) == REG
+ && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
fprintf (stream, "mv%s%s",
code == 'W' ? "f"
return;
}
- if (GET_CODE (x) == REG)
- asm_fprintf (stream, "%r", REGNO (x));
- else if (GET_CODE (x) == MEM)
+ switch (GET_CODE (x))
{
+ case REG:
+ asm_fprintf (stream, "%r", REGNO (x));
+ break;
+
+ case MEM:
output_memory_reference_mode = GET_MODE (x);
output_address (XEXP (x, 0));
- }
- else if (GET_CODE (x) == CONST_DOUBLE)
- fprintf (stream, "#%s", fp_immediate_constant (x));
- else if (GET_CODE (x) == NEG)
- abort (); /* This should never happen now. */
- else
- {
+ break;
+
+ case CONST_DOUBLE:
+ fprintf (stream, "#%s", fp_immediate_constant (x));
+ break;
+
+ default:
+ gcc_assert (GET_CODE (x) != NEG);
fputc ('#', stream);
output_addr_const (stream, x);
+ break;
}
}
}
{
int i, units;
- if (GET_CODE (x) != CONST_VECTOR)
- abort ();
+ gcc_assert (GET_CODE (x) == CONST_VECTOR);
units = CONST_VECTOR_NUNITS (x);
case V4HImode: size = 2; break;
case V8QImode: size = 1; break;
default:
- abort ();
+ gcc_unreachable ();
}
for (i = 0; i < units; i++)
case CC_DLTUmode: code = ARM_CC;
dominance:
- if (comp_code != EQ && comp_code != NE)
- abort ();
+ gcc_assert (comp_code == EQ || comp_code == NE);
if (comp_code == EQ)
return ARM_INVERSE_CONDITION_CODE (code);
case EQ: return ARM_EQ;
case GE: return ARM_PL;
case LT: return ARM_MI;
- default: abort ();
+ default: gcc_unreachable ();
}
case CC_Zmode:
{
case NE: return ARM_NE;
case EQ: return ARM_EQ;
- default: abort ();
+ default: gcc_unreachable ();
}
case CC_Nmode:
{
case NE: return ARM_MI;
case EQ: return ARM_PL;
- default: abort ();
+ default: gcc_unreachable ();
}
case CCFPEmode:
/* UNEQ and LTGT do not have a representation. */
case UNEQ: /* Fall through. */
case LTGT: /* Fall through. */
- default: abort ();
+ default: gcc_unreachable ();
}
case CC_SWPmode:
case GTU: return ARM_CC;
case LEU: return ARM_CS;
case LTU: return ARM_HI;
- default: abort ();
+ default: gcc_unreachable ();
}
case CC_Cmode:
{
case LTU: return ARM_CS;
case GEU: return ARM_CC;
- default: abort ();
+ default: gcc_unreachable ();
}
case CCmode:
case GTU: return ARM_HI;
case LEU: return ARM_LS;
case LTU: return ARM_CC;
- default: abort ();
+ default: gcc_unreachable ();
}
- default: abort ();
+ default: gcc_unreachable ();
}
-
- abort ();
}
void
return;
}
- if (arm_ccfsm_state != 0 && !reverse)
- abort ();
+ gcc_assert (!arm_ccfsm_state || reverse);
if (GET_CODE (insn) != JUMP_INSN)
return;
then_not_else = FALSE;
}
else
- abort ();
+ gcc_unreachable ();
/* See how many insns this branch skips, and what kind of insns. If all
insns are okay, and the label or unconditional branch to the same
{
if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
arm_target_label = CODE_LABEL_NUMBER (label);
- else if (seeking_return || arm_ccfsm_state == 2)
+ else
{
+ gcc_assert (seeking_return || arm_ccfsm_state == 2);
+
while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
{
this_insn = next_nonnote_insn (this_insn);
- if (this_insn && (GET_CODE (this_insn) == BARRIER
- || GET_CODE (this_insn) == CODE_LABEL))
- abort ();
+ gcc_assert (!this_insn
+ || (GET_CODE (this_insn) != BARRIER
+ && GET_CODE (this_insn) != CODE_LABEL));
}
if (!this_insn)
{
}
arm_target_insn = this_insn;
}
- else
- abort ();
if (jump_clobbers)
{
- if (reverse)
- abort ();
+ gcc_assert (!reverse);
arm_current_cc =
get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
0), 0), 1));
break;
default:
- abort ();
+ gcc_unreachable ();
}
def_mbuiltin (d->mask, d->name, type, d->code);
|| ! (*insn_data[icode].operand[0].predicate) (target, tmode))
target = gen_reg_rtx (tmode);
- /* In case the insn wants input operands in modes different from
- the result, abort. */
- if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
- abort ();
+ gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
op0 = copy_to_mode_reg (mode0, op0);
int lo_mask = mask & 0xFF;
int pushed_words = 0;
- if (mask == 0)
- abort ();
+ gcc_assert (mask);
if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
{
int
is_called_in_ARM_mode (tree func)
{
- if (TREE_CODE (func) != FUNCTION_DECL)
- abort ();
+ gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
/* Ignore the problem about functions whose address is taken. */
if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
return offsets->saved_regs - offsets->saved_args;
default:
- abort();
+ gcc_unreachable ();
}
break;
return offsets->saved_regs - offsets->soft_frame;
default:
- abort();
+ gcc_unreachable ();
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
{
const char * name;
- if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
- abort ();
- if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
- abort ();
+ gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
+ gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
+ == SYMBOL_REF);
name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
/* Generate code sequence to switch us into Thumb mode. */
rtx arg1;
rtx arg2;
- if (GET_CODE (operands[0]) != REG)
- abort ();
-
- if (GET_CODE (operands[1]) != MEM)
- abort ();
+ gcc_assert (GET_CODE (operands[0]) == REG);
+ gcc_assert (GET_CODE (operands[1]) == MEM);
/* Get the memory address. */
addr = XEXP (operands[1], 0);
else
base = arg1, offset = arg2;
- if (GET_CODE (base) != REG)
- abort ();
+ gcc_assert (GET_CODE (base) == REG);
/* Catch the case of <address> = <reg> + <reg> */
if (GET_CODE (offset) == REG)
break;
default:
- abort ();
- break;
+ gcc_unreachable ();
}
return "";
break;
default:
- abort ();
+ gcc_unreachable ();
}
return "";
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
- abort ();
+ gcc_unreachable ();
}
/* Return the length of a function name prefix
int i;
const char * pattern;
- if (GET_CODE (x) != CONST_VECTOR)
- abort ();
+ gcc_assert (GET_CODE (x) == CONST_VECTOR);
switch (GET_MODE (x))
{
case V2SImode: pattern = "%08x"; break;
case V4HImode: pattern = "%04x"; break;
case V8QImode: pattern = "%02x"; break;
- default: abort ();
+ default: gcc_unreachable ();
}
fprintf (file, "0x");
if (IS_IWMMXT_REGNUM (regno))
return 112 + regno - FIRST_IWMMXT_REGNUM;
- abort ();
+ gcc_unreachable ();
}