(set_attr "isa" "dm,not_dm,not_dm")])
\f
-;; V8TImode (i.e. __dmr).
-(define_expand "movv8ti"
- [(set (match_operand:V8TI 0 "nonimmediate_operand")
- (match_operand:V8TI 1 "input_operand"))]
+;; TDOmode (i.e. __dmr).
+(define_expand "movtdo"
+ [(set (match_operand:TDO 0 "nonimmediate_operand")
+ (match_operand:TDO 1 "input_operand"))]
"TARGET_DENSE_MATH"
{
- rs6000_emit_move (operands[0], operands[1], V8TImode);
+ rs6000_emit_move (operands[0], operands[1], TDOmode);
DONE;
})
-(define_insn_and_split "*movv8ti"
- [(set (match_operand:V8TI 0 "nonimmediate_operand" "=wa,m,wa,wD,wD,wa")
- (match_operand:V8TI 1 "input_operand" "m,wa,wa,wa,wD,wD"))]
+(define_insn_and_split "*movtdo"
+ [(set (match_operand:TDO 0 "nonimmediate_operand" "=wa,m,wa,wD,wD,wa")
+ (match_operand:TDO 1 "input_operand" "m,wa,wa,wa,wD,wD"))]
"TARGET_DENSE_MATH
- && (gpc_reg_operand (operands[0], V8TImode)
- || gpc_reg_operand (operands[1], V8TImode))"
+ && (gpc_reg_operand (operands[0], TDOmode)
+ || gpc_reg_operand (operands[1], TDOmode))"
"@
#
#
dmmr %0,%1
#"
"&& reload_completed
- && (!dmr_operand (operands[0], V8TImode) || !dmr_operand (operands[1], V8TImode))"
+ && (!dmr_operand (operands[0], TDOmode) || !dmr_operand (operands[1], TDOmode))"
[(const_int 0)]
{
rtx op0 = operands[0];
{
rtx op1_upper = gen_rtx_REG (XOmode, regno1);
rtx op1_lower = gen_rtx_REG (XOmode, regno1 + 4);
- emit_insn (gen_movv8ti_insert512_upper (op0, op1_upper));
- emit_insn (gen_movv8ti_insert512_lower (op0, op0, op1_lower));
+ emit_insn (gen_movtdo_insert512_upper (op0, op1_upper));
+ emit_insn (gen_movtdo_insert512_lower (op0, op0, op1_lower));
DONE;
}
{
rtx op0_upper = gen_rtx_REG (XOmode, regno0);
rtx op0_lower = gen_rtx_REG (XOmode, regno0 + 4);
- emit_insn (gen_movv8ti_extract512 (op0_upper, op1, const0_rtx));
- emit_insn (gen_movv8ti_extract512 (op0_lower, op1, const1_rtx));
+ emit_insn (gen_movtdo_extract512 (op0_upper, op1, const0_rtx));
+ emit_insn (gen_movtdo_extract512 (op0_lower, op1, const1_rtx));
DONE;
}
}
;; Move from VSX registers to DMR registers via two insert 512 bit
;; instructions.
-(define_insn "movv8ti_insert512_upper"
- [(set (match_operand:V8TI 0 "dmr_operand" "=wD")
- (unspec:V8TI [(match_operand:XO 1 "vsx_register_operand" "wa")]
- UNSPEC_DM_INSERT512_UPPER))]
+(define_insn "movtdo_insert512_upper"
+ [(set (match_operand:TDO 0 "dmr_operand" "=wD")
+ (unspec:TDO [(match_operand:XO 1 "vsx_register_operand" "wa")]
+ UNSPEC_DM_INSERT512_UPPER))]
"TARGET_DENSE_MATH"
"dmxxinstdmr512 %0,%1,%Y1,0"
[(set_attr "type" "mma")])
-(define_insn "movv8ti_insert512_lower"
- [(set (match_operand:V8TI 0 "dmr_operand" "=wD")
- (unspec:V8TI [(match_operand:V8TI 1 "dmr_operand" "0")
- (match_operand:XO 2 "vsx_register_operand" "wa")]
- UNSPEC_DM_INSERT512_LOWER))]
+(define_insn "movtdo_insert512_lower"
+ [(set (match_operand:TDO 0 "dmr_operand" "=wD")
+ (unspec:TDO [(match_operand:TDO 1 "dmr_operand" "0")
+ (match_operand:XO 2 "vsx_register_operand" "wa")]
+ UNSPEC_DM_INSERT512_LOWER))]
"TARGET_DENSE_MATH"
"dmxxinstdmr512 %0,%2,%Y2,1"
[(set_attr "type" "mma")])
;; Move from DMR registers to VSX registers via two extract 512 bit
;; instructions.
-(define_insn "movv8ti_extract512"
+(define_insn "movtdo_extract512"
[(set (match_operand:XO 0 "vsx_register_operand" "=wa")
- (unspec:XO [(match_operand:V8TI 1 "dmr_operand" "wD")
+ (unspec:XO [(match_operand:TDO 1 "dmr_operand" "wD")
(match_operand 2 "const_0_to_1_operand" "n")]
UNSPEC_DM_EXTRACT512))]
"TARGET_DENSE_MATH"
;; Reload DMR registers from memory
(define_insn_and_split "reload_dmr_from_memory"
- [(set (match_operand:V8TI 0 "dmr_operand" "=wD")
- (unspec:V8TI [(match_operand:V8TI 1 "memory_operand" "m")]
- UNSPEC_DMR_RELOAD_FROM_MEMORY))
+ [(set (match_operand:TDO 0 "dmr_operand" "=wD")
+ (unspec:TDO [(match_operand:TDO 1 "memory_operand" "m")]
+ UNSPEC_DMR_RELOAD_FROM_MEMORY))
(clobber (match_operand:XO 2 "vsx_register_operand" "=wa"))]
"TARGET_DENSE_MATH"
"#"
rtx mem_lower = adjust_address (src, XOmode, BYTES_BIG_ENDIAN ? 32 : 0);
emit_move_insn (tmp, mem_upper);
- emit_insn (gen_movv8ti_insert512_upper (dest, tmp));
+ emit_insn (gen_movtdo_insert512_upper (dest, tmp));
emit_move_insn (tmp, mem_lower);
- emit_insn (gen_movv8ti_insert512_lower (dest, dest, tmp));
+ emit_insn (gen_movtdo_insert512_lower (dest, dest, tmp));
DONE;
}
[(set_attr "length" "16")
;; Reload dense math registers to memory
(define_insn_and_split "reload_dmr_to_memory"
- [(set (match_operand:V8TI 0 "memory_operand" "=m")
- (unspec:V8TI [(match_operand:V8TI 1 "dmr_operand" "wD")]
- UNSPEC_DMR_RELOAD_TO_MEMORY))
+ [(set (match_operand:TDO 0 "memory_operand" "=m")
+ (unspec:TDO [(match_operand:TDO 1 "dmr_operand" "wD")]
+ UNSPEC_DMR_RELOAD_TO_MEMORY))
(clobber (match_operand:XO 2 "vsx_register_operand" "=wa"))]
"TARGET_DENSE_MATH"
"#"
rtx mem_upper = adjust_address (dest, XOmode, BYTES_BIG_ENDIAN ? 0 : 32);
rtx mem_lower = adjust_address (dest, XOmode, BYTES_BIG_ENDIAN ? 32 : 0);
- emit_insn (gen_movv8ti_extract512 (tmp, src, const0_rtx));
+ emit_insn (gen_movtdo_extract512 (tmp, src, const0_rtx));
emit_move_insn (mem_upper, tmp);
- emit_insn (gen_movv8ti_extract512 (tmp, src, const1_rtx));
+ emit_insn (gen_movtdo_extract512 (tmp, src, const1_rtx));
emit_move_insn (mem_lower, tmp);
DONE;
}
ptr_vector_quad_type_node = build_pointer_type (t);
dmr_type_node = make_node (OPAQUE_TYPE);
- SET_TYPE_MODE (dmr_type_node, V8TImode);
- TYPE_SIZE (dmr_type_node) = bitsize_int (GET_MODE_BITSIZE (V8TImode));
- TYPE_PRECISION (dmr_type_node) = GET_MODE_BITSIZE (V8TImode);
- TYPE_SIZE_UNIT (dmr_type_node) = size_int (GET_MODE_SIZE (V8TImode));
+ SET_TYPE_MODE (dmr_type_node, TDOmode);
+ TYPE_SIZE (dmr_type_node) = bitsize_int (GET_MODE_BITSIZE (TDOmode));
+ TYPE_PRECISION (dmr_type_node) = GET_MODE_BITSIZE (TDOmode);
+ TYPE_SIZE_UNIT (dmr_type_node) = size_int (GET_MODE_SIZE (TDOmode));
SET_TYPE_ALIGN (dmr_type_node, 512);
TYPE_USER_ALIGN (dmr_type_node) = 0;
lang_hooks.types.register_builtin_type (dmr_type_node, "__dmr");
&& !cfun->machine->mma_return_type_error
&& TREE_TYPE (cfun->decl) == fntype
&& (TYPE_MODE (type) == OOmode || TYPE_MODE (type) == XOmode
- || TYPE_MODE (type) == V8TImode))
+ || TYPE_MODE (type) == TDOmode))
{
/* Record we have now handled function CFUN, so the next time we
are called, we do not re-report the same error. */
return NULL_RTX;
}
- if (mode == V8TImode)
+ if (mode == TDOmode)
{
if (TYPE_CANONICAL (type) != NULL_TREE)
type = TYPE_CANONICAL (type);
OPAQUE_MODE (XO, 64);
/* Modes used by __dmr. */
-/* OPAQUE_MODE (TDO, 128); */
-VECTOR_MODE (INT, TI, 8); /* V8TI */
-
-
+OPAQUE_MODE (TDO, 128);
if (FP_REGNO_P (regno))
reg_size = (VECTOR_MEM_VSX_P (mode)
|| VECTOR_ALIGNMENT_P (mode)
- || mode == V8TImode
+ || mode == TDOmode
? UNITS_PER_VSX_WORD
: UNITS_PER_FP_WORD);
/* Dense math register modes need DMR registers or VSX registers divisible by
2. We need to make sure we don't cross between the boundary of FPRs and
traditional Altiviec registers. */
- if (mode == V8TImode)
+ if (mode == TDOmode)
{
if (!TARGET_DENSE_MATH)
return 0;
return 0;
}
- /* No other types other than XOmode or V8TImode can go in DMRs. */
+ /* No modes other than XOmode or TDOmode can go in DMRs. */
if (DMR_REGNO_P (regno))
return 0;
57744).
Similarly, don't allow OOmode (vector pair), XOmode (vector quad), or
- V8TImode (dmr register) to pair with anything else. Vector pairs are
+ TDOmode (dmr register) to pair with anything else. Vector pairs are
restricted to even/odd VSX registers. Without dense math, vector quads are
limited to FPR registers divisible by 4. With dense math, vector quads are
limited to even VSX registers or DMR registers.
rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
if (mode1 == PTImode || mode1 == OOmode || mode1 == XOmode
- || mode1 == V8TImode || mode2 == PTImode || mode2 == OOmode
- || mode2 == XOmode || mode2 == V8TImode)
+ || mode1 == TDOmode || mode2 == PTImode || mode2 == OOmode
+ || mode2 == XOmode || mode2 == TDOmode)
return mode1 == mode2;
if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
V4DFmode,
OOmode,
XOmode,
- V8TImode,
+ TDOmode,
CCmode,
CCUNSmode,
CCEQmode,
/* Special case DMR registers. */
if (rc == RELOAD_REG_DMR)
{
- if (TARGET_DENSE_MATH && (m2 == XOmode || m2 == V8TImode))
+ if (TARGET_DENSE_MATH && (m2 == XOmode || m2 == TDOmode))
{
addr_mask = RELOAD_REG_VALID;
reg_addr[m].addr_mask[rc] = addr_mask;
since it will be broken into two vector moves. Vector quads and
1,024 bit DMR values can only do offset loads. */
else if ((addr_mask != 0) && TARGET_MMA
- && (m2 == OOmode || m2 == XOmode || m2 == V8TImode))
+ && (m2 == OOmode || m2 == XOmode || m2 == TDOmode))
{
addr_mask |= RELOAD_REG_OFFSET;
if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
/* Add support for 1,024 bit DMR registers. */
if (TARGET_DENSE_MATH)
{
- rs6000_vector_unit[V8TImode] = VECTOR_NONE;
- rs6000_vector_mem[V8TImode] = VECTOR_VSX;
- rs6000_vector_align[V8TImode] = 512;
+ rs6000_vector_unit[TDOmode] = VECTOR_NONE;
+ rs6000_vector_mem[TDOmode] = VECTOR_VSX;
+ rs6000_vector_align[TDOmode] = 512;
}
/* Register class constraints for the constraints that depend on compile
if (TARGET_DENSE_MATH)
{
- reg_addr[V8TImode].reload_load = CODE_FOR_reload_dmr_from_memory;
- reg_addr[V8TImode].reload_store = CODE_FOR_reload_dmr_to_memory;
+ reg_addr[TDOmode].reload_load = CODE_FOR_reload_dmr_from_memory;
+ reg_addr[TDOmode].reload_store = CODE_FOR_reload_dmr_to_memory;
}
/* Precalculate HARD_REGNO_NREGS. */
case E_XOmode:
return TARGET_MMA;
- case E_V8TImode:
+ case E_TDOmode:
return TARGET_DENSE_MATH;
case E_SDmode:
(mode == OOmode) ? "__vector_pair" : "__vector_quad");
break;
- case E_V8TImode:
+ case E_TDOmode:
if (CONST_INT_P (operands[1]))
error ("%qs is an opaque type, and you cannot set it to constants",
"__dmr");
/* We can transfer between VSX registers and DMR registers without needing
extra registers. */
- if (TARGET_DENSE_MATH && (mode == XOmode || mode == V8TImode)
+ if (TARGET_DENSE_MATH && (mode == XOmode || mode == TDOmode)
&& ((to_type == DMR_REG_TYPE && from_type == VSX_REG_TYPE)
|| (to_type == VSX_REG_TYPE && from_type == DMR_REG_TYPE)))
return true;
if (mode == XOmode)
return TARGET_DENSE_MATH ? VSX_REGS : FLOAT_REGS;
- if (mode == V8TImode)
+ if (mode == TDOmode)
return VSX_REGS;
if (GET_MODE_CLASS (mode) == MODE_INT)
regno = -1;
/* Dense math registers don't have loads or stores. We have to go through
- the VSX registers to load XOmode (vector quad) and V8TImode (dmr 1024
+ the VSX registers to load XOmode (vector quad) and TDOmode (dmr 1024
bit). */
if (TARGET_DENSE_MATH && rclass == DM_REGS)
return VSX_REGS;
if (mode == XOmode)
return reg_move_base;
- /* __dmr (i.e. V8TImode) is transferred in 2 instructions. */
- else if (mode == V8TImode)
+ /* __dmr (i.e. TDOmode) is transferred in 2 instructions. */
+ else if (mode == TDOmode)
return reg_move_base * 2;
else
/* If we have a vector quad register for MMA or DMR register for dense math,
and this is a load or store, see if we can use vector paired
load/stores. */
- if ((mode == XOmode || mode == V8TImode) && TARGET_MMA
+ if ((mode == XOmode || mode == TDOmode) && TARGET_MMA
&& (MEM_P (dst) || MEM_P (src)))
{
reg_mode = OOmode;
}
/* If we have a vector pair/quad mode, split it into two/four separate
vectors. */
- else if (mode == OOmode || mode == XOmode || mode == V8TImode)
+ else if (mode == OOmode || mode == XOmode || mode == TDOmode)
reg_mode = V1TImode;
else if (FP_REGNO_P (reg))
reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
the last register gets the first memory location. We also need to be
careful of using the right register numbers if we are splitting XO to
OO. */
- if (mode == OOmode || mode == XOmode || mode == V8TImode)
+ if (mode == OOmode || mode == XOmode || mode == TDOmode)
{
nregs = hard_regno_nregs (reg, mode);
int reg_mode_nregs = hard_regno_nregs (reg, reg_mode);
overlap. */
int i;
/* XO/OO are opaque so cannot use subregs. */
- if (mode == OOmode || mode == XOmode || mode == V8TImode)
+ if (mode == OOmode || mode == XOmode || mode == TDOmode)
{
for (i = nregs - 1; i >= 0; i--)
{
continue;
/* XO/OO are opaque so cannot use subregs. */
- if (mode == OOmode || mode == XOmode || mode == V8TImode)
+ if (mode == OOmode || mode == XOmode || mode == TDOmode)
{
rtx dst_i = gen_rtx_REG (reg_mode, REGNO (dst) + j);
rtx src_i = gen_rtx_REG (reg_mode, REGNO (src) + j);
if (frommode != tomode)
{
- /* Do not allow conversions to/from XOmode, OOmode, and V8TImode
+ /* Do not allow conversions to/from XOmode, OOmode, and TDOmode
types. */
if (frommode == XOmode)
return N_("invalid conversion from type %<__vector_quad%>");
return N_("invalid conversion from type %<__vector_pair%>");
if (tomode == OOmode)
return N_("invalid conversion to type %<__vector_pair%>");
- if (frommode == V8TImode)
+ if (frommode == TDOmode)
return N_("invalid conversion from type %<__dmr%>");
- if (tomode == V8TImode)
+ if (tomode == TDOmode)
return N_("invalid conversion to type %<__dmr%>");
}
vectors in terms of loads and stores. */
#define VECTOR_ALIGNMENT_P(MODE) \
(FLOAT128_VECTOR_P (MODE) || (MODE) == OOmode || (MODE) == XOmode \
- || (MODE) == V8TImode)
+ || (MODE) == TDOmode)
#define ALTIVEC_VECTOR_MODE(MODE) \
((MODE) == V16QImode \
tree attributes;
unsigned int uid;
- unsigned int precision : 10;
- unsigned no_force_blk_flag : 1;
- unsigned needs_constructing_flag : 1;
- unsigned transparent_aggr_flag : 1;
- unsigned restrict_flag : 1;
- unsigned contains_placeholder_bits : 2;
-
+ unsigned int precision : 16;
ENUM_BITFIELD(machine_mode) mode : 8;
-
- /* TYPE_STRING_FLAG for INTEGER_TYPE and ARRAY_TYPE.
- TYPE_CXX_ODR_P for RECORD_TYPE and UNION_TYPE. */
- unsigned string_flag : 1;
unsigned lang_flag_0 : 1;
unsigned lang_flag_1 : 1;
unsigned lang_flag_2 : 1;
so we need to store the value 32 (not 31, as we need the zero
as well), hence six bits. */
unsigned align : 6;
+ /* TYPE_STRING_FLAG for INTEGER_TYPE and ARRAY_TYPE.
+ TYPE_CXX_ODR_P for RECORD_TYPE and UNION_TYPE. */
+ unsigned string_flag : 1;
+ unsigned no_force_blk_flag : 1;
+
unsigned warn_if_not_align : 6;
+ unsigned needs_constructing_flag : 1;
+ unsigned transparent_aggr_flag : 1;
+
+ unsigned contains_placeholder_bits : 2;
+ unsigned restrict_flag : 1;
unsigned typeless_storage : 1;
unsigned empty_flag : 1;
unsigned indivisible_p : 1;
unsigned no_named_args_stdarg_p : 1;
- unsigned spare : 15;
+ unsigned spare : 9;
alias_set_type alias_set;
tree pointer_to;