[gcc(refs/users/marxin/heads/i386-option-cleanup)] Remove TARGET_foo (ix86_tune == PROCESSOR_foo) macros.
Martin Liska
marxin@gcc.gnu.org
Tue Mar 9 14:03:46 GMT 2021
https://gcc.gnu.org/g:1a906a60f1d3b705c8d52d0cbd92b604a56d2fb0
commit 1a906a60f1d3b705c8d52d0cbd92b604a56d2fb0
Author: Martin Liska <mliska@suse.cz>
Date: Tue Mar 9 14:56:54 2021 +0100
Remove TARGET_foo (ix86_tune == PROCESSOR_foo) macros.
Diff:
---
gcc/config/i386/i386-expand.c | 2 +-
gcc/config/i386/i386.c | 16 +++++++------
gcc/config/i386/i386.h | 45 +----------------------------------
gcc/config/i386/i386.md | 8 +++----
gcc/config/i386/x86-tune-sched-atom.c | 7 +++---
gcc/config/i386/x86-tune-sched-bd.c | 5 ++--
gcc/config/i386/x86-tune-sched.c | 2 +-
7 files changed, 23 insertions(+), 62 deletions(-)
diff --git a/gcc/config/i386/i386-expand.c b/gcc/config/i386/i386-expand.c
index 02d314226d1..b3627ed3868 100644
--- a/gcc/config/i386/i386-expand.c
+++ b/gcc/config/i386/i386-expand.c
@@ -7048,7 +7048,7 @@ decide_alignment (int align,
desired_align = GET_MODE_SIZE (move_mode);
/* PentiumPro has special logic triggering for 8 byte aligned blocks.
copying whole cacheline at once. */
- if (TARGET_PENTIUMPRO
+ if (TARGET_CPU_P (PENTIUMPRO)
&& (alg == rep_prefix_4_byte || alg == rep_prefix_1_byte))
desired_align = 8;
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 2603333f87b..c03947ddca5 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -10167,7 +10167,7 @@ ix86_decompose_address (rtx addr, struct ix86_address *out)
Avoid this by transforming to [%esi+0].
Reload calls address legitimization without cfun defined, so we need
to test cfun for being non-NULL. */
- if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
+ if (TARGET_CPU_P (K6) && cfun && optimize_function_for_speed_p (cfun)
&& base_reg && !index_reg && !disp
&& REGNO (base_reg) == SI_REG)
disp = const0_rtx;
@@ -10245,7 +10245,7 @@ ix86_address_cost (rtx x, machine_mode, addr_space_t, bool)
memory address, but I don't have AMD-K6 machine handy to check this
theory. */
- if (TARGET_K6
+ if (TARGET_CPU_P (K6)
&& ((!parts.disp && parts.base && parts.index && parts.scale != 1)
|| (parts.disp && !parts.base && parts.index && parts.scale != 1)
|| (!parts.disp && parts.base && parts.index && parts.scale == 1)))
@@ -14906,7 +14906,7 @@ ix86_lea_outperforms (rtx_insn *insn, unsigned int regno0, unsigned int regno1,
/* For Atom processors newer than Bonnell, if using a 2-source or
3-source LEA for non-destructive destination purposes, or due to
wanting ability to use SCALE, the use of LEA is justified. */
- if (!TARGET_BONNELL)
+ if (!TARGET_CPU_P (BONNELL))
{
if (has_scale)
return true;
@@ -15048,7 +15048,7 @@ ix86_avoid_lea_for_addr (rtx_insn *insn, rtx operands[])
than lea for most processors. For the processors like BONNELL, if
the destination register of LEA holds an actual address which will
be used soon, LEA is better and otherwise ADD is better. */
- if (!TARGET_BONNELL
+ if (!TARGET_CPU_P (BONNELL)
&& parts.scale == 1
&& (!parts.disp || parts.disp == const0_rtx)
&& (regno0 == regno1 || regno0 == regno2))
@@ -22353,7 +22353,7 @@ ix86_add_stmt_cost (class vec_info *vinfo, void *data, int count,
stmt_cost = ix86_builtin_vectorization_cost (kind, vectype, misalign);
/* Penalize DFmode vector operations for Bonnell. */
- if (TARGET_BONNELL && kind == vector_stmt
+ if (TARGET_CPU_P (BONNELL) && kind == vector_stmt
&& vectype && GET_MODE_INNER (TYPE_MODE (vectype)) == DFmode)
stmt_cost *= 5; /* FIXME: The value here is arbitrary. */
@@ -22369,8 +22369,10 @@ ix86_add_stmt_cost (class vec_info *vinfo, void *data, int count,
/* We need to multiply all vector stmt cost by 1.7 (estimated cost)
for Silvermont as it has out of order integer pipeline and can execute
2 scalar instruction per tick, but has in order SIMD pipeline. */
- if ((TARGET_SILVERMONT || TARGET_GOLDMONT || TARGET_GOLDMONT_PLUS
- || TARGET_TREMONT || TARGET_INTEL) && stmt_info && stmt_info->stmt)
+ if ((TARGET_CPU_P (SILVERMONT) || TARGET_CPU_P (GOLDMONT)
+ || TARGET_CPU_P (GOLDMONT_PLUS) || TARGET_CPU_P (TREMONT)
+ || TARGET_CPU_P (INTEL))
+ && stmt_info && stmt_info->stmt)
{
tree lhs_op = gimple_get_lhs (stmt_info->stmt);
if (lhs_op && TREE_CODE (TREE_TYPE (lhs_op)) == INTEGER_TYPE)
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index 51009b2fc92..e4eb52ff4f2 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -263,50 +263,7 @@ extern const struct processor_costs ix86_size_cost;
#define HAS_LONG_COND_BRANCH 1
#define HAS_LONG_UNCOND_BRANCH 1
-#define TARGET_386 (ix86_tune == PROCESSOR_I386)
-#define TARGET_486 (ix86_tune == PROCESSOR_I486)
-#define TARGET_PENTIUM (ix86_tune == PROCESSOR_PENTIUM)
-#define TARGET_PENTIUMPRO (ix86_tune == PROCESSOR_PENTIUMPRO)
-#define TARGET_GEODE (ix86_tune == PROCESSOR_GEODE)
-#define TARGET_K6 (ix86_tune == PROCESSOR_K6)
-#define TARGET_ATHLON (ix86_tune == PROCESSOR_ATHLON)
-#define TARGET_PENTIUM4 (ix86_tune == PROCESSOR_PENTIUM4)
-#define TARGET_K8 (ix86_tune == PROCESSOR_K8)
-#define TARGET_ATHLON_K8 (TARGET_K8 || TARGET_ATHLON)
-#define TARGET_NOCONA (ix86_tune == PROCESSOR_NOCONA)
-#define TARGET_CORE2 (ix86_tune == PROCESSOR_CORE2)
-#define TARGET_NEHALEM (ix86_tune == PROCESSOR_NEHALEM)
-#define TARGET_SANDYBRIDGE (ix86_tune == PROCESSOR_SANDYBRIDGE)
-#define TARGET_HASWELL (ix86_tune == PROCESSOR_HASWELL)
-#define TARGET_BONNELL (ix86_tune == PROCESSOR_BONNELL)
-#define TARGET_SILVERMONT (ix86_tune == PROCESSOR_SILVERMONT)
-#define TARGET_GOLDMONT (ix86_tune == PROCESSOR_GOLDMONT)
-#define TARGET_GOLDMONT_PLUS (ix86_tune == PROCESSOR_GOLDMONT_PLUS)
-#define TARGET_TREMONT (ix86_tune == PROCESSOR_TREMONT)
-#define TARGET_KNL (ix86_tune == PROCESSOR_KNL)
-#define TARGET_KNM (ix86_tune == PROCESSOR_KNM)
-#define TARGET_SKYLAKE (ix86_tune == PROCESSOR_SKYLAKE)
-#define TARGET_SKYLAKE_AVX512 (ix86_tune == PROCESSOR_SKYLAKE_AVX512)
-#define TARGET_CANNONLAKE (ix86_tune == PROCESSOR_CANNONLAKE)
-#define TARGET_ICELAKE_CLIENT (ix86_tune == PROCESSOR_ICELAKE_CLIENT)
-#define TARGET_ICELAKE_SERVER (ix86_tune == PROCESSOR_ICELAKE_SERVER)
-#define TARGET_CASCADELAKE (ix86_tune == PROCESSOR_CASCADELAKE)
-#define TARGET_TIGERLAKE (ix86_tune == PROCESSOR_TIGERLAKE)
-#define TARGET_COOPERLAKE (ix86_tune == PROCESSOR_COOPERLAKE)
-#define TARGET_SAPPHIRERAPIDS (ix86_tune == PROCESSOR_SAPPHIRERAPIDS)
-#define TARGET_ALDERLAKE (ix86_tune == PROCESSOR_ALDERLAKE)
-#define TARGET_INTEL (ix86_tune == PROCESSOR_INTEL)
-#define TARGET_GENERIC (ix86_tune == PROCESSOR_GENERIC)
-#define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10)
-#define TARGET_BDVER1 (ix86_tune == PROCESSOR_BDVER1)
-#define TARGET_BDVER2 (ix86_tune == PROCESSOR_BDVER2)
-#define TARGET_BDVER3 (ix86_tune == PROCESSOR_BDVER3)
-#define TARGET_BDVER4 (ix86_tune == PROCESSOR_BDVER4)
-#define TARGET_BTVER1 (ix86_tune == PROCESSOR_BTVER1)
-#define TARGET_BTVER2 (ix86_tune == PROCESSOR_BTVER2)
-#define TARGET_ZNVER1 (ix86_tune == PROCESSOR_ZNVER1)
-#define TARGET_ZNVER2 (ix86_tune == PROCESSOR_ZNVER2)
-#define TARGET_ZNVER3 (ix86_tune == PROCESSOR_ZNVER3)
+#define TARGET_CPU_P(CPU) (ix86_tune == PROCESSOR_ ## CPU)
/* Feature tests against the various tunings. */
enum ix86_tune_indices {
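For readers skimming the diff, the replacement pattern is a single token-pasting macro: TARGET_CPU_P (BONNELL) expands to (ix86_tune == PROCESSOR_BONNELL), and likewise for every other processor name. The standalone sketch below illustrates that expansion outside of GCC; the enum values and the initializer here are simplified stand-ins for illustration only, not the full processor_type definition from i386.h.

#include <stdio.h>

/* Simplified stand-in for GCC's processor_type enum; the real one in
   i386.h lists every supported tuning target.  */
enum processor_type { PROCESSOR_GENERIC, PROCESSOR_BONNELL, PROCESSOR_K6 };

static enum processor_type ix86_tune = PROCESSOR_BONNELL;

/* The new helper: paste the argument onto PROCESSOR_ and compare it
   against the active tuning, replacing the per-CPU TARGET_foo macros.  */
#define TARGET_CPU_P(CPU) (ix86_tune == PROCESSOR_ ## CPU)

int
main (void)
{
  printf ("tuning for Bonnell: %d\n", TARGET_CPU_P (BONNELL));  /* prints 1 */
  printf ("tuning for K6:      %d\n", TARGET_CPU_P (K6));       /* prints 0 */
  return 0;
}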
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 2820f6d6188..707130e3839 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -14290,13 +14290,13 @@
return "tzcnt{<imodesuffix>}\t{%1, %0|%0, %1}";
else if (optimize_function_for_size_p (cfun))
;
- else if (TARGET_GENERIC)
+ else if (TARGET_CPU_P (GENERIC))
/* tzcnt expands to 'rep bsf' and we can use it even if !TARGET_BMI. */
return "rep%; bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
return "bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
}
- "(TARGET_BMI || TARGET_GENERIC)
+ "(TARGET_BMI || TARGET_CPU_P (GENERIC))
&& TARGET_AVOID_FALSE_DEP_FOR_BMI && epilogue_completed
&& optimize_function_for_speed_p (cfun)
&& !reg_mentioned_p (operands[0], operands[1])"
@@ -14312,7 +14312,7 @@
(if_then_else
(ior (match_test "TARGET_BMI")
(and (not (match_test "optimize_function_for_size_p (cfun)"))
- (match_test "TARGET_GENERIC")))
+ (match_test "TARGET_CPU_P (GENERIC)")))
(const_string "1")
(const_string "0")))
(set_attr "mode" "<MODE>")])
@@ -14331,7 +14331,7 @@
{
if (TARGET_BMI)
return "tzcnt{<imodesuffix>}\t{%1, %0|%0, %1}";
- else if (TARGET_GENERIC)
+ else if (TARGET_CPU_P (GENERIC))
/* tzcnt expands to 'rep bsf' and we can use it even if !TARGET_BMI. */
return "rep%; bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
else
diff --git a/gcc/config/i386/x86-tune-sched-atom.c b/gcc/config/i386/x86-tune-sched-atom.c
index cfb0c65ac99..1611436a05b 100644
--- a/gcc/config/i386/x86-tune-sched-atom.c
+++ b/gcc/config/i386/x86-tune-sched-atom.c
@@ -51,7 +51,7 @@ do_reorder_for_imul (rtx_insn **ready, int n_ready)
int index = -1;
int i;
- if (!TARGET_BONNELL)
+ if (!TARGET_CPU_P (BONNELL))
return index;
/* Check that IMUL instruction is on the top of ready list. */
@@ -131,7 +131,7 @@ swap_top_of_ready_list (rtx_insn **ready, int n_ready)
int clock2 = -1;
#define INSN_TICK(INSN) (HID (INSN)->tick)
- if (!TARGET_SILVERMONT && !TARGET_INTEL)
+ if (!TARGET_CPU_P (SILVERMONT) && !TARGET_CPU_P (INTEL))
return false;
if (!NONDEBUG_INSN_P (top))
@@ -204,7 +204,8 @@ ix86_atom_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
issue_rate = ix86_issue_rate ();
/* Do reordering for BONNELL/SILVERMONT only. */
- if (!TARGET_BONNELL && !TARGET_SILVERMONT && !TARGET_INTEL)
+ if (!TARGET_CPU_P (BONNELL) && !TARGET_CPU_P (SILVERMONT)
+ && !TARGET_CPU_P (INTEL))
return issue_rate;
/* Nothing to do if ready list contains only 1 instruction. */
diff --git a/gcc/config/i386/x86-tune-sched-bd.c b/gcc/config/i386/x86-tune-sched-bd.c
index d696643244c..ad0edf713f5 100644
--- a/gcc/config/i386/x86-tune-sched-bd.c
+++ b/gcc/config/i386/x86-tune-sched-bd.c
@@ -800,8 +800,9 @@ bool
ix86_bd_has_dispatch (rtx_insn *insn, int action)
{
/* Current implementation of dispatch scheduler models bulldozer only. */
- if ((TARGET_BDVER1 || TARGET_BDVER2 || TARGET_BDVER3
- || TARGET_BDVER4) && flag_dispatch_scheduler)
+ if ((TARGET_CPU_P (BDVER1) || TARGET_CPU_P (BDVER2)
+ || TARGET_CPU_P (BDVER3) || TARGET_CPU_P (BDVER4))
+ && flag_dispatch_scheduler)
switch (action)
{
default:
diff --git a/gcc/config/i386/x86-tune-sched.c b/gcc/config/i386/x86-tune-sched.c
index 2bcc64b865a..6d8bca9ce85 100644
--- a/gcc/config/i386/x86-tune-sched.c
+++ b/gcc/config/i386/x86-tune-sched.c
@@ -386,7 +386,7 @@ ix86_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
loadcost = 3;
else
- loadcost = TARGET_ATHLON ? 2 : 0;
+ loadcost = TARGET_CPU_P (ATHLON) ? 2 : 0;
if (cost >= loadcost)
cost -= loadcost;