[PATCH, ARM 5/7, ping1] Add support for MOVT/MOVW to ARMv8-M Baseline

Kyrill Tkachov kyrylo.tkachov@foss.arm.com
Fri May 20 11:15:00 GMT 2016


Hi Thomas,

On 19/05/16 17:11, Thomas Preudhomme wrote:
> On Wednesday 18 May 2016 12:30:41 Kyrill Tkachov wrote:
>> Hi Thomas,
>>
>> This looks mostly good with a few nits inline.
>> Please repost with the comments addressed.
> Updated ChangeLog entries:
>
> *** gcc/ChangeLog ***
>
> 2016-05-18  Thomas Preud'homme  <thomas.preudhomme@arm.com>
>
>          * config/arm/arm.h (TARGET_HAVE_MOVT): Include ARMv8-M as having MOVT.
>          * config/arm/arm.c (arm_arch_name): (const_ok_for_op): Check MOVT/MOVW
>          availability with TARGET_HAVE_MOVT.
>          (thumb_legitimate_constant_p): Strip the high part of a label_ref.
>          (thumb1_rtx_costs): Also return 0 if setting a half word constant
>          and MOVW is available.  Replace (unsigned HOST_WIDE_INT) INTVAL by
>          UINTVAL.
>          (thumb1_size_rtx_costs): Make set of half word constant also cost 1
>          extra instruction if MOVW is available.  Use a cost variable
>          incremented by COSTS_N_INSNS (1) when the condition matches rather
>          than returning an arithmetic expression based on COSTS_N_INSNS.
>          Make constant with bottom half word zero cost 2 instructions if
>          MOVW is available.
>          * config/arm/arm.md (define_attr "arch"): Add v8mb.
>          (define_attr "arch_enabled"): Set to yes if arch value is v8mb and
>          target is ARMv8-M Baseline.
>          * config/arm/thumb1.md (thumb1_movdi_insn): Add ARMv8-M Baseline only
>          alternative for constants satisfying j constraint.
>          (thumb1_movsi_insn): Likewise.
>          (movsi splitter for K alternative): Tighten condition to not trigger
>          if movt is available and j constraint is satisfied.
>          (Pe immediate splitter): Likewise.
>          (thumb1_movhi_insn): Add ARMv8-M Baseline only alternative for
>          constant fitting in a halfword to use MOVW.
>          * doc/sourcebuild.texi (arm_thumb1_movt_ko): Document new ARM
>          effective target.
>
>
> *** gcc/testsuite/ChangeLog ***
>
> 2015-11-13  Thomas Preud'homme  <thomas.preudhomme@arm.com>
>
>          * lib/target-supports.exp (check_effective_target_arm_thumb1_movt_ko):
>          Define effective target.
>          * gcc.target/arm/pr42574.c: Require arm_thumb1_movt_ko instead of
>          arm_thumb1_ok as effective target to exclude ARMv8-M Baseline.

This is ok now, thanks for the changes.
I'd like to see some tests that generate MOVW/MOVT instructions
on ARMv8-M Baseline. They should be easy to write, just something like:
int
foo ()
{
   return CONST;
}

and the same for short and long long return types
(to exercise the HImode, SImode and DImode move patterns).
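
Something along these lines would do, I think (only a rough sketch on my
side: the function names, constants and dg directives below are placeholders,
and it assumes the arm_arch_v8m_base effective-target machinery is available;
adjust as needed):

/* Sketch of a possible gcc.target/arm/ test; names and directives are
   placeholders, not part of the patch under review.  */
/* { dg-do compile } */
/* { dg-require-effective-target arm_arch_v8m_base_ok } */
/* { dg-options "-O2" } */
/* { dg-add-options arm_arch_v8m_base } */

short
foo_hi (void)
{
  return 0x1235;	/* Expect a single movw.  */
}

int
foo_si (void)
{
  return 0x1235;	/* Expect movw rather than a literal pool load.  */
}

long long
foo_di (void)
{
  return 0x1235;	/* Expect movw for the low word, movs #0 for the high word.  */
}

/* { dg-final { scan-assembler-times "movw" 3 } } */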

You can add them as part of this patch or as a separate followup.

Thanks,
Kyrill


>
> and patch:
>
> diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
> index 47216b4a1959ccdb18e329db411bf7f941e67163..f42e996e5a7ce979fe406b8261d50fb2ba005f6b 100644
> --- a/gcc/config/arm/arm.h
> +++ b/gcc/config/arm/arm.h
> @@ -269,7 +269,7 @@ extern void (*arm_lang_output_object_attributes_hook) (void);
>   #define TARGET_HAVE_LDACQ	(TARGET_ARM_ARCH >= 8 && arm_arch_notm)
>   
>   /* Nonzero if this chip provides the movw and movt instructions.  */
> -#define TARGET_HAVE_MOVT	(arm_arch_thumb2)
> +#define TARGET_HAVE_MOVT	(arm_arch_thumb2 || arm_arch8)
>   
>   /* Nonzero if integer division instructions supported.  */
>   #define TARGET_IDIV	((TARGET_ARM && arm_arch_arm_hwdiv)	\
> diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
> index d75a34f10d5ed22cff0a0b5d3ad433f111b059ee..a05e559c905daa55e686491a038342360c721912 100644
> --- a/gcc/config/arm/arm.c
> +++ b/gcc/config/arm/arm.c
> @@ -8220,6 +8220,12 @@ arm_legitimate_constant_p_1 (machine_mode, rtx x)
>   static bool
>   thumb_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
>   {
> +  /* Splitters for TARGET_USE_MOVT call arm_emit_movpair which creates high
> +     RTX.  These RTX must therefore be allowed for Thumb-1 so that when run
> +     for ARMv8-M baseline or later the result is valid.  */
> +  if (TARGET_HAVE_MOVT && GET_CODE (x) == HIGH)
> +    x = XEXP (x, 0);
> +
>     return (CONST_INT_P (x)
>   	  || CONST_DOUBLE_P (x)
>   	  || CONSTANT_ADDRESS_P (x)
> @@ -8306,7 +8312,8 @@ thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
>       case CONST_INT:
>         if (outer == SET)
>   	{
> -	  if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
> +	  if (UINTVAL (x) < 256
> +	      || (TARGET_HAVE_MOVT && !(INTVAL (x) & 0xffff0000)))
>   	    return 0;
>   	  if (thumb_shiftable_const (INTVAL (x)))
>   	    return COSTS_N_INSNS (2);
> @@ -9009,7 +9016,7 @@ static inline int
>   thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
>   {
>     machine_mode mode = GET_MODE (x);
> -  int words;
> +  int words, cost;
>   
>     switch (code)
>       {
> @@ -9055,17 +9062,26 @@ thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
>         /* A SET doesn't have a mode, so let's look at the SET_DEST to get
>   	 the mode.  */
>         words = ARM_NUM_INTS (GET_MODE_SIZE (GET_MODE (SET_DEST (x))));
> -      return COSTS_N_INSNS (words)
> -	     + COSTS_N_INSNS (1) * (satisfies_constraint_J (SET_SRC (x))
> -				    || satisfies_constraint_K (SET_SRC (x))
> -				       /* thumb1_movdi_insn.  */
> -				    || ((words > 1) && MEM_P (SET_SRC (x))));
> +      cost = COSTS_N_INSNS (words);
> +      if (satisfies_constraint_J (SET_SRC (x))
> +	  || satisfies_constraint_K (SET_SRC (x))
> +	     /* Too big immediate for 2byte mov, using movt.  */
> +	  || (UINTVAL (SET_SRC (x)) >= 256
> +	      && TARGET_HAVE_MOVT
> +	      && satisfies_constraint_j (SET_SRC (x)))
> +	     /* thumb1_movdi_insn.  */
> +	  || ((words > 1) && MEM_P (SET_SRC (x))))
> +	cost += COSTS_N_INSNS (1);
> +      return cost;
>   
>       case CONST_INT:
>         if (outer == SET)
>           {
>             if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
>               return COSTS_N_INSNS (1);
> +	  /* movw is 4byte long.  */
> +	  if (TARGET_HAVE_MOVT && !(INTVAL (x) & 0xffff0000))
> +	    return COSTS_N_INSNS (2);
>   	  /* See split "TARGET_THUMB1 && satisfies_constraint_J".  */
>   	  if (INTVAL (x) >= -255 && INTVAL (x) <= -1)
>               return COSTS_N_INSNS (2);
> diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
> index 8aa9fedf5c07e78bc7ba793b39bebcc45a4d5921..ce39bdf5904dec3e12003c411f2a8b326ea939e3 100644
> --- a/gcc/config/arm/arm.md
> +++ b/gcc/config/arm/arm.md
> @@ -118,10 +118,10 @@
>   ; This can be "a" for ARM, "t" for either of the Thumbs, "32" for
>   ; TARGET_32BIT, "t1" or "t2" to specify a specific Thumb mode.  "v6"
>   ; for ARM or Thumb-2 with arm_arch6, and nov6 for ARM without
> -; arm_arch6.  "v6t2" for Thumb-2 with arm_arch6.  This attribute is
> -; used to compute attribute "enabled", use type "any" to enable an
> -; alternative in all cases.
> -(define_attr "arch"
> "any,a,t,32,t1,t2,v6,nov6,v6t2,neon_for_64bits,avoid_neon_for_64bits,iwmmxt,iwmmxt2,armv6_or_vfpv3,neon"
> +; arm_arch6.  "v6t2" for Thumb-2 with arm_arch6 and v8mb for ARMv8-M
> +; baseline.  This attribute is used to compute attribute "enabled",
> +; use type "any" to enable an alternative in all cases.
> +(define_attr "arch"
> "any,a,t,32,t1,t2,v6,nov6,v6t2,v8mb,neon_for_64bits,avoid_neon_for_64bits,iwmmxt,iwmmxt2,armv6_or_vfpv3,neon"
>     (const_string "any"))
>   
>   (define_attr "arch_enabled" "no,yes"
> @@ -160,6 +160,10 @@
>   	      (match_test "TARGET_32BIT && arm_arch6 && arm_arch_thumb2"))
>   	 (const_string "yes")
>   
> +	 (and (eq_attr "arch" "v8mb")
> +	      (match_test "TARGET_THUMB1 && arm_arch8"))
> +	 (const_string "yes")
> +
>   	 (and (eq_attr "arch" "avoid_neon_for_64bits")
>   	      (match_test "TARGET_NEON")
>   	      (not (match_test "TARGET_PREFER_NEON_64BITS")))
> diff --git a/gcc/config/arm/thumb1.md b/gcc/config/arm/thumb1.md
> index 072ed4da47ad164eb406bedc3fccc589ac705e9f..47e569d0c259cd17d86a03061e5b47b3dab4579f 100644
> --- a/gcc/config/arm/thumb1.md
> +++ b/gcc/config/arm/thumb1.md
> @@ -590,8 +590,8 @@
>   ;;; ??? The 'i' constraint looks funny, but it should always be replaced by
>   ;;; thumb_reorg with a memory reference.
>   (define_insn "*thumb1_movdi_insn"
> -  [(set (match_operand:DI 0 "nonimmediate_operand" "=l,l,l,l,>,l, m,*r")
> -	(match_operand:DI 1 "general_operand"      "l, I,J,>,l,mi,l,*r"))]
> +  [(set (match_operand:DI 0 "nonimmediate_operand" "=l,l,l,r,l,>,l, m,*r")
> +	(match_operand:DI 1 "general_operand"      "l, I,J,j,>,l,mi,l,*r"))]
>     "TARGET_THUMB1
>      && (   register_operand (operands[0], DImode)
>          || register_operand (operands[1], DImode))"
> @@ -609,37 +609,41 @@
>       case 2:
>         operands[1] = GEN_INT (- INTVAL (operands[1]));
>         return \"movs\\t%Q0, %1\;rsbs\\t%Q0, %Q0, #0\;asrs\\t%R0, %Q0, #31\";
> -    case 3:
> -      return \"ldmia\\t%1, {%0, %H0}\";
> +    case 3: gcc_assert (TARGET_HAVE_MOVT);
> +	    return \"movw\\t%Q0, %L1\;movs\\t%R0, #0\";
>       case 4:
> -      return \"stmia\\t%0, {%1, %H1}\";
> +      return \"ldmia\\t%1, {%0, %H0}\";
>       case 5:
> -      return thumb_load_double_from_address (operands);
> +      return \"stmia\\t%0, {%1, %H1}\";
>       case 6:
> +      return thumb_load_double_from_address (operands);
> +    case 7:
>         operands[2] = gen_rtx_MEM (SImode,
>   			     plus_constant (Pmode, XEXP (operands[0], 0), 4));
>         output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
>         return \"\";
> -    case 7:
> +    case 8:
>         if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
>   	return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
>         return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
>       }
>     }"
> -  [(set_attr "length" "4,4,6,2,2,6,4,4")
> -   (set_attr "type"
> "multiple,multiple,multiple,load2,store2,load2,store2,multiple")
> -   (set_attr "pool_range" "*,*,*,*,*,1018,*,*")]
> +  [(set_attr "length" "4,4,6,6,2,2,6,4,4")
> +   (set_attr "type"
> "multiple,multiple,multiple,multiple,load2,store2,load2,store2,multiple")
> +   (set_attr "arch" "t1,t1,t1,v8mb,t1,t1,t1,t1,t1")
> +   (set_attr "pool_range" "*,*,*,*,*,*,1018,*,*")]
>   )
>   
>   (define_insn "*thumb1_movsi_insn"
> -  [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l, m,*l*h*k")
> -	(match_operand:SI 1 "general_operand"      "l, I,J,K,>,l,mi,l,*l*h*k"))]
> +  [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,r,l,l,l,>,l, m,*l*h*k")
> +	(match_operand:SI 1 "general_operand"      "l, I,j,J,K,>,l,mi,l,*l*h*k"))]
>     "TARGET_THUMB1
>      && (   register_operand (operands[0], SImode)
>          || register_operand (operands[1], SImode))"
>     "@
>      movs	%0, %1
>      movs	%0, %1
> +   movw	%0, %1
>      #
>      #
>      ldmia\\t%1, {%0}
> @@ -647,10 +651,11 @@
>      ldr\\t%0, %1
>      str\\t%1, %0
>      mov\\t%0, %1"
> -  [(set_attr "length" "2,2,4,4,2,2,2,2,2")
> -   (set_attr "type"
> "mov_reg,mov_imm,multiple,multiple,load1,store1,load1,store1,mov_reg")
> -   (set_attr "pool_range" "*,*,*,*,*,*,1018,*,*")
> -   (set_attr "conds" "set,clob,*,*,nocond,nocond,nocond,nocond,nocond")])
> +  [(set_attr "length" "2,2,4,4,4,2,2,2,2,2")
> +   (set_attr "type"
> "mov_reg,mov_imm,mov_imm,multiple,multiple,load1,store1,load1,store1,mov_reg")
> +   (set_attr "pool_range" "*,*,*,*,*,*,*,1018,*,*")
> +   (set_attr "arch" "t1,t1,v8mb,t1,t1,t1,t1,t1,t1,t1")
> +   (set_attr "conds"
> "set,clob,nocond,*,*,nocond,nocond,nocond,nocond,nocond")])
>   
>   ; Split the load of 64-bit constant into two loads for high and low 32-bit parts respectively
>   ; to see if we can load them in fewer instructions or fewer cycles.
> @@ -687,7 +692,8 @@
>   (define_split
>     [(set (match_operand:SI 0 "register_operand" "")
>   	(match_operand:SI 1 "const_int_operand" ""))]
> -  "TARGET_THUMB1 && satisfies_constraint_K (operands[1])"
> +  "TARGET_THUMB1 && satisfies_constraint_K (operands[1])
> +   && !(TARGET_HAVE_MOVT && satisfies_constraint_j (operands[1]))"
>     [(set (match_dup 2) (match_dup 1))
>      (set (match_dup 0) (ashift:SI (match_dup 2) (match_dup 3)))]
>     "
> @@ -714,7 +720,8 @@
>   (define_split
>     [(set (match_operand:SI 0 "register_operand" "")
>   	(match_operand:SI 1 "const_int_operand" ""))]
> -  "TARGET_THUMB1 && satisfies_constraint_Pe (operands[1])"
> +  "TARGET_THUMB1 && satisfies_constraint_Pe (operands[1])
> +   && !(TARGET_HAVE_MOVT && satisfies_constraint_j (operands[1]))"
>     [(set (match_dup 2) (match_dup 1))
>      (set (match_dup 0) (plus:SI (match_dup 2) (match_dup 3)))]
>     "
> @@ -726,8 +733,8 @@
>   )
>   
>   (define_insn "*thumb1_movhi_insn"
> -  [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,l*r,*h,l")
> -	(match_operand:HI 1 "general_operand"       "l,m,l,k*h,*r,I"))]
> +  [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,l*r,*h,l,r")
> +	(match_operand:HI 1 "general_operand"       "l,m,l,k*h,*r,I,n"))]
>     "TARGET_THUMB1
>      && (   register_operand (operands[0], HImode)
>          || register_operand (operands[1], HImode))"
> @@ -739,6 +746,8 @@
>       case 3: return \"mov	%0, %1\";
>       case 4: return \"mov	%0, %1\";
>       case 5: return \"movs	%0, %1\";
> +    case 6: gcc_assert (TARGET_HAVE_MOVT);
> +	    return \"movw	%0, %L1\";
>       default: gcc_unreachable ();
>       case 1:
>         /* The stack pointer can end up being taken as an index register.
> @@ -758,9 +767,10 @@
>   	}
>         return \"ldrh	%0, %1\";
>       }"
> -  [(set_attr "length" "2,4,2,2,2,2")
> -   (set_attr "type" "alus_imm,load1,store1,mov_reg,mov_reg,mov_imm")
> -   (set_attr "conds" "clob,nocond,nocond,nocond,nocond,clob")])
> +  [(set_attr "length" "2,4,2,2,2,2,4")
> +   (set_attr "type" "alus_imm,load1,store1,mov_reg,mov_reg,mov_imm,mov_imm")
> +   (set_attr "arch" "t1,t1,t1,t1,t1,t1,v8mb")
> +   (set_attr "conds" "clob,nocond,nocond,nocond,nocond,clob,nocond")])
>   
>   (define_expand "thumb_movhi_clobber"
>     [(set (match_operand:HI     0 "memory_operand"   "")
> diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
> index 3142cd53ae53e90bb1af2f595fe53778c1cbfd3b..860b4c0627e094bae8b3801f9256ff4fec5c8fc2 100644
> --- a/gcc/doc/sourcebuild.texi
> +++ b/gcc/doc/sourcebuild.texi
> @@ -1601,6 +1601,10 @@ arm_v8_1a_neon_ok.
>   ARM target prefers @code{LDRD} and @code{STRD} instructions over
>   @code{LDM} and @code{STM} instructions.
>   
> +@item arm_thumb1_movt_ko
> +ARM target generates Thumb-1 code for @code{-mthumb} with no
> +@code{MOVT} instruction available.
> +
>   @end table
>   
>   @subsubsection AArch64-specific attributes
> diff --git a/gcc/testsuite/gcc.target/arm/pr42574.c b/gcc/testsuite/gcc.target/arm/pr42574.c
> index 0ccd05f9922c798611e479d97890aa69b180e989..53aea6224be89d2d771a22f8ecb397276f586234 100644
> --- a/gcc/testsuite/gcc.target/arm/pr42574.c
> +++ b/gcc/testsuite/gcc.target/arm/pr42574.c
> @@ -1,5 +1,5 @@
>   /* { dg-options "-mthumb -Os -fpic" }  */
> -/* { dg-require-effective-target arm_thumb1_ok } */
> +/* { dg-require-effective-target arm_thumb1_movt_ko } */
>   /* { dg-require-effective-target fpic } */
>   /* Make sure the address of glob.c is calculated only once and using
>      a logical shift for the offset (200<<1).  */
> diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
> index 46352b90de7202960116d93523b017d5dc4a1423..f5d73ad2620fa35f0091a890bca63c7d35de1a85 100644
> --- a/gcc/testsuite/lib/target-supports.exp
> +++ b/gcc/testsuite/lib/target-supports.exp
> @@ -3329,6 +3329,23 @@ proc check_effective_target_arm_cortex_m { } {
>       } "-mthumb"]
>   }
>   
> +# Return 1 if this is an ARM target where -mthumb causes Thumb-1 to be
> +# used and no movt/movw instructions to be available.
> +
> +proc check_effective_target_arm_thumb1_movt_ko {} {
> +    if [check_effective_target_arm_thumb1_ok] {
> +	return [expr ![check_no_compiler_messages arm_movt object {
> +	    int
> +	    foo (void)
> +	    {
> +	      asm ("movt r0, #42");
> +	    }
> +	} "-mthumb"]]
> +    } else {
> +	return 0
> +    }
> +}
> +
>   # Return 1 if this compilation turns on string_ops_prefer_neon on.
>   
>   proc check_effective_target_arm_tune_string_ops_prefer_neon { } {
>
> Best regards,
>
> Thomas
>
>> On 17/05/16 11:13, Thomas Preudhomme wrote:
>>> Ping?
>>>
>>> *** gcc/ChangeLog ***
>>>
>>> 2015-11-13  Thomas Preud'homme  <thomas.preudhomme@arm.com>
>>>
>>>           * config/arm/arm.h (TARGET_HAVE_MOVT): Include ARMv8-M as having
>>>           MOVT.
>>>           * config/arm/arm.c (arm_arch_name): (const_ok_for_op): Check
>>>           MOVT/MOVW availability with TARGET_HAVE_MOVT.
>>>           (thumb_legitimate_constant_p): Legalize high part of a label_ref
>>>           as a constant.
>> I don't think "Legalize" is the right word here. How about "Strip the HIGH
>> part of a label_ref"?
>>>           (thumb1_rtx_costs): Also return 0 if setting a half word constant
>>>           and movw is available.
>>>           (thumb1_size_rtx_costs): Make set of half word constant also cost 1
>>>           extra instruction if MOVW is available.  Make constant with bottom
>>>           half word zero cost 2 instruction if MOVW is available.
>>>           * config/arm/arm.md (define_attr "arch"): Add v8mb.
>>>           (define_attr "arch_enabled"): Set to yes if arch value is v8mb
>>>           and
>>>           target is ARMv8-M Baseline.
>>>           * config/arm/thumb1.md (thumb1_movdi_insn): Add ARMv8-M Baseline
>>>           only
>>>           alternative for constants satisfying j constraint.
>>>           (thumb1_movsi_insn): Likewise.
>>>           (movsi splitter for K alternative): Tighten condition to not
>>>           trigger
>>>           if movt is available and j constraint is satisfied.
>>>           (Pe immediate splitter): Likewise.
>>>           (thumb1_movhi_insn): Add ARMv8-M Baseline only alternative for
>>>           constant fitting in an halfword to use movw.
>> Please use 'MOVW' consistently in the ChangeLog rather than the lowercase
>> 'movw'
>>>           * doc/sourcebuild.texi (arm_thumb1_movt_ko): Document new ARM
>>>           effective target.
>>>
>>> *** gcc/testsuite/ChangeLog ***
>>>
>>> 2015-11-13  Thomas Preud'homme  <thomas.preudhomme@arm.com>
>>>
>>>           * lib/target-supports.exp
>>>           (check_effective_target_arm_thumb1_movt_ko):
>>>           Define effective target.
>>>           * gcc.target/arm/pr42574.c: Require arm_thumb1_movt_ko instead of
>>>           arm_thumb1_ok as effective target to exclude ARMv8-M Baseline.
>>>
>>> diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
>>> index 47216b4a1959ccdb18e329db411bf7f941e67163..f42e996e5a7ce979fe406b8261d50fb2ba005f6b 100644
>>> --- a/gcc/config/arm/arm.h
>>> +++ b/gcc/config/arm/arm.h
>>> @@ -269,7 +269,7 @@ extern void (*arm_lang_output_object_attributes_hook) (void);
>>>
>>>    #define TARGET_HAVE_LDACQ	(TARGET_ARM_ARCH >= 8 && arm_arch_notm)
>>>    
>>>    /* Nonzero if this chip provides the movw and movt instructions.  */
>>>
>>> -#define TARGET_HAVE_MOVT	(arm_arch_thumb2)
>>> +#define TARGET_HAVE_MOVT	(arm_arch_thumb2 || arm_arch8)
>>>
>>>    /* Nonzero if integer division instructions supported.  */
>>>    #define TARGET_IDIV	((TARGET_ARM && arm_arch_arm_hwdiv)	\
>>>
>>> diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
>>> index d75a34f10d5ed22cff0a0b5d3ad433f111b059ee..13b4b71ac8f9c1da8ef1945f7ff6985ca59f6832 100644
>>> --- a/gcc/config/arm/arm.c
>>> +++ b/gcc/config/arm/arm.c
>>> @@ -8220,6 +8220,12 @@ arm_legitimate_constant_p_1 (machine_mode, rtx x)
>>>
>>>    static bool
>>>    thumb_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
>>>    {
>>>
>>> +  /* Splitters for TARGET_USE_MOVT call arm_emit_movpair which creates high
>>> +     RTX.  These RTX must therefore be allowed for Thumb-1 so that when run
>>> +     for ARMv8-M baseline or later the result is valid.  */
>>> +  if (TARGET_HAVE_MOVT && GET_CODE (x) == HIGH)
>>> +    x = XEXP (x, 0);
>>> +
>>>
>>>      return (CONST_INT_P (x)
>>>      
>>>    	  || CONST_DOUBLE_P (x)
>>>    	  || CONSTANT_ADDRESS_P (x)
>>>
>>> @@ -8306,7 +8312,8 @@ thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
>>>
>>>        case CONST_INT:
>>>          if (outer == SET)
>>>    	
>>>    	{
>>>
>>> -	  if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
>>> +	  if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256
>>> +	      || (TARGET_HAVE_MOVT && !(INTVAL (x) & 0xffff0000)))
>>>
>>>    	    return 0;
>> Since you're modifying this line please replace (unsigned HOST_WIDE_INT)
>> INTVAL (x) with UINTVAL (x).


