Ping: [patch] Honor volatile bitfield types


Ping.  This is an updated version of the patch; it adds a few checks for non-VOID modes.

See http://gcc.gnu.org/ml/gcc-patches/2010-03/msg01036.html
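
For reference, here is a minimal sketch of the behavior the new hook
enables.  The structure, field, and device names below are
hypothetical, not part of the patch:

struct dev_regs
{
  volatile unsigned short clk : 3;
  volatile unsigned short en : 1;
  volatile unsigned short pad : 12;
};

extern struct dev_regs *dev;

void
enable_clock (void)
{
  /* With TARGET_HONOR_VOLATILE_BITFIELD_TYPES returning true, this
     read-modify-write is performed as a single 16-bit (HImode on
     m32c) load and store of the whole "unsigned short" container,
     rather than whatever narrower or wider mode the back end would
     otherwise choose.  */
  dev->en = 1;
}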

Index: doc/tm.texi
===================================================================
--- doc/tm.texi	(revision 158449)
+++ doc/tm.texi	(working copy)
@@ -1352,12 +1352,27 @@ This target hook should return @code{tru
 should use the narrowest mode possible.  It should return @code{false} if
 these accesses should use the bitfield container type.
 
 The default is @code{!TARGET_STRICT_ALIGN}.
 @end deftypefn
 
+@deftypefn {Target Hook} bool TARGET_HONOR_VOLATILE_BITFIELD_TYPES (void)
+This target hook should return @code{true} if accesses to volatile
+bitfields (or other structure fields, although the compiler usually
+honors those types anyway) should use a single access in a mode of the
+same size as the container's type, aligned to its natural alignment if
+possible.  If the target requires strict alignment, and honoring the
+container type would require violating this alignment, a warning is
+issued.
+
+The default is to not honor the field's type, which results in a
+target-specific ``optimum'' access mode instead.  Returning
+@code{true} here overrides the @code{TARGET_NARROW_VOLATILE_BITFIELD}
+hook, above.
+@end deftypefn
+
 @defmac MEMBER_TYPE_FORCES_BLK (@var{field}, @var{mode})
 Return 1 if a structure or array containing @var{field} should be accessed using
 @code{BLKMODE}.
 
 If @var{field} is the only field in the structure, @var{mode} is its
 mode, otherwise @var{mode} is VOIDmode.  @var{mode} is provided in the
Index: target.h
===================================================================
--- target.h	(revision 158449)
+++ target.h	(working copy)
@@ -578,12 +578,20 @@ struct gcc_target
   bool (* align_anon_bitfield) (void);
 
   /* Return true if volatile bitfields should use the narrowest type possible.
      Return false if they should use the container type.  */
   bool (* narrow_volatile_bitfield) (void);
 
+  /* Return true if volatile bitfields (and, in general, other
+     structure fields too) should be accessed using modes the same
+     size as the types of the bitfields.  Return false if they should
+     use a target-specific type-independent mode.  Note that a true
+     return here overrides the narrow_volatile_bitfield() choice
+     above.  */
+  bool (* honor_volatile_bitfield_types) (void);
+
   /* Set up target-specific built-in functions.  */
   void (* init_builtins) (void);
 
   /* Initialize (if INITIALIZE_P is true) and return the target-specific
      built-in function decl for CODE.
      Return NULL if that is not possible.  Return error_mark_node if CODE
Index: expr.c
===================================================================
--- expr.c	(revision 158449)
+++ expr.c	(working copy)
@@ -4233,12 +4233,19 @@ expand_assignment (tree to, tree from, b
 
       /* If we are going to use store_bit_field and extract_bit_field,
 	 make sure to_rtx will be safe for multiple use.  */
 
       to_rtx = expand_normal (tem);
 
+      /* If the bitfield is volatile, we want to access it in the
+	 field's mode, not the computed mode.  */
+      if (volatilep
+	  && MEM_P (to_rtx)
+	  && targetm.honor_volatile_bitfield_types ())
+	to_rtx = adjust_address (to_rtx, mode1, 0);
+
       if (offset != 0)
 	{
 	  enum machine_mode address_mode;
 	  rtx offset_rtx;
 
 	  if (!MEM_P (to_rtx))
@@ -5980,15 +5987,29 @@ get_inner_reference (tree exp, HOST_WIDE
   *pbitsize = -1;
   if (TREE_CODE (exp) == COMPONENT_REF)
     {
       tree field = TREE_OPERAND (exp, 1);
       size_tree = DECL_SIZE (field);
       if (!DECL_BIT_FIELD (field))
-	mode = DECL_MODE (field);
+	{
+	  /* Volatile bitfields should be accessed in the mode of the
+	     field's type, not the mode computed based on the bit
+	     size.  */
+	  if (TREE_THIS_VOLATILE (exp)
+	      && DECL_BIT_FIELD_TYPE (field)
+	      && targetm.honor_volatile_bitfield_types ())
+	    mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
+	  else
+	    mode = DECL_MODE (field);
+	}
       else if (DECL_MODE (field) == BLKmode)
 	blkmode_bitfield = true;
+      else if (TREE_THIS_VOLATILE (exp)
+	       && targetm.honor_volatile_bitfield_types ())
+	/* Likewise.  */
+	mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
 
       *punsignedp = DECL_UNSIGNED (field);
     }
   else if (TREE_CODE (exp) == BIT_FIELD_REF)
     {
       size_tree = TREE_OPERAND (exp, 1);
@@ -8970,12 +8991,19 @@ expand_expr_real_1 (tree exp, rtx target
 			 VOIDmode,
 			 (modifier == EXPAND_INITIALIZER
 			  || modifier == EXPAND_CONST_ADDRESS
 			  || modifier == EXPAND_STACK_PARM)
 			 ? modifier : EXPAND_NORMAL);
 
+	/* If the bitfield is volatile, we want to access it in the
+	   field's mode, not the computed mode.  */
+	if (volatilep
+	    && MEM_P (op0)
+	    && targetm.honor_volatile_bitfield_types ())
+	  op0 = adjust_address (op0, mode1, 0);
+
 	mode2
 	  = CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
 
 	/* If we have either an offset, a BLKmode result, or a reference
 	   outside the underlying object, we must force it to memory.
 	   Such a case can occur in Ada if we have unchecked conversion
@@ -9095,12 +9123,15 @@ expand_expr_real_1 (tree exp, rtx target
 	    || REG_P (op0) || GET_CODE (op0) == SUBREG
 	    || (mode1 != BLKmode && ! direct_load[(int) mode1]
 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
 		&& modifier != EXPAND_CONST_ADDRESS
 		&& modifier != EXPAND_INITIALIZER)
+	    /* If the field is volatile, we always want an aligned
+	       access.  */
+	    || (volatilep && targetm.honor_volatile_bitfield_types ())
 	    /* If the field isn't aligned enough to fetch as a memref,
 	       fetch it as a bit field.  */
 	    || (mode1 != BLKmode
 		&& (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
 		      || (bitpos % GET_MODE_ALIGNMENT (mode) != 0)
 		      || (MEM_P (op0)
Index: expmed.c
===================================================================
--- expmed.c	(revision 158449)
+++ expmed.c	(working copy)
@@ -901,14 +901,20 @@ store_fixed_bit_field (rtx op0, unsigned
 	 We don't want a mode bigger than the destination.  */
 
       mode = GET_MODE (op0);
       if (GET_MODE_BITSIZE (mode) == 0
 	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
 	mode = word_mode;
-      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
-			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
+
+      if (MEM_VOLATILE_P (op0)
+	  && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
+	  && targetm.honor_volatile_bitfield_types ())
+	mode = GET_MODE (op0);
+      else
+	mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+			      MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
 	{
 	  /* The only way this should occur is if the field spans word
 	     boundaries.  */
 	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
@@ -1375,12 +1381,20 @@ extract_bit_field_1 (rtx str_rtx, unsign
      we want a mode based on the size, so we must avoid calling it for FP
      modes.  */
   mode1  = (SCALAR_INT_MODE_P (tmode)
 	    ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
 	    : mode);
 
+  /* If the bitfield is volatile, we need to make sure the access
+     remains on a type-aligned boundary.  */
+  if (MEM_P (op0)
+      && MEM_VOLATILE_P (op0)
+      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
+      && targetm.honor_volatile_bitfield_types ())
+    goto no_subreg_mode_swap;
+
   if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
 	&& bitpos % BITS_PER_WORD == 0)
        || (mode1 != BLKmode
 	   /* ??? The big endian test here is wrong.  This is correct
 	      if the value is in a register, and if mode_for_size is not
 	      the same mode as op0.  This causes us to get unnecessarily
@@ -1727,14 +1741,19 @@ extract_fixed_bit_field (enum machine_mo
   else
     {
       /* Get the proper mode to use for this field.  We want a mode that
 	 includes the entire field.  If such a mode would be larger than
 	 a word, we won't be doing the extraction the normal way.  */
 
-      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
-			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
+      if (MEM_VOLATILE_P (op0)
+	  && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
+	  && targetm.honor_volatile_bitfield_types ())
+	mode = GET_MODE (op0);
+      else
+	mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+			      MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
 	/* The only way this should occur is if the field spans word
 	   boundaries.  */
 	return extract_split_bit_field (op0, bitsize,
 					bitpos + offset * BITS_PER_UNIT,
@@ -1749,18 +1768,42 @@ extract_fixed_bit_field (enum machine_mo
 	{
 	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
 	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
 		     * BITS_PER_UNIT);
 	}
 
-      /* Get ref to an aligned byte, halfword, or word containing the field.
-	 Adjust BITPOS to be position within a word,
-	 and OFFSET to be the offset of that word.
-	 Then alter OP0 to refer to that word.  */
-      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
-      offset -= (offset % (total_bits / BITS_PER_UNIT));
+      /* If we're accessing a volatile MEM, we can't do the next
+	 alignment step if it results in a multi-word access where we
+	 otherwise wouldn't have one.  So, check for that case
+	 here.  */
+      if (MEM_P (op0)
+	  && MEM_VOLATILE_P (op0)
+	  && targetm.honor_volatile_bitfield_types ()
+	  && bitpos + bitsize <= total_bits
+	  && (bitpos + bitsize
+	      + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT
+	      > total_bits))
+	{
+	  if (STRICT_ALIGNMENT)
+	    {
+	      if (bitsize == total_bits)
+		warning (0, "mis-aligned access required for structure member");
+	      else
+		warning (0, "mis-aligned access required for structure bitfield");
+	    }
+	}
+      else
+	{
+	  /* Get ref to an aligned byte, halfword, or word containing the field.
+	     Adjust BITPOS to be position within a word,
+	     and OFFSET to be the offset of that word.
+	     Then alter OP0 to refer to that word.  */
+	  bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
+	  offset -= (offset % (total_bits / BITS_PER_UNIT));
+	}
+
       op0 = adjust_address (op0, mode, offset);
     }
 
   mode = GET_MODE (op0);
 
   if (BYTES_BIG_ENDIAN)
Index: cfgexpand.c
===================================================================
--- cfgexpand.c	(revision 158449)
+++ cfgexpand.c	(working copy)
@@ -2613,12 +2613,27 @@ expand_debug_expr (tree exp)
       if (unsignedp)
 	return gen_rtx_UNSIGNED_FIX (mode, op0);
       else
 	return gen_rtx_FIX (mode, op0);
 
     case POINTER_PLUS_EXPR:
+      /* For the rare target where pointers are not the same size as
+	 size_t, we need to check for mis-matched modes and correct
+	 the addend.  */
+      if (op0 && op1
+	  && GET_MODE (op0) != VOIDmode && GET_MODE (op1) != VOIDmode
+	  && GET_MODE (op0) != GET_MODE (op1))
+	{
+	  if (GET_MODE_BITSIZE (GET_MODE (op0)) < GET_MODE_BITSIZE (GET_MODE (op1)))
+	    op1 = gen_rtx_TRUNCATE (GET_MODE (op0), op1);
+	  else if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1))))
+	    op1 = gen_rtx_ZERO_EXTEND (GET_MODE (op0), op1);
+	  else
+	    op1 = gen_rtx_SIGN_EXTEND (GET_MODE (op0), op1);
+	}
+      /* Fall through.  */
     case PLUS_EXPR:
       return gen_rtx_PLUS (mode, op0, op1);
 
     case MINUS_EXPR:
       return gen_rtx_MINUS (mode, op0, op1);
 
Index: target-def.h
===================================================================
--- target-def.h	(revision 158449)
+++ target-def.h	(working copy)
@@ -563,12 +563,13 @@
 #endif
 #define TARGET_INSERT_ATTRIBUTES hook_void_tree_treeptr
 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_false
 #define TARGET_MS_BITFIELD_LAYOUT_P hook_bool_const_tree_false
 #define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_false
 #define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
+#define TARGET_HONOR_VOLATILE_BITFIELD_TYPES hook_bool_void_false
 #define TARGET_RTX_COSTS hook_bool_rtx_int_int_intp_bool_false
 #define TARGET_MANGLE_TYPE hook_constcharptr_const_tree_null
 #define TARGET_ALLOCATE_INITIAL_VALUE NULL
 
 #define TARGET_UNSPEC_MAY_TRAP_P default_unspec_may_trap_p
 
@@ -932,12 +933,13 @@
   TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P,	\
   TARGET_MS_BITFIELD_LAYOUT_P,			\
   TARGET_DECIMAL_FLOAT_SUPPORTED_P,		\
   TARGET_FIXED_POINT_SUPPORTED_P,		\
   TARGET_ALIGN_ANON_BITFIELD,			\
   TARGET_NARROW_VOLATILE_BITFIELD,		\
+  TARGET_HONOR_VOLATILE_BITFIELD_TYPES,		\
   TARGET_INIT_BUILTINS,				\
   TARGET_BUILTIN_DECL,				\
   TARGET_EXPAND_BUILTIN,			\
   TARGET_RESOLVE_OVERLOADED_BUILTIN,		\
   TARGET_FOLD_BUILTIN,				\
   TARGET_BUILTIN_RECIPROCAL,			\
Index: config/m32c/m32c.c
===================================================================
--- config/m32c/m32c.c	(revision 158449)
+++ config/m32c/m32c.c	(working copy)
@@ -80,12 +80,13 @@ static int m32c_pushm_popm (Push_Pop_Typ
 static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
 static rtx m32c_struct_value_rtx (tree, int);
 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
 static int need_to_save (int);
 static rtx m32c_function_value (const_tree, const_tree, bool);
 static rtx m32c_libcall_value (enum machine_mode, const_rtx);
+static bool m32c_honor_volatile_bitfield_types (void);
 
 int current_function_special_page_vector (rtx);
 
 #define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)
 
 #define streq(a,b) (strcmp ((a), (b)) == 0)
@@ -4403,11 +4404,19 @@ m32c_output_compare (rtx insn, rtx *oper
    stack pointer doesn't have as flexible addressing as the frame
    pointer, so we always assume we have it.  */
 
 #undef TARGET_FRAME_POINTER_REQUIRED
 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
 
+#undef TARGET_HONOR_VOLATILE_BITFIELD_TYPES
+#define TARGET_HONOR_VOLATILE_BITFIELD_TYPES m32c_honor_volatile_bitfield_types
+static bool
+m32c_honor_volatile_bitfield_types (void)
+{
+  return target_honor_volatile_field_types;
+}
+
 /* The Global `targetm' Variable. */
 
 struct gcc_target targetm = TARGET_INITIALIZER;
 
 #include "gt-m32c.h"
Index: config/m32c/m32c.opt
===================================================================
--- config/m32c/m32c.opt	(revision 158449)
+++ config/m32c/m32c.opt	(working copy)
@@ -39,6 +39,10 @@ mcpu=m32c
 Target RejectNegative Var(target_cpu,'3')
 -mcpu=m32c	Compile code for M32C variants
 
 memregs=
 Target RejectNegative Joined Var(target_memregs_string)
 -memregs=	Number of memreg bytes (default: 16, range: 0..16)
+
+mvoltype
+Target Var(target_honor_volatile_field_types)
+-mvoltype	Honor volatile field types
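
For the m32c port the new behavior is opt-in via -mvoltype.  As a
hypothetical illustration of the new warning path (this example is
mine, not part of the patch): on a strict-alignment target, packing
can leave a bitfield's "unsigned int" container at a mis-aligned
address, so honoring the container type cannot yield a single aligned
32-bit access.

struct s
{
  char tag;
  volatile unsigned int flags : 8;	/* container starts at offset 1 */
} __attribute__ ((packed));

int
get_flags (struct s *p)
{
  /* With the hook enabled, this access may provoke "mis-aligned
     access required for structure bitfield" rather than silently
     falling back to a narrower access.  */
  return p->flags;
}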

