patch: honor volatile bitfield types


The saga continues.  Based on some private discussions, I've changed
the flag to be target-independent and added some i386-specific tests.
Thus, the functionality can be used on all targets that have
memory-mapped peripheral registers (arm, sh, h8, m32c, rx, etc.) and
verified on other targets as well.
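
For context, here's a minimal sketch (the device, its address, and its
register layout are all hypothetical) of the kind of memory-mapped
peripheral access the flag is meant to get right:

    /* An 8-bit status register that must be read with a single byte
       access; a wider access would also read the adjacent registers,
       which may have read side effects.  */
    typedef struct {
      unsigned char ready : 1;
      unsigned char error : 1;
      unsigned char       : 6;
    } status_reg;

    #define STATUS (*(volatile status_reg *) 0x40001000)

    int device_ready (void)
    {
      /* With -fhonor-volatile-bitfield-types this compiles to exactly
         one byte read, matching the bitfield's declared type.  */
      return STATUS.ready;
    }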

I tried hacking GCC to make all structures volatile and running the
testsuite, but the output was full of complaints about parameter
mismatches and other problems caused simply by the structs being
volatile.  It didn't totally blow up, though :-)


	* common.opt (-fhonor-volatile-bitfield-types): New.
	* doc/invoke.texi: Document it.
	* fold-const.c (optimize_bit_field_compare): For volatile
	bitfields, use the field's type to determine the mode, not the
	field's size.
	* expr.c (expand_assignment): Likewise.
	(get_inner_reference): Likewise.
	(expand_expr_real_1): Likewise.
	* expmed.c (store_fixed_bit_field): Likewise.
	(extract_bit_field_1): Likewise.
	(extract_fixed_bit_field): Likewise.

	* gcc.target/i386/volatile-bitfields-1.c: New.
	* gcc.target/i386/volatile-bitfields-2.c: New.
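
For the i386 tests below, the observable difference is just the width
of the load emitted for the same source.  Roughly (the mnemonics are
what the dg-final patterns check for; the operand syntax is
illustrative):

    volatile BitStruct bits;	/* char a:1; char b:7; int c; */

    int foo () { return bits.b; }

    /* -fhonor-volatile-bitfield-types:     movzbl bits, %eax
       (a single byte access, matching the declared type of 'b')  */
    /* -fno-honor-volatile-bitfield-types:  movl bits, %eax
       (the target's "optimum" 32-bit access)  */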


Index: doc/invoke.texi
===================================================================
--- doc/invoke.texi	(revision 160589)
+++ doc/invoke.texi	(working copy)
@@ -17706,12 +17706,23 @@ be thrown between DSOs must be explicitl
 visibility so that the @samp{type_info} nodes will be unified between
 the DSOs.
 
 An overview of these techniques, their benefits and how to use them
 is at @w{@uref{http://gcc.gnu.org/wiki/Visibility}}.
 
+@item -fhonor-volatile-bitfield-types
+Use this option if accesses to volatile bitfields (or other structure
+fields, although the compiler usually honors those types anyway)
+should use a single access in a mode of the same size as the
+container's type, aligned to its natural alignment if possible.  If
+the target requires strict alignment, and honoring the container type
+would require violating this alignment, a warning is issued.
+
+The default is not to honor the field's type, which results in a
+target-specific ``optimum'' access mode instead.
+
 @end table
 
 @c man end
 
 @node Environment Variables
 @section Environment Variables Affecting GCC
Index: fold-const.c
===================================================================
--- fold-const.c	(revision 160589)
+++ fold-const.c	(working copy)
@@ -3460,17 +3460,22 @@ optimize_bit_field_compare (location_t l
 	 || TREE_CODE (rinner) == PLACEHOLDER_EXPR)
        return 0;
    }
 
   /* See if we can find a mode to refer to this field.  We should be able to,
      but fail if we can't.  */
-  nmode = get_best_mode (lbitsize, lbitpos,
-			 const_p ? TYPE_ALIGN (TREE_TYPE (linner))
-			 : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
-				TYPE_ALIGN (TREE_TYPE (rinner))),
-			 word_mode, lvolatilep || rvolatilep);
+  if (lvolatilep
+      && GET_MODE_BITSIZE (lmode) > 0
+      && flag_honor_volatile_bitfield_types)
+    nmode = lmode;
+  else
+    nmode = get_best_mode (lbitsize, lbitpos,
+			   const_p ? TYPE_ALIGN (TREE_TYPE (linner))
+			   : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
+				  TYPE_ALIGN (TREE_TYPE (rinner))),
+			   word_mode, lvolatilep || rvolatilep);
   if (nmode == VOIDmode)
     return 0;
 
   /* Set signed and unsigned types of the precision of this mode for the
      shifts below.  */
   signed_type = lang_hooks.types.type_for_mode (nmode, 0);
Index: testsuite/gcc.target/i386/volatile-bitfields-2.c
===================================================================
--- testsuite/gcc.target/i386/volatile-bitfields-2.c	(revision 0)
+++ testsuite/gcc.target/i386/volatile-bitfields-2.c	(revision 0)
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-honor-volatile-bitfield-types" } */
+
+typedef struct {
+  char a:1;
+  char b:7;
+  int c;
+} BitStruct;
+
+volatile BitStruct bits;
+
+int foo ()
+{
+  return bits.b;
+}
+
+/* { dg-final { scan-assembler "movl.*bits" } } */
Index: testsuite/gcc.target/i386/volatile-bitfields-1.c
===================================================================
--- testsuite/gcc.target/i386/volatile-bitfields-1.c	(revision 0)
+++ testsuite/gcc.target/i386/volatile-bitfields-1.c	(revision 0)
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fhonor-volatile-bitfield-types" } */
+
+typedef struct {
+  char a:1;
+  char b:7;
+  int c;
+} BitStruct;
+
+volatile BitStruct bits;
+
+int foo ()
+{
+  return bits.b;
+}
+
+/* { dg-final { scan-assembler "movzbl.*bits" } } */
Index: expr.c
===================================================================
--- expr.c	(revision 160589)
+++ expr.c	(working copy)
@@ -4226,12 +4226,19 @@ expand_assignment (tree to, tree from, b
 
       /* If we are going to use store_bit_field and extract_bit_field,
 	 make sure to_rtx will be safe for multiple use.  */
 
       to_rtx = expand_normal (tem);
 
+      /* If the bitfield is volatile, we want to access it in the
+	 field's mode, not the computed mode.  */
+      if (volatilep
+	  && MEM_P (to_rtx)
+	  && flag_honor_volatile_bitfield_types)
+	to_rtx = adjust_address (to_rtx, mode1, 0);
+
       if (offset != 0)
 	{
 	  enum machine_mode address_mode;
 	  rtx offset_rtx;
 
 	  if (!MEM_P (to_rtx))
@@ -5987,12 +5994,18 @@ get_inner_reference (tree exp, HOST_WIDE
       tree field = TREE_OPERAND (exp, 1);
       size_tree = DECL_SIZE (field);
       if (!DECL_BIT_FIELD (field))
 	mode = DECL_MODE (field);
       else if (DECL_MODE (field) == BLKmode)
 	blkmode_bitfield = true;
+      else if (TREE_THIS_VOLATILE (exp)
+	       && flag_honor_volatile_bitfield_types)
+	/* Volatile bitfields should be accessed in the mode of the
+	   field's type, not the mode computed based on the bit
+	   size.  */
+	mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
 
       *punsignedp = DECL_UNSIGNED (field);
     }
   else if (TREE_CODE (exp) == BIT_FIELD_REF)
     {
       size_tree = TREE_OPERAND (exp, 1);
@@ -8963,12 +8976,19 @@ expand_expr_real_1 (tree exp, rtx target
 			 VOIDmode,
 			 (modifier == EXPAND_INITIALIZER
 			  || modifier == EXPAND_CONST_ADDRESS
 			  || modifier == EXPAND_STACK_PARM)
 			 ? modifier : EXPAND_NORMAL);
 
+	/* If the bitfield is volatile, we want to access it in the
+	   field's mode, not the computed mode.  */
+	if (volatilep
+	    && MEM_P (op0)
+	    && flag_honor_volatile_bitfield_types)
+	  op0 = adjust_address (op0, mode1, 0);
+
 	mode2
 	  = CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
 
 	/* If we have either an offset, a BLKmode result, or a reference
 	   outside the underlying object, we must force it to memory.
 	   Such a case can occur in Ada if we have unchecked conversion
@@ -9088,12 +9109,15 @@ expand_expr_real_1 (tree exp, rtx target
 	    || REG_P (op0) || GET_CODE (op0) == SUBREG
 	    || (mode1 != BLKmode && ! direct_load[(int) mode1]
 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
 		&& modifier != EXPAND_CONST_ADDRESS
 		&& modifier != EXPAND_INITIALIZER)
+	    /* If the field is volatile, we always want an aligned
+	       access.  */
+	    || (volatilep && flag_honor_volatile_bitfield_types)
 	    /* If the field isn't aligned enough to fetch as a memref,
 	       fetch it as a bit field.  */
 	    || (mode1 != BLKmode
 		&& (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
 		      || (bitpos % GET_MODE_ALIGNMENT (mode) != 0)
 		      || (MEM_P (op0)
Index: expmed.c
===================================================================
--- expmed.c	(revision 160589)
+++ expmed.c	(working copy)
@@ -900,14 +900,20 @@ store_fixed_bit_field (rtx op0, unsigned
 	 We don't want a mode bigger than the destination.  */
 
       mode = GET_MODE (op0);
       if (GET_MODE_BITSIZE (mode) == 0
 	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
 	mode = word_mode;
-      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
-			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
+
+      if (MEM_VOLATILE_P (op0)
+	  && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
+	  && flag_honor_volatile_bitfield_types)
+	mode = GET_MODE (op0);
+      else
+	mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+			      MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
 	{
 	  /* The only way this should occur is if the field spans word
 	     boundaries.  */
 	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
@@ -1374,12 +1380,20 @@ extract_bit_field_1 (rtx str_rtx, unsign
      we want a mode based on the size, so we must avoid calling it for FP
      modes.  */
   mode1  = (SCALAR_INT_MODE_P (tmode)
 	    ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
 	    : mode);
 
+  /* If the bitfield is volatile, we need to make sure the access
+     remains on a type-aligned boundary.  */
+  if (MEM_P (op0)
+      && MEM_VOLATILE_P (op0)
+      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
+      && flag_honor_volatile_bitfield_types)
+    goto no_subreg_mode_swap;
+
   if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
 	&& bitpos % BITS_PER_WORD == 0)
        || (mode1 != BLKmode
 	   /* ??? The big endian test here is wrong.  This is correct
 	      if the value is in a register, and if mode_for_size is not
 	      the same mode as op0.  This causes us to get unnecessarily
@@ -1726,14 +1740,25 @@ extract_fixed_bit_field (enum machine_mo
   else
     {
       /* Get the proper mode to use for this field.  We want a mode that
 	 includes the entire field.  If such a mode would be larger than
 	 a word, we won't be doing the extraction the normal way.  */
 
-      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
-			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
+      if (MEM_VOLATILE_P (op0)
+	  && flag_honor_volatile_bitfield_types)
+	{
+	  if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
+	    mode = GET_MODE (op0);
+	  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
+	    mode = GET_MODE (target);
+	  else
+	    mode = tmode;
+	}
+      else
+	mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
+			      MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
 	/* The only way this should occur is if the field spans word
 	   boundaries.  */
 	return extract_split_bit_field (op0, bitsize,
 					bitpos + offset * BITS_PER_UNIT,
@@ -1748,18 +1773,42 @@ extract_fixed_bit_field (enum machine_mo
 	{
 	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
 	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
 		     * BITS_PER_UNIT);
 	}
 
-      /* Get ref to an aligned byte, halfword, or word containing the field.
-	 Adjust BITPOS to be position within a word,
-	 and OFFSET to be the offset of that word.
-	 Then alter OP0 to refer to that word.  */
-      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
-      offset -= (offset % (total_bits / BITS_PER_UNIT));
+      /* If we're accessing a volatile MEM, we can't do the next
+	 alignment step if it results in a multi-word access where we
+	 otherwise wouldn't have one.  So, check for that case
+	 here.  */
+      if (MEM_P (op0)
+	  && MEM_VOLATILE_P (op0)
+	  && flag_honor_volatile_bitfield_types
+	  && bitpos + bitsize <= total_bits
+	  && (bitpos + bitsize
+	      + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT
+	      > total_bits))
+	{
+	  if (STRICT_ALIGNMENT)
+	    {
+	      if (bitsize == total_bits)
+		warning (0, "mis-aligned access required for structure member");
+	      else
+		warning (0, "mis-aligned access required for structure bitfield");
+	    }
+	}
+      else
+	{
+	  /* Get ref to an aligned byte, halfword, or word containing the field.
+	     Adjust BITPOS to be position within a word,
+	     and OFFSET to be the offset of that word.
+	     Then alter OP0 to refer to that word.  */
+	  bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
+	  offset -= (offset % (total_bits / BITS_PER_UNIT));
+	}
+
       op0 = adjust_address (op0, mode, offset);
     }
 
   mode = GET_MODE (op0);
 
   if (BYTES_BIG_ENDIAN)
Index: common.opt
===================================================================
--- common.opt	(revision 160589)
+++ common.opt	(working copy)
@@ -622,12 +622,16 @@ Common Report Var(flag_loop_interchange)
 Enable Loop Interchange transformation
 
 floop-block
 Common Report Var(flag_loop_block) Optimization
 Enable Loop Blocking transformation
 
+fhonor-volatile-bitfield-types
+Common Report Var(flag_honor_volatile_bitfield_types)
+Force bitfield accesses to match their type width
+
 fguess-branch-probability
 Common Report Var(flag_guess_branch_prob) Optimization
 Enable guessing of branch probabilities
 
 ; Nonzero means ignore `#ident' directives.  0 means handle them.
 ; Generate position-independent code for executables if possible

