This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



RFA: fix gcc/20371


Regression testing has revealed that there were actually more flaws
in the MS bitfield code, and even some in the general struct layout
code.  For example, for this code:

struct foo
{
  char c __attribute__((aligned (128)));
  char a[127];
};

gcc warns:

pad.c:3: warning: padding struct to align ‘c’
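
That warning is spurious: c is the first field, so it is placed at offset 0
with no padding in front of it, and the 1 + 127 bytes of members exactly
fill the 128-byte-aligned struct.  As a minimal check (not part of the
patch, assuming a hosted compiler), this program should print 0, 1 and 128:

#include <stdio.h>
#include <stddef.h>

struct foo
{
  char c __attribute__((aligned (128)));
  char a[127];
};

int
main (void)
{
  /* Expect offsets 0 and 1 and size 128: no padding anywhere.  */
  printf ("%zu %zu %zu\n", offsetof (struct foo, c),
          offsetof (struct foo, a), sizeof (struct foo));
  return 0;
}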

The appended patch fixes all the problems I have found there.


Bootstrapped on i686-pc-linux-gnu, and regression-tested for
i686-pc-linux-gnu X sh64-elf on the sh-elf-4_1-branch.

2005-05-04  Jörn Rennecke  <joern.rennecke@st.com>

	PR middle-end/20371:
	* tree.h (record_layout_info_s): New member prev_packed.
	* stor-layout.c (update_alignment_for_field): Fix comment about
	KNOWN_ALIGN.  For MS bitfields, if we start a new run, make sure
	we start it properly aligned.
	(place_field): At the beginning of a record, pass 0 as KNOWN_ALIGN
	to update_alignment_for_field, and recompute it afterwards using
	the alignment of the record.
	When a packed bitfield precedes an MS bitfield, don't add padding
	at the end of the packed bitfield on behalf of the base type of
	the packed bit field.
	Don't adjust rli->bitpos at the end of an MS bitfield run if we
	already adjusted bitpos/offset for an alignment as large as or
	larger than the bitfield type size.
	Take possible record alignment > BIGGEST_ALIGNMENT into account
	when calculating actual_align.
	Only put packed bit fields into rli->prev_field if they end up
	suitably aligned.
	Also set rli->remaining_in_alignment when we re-set rli->prev_field.
	Update rli->remaining_in_alignment when we have already started a
	run of bit fields and we process a packed bit field.
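
To illustrate the packed bit field change, here is a rough sketch of the
kind of case involved (not a test case from the PR; it assumes an i386
target, where the ms_struct attribute selects MS bitfield layout):

struct ll
{
  long long a : 3 __attribute__ ((packed));  /* Packed bit field ...  */
  short b : 5;                  /* ... followed by an MS bit field.  */
} __attribute__ ((ms_struct));

struct s
{
  short a : 3;
  short b : 5;
} __attribute__ ((ms_struct));

Without the fix, padding could be added at the end of the packed 'a' on
behalf of its long long base type, so struct ll could end up larger than
struct s even though both store only 3 + 5 bits; with the fix, the packed
bit field no longer imposes its base type's size on the following field.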

Index: tree.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree.h,v
retrieving revision 1.709
diff -p -r1.709 tree.h
*** tree.h	29 Mar 2005 16:10:02 -0000	1.709
--- tree.h	4 May 2005 20:13:11 -0000
*************** typedef struct record_layout_info_s
*** 3120,3125 ****
--- 3120,3128 ----
    tree pending_statics;
    /* Bits remaining in the current alignment group */
    int remaining_in_alignment;
+   /* True if prev_field was packed and we haven't found any non-packed
+      fields that we have put in the same alignment group.  */
+   int prev_packed;
    /* True if we've seen a packed field that didn't have normal
       alignment anyway.  */
    int packed_maybe_necessary;
Index: stor-layout.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/stor-layout.c,v
retrieving revision 1.228
diff -p -r1.228 stor-layout.c
*** stor-layout.c	30 Mar 2005 21:34:26 -0000	1.228
--- stor-layout.c	4 May 2005 20:13:11 -0000
*************** rli_size_so_far (record_layout_info rli)
*** 634,642 ****
  }
  
  /* FIELD is about to be added to RLI->T.  The alignment (in bits) of
!    the next available location is given by KNOWN_ALIGN.  Update the
!    variable alignment fields in RLI, and return the alignment to give
!    the FIELD.  */
  
  unsigned int
  update_alignment_for_field (record_layout_info rli, tree field,
--- 634,642 ----
  }
  
  /* FIELD is about to be added to RLI->T.  The alignment (in bits) of
!    the next available location within the record is given by KNOWN_ALIGN.
!    Update the variable alignment fields in RLI, and return the alignment
!    to give the FIELD.  */
  
  unsigned int
  update_alignment_for_field (record_layout_info rli, tree field,
*************** update_alignment_for_field (record_layou
*** 682,687 ****
--- 682,699 ----
  	    type_align = MIN (type_align, maximum_field_alignment);
  	  rli->record_align = MAX (rli->record_align, type_align);
  	  rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
+ 	  /* If we start a new run, make sure we start it properly aligned.  */
+ 	  if ((!rli->prev_field
+ 	       || integer_zerop (DECL_SIZE (field))
+ 	       || integer_zerop (DECL_SIZE (rli->prev_field))
+ 	       || !host_integerp (DECL_SIZE (rli->prev_field), 0)
+ 	       || !host_integerp (TYPE_SIZE (type), 0)
+ 	       || !simple_cst_equal (TYPE_SIZE (type),
+ 				     TYPE_SIZE (TREE_TYPE (rli->prev_field)))
+ 	       || (rli->remaining_in_alignment
+ 		   < tree_low_cst (DECL_SIZE (field), 0)))
+ 	      && desired_align < type_align)
+ 	    desired_align = type_align;
  	}
      }
  #ifdef PCC_BITFIELD_TYPE_MATTERS
*************** place_field (record_layout_info rli, tre
*** 820,826 ****
      known_align = (tree_low_cst (rli->bitpos, 1)
  		   & - tree_low_cst (rli->bitpos, 1));
    else if (integer_zerop (rli->offset))
!     known_align = BIGGEST_ALIGNMENT;
    else if (host_integerp (rli->offset, 1))
      known_align = (BITS_PER_UNIT
  		   * (tree_low_cst (rli->offset, 1)
--- 832,838 ----
      known_align = (tree_low_cst (rli->bitpos, 1)
  		   & - tree_low_cst (rli->bitpos, 1));
    else if (integer_zerop (rli->offset))
!     known_align = 0;
    else if (host_integerp (rli->offset, 1))
      known_align = (BITS_PER_UNIT
  		   * (tree_low_cst (rli->offset, 1)
*************** place_field (record_layout_info rli, tre
*** 829,834 ****
--- 841,848 ----
      known_align = rli->offset_align;
  
    desired_align = update_alignment_for_field (rli, field, known_align);
+   if (known_align == 0)
+     known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  
    if (warn_packed && DECL_PACKED (field))
      {
*************** place_field (record_layout_info rli, tre
*** 1001,1018 ****
  
  	      if (rli->remaining_in_alignment < bitsize)
  		{
! 		  /* out of bits; bump up to next 'word'.  */
! 		  rli->offset = DECL_FIELD_OFFSET (rli->prev_field);
! 		  rli->bitpos
! 		    = size_binop (PLUS_EXPR, TYPE_SIZE (type),
! 				  DECL_FIELD_BIT_OFFSET (rli->prev_field));
! 		  rli->prev_field = field;
! 		  rli->remaining_in_alignment
! 		    = tree_low_cst (TYPE_SIZE (type), 0);
  		}
! 
! 	      rli->remaining_in_alignment -= bitsize;
  	    }
  	  else
  	    {
  	      /* End of a run: if leaving a run of bitfields of the same type
--- 1015,1044 ----
  
  	      if (rli->remaining_in_alignment < bitsize)
  		{
! 		  /* If PREV_FIELD is packed, and we haven't lumped
! 		     non-packed bitfields with it, treat this as if PREV_FIELD
! 		     was not a bitfield.  This avoids anomalies where a packed
! 		     bitfield with long long base type can take up more
! 		     space than a same-size bitfield with base type short.  */
! 		  if (rli->prev_packed)
! 		    rli->prev_field = prev_saved = NULL;
! 		  else
! 		    {
! 		      /* out of bits; bump up to next 'word'.  */
! 		      rli->offset = DECL_FIELD_OFFSET (rli->prev_field);
! 		      rli->bitpos
! 			= size_binop (PLUS_EXPR, TYPE_SIZE (type),
! 				      DECL_FIELD_BIT_OFFSET (rli->prev_field));
! 		      rli->prev_field = field;
! 		      rli->remaining_in_alignment
! 			= tree_low_cst (TYPE_SIZE (type), 0) - bitsize;
! 		    }
  		}
! 	      else
! 		rli->remaining_in_alignment -= bitsize;
  	    }
+ 	  else if (rli->prev_packed)
+ 	    rli->prev_field = prev_saved = NULL;
  	  else
  	    {
  	      /* End of a run: if leaving a run of bitfields of the same type
*************** place_field (record_layout_info rli, tre
*** 1028,1036 ****
  		{
  		  tree type_size = TYPE_SIZE (TREE_TYPE (rli->prev_field));
  
! 		  rli->bitpos
! 		    = size_binop (PLUS_EXPR, type_size,
! 				  DECL_FIELD_BIT_OFFSET (rli->prev_field));
  		}
  	      else
  		/* We "use up" size zero fields; the code below should behave
--- 1054,1067 ----
  		{
  		  tree type_size = TYPE_SIZE (TREE_TYPE (rli->prev_field));
  
! 		  /* If the desired alignment is greater or equal to TYPE_SIZE,
! 		     we have already adjusted rli->bitpos / rli->offset above.
! 		   */
! 		  if ((unsigned HOST_WIDE_INT) tree_low_cst (type_size, 0)
! 		      > desired_align)
! 		    rli->bitpos
! 		      = size_binop (PLUS_EXPR, type_size,
! 				    DECL_FIELD_BIT_OFFSET (rli->prev_field));
  		}
  	      else
  		/* We "use up" size zero fields; the code below should behave
*************** place_field (record_layout_info rli, tre
*** 1044,1049 ****
--- 1075,1081 ----
  		rli->prev_field = NULL;
  	    }
  
+ 	  rli->prev_packed = 0;
  	  normalize_rli (rli);
          }
  
*************** place_field (record_layout_info rli, tre
*** 1116,1135 ****
      actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
  		    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
    else if (integer_zerop (DECL_FIELD_OFFSET (field)))
!     actual_align = BIGGEST_ALIGNMENT;
    else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
      actual_align = (BITS_PER_UNIT
  		   * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
  		      & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
    else
      actual_align = DECL_OFFSET_ALIGN (field);
  
    if (known_align != actual_align)
      layout_decl (field, actual_align);
  
!   /* Only the MS bitfields use this.  */
!   if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE(field))
!       rli->prev_field = field;
  
    /* Now add size of this field to the size of the record.  If the size is
       not constant, treat the field as being a multiple of bytes and just
--- 1148,1206 ----
      actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
  		    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
    else if (integer_zerop (DECL_FIELD_OFFSET (field)))
!     actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
    else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
      actual_align = (BITS_PER_UNIT
  		   * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
  		      & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
    else
      actual_align = DECL_OFFSET_ALIGN (field);
+   /* ACTUAL_ALIGN is still the actual alignment *within the record* .
+      store / extract bit field operations will check the alignment of the
+      record against the mode of bit fields.  */
  
    if (known_align != actual_align)
      layout_decl (field, actual_align);
  
!   if (DECL_BIT_FIELD_TYPE (field))
!     {
!       unsigned int type_align = TYPE_ALIGN (type);
! 
!       /* Only the MS bitfields use this.  We used to also put any kind of
! 	 packed bit fields into prev_field, but that makes no sense, because
! 	 an 8 bit packed bit field shouldn't impose more restriction on
! 	 following fields than a char field, and the alignment requirements
! 	 are also not fulfilled.
! 	 There is no sane value to set rli->remaining_in_alignment to when
! 	 a packed bitfield in prev_field is unaligned.  */
!       if (maximum_field_alignment != 0)
! 	type_align = MIN (type_align, maximum_field_alignment);
!       gcc_assert (rli->prev_field
! 		  || actual_align >= type_align || DECL_PACKED (field)
! 		  || integer_zerop (DECL_SIZE (field))
! 		  || !targetm.ms_bitfield_layout_p (rli->t));
!       if (rli->prev_field == NULL && actual_align >= type_align
! 	  && !integer_zerop (DECL_SIZE (field)))
! 	{
! 	  rli->prev_field = field;
! 	  /* rli->remaining_in_alignment has not been set if the bitfield
! 	     has size zero, or if it is a packed bitfield.  */
! 	  rli->remaining_in_alignment
! 	    = (tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 0)
! 	       - tree_low_cst (DECL_SIZE (field), 0));
! 	  rli->prev_packed = DECL_PACKED (field);
! 
! 	}
!       else if (rli->prev_field && DECL_PACKED (field))
! 	{
! 	  HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0);
! 
! 	  if (rli->remaining_in_alignment < bitsize)
! 	    rli->prev_field = NULL;
! 	  else
! 	    rli->remaining_in_alignment -= bitsize;
! 	}
!     }
  
    /* Now add size of this field to the size of the record.  If the size is
       not constant, treat the field as being a multiple of bytes and just
