/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
/* ... and its original value in bytes, specified via -fpack-struct=<value>.  */
unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;

/* If nonzero, the alignment of a bitstring or (power-)set value, in bits.
   May be overridden by front ends.  */
unsigned int set_alignment = 0;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
   in Pmode, not ptr_mode.  Set only by internal_reference_types, which is
   called only by a front end.  */
static int reference_types_internal = 0;

static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);

/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded.  */

static GTY(()) tree pending_sizes;

/* Show that REFERENCE_TYPEs are internal and should use Pmode.  Called only
   by a front end.  */

void
internal_reference_types (void)
{
  reference_types_internal = 1;
}

/* Get a list of all the objects put on the pending sizes list.  */

tree
get_pending_sizes (void)
{
  tree chain = pending_sizes;

  pending_sizes = 0;
  return chain;
}

/* Add EXPR to the pending sizes list.  */

void
put_pending_size (tree expr)
{
  /* Strip any simple arithmetic from EXPR to see if it has an underlying
     SAVE_EXPR.  */
  expr = skip_simple_arithmetic (expr);

  if (TREE_CODE (expr) == SAVE_EXPR)
    pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
}

/* Put a chain of objects into the pending sizes list, which must be
   empty.  */

void
put_pending_sizes (tree chain)
{
  if (pending_sizes)
    abort ();

  pending_sizes = chain;
}

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  tree save;

  /* If the language-processor is to take responsibility for variable-sized
     items (e.g., languages which have elaboration procedures like Ada),
     just return SIZE unchanged.  Likewise for self-referential sizes and
     constant sizes.  */
  if (TREE_CONSTANT (size)
      || lang_hooks.decls.global_bindings_p () < 0
      || CONTAINS_PLACEHOLDER_P (size))
    return size;

  size = save_expr (size);

  /* If an array with a variable number of elements is declared, and
     the elements require destruction, we will emit a cleanup for the
     array.  That cleanup is run both on normal exit from the block
     and in the exception-handler for the block.  Normally, when code
     is used in both ordinary code and in an exception handler it is
     `unsaved', i.e., all SAVE_EXPRs are recalculated.  However, we do
     not wish to do that here; the array-size is the same in both
     places.  */
  save = skip_simple_arithmetic (size);

  if (cfun && cfun->x_dont_save_pending_sizes_p)
    /* The front-end doesn't want us to keep a list of the expressions
       that determine sizes for variable size objects.  Trust it.  */
    return size;

  if (lang_hooks.decls.global_bindings_p ())
    {
      if (TREE_CONSTANT (size))
        error ("type size can't be explicitly evaluated");
      else
        error ("variable-size type declared outside of any function");

      return size_one_node;
    }

  put_pending_size (save);

  return size;
}
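
/* For illustration only (an assumed example, not from the original sources):
   for a C99 declaration such as

       void f (int n) { char buf[n * 2 + 1]; ... }

   the array size "n * 2 + 1" is not constant, so variable_size wraps it in a
   SAVE_EXPR; the expression is then evaluated only once, and the SAVE_EXPR is
   queued on pending_sizes so it can be expanded at a safe point.  */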

#ifndef MAX_FIXED_MODE_SIZE
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
#endif

/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class CLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes wider
   than MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class class, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}

/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (tree size, enum mode_class class, int limit)
{
  if (TREE_CODE (size) != INTEGER_CST
      || TREE_OVERFLOW (size)
      /* What we really want to say here is that the size can fit in a
         host integer, but we know there's no way we'd find a mode for
         this many bits, so there's no point in doing the precise test.  */
      || compare_tree_int (size, 1000) > 0)
    return BLKmode;
  else
    return mode_for_size (tree_low_cst (size, 1), class, limit);
}

/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class class)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  abort ();
}

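/* Illustrative only (example values assumed, not from the original sources):
   on a typical 32-bit target where QImode/HImode/SImode carry 8/16/32 value
   bits and MAX_FIXED_MODE_SIZE is at least 32,

       mode_for_size (32, MODE_INT, 1)        => SImode
       mode_for_size (24, MODE_INT, 1)        => BLKmode (no 24-bit mode)
       smallest_mode_for_size (24, MODE_INT)  => SImode  */
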
/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
        break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      abort ();
    }

  return mode;
}

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode] * BITS_PER_UNIT));
}

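/* Illustration (assumed target, not from the original sources): on a machine
   that provides a 64-bit DImode, int_mode_for_mode (DFmode) typically yields
   DImode, i.e. an integer mode with the same number of bits, which callers
   can use to move or compare the bytes of a double as an integer.  */
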
/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;

  if (code == CONST_DECL)
    return;
  else if (code != VAR_DECL && code != PARM_DECL && code != RESULT_DECL
           && code != TYPE_DECL && code != FIELD_DECL)
    abort ();

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
                                       bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! DECL_PACKED (decl)
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
#ifdef PCC_BITFIELD_TYPE_MATTERS
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
#endif
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode
             and occupying a complete byte or bytes on proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              enum machine_mode xmode
                = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);

              if (xmode != BLKmode
                  && (known_align == 0
                      || known_align >= GET_MODE_ALIGNMENT (xmode)))
                {
                  DECL_ALIGN (decl) = MAX (GET_MODE_ALIGNMENT (xmode),
                                           DECL_ALIGN (decl));
                  DECL_MODE (decl) = xmode;
                  DECL_BIT_FIELD (decl) = 0;
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (DECL_PACKED (decl) && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is of variable size, we can't misalign it since we
         have no way to make a temporary to align the result.  But this
         isn't an issue if the decl is not addressable.  Likewise if it
         is of unknown size.

         Note that do_type_align may set DECL_USER_ALIGN, so we need to
         check old_user_align instead.  */
      if (DECL_PACKED (decl)
          && !old_user_align
          && (DECL_NONADDRESSABLE_P (decl)
              || DECL_SIZE_UNIT (decl) == 0
              || TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST))
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! DECL_USER_ALIGN (decl) && ! DECL_PACKED (decl))
        {
          /* Some targets (e.g. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          DECL_ALIGN (decl)
            = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
          DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
        }

      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (maximum_field_alignment != 0)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), maximum_field_alignment);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
          && compare_tree_int (size, larger_than_size) > 0)
        {
          int size_as_int = TREE_INT_CST_LOW (size);

          if (compare_tree_int (size, size_as_int) == 0)
            warning ("%Jsize of '%D' is %d bytes", decl, decl, size_as_int);
          else
            warning ("%Jsize of '%D' is larger than %d bytes",
                     decl, decl, larger_than_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
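
/* Illustrative example (assumed, not part of the original sources): for

       struct s { unsigned a : 8; };

   layout_decl gives the FIELD_DECL for `a' an 8-bit DECL_SIZE; since 8 bits
   fits QImode exactly and can land on a byte boundary, the field may drop
   DECL_BIT_FIELD and be accessed in an ordinary integer mode instead.  */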

/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Hook for a front-end function that can modify the record layout as needed
   immediately before it is finalized.  */

void (*lang_adjust_rli) (record_layout_info) = 0;

void
set_lang_adjust_rli (void (*f) (record_layout_info))
{
  lang_adjust_rli = f;
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = xmalloc (sizeof (struct record_layout_info_s));

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    rli->record_align = MAX (rli->record_align, (unsigned) STRUCTURE_SIZE_BOUNDARY);
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;

  return rli;
}

/* These four routines perform computations that convert between
   the offset/bitpos forms and byte and bit offsets.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR, convert (bitsizetype, offset),
                                 bitsize_unit_node));
}

tree
byte_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, offset,
                     convert (sizetype,
                              size_binop (TRUNC_DIV_EXPR, bitpos,
                                          bitsize_unit_node)));
}

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  *poffset = size_binop (MULT_EXPR,
                         convert (sizetype,
                                  size_binop (FLOOR_DIV_EXPR, pos,
                                              bitsize_int (off_align))),
                         size_int (off_align / BITS_PER_UNIT));
  *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
                                      bitsize_int (off_align));

      *poffset
        = size_binop (PLUS_EXPR, *poffset,
                      size_binop (MULT_EXPR, convert (sizetype, extra_aligns),
                                  size_int (off_align / BITS_PER_UNIT)));

      *pbitpos
        = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
    }
}

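/* A worked example (values assumed, for illustration only): with
   OFF_ALIGN == 32 and BITS_PER_UNIT == 8, the bit position 70 converts to a
   byte offset of 8 plus a bit position of 6, since 70 = 2 * 32 + 6 and each
   complete group of 32 bits contributes 4 bytes to the byte offset; the
   reverse routines recover 8 * 8 + 6 = 70 bits.  */
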
/* Print debugging information about the information in RLI.  */

void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);
  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (rli->pending_statics)
    {
      fprintf (stderr, "pending statics:\n");
      debug_tree (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location is given by KNOWN_ALIGN.  Update the
   variable alignment fields in RLI, and return the alignment to give
   the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (is_bitfield && targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.) */
      if (! integer_zerop (DECL_SIZE (field))
          ? ! DECL_PACKED (field)
          : (rli->prev_field
             && DECL_BIT_FIELD_TYPE (rli->prev_field)
             && ! integer_zerop (DECL_SIZE (rli->prev_field))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
        }
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold (build3 (COND_EXPR, sizetype,
                                DECL_QUALIFIER (field),
                                DECL_SIZE_UNIT (field), rli->offset));
}

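/* For illustration (assumed example, not from the original sources): in

       union u { char c; double d; };

   every member is placed at offset 0, and rli->offset becomes the maximum of
   the member sizes, so on a typical target the union is sizeof (double)
   bytes before alignment padding is considered.  */
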
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
             / align));
}
#endif

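/* Worked example (values assumed for illustration): with BITS_PER_UNIT == 8,
   a 6-bit field placed at byte 3, bit 4 against a 32-bit `int' starts at bit
   28 and ends at bit 33, so it straddles two 32-bit alignment units while
   the type itself covers only one; excess_unit_span returns nonzero and the
   caller advances the field to the next 32-bit boundary.  */
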
/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  if (TREE_CODE (field) == ERROR_MARK || TREE_CODE (type) == ERROR_MARK)
    return;

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      rli->pending_statics = tree_cons (NULL_TREE, field,
                                        rli->pending_statics);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
                   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = BIGGEST_ALIGNMENT;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
                   * (tree_low_cst (rli->offset, 1)
                      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
        {
          if (TYPE_ALIGN (type) > desired_align)
            {
              if (STRICT_ALIGNMENT)
                warning ("%Jpacked attribute causes inefficient alignment "
                         "for '%D'", field, field);
              else
                warning ("%Jpacked attribute is unnecessary for '%D'",
                         field, field);
            }
        }
      else
        rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?  */
  if (known_align < desired_align)
    {
      /* No, we need to skip space before this field.
         Bump the cumulative size to multiple of field alignment.  */

      if (warn_padded)
        warning ("%Jpadding struct to align '%D'", field, field);

      /* If the alignment is still within offset_align, just align
         the bit position.  */
      if (desired_align < rli->offset_align)
        rli->bitpos = round_up (rli->bitpos, desired_align);
      else
        {
          /* First adjust OFFSET by the partial bits, then align.  */
          rli->offset
            = size_binop (PLUS_EXPR, rli->offset,
                          convert (sizetype,
                                   size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                               bitsize_unit_node)));
          rli->bitpos = bitsize_zero_node;

          rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
        }

      if (! TREE_CONSTANT (rli->offset))
        rli->offset_align = desired_align;
    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && ! DECL_PACKED (field)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
         than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
        type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
         statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
        type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
         Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
        When a bit field is inserted into a packed record, the whole
        size of the underlying type is used by one or more same-size
        adjacent bitfields.  (That is, if it's long:3, 32 bits is
        used in the record, and any additional adjacent long bitfields are
        packed into the same chunk of 32 bits.  However, if the size
        changes, a new field of that size is allocated.)  In an unpacked
        record, this is the same as using alignment, but not equivalent
        when packing.

     Note: for compatibility, we use the type size, not the type alignment,
     to determine alignment, since that matches the documentation.  */

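  /* Illustrative example of the MS rule above (assumed layout, not from the
     original sources): in

         struct s { int a : 3; int b : 3; char c : 2; };

     `a' and `b' share one 32-bit `int' allocation unit, but `c' has a
     different type size, so a fresh 8-bit `char' unit is started for it
     rather than packing it into the bits left over from the `int' unit.  */
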
  if (targetm.ms_bitfield_layout_p (rli->t)
      && ((DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field))
          || (rli->prev_field && ! DECL_PACKED (rli->prev_field))))
    {
      /* At this point, either the prior or current are bitfields,
         (possibly both), and we're dealing with MS packing.  */
      tree prev_saved = rli->prev_field;

      /* Is the prior field a bitfield?  If so, handle "runs" of same
         type size fields.  */
      if (rli->prev_field /* necessarily a bitfield if it exists.  */)
        {
          /* If both are bitfields, nonzero, and the same size, this is
             the middle of a run.  Zero declared size fields are special
             and handled as "end of run".  (Note: it's nonzero declared
             size, but equal type sizes!)  (Since we know that both
             the current and previous fields are bitfields by the
             time we check it, DECL_SIZE must be present for both.)  */
          if (DECL_BIT_FIELD_TYPE (field)
              && !integer_zerop (DECL_SIZE (field))
              && !integer_zerop (DECL_SIZE (rli->prev_field))
              && host_integerp (DECL_SIZE (rli->prev_field), 0)
              && host_integerp (TYPE_SIZE (type), 0)
              && simple_cst_equal (TYPE_SIZE (type),
                                   TYPE_SIZE (TREE_TYPE (rli->prev_field))))
            {
              /* We're in the middle of a run of equal type size fields; make
                 sure we realign if we run out of bits.  (Not decl size,
                 type size!)  */
              HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0);

              if (rli->remaining_in_alignment < bitsize)
                {
                  /* Out of bits; bump up to next 'word'.  */
                  rli->offset = DECL_FIELD_OFFSET (rli->prev_field);
                  rli->bitpos
                    = size_binop (PLUS_EXPR, TYPE_SIZE (type),
                                  DECL_FIELD_BIT_OFFSET (rli->prev_field));
                  rli->prev_field = field;
                  rli->remaining_in_alignment
                    = tree_low_cst (TYPE_SIZE (type), 0);
                }

              rli->remaining_in_alignment -= bitsize;
            }
          else
            {
              /* End of a run: if leaving a run of bitfields of the same type
                 size, we have to "use up" the rest of the bits of the type
                 size.

                 Compute the new position as the sum of the size for the prior
                 type and where we first started working on that type.
                 Note: since the beginning of the field was aligned then
                 of course the end will be too.  No round needed.  */

              if (!integer_zerop (DECL_SIZE (rli->prev_field)))
                {
                  tree type_size = TYPE_SIZE (TREE_TYPE (rli->prev_field));

                  rli->bitpos
                    = size_binop (PLUS_EXPR, type_size,
                                  DECL_FIELD_BIT_OFFSET (rli->prev_field));
                }
              else
                /* We "use up" size zero fields; the code below should behave
                   as if the prior field was not a bitfield.  */
                prev_saved = NULL;

              /* Cause a new bitfield to be captured, either this time (if
                 currently a bitfield) or next time we see one.  */
              if (!DECL_BIT_FIELD_TYPE (field)
                  || integer_zerop (DECL_SIZE (field)))
                rli->prev_field = NULL;
            }

          normalize_rli (rli);
        }

      /* If we're starting a new run of same size type bitfields
         (or a run of non-bitfields), set up the "first of the run"
         fields.

         That is, if the current field is not a bitfield, or if there
         was a prior bitfield and the type sizes differ, or if there wasn't
         a prior bitfield and the size of the current field is nonzero.

         Note: we must be sure to test ONLY the type size if there was
         a prior bitfield and ONLY for the current field being zero if
         there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
          || (prev_saved != NULL
              ? !simple_cst_equal (TYPE_SIZE (type),
                                   TYPE_SIZE (TREE_TYPE (prev_saved)))
              : !integer_zerop (DECL_SIZE (field))))
        {
          /* Never smaller than a byte for compatibility.  */
          unsigned int type_align = BITS_PER_UNIT;

          /* (When not a bitfield), we could be seeing a flex array (with
             no DECL_SIZE).  Since we won't be using remaining_in_alignment
             until we see a bitfield (and come by here again), we just skip
             calculating it.  */
          if (DECL_SIZE (field) != NULL
              && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0)
              && host_integerp (DECL_SIZE (field), 0))
            rli->remaining_in_alignment
              = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 0)
                - tree_low_cst (DECL_SIZE (field), 0);

          /* Now align (conventionally) for the new type.  */
          if (!DECL_PACKED (field))
            type_align = MAX (TYPE_ALIGN (type), type_align);

          if (prev_saved
              && DECL_BIT_FIELD_TYPE (prev_saved)
              /* If the previous bit-field is zero-sized, we've already
                 accounted for its alignment needs (or ignored it, if
                 appropriate) while placing it.  */
              && ! integer_zerop (DECL_SIZE (prev_saved)))
            type_align = MAX (type_align,
                              TYPE_ALIGN (TREE_TYPE (prev_saved)));

          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);

          rli->bitpos = round_up (rli->bitpos, type_align);

          /* If we really aligned, don't allow subsequent bitfields
             to undo that.  */
          rli->prev_field = NULL;
        }
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
                    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = BIGGEST_ALIGNMENT;
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
                    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
                       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  /* Only the MS bitfields use this.  */
  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE_UNIT (field)) != INTEGER_CST
           || TREE_CONSTANT_OVERFLOW (DECL_SIZE_UNIT (field)))
    {
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset,
                      convert (sizetype,
                               size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                           bitsize_unit_node)));
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}

/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
                                          rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  if (warn_padded && TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
    warning ("padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
        = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
        {
          TYPE_PACKED (rli->t) = 0;

          if (TYPE_NAME (rli->t))
            {
              const char *name;

              if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
                name = IDENTIFIER_POINTER (TYPE_NAME (rli->t));
              else
                name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));

              if (STRICT_ALIGNMENT)
                warning ("packed attribute causes inefficient alignment for `%s'", name);
              else
                warning ("packed attribute is unnecessary for `%s'", name);
            }
          else
            {
              if (STRICT_ALIGNMENT)
                warning ("packed attribute causes inefficient alignment");
              else
                warning ("packed attribute is unnecessary");
            }
        }
    }
}

/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  enum machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  TYPE_MODE (type) = BLKmode;

  if (! host_integerp (TYPE_SIZE (type), 1))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;

      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
          || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
              && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
              && !(TYPE_SIZE (TREE_TYPE (field)) != 0
                   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
          || ! host_integerp (bit_position (field), 1)
          || DECL_SIZE (field) == 0
          || ! host_integerp (DECL_SIZE (field), 1))
        return;

      /* If this field is the whole struct, remember its mode so
         that, say, we can put a double in a class into a DF
         register instead of forcing it to live in the stack.  */
      if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
        mode = DECL_MODE (field);

#ifdef MEMBER_TYPE_FORCES_BLK
      /* With some targets, e.g. c4x, it is sub-optimal
         to access an aligned BLKmode structure as a scalar.  */

      if (MEMBER_TYPE_FORCES_BLK (field, mode))
        return;
#endif /* MEMBER_TYPE_FORCES_BLK */
    }

  /* If we only have one real field, use its mode.  This only applies to
     RECORD_TYPE.  This does not apply to unions.  */
  if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode)
    TYPE_MODE (type) = mode;
  else
    TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (TYPE_MODE (type) != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
            || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
    {
      /* If this is the only reason this type is BLKmode, then
         don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      TYPE_MODE (type) = BLKmode;
    }
}
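
/* Illustration (assumed, typical target): for

       struct wrap { double d; };

   the single field spans the whole struct, so compute_record_mode can give
   the record DFmode rather than BLKmode and let it live in a floating-point
   register, whereas a record containing a BLKmode or variable-sized member
   stays BLKmode.  */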

/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */

  if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT
          || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
              && TREE_CODE (type) != QUAL_UNION_TYPE
              && TREE_CODE (type) != ARRAY_TYPE)))
    {
      TYPE_ALIGN (type) = GET_MODE_ALIGNMENT (TYPE_MODE (type));
      TYPE_USER_ALIGN (type) = 0;
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (type)
    = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = convert (sizetype,
                 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
                             bitsize_unit_node));

  if (TYPE_SIZE (type) != 0)
    {
      TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
                                        TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Also layout any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      enum machine_mode mode = TYPE_MODE (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
           variant != 0;
           variant = TYPE_NEXT_VARIANT (variant))
        {
          TYPE_SIZE (variant) = size;
          TYPE_SIZE_UNIT (variant) = size_unit;
          TYPE_ALIGN (variant) = align;
          TYPE_USER_ALIGN (variant) = user_align;
          TYPE_MODE (variant) = mode;
        }
    }
}

/* Do all of the work required to lay out the type indicated by RLI,
   once the fields have been laid out.  This function will call `free'
   for RLI, unless FREE_P is false.  Passing a value other than false
   for FREE_P is bad practice; this option only exists to support the
   G++ 3.2 ABI.  */

void
finish_record_layout (record_layout_info rli, int free_p)
{
  /* Compute the final size.  */
  finalize_record_size (rli);

  /* Compute the TYPE_MODE for the record.  */
  compute_record_mode (rli->t);

  /* Perform any last tweaks to the TYPE_SIZE, etc.  */
  finalize_type_size (rli->t);

  /* Lay out any static members.  This is done now because their type
     may use the record's type.  */
  while (rli->pending_statics)
    {
      layout_decl (TREE_VALUE (rli->pending_statics), 0);
      rli->pending_statics = TREE_CHAIN (rli->pending_statics);
    }

  /* Clean up.  */
  if (free_p)
    free (rli);
}
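
/* A minimal usage sketch (assumed, illustrative only -- real front ends add
   more bookkeeping): laying out a record type T whose FIELD_DECLs are
   already chained on TYPE_FIELDS (T).

       record_layout_info rli = start_record_layout (t);
       tree f;

       for (f = TYPE_FIELDS (t); f != 0; f = TREE_CHAIN (f))
         place_field (rli, f);

       finish_record_layout (rli, 1);
*/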
7306ed3f 1429\f
4977bab6
ZW
1430
1431/* Finish processing a builtin RECORD_TYPE type TYPE. It's name is
1432 NAME, its fields are chained in reverse on FIELDS.
1433
1434 If ALIGN_TYPE is non-null, it is given the same alignment as
1435 ALIGN_TYPE. */
1436
1437void
46c5ad27
AJ
1438finish_builtin_struct (tree type, const char *name, tree fields,
1439 tree align_type)
4977bab6
ZW
1440{
1441 tree tail, next;
1442
1443 for (tail = NULL_TREE; fields; tail = fields, fields = next)
1444 {
1445 DECL_FIELD_CONTEXT (fields) = type;
1446 next = TREE_CHAIN (fields);
1447 TREE_CHAIN (fields) = tail;
1448 }
1449 TYPE_FIELDS (type) = tail;
1450
1451 if (align_type)
1452 {
1453 TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
1454 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
1455 }
1456
1457 layout_type (type);
1458#if 0 /* not yet, should get fixed properly later */
1459 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
1460#else
1461 TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type);
1462#endif
1463 TYPE_STUB_DECL (type) = TYPE_NAME (type);
1464 layout_decl (TYPE_NAME (type), 0);
1465}
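/* Illustrative sketch only (not part of the original file): building a
   small builtin record with finish_builtin_struct.  The type, field, and
   struct names are hypothetical; note that the field chain is handed over
   in reverse order, as the comment above requires.  */

static tree
example_build_builtin_struct (void)
{
  tree t = make_node (RECORD_TYPE);
  tree base = build_decl (FIELD_DECL, get_identifier ("base"),
			  ptr_type_node);
  tree len = build_decl (FIELD_DECL, get_identifier ("len"),
			 unsigned_type_node);

  /* Chain in reverse: LEN first, so the laid-out order is BASE, LEN.  */
  TREE_CHAIN (len) = base;
  finish_builtin_struct (t, "__example_struct", len, NULL_TREE);
  return t;
}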
1466
7306ed3f
JW
1467/* Calculate the mode, size, and alignment for TYPE.
1468 For an array type, calculate the element separation as well.
1469 Record TYPE on the chain of permanent or temporary types
1470 so that dbxout will find out about it.
1471
1472 TYPE_SIZE of a type is nonzero if the type has been laid out already.
1473 layout_type does nothing on such a type.
1474
1475 If the type is incomplete, its TYPE_SIZE remains zero. */
1476
1477void
46c5ad27 1478layout_type (tree type)
7306ed3f 1479{
7306ed3f
JW
1480 if (type == 0)
1481 abort ();
1482
6de9cd9a
DN
1483 if (type == error_mark_node)
1484 return;
1485
7306ed3f
JW
1486 /* Do nothing if type has been laid out before. */
1487 if (TYPE_SIZE (type))
1488 return;
1489
7306ed3f
JW
1490 switch (TREE_CODE (type))
1491 {
1492 case LANG_TYPE:
1493 /* This kind of type is the responsibility
9faa82d8 1494 of the language-specific code. */
7306ed3f
JW
1495 abort ();
1496
2d76cb1a 1497 case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */
e9a25f70 1498 if (TYPE_PRECISION (type) == 0)
2d76cb1a 1499 TYPE_PRECISION (type) = 1; /* default to one byte/boolean. */
d4b60170 1500
2d76cb1a 1501 /* ... fall through ... */
e9a25f70 1502
7306ed3f
JW
1503 case INTEGER_TYPE:
1504 case ENUMERAL_TYPE:
fc69eca0 1505 case CHAR_TYPE:
e2a77f99
RK
1506 if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
1507 && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
8df83eae 1508 TYPE_UNSIGNED (type) = 1;
7306ed3f 1509
5e9bec99
RK
1510 TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
1511 MODE_INT);
06ceef4e 1512 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
ead17059 1513 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
7306ed3f
JW
1514 break;
1515
1516 case REAL_TYPE:
1517 TYPE_MODE (type) = mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0);
06ceef4e 1518 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
ead17059 1519 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
7306ed3f
JW
1520 break;
1521
1522 case COMPLEX_TYPE:
8df83eae 1523 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
7306ed3f
JW
1524 TYPE_MODE (type)
1525 = mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
8df83eae
RK
1526 (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
1527 ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
7306ed3f 1528 0);
06ceef4e 1529 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
ead17059 1530 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
7306ed3f
JW
1531 break;
1532
0b4565c9 1533 case VECTOR_TYPE:
26277d41
PB
1534 {
1535 int nunits = TYPE_VECTOR_SUBPARTS (type);
7d60be94 1536 tree nunits_tree = build_int_cst (NULL_TREE, nunits);
26277d41
PB
1537 tree innertype = TREE_TYPE (type);
1538
1539 if (nunits & (nunits - 1))
1540 abort ();
1541
1542 /* Find an appropriate mode for the vector type. */
1543 if (TYPE_MODE (type) == VOIDmode)
1544 {
1545 enum machine_mode innermode = TYPE_MODE (innertype);
1546 enum machine_mode mode;
1547
1548 /* First, look for a supported vector type. */
1549 if (GET_MODE_CLASS (innermode) == MODE_FLOAT)
1550 mode = MIN_MODE_VECTOR_FLOAT;
1551 else
1552 mode = MIN_MODE_VECTOR_INT;
1553
1554 for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
1555 if (GET_MODE_NUNITS (mode) == nunits
1556 && GET_MODE_INNER (mode) == innermode
f676971a 1557 && targetm.vector_mode_supported_p (mode))
26277d41
PB
1558 break;
1559
1560 /* For integers, try mapping it to a same-sized scalar mode. */
1561 if (mode == VOIDmode
1562 && GET_MODE_CLASS (innermode) == MODE_INT)
1563 mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
1564 MODE_INT, 0);
1565
1566 if (mode == VOIDmode || !have_regs_of_mode[mode])
1567 TYPE_MODE (type) = BLKmode;
1568 else
1569 TYPE_MODE (type) = mode;
1570 }
1571
1572 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
1573 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
1574 TYPE_SIZE_UNIT (innertype),
1575 nunits_tree, 0);
1576 TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
1577 nunits_tree, 0);
1578 break;
1579 }
0b4565c9 1580
7306ed3f 1581 case VOID_TYPE:
770ae6cc 1582 /* This is an incomplete type and so doesn't have a size. */
7306ed3f 1583 TYPE_ALIGN (type) = 1;
11cf4d18 1584 TYPE_USER_ALIGN (type) = 0;
7306ed3f
JW
1585 TYPE_MODE (type) = VOIDmode;
1586 break;
1587
321cb743 1588 case OFFSET_TYPE:
06ceef4e 1589 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
ead17059 1590 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
25caaba8
R
1591 /* A pointer might be MODE_PARTIAL_INT,
1592 but ptrdiff_t must be integral. */
1593 TYPE_MODE (type) = mode_for_size (POINTER_SIZE, MODE_INT, 0);
321cb743
MT
1594 break;
1595
7306ed3f
JW
1596 case FUNCTION_TYPE:
1597 case METHOD_TYPE:
019dd4ec
RK
1598 /* It's hard to see what the mode and size of a function ought to
1599 be, but we do know the alignment is FUNCTION_BOUNDARY, so
1600 make it consistent with that. */
1601 TYPE_MODE (type) = mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0);
1602 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
1603 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7306ed3f
JW
1604 break;
1605
1606 case POINTER_TYPE:
1607 case REFERENCE_TYPE:
b5d6a2ff 1608 {
b5d6a2ff 1609
4977bab6
ZW
1610 enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE
1611 && reference_types_internal)
1612 ? Pmode : TYPE_MODE (type));
1613
1614 int nbits = GET_MODE_BITSIZE (mode);
1615
b5d6a2ff 1616 TYPE_SIZE (type) = bitsize_int (nbits);
4977bab6 1617 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
8df83eae 1618 TYPE_UNSIGNED (type) = 1;
b5d6a2ff
RK
1619 TYPE_PRECISION (type) = nbits;
1620 }
7306ed3f
JW
1621 break;
1622
1623 case ARRAY_TYPE:
1624 {
b3694847
SS
1625 tree index = TYPE_DOMAIN (type);
1626 tree element = TREE_TYPE (type);
7306ed3f
JW
1627
1628 build_pointer_type (element);
1629
1630 /* We need to know both bounds in order to compute the size. */
1631 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
1632 && TYPE_SIZE (element))
1633 {
e24ff973
RK
1634 tree ub = TYPE_MAX_VALUE (index);
1635 tree lb = TYPE_MIN_VALUE (index);
1636 tree length;
74a4fbfc 1637 tree element_size;
e24ff973 1638
a2d53b28
RH
1639 /* The initial subtraction should happen in the original type so
1640 that (possible) negative values are handled appropriately. */
e24ff973 1641 length = size_binop (PLUS_EXPR, size_one_node,
fed3cef0 1642 convert (sizetype,
3244e67d
RS
1643 fold (build2 (MINUS_EXPR,
1644 TREE_TYPE (lb),
1645 ub, lb))));
7306ed3f 1646
74a4fbfc
DB
1647 /* Special handling for arrays of bits (for Chill). */
1648 element_size = TYPE_SIZE (element);
382110c0
RK
1649 if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element)
1650 && (integer_zerop (TYPE_MAX_VALUE (element))
1651 || integer_onep (TYPE_MAX_VALUE (element)))
1652 && host_integerp (TYPE_MIN_VALUE (element), 1))
74a4fbfc 1653 {
d4b60170 1654 HOST_WIDE_INT maxvalue
382110c0 1655 = tree_low_cst (TYPE_MAX_VALUE (element), 1);
d4b60170 1656 HOST_WIDE_INT minvalue
382110c0 1657 = tree_low_cst (TYPE_MIN_VALUE (element), 1);
d4b60170 1658
74a4fbfc
DB
1659 if (maxvalue - minvalue == 1
1660 && (maxvalue == 1 || maxvalue == 0))
1661 element_size = integer_one_node;
1662 }
1663
0d3c8800
RK
1664 /* If neither bound is a constant and sizetype is signed, make
1665 sure the size is never negative. We should really do this
1666 if *either* bound is non-constant, but this is the best
1667 compromise between C and Ada. */
8df83eae 1668 if (!TYPE_UNSIGNED (sizetype)
0d3c8800
RK
1669 && TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
1670 && TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
1671 length = size_binop (MAX_EXPR, length, size_zero_node);
1672
fed3cef0
RK
1673 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
1674 convert (bitsizetype, length));
ead17059
RH
1675
1676 /* If we know the size of the element, calculate the total
 1677 size directly, rather than deriving it by division below.
1678 This optimization helps Fortran assumed-size arrays
1679 (where the size of the array is determined at runtime)
7771032e
DB
1680 substantially.
1681 Note that we can't do this in the case where the size of
1682 the elements is one bit since TYPE_SIZE_UNIT cannot be
1683 set correctly in that case. */
fed3cef0 1684 if (TYPE_SIZE_UNIT (element) != 0 && ! integer_onep (element_size))
d4b60170
RK
1685 TYPE_SIZE_UNIT (type)
1686 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
7306ed3f
JW
1687 }
1688
1689 /* Now round the alignment and size,
1690 using machine-dependent criteria if any. */
1691
1692#ifdef ROUND_TYPE_ALIGN
1693 TYPE_ALIGN (type)
1694 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
1695#else
1696 TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
1697#endif
c163d21d 1698 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
7306ed3f
JW
1699 TYPE_MODE (type) = BLKmode;
1700 if (TYPE_SIZE (type) != 0
31a02448 1701#ifdef MEMBER_TYPE_FORCES_BLK
182e515e 1702 && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
31a02448 1703#endif
7306ed3f
JW
1704 /* BLKmode elements force BLKmode aggregate;
1705 else extract/store fields may lose. */
1706 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
1707 || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
1708 {
a1471322
RK
1709 /* One-element arrays get the component type's mode. */
1710 if (simple_cst_equal (TYPE_SIZE (type),
1711 TYPE_SIZE (TREE_TYPE (type))))
1712 TYPE_MODE (type) = TYPE_MODE (TREE_TYPE (type));
1713 else
1714 TYPE_MODE (type)
1715 = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);
7306ed3f 1716
72c602fc
RK
1717 if (TYPE_MODE (type) != BLKmode
1718 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
1719 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type))
7306ed3f
JW
1720 && TYPE_MODE (type) != BLKmode)
1721 {
1722 TYPE_NO_FORCE_BLK (type) = 1;
1723 TYPE_MODE (type) = BLKmode;
1724 }
7306ed3f
JW
1725 }
1726 break;
1727 }
1728
1729 case RECORD_TYPE:
cc9d4a85
MM
1730 case UNION_TYPE:
1731 case QUAL_UNION_TYPE:
9328904c
MM
1732 {
1733 tree field;
1734 record_layout_info rli;
1735
1736 /* Initialize the layout information. */
770ae6cc
RK
1737 rli = start_record_layout (type);
1738
cc9d4a85
MM
1739 /* If this is a QUAL_UNION_TYPE, we want to process the fields
1740 in the reverse order in building the COND_EXPR that denotes
1741 its size. We reverse them again later. */
1742 if (TREE_CODE (type) == QUAL_UNION_TYPE)
1743 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
770ae6cc
RK
1744
1745 /* Place all the fields. */
9328904c 1746 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
770ae6cc
RK
1747 place_field (rli, field);
1748
cc9d4a85
MM
1749 if (TREE_CODE (type) == QUAL_UNION_TYPE)
1750 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
770ae6cc 1751
e0cea8d9
RK
1752 if (lang_adjust_rli)
1753 (*lang_adjust_rli) (rli);
1754
9328904c 1755 /* Finish laying out the record. */
17bbb839 1756 finish_record_layout (rli, /*free_p=*/true);
9328904c 1757 }
7306ed3f
JW
1758 break;
1759
2d76cb1a 1760 case SET_TYPE: /* Used by Chill and Pascal. */
b5d11e41
PB
1761 if (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST
1762 || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST)
cf403648 1763 abort ();
b5d11e41
PB
1764 else
1765 {
1766#ifndef SET_WORD_SIZE
1767#define SET_WORD_SIZE BITS_PER_WORD
1768#endif
729a2125
RK
1769 unsigned int alignment
1770 = set_alignment ? set_alignment : SET_WORD_SIZE;
0384674e
RK
1771 HOST_WIDE_INT size_in_bits
1772 = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
1773 - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1);
1774 HOST_WIDE_INT rounded_size
b5d11e41 1775 = ((size_in_bits + alignment - 1) / alignment) * alignment;
729a2125
RK
1776
1777 if (rounded_size > (int) alignment)
b5d11e41
PB
1778 TYPE_MODE (type) = BLKmode;
1779 else
1780 TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1);
729a2125 1781
06ceef4e 1782 TYPE_SIZE (type) = bitsize_int (rounded_size);
ead17059 1783 TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT);
b5d11e41 1784 TYPE_ALIGN (type) = alignment;
11cf4d18 1785 TYPE_USER_ALIGN (type) = 0;
b5d11e41
PB
1786 TYPE_PRECISION (type) = size_in_bits;
1787 }
1788 break;
1789
4cc89e53
RS
1790 case FILE_TYPE:
1791 /* The size may vary in different languages, so the language front end
1792 should fill in the size. */
1793 TYPE_ALIGN (type) = BIGGEST_ALIGNMENT;
11cf4d18 1794 TYPE_USER_ALIGN (type) = 0;
4cc89e53
RS
1795 TYPE_MODE (type) = BLKmode;
1796 break;
1797
7306ed3f
JW
1798 default:
1799 abort ();
729a2125 1800 }
7306ed3f 1801
9328904c 1802 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
cc9d4a85
MM
1803 records and unions, finish_record_layout already called this
1804 function. */
786de7eb 1805 if (TREE_CODE (type) != RECORD_TYPE
cc9d4a85
MM
1806 && TREE_CODE (type) != UNION_TYPE
1807 && TREE_CODE (type) != QUAL_UNION_TYPE)
9328904c 1808 finalize_type_size (type);
7306ed3f 1809
dc5041ab
JJ
1810 /* If an alias set has been set for this aggregate when it was incomplete,
1811 force it into alias set 0.
1812 This is too conservative, but we cannot call record_component_aliases
1813 here because some frontends still change the aggregates after
1814 layout_type. */
1815 if (AGGREGATE_TYPE_P (type) && TYPE_ALIAS_SET_KNOWN_P (type))
1816 TYPE_ALIAS_SET (type) = 0;
7306ed3f
JW
1817}
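/* Illustrative sketch only (not part of the original file): laying out an
   "array of 10 ints" type through the entry point above.  The TYPE_SIZE
   check mirrors the comment before layout_type: the call is a no-op if
   the type was already laid out (build_array_type may have done so).
   The helper name is hypothetical.  */

static tree
example_layout_int_array_10 (void)
{
  tree domain = build_index_type (size_int (9));
  tree t = build_array_type (integer_type_node, domain);

  if (TYPE_SIZE (t) == 0)
    layout_type (t);

  return t;
}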
1818\f
1819/* Create and return a type for signed integers of PRECISION bits. */
1820
1821tree
46c5ad27 1822make_signed_type (int precision)
7306ed3f 1823{
b3694847 1824 tree type = make_node (INTEGER_TYPE);
7306ed3f
JW
1825
1826 TYPE_PRECISION (type) = precision;
1827
fed3cef0 1828 fixup_signed_type (type);
7306ed3f
JW
1829 return type;
1830}
1831
1832/* Create and return a type for unsigned integers of PRECISION bits. */
1833
1834tree
46c5ad27 1835make_unsigned_type (int precision)
7306ed3f 1836{
b3694847 1837 tree type = make_node (INTEGER_TYPE);
7306ed3f
JW
1838
1839 TYPE_PRECISION (type) = precision;
1840
7306ed3f
JW
1841 fixup_unsigned_type (type);
1842 return type;
1843}
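/* Illustrative sketch only (not part of the original file): a front end
   creating a named 16-bit signed integer type with the helpers above.
   The identifier is hypothetical.  */

static tree
example_make_int16_type (void)
{
  tree t = make_signed_type (16);

  TYPE_NAME (t) = get_identifier ("example_int16");
  return t;
}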
fed3cef0
RK
1844\f
1845/* Initialize sizetype and bitsizetype to a reasonable and temporary
1846 value to enable integer types to be created. */
1847
1848void
8c1d6d62 1849initialize_sizetypes (bool signed_p)
fed3cef0
RK
1850{
1851 tree t = make_node (INTEGER_TYPE);
1852
fed3cef0
RK
1853 TYPE_MODE (t) = SImode;
1854 TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
11cf4d18 1855 TYPE_USER_ALIGN (t) = 0;
3224bead 1856 TYPE_IS_SIZETYPE (t) = 1;
8c1d6d62 1857 TYPE_UNSIGNED (t) = !signed_p;
7d60be94
NS
1858 TYPE_SIZE (t) = build_int_cst (t, GET_MODE_BITSIZE (SImode));
1859 TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
fed3cef0 1860 TYPE_PRECISION (t) = GET_MODE_BITSIZE (SImode);
7d60be94 1861 TYPE_MIN_VALUE (t) = build_int_cst (t, 0);
fed3cef0
RK
1862
1863 /* 1000 avoids problems with possible overflow and is certainly
1864 larger than any size value we'd want to be storing. */
7d60be94 1865 TYPE_MAX_VALUE (t) = build_int_cst (t, 1000);
fed3cef0 1866
fed3cef0 1867 sizetype = t;
8c1d6d62 1868 bitsizetype = build_distinct_type_copy (t);
fed3cef0 1869}
7306ed3f 1870
8c1d6d62
NS
1871/* Make sizetype a version of TYPE, and initialize *sizetype
1872 accordingly. We do this by overwriting the stub sizetype and
1873 bitsizetype nodes created by initialize_sizetypes. This makes sure
1874 that (a) anything stubby about them no longer exists, (b) any
 1875 INTEGER_CSTs created with such a type remain valid. */
f8dac6eb
R
1876
1877void
46c5ad27 1878set_sizetype (tree type)
f8dac6eb 1879{
d4b60170 1880 int oprecision = TYPE_PRECISION (type);
f8dac6eb 1881 /* The *bitsizetype types use a precision that avoids overflows when
d4b60170
RK
1882 calculating signed sizes / offsets in bits. However, when
1883 cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
1884 precision. */
11a6092b 1885 int precision = MIN (oprecision + BITS_PER_UNIT_LOG + 1,
d4b60170 1886 2 * HOST_BITS_PER_WIDE_INT);
ad41cc2a 1887 tree t;
fed3cef0 1888
8c1d6d62
NS
1889 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (sizetype))
1890 abort ();
81b3411c 1891
8c1d6d62
NS
1892 t = build_distinct_type_copy (type);
1893 /* We do want to use sizetype's cache, as we will be replacing that
1894 type. */
1895 TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
1896 TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
1897 TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
1898 TYPE_UID (t) = TYPE_UID (sizetype);
1899 TYPE_IS_SIZETYPE (t) = 1;
1900
1901 /* Replace our original stub sizetype. */
1902 memcpy (sizetype, t, tree_size (sizetype));
1903 TYPE_MAIN_VARIANT (sizetype) = sizetype;
1904
1905 t = make_node (INTEGER_TYPE);
1906 TYPE_NAME (t) = get_identifier ("bit_size_type");
1907 /* We do want to use bitsizetype's cache, as we will be replacing that
1908 type. */
1909 TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
1910 TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
1911 TYPE_PRECISION (t) = precision;
1912 TYPE_UID (t) = TYPE_UID (bitsizetype);
1913 TYPE_IS_SIZETYPE (t) = 1;
1914 /* Replace our original stub bitsizetype. */
1915 memcpy (bitsizetype, t, tree_size (bitsizetype));
1916
8df83eae 1917 if (TYPE_UNSIGNED (type))
896cced4 1918 {
8c1d6d62
NS
1919 fixup_unsigned_type (bitsizetype);
1920 ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
1921 TYPE_IS_SIZETYPE (ssizetype) = 1;
1922 sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
1923 TYPE_IS_SIZETYPE (sbitsizetype) = 1;
896cced4
RH
1924 }
1925 else
1926 {
8c1d6d62 1927 fixup_signed_type (bitsizetype);
896cced4
RH
1928 ssizetype = sizetype;
1929 sbitsizetype = bitsizetype;
896cced4 1930 }
fed3cef0
RK
1931}
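/* Illustrative sketch only (not part of the original file): the usual
   start-up order in a front end is to create the stub sizetypes first,
   build its basic integer types, and only then commit to the real size
   type.  SIZE_TYPE here is a hypothetical stand-in for the node chosen
   to represent the target's size_t.  */

static void
example_init_size_types (tree size_type)
{
  initialize_sizetypes (/*signed_p=*/false);
  /* ... the front end creates integer_type_node and friends here ...  */
  set_sizetype (size_type);
}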
1932\f
7b6d72fc
MM
1933/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE,
1934 BOOLEAN_TYPE, or CHAR_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
1935 for TYPE, based on the PRECISION and whether or not the TYPE
1936 IS_UNSIGNED. PRECISION need not correspond to a width supported
1937 natively by the hardware; for example, on a machine with 8-bit,
1938 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
1939 61. */
1940
1941void
1942set_min_and_max_values_for_integral_type (tree type,
1943 int precision,
1944 bool is_unsigned)
1945{
1946 tree min_value;
1947 tree max_value;
1948
1949 if (is_unsigned)
1950 {
7d60be94 1951 min_value = build_int_cst (type, 0);
f676971a 1952 max_value
7d60be94
NS
1953 = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
1954 ? -1
1955 : ((HOST_WIDE_INT) 1 << precision) - 1,
1956 precision - HOST_BITS_PER_WIDE_INT > 0
1957 ? ((unsigned HOST_WIDE_INT) ~0
1958 >> (HOST_BITS_PER_WIDE_INT
1959 - (precision - HOST_BITS_PER_WIDE_INT)))
1960 : 0);
7b6d72fc
MM
1961 }
1962 else
1963 {
f676971a 1964 min_value
7d60be94
NS
1965 = build_int_cst_wide (type,
1966 (precision - HOST_BITS_PER_WIDE_INT > 0
1967 ? 0
1968 : (HOST_WIDE_INT) (-1) << (precision - 1)),
1969 (((HOST_WIDE_INT) (-1)
1970 << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
1971 ? precision - HOST_BITS_PER_WIDE_INT - 1
1972 : 0))));
7b6d72fc 1973 max_value
7d60be94
NS
1974 = build_int_cst_wide (type,
1975 (precision - HOST_BITS_PER_WIDE_INT > 0
1976 ? -1
1977 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
1978 (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
1979 ? (((HOST_WIDE_INT) 1
1980 << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
1981 : 0));
7b6d72fc
MM
1982 }
1983
7b6d72fc
MM
1984 TYPE_MIN_VALUE (type) = min_value;
1985 TYPE_MAX_VALUE (type) = max_value;
1986}
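/* Illustrative sketch only (not part of the original file): with
   PRECISION == 7 the function above yields the range [0, 127] when
   IS_UNSIGNED and [-64, 63] otherwise.  A hand-rolled 7-bit signed type
   could be set up like this; the helper name is hypothetical.  */

static tree
example_make_int7_type (void)
{
  tree t = make_node (INTEGER_TYPE);

  TYPE_PRECISION (t) = 7;
  set_min_and_max_values_for_integral_type (t, 7, /*is_unsigned=*/false);
  layout_type (t);
  return t;
}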
1987
4cc89e53 1988/* Set the extreme values of TYPE based on its precision in bits,
13756074 1989 then lay it out. Used when make_signed_type won't do
4cc89e53
RS
1990 because the tree code is not INTEGER_TYPE.
1991 E.g. for Pascal, when the -fsigned-char option is given. */
1992
1993void
46c5ad27 1994fixup_signed_type (tree type)
4cc89e53 1995{
b3694847 1996 int precision = TYPE_PRECISION (type);
4cc89e53 1997
9cd56be1
JH
 1998 /* We cannot properly represent constants wider than
 1999 2 * HOST_BITS_PER_WIDE_INT bits, but we still need such types,
 2000 as they are used by the i386 vector extensions and friends. */
2001 if (precision > HOST_BITS_PER_WIDE_INT * 2)
2002 precision = HOST_BITS_PER_WIDE_INT * 2;
2003
f676971a 2004 set_min_and_max_values_for_integral_type (type, precision,
7b6d72fc 2005 /*is_unsigned=*/false);
4cc89e53
RS
2006
2007 /* Lay out the type: set its alignment, size, etc. */
4cc89e53
RS
2008 layout_type (type);
2009}
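/* Illustrative sketch only (not part of the original file): the kind of
   use the comment above describes, a front end building a signed
   character type whose tree code is CHAR_TYPE rather than INTEGER_TYPE.
   The helper name is hypothetical.  */

static tree
example_make_signed_char_type (void)
{
  tree t = make_node (CHAR_TYPE);

  TYPE_PRECISION (t) = CHAR_TYPE_SIZE;
  fixup_signed_type (t);
  return t;
}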
2010
7306ed3f 2011/* Set the extreme values of TYPE based on its precision in bits,
13756074 2012 then lay it out. This is used both in `make_unsigned_type'
7306ed3f
JW
2013 and for enumeral types. */
2014
2015void
46c5ad27 2016fixup_unsigned_type (tree type)
7306ed3f 2017{
b3694847 2018 int precision = TYPE_PRECISION (type);
7306ed3f 2019
9cd56be1
JH
 2020 /* We cannot properly represent constants wider than
 2021 2 * HOST_BITS_PER_WIDE_INT bits, but we still need such types,
 2022 as they are used by the i386 vector extensions and friends. */
2023 if (precision > HOST_BITS_PER_WIDE_INT * 2)
2024 precision = HOST_BITS_PER_WIDE_INT * 2;
2025
89b0433e 2026 TYPE_UNSIGNED (type) = 1;
f676971a
EC
2027
2028 set_min_and_max_values_for_integral_type (type, precision,
7b6d72fc 2029 /*is_unsigned=*/true);
7306ed3f
JW
2030
2031 /* Lay out the type: set its alignment, size, etc. */
7306ed3f
JW
2032 layout_type (type);
2033}
2034\f
2035/* Find the best machine mode to use when referencing a bit field of length
2036 BITSIZE bits starting at BITPOS.
2037
2038 The underlying object is known to be aligned to a boundary of ALIGN bits.
2039 If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
2040 larger than LARGEST_MODE (usually SImode).
2041
2042 If no mode meets all these conditions, we return VOIDmode. Otherwise, if
2043 VOLATILEP is true or SLOW_BYTE_ACCESS is false, we return the smallest
2044 mode meeting these conditions.
2045
77fa0940
RK
2046 Otherwise (VOLATILEP is false and SLOW_BYTE_ACCESS is true), we return
2047 the largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2048 all the conditions. */
7306ed3f
JW
2049
2050enum machine_mode
46c5ad27
AJ
2051get_best_mode (int bitsize, int bitpos, unsigned int align,
2052 enum machine_mode largest_mode, int volatilep)
7306ed3f
JW
2053{
2054 enum machine_mode mode;
770ae6cc 2055 unsigned int unit = 0;
7306ed3f
JW
2056
2057 /* Find the narrowest integer mode that contains the bit field. */
2058 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
2059 mode = GET_MODE_WIDER_MODE (mode))
2060 {
2061 unit = GET_MODE_BITSIZE (mode);
956d6950 2062 if ((bitpos % unit) + bitsize <= unit)
7306ed3f
JW
2063 break;
2064 }
2065
0c61f541 2066 if (mode == VOIDmode
7306ed3f 2067 /* It is tempting to omit the following line
4e4b555d 2068 if STRICT_ALIGNMENT is true.
7306ed3f
JW
2069 But that is incorrect, since if the bitfield uses part of 3 bytes
2070 and we use a 4-byte mode, we could get a spurious segv
2071 if the extra 4th byte is past the end of memory.
2072 (Though at least one Unix compiler ignores this problem:
 2073 that on the Sequent 386 machine.) */
770ae6cc 2074 || MIN (unit, BIGGEST_ALIGNMENT) > align
7306ed3f
JW
2075 || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
2076 return VOIDmode;
2077
77fa0940
RK
2078 if (SLOW_BYTE_ACCESS && ! volatilep)
2079 {
2080 enum machine_mode wide_mode = VOIDmode, tmode;
2081
2082 for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
2083 tmode = GET_MODE_WIDER_MODE (tmode))
2084 {
2085 unit = GET_MODE_BITSIZE (tmode);
2086 if (bitpos / unit == (bitpos + bitsize - 1) / unit
2087 && unit <= BITS_PER_WORD
770ae6cc 2088 && unit <= MIN (align, BIGGEST_ALIGNMENT)
77fa0940
RK
2089 && (largest_mode == VOIDmode
2090 || unit <= GET_MODE_BITSIZE (largest_mode)))
2091 wide_mode = tmode;
2092 }
2093
2094 if (wide_mode != VOIDmode)
2095 return wide_mode;
2096 }
7306ed3f
JW
2097
2098 return mode;
2099}
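/* Illustrative sketch only (not part of the original file): asking for
   the best mode with which to access a 3-bit field starting at bit 17 of
   an object aligned to 32 bits, with no upper bound on the mode and no
   volatility.  On a typical 32-bit target this yields QImode, or SImode
   when SLOW_BYTE_ACCESS is nonzero.  */

static enum machine_mode
example_bitfield_access_mode (void)
{
  return get_best_mode (/*bitsize=*/3, /*bitpos=*/17, /*align=*/32,
			/*largest_mode=*/VOIDmode, /*volatilep=*/0);
}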
d7db6646 2100
50654f6c 2101/* Get the minimal and maximal values for MODE (signed or unsigned depending on
0aea6467 2102 SIGN). The returned constants are made to be usable in TARGET_MODE. */
50654f6c
ZD
2103
2104void
0aea6467
ZD
2105get_mode_bounds (enum machine_mode mode, int sign,
2106 enum machine_mode target_mode,
2107 rtx *mmin, rtx *mmax)
50654f6c 2108{
0aea6467
ZD
2109 unsigned size = GET_MODE_BITSIZE (mode);
2110 unsigned HOST_WIDE_INT min_val, max_val;
50654f6c
ZD
2111
2112 if (size > HOST_BITS_PER_WIDE_INT)
2113 abort ();
2114
2115 if (sign)
2116 {
0aea6467
ZD
2117 min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
2118 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
50654f6c
ZD
2119 }
2120 else
2121 {
0aea6467
ZD
2122 min_val = 0;
2123 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
50654f6c 2124 }
0aea6467
ZD
2125
2126 *mmin = GEN_INT (trunc_int_for_mode (min_val, target_mode));
2127 *mmax = GEN_INT (trunc_int_for_mode (max_val, target_mode));
50654f6c
ZD
2128}
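/* Illustrative sketch only (not part of the original file): fetching the
   signed range of QImode, [-128, 127], as constants usable in SImode.  */

static void
example_qimode_signed_bounds (rtx *mmin, rtx *mmax)
{
  get_mode_bounds (QImode, /*sign=*/1, /*target_mode=*/SImode, mmin, mmax);
}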
2129
e2500fed 2130#include "gt-stor-layout.h"