/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "output.h"
#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"
#include "cgraph.h"
#include "tree-inline.h"
#include "tree-dump.h"
#include "gimple.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
/* ... and its original value in bytes, specified via -fpack-struct=<value>.  */
unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
   in the address spaces' address_mode, not pointer_mode.   Set only by
   internal_reference_types called only by a front end.  */
static int reference_types_internal = 0;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);

/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded.  */

static GTY(()) tree pending_sizes;

/* Show that REFERENCE_TYPES are internal and should use address_mode.
   Called only by front end.  */

void
internal_reference_types (void)
{
  reference_types_internal = 1;
}

/* Get a list of all the objects put on the pending sizes list.  */

tree
get_pending_sizes (void)
{
  tree chain = pending_sizes;

  pending_sizes = 0;
  return chain;
}

/* Add EXPR to the pending sizes list.  */

void
put_pending_size (tree expr)
{
  /* Strip any simple arithmetic from EXPR to see if it has an underlying
     SAVE_EXPR.  */
  expr = skip_simple_arithmetic (expr);

  if (TREE_CODE (expr) == SAVE_EXPR)
    pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
}

/* Put a chain of objects into the pending sizes list, which must be
   empty.  */

void
put_pending_sizes (tree chain)
{
  gcc_assert (!pending_sizes);
  pending_sizes = chain;
}

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  tree save;

  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If the language-processor is to take responsibility for variable-sized
     items (e.g., languages which have elaboration procedures like Ada),
     just return SIZE unchanged.  */
  if (lang_hooks.decls.global_bindings_p () < 0)
    return size;

  size = save_expr (size);

  /* If an array with a variable number of elements is declared, and
     the elements require destruction, we will emit a cleanup for the
     array.  That cleanup is run both on normal exit from the block
     and in the exception-handler for the block.  Normally, when code
     is used in both ordinary code and in an exception handler it is
     `unsaved', i.e., all SAVE_EXPRs are recalculated.  However, we do
     not wish to do that here; the array-size is the same in both
     places.  */
  save = skip_simple_arithmetic (size);

  if (cfun && cfun->dont_save_pending_sizes_p)
    /* The front-end doesn't want us to keep a list of the expressions
       that determine sizes for variable size objects.  Trust it.  */
    return size;

  if (lang_hooks.decls.global_bindings_p ())
    {
      if (TREE_CONSTANT (size))
        error ("type size can%'t be explicitly evaluated");
      else
        error ("variable-size type declared outside of any function");

      return size_one_node;
    }

  put_pending_size (save);

  return size;
}
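
/* Worked example (illustrative, assuming a C99 front end on a typical
   8-bit-byte target): for a variable-length array such as

       void f (int n) { char buf[n]; }

   the array type's size in bits is the non-constant expression n * 8,
   so variable_size wraps it as SAVE_EXPR <n * 8>; the SAVE_EXPR makes
   the size be computed once and reused, and put_pending_size queues it
   so the computation can be emitted at a safe point.  */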

/* An array of functions used for self-referential size computation.  */
static GTY(()) VEC (tree, gc) *size_functions;

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (code == COMPONENT_REF)
    {
      tree inner;
      for (inner = TREE_OPERAND (*tp, 0);
           REFERENCE_CLASS_P (inner);
           inner = TREE_OPERAND (inner, 0))
        ;

      if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
        {
          *walk_subtrees = 0;
          return NULL_TREE;
        }
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  VEC (tree, heap) *self_refs = NULL;
  tree param_type_list = NULL, param_decl_list = NULL, arg_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];

  /* Do not factor out simple operations.  */
  t = skip_simple_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR)
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (VEC_length (tree, self_refs) > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  for (i = 0; VEC_iterate (tree, self_refs, i, ref); i++)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      if (targetm.calls.promote_prototypes (NULL_TREE)
          && INTEGRAL_TYPE_P (param_type)
          && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
        DECL_ARG_TYPE (param_decl) = integer_type_node;
      else
        DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      arg_list = tree_cons (NULL_TREE, ref, arg_list);
    }

  VEC_free (tree, heap, self_refs);

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);
  arg_list = nreverse (arg_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ"HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = TREE_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  VEC_safe_push (tree, gc, size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_function_call_expr (input_location, fndecl, arg_list);
}
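
/* Illustrative sketch of the transformation above: given a self-referential
   size such as <PLACEHOLDER_EXPR>.n * 8, the code factors it into roughly

       static inline sizetype SZ0 (sizetype p0) { return p0 * 8; }

   (a C rendering; the function is really built directly as trees) and
   replaces the size with the call SZ0 (<PLACEHOLDER_EXPR>.n), so that each
   use site can later substitute its own object for the placeholder.  */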

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; VEC_iterate(tree, size_functions, i, fndecl); i++)
    {
      dump_function (TDI_original, fndecl);
      gimplify_function_tree (fndecl);
      dump_function (TDI_generic, fndecl);
      cgraph_finalize_function (fndecl, false);
    }

  VEC_free (tree, gc, size_functions);
}

#ifndef MAX_FIXED_MODE_SIZE
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
#endif

/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class MCLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes wider
   than MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class mclass, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}
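
/* Illustrative example: on a target whose integer modes are QImode (8),
   HImode (16), SImode (32) and DImode (64), mode_for_size (32, MODE_INT, 0)
   walks QImode and HImode and returns SImode, while
   mode_for_size (24, MODE_INT, 0) finds no exact match and returns
   BLKmode.  */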

/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!host_integerp (size, 1))
    return BLKmode;
  uhwi = tree_low_cst (size, 1);
  ui = uhwi;
  if (uhwi != ui)
    return BLKmode;
  return mode_for_size (ui, mclass, limit);
}

/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class mclass)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  gcc_unreachable ();
}
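
/* Illustrative example: with the integer modes above,
   smallest_mode_for_size (17, MODE_INT) returns SImode, the narrowest mode
   holding at least 17 value bits; requesting more bits than the widest mode
   provides ends in gcc_unreachable.  */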

/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
        break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}
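
/* Illustrative example: int_mode_for_mode (SFmode) returns the integer mode
   with SFmode's bit size (SImode where both are 32 bits wide), which is
   handy when a value has to be moved around purely as a bit pattern.  */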

/* Return the alignment of MODE. This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}


/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
#ifdef PCC_BITFIELD_TYPE_MATTERS
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
#endif
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode
             and occupying a complete byte or bytes on proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              enum machine_mode xmode
                = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
              unsigned int xalign = GET_MODE_ALIGNMENT (xmode);

              if (xmode != BLKmode
                  && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                  && (known_align == 0 || known_align >= xalign))
                {
                  DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
                  DECL_MODE (decl) = xmode;
                  DECL_BIT_FIELD (decl) = 0;
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (e.g. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          DECL_ALIGN (decl)
            = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
          DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
          && compare_tree_int (size, larger_than_size) > 0)
        {
          int size_as_int = TREE_INT_CST_LOW (size);

          if (compare_tree_int (size, size_as_int) == 0)
            warning (OPT_Wlarger_than_eq, "size of %q+D is %d bytes", decl, size_as_int);
          else
            warning (OPT_Wlarger_than_eq, "size of %q+D is larger than %wd bytes",
                     decl, larger_than_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
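
/* Worked example (illustrative): for

       struct s { int x : 2; };

   the front end sets DECL_SIZE (x) to 2 bits even though TYPE_SIZE of int
   is 32 on common targets; the code above then derives DECL_SIZE_UNIT as
   CEIL (2 / 8) = 1 byte and leaves DECL_BIT_FIELD set, since no integer
   mode of exactly 2 bits exists.  */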

/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  if (!DECL_USER_ALIGN (decl))
    DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}

/* These four routines perform computations that convert between
   the offset/bitpos forms and byte and bit offsets.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR,
                                 fold_convert (bitsizetype, offset),
                                 bitsize_unit_node));
}

tree
byte_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, offset,
                     fold_convert (sizetype,
                                   size_binop (TRUNC_DIV_EXPR, bitpos,
                                               bitsize_unit_node)));
}

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  *poffset = size_binop (MULT_EXPR,
                         fold_convert (sizetype,
                                       size_binop (FLOOR_DIV_EXPR, pos,
                                                   bitsize_int (off_align))),
                         size_int (off_align / BITS_PER_UNIT));
  *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}
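
/* Worked example (illustrative): with offset = 3 bytes and bitpos = 5,
   bit_from_pos yields 5 + 3 * 8 = 29 bits and byte_from_pos yields
   3 + 5 / 8 = 3 bytes; conversely, pos_from_bit with off_align = 8 and
   pos = 29 recovers offset = (29 / 8) * 1 = 3 bytes and
   bitpos = 29 % 8 = 5.  */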

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
                                      bitsize_int (off_align));

      *poffset
        = size_binop (PLUS_EXPR, *poffset,
                      size_binop (MULT_EXPR,
                                  fold_convert (sizetype, extra_aligns),
                                  size_int (off_align / BITS_PER_UNIT)));

      *pbitpos
        = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
    }
}
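
/* Worked example (illustrative): with off_align = 32, *poffset = 0 and
   *pbitpos = 70, extra_aligns = 70 / 32 = 2, so *poffset grows by
   2 * (32 / 8) = 8 bytes and *pbitpos becomes 70 % 32 = 6; the pair
   (8 bytes, 6 bits) still denotes the original 70 bits.  */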

/* Print debugging information about the information in RLI.  */

void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);

  /* The ms_struct code is the only code that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (rli->pending_statics)
    {
      fprintf (stderr, "pending statics:\n");
      debug_tree (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.) */
      if ((!is_bitfield && !DECL_PACKED (field))
          || (!integer_zerop (DECL_SIZE (field))
              ? !DECL_PACKED (field)
              : (rli->prev_field
                 && DECL_BIT_FIELD_TYPE (rli->prev_field)
                 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
        }
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

          /* Targets might choose to handle unnamed and hence possibly
             zero-width bitfields.  Those are not influenced by #pragmas
             or packed attributes.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              if (initial_max_fld_align)
                type_align = MIN (type_align,
                                  initial_max_fld_align * BITS_PER_UNIT);
            }
          else if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union. This helps when parsing
     invalid fields. */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3_loc (input_location, COND_EXPR, sizetype,
                               DECL_QUALIFIER (field),
                               DECL_SIZE_UNIT (field), rli->offset);
}
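
/* Illustrative example: for

       union u { char c; double d; };

   each member is placed at offset 0 and the union's running size is the
   MAX_EXPR of the member sizes, 8 bytes here on common targets; for a
   QUAL_UNION_TYPE the size is instead a COND_EXPR picking the size of the
   member whose DECL_QUALIFIER evaluates to true.  */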

#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
             / align));
}
#endif
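
/* Worked example (illustrative, with BITS_PER_UNIT == 8): for an int
   bitfield unit with align = 32 and TYPE_SIZE = 32, a field of size = 8 at
   byte_offset = 3, bit_offset = 6 gives offset = 30 % 32 = 30, and
   (30 + 8 + 31) / 32 = 2 > 32 / 32 = 1, so the field would straddle two
   units and excess_unit_span returns nonzero; at bit_offset = 0 it fits,
   since (24 + 8 + 31) / 32 = 1.  */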
1022
 
1023
/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
1024
   is a FIELD_DECL to be added after those fields already present in
1025
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
1026
   callers that desire that behavior must manually perform that step.)  */
1027
 
1028
void
1029
place_field (record_layout_info rli, tree field)
1030
{
1031
  /* The alignment required for FIELD.  */
1032
  unsigned int desired_align;
1033
  /* The alignment FIELD would have if we just dropped it into the
1034
     record as it presently stands.  */
1035
  unsigned int known_align;
1036
  unsigned int actual_align;
1037
  /* The type of this field.  */
1038
  tree type = TREE_TYPE (field);
1039
 
1040
  gcc_assert (TREE_CODE (field) != ERROR_MARK);
1041
 
1042
  /* If FIELD is static, then treat it like a separate variable, not
1043
     really like a structure field.  If it is a FUNCTION_DECL, it's a
1044
     method.  In both cases, all we do is lay out the decl, and we do
1045
     it *after* the record is laid out.  */
1046
  if (TREE_CODE (field) == VAR_DECL)
1047
    {
1048
      rli->pending_statics = tree_cons (NULL_TREE, field,
1049
                                        rli->pending_statics);
1050
      return;
1051
    }
1052
 
1053
  /* Enumerators and enum types which are local to this class need not
1054
     be laid out.  Likewise for initialized constant fields.  */
1055
  else if (TREE_CODE (field) != FIELD_DECL)
1056
    return;
1057
 
1058
  /* Unions are laid out very differently than records, so split
1059
     that code off to another function.  */
1060
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
1061
    {
1062
      place_union_field (rli, field);
1063
      return;
1064
    }
1065
 
1066
  else if (TREE_CODE (type) == ERROR_MARK)
1067
    {
1068
      /* Place this field at the current allocation position, so we
1069
         maintain monotonicity.  */
1070
      DECL_FIELD_OFFSET (field) = rli->offset;
1071
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1072
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1073
      return;
1074
    }
1075
 
1076
  /* Work out the known alignment so far.  Note that A & (-A) is the
1077
     value of the least-significant bit in A that is one.  */
1078
  if (! integer_zerop (rli->bitpos))
1079
    known_align = (tree_low_cst (rli->bitpos, 1)
1080
                   & - tree_low_cst (rli->bitpos, 1));
1081
  else if (integer_zerop (rli->offset))
1082
    known_align = 0;
1083
  else if (host_integerp (rli->offset, 1))
1084
    known_align = (BITS_PER_UNIT
1085
                   * (tree_low_cst (rli->offset, 1)
1086
                      & - tree_low_cst (rli->offset, 1)));
1087
  else
1088
    known_align = rli->offset_align;
1089
 
1090
  desired_align = update_alignment_for_field (rli, field, known_align);
1091
  if (known_align == 0)
1092
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1093
 
1094
  if (warn_packed && DECL_PACKED (field))
1095
    {
1096
      if (known_align >= TYPE_ALIGN (type))
1097
        {
1098
          if (TYPE_ALIGN (type) > desired_align)
1099
            {
1100
              if (STRICT_ALIGNMENT)
1101
                warning (OPT_Wattributes, "packed attribute causes "
1102
                         "inefficient alignment for %q+D", field);
1103
              /* Don't warn if DECL_PACKED was set by the type.  */
1104
              else if (!TYPE_PACKED (rli->t))
1105
                warning (OPT_Wattributes, "packed attribute is "
1106
                         "unnecessary for %q+D", field);
1107
            }
1108
        }
1109
      else
1110
        rli->packed_maybe_necessary = 1;
1111
    }
1112
 
1113
  /* Does this field automatically have alignment it needs by virtue
1114
     of the fields that precede it and the record's own alignment?
1115
     We already align ms_struct fields, so don't re-align them.  */
1116
  if (known_align < desired_align
1117
      && !targetm.ms_bitfield_layout_p (rli->t))
1118
    {
1119
      /* No, we need to skip space before this field.
1120
         Bump the cumulative size to multiple of field alignment.  */
1121
 
1122
      if (DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
1123
        warning (OPT_Wpadded, "padding struct to align %q+D", field);
1124
 
1125
      /* If the alignment is still within offset_align, just align
1126
         the bit position.  */
1127
      if (desired_align < rli->offset_align)
1128
        rli->bitpos = round_up (rli->bitpos, desired_align);
1129
      else
1130
        {
1131
          /* First adjust OFFSET by the partial bits, then align.  */
1132
          rli->offset
1133
            = size_binop (PLUS_EXPR, rli->offset,
1134
                          fold_convert (sizetype,
1135
                                        size_binop (CEIL_DIV_EXPR, rli->bitpos,
1136
                                                    bitsize_unit_node)));
1137
          rli->bitpos = bitsize_zero_node;
1138
 
1139
          rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
1140
        }
1141
 
1142
      if (! TREE_CONSTANT (rli->offset))
1143
        rli->offset_align = desired_align;
1144
 
1145
    }
1146
 
1147
  /* Handle compatibility with PCC.  Note that if the record has any
1148
     variable-sized fields, we need not worry about compatibility.  */
1149
#ifdef PCC_BITFIELD_TYPE_MATTERS
1150
  if (PCC_BITFIELD_TYPE_MATTERS
1151
      && ! targetm.ms_bitfield_layout_p (rli->t)
1152
      && TREE_CODE (field) == FIELD_DECL
1153
      && type != error_mark_node
1154
      && DECL_BIT_FIELD (field)
1155
      && (! DECL_PACKED (field)
1156
          /* Enter for these packed fields only to issue a warning.  */
1157
          || TYPE_ALIGN (type) <= BITS_PER_UNIT)
1158
      && maximum_field_alignment == 0
1159
      && ! integer_zerop (DECL_SIZE (field))
1160
      && host_integerp (DECL_SIZE (field), 1)
1161
      && host_integerp (rli->offset, 1)
1162
      && host_integerp (TYPE_SIZE (type), 1))
1163
    {
1164
      unsigned int type_align = TYPE_ALIGN (type);
1165
      tree dsize = DECL_SIZE (field);
1166
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
1167
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
1168
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);
1169
 
1170
#ifdef ADJUST_FIELD_ALIGN
1171
      if (! TYPE_USER_ALIGN (type))
1172
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
1173
#endif
1174
 
1175
      /* A bit field may not span more units of alignment of its type
1176
         than its type itself.  Advance to next boundary if necessary.  */
1177
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1178
        {
1179
          if (DECL_PACKED (field))
1180
            {
1181
              if (warn_packed_bitfield_compat == 1)
1182
                inform
1183
                  (input_location,
1184
                   "Offset of packed bit-field %qD has changed in GCC 4.4",
1185
                   field);
1186
            }
1187
          else
1188
            rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);
1189
        }
1190
 
1191
      if (! DECL_PACKED (field))
1192
        TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1193
    }
1194
#endif
1195
 
1196
#ifdef BITFIELD_NBYTES_LIMITED
1197
  if (BITFIELD_NBYTES_LIMITED
1198
      && ! targetm.ms_bitfield_layout_p (rli->t)
1199
      && TREE_CODE (field) == FIELD_DECL
1200
      && type != error_mark_node
1201
      && DECL_BIT_FIELD_TYPE (field)
1202
      && ! DECL_PACKED (field)
1203
      && ! integer_zerop (DECL_SIZE (field))
1204
      && host_integerp (DECL_SIZE (field), 1)
1205
      && host_integerp (rli->offset, 1)
1206
      && host_integerp (TYPE_SIZE (type), 1))
1207
    {
1208
      unsigned int type_align = TYPE_ALIGN (type);
1209
      tree dsize = DECL_SIZE (field);
1210
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
1211
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
1212
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);
1213
 
1214
#ifdef ADJUST_FIELD_ALIGN
1215
      if (! TYPE_USER_ALIGN (type))
1216
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
1217
#endif
1218
 
1219
      if (maximum_field_alignment != 0)
1220
        type_align = MIN (type_align, maximum_field_alignment);
1221
      /* ??? This test is opposite the test in the containing if
1222
         statement, so this code is unreachable currently.  */
1223
      else if (DECL_PACKED (field))
1224
        type_align = MIN (type_align, BITS_PER_UNIT);
1225
 
1226
      /* A bit field may not span the unit of alignment of its type.
1227
         Advance to next boundary if necessary.  */
1228
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1229
        rli->bitpos = round_up (rli->bitpos, type_align);
1230
 
1231
      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1232
    }
1233
#endif
1234
 
1235
  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
1236
     A subtlety:
1237
        When a bit field is inserted into a packed record, the whole
1238
        size of the underlying type is used by one or more same-size
1239
        adjacent bitfields.  (That is, if it's long:3, 32 bits is
        used in the record, and any additional adjacent long bitfields are
        packed into the same chunk of 32 bits. However, if the size
        changes, a new field of that size is allocated.)  In an unpacked
        record, this is the same as using alignment, but not equivalent
        when packing.

     Note: for compatibility, we use the type size, not the type alignment
     to determine alignment, since that matches the documentation */

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* This is a bitfield if it exists.  */
      if (rli->prev_field)
        {
          /* If both are bitfields, nonzero, and the same size, this is
             the middle of a run.  Zero declared size fields are special
             and handled as "end of run". (Note: it's nonzero declared
             size, but equal type sizes!) (Since we know that both
             the current and previous fields are bitfields by the
             time we check it, DECL_SIZE must be present for both.) */
          if (DECL_BIT_FIELD_TYPE (field)
              && !integer_zerop (DECL_SIZE (field))
              && !integer_zerop (DECL_SIZE (rli->prev_field))
              && host_integerp (DECL_SIZE (rli->prev_field), 0)
              && host_integerp (TYPE_SIZE (type), 0)
              && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
            {
              /* We're in the middle of a run of equal type size fields; make
                 sure we realign if we run out of bits.  (Not decl size,
                 type size!) */
              HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);

              if (rli->remaining_in_alignment < bitsize)
                {
                  HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);

                  /* out of bits; bump up to next 'word'.  */
                  rli->bitpos
                    = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));
                  rli->prev_field = field;
                  if (typesize < bitsize)
                    rli->remaining_in_alignment = 0;
                  else
                    rli->remaining_in_alignment = typesize - bitsize;
                }
              else
                rli->remaining_in_alignment -= bitsize;
            }
          else
            {
              /* End of a run: if leaving a run of bitfields of the same type
                 size, we have to "use up" the rest of the bits of the type
                 size.

                 Compute the new position as the sum of the size for the prior
                 type and where we first started working on that type.
                 Note: since the beginning of the field was aligned then
                 of course the end will be too.  No round needed.  */

              if (!integer_zerop (DECL_SIZE (rli->prev_field)))
                {
                  rli->bitpos
                    = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));
                }
              else
                /* We "use up" size zero fields; the code below should behave
                   as if the prior field was not a bitfield.  */
                prev_saved = NULL;

              /* Cause a new bitfield to be captured, either this time (if
                 currently a bitfield) or next time we see one.  */
              if (!DECL_BIT_FIELD_TYPE(field)
                  || integer_zerop (DECL_SIZE (field)))
                rli->prev_field = NULL;
            }

          normalize_rli (rli);
        }

      /* If we're starting a new run of same size type bitfields
         (or a run of non-bitfields), set up the "first of the run"
         fields.

         That is, if the current field is not a bitfield, or if there
         was a prior bitfield and the type sizes differ, or if there
         wasn't a prior bitfield and the size of the current field is
         nonzero.

         Note: we must be sure to test ONLY the type size if there was
         a prior bitfield and ONLY for the current field being zero if
         there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
          || (prev_saved != NULL
              ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
              : !integer_zerop (DECL_SIZE (field)) ))
        {
          /* Never smaller than a byte for compatibility.  */
          unsigned int type_align = BITS_PER_UNIT;

          /* (When not a bitfield), we could be seeing a flex array (with
             no DECL_SIZE).  Since we won't be using remaining_in_alignment
             until we see a bitfield (and come by here again) we just skip
             calculating it.  */
          if (DECL_SIZE (field) != NULL
              && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
              && host_integerp (DECL_SIZE (field), 1))
            {
              unsigned HOST_WIDE_INT bitsize
                = tree_low_cst (DECL_SIZE (field), 1);
              unsigned HOST_WIDE_INT typesize
                = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);

              if (typesize < bitsize)
                rli->remaining_in_alignment = 0;
              else
                rli->remaining_in_alignment = typesize - bitsize;
            }

          /* Now align (conventionally) for the new type.  */
          type_align = TYPE_ALIGN (TREE_TYPE (field));

          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);

          rli->bitpos = round_up_loc (input_location, rli->bitpos, type_align);

          /* If we really aligned, don't allow subsequent bitfields
             to undo that.  */
          rli->prev_field = NULL;
        }
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
                    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
                   * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
                      & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
     Store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
           || TREE_OVERFLOW (DECL_SIZE (field)))
    {
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset,
                      fold_convert (sizetype,
                                    size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                bitsize_unit_node)));
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If we ended a bitfield before the full length of the type then
         pad the struct out to the full length of the last type.  */
      if ((TREE_CHAIN (field) == NULL
           || TREE_CODE (TREE_CHAIN (field)) != FIELD_DECL)
          && DECL_BIT_FIELD_TYPE (field)
          && !integer_zerop (DECL_SIZE (field)))
        rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}

/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
                                          rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up_loc (input_location, unpadded_size,
                                     TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up_loc (input_location, unpadded_size_unit,
                    TYPE_ALIGN_UNIT (rli->t));

  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
      && input_location != BUILTINS_LOCATION)
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
        = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up_loc (input_location, TYPE_SIZE (rli->t),
                                    rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
        {
          TYPE_PACKED (rli->t) = 0;

          if (TYPE_NAME (rli->t))
            {
              tree name;

              if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
                name = TYPE_NAME (rli->t);
              else
                name = DECL_NAME (TYPE_NAME (rli->t));

              if (STRICT_ALIGNMENT)
                warning (OPT_Wpacked, "packed attribute causes inefficient "
                         "alignment for %qE", name);
              else
                warning (OPT_Wpacked,
                         "packed attribute is unnecessary for %qE", name);
            }
          else
            {
              if (STRICT_ALIGNMENT)
                warning (OPT_Wpacked,
                         "packed attribute causes inefficient alignment");
              else
                warning (OPT_Wpacked, "packed attribute is unnecessary");
            }
        }
    }
}

/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  enum machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  SET_TYPE_MODE (type, BLKmode);

  if (! host_integerp (TYPE_SIZE (type), 1))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;

      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
          || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
              && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
              && !(TYPE_SIZE (TREE_TYPE (field)) != 0
                   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
          || ! host_integerp (bit_position (field), 1)
          || DECL_SIZE (field) == 0
          || ! host_integerp (DECL_SIZE (field), 1))
        return;

      /* If this field is the whole struct, remember its mode so
         that, say, we can put a double in a class into a DF
         register instead of forcing it to live in the stack.  */
      if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
        mode = DECL_MODE (field);

#ifdef MEMBER_TYPE_FORCES_BLK
      /* With some targets, e.g. c4x, it is sub-optimal
         to access an aligned BLKmode structure as a scalar.  */

      if (MEMBER_TYPE_FORCES_BLK (field, mode))
        return;
#endif /* MEMBER_TYPE_FORCES_BLK  */
    }

  /* If we only have one real field, use its mode if that mode's size
     matches the type's size.  This only applies to RECORD_TYPE.  This
     does not apply to unions.  */
  if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
      && host_integerp (TYPE_SIZE (type), 1)
      && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
    SET_TYPE_MODE (type, mode);
  else
    SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));
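
  /* For example, struct { double d; } has a single DFmode field whose
     size equals the struct's size, so the struct itself gets DFmode
     and can be kept in a floating-point register.  */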

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (TYPE_MODE (type) != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
            || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
    {
      /* If this is the only reason this type is BLKmode, then
         don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      SET_TYPE_MODE (type, BLKmode);
    }
}

/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */

  if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT
          || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
              && TREE_CODE (type) != QUAL_UNION_TYPE
              && TREE_CODE (type) != ARRAY_TYPE)))
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));

      /* Don't override a larger alignment requirement coming from a user
         alignment of one of the fields.  */
      if (mode_align >= TYPE_ALIGN (type))
        {
          TYPE_ALIGN (type) = mode_align;
          TYPE_USER_ALIGN (type) = 0;
        }
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (type)
    = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = fold_convert (sizetype,
                      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
                                  bitsize_unit_node));

  if (TYPE_SIZE (type) != 0)
    {
      TYPE_SIZE (type) = round_up_loc (input_location,
                                       TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type)
        = round_up_loc (input_location, TYPE_SIZE_UNIT (type),
                        TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Also lay out any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      enum machine_mode mode = TYPE_MODE (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
           variant != 0;
           variant = TYPE_NEXT_VARIANT (variant))
        {
          TYPE_SIZE (variant) = size;
          TYPE_SIZE_UNIT (variant) = size_unit;
          TYPE_ALIGN (variant) = align;
          TYPE_USER_ALIGN (variant) = user_align;
          SET_TYPE_MODE (variant, mode);
        }
    }
}

/* Do all of the work required to lay out the type indicated by RLI,
   once the fields have been laid out.  This function will call `free'
   for RLI, unless FREE_P is false.  Passing a value other than false
   for FREE_P is bad practice; this option only exists to support the
   G++ 3.2 ABI.  */

void
finish_record_layout (record_layout_info rli, int free_p)
{
  tree variant;

  /* Compute the final size.  */
  finalize_record_size (rli);

  /* Compute the TYPE_MODE for the record.  */
  compute_record_mode (rli->t);

  /* Perform any last tweaks to the TYPE_SIZE, etc.  */
  finalize_type_size (rli->t);

  /* Propagate TYPE_PACKED to variants.  With C++ templates,
     handle_packed_attribute is too early to do this.  */
  for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
       variant = TYPE_NEXT_VARIANT (variant))
    TYPE_PACKED (variant) = TYPE_PACKED (rli->t);

  /* Lay out any static members.  This is done now because their type
     may use the record's type.  */
  while (rli->pending_statics)
    {
      layout_decl (TREE_VALUE (rli->pending_statics), 0);
      rli->pending_statics = TREE_CHAIN (rli->pending_statics);
    }

  /* Clean up.  */
  if (free_p)
    free (rli);
}

/* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
   NAME, its fields are chained in reverse on FIELDS.

   If ALIGN_TYPE is non-null, it is given the same alignment as
   ALIGN_TYPE.  */

void
finish_builtin_struct (tree type, const char *name, tree fields,
                       tree align_type)
{
  tree tail, next;

  for (tail = NULL_TREE; fields; tail = fields, fields = next)
    {
      DECL_FIELD_CONTEXT (fields) = type;
      next = TREE_CHAIN (fields);
      TREE_CHAIN (fields) = tail;
    }
  TYPE_FIELDS (type) = tail;

  if (align_type)
    {
      TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
      TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
    }

  layout_type (type);
#if 0 /* not yet, should get fixed properly later */
  TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
  TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
                                 TYPE_DECL, get_identifier (name), type);
#endif
  TYPE_STUB_DECL (type) = TYPE_NAME (type);
  layout_decl (TYPE_NAME (type), 0);
}

/* Calculate the mode, size, and alignment for TYPE.
   For an array type, calculate the element separation as well.
   Record TYPE on the chain of permanent or temporary types
   so that dbxout will find out about it.

   TYPE_SIZE of a type is nonzero if the type has been laid out already.
   layout_type does nothing on such a type.

   If the type is incomplete, its TYPE_SIZE remains zero.  */

void
layout_type (tree type)
{
  gcc_assert (type);

  if (type == error_mark_node)
    return;

  /* Do nothing if type has been laid out before.  */
  if (TYPE_SIZE (type))
    return;

  switch (TREE_CODE (type))
    {
    case LANG_TYPE:
      /* This kind of type is the responsibility
         of the language-specific code.  */
      gcc_unreachable ();

    case BOOLEAN_TYPE:  /* Used for Java, Pascal, and Chill.  */
      if (TYPE_PRECISION (type) == 0)
        TYPE_PRECISION (type) = 1; /* Default to a precision of one bit.  */

      /* ... fall through ...  */

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
          && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
        TYPE_UNSIGNED (type) = 1;

      SET_TYPE_MODE (type,
                     smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
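      /* For example, an ENUMERAL_TYPE whose values need only 3 bits
         still gets the smallest integer mode that holds them (QImode
         on typical byte-addressed targets), so TYPE_SIZE is 8 bits.  */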
      break;

    case REAL_TYPE:
      SET_TYPE_MODE (type,
                     mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case FIXED_POINT_TYPE:
      /* TYPE_MODE (type) has been set already.  */
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case COMPLEX_TYPE:
      TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
      SET_TYPE_MODE (type,
                     mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
                                    (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
                                     ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
                                    0));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case VECTOR_TYPE:
      {
        int nunits = TYPE_VECTOR_SUBPARTS (type);
        tree innertype = TREE_TYPE (type);

        gcc_assert (!(nunits & (nunits - 1)));

        /* Find an appropriate mode for the vector type.  */
        if (TYPE_MODE (type) == VOIDmode)
          {
            enum machine_mode innermode = TYPE_MODE (innertype);
            enum machine_mode mode;

            /* First, look for a supported vector type.  */
            if (SCALAR_FLOAT_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_FLOAT;
            else if (SCALAR_FRACT_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_FRACT;
            else if (SCALAR_UFRACT_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_UFRACT;
            else if (SCALAR_ACCUM_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_ACCUM;
            else if (SCALAR_UACCUM_MODE_P (innermode))
              mode = MIN_MODE_VECTOR_UACCUM;
            else
              mode = MIN_MODE_VECTOR_INT;

            /* Do not check vector_mode_supported_p here.  We'll do that
               later in vector_type_mode.  */
            for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
              if (GET_MODE_NUNITS (mode) == nunits
                  && GET_MODE_INNER (mode) == innermode)
                break;

            /* For integers, try mapping it to a same-sized scalar mode.  */
            if (mode == VOIDmode
                && GET_MODE_CLASS (innermode) == MODE_INT)
              mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
                                    MODE_INT, 0);

            if (mode == VOIDmode
                || (GET_MODE_CLASS (mode) == MODE_INT
                    && !have_regs_of_mode[mode]))
              SET_TYPE_MODE (type, BLKmode);
            else
              SET_TYPE_MODE (type, mode);
          }

        TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
        TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
        TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
                                                 TYPE_SIZE_UNIT (innertype),
                                                 size_int (nunits), 0);
        TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
                                            bitsize_int (nunits), 0);

        /* Always naturally align vectors.  This prevents ABI changes
           depending on whether or not native vector modes are supported.  */
        TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
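        /* E.g. a vector of four 32-bit ints is thus given 128-bit size
           and alignment even when no native V4SI mode exists.  */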
        break;
      }

    case VOID_TYPE:
      /* This is an incomplete type and so doesn't have a size.  */
      TYPE_ALIGN (type) = 1;
      TYPE_USER_ALIGN (type) = 0;
      SET_TYPE_MODE (type, VOIDmode);
      break;

    case OFFSET_TYPE:
      TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
      /* A pointer might be MODE_PARTIAL_INT,
         but ptrdiff_t must be integral.  */
      SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
      TYPE_PRECISION (type) = POINTER_SIZE;
      break;

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      /* It's hard to see what the mode and size of a function ought to
         be, but we do know the alignment is FUNCTION_BOUNDARY, so
         make it consistent with that.  */
      SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
      TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
      TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        enum machine_mode mode = TYPE_MODE (type);
        if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
          {
            addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
            mode = targetm.addr_space.address_mode (as);
          }

        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        TYPE_UNSIGNED (type) = 1;
        TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
      }
      break;

    case ARRAY_TYPE:
      {
        tree index = TYPE_DOMAIN (type);
        tree element = TREE_TYPE (type);

        build_pointer_type (element);

        /* We need to know both bounds in order to compute the size.  */
        if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
            && TYPE_SIZE (element))
          {
            tree ub = TYPE_MAX_VALUE (index);
            tree lb = TYPE_MIN_VALUE (index);
            tree element_size = TYPE_SIZE (element);
            tree length;

            /* Make sure that an array of zero-sized elements is zero-sized
               regardless of its extent.  */
            if (integer_zerop (element_size))
              length = size_zero_node;

            /* The initial subtraction should happen in the original type so
               that (possible) negative values are handled appropriately.  */
            else
              length
                = size_binop (PLUS_EXPR, size_one_node,
                              fold_convert (sizetype,
                                            fold_build2_loc (input_location,
                                                             MINUS_EXPR,
                                                             TREE_TYPE (lb),
                                                             ub, lb)));

            TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
                                           fold_convert (bitsizetype,
                                                         length));

            /* If we know the size of the element, calculate the total size
               directly, rather than do some division thing below.  This
               optimization helps Fortran assumed-size arrays (where the
               size of the array is determined at runtime) substantially.  */
            if (TYPE_SIZE_UNIT (element))
              TYPE_SIZE_UNIT (type)
                = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
          }
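
        /* The length computed above is UB - LB + 1; e.g. an array with
           domain [2, 5] has 5 - 2 + 1 = 4 elements, and its size in
           bits is 4 times the element size.  */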

        /* Now round the alignment and size,
           using machine-dependent criteria if any.  */

#ifdef ROUND_TYPE_ALIGN
        TYPE_ALIGN (type)
          = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
#else
        TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
        if (!TYPE_SIZE (element))
          /* We don't know the size of the underlying element type, so
             our alignment calculations will be wrong, forcing us to
             fall back on structural equality.  */
          SET_TYPE_STRUCTURAL_EQUALITY (type);
        TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
        SET_TYPE_MODE (type, BLKmode);
        if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
            && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
#endif
            /* BLKmode elements force BLKmode aggregate;
               else extract/store fields may lose.  */
            && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
                || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
          {
            /* One-element arrays get the component type's mode.  */
            if (simple_cst_equal (TYPE_SIZE (type),
                                  TYPE_SIZE (TREE_TYPE (type))))
              SET_TYPE_MODE (type, TYPE_MODE (TREE_TYPE (type)));
            else
              SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type),
                                                       MODE_INT, 1));

            if (TYPE_MODE (type) != BLKmode
                && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
                && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
              {
                TYPE_NO_FORCE_BLK (type) = 1;
                SET_TYPE_MODE (type, BLKmode);
              }
          }
        /* When the element size is constant, check that it is at least as
           large as the element alignment.  */
        if (TYPE_SIZE_UNIT (element)
            && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
            /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
               TYPE_ALIGN_UNIT.  */
            && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
            && !integer_zerop (TYPE_SIZE_UNIT (element))
            && compare_tree_int (TYPE_SIZE_UNIT (element),
                                 TYPE_ALIGN_UNIT (element)) < 0)
          error ("alignment of array elements is greater than element size");
        break;
      }

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
        tree field;
        record_layout_info rli;

        /* Initialize the layout information.  */
        rli = start_record_layout (type);

        /* If this is a QUAL_UNION_TYPE, we want to process the fields
           in the reverse order in building the COND_EXPR that denotes
           its size.  We reverse them again later.  */
        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Place all the fields.  */
        for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
          place_field (rli, field);

        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Finish laying out the record.  */
        finish_record_layout (rli, /*free_p=*/true);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
     records and unions, finish_record_layout already called this
     function.  */
  if (TREE_CODE (type) != RECORD_TYPE
      && TREE_CODE (type) != UNION_TYPE
      && TREE_CODE (type) != QUAL_UNION_TYPE)
    finalize_type_size (type);

  /* We should never see alias sets on incomplete aggregates, and we
     should not call layout_type on aggregates that are already
     complete.  */
  if (AGGREGATE_TYPE_P (type))
    gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
}

/* Vector types need to re-check the target flags each time we report
   the machine mode.  We need to do this because attribute target can
   change the result of vector_mode_supported_p and have_regs_of_mode
   on a per-function basis.  Thus the TYPE_MODE of a VECTOR_TYPE can
   change on a per-function basis.  */
/* ??? Possibly a better solution is to run through all the types
   referenced by a function and re-compute the TYPE_MODE once, rather
   than make the TYPE_MODE macro call a function.  */

enum machine_mode
vector_type_mode (const_tree t)
{
  enum machine_mode mode;

  gcc_assert (TREE_CODE (t) == VECTOR_TYPE);

  mode = t->type.mode;
  if (VECTOR_MODE_P (mode)
      && (!targetm.vector_mode_supported_p (mode)
          || !have_regs_of_mode[mode]))
    {
      enum machine_mode innermode = TREE_TYPE (t)->type.mode;

      /* For integers, try mapping it to a same-sized scalar mode.  */
      if (GET_MODE_CLASS (innermode) == MODE_INT)
        {
          mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
                                * GET_MODE_BITSIZE (innermode), MODE_INT, 0);

          if (mode != VOIDmode && have_regs_of_mode[mode])
            return mode;
        }

      return BLKmode;
    }

  return mode;
}

/* Create and return a type for signed integers of PRECISION bits.  */

tree
make_signed_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_signed_type (type);
  return type;
}

/* Create and return a type for unsigned integers of PRECISION bits.  */

tree
make_unsigned_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_unsigned_type (type);
  return type;
}

/* Create and return a fract type of PRECISION bits, with signedness
   given by UNSIGNEDP and saturation by SATP.  */

tree
make_fract_type (int precision, int unsignedp, int satp)
{
  tree type = make_node (FIXED_POINT_TYPE);

  TYPE_PRECISION (type) = precision;

  if (satp)
    TYPE_SATURATING (type) = 1;

  /* Lay out the type: set its alignment, size, etc.  */
  if (unsignedp)
    {
      TYPE_UNSIGNED (type) = 1;
      SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
    }
  else
    SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
  layout_type (type);

  return type;
}

/* Create and return an accum type of PRECISION bits, with signedness
   given by UNSIGNEDP and saturation by SATP.  */

tree
make_accum_type (int precision, int unsignedp, int satp)
{
  tree type = make_node (FIXED_POINT_TYPE);

  TYPE_PRECISION (type) = precision;

  if (satp)
    TYPE_SATURATING (type) = 1;

  /* Lay out the type: set its alignment, size, etc.  */
  if (unsignedp)
    {
      TYPE_UNSIGNED (type) = 1;
      SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
    }
  else
    SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
  layout_type (type);

  return type;
}

/* Initialize sizetype and bitsizetype to a reasonable and temporary
   value to enable integer types to be created.  */

void
initialize_sizetypes (bool signed_p)
{
  tree t = make_node (INTEGER_TYPE);
  int precision = GET_MODE_BITSIZE (SImode);

  SET_TYPE_MODE (t, SImode);
  TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
  TYPE_USER_ALIGN (t) = 0;
  TYPE_IS_SIZETYPE (t) = 1;
  TYPE_UNSIGNED (t) = !signed_p;
  TYPE_SIZE (t) = build_int_cst (t, precision);
  TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
  TYPE_PRECISION (t) = precision;

  /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE.  */
  set_min_and_max_values_for_integral_type (t, precision, !signed_p);

  sizetype = t;
  bitsizetype = build_distinct_type_copy (t);
}

/* Make sizetype a version of TYPE, and initialize *sizetype accordingly.
   We do this by overwriting the stub sizetype and bitsizetype nodes
   created by initialize_sizetypes.  This makes sure that (a) anything
   stubby about them no longer exists and (b) any INTEGER_CSTs created
   with such a type remain valid.  */

void
set_sizetype (tree type)
{
  tree t;
  int oprecision = TYPE_PRECISION (type);
  /* The *bitsizetype types use a precision that avoids overflows when
     calculating signed sizes / offsets in bits.  However, when
     cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
     precision.  */
  int precision
    = MIN (oprecision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
  precision
    = GET_MODE_PRECISION (smallest_mode_for_size (precision, MODE_INT));
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;
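
  /* For instance, with a 32-bit sizetype and 8-bit units
     (BITS_PER_UNIT_LOG == 3), bitsizetype needs 32 + 3 + 1 = 36 bits,
     which smallest_mode_for_size then widens to the next integer mode
     (typically 64 bits).  */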

  gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));

  t = build_distinct_type_copy (type);
  /* We do want to use sizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
  TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
  TYPE_UID (t) = TYPE_UID (sizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub sizetype.  */
  memcpy (sizetype, t, tree_size (sizetype));
  TYPE_MAIN_VARIANT (sizetype) = sizetype;
  TYPE_CANONICAL (sizetype) = sizetype;

  t = make_node (INTEGER_TYPE);
  TYPE_NAME (t) = get_identifier ("bit_size_type");
  /* We do want to use bitsizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
  TYPE_PRECISION (t) = precision;
  TYPE_UID (t) = TYPE_UID (bitsizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub bitsizetype.  */
  memcpy (bitsizetype, t, tree_size (bitsizetype));
  TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
  TYPE_CANONICAL (bitsizetype) = bitsizetype;

  if (TYPE_UNSIGNED (type))
    {
      fixup_unsigned_type (bitsizetype);
      ssizetype = make_signed_type (oprecision);
      TYPE_IS_SIZETYPE (ssizetype) = 1;
      sbitsizetype = make_signed_type (precision);
      TYPE_IS_SIZETYPE (sbitsizetype) = 1;
    }
  else
    {
      fixup_signed_type (bitsizetype);
      ssizetype = sizetype;
      sbitsizetype = bitsizetype;
    }

  /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that
     it is sign extended in a way consistent with force_fit_type.  */
  if (TYPE_UNSIGNED (type))
    {
      tree orig_max, new_max;

      orig_max = TYPE_MAX_VALUE (sizetype);

      /* Build a new node with the same values, but a different type.
         Sign extend it to ensure consistency.  */
      new_max = build_int_cst_wide_type (sizetype,
                                         TREE_INT_CST_LOW (orig_max),
                                         TREE_INT_CST_HIGH (orig_max));
      TYPE_MAX_VALUE (sizetype) = new_max;
    }
}

/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
   or BOOLEAN_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
   for TYPE, based on the PRECISION and whether or not the TYPE
   IS_UNSIGNED.  PRECISION need not correspond to a width supported
   natively by the hardware; for example, on a machine with 8-bit,
   16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
   61.  */

void
set_min_and_max_values_for_integral_type (tree type,
                                          int precision,
                                          bool is_unsigned)
{
  tree min_value;
  tree max_value;

  if (is_unsigned)
    {
      min_value = build_int_cst (type, 0);
      max_value
        = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
                              ? -1
                              : ((HOST_WIDE_INT) 1 << precision) - 1,
                              precision - HOST_BITS_PER_WIDE_INT > 0
                              ? ((unsigned HOST_WIDE_INT) ~0
                                 >> (HOST_BITS_PER_WIDE_INT
                                     - (precision - HOST_BITS_PER_WIDE_INT)))
                              : 0);
    }
  else
    {
      min_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? 0
                               : (HOST_WIDE_INT) (-1) << (precision - 1)),
                              (((HOST_WIDE_INT) (-1)
                                << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                                    ? precision - HOST_BITS_PER_WIDE_INT - 1
                                    : 0))));
      max_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? -1
                               : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
                              (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                               ? (((HOST_WIDE_INT) 1
                                   << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
                               : 0));
    }
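
  /* For a narrow type this reduces to the familiar bounds; e.g. with
     PRECISION == 8 the unsigned range is [0, (1 << 8) - 1] = [0, 255]
     and the signed range is [-(1 << 7), (1 << 7) - 1] = [-128, 127].
     The second argument of build_int_cst_wide supplies the high-order
     half when PRECISION exceeds HOST_BITS_PER_WIDE_INT.  */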

  TYPE_MIN_VALUE (type) = min_value;
  TYPE_MAX_VALUE (type) = max_value;
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  Used when make_signed_type won't do
   because the tree code is not INTEGER_TYPE.
   E.g. for Pascal, when the -fsigned-char option is given.  */

void
fixup_signed_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need such types,
     as they are used by i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/false);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  This is used both in `make_unsigned_type'
   and for enumeral types.  */

void
fixup_unsigned_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need such types,
     as they are used by i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  TYPE_UNSIGNED (type) = 1;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/true);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Find the best machine mode to use when referencing a bit field of length
   BITSIZE bits starting at BITPOS.

   The underlying object is known to be aligned to a boundary of ALIGN bits.
   If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
   larger than LARGEST_MODE (usually SImode).

   If no mode meets all these conditions, we return VOIDmode.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
   smallest mode meeting these conditions.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
   largest mode (but a mode no wider than UNITS_PER_WORD) that meets
   all the conditions.

   If VOLATILEP is true the narrow_volatile_bitfields target hook is used to
   decide which of the above modes should be used.  */

enum machine_mode
get_best_mode (int bitsize, int bitpos, unsigned int align,
               enum machine_mode largest_mode, int volatilep)
{
  enum machine_mode mode;
  unsigned int unit = 0;

  /* Find the narrowest integer mode that contains the bit field.  */
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      unit = GET_MODE_BITSIZE (mode);
      if ((bitpos % unit) + bitsize <= unit)
        break;
    }
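
  /* E.g. a 6-bit field at bit position 10 satisfies (10 % 8) + 6 <= 8,
     so the narrowest containing mode is the 8-bit one: the field lies
     entirely within the second byte.  */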

  if (mode == VOIDmode
      /* It is tempting to omit the following line
         if STRICT_ALIGNMENT is true.
         But that is incorrect, since if the bitfield uses part of 3 bytes
         and we use a 4-byte mode, we could get a spurious segv
         if the extra 4th byte is past the end of memory.
         (Though at least one Unix compiler ignores this problem:
         that on the Sequent 386 machine.)  */
      || MIN (unit, BIGGEST_ALIGNMENT) > align
      || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
    return VOIDmode;

  if ((SLOW_BYTE_ACCESS && ! volatilep)
      || (volatilep && !targetm.narrow_volatile_bitfield ()))
    {
      enum machine_mode wide_mode = VOIDmode, tmode;

      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
           tmode = GET_MODE_WIDER_MODE (tmode))
        {
          unit = GET_MODE_BITSIZE (tmode);
          if (bitpos / unit == (bitpos + bitsize - 1) / unit
              && unit <= BITS_PER_WORD
              && unit <= MIN (align, BIGGEST_ALIGNMENT)
              && (largest_mode == VOIDmode
                  || unit <= GET_MODE_BITSIZE (largest_mode)))
            wide_mode = tmode;
        }

      if (wide_mode != VOIDmode)
        return wide_mode;
    }

  return mode;
}

/* Gets minimal and maximal values for MODE (signed or unsigned depending on
   SIGN).  The returned constants are made to be usable in TARGET_MODE.  */

void
get_mode_bounds (enum machine_mode mode, int sign,
                 enum machine_mode target_mode,
                 rtx *mmin, rtx *mmax)
{
  unsigned size = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT min_val, max_val;

  gcc_assert (size <= HOST_BITS_PER_WIDE_INT);

  if (sign)
    {
      min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
    }
  else
    {
      min_val = 0;
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
    }
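
  /* The unsigned maximum is written as (1 << (size - 1) << 1) - 1
     rather than (1 << size) - 1 because shifting by the full width of
     the type is undefined in C when size == HOST_BITS_PER_WIDE_INT;
     the double shift computes the same value safely.  */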

  *mmin = gen_int_mode (min_val, target_mode);
  *mmax = gen_int_mode (max_val, target_mode);
}

#include "gt-stor-layout.h"
