gcc/stor-layout.c

/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "output.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"
#include "cgraph.h"
#include "tree-inline.h"
#include "tree-dump.h"
#include "gimple.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
   in the address spaces' address_mode, not pointer_mode.  Set only by
   internal_reference_types called only by a front end.  */
static int reference_types_internal = 0;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);

/* Show that REFERENCE_TYPES are internal and should use address_mode.
   Called only by front end.  */

void
internal_reference_types (void)
{
  reference_types_internal = 1;
}

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
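
/* For example, given a C99 variable-length array such as

     void f (int n) { char buf[2 * n + 1]; }

   the array type's size is the non-constant expression 2 * n + 1
   (scaled to bits), and wrapping it in a SAVE_EXPR here ensures it is
   computed only once per use of the type.  (Illustrative only; the
   exact tree the front end builds may differ.)  */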

/* An array of functions used for self-referential size computation.  */
static GTY(()) VEC (tree, gc) *size_functions;

/* Look inside EXPR into simple arithmetic operations involving constants.
   Return the outermost non-arithmetic or non-constant node.  */

static tree
skip_simple_constant_arithmetic (tree expr)
{
  while (true)
    {
      if (UNARY_CLASS_P (expr))
        expr = TREE_OPERAND (expr, 0);
      else if (BINARY_CLASS_P (expr))
        {
          if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
            expr = TREE_OPERAND (expr, 0);
          else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
            expr = TREE_OPERAND (expr, 1);
          else
            break;
        }
      else
        break;
    }

  return expr;
}
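
/* For instance, applied to the expression (N + 4) * 2, the loop above
   strips the multiplication and then the addition (each has one
   constant operand) and returns the inner node N.  */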

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls and constants, like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (code == COMPONENT_REF)
    {
      tree inner;
      for (inner = TREE_OPERAND (*tp, 0);
           REFERENCE_CLASS_P (inner);
           inner = TREE_OPERAND (inner, 0))
        ;

      if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
        {
          *walk_subtrees = 0;
          return NULL_TREE;
        }
    }

  /* We're not supposed to have SAVE_EXPRs in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  else if (code == STATEMENT_LIST)
    gcc_unreachable ();

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  VEC (tree, heap) *self_refs = NULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  VEC(tree,gc) *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR)
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (VEC_length (tree, self_refs) > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  args = VEC_alloc (tree, gc, VEC_length (tree, self_refs));
  FOR_EACH_VEC_ELT (tree, self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      if (targetm.calls.promote_prototypes (NULL_TREE)
          && INTEGRAL_TYPE_P (param_type)
          && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
        DECL_ARG_TYPE (param_decl) = integer_type_node;
      else
        DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      VEC_quick_push (tree, args, ref);
    }

  VEC_free (tree, heap, self_refs);

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  VEC_safe_push (tree, gc, size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
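
/* As an illustrative sketch: for an Ada-style record whose array
   component is sized by a discriminant D of the record itself, the
   size expression references the record through a PLACEHOLDER_EXPR.
   The code above rewrites that expression into roughly

     static inline bitsizetype SZ0 (d_type p0) { return <size> (p0); }

   where <size> is the original computation with the self-reference
   replaced by the parameter p0, and the type's size becomes the call
   SZ0 (<placeholder>.d).  (The names and types here are invented for
   the example.)  */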

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; VEC_iterate (tree, size_functions, i, fndecl); i++)
    {
      dump_function (TDI_original, fndecl);
      gimplify_function_tree (fndecl);
      dump_function (TDI_generic, fndecl);
      cgraph_finalize_function (fndecl, false);
    }

  VEC_free (tree, gc, size_functions);
}

/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class MCLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes wider
   than MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class mclass, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}
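
/* For example, on a typical 32-bit target mode_for_size (32, MODE_INT, 0)
   walks QImode, HImode, SImode and returns SImode, while a request for
   24 bits returns BLKmode unless the target defines a 24-bit integer
   mode.  (The exact mode names and widths are target-specific.)  */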

/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!host_integerp (size, 1))
    return BLKmode;
  uhwi = tree_low_cst (size, 1);
  ui = uhwi;
  if (uhwi != ui)
    return BLKmode;
  return mode_for_size (ui, mclass, limit);
}

/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class mclass)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  gcc_unreachable ();
}
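
/* So, assuming the usual 8/16/32-bit integer modes, a request for 17
   bits yields SImode here, whereas mode_for_size would have returned
   BLKmode because no mode has exactly 17 value bits.  */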

/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
        break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}
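
/* For instance, asking for the integer equivalent of SFmode (32 bits
   on most targets) returns SImode, which lets a bitwise copy of a
   float be carried out in an integer register.  */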

/* Find a mode that is suitable for representing a vector with
   NUNITS elements of mode INNERMODE.  Returns BLKmode if there
   is no suitable mode.  */

enum machine_mode
mode_for_vector (enum machine_mode innermode, unsigned nunits)
{
  enum machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_NUNITS (mode) == nunits
        && GET_MODE_INNER (mode) == innermode)
      break;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (mode == VOIDmode
      && GET_MODE_CLASS (innermode) == MODE_INT)
    mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
                          MODE_INT, 0);

  if (mode == VOIDmode
      || (GET_MODE_CLASS (mode) == MODE_INT
          && !have_regs_of_mode[mode]))
    return BLKmode;

  return mode;
}
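
/* E.g. mode_for_vector (SImode, 4) returns V4SImode when the target
   defines such a mode; failing that, a request for 4 x QImode can
   fall back to the scalar SImode because 4 * 8 == 32 bits.  (Which
   vector modes exist is entirely target-specific.)  */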

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT,
              MAX (1, mode_base_align[mode] * BITS_PER_UNIT));
}

/* Return the natural mode of an array, given that it is SIZE bits in
   total and has elements of type ELEM_TYPE.  */

static enum machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  unsigned HOST_WIDE_INT int_size, int_elem_size;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  limit_p = true;
  if (host_integerp (size, 1) && host_integerp (elem_size, 1))
    {
      int_size = tree_low_cst (size, 1);
      int_elem_size = tree_low_cst (elem_size, 1);
      if (int_elem_size > 0
          && int_size % int_elem_size == 0
          && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
                                             int_size / int_elem_size))
        limit_p = false;
    }
  return mode_for_size_tree (size, MODE_INT, limit_p);
}
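
/* Thus a 4-byte array such as char[4] (TYPE_SIZE of 32 bits) may come
   out with SImode, while a large or oddly-sized array falls back to
   BLKmode via mode_for_size_tree.  */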

/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change;
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
#ifdef PCC_BITFIELD_TYPE_MATTERS
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
#endif
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode,
             occupying a complete byte or bytes on proper boundary,
             and not -fstrict-volatile-bitfields.  If the latter is set,
             we unfortunately can't check TREE_THIS_VOLATILE, as a cast
             may make a volatile object later.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
              && flag_strict_volatile_bitfields <= 0)
            {
              enum machine_mode xmode
                = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
              unsigned int xalign = GET_MODE_ALIGNMENT (xmode);

              if (xmode != BLKmode
                  && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                  && (known_align == 0 || known_align >= xalign))
                {
                  DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
                  DECL_MODE (decl) = xmode;
                  DECL_BIT_FIELD (decl) = 0;
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (e.g. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          DECL_ALIGN (decl)
            = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
          DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
          && compare_tree_int (size, larger_than_size) > 0)
        {
          int size_as_int = TREE_INT_CST_LOW (size);

          if (compare_tree_int (size, size_as_int) == 0)
            warning (OPT_Wlarger_than_, "size of %q+D is %d bytes",
                     decl, size_as_int);
          else
            warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
                     decl, larger_than_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
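
/* Worked example for the bit-field case above: given

     struct s { unsigned int f : 8; };

   DECL_SIZE (f) is 8 bits, so mode_for_size_tree finds QImode; a byte
   needs no extra alignment, so f's mode becomes QImode and
   DECL_BIT_FIELD is cleared, i.e. the "bit-field" is then handled as
   an ordinary byte-sized field.  (Assumes a target with 8-bit
   QImode.)  */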

/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  if (!DECL_USER_ALIGN (decl))
    DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = NULL;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}

/* These four routines perform computations that convert between
   the offset/bitpos forms and byte and bit offsets.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR,
                                 fold_convert (bitsizetype, offset),
                                 bitsize_unit_node));
}

tree
byte_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, offset,
                     fold_convert (sizetype,
                                   size_binop (TRUNC_DIV_EXPR, bitpos,
                                               bitsize_unit_node)));
}

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  *poffset = size_binop (MULT_EXPR,
                         fold_convert (sizetype,
                                       size_binop (FLOOR_DIV_EXPR, pos,
                                                   bitsize_int (off_align))),
                         size_int (off_align / BITS_PER_UNIT));
  *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}
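
/* A small numeric sanity check: with off_align == 32, a bit position
   of 70 splits into *poffset = (70 / 32) * 4 = 8 bytes and
   *pbitpos = 70 % 32 = 6 bits, and bit_from_pos (8, 6) recovers
   8 * 8 + 6 = 70.  */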

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
                                      bitsize_int (off_align));

      *poffset
        = size_binop (PLUS_EXPR, *poffset,
                      size_binop (MULT_EXPR,
                                  fold_convert (sizetype, extra_aligns),
                                  size_int (off_align / BITS_PER_UNIT)));

      *pbitpos
        = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
    }
}
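
/* For example, offset = 1 byte, bitpos = 19 and off_align == 8 give
   extra_aligns = 2, so the pair is normalized to offset = 3 bytes,
   bitpos = 3 bits -- the same absolute position (27 bits), but with
   the bit part kept below the offset alignment.  */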

/* Print debugging information about the information in RLI.  */

DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);

  /* The ms_struct code is the only code that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n",
             rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (!VEC_empty (tree, rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug_vec_tree (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.) */
      if ((!is_bitfield && !DECL_PACKED (field))
          || ((DECL_SIZE (field) == NULL_TREE
               || !integer_zerop (DECL_SIZE (field)))
              ? !DECL_PACKED (field)
              : (rli->prev_field
                 && DECL_BIT_FIELD_TYPE (rli->prev_field)
                 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
        }
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

          /* Targets might choose to handle unnamed and hence possibly
             zero-width bitfields.  Those are not influenced by #pragmas
             or packed attributes.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              if (initial_max_fld_align)
                type_align = MIN (type_align,
                                  initial_max_fld_align * BITS_PER_UNIT);
            }
          else if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}
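
/* Example of the PCC_BITFIELD_TYPE_MATTERS branch: on targets that
   define it, a named bit-field such as

     struct s { char c; int f : 3; };

   raises the alignment of the whole struct to that of int even though
   f occupies only 3 bits, so the struct is int-aligned and its size
   rounds up to 4 bytes instead of 2 (assuming a 32-bit, 4-byte-aligned
   int).  */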

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
                               DECL_SIZE_UNIT (field), rli->offset);
}

#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
             / align));
}
#endif
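
/* Numeric check: a 5-bit field of type int (32-bit size and ALIGN)
   proposed at byte 3, bit 6 sits at bit 30 within its 32-bit unit;
   (30 + 5 + 31) / 32 == 2 units, versus 32 / 32 == 1 unit for the
   type, so the function returns nonzero and the caller advances the
   field to the next boundary.  */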

/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      VEC_safe_push (tree, gc, rli->pending_statics, field);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently from records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
         maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
                   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
                   * (tree_low_cst (rli->offset, 1)
                      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
        {
          if (TYPE_ALIGN (type) > desired_align)
            {
              if (STRICT_ALIGNMENT)
                warning (OPT_Wattributes, "packed attribute causes "
                         "inefficient alignment for %q+D", field);
              /* Don't warn if DECL_PACKED was set by the type.  */
              else if (!TYPE_PACKED (rli->t))
                warning (OPT_Wattributes, "packed attribute is "
                         "unnecessary for %q+D", field);
            }
        }
      else
        rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?  */
  if (known_align < desired_align)
    {
      /* No, we need to skip space before this field.
         Bump the cumulative size to a multiple of the field alignment.  */

      if (!targetm.ms_bitfield_layout_p (rli->t)
          && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
        warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
         the bit position.  */
      if (desired_align < rli->offset_align)
        rli->bitpos = round_up (rli->bitpos, desired_align);
      else
        {
          /* First adjust OFFSET by the partial bits, then align.  */
          rli->offset
            = size_binop (PLUS_EXPR, rli->offset,
                          fold_convert (sizetype,
                                        size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                    bitsize_unit_node)));
          rli->bitpos = bitsize_zero_node;

          rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
        }

      if (! TREE_CONSTANT (rli->offset))
        rli->offset_align = desired_align;
      if (targetm.ms_bitfield_layout_p (rli->t))
        rli->prev_field = NULL;
    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && (! DECL_PACKED (field)
          /* Enter for these packed fields only to issue a warning.  */
          || TYPE_ALIGN (type) <= BITS_PER_UNIT)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
         than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        {
          if (DECL_PACKED (field))
            {
              if (warn_packed_bitfield_compat == 1)
                inform
                  (input_location,
                   "offset of packed bit-field %qD has changed in GCC 4.4",
                   field);
            }
          else
            rli->bitpos = round_up (rli->bitpos, type_align);
        }

      if (! DECL_PACKED (field))
        TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
        type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
         statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
        type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
         Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif
1264
 
1265
  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
1266
     A subtlety:
1267
        When a bit field is inserted into a packed record, the whole
1268
        size of the underlying type is used by one or more same-size
1269
        adjacent bitfields.  (That is, if its long:3, 32 bits is
1270
        used in the record, and any additional adjacent long bitfields are
1271
        packed into the same chunk of 32 bits. However, if the size
1272
        changes, a new field of that size is allocated.)  In an unpacked
1273
        record, this is the same as using alignment, but not equivalent
1274
        when packing.
1275
 
1276
     Note: for compatibility, we use the type size, not the type alignment
1277
     to determine alignment, since that matches the documentation */
1278
 
1279
  if (targetm.ms_bitfield_layout_p (rli->t))
1280
    {
1281
      tree prev_saved = rli->prev_field;
1282
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;
1283
 
1284
      /* This is a bitfield if it exists.  */
1285
      if (rli->prev_field)
1286
        {
1287
          /* If both are bitfields, nonzero, and the same size, this is
1288
             the middle of a run.  Zero declared size fields are special
1289
             and handled as "end of run". (Note: it's nonzero declared
1290
             size, but equal type sizes!) (Since we know that both
1291
             the current and previous fields are bitfields by the
1292
             time we check it, DECL_SIZE must be present for both.) */
1293
          if (DECL_BIT_FIELD_TYPE (field)
1294
              && !integer_zerop (DECL_SIZE (field))
1295
              && !integer_zerop (DECL_SIZE (rli->prev_field))
1296
              && host_integerp (DECL_SIZE (rli->prev_field), 0)
1297
              && host_integerp (TYPE_SIZE (type), 0)
1298
              && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
1299
            {
1300
              /* We're in the middle of a run of equal type size fields; make
1301
                 sure we realign if we run out of bits.  (Not decl size,
1302
                 type size!) */
1303
              HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
1304
 
1305
              if (rli->remaining_in_alignment < bitsize)
1306
                {
1307
                  HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);
1308
 
1309
                  /* out of bits; bump up to next 'word'.  */
1310
                  rli->bitpos
1311
                    = size_binop (PLUS_EXPR, rli->bitpos,
1312
                                  bitsize_int (rli->remaining_in_alignment));
1313
                  rli->prev_field = field;
1314
                  if (typesize < bitsize)
1315
                    rli->remaining_in_alignment = 0;
1316
                  else
1317
                    rli->remaining_in_alignment = typesize - bitsize;
1318
                }
1319
              else
1320
                rli->remaining_in_alignment -= bitsize;
1321
            }
1322
          else
1323
            {
1324
              /* End of a run: if leaving a run of bitfields of the same type
1325
                 size, we have to "use up" the rest of the bits of the type
1326
                 size.
1327
 
1328
                 Compute the new position as the sum of the size for the prior
1329
                 type and where we first started working on that type.
1330
                 Note: since the beginning of the field was aligned then
1331
                 of course the end will be too.  No round needed.  */
1332
 
1333
              if (!integer_zerop (DECL_SIZE (rli->prev_field)))
1334
                {
1335
                  rli->bitpos
1336
                    = size_binop (PLUS_EXPR, rli->bitpos,
1337
                                  bitsize_int (rli->remaining_in_alignment));
1338
                }
1339
              else
1340
                /* We "use up" size zero fields; the code below should behave
1341
                   as if the prior field was not a bitfield.  */
1342
                prev_saved = NULL;

              /* Cause a new bitfield to be captured, either this time (if
                 currently a bitfield) or next time we see one.  */
              if (!DECL_BIT_FIELD_TYPE (field)
                  || integer_zerop (DECL_SIZE (field)))
                rli->prev_field = NULL;
            }

          normalize_rli (rli);
        }

      /* If we're starting a new run of same size type bitfields
         (or a run of non-bitfields), set up the "first of the run"
         fields.

         That is, if the current field is not a bitfield, or if there
         was a prior bitfield and the type sizes differ, or if there
         wasn't a prior bitfield and the size of the current field is
         nonzero.

         Note: we must be sure to test ONLY the type size if there was
         a prior bitfield and ONLY for the current field being zero if
         there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
          || (prev_saved != NULL
              ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
              : !integer_zerop (DECL_SIZE (field))))
        {
          /* Never smaller than a byte for compatibility.  */
          unsigned int type_align = BITS_PER_UNIT;

          /* When not a bitfield, we could be seeing a flex array (with
             no DECL_SIZE).  Since we won't be using remaining_in_alignment
             until we see a bitfield (and come by here again) we just skip
             calculating it.  */
          if (DECL_SIZE (field) != NULL
              && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
              && host_integerp (DECL_SIZE (field), 1))
            {
              unsigned HOST_WIDE_INT bitsize
                = tree_low_cst (DECL_SIZE (field), 1);
              unsigned HOST_WIDE_INT typesize
                = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);

              if (typesize < bitsize)
                rli->remaining_in_alignment = 0;
              else
                rli->remaining_in_alignment = typesize - bitsize;
            }

          /* Now align (conventionally) for the new type.  */
          type_align = TYPE_ALIGN (TREE_TYPE (field));

          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);

          rli->bitpos = round_up (rli->bitpos, type_align);

          /* If we really aligned, don't allow subsequent bitfields
             to undo that.  */
          rli->prev_field = NULL;
        }
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
                    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
                    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
                       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
     Store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
           || TREE_OVERFLOW (DECL_SIZE (field)))
    {
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset,
                      fold_convert (sizetype,
                                    size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                bitsize_unit_node)));
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If we ended a bitfield before the full length of the type then
         pad the struct out to the full length of the last type.  */
      if ((DECL_CHAIN (field) == NULL
           || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
          && DECL_BIT_FIELD_TYPE (field)
          && !integer_zerop (DECL_SIZE (field)))
        rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}
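
/* Worked example of the placement logic above (a sketch, assuming a
   typical 32-bit target with BITS_PER_UNIT == 8, 4-byte int alignment,
   and no #pragma pack in effect):

       struct s { char c; int i; };

   place_field puts C at bit offset 0.  For I, the desired alignment is
   32, so rli->bitpos is rounded up from 8 to 32: I lands at byte
   offset 4, leaving 3 bytes of padding after C.  The unpadded size is
   8 bytes, which finalize_record_size below rounds up to the 4-byte
   record alignment (already a multiple, so sizeof (struct s) == 8).  */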

/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
                                          rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
      && input_location != BUILTINS_LOCATION)
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
        = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
        {
          if (TYPE_NAME (rli->t))
            {
              tree name;

              if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
                name = TYPE_NAME (rli->t);
              else
                name = DECL_NAME (TYPE_NAME (rli->t));

              if (STRICT_ALIGNMENT)
                warning (OPT_Wpacked, "packed attribute causes inefficient "
                         "alignment for %qE", name);
              else
                warning (OPT_Wpacked,
                         "packed attribute is unnecessary for %qE", name);
            }
          else
            {
              if (STRICT_ALIGNMENT)
                warning (OPT_Wpacked,
                         "packed attribute causes inefficient alignment");
              else
                warning (OPT_Wpacked, "packed attribute is unnecessary");
            }
        }
    }
}
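
/* Examples of the two warnings issued above (illustrative only,
   assuming a target where int is 4 bytes with 4-byte alignment):

       struct a { int i; char c; };
         -Wpadded: the unpadded size is 5 bytes; rounding up to the
         4-byte record alignment makes it 8, so "padding struct size
         to alignment boundary" is reported.

       struct b { char x; char y; } __attribute__ ((packed));
         -Wpacked: packing changes neither layout nor alignment, so
         "packed attribute is unnecessary" is reported.  */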

/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  enum machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  SET_TYPE_MODE (type, BLKmode);

  if (! host_integerp (TYPE_SIZE (type), 1))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;

      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
          || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
              && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
              && !(TYPE_SIZE (TREE_TYPE (field)) != 0
                   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
          || ! host_integerp (bit_position (field), 1)
          || DECL_SIZE (field) == 0
          || ! host_integerp (DECL_SIZE (field), 1))
        return;

      /* If this field is the whole struct, remember its mode so
         that, say, we can put a double in a class into a DF
         register instead of forcing it to live in the stack.  */
      if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
        mode = DECL_MODE (field);

#ifdef MEMBER_TYPE_FORCES_BLK
      /* With some targets, e.g. c4x, it is sub-optimal
         to access an aligned BLKmode structure as a scalar.  */

      if (MEMBER_TYPE_FORCES_BLK (field, mode))
        return;
#endif /* MEMBER_TYPE_FORCES_BLK  */
    }

  /* If we only have one real field, use its mode if that mode's size
     matches the type's size.  This only applies to RECORD_TYPE.  This
     does not apply to unions.  */
  if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
      && host_integerp (TYPE_SIZE (type), 1)
      && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
    SET_TYPE_MODE (type, mode);
  else
    SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (TYPE_MODE (type) != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
            || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
    {
      /* If this is the only reason this type is BLKmode, then
         don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      SET_TYPE_MODE (type, BLKmode);
    }
}
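
/* Illustration of the effect of compute_record_mode (a sketch for a
   typical target with DFmode doubles and 8/16/32-bit integer modes):

       struct wrap { double d; };   the sole field covers the whole
                                    struct, so TYPE_MODE becomes DFmode
                                    and the value can live in a
                                    floating-point register;

       struct raw { char b[3]; };   the only field is itself BLKmode
                                    (no 24-bit integer mode exists), so
                                    the record stays BLKmode and lives
                                    in memory.  */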

/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */

  if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT
          || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
              && TREE_CODE (type) != QUAL_UNION_TYPE
              && TREE_CODE (type) != ARRAY_TYPE)))
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));

      /* Don't override a larger alignment requirement coming from a user
         alignment of one of the fields.  */
      if (mode_align >= TYPE_ALIGN (type))
        {
          TYPE_ALIGN (type) = mode_align;
          TYPE_USER_ALIGN (type) = 0;
        }
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (type)
    = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = fold_convert (sizetype,
                      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
                                  bitsize_unit_node));

  if (TYPE_SIZE (type) != 0)
    {
      TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type)
        = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Also lay out any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      enum machine_mode mode = TYPE_MODE (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
           variant != 0;
           variant = TYPE_NEXT_VARIANT (variant))
        {
          TYPE_SIZE (variant) = size;
          TYPE_SIZE_UNIT (variant) = size_unit;
          TYPE_ALIGN (variant) = align;
          TYPE_USER_ALIGN (variant) = user_align;
          SET_TYPE_MODE (variant, mode);
        }
    }
}
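
/* Worked example of the rounding above (assuming BITS_PER_UNIT == 8):
   a type whose fields occupy 40 bits with TYPE_ALIGN == 32 gets
   TYPE_SIZE rounded up to 64 bits and TYPE_SIZE_UNIT to 8 bytes.  */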

/* Do all of the work required to lay out the type indicated by RLI,
   once the fields have been laid out.  This function will call `free'
   for RLI, unless FREE_P is false.  Passing a value other than false
   for FREE_P is bad practice; this option only exists to support the
   G++ 3.2 ABI.  */

void
finish_record_layout (record_layout_info rli, int free_p)
{
  tree variant;

  /* Compute the final size.  */
  finalize_record_size (rli);

  /* Compute the TYPE_MODE for the record.  */
  compute_record_mode (rli->t);

  /* Perform any last tweaks to the TYPE_SIZE, etc.  */
  finalize_type_size (rli->t);

  /* Propagate TYPE_PACKED to variants.  With C++ templates,
     handle_packed_attribute is too early to do this.  */
  for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
       variant = TYPE_NEXT_VARIANT (variant))
    TYPE_PACKED (variant) = TYPE_PACKED (rli->t);

  /* Lay out any static members.  This is done now because their type
     may use the record's type.  */
  while (!VEC_empty (tree, rli->pending_statics))
    layout_decl (VEC_pop (tree, rli->pending_statics), 0);

  /* Clean up.  */
  if (free_p)
    {
      VEC_free (tree, gc, rli->pending_statics);
      free (rli);
    }
}

/* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
   NAME, its fields are chained in reverse on FIELDS.

   If ALIGN_TYPE is non-null, it is given the same alignment as
   ALIGN_TYPE.  */

void
finish_builtin_struct (tree type, const char *name, tree fields,
                       tree align_type)
{
  tree tail, next;

  for (tail = NULL_TREE; fields; tail = fields, fields = next)
    {
      DECL_FIELD_CONTEXT (fields) = type;
      next = DECL_CHAIN (fields);
      DECL_CHAIN (fields) = tail;
    }
  TYPE_FIELDS (type) = tail;

  if (align_type)
    {
      TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
      TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
    }

  layout_type (type);
#if 0 /* not yet, should get fixed properly later */
  TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
  TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
                                 TYPE_DECL, get_identifier (name), type);
#endif
  TYPE_STUB_DECL (type) = TYPE_NAME (type);
  layout_decl (TYPE_NAME (type), 0);
}
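
/* Usage sketch for finish_builtin_struct (hypothetical code, not used
   anywhere in this file): build the equivalent of

       struct __example_pair { int lo; int hi; };

   Note that the field chain is passed in reverse declaration order, as
   the comment above requires; finish_builtin_struct re-reverses it.  */
#if 0
static tree
make_example_pair_type (void)
{
  tree t = make_node (RECORD_TYPE);
  tree hi = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                        get_identifier ("hi"), integer_type_node);
  tree lo = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                        get_identifier ("lo"), integer_type_node);

  /* Chain HI first so that LO ends up first in TYPE_FIELDS.  */
  DECL_CHAIN (hi) = lo;
  finish_builtin_struct (t, "__example_pair", hi, NULL_TREE);
  return t;
}
#endif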

/* Calculate the mode, size, and alignment for TYPE.
   For an array type, calculate the element separation as well.
   Record TYPE on the chain of permanent or temporary types
   so that dbxout will find out about it.

   TYPE_SIZE of a type is nonzero if the type has been laid out already.
   layout_type does nothing on such a type.

   If the type is incomplete, its TYPE_SIZE remains zero.  */

void
layout_type (tree type)
{
  gcc_assert (type);

  if (type == error_mark_node)
    return;

  /* Do nothing if type has been laid out before.  */
  if (TYPE_SIZE (type))
    return;

  switch (TREE_CODE (type))
    {
    case LANG_TYPE:
      /* This kind of type is the responsibility
         of the language-specific code.  */
      gcc_unreachable ();

    case BOOLEAN_TYPE:  /* Used for Java, Pascal, and Chill.  */
      if (TYPE_PRECISION (type) == 0)
        TYPE_PRECISION (type) = 1; /* default to one byte/boolean.  */

      /* ... fall through ...  */

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
          && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
        TYPE_UNSIGNED (type) = 1;

      SET_TYPE_MODE (type,
                     smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case REAL_TYPE:
      SET_TYPE_MODE (type,
                     mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case FIXED_POINT_TYPE:
      /* TYPE_MODE (type) has been set already.  */
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case COMPLEX_TYPE:
      TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
      SET_TYPE_MODE (type,
                     mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
                                    (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
                                     ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
                                    0));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case VECTOR_TYPE:
      {
        int nunits = TYPE_VECTOR_SUBPARTS (type);
        tree innertype = TREE_TYPE (type);

        gcc_assert (!(nunits & (nunits - 1)));

        /* Find an appropriate mode for the vector type.  */
        if (TYPE_MODE (type) == VOIDmode)
          SET_TYPE_MODE (type,
                         mode_for_vector (TYPE_MODE (innertype), nunits));

        TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
        TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
        TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
                                                 TYPE_SIZE_UNIT (innertype),
                                                 size_int (nunits));
        TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
                                            bitsize_int (nunits));

        /* Always naturally align vectors.  This prevents ABI changes
           depending on whether or not native vector modes are supported.  */
        TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
        break;
      }

    case VOID_TYPE:
      /* This is an incomplete type and so doesn't have a size.  */
      TYPE_ALIGN (type) = 1;
      TYPE_USER_ALIGN (type) = 0;
      SET_TYPE_MODE (type, VOIDmode);
      break;

    case OFFSET_TYPE:
      TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
      /* A pointer might be MODE_PARTIAL_INT,
         but ptrdiff_t must be integral.  */
      SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
      TYPE_PRECISION (type) = POINTER_SIZE;
      break;

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      /* It's hard to see what the mode and size of a function ought to
         be, but we do know the alignment is FUNCTION_BOUNDARY, so
         make it consistent with that.  */
      SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
      TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
      TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        enum machine_mode mode = TYPE_MODE (type);
        if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
          {
            addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
            mode = targetm.addr_space.address_mode (as);
          }

        TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        TYPE_UNSIGNED (type) = 1;
        TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
      }
      break;

    case ARRAY_TYPE:
      {
        tree index = TYPE_DOMAIN (type);
        tree element = TREE_TYPE (type);

        build_pointer_type (element);

        /* We need to know both bounds in order to compute the size.  */
        if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
            && TYPE_SIZE (element))
          {
            tree ub = TYPE_MAX_VALUE (index);
            tree lb = TYPE_MIN_VALUE (index);
            tree element_size = TYPE_SIZE (element);
            tree length;

            /* Make sure that an array of zero-sized elements is
               zero-sized regardless of its extent.  */
            if (integer_zerop (element_size))
              length = size_zero_node;

            /* The computation should happen in the original signedness so
               that (possible) negative values are handled appropriately
               when determining overflow.  */
            else
              length
                = fold_convert (sizetype,
                                size_binop (PLUS_EXPR,
                                            build_int_cst (TREE_TYPE (lb), 1),
                                            size_binop (MINUS_EXPR, ub, lb)));

            TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
                                           fold_convert (bitsizetype,
                                                         length));

            /* If we know the size of the element, calculate the total size
               directly, rather than do some division thing below.  This
               optimization helps Fortran assumed-size arrays (where the
               size of the array is determined at runtime) substantially.  */
            if (TYPE_SIZE_UNIT (element))
              TYPE_SIZE_UNIT (type)
                = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
          }

        /* Now round the alignment and size,
           using machine-dependent criteria if any.  */

#ifdef ROUND_TYPE_ALIGN
        TYPE_ALIGN (type)
          = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
#else
        TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
        TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
        SET_TYPE_MODE (type, BLKmode);
        if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
            && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
#endif
            /* BLKmode elements force BLKmode aggregate;
               else extract/store fields may lose.  */
            && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
                || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
          {
            SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
                                                 TYPE_SIZE (type)));
            if (TYPE_MODE (type) != BLKmode
                && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
                && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
              {
                TYPE_NO_FORCE_BLK (type) = 1;
                SET_TYPE_MODE (type, BLKmode);
              }
          }
        /* When the element size is constant, check that it is at least as
           large as the element alignment.  */
        if (TYPE_SIZE_UNIT (element)
            && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
            /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
               TYPE_ALIGN_UNIT.  */
            && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
            && !integer_zerop (TYPE_SIZE_UNIT (element))
            && compare_tree_int (TYPE_SIZE_UNIT (element),
                                 TYPE_ALIGN_UNIT (element)) < 0)
          error ("alignment of array elements is greater than element size");
        break;
      }
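
    /* Worked example for the ARRAY_TYPE case above, assuming 32-bit
       ints: for int a[10] the domain is [0, 9], so length
       = 9 - 0 + 1 = 10, TYPE_SIZE = 32 * 10 = 320 bits and
       TYPE_SIZE_UNIT = 4 * 10 = 40 bytes; the alignment is that of
       the element type.  */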

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
        tree field;
        record_layout_info rli;

        /* Initialize the layout information.  */
        rli = start_record_layout (type);

        /* If this is a QUAL_UNION_TYPE, we want to process the fields
           in reverse order when building the COND_EXPR that denotes
           its size.  We reverse them again later.  */
        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Place all the fields.  */
        for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
          place_field (rli, field);

        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Finish laying out the record.  */
        finish_record_layout (rli, /*free_p=*/true);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
     records and unions, finish_record_layout already called this
     function.  */
  if (TREE_CODE (type) != RECORD_TYPE
      && TREE_CODE (type) != UNION_TYPE
      && TREE_CODE (type) != QUAL_UNION_TYPE)
    finalize_type_size (type);

  /* We should never see alias sets on incomplete aggregates.  And we
     should not call layout_type on aggregates that are already
     complete.  */
  if (AGGREGATE_TYPE_P (type))
    gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
}

/* Vector types need to re-check the target flags each time we report
   the machine mode.  We need to do this because the target attribute
   can change the result of vector_mode_supported_p and
   have_regs_of_mode on a per-function basis.  Thus the TYPE_MODE of a
   VECTOR_TYPE can change on a per-function basis.  */
/* ??? Possibly a better solution is to run through all the types
   referenced by a function and re-compute the TYPE_MODE once, rather
   than make the TYPE_MODE macro call a function.  */

enum machine_mode
vector_type_mode (const_tree t)
{
  enum machine_mode mode;

  gcc_assert (TREE_CODE (t) == VECTOR_TYPE);

  mode = t->type_common.mode;
  if (VECTOR_MODE_P (mode)
      && (!targetm.vector_mode_supported_p (mode)
          || !have_regs_of_mode[mode]))
    {
      enum machine_mode innermode = TREE_TYPE (t)->type_common.mode;

      /* For integers, try mapping it to a same-sized scalar mode.  */
      if (GET_MODE_CLASS (innermode) == MODE_INT)
        {
          mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
                                * GET_MODE_BITSIZE (innermode), MODE_INT, 0);

          if (mode != VOIDmode && have_regs_of_mode[mode])
            return mode;
        }

      return BLKmode;
    }

  return mode;
}
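
/* For example (a sketch): if V2SImode is not supported by the target,
   a 2 x 32-bit integer vector type falls back to the 64-bit scalar
   DImode when registers of that mode exist, and to BLKmode
   otherwise.  */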

/* Create and return a type for signed integers of PRECISION bits.  */

tree
make_signed_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_signed_type (type);
  return type;
}

/* Create and return a type for unsigned integers of PRECISION bits.  */

tree
make_unsigned_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_unsigned_type (type);
  return type;
}

/* Create and return a fixed-point fract type of PRECISION bits;
   UNSIGNEDP selects an unsigned type and SATP a saturating one.  */

tree
make_fract_type (int precision, int unsignedp, int satp)
{
  tree type = make_node (FIXED_POINT_TYPE);

  TYPE_PRECISION (type) = precision;

  if (satp)
    TYPE_SATURATING (type) = 1;

  /* Lay out the type: set its alignment, size, etc.  */
  if (unsignedp)
    {
      TYPE_UNSIGNED (type) = 1;
      SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
    }
  else
    SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
  layout_type (type);

  return type;
}

/* Create and return a fixed-point accum type of PRECISION bits;
   UNSIGNEDP selects an unsigned type and SATP a saturating one.  */

tree
make_accum_type (int precision, int unsignedp, int satp)
{
  tree type = make_node (FIXED_POINT_TYPE);

  TYPE_PRECISION (type) = precision;

  if (satp)
    TYPE_SATURATING (type) = 1;

  /* Lay out the type: set its alignment, size, etc.  */
  if (unsignedp)
    {
      TYPE_UNSIGNED (type) = 1;
      SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
    }
  else
    SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
  layout_type (type);

  return type;
}

/* Initialize sizetypes so layout_type can use them.  */

void
initialize_sizetypes (void)
{
  int precision, bprecision;

  /* Get the precision of sizetype from the SIZE_TYPE target macro.  */
  if (strcmp (SIZE_TYPE, "unsigned int") == 0)
    precision = INT_TYPE_SIZE;
  else if (strcmp (SIZE_TYPE, "long unsigned int") == 0)
    precision = LONG_TYPE_SIZE;
  else if (strcmp (SIZE_TYPE, "long long unsigned int") == 0)
    precision = LONG_LONG_TYPE_SIZE;
  else if (strcmp (SIZE_TYPE, "short unsigned int") == 0)
    precision = SHORT_TYPE_SIZE;
  else
    gcc_unreachable ();

  bprecision
    = MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
  bprecision
    = GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT));
  if (bprecision > HOST_BITS_PER_WIDE_INT * 2)
    bprecision = HOST_BITS_PER_WIDE_INT * 2;

  /* Create stubs for sizetype and bitsizetype so we can create constants.  */
  sizetype = make_node (INTEGER_TYPE);
  TYPE_NAME (sizetype) = get_identifier ("sizetype");
  TYPE_PRECISION (sizetype) = precision;
  TYPE_UNSIGNED (sizetype) = 1;
  TYPE_IS_SIZETYPE (sizetype) = 1;
  bitsizetype = make_node (INTEGER_TYPE);
  TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
  TYPE_PRECISION (bitsizetype) = bprecision;
  TYPE_UNSIGNED (bitsizetype) = 1;
  TYPE_IS_SIZETYPE (bitsizetype) = 1;

  /* Now lay out both types manually.  */
  SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT));
  TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype));
  TYPE_SIZE (sizetype) = bitsize_int (precision);
  TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
  set_min_and_max_values_for_integral_type (sizetype, precision,
                                            /*is_unsigned=*/true);
  /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
     sign-extended in a way consistent with force_fit_type.  */
  TYPE_MAX_VALUE (sizetype)
    = double_int_to_tree (sizetype,
                          tree_to_double_int (TYPE_MAX_VALUE (sizetype)));

  SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
  TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
  TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
  TYPE_SIZE_UNIT (bitsizetype)
    = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
  set_min_and_max_values_for_integral_type (bitsizetype, bprecision,
                                            /*is_unsigned=*/true);
  /* bitsizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
     sign-extended in a way consistent with force_fit_type.  */
  TYPE_MAX_VALUE (bitsizetype)
    = double_int_to_tree (bitsizetype,
                          tree_to_double_int (TYPE_MAX_VALUE (bitsizetype)));

  /* Create the signed variants of *sizetype.  */
  ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
  TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
  TYPE_IS_SIZETYPE (ssizetype) = 1;
  sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
  TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
  TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
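
/* Worked example, assuming an ILP32 target where SIZE_TYPE is
   "long unsigned int" and LONG_TYPE_SIZE is 32: precision is 32, so
   bprecision starts as MIN (32 + 3 + 1, MAX_FIXED_MODE_SIZE) = 36 and
   is then widened to the next integer mode, giving bitsizetype 64 bits
   (DImode).  Byte counts thus fit in 32 bits, bit counts in 64.  */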

/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
   or BOOLEAN_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
   for TYPE, based on the PRECISION and whether or not the TYPE
   IS_UNSIGNED.  PRECISION need not correspond to a width supported
   natively by the hardware; for example, on a machine with 8-bit,
   16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
   61.  */

void
set_min_and_max_values_for_integral_type (tree type,
                                          int precision,
                                          bool is_unsigned)
{
  tree min_value;
  tree max_value;

  if (is_unsigned)
    {
      min_value = build_int_cst (type, 0);
      max_value
        = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
                              ? -1
                              : ((HOST_WIDE_INT) 1 << precision) - 1,
                              precision - HOST_BITS_PER_WIDE_INT > 0
                              ? ((unsigned HOST_WIDE_INT) ~0
                                 >> (HOST_BITS_PER_WIDE_INT
                                     - (precision - HOST_BITS_PER_WIDE_INT)))
                              : 0);
    }
  else
    {
      min_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? 0
                               : (HOST_WIDE_INT) (-1) << (precision - 1)),
                              (((HOST_WIDE_INT) (-1)
                                << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                                    ? precision - HOST_BITS_PER_WIDE_INT - 1
                                    : 0))));
      max_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? -1
                               : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
                              (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                               ? (((HOST_WIDE_INT) 1
                                   << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
                               : 0));
    }

  TYPE_MIN_VALUE (type) = min_value;
  TYPE_MAX_VALUE (type) = max_value;
}
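
/* For example, with PRECISION == 7 this yields [0, 127] when
   IS_UNSIGNED, and [-64, 63] otherwise, regardless of the widths the
   hardware supports natively.  */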

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  Used when make_signed_type won't do
   because the tree code is not INTEGER_TYPE.
   E.g. for Pascal, when the -fsigned-char option is given.  */

void
fixup_signed_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types,
     as they are used by i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/false);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  This is used both in `make_unsigned_type'
   and for enumeral types.  */

void
fixup_unsigned_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types,
     as they are used by i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  TYPE_UNSIGNED (type) = 1;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/true);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Find the best machine mode to use when referencing a bit field of length
   BITSIZE bits starting at BITPOS.

   BITREGION_START is the bit position of the first bit in this
   sequence of bit fields.  BITREGION_END is the last bit in this
   sequence.  If these two fields are non-zero, we should restrict the
   memory access to a chunk of at most
   BITREGION_END - BITREGION_START + 1 bits.  Otherwise, we are allowed
   to touch any adjacent non bit-fields.

   The underlying object is known to be aligned to a boundary of ALIGN bits.
   If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
   larger than LARGEST_MODE (usually SImode).

   If no mode meets all these conditions, we return VOIDmode.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
   smallest mode meeting these conditions.

   If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
   largest mode (but a mode no wider than UNITS_PER_WORD) that meets
   all the conditions.

   If VOLATILEP is true the narrow_volatile_bitfield target hook is used to
   decide which of the above modes should be used.  */

enum machine_mode
get_best_mode (int bitsize, int bitpos,
               unsigned HOST_WIDE_INT bitregion_start,
               unsigned HOST_WIDE_INT bitregion_end,
               unsigned int align,
               enum machine_mode largest_mode, int volatilep)
{
  enum machine_mode mode;
  unsigned int unit = 0;
  unsigned HOST_WIDE_INT maxbits;

  /* If unset, no restriction.  */
  if (!bitregion_end)
    maxbits = MAX_FIXED_MODE_SIZE;
  else
    maxbits = (bitregion_end - bitregion_start) % align + 1;

  /* Find the narrowest integer mode that contains the bit field.  */
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      unit = GET_MODE_BITSIZE (mode);
      if (unit == GET_MODE_PRECISION (mode)
          && (bitpos % unit) + bitsize <= unit)
        break;
    }

  if (mode == VOIDmode
      /* It is tempting to omit the following line
         if STRICT_ALIGNMENT is true.
         But that is incorrect, since if the bitfield uses part of 3 bytes
         and we use a 4-byte mode, we could get a spurious segv
         if the extra 4th byte is past the end of memory.
         (Though at least one Unix compiler ignores this problem:
         that on the Sequent 386 machine.)  */
      || MIN (unit, BIGGEST_ALIGNMENT) > align
      || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
    return VOIDmode;

  if ((SLOW_BYTE_ACCESS && ! volatilep)
      || (volatilep && !targetm.narrow_volatile_bitfield ()))
    {
      enum machine_mode wide_mode = VOIDmode, tmode;

      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
           tmode = GET_MODE_WIDER_MODE (tmode))
        {
          unit = GET_MODE_BITSIZE (tmode);
          if (unit == GET_MODE_PRECISION (tmode)
              && bitpos / unit == (bitpos + bitsize - 1) / unit
              && unit <= BITS_PER_WORD
              && unit <= MIN (align, BIGGEST_ALIGNMENT)
              && unit <= maxbits
              && (largest_mode == VOIDmode
                  || unit <= GET_MODE_BITSIZE (largest_mode)))
            wide_mode = tmode;
        }

      if (wide_mode != VOIDmode)
        return wide_mode;
    }

  return mode;
}
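
/* Worked example, assuming byte-addressable memory with 8-bit QImode
   and SLOW_BYTE_ACCESS false: for a non-volatile bit field with
   BITSIZE == 6 at BITPOS == 10 in an object with ALIGN == 32, the
   narrowest containing mode is QImode, since (10 % 8) + 6 == 8 <= 8;
   the access touches only the second byte of the object.  */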

/* Get the minimal and maximal values for MODE (signed or unsigned
   depending on SIGN).  The returned constants are made to be usable
   in TARGET_MODE.  */

void
get_mode_bounds (enum machine_mode mode, int sign,
                 enum machine_mode target_mode,
                 rtx *mmin, rtx *mmax)
{
  unsigned size = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT min_val, max_val;

  gcc_assert (size <= HOST_BITS_PER_WIDE_INT);

  if (sign)
    {
      min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
    }
  else
    {
      min_val = 0;
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
    }

  *mmin = gen_int_mode (min_val, target_mode);
  *mmax = gen_int_mode (max_val, target_mode);
}
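
/* For example, get_mode_bounds for an 8-bit QImode yields [-128, 127]
   when SIGN is nonzero and [0, 255] otherwise, as constants valid in
   TARGET_MODE.  */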

#include "gt-stor-layout.h"