OpenCores Subversion repository: openrisc
URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk
File: openrisc/trunk/gnu-dev/or1k-gcc/gcc/dwarf2cfi.c — blame information for
rev 702 (the lines below are attributed to rev 684, author jeremybenn).

/* Dwarf2 Call Frame Information helper routines.
   Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "version.h"
#include "flags.h"
#include "rtl.h"
#include "function.h"
#include "basic-block.h"
#include "dwarf2.h"
#include "dwarf2out.h"
#include "dwarf2asm.h"
#include "ggc.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "tree-pass.h"

#include "except.h"             /* expand_builtin_dwarf_sp_column */
#include "expr.h"               /* init_return_column_size */
#include "regs.h"               /* expand_builtin_init_dwarf_reg_sizes */
#include "output.h"             /* asm_out_file */
#include "debug.h"              /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */


/* ??? Poison these here until it can be done generically.  They've been
   totally replaced in this file; make sure it stays that way.  */
#undef DWARF2_UNWIND_INFO
#undef DWARF2_FRAME_INFO
#if (GCC_VERSION >= 3000)
 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
#endif

#ifndef INCOMING_RETURN_ADDR_RTX
#define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
#endif

/* Maximum size (in bytes) of an artificially generated label.  */
#define MAX_ARTIFICIAL_LABEL_BYTES      30

/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_def_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  */
  cfi_vec reg_save;
} dw_cfi_row;

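/* For example (illustrative only; register columns are target-specific),
   after a prologue that has dropped the stack pointer by 16 bytes and
   stored a call-saved register 8 bytes below the CFA, the corresponding
   row would have cfa.reg equal to the stack pointer's column with
   cfa.offset == 16, and the reg_save entry for that register's column
   pointing at a DW_CFA_offset instruction.  */
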
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data_struct {
  rtx orig_reg;
  rtx saved_in_reg;
} reg_saved_in_data;

DEF_VEC_O (reg_saved_in_data);
DEF_VEC_ALLOC_O (reg_saved_in_data, heap);

/* Since we no longer have a proper CFG, we're going to create a facsimile
   of one on the fly while processing the frame-related insns.

   We create dw_trace_info structures for each extended basic block beginning
   and ending at a "save point".  Save points are labels, barriers, certain
   notes, and of course the beginning and end of the function.

   As we encounter control transfer insns, we propagate the "current"
   row state across the edges to the starts of traces.  When checking is
   enabled, we validate that we propagate the same data from all sources.

   All traces are members of the TRACE_INFO array, in the order in which
   they appear in the instruction stream.

   All save points are present in the TRACE_INDEX hash, mapping the insn
   starting a trace to the dw_trace_info describing the trace.  */

typedef struct
{
  /* The insn that begins the trace.  */
  rtx head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  VEC(reg_saved_in_data, heap) *regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
} dw_trace_info;

DEF_VEC_O (dw_trace_info);
DEF_VEC_ALLOC_O (dw_trace_info, heap);

typedef dw_trace_info *dw_trace_info_ref;

DEF_VEC_P (dw_trace_info_ref);
DEF_VEC_ALLOC_P (dw_trace_info_ref, heap);

/* The variables making up the pseudo-cfg, as described above.  */
static VEC (dw_trace_info, heap) *trace_info;
static VEC (dw_trace_info_ref, heap) *trace_work_list;
static htab_t trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

static GTY(()) reg_saved_in_data *cie_return_save;

static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

typedef struct {
  rtx reg;
  rtx saved_reg;
  HOST_WIDE_INT cfa_offset;
} queued_reg_save;

DEF_VEC_O (queued_reg_save);
DEF_VEC_ALLOC_O (queued_reg_save, heap);

static VEC(queued_reg_save, heap) *queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;

/* Hook used by __throw.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}

/* MEM is a memory reference for the register size table, each element of
   which has mode MODE.  Initialize column C as a return address column.  */

static void
init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
{
  HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
  HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
  emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
}

/* Generate code to initialize the register size table.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      unsigned int dnum = DWARF_FRAME_REGNUM (i);
      unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);

      if (rnum < DWARF_FRAME_REGISTERS)
        {
          HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
          enum machine_mode save_mode = reg_raw_mode[i];
          HOST_WIDE_INT size;

          if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
            save_mode = choose_hard_reg_mode (i, 1, true);
          if (dnum == DWARF_FRAME_RETURN_COLUMN)
            {
              if (save_mode == VOIDmode)
                continue;
              wrote_return_column = true;
            }
          size = GET_MODE_SIZE (save_mode);
          if (offset < 0)
            continue;

          emit_move_insn (adjust_address (mem, mode, offset),
                          gen_int_mode (size, mode));
        }
    }

  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}


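/* Hash a dw_trace_info by the INSN_UID of the insn that starts the trace,
   for use in the TRACE_INDEX hash table.  */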
static hashval_t
dw_trace_info_hash (const void *ptr)
{
  const dw_trace_info *ti = (const dw_trace_info *) ptr;
  return INSN_UID (ti->head);
}

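/* Equality function for TRACE_INDEX: two traces are equal iff they begin
   at the same insn.  */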
static int
dw_trace_info_eq (const void *ptr_a, const void *ptr_b)
{
  const dw_trace_info *a = (const dw_trace_info *) ptr_a;
  const dw_trace_info *b = (const dw_trace_info *) ptr_b;
  return a->head == b->head;
}

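/* Return the dw_trace_info for the trace that begins at INSN, by looking
   INSN up in TRACE_INDEX.  */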
static dw_trace_info *
get_trace_info (rtx insn)
{
  dw_trace_info dummy;
  dummy.head = insn;
  return (dw_trace_info *)
    htab_find_with_hash (trace_index, &dummy, INSN_UID (insn));
}

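/* Return true if INSN is a "save point", i.e. a point at which a new
   trace should begin.  */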
static bool
save_point_p (rtx insn)
{
  /* Labels, except those that are really jump tables.  */
  if (LABEL_P (insn))
    return inside_basic_block_p (insn);

  /* We split traces at the prologue/epilogue notes because those
     are points at which the unwind info is usually stable.  This
     makes it easier to find spots with identical unwind info so
     that we can use remember/restore_state opcodes.  */
  if (NOTE_P (insn))
    switch (NOTE_KIND (insn))
      {
      case NOTE_INSN_PROLOGUE_END:
      case NOTE_INSN_EPILOGUE_BEG:
        return true;
      }

  return false;
}

/* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */

static inline HOST_WIDE_INT
div_data_align (HOST_WIDE_INT off)
{
  HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
  gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
  return r;
}

/* Return true if we need a signed version of a given opcode
   (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended).  */

static inline bool
need_data_align_sf_opcode (HOST_WIDE_INT off)
{
  return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
}

/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}

/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_alloc_cleared_dw_cfi_row ();

  row->cfa.reg = INVALID_REGNUM;

  return row;
}

/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc_dw_cfi_row ();

  *dst = *src;
  dst->reg_save = VEC_copy (dw_cfi_ref, gc, src->reg_save);

  return dst;
}

/* Generate a new label for the CFI info to refer to.  */

static char *
dwarf2out_cfi_label (void)
{
  int num = dwarf2out_cfi_label_num++;
  char label[20];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}

/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
}

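/* Emit a DW_CFA_GNU_args_size opcode recording SIZE bytes of outgoing
   argument space.  */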
static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}

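/* Emit a restore opcode for register column REG, using DW_CFA_restore for
   columns that fit in the opcode's low 6 bits and DW_CFA_restore_extended
   otherwise.  */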
static void
add_cfi_restore (unsigned reg)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  add_cfi (cfi);
}

/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  if (VEC_length (dw_cfi_ref, row->reg_save) <= column)
    VEC_safe_grow_cleared (dw_cfi_ref, gc, row->reg_save, column + 1);
  VEC_replace (dw_cfi_ref, row->reg_save, column, cfi);
}

/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */

static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
{
  struct dw_loc_descr_struct *ptr;
  cfa->offset = 0;
  cfa->base_offset = 0;
  cfa->indirect = 0;
  cfa->reg = -1;

  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
        {
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          cfa->reg = op - DW_OP_reg0;
          break;
        case DW_OP_regx:
          cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
          break;
        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          cfa->reg = op - DW_OP_breg0;
          cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
          break;
        case DW_OP_bregx:
          cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
          cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
          break;
        case DW_OP_deref:
          cfa->indirect = 1;
          break;
        case DW_OP_plus_uconst:
          cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
          break;
        default:
          gcc_unreachable ();
        }
    }
}

/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}

/* Determine if two dw_cfa_location structures define the same data.  */

bool
cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
{
  return (loc1->reg == loc2->reg
          && loc1->offset == loc2->offset
          && loc1->indirect == loc2->indirect
          && (loc1->indirect == 0
              || loc1->base_offset == loc2->base_offset));
}

/* Determine if two CFI operands are identical.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      return (a->dw_cfi_addr == b->dw_cfi_addr
              || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}

/* Determine if two CFI entries are identical.  */

static bool
cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
{
  enum dwarf_call_frame_info opc;

  /* Make things easier for our callers, including missing operands.  */
  if (a == b)
    return true;
  if (a == NULL || b == NULL)
    return false;

  /* Obviously, the opcodes must match.  */
  opc = a->dw_cfi_opc;
  if (opc != b->dw_cfi_opc)
    return false;

  /* Compare the two operands, re-using the type of the operands as
     already exposed elsewhere.  */
  return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
                             &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
          && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
                                &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
}

/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
        return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  n_a = VEC_length (dw_cfi_ref, a->reg_save);
  n_b = VEC_length (dw_cfi_ref, b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
        r_a = VEC_index (dw_cfi_ref, a->reg_save, i);
      if (i < n_b)
        r_b = VEC_index (dw_cfi_ref, b->reg_save, i);

      if (!cfi_equal_p (r_a, r_b))
        return false;
    }

  return true;
}

/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
         the CFA register did not change but the offset did.  The data
         factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
         in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
        cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
        cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }

#ifndef MIPS_DEBUGGING_INFO  /* SGI dbx thinks this means no offset.  */
  else if (new_cfa->offset == old_cfa->offset
           && old_cfa->reg != INVALID_REGNUM
           && !new_cfa->indirect
           && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
         indicating the CFA register has changed to <register> but the
         offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
#endif

  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
         indicating the CFA register has changed to <register> with
         the specified offset.  The data factoring for DW_CFA_def_cfa_sf
         happens in output_cfi, or in the assembler via the .cfi_def_cfa
         directive.  */
      if (new_cfa->offset < 0)
        cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
        cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
         calculate the CFA using a full location expression since no
         register-offset pair is available.  */
      struct dw_loc_descr_struct *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}

/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
                          ? cfi : NULL);

      add_cfi (cfi);
    }
}

/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is -1, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
        = build_cfa_aligned_loc (&cur_row->cfa, offset,
                                 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      if (need_data_align_sf_opcode (offset))
        cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
        cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
        cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
         DW_CFA_restore, we never expect to see something like that
         in a prologue.  This is more likely to be a bug.  A backend
         can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}

/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
         direction of stack growth) to a change in stack pointer.  */
#ifndef STACK_GROWS_DOWNWARD
      delta = -delta;
#endif
      cur_cfa->offset += delta;
    }
}

/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
         CFI note and insert there.  Given that the stack changed for the
         args_size change, there *must* be such a note in between here and
         the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}

/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
   used in places where rtl is prohibited.  */

static inline unsigned
dwf_regno (const_rtx reg)
{
  return DWARF_FRAME_REGNUM (REGNO (reg));
}

/* Compare X and Y for equivalence.  The inputs may be REGs or PC_RTX.  */

static bool
compare_reg_or_pc (rtx x, rtx y)
{
  if (REG_P (x) && REG_P (y))
    return REGNO (x) == REGNO (y);
  return x == y;
}

/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  FOR_EACH_VEC_ELT (reg_saved_in_data, cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
        if (dest == NULL)
          VEC_unordered_remove (reg_saved_in_data,
                                cur_trace->regs_saved_in_regs, i);
        else
          elt->saved_in_reg = dest;
        return;
      }

  if (dest == NULL)
    return;

  elt = VEC_safe_push (reg_saved_in_data, heap,
                       cur_trace->regs_saved_in_regs, NULL);
  elt->orig_reg = src;
  elt->saved_in_reg = dest;
}

/* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
   SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */

static void
queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
{
  queued_reg_save *q;
  size_t i;

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      goto found;

  q = VEC_safe_push (queued_reg_save, heap, queued_reg_saves, NULL);

 found:
  q->reg = reg;
  q->saved_reg = sreg;
  q->cfa_offset = offset;
}

/* Output all the entries in QUEUED_REG_SAVES.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      record_reg_saved_in_reg (q->saved_reg, q->reg);

      if (q->reg == pc_rtx)
        reg = DWARF_FRAME_RETURN_COLUMN;
      else
        reg = dwf_regno (q->reg);
      if (q->saved_reg)
        sreg = dwf_regno (q->saved_reg);
      else
        sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  VEC_truncate (queued_reg_save, queued_reg_saves, 0);
}

/* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
   location for?  Or, does it clobber a register which we've previously
   said that some other register is saved in, and for which we now
   have a new location for?  */

static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      if (modified_in_p (q->reg, insn))
        return true;

      FOR_EACH_VEC_ELT (reg_saved_in_data,
                        cur_trace->regs_saved_in_regs, ir, rir)
        if (compare_reg_or_pc (q->reg, rir->orig_reg)
            && modified_in_p (rir->saved_in_reg, insn))
          return true;
    }

  return false;
}

/* What register, if any, is currently saved in REG?  */

static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_save, queued_reg_saves, i, q)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  FOR_EACH_VEC_ELT (reg_saved_in_data, cur_trace->regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  return NULL_RTX;
}

/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
        {
          cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
          pat = XEXP (pat, 0);
        }
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_regno (dest);
  gcc_assert (cur_cfa->indirect == 0);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note.  */

static void
dwarf2out_frame_debug_cfa_offset (rtx set)
{
  HOST_WIDE_INT offset;
  rtx src, addr, span;
  unsigned int sregno;

  src = XEXP (set, 1);
  addr = XEXP (set, 0);
  gcc_assert (MEM_P (addr));
  addr = XEXP (addr, 0);

  /* As documented, only consider extremely simple addresses.  */
  switch (GET_CODE (addr))
    {
    case REG:
      gcc_assert (dwf_regno (addr) == cur_cfa->reg);
      offset = -cur_cfa->offset;
      break;
    case PLUS:
      gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
      offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
      break;
    default:
      gcc_unreachable ();
    }

  if (src == pc_rtx)
    {
      span = NULL;
      sregno = DWARF_FRAME_RETURN_COLUMN;
    }
  else
    {
      span = targetm.dwarf_register_span (src);
      sregno = dwf_regno (src);
    }

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  if (!span)
    reg_save (sregno, INVALID_REGNUM, offset);
  else
    {
      /* We have a PARALLEL describing where the contents of SRC live.
         Queue register saves for each piece of the PARALLEL.  */
      int par_index;
      int limit;
      HOST_WIDE_INT span_offset = offset;

      gcc_assert (GET_CODE (span) == PARALLEL);

      limit = XVECLEN (span, 0);
      for (par_index = 0; par_index < limit; par_index++)
        {
          rtx elem = XVECEXP (span, 0, par_index);

          sregno = dwf_regno (elem);
          reg_save (sregno, INVALID_REGNUM, span_offset);
          span_offset += GET_MODE_SIZE (GET_MODE (elem));
        }
    }
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note.  */

static void
dwarf2out_frame_debug_cfa_register (rtx set)
{
  rtx src, dest;
  unsigned sregno, dregno;

  src = XEXP (set, 1);
  dest = XEXP (set, 0);

  record_reg_saved_in_reg (dest, src);
  if (src == pc_rtx)
    sregno = DWARF_FRAME_RETURN_COLUMN;
  else
    sregno = dwf_regno (src);

  dregno = dwf_regno (dest);

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  reg_save (sregno, dregno, 0);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
                          GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */

static void
dwarf2out_frame_debug_cfa_restore (rtx reg)
{
  unsigned int regno = dwf_regno (reg);

  add_cfi_restore (regno);
  update_row_reg_save (cur_row, regno, NULL);
}

/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
   ??? Perhaps we should note in the CIE where windows are saved (instead of
   assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}

/* Record call frame debugging information for an expression EXPR,
   which either sets SP or FP (adjusting how we calculate the frame
   address) or saves a register to the stack or another register.
   LABEL indicates the address of EXPR.

   This function encodes a state machine mapping rtxes to actions on
   cfa, cfa_store, and cfa_temp.reg.  We describe these rules so
   users need not read the source code.

  The High-Level Picture

  Changes in the register we use to calculate the CFA: Currently we
  assume that if you copy the CFA register into another register, we
  should take the other one as the new CFA register; this seems to
  work pretty well.  If it's wrong for some target, it's simple
  enough not to set RTX_FRAME_RELATED_P on the insn in question.

  Changes in the register we use for saving registers to the stack:
  This is usually SP, but not always.  Again, we deduce that if you
  copy SP into another register (and SP is not the CFA register),
  then the new register is the one we will be using for register
  saves.  This also seems to work.

  Register saves: There's not much guesswork about this one; if
  RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
  register save, and the register used to calculate the destination
  had better be the one we think we're using for this purpose.
  It's also assumed that a copy from a call-saved register to another
  register is saving that register if RTX_FRAME_RELATED_P is set on
  that instruction.  If the copy is from a call-saved register to
  the *same* register, that means that the register is now the same
  value as in the caller.

  Except: If the register being saved is the CFA register, and the
  offset is nonzero, we are saving the CFA, so we assume we have to
  use DW_CFA_def_cfa_expression.  If the offset is 0, we assume that
  the intent is to save the value of SP from the previous frame.

  In addition, if a register has previously been saved to a different
  register, a later save out of that second register is treated as a save
  of the original register (see the use of reg_saved_in below).

  Invariants / Summaries of Rules

  cfa          current rule for calculating the CFA.  It usually
               consists of a register and an offset.  This is
               actually stored in *cur_cfa, but abbreviated
               for the purposes of this documentation.
  cfa_store    register used by prologue code to save things to the stack
               cfa_store.offset is the offset from the value of
               cfa_store.reg to the actual CFA
  cfa_temp     register holding an integral value.  cfa_temp.offset
               stores the value, which will be used to adjust the
               stack pointer.  cfa_temp is also used like cfa_store,
               to track stores to the stack via fp or a temp reg.

  Rules  1- 4: Setting a register's value to cfa.reg or an expression
               with cfa.reg as the first operand changes the cfa.reg and its
               cfa.offset.  Rule 1 and 4 also set cfa_temp.reg and
               cfa_temp.offset.

  Rules  6- 9: Set a non-cfa.reg register value to a constant or an
               expression yielding a constant.  This sets cfa_temp.reg
               and cfa_temp.offset.

  Rule 5:      Create a new register cfa_store used to save items to the
               stack.

  Rules 10-14: Save a register to the stack.  Define offset as the
               difference of the original location and cfa_store's
               location (or cfa_temp's location if cfa_temp is used).

  Rules 16-20: If AND operation happens on sp in prologue, we assume
               stack is realigned.  We will use a group of DW_OP_XXX
               expressions to represent the location of the stored
               register instead of CFA+offset.

  The Rules

  "{a,b}" indicates a choice of a xor b.
  "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.

  Rule 1:
  (set <reg1> <reg2>:cfa.reg)
  effects: cfa.reg = <reg1>
           cfa.offset unchanged
           cfa_temp.reg = <reg1>
           cfa_temp.offset = cfa.offset

  Rule 2:
  (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
                              {<const_int>,<reg>:cfa_temp.reg}))
  effects: cfa.reg = sp if fp used
           cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
           cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
             if cfa_store.reg==sp

  Rule 3:
  (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
  effects: cfa.reg = fp
           cfa_offset += +/- <const_int>

  Rule 4:
  (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
  constraints: <reg1> != fp
               <reg1> != sp
  effects: cfa.reg = <reg1>
           cfa_temp.reg = <reg1>
           cfa_temp.offset = cfa.offset

  Rule 5:
  (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
  constraints: <reg1> != fp
               <reg1> != sp
  effects: cfa_store.reg = <reg1>
           cfa_store.offset = cfa.offset - cfa_temp.offset

  Rule 6:
  (set <reg> <const_int>)
  effects: cfa_temp.reg = <reg>
           cfa_temp.offset = <const_int>

  Rule 7:
  (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
  effects: cfa_temp.reg = <reg1>
           cfa_temp.offset |= <const_int>

  Rule 8:
  (set <reg> (high <exp>))
  effects: none

  Rule 9:
  (set <reg> (lo_sum <exp> <const_int>))
  effects: cfa_temp.reg = <reg>
           cfa_temp.offset = <const_int>

  Rule 10:
  (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
  effects: cfa_store.offset -= <const_int>
           cfa.offset = cfa_store.offset if cfa.reg == sp
           cfa.reg = sp
           cfa.base_offset = -cfa_store.offset

  Rule 11:
  (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
  effects: cfa_store.offset += -/+ mode_size(mem)
           cfa.offset = cfa_store.offset if cfa.reg == sp
           cfa.reg = sp
           cfa.base_offset = -cfa_store.offset

  Rule 12:
  (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))

       <reg2>)
  effects: cfa.reg = <reg1>
           cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset

  Rule 13:
  (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
  effects: cfa.reg = <reg1>
           cfa.base_offset = -{cfa_store,cfa_temp}.offset

  Rule 14:
  (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
  effects: cfa.reg = <reg1>
           cfa.base_offset = -cfa_temp.offset
           cfa_temp.offset -= mode_size(mem)

  Rule 15:
  (set <reg> {unspec, unspec_volatile})
  effects: target-dependent

  Rule 16:
  (set sp (and: sp <const_int>))
  constraints: cfa_store.reg == sp
  effects: cfun->fde.stack_realign = 1
           cfa_store.offset = 0
           fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp

  Rule 17:
  (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
  effects: cfa_store.offset += -/+ mode_size(mem)

  Rule 18:
  (set (mem ({pre_inc, pre_dec} sp)) fp)
  constraints: fde->stack_realign == 1
  effects: cfa_store.offset = 0
           cfa.reg != HARD_FRAME_POINTER_REGNUM

  Rule 19:
  (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
  constraints: fde->stack_realign == 1
               && cfa.offset == 0
               && cfa.indirect == 0
               && cfa.reg != HARD_FRAME_POINTER_REGNUM
  effects: Use DW_CFA_def_cfa_expression to define cfa
           cfa.reg == fde->drap_reg  */

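/* As an illustrative example (the exact RTL is target-specific), a simple
   prologue for a downward-growing stack such as

     (set sp (plus sp (const_int -16)))            -> Rule 2
     (set (mem (plus sp (const_int 8))) <reg>)     -> Rule 12
     (set fp sp)                                   -> Rule 1

   adjusts the CFA offset, records a register save relative to cfa_store,
   and then moves the CFA rule over to the frame pointer.  */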
static void
1456
dwarf2out_frame_debug_expr (rtx expr)
1457
{
1458
  rtx src, dest, span;
1459
  HOST_WIDE_INT offset;
1460
  dw_fde_ref fde;
1461
 
1462
  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1463
     the PARALLEL independently. The first element is always processed if
1464
     it is a SET. This is for backward compatibility.   Other elements
1465
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
1466
     flag is set in them.  */
1467
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1468
    {
1469
      int par_index;
1470
      int limit = XVECLEN (expr, 0);
1471
      rtx elem;
1472
 
1473
      /* PARALLELs have strict read-modify-write semantics, so we
1474
         ought to evaluate every rvalue before changing any lvalue.
1475
         It's cumbersome to do that in general, but there's an
1476
         easy approximation that is enough for all current users:
1477
         handle register saves before register assignments.  */
1478
      if (GET_CODE (expr) == PARALLEL)
1479
        for (par_index = 0; par_index < limit; par_index++)
1480
          {
1481
            elem = XVECEXP (expr, 0, par_index);
1482
            if (GET_CODE (elem) == SET
1483
                && MEM_P (SET_DEST (elem))
1484
                && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1485
              dwarf2out_frame_debug_expr (elem);
1486
          }
1487
 
1488
      for (par_index = 0; par_index < limit; par_index++)
1489
        {
1490
          elem = XVECEXP (expr, 0, par_index);
1491
          if (GET_CODE (elem) == SET
1492
              && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1493
              && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1494
            dwarf2out_frame_debug_expr (elem);
1495
        }
1496
      return;
1497
    }
1498
 
1499
  gcc_assert (GET_CODE (expr) == SET);
1500
 
1501
  src = SET_SRC (expr);
1502
  dest = SET_DEST (expr);
1503
 
1504
  if (REG_P (src))
1505
    {
1506
      rtx rsi = reg_saved_in (src);
1507
      if (rsi)
1508
        src = rsi;
1509
    }
1510
 
1511
  fde = cfun->fde;
1512
 
1513
  switch (GET_CODE (dest))
1514
    {
1515
    case REG:
1516
      switch (GET_CODE (src))
1517
        {
1518
          /* Setting FP from SP.  */
1519
        case REG:
1520
          if (cur_cfa->reg == dwf_regno (src))
1521
            {
1522
              /* Rule 1 */
1523
              /* Update the CFA rule wrt SP or FP.  Make sure src is
1524
                 relative to the current CFA register.
1525
 
1526
                 We used to require that dest be either SP or FP, but the
1527
                 ARM copies SP to a temporary register, and from there to
1528
                 FP.  So we just rely on the backends to only set
1529
                 RTX_FRAME_RELATED_P on appropriate insns.  */
1530
              cur_cfa->reg = dwf_regno (dest);
1531
              cur_trace->cfa_temp.reg = cur_cfa->reg;
1532
              cur_trace->cfa_temp.offset = cur_cfa->offset;
1533
            }
1534
          else
1535
            {
1536
              /* Saving a register in a register.  */
1537
              gcc_assert (!fixed_regs [REGNO (dest)]
1538
                          /* For the SPARC and its register window.  */
1539
                          || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1540
 
1541
              /* After stack is aligned, we can only save SP in FP
1542
                 if drap register is used.  In this case, we have
1543
                 to restore stack pointer with the CFA value and we
1544
                 don't generate this DWARF information.  */
1545
              if (fde
1546
                  && fde->stack_realign
1547
                  && REGNO (src) == STACK_POINTER_REGNUM)
1548
                gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1549
                            && fde->drap_reg != INVALID_REGNUM
1550
                            && cur_cfa->reg != dwf_regno (src));
1551
              else
1552
                queue_reg_save (src, dest, 0);
1553
            }
1554
          break;
1555
 
1556
        case PLUS:
1557
        case MINUS:
1558
        case LO_SUM:
1559
          if (dest == stack_pointer_rtx)
1560
            {
1561
              /* Rule 2 */
1562
              /* Adjusting SP.  */
1563
              switch (GET_CODE (XEXP (src, 1)))
1564
                {
1565
                case CONST_INT:
1566
                  offset = INTVAL (XEXP (src, 1));
1567
                  break;
1568
                case REG:
1569
                  gcc_assert (dwf_regno (XEXP (src, 1))
1570
                              == cur_trace->cfa_temp.reg);
1571
                  offset = cur_trace->cfa_temp.offset;
1572
                  break;
1573
                default:
1574
                  gcc_unreachable ();
1575
                }
1576
 
1577
              if (XEXP (src, 0) == hard_frame_pointer_rtx)
1578
                {
1579
                  /* Restoring SP from FP in the epilogue.  */
1580
                  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1581
                  cur_cfa->reg = dw_stack_pointer_regnum;
1582
                }
1583
              else if (GET_CODE (src) == LO_SUM)
1584
                /* Assume we've set the source reg of the LO_SUM from sp.  */
1585
                ;
1586
              else
1587
                gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1588
 
1589
              if (GET_CODE (src) != MINUS)
1590
                offset = -offset;
1591
              if (cur_cfa->reg == dw_stack_pointer_regnum)
1592
                cur_cfa->offset += offset;
1593
              if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1594
                cur_trace->cfa_store.offset += offset;
1595
            }
1596
          else if (dest == hard_frame_pointer_rtx)
1597
            {
1598
              /* Rule 3 */
1599
              /* Either setting the FP from an offset of the SP,
1600
                 or adjusting the FP */
1601
              gcc_assert (frame_pointer_needed);
1602
 
1603
              gcc_assert (REG_P (XEXP (src, 0))
1604
                          && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1605
                          && CONST_INT_P (XEXP (src, 1)));
1606
              offset = INTVAL (XEXP (src, 1));
1607
              if (GET_CODE (src) != MINUS)
1608
                offset = -offset;
1609
              cur_cfa->offset += offset;
1610
              cur_cfa->reg = dw_frame_pointer_regnum;
1611
            }
1612
          else
1613
            {
1614
              gcc_assert (GET_CODE (src) != MINUS);
1615
 
1616
              /* Rule 4 */
1617
              if (REG_P (XEXP (src, 0))
1618
                  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
1619
                  && CONST_INT_P (XEXP (src, 1)))
1620
                {
1621
                  /* Setting a temporary CFA register that will be copied
1622
                     into the FP later on.  */
1623
                  offset = - INTVAL (XEXP (src, 1));
1624
                  cur_cfa->offset += offset;
1625
                  cur_cfa->reg = dwf_regno (dest);
1626
                  /* Or used to save regs to the stack.  */
1627
                  cur_trace->cfa_temp.reg = cur_cfa->reg;
1628
                  cur_trace->cfa_temp.offset = cur_cfa->offset;
1629
                }
1630
 
1631
              /* Rule 5 */
1632
              else if (REG_P (XEXP (src, 0))
1633
                       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1634
                       && XEXP (src, 1) == stack_pointer_rtx)
1635
                {
1636
                  /* Setting a scratch register that we will use instead
1637
                     of SP for saving registers to the stack.  */
1638
                  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1639
                  cur_trace->cfa_store.reg = dwf_regno (dest);
1640
                  cur_trace->cfa_store.offset
1641
                    = cur_cfa->offset - cur_trace->cfa_temp.offset;
1642
                }
1643
 
1644
              /* Rule 9 */
1645
              else if (GET_CODE (src) == LO_SUM
1646
                       && CONST_INT_P (XEXP (src, 1)))
1647
                {
1648
                  cur_trace->cfa_temp.reg = dwf_regno (dest);
1649
                  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
1650
                }
1651
              else
1652
                gcc_unreachable ();
1653
            }
1654
          break;
1655
 
1656
          /* Rule 6 */
1657
        case CONST_INT:
1658
          cur_trace->cfa_temp.reg = dwf_regno (dest);
1659
          cur_trace->cfa_temp.offset = INTVAL (src);
1660
          break;
1661
 
1662
          /* Rule 7 */
1663
        case IOR:
1664
          gcc_assert (REG_P (XEXP (src, 0))
1665
                      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
1666
                      && CONST_INT_P (XEXP (src, 1)));
1667
 
1668
          cur_trace->cfa_temp.reg = dwf_regno (dest);
1669
          cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
1670
          break;
1671
 
1672
          /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1673
             which will fill in all of the bits.  */
1674
          /* Rule 8 */
1675
        case HIGH:
1676
          break;
1677
 
1678
          /* Rule 15 */
1679
        case UNSPEC:
1680
        case UNSPEC_VOLATILE:
1681
          /* All unspecs should be represented by REG_CFA_* notes.  */
1682
          gcc_unreachable ();
1683
          return;
1684
 
1685
          /* Rule 16 */
1686
        case AND:
1687
          /* If this AND operation happens on the stack pointer in the prologue,
1688
             we assume the stack is realigned and we extract the
1689
             alignment.  */
1690
          if (fde && XEXP (src, 0) == stack_pointer_rtx)
1691
            {
1692
              /* We interpret reg_save differently with stack_realign set.
1693
                 Thus we must flush whatever we have queued first.  */
1694
              dwarf2out_flush_queued_reg_saves ();
1695
 
1696
              gcc_assert (cur_trace->cfa_store.reg
1697
                          == dwf_regno (XEXP (src, 0)));
1698
              fde->stack_realign = 1;
1699
              fde->stack_realignment = INTVAL (XEXP (src, 1));
1700
              cur_trace->cfa_store.offset = 0;
1701
 
1702
              if (cur_cfa->reg != dw_stack_pointer_regnum
1703
                  && cur_cfa->reg != dw_frame_pointer_regnum)
1704
                fde->drap_reg = cur_cfa->reg;
1705
            }
1706
          return;
1707
 
1708
        default:
1709
          gcc_unreachable ();
1710
        }
1711
      break;
1712
 
1713
    case MEM:
1714
 
1715
      /* Saving a register to the stack.  Make sure dest is relative to the
1716
         CFA register.  */
1717
      switch (GET_CODE (XEXP (dest, 0)))
1718
        {
1719
          /* Rule 10 */
1720
          /* With a push.  */
1721
        case PRE_MODIFY:
1722
        case POST_MODIFY:
1723
          /* We can't handle variable size modifications.  */
1724
          gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
1725
                      == CONST_INT);
1726
          offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));
1727
 
1728
          gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
1729
                      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1730
 
1731
          cur_trace->cfa_store.offset += offset;
1732
          if (cur_cfa->reg == dw_stack_pointer_regnum)
1733
            cur_cfa->offset = cur_trace->cfa_store.offset;
1734
 
1735
          if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
1736
            offset -= cur_trace->cfa_store.offset;
1737
          else
1738
            offset = -cur_trace->cfa_store.offset;
1739
          break;
1740
 
1741
          /* Rule 11 */
1742
        case PRE_INC:
1743
        case PRE_DEC:
1744
        case POST_DEC:
1745
          offset = GET_MODE_SIZE (GET_MODE (dest));
1746
          if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
1747
            offset = -offset;
1748
 
1749
          gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
1750
                       == STACK_POINTER_REGNUM)
1751
                      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
1752
 
1753
          cur_trace->cfa_store.offset += offset;
1754
 
1755
          /* Rule 18: If stack is aligned, we will use FP as a
1756
             reference to represent the address of the stored
1757
             register.  */
1758
          if (fde
1759
              && fde->stack_realign
1760
              && REG_P (src)
1761
              && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
1762
            {
1763
              gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
1764
              cur_trace->cfa_store.offset = 0;
1765
            }
1766
 
1767
          if (cur_cfa->reg == dw_stack_pointer_regnum)
1768
            cur_cfa->offset = cur_trace->cfa_store.offset;
1769
 
1770
          if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
1771
            offset += -cur_trace->cfa_store.offset;
1772
          else
1773
            offset = -cur_trace->cfa_store.offset;
1774
          break;
1775
 
1776
          /* Rule 12 */
1777
          /* With an offset.  */
1778
        case PLUS:
1779
        case MINUS:
1780
        case LO_SUM:
1781
          {
1782
            unsigned int regno;
1783
 
1784
            gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
1785
                        && REG_P (XEXP (XEXP (dest, 0), 0)));
1786
            offset = INTVAL (XEXP (XEXP (dest, 0), 1));
1787
            if (GET_CODE (XEXP (dest, 0)) == MINUS)
1788
              offset = -offset;
1789
 
1790
            regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
1791
 
1792
            if (cur_cfa->reg == regno)
1793
              offset -= cur_cfa->offset;
1794
            else if (cur_trace->cfa_store.reg == regno)
1795
              offset -= cur_trace->cfa_store.offset;
1796
            else
1797
              {
1798
                gcc_assert (cur_trace->cfa_temp.reg == regno);
1799
                offset -= cur_trace->cfa_temp.offset;
1800
              }
1801
          }
1802
          break;
1803
 
1804
          /* Rule 13 */
1805
          /* Without an offset.  */
1806
        case REG:
1807
          {
1808
            unsigned int regno = dwf_regno (XEXP (dest, 0));
1809
 
1810
            if (cur_cfa->reg == regno)
1811
              offset = -cur_cfa->offset;
1812
            else if (cur_trace->cfa_store.reg == regno)
1813
              offset = -cur_trace->cfa_store.offset;
1814
            else
1815
              {
1816
                gcc_assert (cur_trace->cfa_temp.reg == regno);
1817
                offset = -cur_trace->cfa_temp.offset;
1818
              }
1819
          }
1820
          break;
1821
 
1822
          /* Rule 14 */
1823
        case POST_INC:
1824
          gcc_assert (cur_trace->cfa_temp.reg
1825
                      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
1826
          offset = -cur_trace->cfa_temp.offset;
1827
          cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
1828
          break;
1829
 
1830
        default:
1831
          gcc_unreachable ();
1832
        }
1833
 
1834
      /* Rule 17 */
1835
      /* If the source operand of this MEM operation is a memory,
1836
         we only care how much the stack grew.  */
1837
      if (MEM_P (src))
1838
        break;
1839
 
1840
      if (REG_P (src)
1841
          && REGNO (src) != STACK_POINTER_REGNUM
1842
          && REGNO (src) != HARD_FRAME_POINTER_REGNUM
1843
          && dwf_regno (src) == cur_cfa->reg)
1844
        {
1845
          /* We're storing the current CFA reg into the stack.  */
1846
 
1847
          if (cur_cfa->offset == 0)
1848
            {
1849
              /* Rule 19 */
1850
              /* If stack is aligned, putting CFA reg into stack means
1851
                 we can no longer use reg + offset to represent CFA.
1852
                 Here we use DW_CFA_def_cfa_expression instead.  The
1853
                 result of this expression equals the original CFA
1854
                 value.  */
1855
              if (fde
1856
                  && fde->stack_realign
1857
                  && cur_cfa->indirect == 0
1858
                  && cur_cfa->reg != dw_frame_pointer_regnum)
1859
                {
1860
                  gcc_assert (fde->drap_reg == cur_cfa->reg);
1861
 
1862
                  cur_cfa->indirect = 1;
1863
                  cur_cfa->reg = dw_frame_pointer_regnum;
1864
                  cur_cfa->base_offset = offset;
1865
                  cur_cfa->offset = 0;
1866
 
1867
                  fde->drap_reg_saved = 1;
1868
                  break;
1869
                }
1870
 
1871
              /* If the source register is exactly the CFA, assume
1872
                 we're saving SP like any other register; this happens
1873
                 on the ARM.  */
1874
              queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
1875
              break;
1876
            }
1877
          else
1878
            {
1879
              /* Otherwise, we'll need to look in the stack to
1880
                 calculate the CFA.  */
1881
              rtx x = XEXP (dest, 0);
1882
 
1883
              if (!REG_P (x))
1884
                x = XEXP (x, 0);
1885
              gcc_assert (REG_P (x));
1886
 
1887
              cur_cfa->reg = dwf_regno (x);
1888
              cur_cfa->base_offset = offset;
1889
              cur_cfa->indirect = 1;
1890
              break;
1891
            }
1892
        }
1893
 
1894
      span = NULL;
1895
      if (REG_P (src))
1896
        span = targetm.dwarf_register_span (src);
1897
      if (!span)
1898
        queue_reg_save (src, NULL_RTX, offset);
1899
      else
1900
        {
1901
          /* We have a PARALLEL describing where the contents of SRC live.
1902
             Queue register saves for each piece of the PARALLEL.  */
1903
          int par_index;
1904
          int limit;
1905
          HOST_WIDE_INT span_offset = offset;
1906
 
1907
          gcc_assert (GET_CODE (span) == PARALLEL);
1908
 
1909
          limit = XVECLEN (span, 0);
1910
          for (par_index = 0; par_index < limit; par_index++)
1911
            {
1912
              rtx elem = XVECEXP (span, 0, par_index);
1913
              queue_reg_save (elem, NULL_RTX, span_offset);
1914
              span_offset += GET_MODE_SIZE (GET_MODE (elem));
1915
            }
1916
        }
1917
      break;
1918
 
1919
    default:
1920
      gcc_unreachable ();
1921
    }
1922
}
1923
 
1924
/* Record call frame debugging information for INSN, which either sets
1925
   SP or FP (adjusting how we calculate the frame address) or saves a
1926
   register to the stack.  */
1927
 
1928
static void
1929
dwarf2out_frame_debug (rtx insn)
1930
{
1931
  rtx note, n;
1932
  bool handled_one = false;
1933
 
1934
  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1935
    switch (REG_NOTE_KIND (note))
1936
      {
1937
      case REG_FRAME_RELATED_EXPR:
1938
        insn = XEXP (note, 0);
1939
        goto do_frame_expr;
1940
 
1941
      case REG_CFA_DEF_CFA:
1942
        dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
1943
        handled_one = true;
1944
        break;
1945
 
1946
      case REG_CFA_ADJUST_CFA:
1947
        n = XEXP (note, 0);
1948
        if (n == NULL)
1949
          {
1950
            n = PATTERN (insn);
1951
            if (GET_CODE (n) == PARALLEL)
1952
              n = XVECEXP (n, 0, 0);
1953
          }
1954
        dwarf2out_frame_debug_adjust_cfa (n);
1955
        handled_one = true;
1956
        break;
1957
 
1958
      case REG_CFA_OFFSET:
1959
        n = XEXP (note, 0);
1960
        if (n == NULL)
1961
          n = single_set (insn);
1962
        dwarf2out_frame_debug_cfa_offset (n);
1963
        handled_one = true;
1964
        break;
1965
 
1966
      case REG_CFA_REGISTER:
1967
        n = XEXP (note, 0);
1968
        if (n == NULL)
1969
          {
1970
            n = PATTERN (insn);
1971
            if (GET_CODE (n) == PARALLEL)
1972
              n = XVECEXP (n, 0, 0);
1973
          }
1974
        dwarf2out_frame_debug_cfa_register (n);
1975
        handled_one = true;
1976
        break;
1977
 
1978
      case REG_CFA_EXPRESSION:
1979
        n = XEXP (note, 0);
1980
        if (n == NULL)
1981
          n = single_set (insn);
1982
        dwarf2out_frame_debug_cfa_expression (n);
1983
        handled_one = true;
1984
        break;
1985
 
1986
      case REG_CFA_RESTORE:
1987
        n = XEXP (note, 0);
1988
        if (n == NULL)
1989
          {
1990
            n = PATTERN (insn);
1991
            if (GET_CODE (n) == PARALLEL)
1992
              n = XVECEXP (n, 0, 0);
1993
            n = XEXP (n, 0);
1994
          }
1995
        dwarf2out_frame_debug_cfa_restore (n);
1996
        handled_one = true;
1997
        break;
1998
 
1999
      case REG_CFA_SET_VDRAP:
2000
        n = XEXP (note, 0);
2001
        if (REG_P (n))
2002
          {
2003
            dw_fde_ref fde = cfun->fde;
2004
            if (fde)
2005
              {
2006
                gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2007
                if (REG_P (n))
2008
                  fde->vdrap_reg = dwf_regno (n);
2009
              }
2010
          }
2011
        handled_one = true;
2012
        break;
2013
 
2014
      case REG_CFA_WINDOW_SAVE:
2015
        dwarf2out_frame_debug_cfa_window_save ();
2016
        handled_one = true;
2017
        break;
2018
 
2019
      case REG_CFA_FLUSH_QUEUE:
2020
        /* The actual flush happens elsewhere.  */
2021
        handled_one = true;
2022
        break;
2023
 
2024
      default:
2025
        break;
2026
      }
2027
 
2028
  if (!handled_one)
2029
    {
2030
      insn = PATTERN (insn);
2031
    do_frame_expr:
2032
      dwarf2out_frame_debug_expr (insn);
2033
 
2034
      /* Check again.  A parallel can save and update the same register.
2035
         We could probably check just once, here, but this is safer than
2036
         removing the check at the start of the function.  */
2037
      if (clobbers_queued_reg_save (insn))
2038
        dwarf2out_flush_queued_reg_saves ();
2039
    }
2040
}
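
/* For illustration: a backend whose stack adjustment cannot be decoded by
   the generic analysis would typically attach an explicit note, e.g.
   (gen_my_stack_adjust is a hypothetical pattern name, not a real one):

     insn = emit_insn (gen_my_stack_adjust (GEN_INT (-16)));
     RTX_FRAME_RELATED_P (insn) = 1;
     add_reg_note (insn, REG_CFA_ADJUST_CFA,
                   gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                plus_constant (stack_pointer_rtx, -16)));

   dwarf2out_frame_debug then takes the REG_CFA_ADJUST_CFA case above
   rather than falling through to dwarf2out_frame_debug_expr.  */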
2041
 
2042
/* Emit CFI info to change the state from OLD_ROW to NEW_ROW.  */
2043
 
2044
static void
2045
change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2046
{
2047
  size_t i, n_old, n_new, n_max;
2048
  dw_cfi_ref cfi;
2049
 
2050
  if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2051
    add_cfi (new_row->cfa_cfi);
2052
  else
2053
    {
2054
      cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2055
      if (cfi)
2056
        add_cfi (cfi);
2057
    }
2058
 
2059
  n_old = VEC_length (dw_cfi_ref, old_row->reg_save);
2060
  n_new = VEC_length (dw_cfi_ref, new_row->reg_save);
2061
  n_max = MAX (n_old, n_new);
2062
 
2063
  for (i = 0; i < n_max; ++i)
2064
    {
2065
      dw_cfi_ref r_old = NULL, r_new = NULL;
2066
 
2067
      if (i < n_old)
2068
        r_old = VEC_index (dw_cfi_ref, old_row->reg_save, i);
2069
      if (i < n_new)
2070
        r_new = VEC_index (dw_cfi_ref, new_row->reg_save, i);
2071
 
2072
      if (r_old == r_new)
2073
        ;
2074
      else if (r_new == NULL)
2075
        add_cfi_restore (i);
2076
      else if (!cfi_equal_p (r_old, r_new))
2077
        add_cfi (r_new);
2078
    }
2079
}
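
/* For example, if OLD_ROW says "CFA = sp + 16, r4 saved at CFA-8" and
   NEW_ROW says "CFA = fp + 16" with no saves recorded, the loop above
   emits the CFA change computed by def_cfa_0 plus a DW_CFA_restore for
   r4's column, since that reg_save slot went from non-NULL to NULL.  */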
2080
 
2081
/* Examine CFI and return true if a cfi label and set_loc is needed
2082
   beforehand.  Even when generating CFI assembler instructions, we
2083
   still have to add the cfi to the list so that lookup_cfa_1 works
2084
   later on.  When -g2 and above we even need to force emitting of
2085
   CFI labels and add a DW_CFA_set_loc to the list for convert_cfa_to_fb_loc_list
2086
   purposes.  If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2087
   and so don't use convert_cfa_to_fb_loc_list.  */
2088
 
2089
static bool
2090
cfi_label_required_p (dw_cfi_ref cfi)
2091
{
2092
  if (!dwarf2out_do_cfi_asm ())
2093
    return true;
2094
 
2095
  if (dwarf_version == 2
2096
      && debug_info_level > DINFO_LEVEL_TERSE
2097
      && (write_symbols == DWARF2_DEBUG
2098
          || write_symbols == VMS_AND_DWARF2_DEBUG))
2099
    {
2100
      switch (cfi->dw_cfi_opc)
2101
        {
2102
        case DW_CFA_def_cfa_offset:
2103
        case DW_CFA_def_cfa_offset_sf:
2104
        case DW_CFA_def_cfa_register:
2105
        case DW_CFA_def_cfa:
2106
        case DW_CFA_def_cfa_sf:
2107
        case DW_CFA_def_cfa_expression:
2108
        case DW_CFA_restore_state:
2109
          return true;
2110
        default:
2111
          return false;
2112
        }
2113
    }
2114
  return false;
2115
}
2116
 
2117
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
2118
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2119
   necessary.  */
2120
static void
2121
add_cfis_to_fde (void)
2122
{
2123
  dw_fde_ref fde = cfun->fde;
2124
  rtx insn, next;
2125
  /* We always start with a function_begin label.  */
2126
  bool first = false;
2127
 
2128
  for (insn = get_insns (); insn; insn = next)
2129
    {
2130
      next = NEXT_INSN (insn);
2131
 
2132
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2133
        {
2134
          fde->dw_fde_switch_cfi_index
2135
            = VEC_length (dw_cfi_ref, fde->dw_fde_cfi);
2136
          /* Don't attempt to advance_loc4 between labels
2137
             in different sections.  */
2138
          first = true;
2139
        }
2140
 
2141
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2142
        {
2143
          bool required = cfi_label_required_p (NOTE_CFI (insn));
2144
          while (next)
2145
            if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
2146
              {
2147
                required |= cfi_label_required_p (NOTE_CFI (next));
2148
                next = NEXT_INSN (next);
2149
              }
2150
            else if (active_insn_p (next)
2151
                     || (NOTE_P (next) && (NOTE_KIND (next)
2152
                                           == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
2153
              break;
2154
            else
2155
              next = NEXT_INSN (next);
2156
          if (required)
2157
            {
2158
              int num = dwarf2out_cfi_label_num;
2159
              const char *label = dwarf2out_cfi_label ();
2160
              dw_cfi_ref xcfi;
2161
              rtx tmp;
2162
 
2163
              /* Set the location counter to the new label.  */
2164
              xcfi = new_cfi ();
2165
              xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
2166
                                  : DW_CFA_advance_loc4);
2167
              xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
2168
              VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);
2169
 
2170
              tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
2171
              NOTE_LABEL_NUMBER (tmp) = num;
2172
            }
2173
 
2174
          do
2175
            {
2176
              if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
2177
                VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi,
2178
                               NOTE_CFI (insn));
2179
              insn = NEXT_INSN (insn);
2180
            }
2181
          while (insn != next);
2182
          first = false;
2183
        }
2184
    }
2185
}
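
/* For illustration: when a CFI note needs a label, the code above emits a
   fresh CFI label (e.g. .LCFI3) into the insn stream and pushes a
   DW_CFA_advance_loc4 referencing it onto the FDE, so output_cfi can
   later compute the delta from the previous label.  Across a
   NOTE_INSN_SWITCH_TEXT_SECTIONS boundary a DW_CFA_set_loc is used
   instead, because a label delta across sections is not usable there.  */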
2186
 
2187
/* If LABEL is the start of a trace, then initialize the state of that
2188
   trace from CUR_TRACE and CUR_ROW.  */
2189
 
2190
static void
2191
maybe_record_trace_start (rtx start, rtx origin)
2192
{
2193
  dw_trace_info *ti;
2194
  HOST_WIDE_INT args_size;
2195
 
2196
  ti = get_trace_info (start);
2197
  gcc_assert (ti != NULL);
2198
 
2199
  if (dump_file)
2200
    {
2201
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
2202
               cur_trace->id, ti->id,
2203
               (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
2204
               (origin ? INSN_UID (origin) : 0));
2205
    }
2206
 
2207
  args_size = cur_trace->end_true_args_size;
2208
  if (ti->beg_row == NULL)
2209
    {
2210
      /* This is the first time we've encountered this trace.  Propagate
2211
         state across the edge and push the trace onto the work list.  */
2212
      ti->beg_row = copy_cfi_row (cur_row);
2213
      ti->beg_true_args_size = args_size;
2214
 
2215
      ti->cfa_store = cur_trace->cfa_store;
2216
      ti->cfa_temp = cur_trace->cfa_temp;
2217
      ti->regs_saved_in_regs = VEC_copy (reg_saved_in_data, heap,
2218
                                         cur_trace->regs_saved_in_regs);
2219
 
2220
      VEC_safe_push (dw_trace_info_ref, heap, trace_work_list, ti);
2221
 
2222
      if (dump_file)
2223
        fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
2224
    }
2225
  else
2226
    {
2227
 
2228
      /* We ought to have the same state incoming to a given trace no
2229
         matter how we arrive at the trace.  Anything else means we've
2230
         got some kind of optimization error.  */
2231
      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));
2232
 
2233
      /* The args_size is allowed to conflict if it isn't actually used.  */
2234
      if (ti->beg_true_args_size != args_size)
2235
        ti->args_size_undefined = true;
2236
    }
2237
}
2238
 
2239
/* Similarly, but handle the args_size and CFA reset across EH
2240
   and non-local goto edges.  */
2241
 
2242
static void
2243
maybe_record_trace_start_abnormal (rtx start, rtx origin)
2244
{
2245
  HOST_WIDE_INT save_args_size, delta;
2246
  dw_cfa_location save_cfa;
2247
 
2248
  save_args_size = cur_trace->end_true_args_size;
2249
  if (save_args_size == 0)
2250
    {
2251
      maybe_record_trace_start (start, origin);
2252
      return;
2253
    }
2254
 
2255
  delta = -save_args_size;
2256
  cur_trace->end_true_args_size = 0;
2257
 
2258
  save_cfa = cur_row->cfa;
2259
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2260
    {
2261
         /* Convert a change in args_size (always positive in the
2262
         direction of stack growth) to a change in stack pointer.  */
2263
#ifndef STACK_GROWS_DOWNWARD
2264
      delta = -delta;
2265
#endif
2266
      cur_row->cfa.offset += delta;
2267
    }
2268
 
2269
  maybe_record_trace_start (start, origin);
2270
 
2271
  cur_trace->end_true_args_size = save_args_size;
2272
  cur_row->cfa = save_cfa;
2273
}
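
/* For example, with 16 bytes of outgoing arguments pushed at the end of
   the current trace (end_true_args_size == 16), an EH or non-local-goto
   edge enters the landing pad as if those bytes had already been popped:
   on a STACK_GROWS_DOWNWARD target with an SP-based CFA the code above
   temporarily applies cfa.offset += -16, records the trace start, and
   then restores both the args_size and the CFA.  */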
2274
 
2275
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
2276
/* ??? Sadly, this is in large part a duplicate of make_edges.  */
2277
 
2278
static void
2279
create_trace_edges (rtx insn)
2280
{
2281
  rtx tmp, lab;
2282
  int i, n;
2283
 
2284
  if (JUMP_P (insn))
2285
    {
2286
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
2287
        return;
2288
 
2289
      if (tablejump_p (insn, NULL, &tmp))
2290
        {
2291
          rtvec vec;
2292
 
2293
          tmp = PATTERN (tmp);
2294
          vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
2295
 
2296
          n = GET_NUM_ELEM (vec);
2297
          for (i = 0; i < n; ++i)
2298
            {
2299
              lab = XEXP (RTVEC_ELT (vec, i), 0);
2300
              maybe_record_trace_start (lab, insn);
2301
            }
2302
        }
2303
      else if (computed_jump_p (insn))
2304
        {
2305
          for (lab = forced_labels; lab; lab = XEXP (lab, 1))
2306
            maybe_record_trace_start (XEXP (lab, 0), insn);
2307
        }
2308
      else if (returnjump_p (insn))
2309
        ;
2310
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
2311
        {
2312
          n = ASM_OPERANDS_LABEL_LENGTH (tmp);
2313
          for (i = 0; i < n; ++i)
2314
            {
2315
              lab = XEXP (ASM_OPERANDS_LABEL (tmp, i), 0);
2316
              maybe_record_trace_start (lab, insn);
2317
            }
2318
        }
2319
      else
2320
        {
2321
          lab = JUMP_LABEL (insn);
2322
          gcc_assert (lab != NULL);
2323
          maybe_record_trace_start (lab, insn);
2324
        }
2325
    }
2326
  else if (CALL_P (insn))
2327
    {
2328
      /* Sibling calls don't have edges inside this function.  */
2329
      if (SIBLING_CALL_P (insn))
2330
        return;
2331
 
2332
      /* Process non-local goto edges.  */
2333
      if (can_nonlocal_goto (insn))
2334
        for (lab = nonlocal_goto_handler_labels; lab; lab = XEXP (lab, 1))
2335
          maybe_record_trace_start_abnormal (XEXP (lab, 0), insn);
2336
    }
2337
  else if (GET_CODE (PATTERN (insn)) == SEQUENCE)
2338
    {
2339
      rtx seq = PATTERN (insn);
2340
      int i, n = XVECLEN (seq, 0);
2341
      for (i = 0; i < n; ++i)
2342
        create_trace_edges (XVECEXP (seq, 0, i));
2343
      return;
2344
    }
2345
 
2346
  /* Process EH edges.  */
2347
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
2348
    {
2349
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
2350
      if (lp)
2351
        maybe_record_trace_start_abnormal (lp->landing_pad, insn);
2352
    }
2353
}
2354
 
2355
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.  */
2356
 
2357
static void
2358
scan_insn_after (rtx insn)
2359
{
2360
  if (RTX_FRAME_RELATED_P (insn))
2361
    dwarf2out_frame_debug (insn);
2362
  notice_args_size (insn);
2363
}
2364
 
2365
/* Scan the trace beginning at INSN and create the CFI notes for the
2366
   instructions therein.  */
2367
 
2368
static void
2369
scan_trace (dw_trace_info *trace)
2370
{
2371
  rtx prev, insn = trace->head;
2372
  dw_cfa_location this_cfa;
2373
 
2374
  if (dump_file)
2375
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
2376
             trace->id, rtx_name[(int) GET_CODE (insn)],
2377
             INSN_UID (insn));
2378
 
2379
  trace->end_row = copy_cfi_row (trace->beg_row);
2380
  trace->end_true_args_size = trace->beg_true_args_size;
2381
 
2382
  cur_trace = trace;
2383
  cur_row = trace->end_row;
2384
 
2385
  this_cfa = cur_row->cfa;
2386
  cur_cfa = &this_cfa;
2387
 
2388
  for (prev = insn, insn = NEXT_INSN (insn);
2389
       insn;
2390
       prev = insn, insn = NEXT_INSN (insn))
2391
    {
2392
      rtx control;
2393
 
2394
      /* Do everything that happens "before" the insn.  */
2395
      add_cfi_insn = prev;
2396
 
2397
      /* Notice the end of a trace.  */
2398
      if (BARRIER_P (insn))
2399
        {
2400
          /* Don't bother saving the unneeded queued registers at all.  */
2401
          VEC_truncate (queued_reg_save, queued_reg_saves, 0);
2402
          break;
2403
        }
2404
      if (save_point_p (insn))
2405
        {
2406
          /* Propagate across fallthru edges.  */
2407
          dwarf2out_flush_queued_reg_saves ();
2408
          maybe_record_trace_start (insn, NULL);
2409
          break;
2410
        }
2411
 
2412
      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
2413
        continue;
2414
 
2415
      /* Handle all changes to the row state.  Sequences require special
2416
         handling for the positioning of the notes.  */
2417
      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
2418
        {
2419
          rtx elt, pat = PATTERN (insn);
2420
          int i, n = XVECLEN (pat, 0);
2421
 
2422
          control = XVECEXP (pat, 0, 0);
2423
          if (can_throw_internal (control))
2424
            notice_eh_throw (control);
2425
          dwarf2out_flush_queued_reg_saves ();
2426
 
2427
          if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
2428
            {
2429
              /* ??? Hopefully multiple delay slots are not annulled.  */
2430
              gcc_assert (n == 2);
2431
              gcc_assert (!RTX_FRAME_RELATED_P (control));
2432
              gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
2433
 
2434
              elt = XVECEXP (pat, 0, 1);
2435
 
2436
              /* If ELT is an instruction from target of an annulled branch,
2437
                 the effects are for the target only and so the args_size
2438
                 and CFA along the current path shouldn't change.  */
2439
              if (INSN_FROM_TARGET_P (elt))
2440
                {
2441
                  HOST_WIDE_INT restore_args_size;
2442
                  cfi_vec save_row_reg_save;
2443
 
2444
                  add_cfi_insn = NULL;
2445
                  restore_args_size = cur_trace->end_true_args_size;
2446
                  cur_cfa = &cur_row->cfa;
2447
                  save_row_reg_save = VEC_copy (dw_cfi_ref, gc, cur_row->reg_save);
2448
 
2449
                  scan_insn_after (elt);
2450
 
2451
                  /* ??? Should we instead save the entire row state?  */
2452
                  gcc_assert (!VEC_length (queued_reg_save, queued_reg_saves));
2453
 
2454
                  create_trace_edges (control);
2455
 
2456
                  cur_trace->end_true_args_size = restore_args_size;
2457
                  cur_row->cfa = this_cfa;
2458
                  cur_row->reg_save = save_row_reg_save;
2459
                  cur_cfa = &this_cfa;
2460
                  continue;
2461
                }
2462
            }
2463
 
2464
          /* The insns in the delay slot should all be considered to happen
2465
             "before" a call insn.  Consider a call with a stack pointer
2466
             adjustment in the delay slot.  The backtrace from the callee
2467
             should include the sp adjustment.  Unfortunately, that leaves
2468
             us with an unavoidable unwinding error exactly at the call insn
2469
             itself.  For jump insns we'd prefer to avoid this error by
2470
             placing the notes after the sequence.  */
2471
          if (JUMP_P (control))
2472
            add_cfi_insn = insn;
2473
 
2474
          for (i = 1; i < n; ++i)
2475
            {
2476
              elt = XVECEXP (pat, 0, i);
2477
              scan_insn_after (elt);
2478
            }
2479
 
2480
          /* Make sure any register saves are visible at the jump target.  */
2481
          dwarf2out_flush_queued_reg_saves ();
2482
          any_cfis_emitted = false;
2483
 
2484
          /* However, if there is some adjustment on the call itself, e.g.
2485
             a call_pop, that action should be considered to happen after
2486
             the call returns.  */
2487
          add_cfi_insn = insn;
2488
          scan_insn_after (control);
2489
        }
2490
      else
2491
        {
2492
          /* Flush data before calls and jumps, and of course if necessary.  */
2493
          if (can_throw_internal (insn))
2494
            {
2495
              notice_eh_throw (insn);
2496
              dwarf2out_flush_queued_reg_saves ();
2497
            }
2498
          else if (!NONJUMP_INSN_P (insn)
2499
                   || clobbers_queued_reg_save (insn)
2500
                   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2501
            dwarf2out_flush_queued_reg_saves ();
2502
          any_cfis_emitted = false;
2503
 
2504
          add_cfi_insn = insn;
2505
          scan_insn_after (insn);
2506
          control = insn;
2507
        }
2508
 
2509
      /* Between frame-related-p and args_size we might have otherwise
2510
         emitted two cfa adjustments.  Do it now.  */
2511
      def_cfa_1 (&this_cfa);
2512
 
2513
      /* Minimize the number of advances by emitting the entire queue
2514
         once anything is emitted.  */
2515
      if (any_cfis_emitted
2516
          || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
2517
        dwarf2out_flush_queued_reg_saves ();
2518
 
2519
      /* Note that a test for control_flow_insn_p does exactly the
2520
         same tests as are done to actually create the edges.  So
2521
         always call the routine and let it not create edges for
2522
         non-control-flow insns.  */
2523
      create_trace_edges (control);
2524
    }
2525
 
2526
  add_cfi_insn = NULL;
2527
  cur_row = NULL;
2528
  cur_trace = NULL;
2529
  cur_cfa = NULL;
2530
}
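
/* For a typical frame-related prologue insn (say an SP subtraction marked
   RTX_FRAME_RELATED_P), the loop above points add_cfi_insn at the insn,
   lets dwarf2out_frame_debug update THIS_CFA, and the single def_cfa_1
   call then emits one CFA adjustment covering both the frame-related
   effect and any REG_ARGS_SIZE note on the same insn.  */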
2531
 
2532
/* Scan the function and create the initial set of CFI notes.  */
2533
 
2534
static void
2535
create_cfi_notes (void)
2536
{
2537
  dw_trace_info *ti;
2538
 
2539
  gcc_checking_assert (queued_reg_saves == NULL);
2540
  gcc_checking_assert (trace_work_list == NULL);
2541
 
2542
  /* Always begin at the entry trace.  */
2543
  ti = VEC_index (dw_trace_info, trace_info, 0);
2544
  scan_trace (ti);
2545
 
2546
  while (!VEC_empty (dw_trace_info_ref, trace_work_list))
2547
    {
2548
      ti = VEC_pop (dw_trace_info_ref, trace_work_list);
2549
      scan_trace (ti);
2550
    }
2551
 
2552
  VEC_free (queued_reg_save, heap, queued_reg_saves);
2553
  VEC_free (dw_trace_info_ref, heap, trace_work_list);
2554
}
2555
 
2556
/* Return the insn before the first NOTE_INSN_CFI after START.  */
2557
 
2558
static rtx
2559
before_next_cfi_note (rtx start)
2560
{
2561
  rtx prev = start;
2562
  while (start)
2563
    {
2564
      if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2565
        return prev;
2566
      prev = start;
2567
      start = NEXT_INSN (start);
2568
    }
2569
  gcc_unreachable ();
2570
}
2571
 
2572
/* Insert CFI notes between traces to properly change state between them.  */
2573
 
2574
static void
2575
connect_traces (void)
2576
{
2577
  unsigned i, n = VEC_length (dw_trace_info, trace_info);
2578
  dw_trace_info *prev_ti, *ti;
2579
 
2580
  /* ??? Ideally, we should have both queued and processed every trace.
2581
     However, the current representation of constant pools on various targets
2582
     is indistinguishable from unreachable code.  Assume for the moment that
2583
     we can simply skip over such traces.  */
2584
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
2585
     these are not "real" instructions, and should not be considered.
2586
     This could be generically useful for tablejump data as well.  */
2587
  /* Remove all unprocessed traces from the list.  */
2588
  for (i = n - 1; i > 0; --i)
2589
    {
2590
      ti = VEC_index (dw_trace_info, trace_info, i);
2591
      if (ti->beg_row == NULL)
2592
        {
2593
          VEC_ordered_remove (dw_trace_info, trace_info, i);
2594
          n -= 1;
2595
        }
2596
      else
2597
        gcc_assert (ti->end_row != NULL);
2598
    }
2599
 
2600
  /* Work from the end back to the beginning.  This lets us easily insert
2601
     remember/restore_state notes in the correct order wrt other notes.  */
2602
  prev_ti = VEC_index (dw_trace_info, trace_info, n - 1);
2603
  for (i = n - 1; i > 0; --i)
2604
    {
2605
      dw_cfi_row *old_row;
2606
 
2607
      ti = prev_ti;
2608
      prev_ti = VEC_index (dw_trace_info, trace_info, i - 1);
2609
 
2610
      add_cfi_insn = ti->head;
2611
 
2612
      /* In dwarf2out_switch_text_section, we'll begin a new FDE
2613
         for the portion of the function in the alternate text
2614
         section.  The row state at the very beginning of that
2615
         new FDE will be exactly the row state from the CIE.  */
2616
      if (ti->switch_sections)
2617
        old_row = cie_cfi_row;
2618
      else
2619
        {
2620
          old_row = prev_ti->end_row;
2621
          /* If there's no change from the previous end state, fine.  */
2622
          if (cfi_row_equal_p (old_row, ti->beg_row))
2623
            ;
2624
          /* Otherwise check for the common case of sharing state with
2625
             the beginning of an epilogue, but not the end.  Insert
2626
             remember/restore opcodes in that case.  */
2627
          else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
2628
            {
2629
              dw_cfi_ref cfi;
2630
 
2631
              /* Note that if we blindly insert the remember at the
2632
                 start of the trace, we can wind up increasing the
2633
                 size of the unwind info due to extra advance opcodes.
2634
                 Instead, put the remember immediately before the next
2635
                 state change.  We know there must be one, because the
2636
                 state at the beginning and head of the trace differ.  */
2637
              add_cfi_insn = before_next_cfi_note (prev_ti->head);
2638
              cfi = new_cfi ();
2639
              cfi->dw_cfi_opc = DW_CFA_remember_state;
2640
              add_cfi (cfi);
2641
 
2642
              add_cfi_insn = ti->head;
2643
              cfi = new_cfi ();
2644
              cfi->dw_cfi_opc = DW_CFA_restore_state;
2645
              add_cfi (cfi);
2646
 
2647
              old_row = prev_ti->beg_row;
2648
            }
2649
          /* Otherwise, we'll simply change state from the previous end.  */
2650
        }
2651
 
2652
      change_cfi_row (old_row, ti->beg_row);
2653
 
2654
      if (dump_file && add_cfi_insn != ti->head)
2655
        {
2656
          rtx note;
2657
 
2658
          fprintf (dump_file, "Fixup between trace %u and %u:\n",
2659
                   prev_ti->id, ti->id);
2660
 
2661
          note = ti->head;
2662
          do
2663
            {
2664
              note = NEXT_INSN (note);
2665
              gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
2666
              output_cfi_directive (dump_file, NOTE_CFI (note));
2667
            }
2668
          while (note != add_cfi_insn);
2669
        }
2670
    }
2671
 
2672
  /* Connect args_size between traces that have can_throw_internal insns.  */
2673
  if (cfun->eh->lp_array != NULL)
2674
    {
2675
      HOST_WIDE_INT prev_args_size = 0;
2676
 
2677
      for (i = 0; i < n; ++i)
2678
        {
2679
          ti = VEC_index (dw_trace_info, trace_info, i);
2680
 
2681
          if (ti->switch_sections)
2682
            prev_args_size = 0;
2683
          if (ti->eh_head == NULL)
2684
            continue;
2685
          gcc_assert (!ti->args_size_undefined);
2686
 
2687
          if (ti->beg_delay_args_size != prev_args_size)
2688
            {
2689
              /* ??? Search back to previous CFI note.  */
2690
              add_cfi_insn = PREV_INSN (ti->eh_head);
2691
              add_cfi_args_size (ti->beg_delay_args_size);
2692
            }
2693
 
2694
          prev_args_size = ti->end_delay_args_size;
2695
        }
2696
    }
2697
}
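
/* For illustration: in a function with an early epilogue, a later trace
   often begins in the same state as the *start* of the preceding trace
   rather than its end.  The remember/restore case above then emits
   DW_CFA_remember_state just before the preceding trace's first state
   change and DW_CFA_restore_state at the head of the later trace, which
   is normally smaller than re-describing every register save.  */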
2698
 
2699
/* Set up the pseudo-cfg of instruction traces, as described at the
2700
   block comment at the top of the file.  */
2701
 
2702
static void
2703
create_pseudo_cfg (void)
2704
{
2705
  bool saw_barrier, switch_sections;
2706
  dw_trace_info *ti;
2707
  rtx insn;
2708
  unsigned i;
2709
 
2710
  /* The first trace begins at the start of the function,
2711
     and begins with the CIE row state.  */
2712
  trace_info = VEC_alloc (dw_trace_info, heap, 16);
2713
  ti = VEC_quick_push (dw_trace_info, trace_info, NULL);
2714
 
2715
  memset (ti, 0, sizeof (*ti));
2716
  ti->head = get_insns ();
2717
  ti->beg_row = cie_cfi_row;
2718
  ti->cfa_store = cie_cfi_row->cfa;
2719
  ti->cfa_temp.reg = INVALID_REGNUM;
2720
  if (cie_return_save)
2721
    VEC_safe_push (reg_saved_in_data, heap,
2722
                   ti->regs_saved_in_regs, cie_return_save);
2723
 
2724
  /* Walk all the insns, collecting start of trace locations.  */
2725
  saw_barrier = false;
2726
  switch_sections = false;
2727
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2728
    {
2729
      if (BARRIER_P (insn))
2730
        saw_barrier = true;
2731
      else if (NOTE_P (insn)
2732
               && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2733
        {
2734
          /* We should have just seen a barrier.  */
2735
          gcc_assert (saw_barrier);
2736
          switch_sections = true;
2737
        }
2738
      /* Watch out for save_point notes between basic blocks.
2739
         In particular, a note after a barrier.  Do not record these,
2740
         delaying trace creation until the label.  */
2741
      else if (save_point_p (insn)
2742
               && (LABEL_P (insn) || !saw_barrier))
2743
        {
2744
          ti = VEC_safe_push (dw_trace_info, heap, trace_info, NULL);
2745
          memset (ti, 0, sizeof (*ti));
2746
          ti->head = insn;
2747
          ti->switch_sections = switch_sections;
2748
          ti->id = VEC_length (dw_trace_info, trace_info) - 1;
2749
 
2750
          saw_barrier = false;
2751
          switch_sections = false;
2752
        }
2753
    }
2754
 
2755
  /* Create the trace index after we've finished building trace_info,
2756
     avoiding stale pointer problems due to reallocation.  */
2757
  trace_index = htab_create (VEC_length (dw_trace_info, trace_info),
2758
                             dw_trace_info_hash, dw_trace_info_eq, NULL);
2759
  FOR_EACH_VEC_ELT (dw_trace_info, trace_info, i, ti)
2760
    {
2761
      void **slot;
2762
 
2763
      if (dump_file)
2764
        fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", i,
2765
                 rtx_name[(int) GET_CODE (ti->head)], INSN_UID (ti->head),
2766
                 ti->switch_sections ? " (section switch)" : "");
2767
 
2768
      slot = htab_find_slot_with_hash (trace_index, ti,
2769
                                       INSN_UID (ti->head), INSERT);
2770
      gcc_assert (*slot == NULL);
2771
      *slot = (void *) ti;
2772
    }
2773
}
2774
 
2775
/* Record the initial position of the return address.  RTL is
2776
   INCOMING_RETURN_ADDR_RTX.  */
2777
 
2778
static void
2779
initial_return_save (rtx rtl)
2780
{
2781
  unsigned int reg = INVALID_REGNUM;
2782
  HOST_WIDE_INT offset = 0;
2783
 
2784
  switch (GET_CODE (rtl))
2785
    {
2786
    case REG:
2787
      /* RA is in a register.  */
2788
      reg = dwf_regno (rtl);
2789
      break;
2790
 
2791
    case MEM:
2792
      /* RA is on the stack.  */
2793
      rtl = XEXP (rtl, 0);
2794
      switch (GET_CODE (rtl))
2795
        {
2796
        case REG:
2797
          gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
2798
          offset = 0;
2799
          break;
2800
 
2801
        case PLUS:
2802
          gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2803
          offset = INTVAL (XEXP (rtl, 1));
2804
          break;
2805
 
2806
        case MINUS:
2807
          gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
2808
          offset = -INTVAL (XEXP (rtl, 1));
2809
          break;
2810
 
2811
        default:
2812
          gcc_unreachable ();
2813
        }
2814
 
2815
      break;
2816
 
2817
    case PLUS:
2818
      /* The return address is at some offset from any value we can
2819
         actually load.  For instance, on the SPARC it is in %i7+8. Just
2820
         ignore the offset for now; it doesn't matter for unwinding frames.  */
2821
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
2822
      initial_return_save (XEXP (rtl, 0));
2823
      return;
2824
 
2825
    default:
2826
      gcc_unreachable ();
2827
    }
2828
 
2829
  if (reg != DWARF_FRAME_RETURN_COLUMN)
2830
    {
2831
      if (reg != INVALID_REGNUM)
2832
        record_reg_saved_in_reg (rtl, pc_rtx);
2833
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
2834
    }
2835
}
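
/* For example, on a target whose return address arrives in a register
   (the SPARC %i7+8 case handled above), the REG case records that
   column; if it differs from DWARF_FRAME_RETURN_COLUMN this yields a
   DW_CFA_register plus a regs_saved_in_regs entry.  On a target where
   the return address arrives on the stack, such as x86 where
   INCOMING_RETURN_ADDR_RTX is a MEM of the stack pointer, the MEM case
   records it at an offset from the incoming CFA instead.  */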
2836
 
2837
static void
2838
create_cie_data (void)
2839
{
2840
  dw_cfa_location loc;
2841
  dw_trace_info cie_trace;
2842
 
2843
  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
2844
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
2845
 
2846
  memset (&cie_trace, 0, sizeof(cie_trace));
2847
  cur_trace = &cie_trace;
2848
 
2849
  add_cfi_vec = &cie_cfi_vec;
2850
  cie_cfi_row = cur_row = new_cfi_row ();
2851
 
2852
  /* On entry, the Canonical Frame Address is at SP.  */
2853
  memset(&loc, 0, sizeof (loc));
2854
  loc.reg = dw_stack_pointer_regnum;
2855
  loc.offset = INCOMING_FRAME_SP_OFFSET;
2856
  def_cfa_1 (&loc);
2857
 
2858
  if (targetm.debug_unwind_info () == UI_DWARF2
2859
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
2860
    {
2861
      initial_return_save (INCOMING_RETURN_ADDR_RTX);
2862
 
2863
      /* For a few targets, we have the return address incoming into a
2864
         register, but choose a different return column.  This will result
2865
         in a DW_CFA_register for the return, and an entry in
2866
         regs_saved_in_regs to match.  If the target later stores that
2867
         return address register to the stack, we want to be able to emit
2868
         the DW_CFA_offset against the return column, not the intermediate
2869
         save register.  Save the contents of regs_saved_in_regs so that
2870
         we can re-initialize it at the start of each function.  */
2871
      switch (VEC_length (reg_saved_in_data, cie_trace.regs_saved_in_regs))
2872
        {
2873
        case 0:
2874
          break;
2875
        case 1:
2876
          cie_return_save = ggc_alloc_reg_saved_in_data ();
2877
          *cie_return_save = *VEC_index (reg_saved_in_data,
2878
                                         cie_trace.regs_saved_in_regs, 0);
2879
          VEC_free (reg_saved_in_data, heap, cie_trace.regs_saved_in_regs);
2880
          break;
2881
        default:
2882
          gcc_unreachable ();
2883
        }
2884
    }
2885
 
2886
  add_cfi_vec = NULL;
2887
  cur_row = NULL;
2888
  cur_trace = NULL;
2889
}
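
/* For example, on a typical 32-bit x86 target the CIE row built above
   amounts to "CFA = esp + 4" (INCOMING_FRAME_SP_OFFSET covers the pushed
   return address) and "return column saved at CFA-4", i.e. the
   DW_CFA_def_cfa / DW_CFA_offset pair that every FDE inherits on
   function entry.  The exact values are target-defined.  */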
2890
 
2891
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2892
   state at each location within the function.  These notes will be
2893
   emitted during pass_final.  */
2894
 
2895
static unsigned int
2896
execute_dwarf2_frame (void)
2897
{
2898
  /* The first time we're called, compute the incoming frame state.  */
2899
  if (cie_cfi_vec == NULL)
2900
    create_cie_data ();
2901
 
2902
  dwarf2out_alloc_current_fde ();
2903
 
2904
  create_pseudo_cfg ();
2905
 
2906
  /* Do the work.  */
2907
  create_cfi_notes ();
2908
  connect_traces ();
2909
  add_cfis_to_fde ();
2910
 
2911
  /* Free all the data we allocated.  */
2912
  {
2913
    size_t i;
2914
    dw_trace_info *ti;
2915
 
2916
    FOR_EACH_VEC_ELT (dw_trace_info, trace_info, i, ti)
2917
      VEC_free (reg_saved_in_data, heap, ti->regs_saved_in_regs);
2918
  }
2919
  VEC_free (dw_trace_info, heap, trace_info);
2920
 
2921
  htab_delete (trace_index);
2922
  trace_index = NULL;
2923
 
2924
  return 0;
2925
}
2926
 
2927
/* Convert a DWARF call frame info operation to its string name.  */
2928
 
2929
static const char *
2930
dwarf_cfi_name (unsigned int cfi_opc)
2931
{
2932
  switch (cfi_opc)
2933
    {
2934
    case DW_CFA_advance_loc:
2935
      return "DW_CFA_advance_loc";
2936
    case DW_CFA_offset:
2937
      return "DW_CFA_offset";
2938
    case DW_CFA_restore:
2939
      return "DW_CFA_restore";
2940
    case DW_CFA_nop:
2941
      return "DW_CFA_nop";
2942
    case DW_CFA_set_loc:
2943
      return "DW_CFA_set_loc";
2944
    case DW_CFA_advance_loc1:
2945
      return "DW_CFA_advance_loc1";
2946
    case DW_CFA_advance_loc2:
2947
      return "DW_CFA_advance_loc2";
2948
    case DW_CFA_advance_loc4:
2949
      return "DW_CFA_advance_loc4";
2950
    case DW_CFA_offset_extended:
2951
      return "DW_CFA_offset_extended";
2952
    case DW_CFA_restore_extended:
2953
      return "DW_CFA_restore_extended";
2954
    case DW_CFA_undefined:
2955
      return "DW_CFA_undefined";
2956
    case DW_CFA_same_value:
2957
      return "DW_CFA_same_value";
2958
    case DW_CFA_register:
2959
      return "DW_CFA_register";
2960
    case DW_CFA_remember_state:
2961
      return "DW_CFA_remember_state";
2962
    case DW_CFA_restore_state:
2963
      return "DW_CFA_restore_state";
2964
    case DW_CFA_def_cfa:
2965
      return "DW_CFA_def_cfa";
2966
    case DW_CFA_def_cfa_register:
2967
      return "DW_CFA_def_cfa_register";
2968
    case DW_CFA_def_cfa_offset:
2969
      return "DW_CFA_def_cfa_offset";
2970
 
2971
    /* DWARF 3 */
2972
    case DW_CFA_def_cfa_expression:
2973
      return "DW_CFA_def_cfa_expression";
2974
    case DW_CFA_expression:
2975
      return "DW_CFA_expression";
2976
    case DW_CFA_offset_extended_sf:
2977
      return "DW_CFA_offset_extended_sf";
2978
    case DW_CFA_def_cfa_sf:
2979
      return "DW_CFA_def_cfa_sf";
2980
    case DW_CFA_def_cfa_offset_sf:
2981
      return "DW_CFA_def_cfa_offset_sf";
2982
 
2983
    /* SGI/MIPS specific */
2984
    case DW_CFA_MIPS_advance_loc8:
2985
      return "DW_CFA_MIPS_advance_loc8";
2986
 
2987
    /* GNU extensions */
2988
    case DW_CFA_GNU_window_save:
2989
      return "DW_CFA_GNU_window_save";
2990
    case DW_CFA_GNU_args_size:
2991
      return "DW_CFA_GNU_args_size";
2992
    case DW_CFA_GNU_negative_offset_extended:
2993
      return "DW_CFA_GNU_negative_offset_extended";
2994
 
2995
    default:
2996
      return "DW_CFA_<unknown>";
2997
    }
2998
}
2999
 
3000
/* This routine will generate the correct assembly data for a location
3001
   description based on a cfi entry with a complex address.  */
3002
 
3003
static void
3004
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3005
{
3006
  dw_loc_descr_ref loc;
3007
  unsigned long size;
3008
 
3009
  if (cfi->dw_cfi_opc == DW_CFA_expression)
3010
    {
3011
      unsigned r =
3012
        DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3013
      dw2_asm_output_data (1, r, NULL);
3014
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3015
    }
3016
  else
3017
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3018
 
3019
  /* Output the size of the block.  */
3020
  size = size_of_locs (loc);
3021
  dw2_asm_output_data_uleb128 (size, NULL);
3022
 
3023
  /* Now output the operations themselves.  */
3024
  output_loc_sequence (loc, for_eh);
3025
}
3026
 
3027
/* Similar, but used for .cfi_escape.  */
3028
 
3029
static void
3030
output_cfa_loc_raw (dw_cfi_ref cfi)
3031
{
3032
  dw_loc_descr_ref loc;
3033
  unsigned long size;
3034
 
3035
  if (cfi->dw_cfi_opc == DW_CFA_expression)
3036
    {
3037
      unsigned r =
3038
        DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3039
      fprintf (asm_out_file, "%#x,", r);
3040
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3041
    }
3042
  else
3043
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3044
 
3045
  /* Output the size of the block.  */
3046
  size = size_of_locs (loc);
3047
  dw2_asm_output_data_uleb128_raw (size);
3048
  fputc (',', asm_out_file);
3049
 
3050
  /* Now output the operations themselves.  */
3051
  output_loc_sequence_raw (loc);
3052
}
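
/* For illustration (the directive itself is printed by the caller):
   since the assembler has no .cfi_* directive for DW_CFA_expression or
   DW_CFA_def_cfa_expression, they are escaped as raw bytes, roughly

     .cfi_escape 0x10,<reg>,<uleb128 len>,<DW_OP_... bytes>

   for DW_CFA_expression (opcode 0x10); this routine supplies the
   register (for DW_CFA_expression), the block length and the
   location-expression bytes after the caller has printed the opcode.  */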
3053
 
3054
/* Output a Call Frame Information opcode and its operand(s).  */
3055
 
3056
void
3057
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
3058
{
3059
  unsigned long r;
3060
  HOST_WIDE_INT off;
3061
 
3062
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
3063
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
3064
                             | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3065
                         "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3066
                         ((unsigned HOST_WIDE_INT)
3067
                          cfi->dw_cfi_oprnd1.dw_cfi_offset));
3068
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
3069
    {
3070
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3071
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3072
                           "DW_CFA_offset, column %#lx", r);
3073
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3074
      dw2_asm_output_data_uleb128 (off, NULL);
3075
    }
3076
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
3077
    {
3078
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3079
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3080
                           "DW_CFA_restore, column %#lx", r);
3081
    }
3082
  else
3083
    {
3084
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
3085
                           "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3086
 
3087
      switch (cfi->dw_cfi_opc)
3088
        {
3089
        case DW_CFA_set_loc:
3090
          if (for_eh)
3091
            dw2_asm_output_encoded_addr_rtx (
3092
                ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3093
                gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3094
                false, NULL);
3095
          else
3096
            dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3097
                                 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3098
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc1:
          dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc2:
          dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_advance_loc4:
          dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_MIPS_advance_loc8:
          dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
                                fde->dw_fde_current_label, NULL);
          fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
          break;

        case DW_CFA_offset_extended:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_uleb128 (off, NULL);
          break;

        case DW_CFA_def_cfa:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
          break;

        case DW_CFA_offset_extended_sf:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_def_cfa_sf:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_restore_extended:
        case DW_CFA_undefined:
        case DW_CFA_same_value:
        case DW_CFA_def_cfa_register:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          break;

        case DW_CFA_register:
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
          dw2_asm_output_data_uleb128 (r, NULL);
          break;

        case DW_CFA_def_cfa_offset:
        case DW_CFA_GNU_args_size:
          dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
          break;

        case DW_CFA_def_cfa_offset_sf:
          off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
          dw2_asm_output_data_sleb128 (off, NULL);
          break;

        case DW_CFA_GNU_window_save:
          break;

        case DW_CFA_def_cfa_expression:
        case DW_CFA_expression:
          output_cfa_loc (cfi, for_eh);
          break;

        case DW_CFA_GNU_negative_offset_extended:
          /* Obsoleted by DW_CFA_offset_extended_sf.  */
          gcc_unreachable ();

        default:
          break;
        }
    }
}
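
/* A worked example of the operand encodings above, with illustrative
   register and offset values (they are not taken from any particular
   target):

     DW_CFA_def_cfa, reg 7, offset 16
       -> uleb128 7, uleb128 16                (offset emitted as-is)
     DW_CFA_offset_extended, reg 6, offset -16
       -> uleb128 6, uleb128 div_data_align (-16)
                                               (offset scaled by the CIE
                                                data alignment factor)

   The opcode byte itself is written earlier in output_cfi, before the
   operands handled by this switch.  */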

/* Similar, but do it via assembler directives instead.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
         via directives.  The assembler is going to take care of this for
         us.  But this routine is also used for debugging dumps, so
         print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
               r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
               r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
               HOST_WIDE_INT_PRINT_DEC"\n",
               cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      if (f == asm_out_file)
        {
          fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
          dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
          if (flag_debug_asm)
            fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
                     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
          fputc ('\n', f);
        }
      else
        {
          fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
                   cfi->dw_cfi_oprnd1.dw_cfi_offset);
        }
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
        {
          fprintf (f, "\t.cfi_def_cfa_expression ...\n");
          break;
        }
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
        {
          fprintf (f, "\t.cfi_cfa_expression ...\n");
          break;
        }
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
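
/* As an illustration of the directive form, the records from the example
   after output_cfi would come out as assembler text roughly like this
   (x86-64-style DWARF column numbers assumed):

       .cfi_def_cfa 7, 16
       .cfi_offset 6, -16

   Unlike the binary path above, .cfi_offset is given the unscaled byte
   offset from the CFA; the assembler applies the data alignment factor
   itself when it builds the frame tables.  */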

void
dwarf2out_emit_cfi (dw_cfi_ref cfi)
{
  if (dwarf2out_do_cfi_asm ())
    output_cfi_directive (asm_out_file, cfi);
}
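
/* When dwarf2out_do_cfi_asm returns false this is a no-op; the CFI
   records already collected on the FDE are instead written out later,
   through the table-building path above, when the frame section itself
   is emitted by hand.  */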

static void
dump_cfi_row (FILE *f, dw_cfi_row *row)
{
  dw_cfi_ref cfi;
  unsigned i;

  cfi = row->cfa_cfi;
  if (!cfi)
    {
      dw_cfa_location dummy;
      memset (&dummy, 0, sizeof (dummy));
      dummy.reg = INVALID_REGNUM;
      cfi = def_cfa_0 (&dummy, &row->cfa);
    }
  output_cfi_directive (f, cfi);

  FOR_EACH_VEC_ELT (dw_cfi_ref, row->reg_save, i, cfi)
    if (cfi)
      output_cfi_directive (f, cfi);
}

void debug_cfi_row (dw_cfi_row *row);

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
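
/* debug_cfi_row is meant to be called by hand from the debugger, e.g.

       (gdb) call debug_cfi_row (row)

   which prints the row's CFA definition and register saves to stderr in
   .cfi_* directive form via output_cfi_directive.  */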


/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;

/* Decide whether we want to emit frame unwind information for the current
   translation unit.  */

bool
dwarf2out_do_frame (void)
{
  /* We want to emit correct CFA location expressions or lists, so we
     have to return true if we're going to output debug info, even if
     we're not going to output frame or unwind info.  */
  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
    return true;

  if (saved_do_cfi_asm > 0)
    return true;

  if (targetm.debug_unwind_info () == UI_DWARF2)
    return true;

  if ((flag_unwind_tables || flag_exceptions)
      && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    return true;

  return false;
}
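
/* For example, compiling with -g for DWARF debug info makes this return
   true via the write_symbols test even when no unwind tables or exception
   handling were requested, since the CFA must still be described for the
   variable location expressions to be correct.  */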

/* Decide whether to emit frame unwind via assembler directives.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

#ifdef MIPS_DEBUGGING_INFO
  return false;
#endif

  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
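
/* In the common case (a GAS with .cfi_* support, -fdwarf2-cfi-asm left at
   its default, and absolute or pc-relative EH pointer encodings), this
   caches 1 in saved_do_cfi_asm and frame output goes through the
   directive path; an encoding such as DW_EH_PE_aligned forces the
   hand-written tables instead.  */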

static bool
gate_dwarf2_frame (void)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}
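
/* If the gate returns false the pass manager skips execute_dwarf2_frame
   for the function entirely, so no CFI notes are produced and the target
   must describe its frame some other way, if at all.  */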

struct rtl_opt_pass pass_dwarf2_frame =
{
 {
  RTL_PASS,
  "dwarf2",                     /* name */
  gate_dwarf2_frame,            /* gate */
  execute_dwarf2_frame,         /* execute */
  NULL,                         /* sub */
  NULL,                         /* next */
  0,                            /* static_pass_number */
  TV_FINAL,                     /* tv_id */
  0,                            /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  0                             /* todo_flags_finish */
 }
};
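
/* The gate and execute hooks above are the only entry points the pass
   manager uses for this pass; TV_FINAL attributes the time spent here to
   the "final" timevar in -ftime-report output.  */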

#include "gt-dwarf2cfi.h"
