/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Try to match two basic blocks - or their ends - for structural equivalence.
   We scan the blocks from their ends backwards, hoping to find a match, i.e.
   insns that are identical, except for certain cases involving registers.  A
   mismatch between register number RX (used in block X) and RY (used in the
   same way in block Y) can be handled in one of the following cases:
   1. RX and RY are local to their respective blocks; they are set there and
      die there.  If so, they can effectively be ignored.
   2. RX and RY die in their blocks, but are live at the start.  If any path
      gets redirected through X instead of Y, the caller must emit
      compensation code to move RY to RX.  If there are overlapping inputs,
      the function resolve_input_conflict ensures that this can be done.
      Information about these registers is tracked in the X_LOCAL, Y_LOCAL,
      LOCAL_COUNT and LOCAL_RVALUE fields.
   3. RX and RY live throughout their blocks, including the start and the end.
      Either RX and RY must be identical, or we have to replace all uses in
      block X with a new pseudo, which is stored in the INPUT_REG field.  The
      caller can then use block X instead of block Y by copying RY to the new
      pseudo.

   The main entry point to this file is struct_equiv_block_eq.  This function
   uses a struct equiv_info to accept some of its inputs, to keep track of its
   internal state, to pass down to its helper functions, and to communicate
   some of the results back to the caller.

   Most scans will result in a failure to match a sufficient number of insns
   to make any optimization worthwhile; therefore the process is geared more
   towards quick scanning than towards the ability to backtrack exactly when
   we find a mismatch.  The information gathered is still meaningful enough
   to make a preliminary decision on whether we want to do an optimization:
   we might only slightly overestimate the number of matchable insns, and
   underestimate the number of inputs and miss an input conflict.  Sufficient
   information is gathered so that when we make another pass, we won't have
   to backtrack at the same point.
   Another issue is that information in memory attributes and/or REG_NOTES
   might have to be merged or discarded to make a valid match.  We don't want
   to discard such information when we are not certain that we want to merge
   the two (partial) blocks.
   For these reasons, struct_equiv_block_eq has to be called first with the
   STRUCT_EQUIV_START bit set in the mode parameter.  This will calculate the
   number of matched insns and the number and types of inputs.  If the
   need_rerun field is set, the results are only tentative, and the caller
   has to call again with STRUCT_EQUIV_RERUN till need_rerun is false in
   order to get a reliable match.
   To install the changes necessary for the match, the function has to be
   called again with STRUCT_EQUIV_FINAL.

   While scanning an insn, we process first all the SET_DESTs, then the
   SET_SRCs, then the REG_NOTES, in order to keep the register liveness
   information consistent.
   If we were to mix up the order for sources / destinations in an insn where
   a source is also a destination, we would be misled into thinking that the
   register is not live in the preceding insn.  */
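
/* Illustrative sketch (added for this listing; not part of the original
   file and not compiled): a hypothetical driver for the three-phase
   protocol described above.  The mode bits and struct equiv_info fields
   used here are the ones this file already relies on; the block choice,
   the input_cost setup and any extra mode bits (STRUCT_EQUIV_MATCH_JUMPS,
   STRUCT_EQUIV_NEED_FULL_BLOCK, CLEANUP_* flags) are placeholders that a
   real caller such as the cross-jumping code would provide.  */
#if 0
static int
example_match_blocks (basic_block x, basic_block y, struct equiv_info *info)
{
  int nmatched;

  info->x_block = x;
  info->y_block = y;

  /* First pass: find out how many insns match and what the inputs are.  */
  nmatched = struct_equiv_block_eq (STRUCT_EQUIV_START, info);
  if (!nmatched)
    return 0;

  /* The results are only tentative while need_rerun is set.  */
  while (info->need_rerun)
    nmatched = struct_equiv_block_eq (STRUCT_EQUIV_RERUN, info);

  /* Once the caller has decided the match is worthwhile, a final call
     installs the changes: equivalences are applied, non-matching notes
     are removed and memory attributes are merged.  */
  if (nmatched)
    nmatched = struct_equiv_block_eq (STRUCT_EQUIV_FINAL, info);
  return nmatched;
}
#endif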

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "tm_p.h"
#include "target.h"
#include "emit-rtl.h"
#include "reload.h"

static void merge_memattrs (rtx, rtx);
static bool set_dest_equiv_p (rtx x, rtx y, struct equiv_info *info);
static bool set_dest_addr_equiv_p (rtx x, rtx y, struct equiv_info *info);
static void find_dying_inputs (struct equiv_info *info);
static bool resolve_input_conflict (struct equiv_info *info);

/* After reload, some moves, as indicated by SECONDARY_RELOAD_CLASS and
   SECONDARY_MEMORY_NEEDED, cannot be done directly.  For our purposes, we
   consider them impossible to generate after reload (even though some
   might be synthesized when you throw enough code at them).
   Since we don't know while processing a cross-jump if a local register
   that is currently live will eventually be live and thus be an input,
   we keep track of potential inputs that would require an impossible move
   by using a prohibitively high cost for them.
   This number, multiplied with the larger of STRUCT_EQUIV_MAX_LOCAL and
   FIRST_PSEUDO_REGISTER, must fit in the input_cost field of
   struct equiv_info.  */
#define IMPOSSIBLE_MOVE_FACTOR 20000
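
/* Worked example (added for illustration): the profitability test in
   struct_equiv_improve_checkpoint below compares
     COSTS_N_INSNS (newly matched insns) > input_cost * (new inputs).
   With, say, input_cost equal to COSTS_N_INSNS (1), matching three more
   insns at the price of one extra input passes the test, while matching
   one more insn at the price of two extra inputs does not.  A potential
   input that would need an impossible move is counted with a weight of
   IMPOSSIBLE_MOVE_FACTOR in input_count, so such a match can never look
   profitable; in addition, any state with input_count >=
   IMPOSSIBLE_MOVE_FACTOR is rejected outright.  */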



/* Remove or merge the memory attributes of corresponding MEM expressions
   in X and Y where they are not equal.  */

void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          rtx mem_size;

          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }
          else if (MEM_OFFSET (x) != MEM_OFFSET (y))
            {
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }

          if (!MEM_SIZE (x))
            mem_size = NULL_RTX;
          else if (!MEM_SIZE (y))
            mem_size = NULL_RTX;
          else
            mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
                                     INTVAL (MEM_SIZE (y))));
          set_mem_size (x, mem_size);
          set_mem_size (y, mem_size);

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}
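
/* Example (added for illustration) of the merge above: if X and Y are two
   matched MEMs where MEM_SIZE (x) is 4 and MEM_SIZE (y) is 8, both end up
   with size 8 and with the smaller of the two alignments; if their alias
   sets or MEM_EXPRs differ, those attributes are simply dropped, so the
   merged insn makes no claim that the other block could not also make.  */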

/* In SET, set the bit for the register number of REG to VALUE.
   If REG is a hard register, do so for all its constituent registers.
   Return the number of registers that have become included (as a positive
   number) or excluded (as a negative number).  */
static int
assign_reg_reg_set (regset set, rtx reg, int value)
{
  unsigned regno = REGNO (reg);
  int nregs, i, old;

  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      gcc_assert (!reload_completed);
      nregs = 1;
    }
  else
    nregs = hard_regno_nregs[regno][GET_MODE (reg)];
  for (old = 0, i = nregs; --i >= 0; regno++)
    {
      if ((value != 0) == REGNO_REG_SET_P (set, regno))
        continue;
      if (value)
        old++, SET_REGNO_REG_SET (set, regno);
      else
        old--, CLEAR_REGNO_REG_SET (set, regno);
    }
  return old;
}

/* Record state about current inputs / local registers / liveness
   in *P.  */
static inline void
struct_equiv_make_checkpoint (struct struct_equiv_checkpoint *p,
                              struct equiv_info *info)
{
  *p = info->cur;
}

/* Call struct_equiv_make_checkpoint (P, INFO) if the current partial block
   is suitable to split off - i.e. there is no dangling cc0 user - and
   if the current cost of the common instructions, minus the cost for
   setting up the inputs, is higher than what has been recorded before
   in *P.  Also, if we do so, confirm or cancel any pending
   changes.  */
static void
struct_equiv_improve_checkpoint (struct struct_equiv_checkpoint *p,
                                 struct equiv_info *info)
{
#ifdef HAVE_cc0
  if (reg_mentioned_p (cc0_rtx, info->cur.x_start)
      && !sets_cc0_p (info->cur.x_start))
    return;
#endif
  if (info->cur.input_count >= IMPOSSIBLE_MOVE_FACTOR)
    return;
  if (info->input_cost >= 0
      ? (COSTS_N_INSNS (info->cur.ninsns - p->ninsns)
         > info->input_cost * (info->cur.input_count - p->input_count))
      : info->cur.ninsns > p->ninsns && !info->cur.input_count)
    {
      if (info->check_input_conflict && ! resolve_input_conflict (info))
        return;
      /* We have a profitable set of changes.  If this is the final pass,
         commit them now.  Otherwise, we don't know yet if we can make any
         change, so put the old code back for now.  */
      if (info->mode & STRUCT_EQUIV_FINAL)
        confirm_change_group ();
      else
        cancel_changes (0);
      struct_equiv_make_checkpoint (p, info);
    }
}

/* Restore state about current inputs / local registers / liveness
   from P.  */
static void
struct_equiv_restore_checkpoint (struct struct_equiv_checkpoint *p,
                                 struct equiv_info *info)
{
  info->cur.ninsns = p->ninsns;
  info->cur.x_start = p->x_start;
  info->cur.y_start = p->y_start;
  info->cur.input_count = p->input_count;
  info->cur.input_valid = p->input_valid;
  while (info->cur.local_count > p->local_count)
    {
      info->cur.local_count--;
      info->cur.version--;
      if (REGNO_REG_SET_P (info->x_local_live,
                           REGNO (info->x_local[info->cur.local_count])))
        {
          assign_reg_reg_set (info->x_local_live,
                              info->x_local[info->cur.local_count], 0);
          assign_reg_reg_set (info->y_local_live,
                              info->y_local[info->cur.local_count], 0);
          info->cur.version--;
        }
    }
  if (info->cur.version != p->version)
    info->need_rerun = true;
}


/* Update register liveness to reflect that X is now live (if rvalue is
   nonzero) or dead (if rvalue is zero) in INFO->x_block, and likewise Y
   in INFO->y_block.  Return the number of registers the liveness of which
   changed in each block (as a negative number if registers became dead).  */
static int
note_local_live (struct equiv_info *info, rtx x, rtx y, int rvalue)
{
  unsigned x_regno = REGNO (x);
  unsigned y_regno = REGNO (y);
  int x_nominal_nregs = (x_regno >= FIRST_PSEUDO_REGISTER
                         ? 1 : hard_regno_nregs[x_regno][GET_MODE (x)]);
  int y_nominal_nregs = (y_regno >= FIRST_PSEUDO_REGISTER
                         ? 1 : hard_regno_nregs[y_regno][GET_MODE (y)]);
  int x_change = assign_reg_reg_set (info->x_local_live, x, rvalue);
  int y_change = assign_reg_reg_set (info->y_local_live, y, rvalue);

  gcc_assert (x_nominal_nregs && y_nominal_nregs);
  gcc_assert (x_change * y_nominal_nregs == y_change * x_nominal_nregs);
  if (y_change)
    {
      if (reload_completed)
        {
          unsigned x_regno ATTRIBUTE_UNUSED = REGNO (x);
          unsigned y_regno = REGNO (y);
          enum machine_mode x_mode = GET_MODE (x);

          if (secondary_reload_class (0, REGNO_REG_CLASS (y_regno), x_mode, x)
              != NO_REGS
#ifdef SECONDARY_MEMORY_NEEDED
              || SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (y_regno),
                                          REGNO_REG_CLASS (x_regno), x_mode)
#endif
              )
            y_change *= IMPOSSIBLE_MOVE_FACTOR;
        }
      info->cur.input_count += y_change;
      info->cur.version++;
    }
  return x_change;
}

/* Check if *XP is equivalent to Y.  Until an unreconcilable difference is
   found, use in-group changes with validate_change on *XP to make register
   assignments agree.  It is the (not necessarily direct) caller's
   responsibility to verify / confirm / cancel these changes, as appropriate.
   RVALUE indicates if the processed piece of rtl is used as a destination, in
   which case we can't have different registers being an input.  Returns
   nonzero if *XP and Y have been found equivalent, zero otherwise.
   RVALUE == 0: destination
   RVALUE == 1: source
   RVALUE == -1: source, ignore SET_DEST of SET / clobber.  */
bool
rtx_equiv_p (rtx *xp, rtx y, int rvalue, struct equiv_info *info)
{
  rtx x = *xp;
  enum rtx_code code;
  int length;
  const char *format;
  int i;

  if (!y || !x)
    return x == y;
  code = GET_CODE (y);
  if (code != REG && x == y)
    return true;
  if (GET_CODE (x) != code
      || GET_MODE (x) != GET_MODE (y))
    return false;

  /* ??? could extend to allow CONST_INT inputs.  */
  switch (code)
    {
    case REG:
      {
        unsigned x_regno = REGNO (x);
        unsigned y_regno = REGNO (y);
        int x_common_live, y_common_live;

        if (reload_completed
            && (x_regno >= FIRST_PSEUDO_REGISTER
                || y_regno >= FIRST_PSEUDO_REGISTER))
          {
            /* We should only see this in REG_NOTEs.  */
            gcc_assert (!info->live_update);
            /* Returning false will cause us to remove the notes.  */
            return false;
          }
#ifdef STACK_REGS
        /* After reg-stack, can only accept literal matches of stack regs.  */
        if (info->mode & CLEANUP_POST_REGSTACK
            && (IN_RANGE (x_regno, FIRST_STACK_REG, LAST_STACK_REG)
                || IN_RANGE (y_regno, FIRST_STACK_REG, LAST_STACK_REG)))
          return x_regno == y_regno;
#endif

        /* If the register is a locally live one in one block, the
           corresponding one must be locally live in the other, too, and
           match of identical regnos doesn't apply.  */
        if (REGNO_REG_SET_P (info->x_local_live, x_regno))
          {
            if (!REGNO_REG_SET_P (info->y_local_live, y_regno))
              return false;
          }
        else if (REGNO_REG_SET_P (info->y_local_live, y_regno))
          return false;
        else if (x_regno == y_regno)
          {
            if (!rvalue && info->cur.input_valid
                && (reg_overlap_mentioned_p (x, info->x_input)
                    || reg_overlap_mentioned_p (x, info->y_input)))
              return false;

            /* Update liveness information.  */
            if (info->live_update
                && assign_reg_reg_set (info->common_live, x, rvalue))
              info->cur.version++;

            return true;
          }

        x_common_live = REGNO_REG_SET_P (info->common_live, x_regno);
        y_common_live = REGNO_REG_SET_P (info->common_live, y_regno);
        if (x_common_live != y_common_live)
          return false;
        else if (x_common_live)
          {
            if (! rvalue || info->input_cost < 0 || no_new_pseudos)
              return false;
            /* If info->live_update is not set, we are processing notes.
               We then allow a match with x_input / y_input found in a
               previous pass.  */
            if (info->live_update && !info->cur.input_valid)
              {
                info->cur.input_valid = true;
                info->x_input = x;
                info->y_input = y;
                info->cur.input_count += optimize_size ? 2 : 1;
                if (info->input_reg
                    && GET_MODE (info->input_reg) != GET_MODE (info->x_input))
                  info->input_reg = NULL_RTX;
                if (!info->input_reg)
                  info->input_reg = gen_reg_rtx (GET_MODE (info->x_input));
              }
            else if ((info->live_update
                      ? ! info->cur.input_valid : ! info->x_input)
                     || ! rtx_equal_p (x, info->x_input)
                     || ! rtx_equal_p (y, info->y_input))
              return false;
            validate_change (info->cur.x_start, xp, info->input_reg, 1);
          }
        else
          {
            int x_nregs = (x_regno >= FIRST_PSEUDO_REGISTER
                           ? 1 : hard_regno_nregs[x_regno][GET_MODE (x)]);
            int y_nregs = (y_regno >= FIRST_PSEUDO_REGISTER
                           ? 1 : hard_regno_nregs[y_regno][GET_MODE (y)]);
            int size = GET_MODE_SIZE (GET_MODE (x));
            enum machine_mode x_mode = GET_MODE (x);
            unsigned x_regno_i, y_regno_i;
            int x_nregs_i, y_nregs_i, size_i;
            int local_count = info->cur.local_count;

            /* This might be a register local to each block.  See if we have
               it already registered.  */
            for (i = local_count - 1; i >= 0; i--)
              {
                x_regno_i = REGNO (info->x_local[i]);
                x_nregs_i = (x_regno_i >= FIRST_PSEUDO_REGISTER
                             ? 1 : hard_regno_nregs[x_regno_i][GET_MODE (x)]);
                y_regno_i = REGNO (info->y_local[i]);
                y_nregs_i = (y_regno_i >= FIRST_PSEUDO_REGISTER
                             ? 1 : hard_regno_nregs[y_regno_i][GET_MODE (y)]);
                size_i = GET_MODE_SIZE (GET_MODE (info->x_local[i]));

                /* If we have a new pair of registers that is wider than an
                   old pair and enclosing it with matching offsets,
                   remove the old pair.  If we find a matching, wider, old
                   pair, use the old one.  If the width is the same, use the
                   old one if the modes match, but the new if they don't.
                   We don't want to get too fancy with subreg_regno_offset
                   here, so we just test two straightforward cases each.  */
                if (info->live_update
                    && (x_mode != GET_MODE (info->x_local[i])
                        ? size >= size_i : size > size_i))
                  {
                    /* If the new pair is fully enclosing a matching
                       existing pair, remove the old one.  N.B. because
                       we are removing one entry here, the check below
                       if we have space for a new entry will succeed.  */
                    if ((x_regno <= x_regno_i
                         && x_regno + x_nregs >= x_regno_i + x_nregs_i
                         && x_nregs == y_nregs && x_nregs_i == y_nregs_i
                         && x_regno - x_regno_i == y_regno - y_regno_i)
                        || (x_regno == x_regno_i && y_regno == y_regno_i
                            && x_nregs >= x_nregs_i && y_nregs >= y_nregs_i))
                      {
                        info->cur.local_count = --local_count;
                        info->x_local[i] = info->x_local[local_count];
                        info->y_local[i] = info->y_local[local_count];
                        continue;
                      }
                  }
                else
                  {

                    /* If the new pair is fully enclosed within a matching
                       existing pair, succeed.  */
                    if (x_regno >= x_regno_i
                        && x_regno + x_nregs <= x_regno_i + x_nregs_i
                        && x_nregs == y_nregs && x_nregs_i == y_nregs_i
                        && x_regno - x_regno_i == y_regno - y_regno_i)
                      break;
                    if (x_regno == x_regno_i && y_regno == y_regno_i
                        && x_nregs <= x_nregs_i && y_nregs <= y_nregs_i)
                      break;
                  }

                /* Any other overlap causes a match failure.  */
                if (x_regno + x_nregs > x_regno_i
                    && x_regno_i + x_nregs_i > x_regno)
                  return false;
                if (y_regno + y_nregs > y_regno_i
                    && y_regno_i + y_nregs_i > y_regno)
                  return false;
              }
            if (i < 0)
              {
                /* Not found.  Create a new entry if possible.  */
                if (!info->live_update
                    || info->cur.local_count >= STRUCT_EQUIV_MAX_LOCAL)
                  return false;
                info->x_local[info->cur.local_count] = x;
                info->y_local[info->cur.local_count] = y;
                info->cur.local_count++;
                info->cur.version++;
              }
            note_local_live (info, x, y, rvalue);
          }
        return true;
      }
    case SET:
      gcc_assert (rvalue < 0);
      /* Ignore the destination's role as a destination.  Still, we have
         to consider input registers embedded in the addresses of a MEM.
         N.B., we process the rvalue aspect of STRICT_LOW_PART /
         ZERO_EXTEND / SIGN_EXTEND along with their lvalue aspect.  */
      if (!set_dest_addr_equiv_p (SET_DEST (x), SET_DEST (y), info))
        return false;
      /* Process source.  */
      return rtx_equiv_p (&SET_SRC (x), SET_SRC (y), 1, info);
    case PRE_MODIFY:
      /* Process destination.  */
      if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info))
        return false;
      /* Process source.  */
      return rtx_equiv_p (&XEXP (x, 1), XEXP (y, 1), 1, info);
    case POST_MODIFY:
      {
        rtx x_dest0, x_dest1;

        /* Process destination.  */
        x_dest0 = XEXP (x, 0);
        gcc_assert (REG_P (x_dest0));
        if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info))
          return false;
        x_dest1 = XEXP (x, 0);
        /* validate_change might have changed the destination.  Put it back
           so that we can do a proper match for its role as an input.  */
        XEXP (x, 0) = x_dest0;
        if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info))
          return false;
        gcc_assert (x_dest1 == XEXP (x, 0));
        /* Process source.  */
        return rtx_equiv_p (&XEXP (x, 1), XEXP (y, 1), 1, info);
      }
    case CLOBBER:
      gcc_assert (rvalue < 0);
      return true;
    /* Some special forms are also rvalues when they appear in lvalue
       positions.  However, we must not try to match a register after we
       have already altered it with validate_change, so we consider the
       rvalue aspect while we process the lvalue.  */
    case STRICT_LOW_PART:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      {
        rtx x_inner, y_inner;
        enum rtx_code code;
        int change;

        if (rvalue)
          break;
        x_inner = XEXP (x, 0);
        y_inner = XEXP (y, 0);
        if (GET_MODE (x_inner) != GET_MODE (y_inner))
          return false;
        code = GET_CODE (x_inner);
        if (code != GET_CODE (y_inner))
          return false;
        /* The address of a MEM is an input that will be processed during
           rvalue == -1 processing.  */
        if (code == SUBREG)
          {
            if (SUBREG_BYTE (x_inner) != SUBREG_BYTE (y_inner))
              return false;
            x = x_inner;
            x_inner = SUBREG_REG (x_inner);
            y_inner = SUBREG_REG (y_inner);
            if (GET_MODE (x_inner) != GET_MODE (y_inner))
              return false;
            code = GET_CODE (x_inner);
            if (code != GET_CODE (y_inner))
              return false;
          }
        if (code == MEM)
          return true;
        gcc_assert (code == REG);
        if (! rtx_equiv_p (&XEXP (x, 0), y_inner, rvalue, info))
          return false;
        if (REGNO (x_inner) == REGNO (y_inner))
          {
            change = assign_reg_reg_set (info->common_live, x_inner, 1);
            info->cur.version++;
          }
        else
          change = note_local_live (info, x_inner, y_inner, 1);
        gcc_assert (change);
        return true;
      }
    /* The AUTO_INC / POST_MODIFY / PRE_MODIFY sets are modelled to take
       place during input processing; however, that is benign, since they
       are paired with reads.  */
    case MEM:
      return !rvalue || rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), rvalue, info);
    case POST_INC: case POST_DEC: case PRE_INC: case PRE_DEC:
      return (rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info)
              && rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info));
    case PARALLEL:
      /* If this is a top-level PATTERN PARALLEL, we expect the caller to
         have handled the SET_DESTs.  A complex or vector PARALLEL can be
         identified by having a mode.  */
      gcc_assert (rvalue < 0 || GET_MODE (x) != VOIDmode);
      break;
    case LABEL_REF:
      /* Check special tablejump match case.  */
      if (XEXP (y, 0) == info->y_label)
        return (XEXP (x, 0) == info->x_label);
      /* We can't assume nonlocal labels have their following insns yet.  */
      if (LABEL_REF_NONLOCAL_P (x) || LABEL_REF_NONLOCAL_P (y))
        return XEXP (x, 0) == XEXP (y, 0);

      /* Two label-refs are equivalent if they point at labels
         in the same position in the instruction stream.  */
      return (next_real_insn (XEXP (x, 0))
              == next_real_insn (XEXP (y, 0)));
    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);
    /* Some rtl is guaranteed to be shared, or unique; if we didn't match
       EQ equality above, they aren't the same.  */
    case CONST_INT:
    case CODE_LABEL:
      return false;
    default:
      break;
    }

  /* For commutative operations, the RTXs match if the operands match in any
     order.  */
  if (targetm.commutative_p (x, UNKNOWN))
    return ((rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), rvalue, info)
             && rtx_equiv_p (&XEXP (x, 1), XEXP (y, 1), rvalue, info))
            || (rtx_equiv_p (&XEXP (x, 0), XEXP (y, 1), rvalue, info)
                && rtx_equiv_p (&XEXP (x, 1), XEXP (y, 0), rvalue, info)));

  /* Process subexpressions - this is similar to rtx_equal_p.  */
  length = GET_RTX_LENGTH (code);
  format = GET_RTX_FORMAT (code);

  for (i = 0; i < length; ++i)
    {
      switch (format[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return false;
          break;
        case 'n':
        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return false;
          break;
        case 'V':
        case 'E':
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return false;
          if (XVEC (x, i) != 0)
            {
              int j;
              for (j = 0; j < XVECLEN (x, i); ++j)
                {
                  if (! rtx_equiv_p (&XVECEXP (x, i, j), XVECEXP (y, i, j),
                                     rvalue, info))
                    return false;
                }
            }
          break;
        case 'e':
          if (! rtx_equiv_p (&XEXP (x, i), XEXP (y, i), rvalue, info))
            return false;
          break;
        case 'S':
        case 's':
          if ((XSTR (x, i) || XSTR (y, i))
              && (! XSTR (x, i) || ! XSTR (y, i)
                  || strcmp (XSTR (x, i), XSTR (y, i))))
            return false;
          break;
        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;
        case '0':
        case 't':
          break;
          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          gcc_unreachable ();
        }
    }
  return true;
}

/* Do only the rtx_equiv_p SET_DEST processing for SETs and CLOBBERs.
   Since we are scanning backwards, this is the first step in processing each
   insn.  Return true for success.  */
static bool
set_dest_equiv_p (rtx x, rtx y, struct equiv_info *info)
{
  if (!x || !y)
    return x == y;
  if (GET_CODE (x) != GET_CODE (y))
    return false;
  else if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    return rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 0, info);
  else if (GET_CODE (x) == PARALLEL)
    {
      int j;

      if (XVECLEN (x, 0) != XVECLEN (y, 0))
        return false;
      for (j = 0; j < XVECLEN (x, 0); ++j)
        {
          rtx xe = XVECEXP (x, 0, j);
          rtx ye = XVECEXP (y, 0, j);

          if (GET_CODE (xe) != GET_CODE (ye))
            return false;
          if ((GET_CODE (xe) == SET || GET_CODE (xe) == CLOBBER)
              && ! rtx_equiv_p (&XEXP (xe, 0), XEXP (ye, 0), 0, info))
            return false;
        }
    }
  return true;
}

/* Process MEMs in SET_DEST destinations.  We must not process this together
   with REG SET_DESTs, but must do it separately, lest when we see
   [(set (reg:SI foo) (bar))
    (set (mem:SI (reg:SI foo)) (baz))]
   struct_equiv_block_eq could be misled into assuming that (reg:SI foo)
   is not live before this instruction.  */
static bool
set_dest_addr_equiv_p (rtx x, rtx y, struct equiv_info *info)
{
  enum rtx_code code = GET_CODE (x);
  int length;
  const char *format;
  int i;

  if (code != GET_CODE (y))
    return false;
  if (code == MEM)
    return rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info);

  /* Process subexpressions.  */
  length = GET_RTX_LENGTH (code);
  format = GET_RTX_FORMAT (code);

  for (i = 0; i < length; ++i)
    {
      switch (format[i])
        {
        case 'V':
        case 'E':
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return false;
          if (XVEC (x, i) != 0)
            {
              int j;
              for (j = 0; j < XVECLEN (x, i); ++j)
                {
                  if (! set_dest_addr_equiv_p (XVECEXP (x, i, j),
                                               XVECEXP (y, i, j), info))
                    return false;
                }
            }
          break;
        case 'e':
          if (! set_dest_addr_equiv_p (XEXP (x, i), XEXP (y, i), info))
            return false;
          break;
        default:
          break;
        }
    }
  return true;
}

/* Check if the set of REG_DEAD notes attached to I1 and I2 allows us to
   go ahead with merging I1 and I2, which otherwise look fine.
   Inputs / local registers for the inputs of I1 and I2 have already been
   set up.  */
static bool
death_notes_match_p (rtx i1 ATTRIBUTE_UNUSED, rtx i2 ATTRIBUTE_UNUSED,
                     struct equiv_info *info ATTRIBUTE_UNUSED)
{
#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like regs.  */

  if ((info->mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          {
            unsigned regno = REGNO (XEXP (note, 0));
            int i;

            for (i = info->cur.local_count - 1; i >= 0; i--)
              if (regno == REGNO (info->y_local[i]))
                {
                  regno = REGNO (info->x_local[i]);
                  break;
                }
            SET_HARD_REG_BIT (i2_regset, regno);
          }

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif
  return true;
}

/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

bool
insns_match_p (rtx i1, rtx i2, struct equiv_info *info)
{
  int rvalue_change_start;
  struct struct_equiv_checkpoint before_rvalue_change;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  info->cur.x_start = i1;
  info->cur.y_start = i2;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1))
    {
      if (SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)
          || ! set_dest_equiv_p (PATTERN (i1), PATTERN (i2), info)
          || ! set_dest_equiv_p (CALL_INSN_FUNCTION_USAGE (i1),
                                 CALL_INSN_FUNCTION_USAGE (i2), info)
          || ! rtx_equiv_p (&CALL_INSN_FUNCTION_USAGE (i1),
                            CALL_INSN_FUNCTION_USAGE (i2), -1, info))
        {
          cancel_changes (0);
          return false;
        }
    }
  else if (INSN_P (i1))
    {
      if (! set_dest_equiv_p (PATTERN (i1), PATTERN (i2), info))
        {
          cancel_changes (0);
          return false;
        }
    }
  rvalue_change_start = num_validated_changes ();
  struct_equiv_make_checkpoint (&before_rvalue_change, info);
  /* Check death_notes_match_p *after* the inputs have been processed,
     so that local inputs will already have been set up.  */
  if (! INSN_P (i1)
      || (!bitmap_bit_p (info->equiv_used, info->cur.ninsns)
          && rtx_equiv_p (&PATTERN (i1), PATTERN (i2), -1, info)
          && death_notes_match_p (i1, i2, info)
          && verify_changes (0)))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      rtx equiv1, equiv2;

      cancel_changes (rvalue_change_start);
      struct_equiv_restore_checkpoint (&before_rvalue_change, info);

      /* The following code helps take care of G++ cleanups.  */
      equiv1 = find_reg_equal_equiv_note (i1);
      equiv2 = find_reg_equal_equiv_note (i2);
      if (equiv1 && equiv2
          /* If the equivalences are not to a constant, they may
             reference pseudos that no longer exist, so we can't
             use them.  */
          && (! reload_completed
              || (CONSTANT_P (XEXP (equiv1, 0))
                  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
        {
          rtx s1 = single_set (i1);
          rtx s2 = single_set (i2);

          if (s1 != 0 && s2 != 0)
            {
              validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
              validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
              /* Only inspecting the new SET_SRC is not good enough,
                 because there may also be bare USEs in a single_set
                 PARALLEL.  */
              if (rtx_equiv_p (&PATTERN (i1), PATTERN (i2), -1, info)
                  && death_notes_match_p (i1, i2, info)
                  && verify_changes (0))
                {
                  /* Mark this insn so that we'll use the equivalence in
                     all subsequent passes.  */
                  bitmap_set_bit (info->equiv_used, info->cur.ninsns);
                  return true;
                }
            }
        }
    }

  cancel_changes (0);
  return false;
}

/* Set up mode and register information in INFO.  Return true for success.  */
bool
struct_equiv_init (int mode, struct equiv_info *info)
{
  if ((info->x_block->flags | info->y_block->flags) & BB_DIRTY)
    update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
                                      (PROP_DEATH_NOTES
                                       | ((mode & CLEANUP_POST_REGSTACK)
                                          ? PROP_POST_REGSTACK : 0)));
  if (!REG_SET_EQUAL_P (info->x_block->il.rtl->global_live_at_end,
                        info->y_block->il.rtl->global_live_at_end))
    {
#ifdef STACK_REGS
      unsigned rn;

      if (!(mode & CLEANUP_POST_REGSTACK))
        return false;
      /* After reg-stack.  Remove bogus live info about stack regs.  N.B.
         these regs are not necessarily all dead - we swap random bogosity
         against constant bogosity.  However, clearing these bits at
         least makes the regsets comparable.  */
      for (rn = FIRST_STACK_REG; rn <= LAST_STACK_REG; rn++)
        {
          CLEAR_REGNO_REG_SET (info->x_block->il.rtl->global_live_at_end, rn);
          CLEAR_REGNO_REG_SET (info->y_block->il.rtl->global_live_at_end, rn);
        }
      if (!REG_SET_EQUAL_P (info->x_block->il.rtl->global_live_at_end,
                            info->y_block->il.rtl->global_live_at_end))
#endif
        return false;
    }
  info->mode = mode;
  if (mode & STRUCT_EQUIV_START)
    {
      info->x_input = info->y_input = info->input_reg = NULL_RTX;
      info->equiv_used = ALLOC_REG_SET (&reg_obstack);
      info->check_input_conflict = false;
    }
  info->had_input_conflict = false;
  info->cur.ninsns = info->cur.version = 0;
  info->cur.local_count = info->cur.input_count = 0;
  info->cur.x_start = info->cur.y_start = NULL_RTX;
  info->x_label = info->y_label = NULL_RTX;
  info->need_rerun = false;
  info->live_update = true;
  info->cur.input_valid = false;
  info->common_live = ALLOC_REG_SET (&reg_obstack);
  info->x_local_live = ALLOC_REG_SET (&reg_obstack);
  info->y_local_live = ALLOC_REG_SET (&reg_obstack);
  COPY_REG_SET (info->common_live, info->x_block->il.rtl->global_live_at_end);
  struct_equiv_make_checkpoint (&info->best_match, info);
  return true;
}

/* Insns XI and YI have been matched.  Merge memory attributes and reg
   notes.  */
static void
struct_equiv_merge (rtx xi, rtx yi, struct equiv_info *info)
{
  rtx equiv1, equiv2;

  merge_memattrs (xi, yi);

  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  info->live_update = false;
  equiv1 = find_reg_equal_equiv_note (xi);
  equiv2 = find_reg_equal_equiv_note (yi);
  if (equiv1 && !equiv2)
    remove_note (xi, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (yi, equiv2);
  else if (equiv1 && equiv2
           && !rtx_equiv_p (&XEXP (equiv1, 0), XEXP (equiv2, 0),
                            1, info))
    {
      remove_note (xi, equiv1);
      remove_note (yi, equiv2);
    }
  info->live_update = true;
}

/* Return the number of matched insns.
   This function must be called up to three times for a successful cross-jump
   match:
   first, to find out which instructions do match.  While trying to match
   another instruction that doesn't match, we destroy information in INFO
   about the actual inputs.  So if there have been any before the last
   match attempt, we need to call this function again to recompute the
   actual inputs up to the actual start of the matching sequence.
   When we are then satisfied that the cross-jump is worthwhile, we
   call this function a third time to make any changes needed to make the
   sequences match: apply equivalences, remove non-matching
   notes and merge memory attributes.  */
int
struct_equiv_block_eq (int mode, struct equiv_info *info)
{
  rtx x_stop, y_stop;
  rtx xi, yi;
  int i;

  if (mode & STRUCT_EQUIV_START)
    {
      x_stop = BB_HEAD (info->x_block);
      y_stop = BB_HEAD (info->y_block);
      if (!x_stop || !y_stop)
        return 0;
    }
  else
    {
      x_stop = info->cur.x_start;
      y_stop = info->cur.y_start;
    }
  if (!struct_equiv_init (mode, info))
    gcc_unreachable ();

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  xi = BB_END (info->x_block);
  if (onlyjump_p (xi)
      || (returnjump_p (xi) && !side_effects_p (PATTERN (xi))))
    {
      info->cur.x_start = xi;
      xi = PREV_INSN (xi);
    }

  yi = BB_END (info->y_block);
  if (onlyjump_p (yi)
      || (returnjump_p (yi) && !side_effects_p (PATTERN (yi))))
    {
      info->cur.y_start = yi;
      /* Count everything except for an unconditional jump as an insn.  */
      /* ??? Is it right to count unconditional jumps with a clobber?
         Should we count conditional returns?  */
      if (!simplejump_p (yi) && !returnjump_p (yi) && info->cur.x_start)
        info->cur.ninsns++;
      yi = PREV_INSN (yi);
    }

  if (mode & STRUCT_EQUIV_MATCH_JUMPS)
    {
      /* The caller is expected to have compared the jumps already, but we
         need to match them again to get any local registers and inputs.  */
      gcc_assert (!info->cur.x_start == !info->cur.y_start);
      if (info->cur.x_start)
        {
          if (any_condjump_p (info->cur.x_start)
              ? !condjump_equiv_p (info, false)
              : !insns_match_p (info->cur.x_start, info->cur.y_start, info))
            gcc_unreachable ();
        }
      else if (any_condjump_p (xi) && any_condjump_p (yi))
        {
          info->cur.x_start = xi;
          info->cur.y_start = yi;
          xi = PREV_INSN (xi);
          yi = PREV_INSN (yi);
          info->cur.ninsns++;
          if (!condjump_equiv_p (info, false))
            gcc_unreachable ();
        }
      if (info->cur.x_start && info->mode & STRUCT_EQUIV_FINAL)
        struct_equiv_merge (info->cur.x_start, info->cur.y_start, info);
    }

  struct_equiv_improve_checkpoint (&info->best_match, info);
  info->x_end = xi;
  info->y_end = yi;
  if (info->cur.x_start != x_stop)
    for (;;)
      {
        /* Ignore notes.  */
        while (!INSN_P (xi) && xi != x_stop)
          xi = PREV_INSN (xi);

        while (!INSN_P (yi) && yi != y_stop)
          yi = PREV_INSN (yi);

        if (!insns_match_p (xi, yi, info))
          break;
        if (INSN_P (xi))
          {
            if (info->mode & STRUCT_EQUIV_FINAL)
              struct_equiv_merge (xi, yi, info);
            info->cur.ninsns++;
            struct_equiv_improve_checkpoint (&info->best_match, info);
          }
        if (xi == x_stop || yi == y_stop)
          {
            /* If we reached the start of at least one of the blocks, but
               best_match hasn't been advanced back to the first valid insn
               yet, represent the increased benefit of completing the block
               as an increased instruction count.  */
            if (info->best_match.x_start != info->cur.x_start
                && (xi == BB_HEAD (info->x_block)
                    || yi == BB_HEAD (info->y_block)))
              {
                info->cur.ninsns++;
                struct_equiv_improve_checkpoint (&info->best_match, info);
                info->cur.ninsns--;
                if (info->best_match.ninsns > info->cur.ninsns)
                  info->best_match.ninsns = info->cur.ninsns;
              }
            break;
          }
        xi = PREV_INSN (xi);
        yi = PREV_INSN (yi);
      }

  /* If we failed to match an insn, but had some changes registered from
     trying to make the insns match, we need to cancel these changes now.  */
  cancel_changes (0);
  /* Restore to best_match to get the sequence with the best known-so-far
     cost-benefit difference.  */
  struct_equiv_restore_checkpoint (&info->best_match, info);

  /* Include preceding notes and labels in the cross-jump / if-conversion.
     One, this may bring us to the head of the blocks.
     Two, it keeps line number notes as well matched as possible.  */
  if (info->cur.ninsns)
    {
      xi = info->cur.x_start;
      yi = info->cur.y_start;
      while (xi != x_stop && !INSN_P (PREV_INSN (xi)))
        xi = PREV_INSN (xi);

      while (yi != y_stop && !INSN_P (PREV_INSN (yi)))
        yi = PREV_INSN (yi);

      info->cur.x_start = xi;
      info->cur.y_start = yi;
    }

  if (!info->cur.input_valid)
    info->x_input = info->y_input = info->input_reg = NULL_RTX;
  if (!info->need_rerun)
    {
      find_dying_inputs (info);
      if (info->mode & STRUCT_EQUIV_FINAL)
        {
          if (info->check_input_conflict && ! resolve_input_conflict (info))
            gcc_unreachable ();
        }
      else
        {
          bool input_conflict = info->had_input_conflict;

          if (!input_conflict
              && info->dying_inputs > 1
              && bitmap_intersect_p (info->x_local_live, info->y_local_live))
            {
              regset_head clobbered_regs;

              INIT_REG_SET (&clobbered_regs);
              for (i = 0; i < info->cur.local_count; i++)
                {
                  if (assign_reg_reg_set (&clobbered_regs, info->y_local[i], 0))
                    {
                      input_conflict = true;
                      break;
                    }
                  assign_reg_reg_set (&clobbered_regs, info->x_local[i], 1);
                }
              CLEAR_REG_SET (&clobbered_regs);
            }
          if (input_conflict && !info->check_input_conflict)
            info->need_rerun = true;
          info->check_input_conflict = input_conflict;
        }
    }

  if (info->mode & STRUCT_EQUIV_NEED_FULL_BLOCK
      && (info->cur.x_start != x_stop || info->cur.y_start != y_stop))
    return 0;
  return info->cur.ninsns;
}

/* For each local register, set info->local_rvalue to true iff the register
   is a dying input.  Store the total number of these in info->dying_inputs.  */
static void
find_dying_inputs (struct equiv_info *info)
{
  int i;

  info->dying_inputs = 0;
  for (i = info->cur.local_count - 1; i >= 0; i--)
    {
      rtx x = info->x_local[i];
      unsigned regno = REGNO (x);
      int nregs = (regno >= FIRST_PSEUDO_REGISTER
                   ? 1 : hard_regno_nregs[regno][GET_MODE (x)]);

      for (info->local_rvalue[i] = false; nregs > 0; regno++, --nregs)
        if (REGNO_REG_SET_P (info->x_local_live, regno))
          {
            info->dying_inputs++;
            info->local_rvalue[i] = true;
            break;
          }
    }
}

/* For each local register that is a dying input, y_local[i] will be
   copied to x_local[i].  We'll do this in ascending order.  Try to
   re-order the locals to avoid conflicts like r3 = r2; r4 = r3.
   Return true iff the re-ordering is successful, or not necessary.  */
static bool
resolve_input_conflict (struct equiv_info *info)
{
  int i, j, end;
  int nswaps = 0;
  rtx save_x_local[STRUCT_EQUIV_MAX_LOCAL];
  rtx save_y_local[STRUCT_EQUIV_MAX_LOCAL];

  find_dying_inputs (info);
  if (info->dying_inputs <= 1)
    return true;
  memcpy (save_x_local, info->x_local, sizeof save_x_local);
  memcpy (save_y_local, info->y_local, sizeof save_y_local);
  end = info->cur.local_count - 1;
  for (i = 0; i <= end; i++)
    {
      /* Cycle detection with regsets is expensive, so we just check that
         we don't exceed the maximum number of swaps needed in the acyclic
         case.  */
      int max_swaps = end - i;

      /* Check if x_local[i] will be clobbered.  */
      if (!info->local_rvalue[i])
        continue;
      /* Check if any later value needs to be copied earlier.  */
      for (j = i + 1; j <= end; j++)
        {
          rtx tmp;

          if (!info->local_rvalue[j])
            continue;
          if (!reg_overlap_mentioned_p (info->x_local[i], info->y_local[j]))
            continue;
          if (--max_swaps < 0)
            {
              memcpy (info->x_local, save_x_local, sizeof save_x_local);
              memcpy (info->y_local, save_y_local, sizeof save_y_local);
              return false;
            }
          nswaps++;
          tmp = info->x_local[i];
          info->x_local[i] = info->x_local[j];
          info->x_local[j] = tmp;
          tmp = info->y_local[i];
          info->y_local[i] = info->y_local[j];
          info->y_local[j] = tmp;
          j = i;
        }
    }
  info->had_input_conflict = true;
  if (dump_file && nswaps)
    fprintf (dump_file, "Resolved input conflict, %d %s.\n",
             nswaps, nswaps == 1 ? "swap" : "swaps");
  return true;
}
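
/* Illustrative sketch (added for this listing; not part of the original
   file and not compiled): after a STRUCT_EQUIV_FINAL call, the caller is
   responsible for the compensation code described at the top of this file -
   copying each dying input Y_LOCAL[I] into the corresponding X_LOCAL[I] in
   ascending order, plus copying Y_INPUT into INPUT_REG if there is a common
   input, on the path that gets redirected through block X.
   resolve_input_conflict has already ordered the pairs so that no copy
   clobbers a later source.  The helper name below is hypothetical.  */
#if 0
static rtx
example_emit_input_copies (struct equiv_info *info)
{
  int i;
  rtx seq;

  start_sequence ();
  /* Common input (case 3 in the comment at the top of the file).  */
  if (info->x_input)
    emit_move_insn (info->input_reg, info->y_input);
  /* Dying inputs (case 2), in the order fixed by resolve_input_conflict.  */
  for (i = 0; i < info->cur.local_count; i++)
    if (info->local_rvalue[i])
      emit_move_insn (info->x_local[i], info->y_local[i]);
  seq = get_insns ();
  end_sequence ();
  return seq;
}
#endif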
