/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);

/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
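
/* Illustrative examples: for x == 8 (binary 1000), x - 1 == 0111 and the
   AND is zero, so the test succeeds; for x == 6 (binary 0110), x - 1 ==
   0101 and the AND is 0100, so it fails.  x == 0 also passes the test,
   hence the OR_ZERO in the name.  */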

/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
static bool smod_pow2_cheap[NUM_MACHINE_MODES];

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
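
/* When SLOW_UNALIGNED_ACCESS (MODE, ALIGN) is nonzero, the routines below
   avoid referencing memory in MODE at that alignment and fall back to
   narrower, aligned accesses instead; the default above ties this to
   STRICT_ALIGNMENT.  */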

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv       0
#define CODE_FOR_insv   CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv       0
#define CODE_FOR_extv   CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv      0
#define CODE_FOR_extzv  CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost;
static int add_cost[NUM_MACHINE_MODES];
static int neg_cost[NUM_MACHINE_MODES];
static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int sdiv_cost[NUM_MACHINE_MODES];
static int udiv_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;         rtunion reg_fld[2];
    struct rtx_def plus;        rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;        rtunion mult_fld1;
    struct rtx_def sdiv;        rtunion sdiv_fld1;
    struct rtx_def udiv;        rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;     rtunion sdiv_32_fld1;
    struct rtx_def smod_32;     rtunion smod_32_fld1;
    struct rtx_def wide_mult;   rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;   rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;       rtunion shift_fld1;
    struct rtx_def shift_mult;  rtunion shift_mult_fld1;
    struct rtx_def shift_add;   rtunion shift_add_fld1;
    struct rtx_def shift_sub;   rtunion shift_sub_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;

  zero_cost = rtx_cost (const0_rtx, 0);

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  REGNO (&all.reg) = LAST_VIRTUAL_REGISTER + 1;

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub, MINUS);
  XEXP (&all.shift_sub, 0) = &all.shift_mult;
  XEXP (&all.shift_sub, 1) = &all.reg;

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      PUT_MODE (&all.reg, mode);
      PUT_MODE (&all.plus, mode);
      PUT_MODE (&all.neg, mode);
      PUT_MODE (&all.mult, mode);
      PUT_MODE (&all.sdiv, mode);
      PUT_MODE (&all.udiv, mode);
      PUT_MODE (&all.sdiv_32, mode);
      PUT_MODE (&all.smod_32, mode);
      PUT_MODE (&all.wide_trunc, mode);
      PUT_MODE (&all.shift, mode);
      PUT_MODE (&all.shift_mult, mode);
      PUT_MODE (&all.shift_add, mode);
      PUT_MODE (&all.shift_sub, mode);

      add_cost[mode] = rtx_cost (&all.plus, SET);
      neg_cost[mode] = rtx_cost (&all.neg, SET);
      mul_cost[mode] = rtx_cost (&all.mult, SET);
      sdiv_cost[mode] = rtx_cost (&all.sdiv, SET);
      udiv_cost[mode] = rtx_cost (&all.udiv, SET);

      sdiv_pow2_cheap[mode] = (rtx_cost (&all.sdiv_32, SET)
                               <= 2 * add_cost[mode]);
      smod_pow2_cheap[mode] = (rtx_cost (&all.smod_32, SET)
                               <= 4 * add_cost[mode]);
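
      /* A reading of the thresholds above: the divide insn is counted
         as cheap if it costs no more than two additions, the modulus if
         it costs no more than four, presumably because those bounds
         approximate the open-coded shift sequences that would otherwise
         be emitted for a power-of-two operand.  */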

      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          PUT_MODE (&all.zext, wider_mode);
          PUT_MODE (&all.wide_mult, wider_mode);
          PUT_MODE (&all.wide_lshr, wider_mode);
          XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

          mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
          mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
        }

      shift_cost[mode][0] = 0;
      shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];

      n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
      for (m = 1; m < n; m++)
        {
          XEXP (&all.shift, 1) = cint[m];
          XEXP (&all.shift_mult, 1) = pow2[m];

          shift_cost[mode][m] = rtx_cost (&all.shift, SET);
          shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
          shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
        }
    }
}
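
/* The cost tables initialized above are consulted by the multiply and
   divide synthesis code later in this file (e.g. synth_mult and
   expand_divmod) when choosing between shift/add sequences and real
   multiply or divide instructions.  */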

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */
enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}


/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

rtx
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return value;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);
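
  /* Illustrative numbers: on a 32-bit target (BITS_PER_WORD == 32,
     UNITS_PER_WORD == 4) with a register destination, bitnum == 37 gives
     offset == 1, bitpos == 5 and byte_offset == 4, i.e. bit 5 of the
     second word.  */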

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
             || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
        op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
        op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                   byte_offset);
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
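
  /* For instance, if OP0 is an HImode register on a big-endian 32-bit
     target (unit == 32), the adjustment above turns bitpos 3 within the
     16-bit value into bitpos 19 within the full word.  */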

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          op0 = SUBREG_REG (op0);
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                                  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum, fieldmode));
        }
      return value;
    }

  /* From here on we can assume that the field to be stored
     fits within a single word (whatever mode that is), since it
     is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy
                 it to a pseudo.  We can remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may
                 have been added above.  */
              gcc_assert (GET_CODE (op0) == SUBREG
                          && (GET_MODE_SIZE (GET_MODE (op0))
                              == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
              op0 = SUBREG_REG (op0);
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */
  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))
      && insn_data[CODE_FOR_insv].operand[1].predicate (GEN_INT (bitsize),
                                                        VOIDmode))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      if (MEM_P (op0)
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment. If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode. Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || GET_MODE_SIZE (bestmode) < GET_MODE_SIZE (fieldmode)
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            gcc_assert (CONSTANT_P (value));
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0, offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
    (If OP0 is a register, it may be a full word or a narrower mode,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
      /* BITPOS is the distance between our msb
         and that of the containing datum.
         Convert it to the distance from the lsb.  */
      bitpos = total_bits - bitsize - bitpos;
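
  /* E.g. with total_bits == 32, an 8-bit field 4 bits below the msb
     (bitpos == 4) becomes bitpos == 20 counted from the lsb.  */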

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((REG_P (value) || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }
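
  /* At this point the store reduces to (op0 & ~mask) | (value << bitpos).
     As a concrete instance, storing 5 into a 3-bit field at bitpos 4 of a
     32-bit word computes (op0 & ~0x70) | 0x50.  */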

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */
  /* We keep the intermediates in registers to allow CSE to combine
     consecutive bitfield assignments.  */

  temp = force_reg (mode, op0);

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, temp,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
                           NULL_RTX, 1, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);
    }

  if (op0 != temp)
    emit_move_insn (op0, temp);
}

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
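
      /* Worked example: with unit == 32, bitpos == 28 and bitsize == 20,
         the first iteration stores 4 bits at thispos == 28 and the second
         stores the remaining 16 bits at thispos == 0 of the next unit.  */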

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (!MEM_P (value) || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}

/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
1086
 
1087
rtx
1088
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1089
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1090
                   enum machine_mode mode, enum machine_mode tmode)
1091
{
1092
  unsigned int unit
1093
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1094
  unsigned HOST_WIDE_INT offset, bitpos;
1095
  rtx op0 = str_rtx;
1096
  rtx spec_target = target;
1097
  rtx spec_target_subreg = 0;
1098
  enum machine_mode int_mode;
1099
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
1100
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
1101
  enum machine_mode mode1;
1102
  int byte_offset;
1103
 
1104
  if (tmode == VOIDmode)
1105
    tmode = mode;
1106
 
1107
  while (GET_CODE (op0) == SUBREG)
1108
    {
1109
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1110
      op0 = SUBREG_REG (op0);
1111
    }
1112
 
1113
  /* If we have an out-of-bounds access to a register, just return an
1114
     uninitialized register of the required mode.  This can occur if the
1115
     source code contains an out-of-bounds access to a small array.  */
1116
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1117
    return gen_reg_rtx (tmode);
1118
 
1119
  if (REG_P (op0)
1120
      && mode == GET_MODE (op0)
1121
      && bitnum == 0
1122
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1123
    {
1124
      /* We're trying to extract a full register from itself.  */
1125
      return op0;
1126
    }
1127
 
1128
  /* Use vec_extract patterns for extracting parts of vectors whenever
1129
     available.  */
1130
  if (VECTOR_MODE_P (GET_MODE (op0))
1131
      && !MEM_P (op0)
1132
      && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
1133
          != CODE_FOR_nothing)
1134
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1135
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1136
    {
1137
      enum machine_mode outermode = GET_MODE (op0);
1138
      enum machine_mode innermode = GET_MODE_INNER (outermode);
1139
      int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
1140
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1141
      rtx rtxpos = GEN_INT (pos);
1142
      rtx src = op0;
1143
      rtx dest = NULL, pat, seq;
1144
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
1145
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
1146
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;
1147
 
1148
      if (innermode == tmode || innermode == mode)
1149
        dest = target;
1150
 
1151
      if (!dest)
1152
        dest = gen_reg_rtx (innermode);
1153
 
1154
      start_sequence ();
1155
 
1156
      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
1157
        dest = copy_to_mode_reg (mode0, dest);
1158
 
1159
      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
1160
        src = copy_to_mode_reg (mode1, src);
1161
 
1162
      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode2, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);

            /* If we got a SUBREG, force it into a register since we
               aren't going to be able to do another SUBREG on it.  */
            if (GET_CODE (op0) == SUBREG)
              op0 = force_reg (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  bitpos = bitnum % unit;
  offset = bitnum / unit;
  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((!MEM_P (op0)
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (MEM_P (op0)
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (MEM_P (op0))
            op0 = adjust_address (op0, mode1, offset);
          else
            {
              rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                             byte_offset);
              if (sub == NULL)
                goto no_subreg_mode_swap;
              op0 = sub;
            }
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode);

          gcc_assert (target_part);

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_cst (NULL_TREE,
                                            GET_MODE_BITSIZE (mode) - bitsize),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_cst (NULL_TREE,
                                          GET_MODE_BITSIZE (mode) - bitsize),
                           NULL_RTX, 0);
    }
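
  /* For instance, a signed 40-bit field extracted into a 64-bit mode is
     shifted left by 24 and then arithmetically right by 24, replicating
     bit 39 through the high-order bits.  */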

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && bitsize > 0
          && GET_MODE_BITSIZE (extzv_mode) >= bitsize
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (MEM_P (xop0))
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for extracting from this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment. If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode. Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Make sure register is big enough for the whole field.  */
                  if (xoffset * BITS_PER_UNIT + unit
1446
                      < offset * BITS_PER_UNIT + bitsize)
1447
                    goto extzv_loses;
1448
 
1449
                  /* Fetch it to a register in that size.  */
1450
                  xop0 = force_reg (bestmode, xop0);
1451
 
1452
                  /* XBITPOS counts within UNIT, which is what is expected.  */
1453
                }
1454
              else
1455
                /* Get ref to first byte containing part of the field.  */
1456
                xop0 = adjust_address (xop0, byte_mode, xoffset);
1457
 
1458
              volatile_ok = save_volatile_ok;
1459
            }
1460
 
1461
          /* If op0 is a register, we need it in MAXMODE (which is usually
1462
             SImode). to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && !MEM_P (xop0))
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0)
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
      extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && bitsize > 0
          && GET_MODE_BITSIZE (extv_mode) >= bitsize
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (MEM_P (xop0))
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment. If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode. Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Make sure register is big enough for the whole field. */
                  if (xoffset * BITS_PER_UNIT + unit
                      < offset * BITS_PER_UNIT + bitsize)
                    goto extv_loses;

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && !MEM_P (xop0))
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0)
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
      extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is not a scalar integral, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (!SCALAR_INT_MODE_P (tmode))
        {
          enum machine_mode smode
            = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
          target = convert_to_mode (smode, target, unsignedp);
          target = force_reg (smode, target);
          return gen_lowpart (tmode, target);
        }

      return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}

/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
    it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
    (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on big-endian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
                         unsigned HOST_WIDE_INT offset,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, rtx target,
                         int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_cst (NULL_TREE, bitpos);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_cst (NULL_TREE,
                         GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_cst (NULL_TREE,
                                      GET_MODE_BITSIZE (mode) - bitsize),
                       target, 0);
}
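
/* As an illustration of the unsigned case above: a 6-bit field starting
   at (little-endian) bit 5 of an operand already in the desired mode
   amounts to (x >> 5) & 0x3f -- one logical right shift followed by an
   AND with a BITSIZE-wide mask.  */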

/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitsize == 0)
    masklow = 0;
  else if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitsize == 0)
    maskhigh = 0;
  else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
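
/* Example: on a host with 64-bit HOST_WIDE_INT, mask_rtx (SImode, 4, 8, 0)
   yields the constant 0x00000ff0 (eight ones starting at bit 4), while
   the complemented form yields the 32-bit pattern 0xfffff00f.  */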

/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
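
/* Example: lshift_value (SImode, GEN_INT (0x1ff), 8, 8) truncates the
   value to 0xff and returns the constant 0xff00.  */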

/* Extract a bit field from a memory by forcing the alignment of the
   memory.  This is efficient only if the field spans at least 4 boundaries.

   OP0 is the MEM.
   BITSIZE is the field width; BITPOS is the position of the first bit.
   UNSIGNEDP is true if the result should be zero-extended.  */

static rtx
extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                                   unsigned HOST_WIDE_INT bitpos,
                                   int unsignedp)
{
  enum machine_mode mode, dmode;
  unsigned int m_bitsize, m_size;
  unsigned int sign_shift_up, sign_shift_dn;
  rtx base, a1, a2, v1, v2, comb, shift, result, start;

  /* Choose a mode that will fit BITSIZE.  */
  mode = smallest_mode_for_size (bitsize, MODE_INT);
  m_size = GET_MODE_SIZE (mode);
  m_bitsize = GET_MODE_BITSIZE (mode);

  /* Choose a mode twice as wide.  Fail if no such mode exists.  */
  dmode = mode_for_size (m_bitsize * 2, MODE_INT, false);
  if (dmode == BLKmode)
    return NULL;

  do_pending_stack_adjust ();
  start = get_last_insn ();

  /* At the end, we'll need an additional shift to deal with sign/zero
     extension.  By default this will be a left+right shift of the
     appropriate size.  But we may be able to eliminate one of them.  */
  sign_shift_up = sign_shift_dn = m_bitsize - bitsize;

  if (STRICT_ALIGNMENT)
    {
      base = plus_constant (XEXP (op0, 0), bitpos / BITS_PER_UNIT);
      bitpos %= BITS_PER_UNIT;

      /* We load two values to be concatenated.  There's an edge condition
         that bears notice -- an aligned value at the end of a page can
         only load one value lest we segfault.  So the two values we load
         are at "base & -size" and "(base + size - 1) & -size".  If base
         is unaligned, the addresses will be aligned and sequential; if
         base is aligned, the addresses will both be equal to base.  */
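      /* Concretely, with m_size == 4: a base of 0x1003 gives loads from
         0x1000 and 0x1004, while an aligned base of 0x1000 gives two
         loads from 0x1000 itself.  */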

      a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
                                GEN_INT (-(HOST_WIDE_INT)m_size),
                                NULL, true, OPTAB_LIB_WIDEN);
      mark_reg_pointer (a1, m_bitsize);
      v1 = gen_rtx_MEM (mode, a1);
      set_mem_align (v1, m_bitsize);
      v1 = force_reg (mode, validize_mem (v1));

      a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
      a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
                                GEN_INT (-(HOST_WIDE_INT)m_size),
                                NULL, true, OPTAB_LIB_WIDEN);
      v2 = gen_rtx_MEM (mode, a2);
      set_mem_align (v2, m_bitsize);
      v2 = force_reg (mode, validize_mem (v2));

      /* Combine these two values into a double-word value.  */
      if (m_bitsize == BITS_PER_WORD)
        {
          comb = gen_reg_rtx (dmode);
          emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
          emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
          emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
        }
      else
        {
          if (BYTES_BIG_ENDIAN)
            comb = v1, v1 = v2, v2 = comb;
          v1 = convert_modes (dmode, mode, v1, true);
          if (v1 == NULL)
            goto fail;
          v2 = convert_modes (dmode, mode, v2, true);
          v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
                                    NULL, true, OPTAB_LIB_WIDEN);
          if (v2 == NULL)
            goto fail;
          comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
                                      true, OPTAB_LIB_WIDEN);
          if (comb == NULL)
            goto fail;
        }

      shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
                                   NULL, true, OPTAB_LIB_WIDEN);
      shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);

      if (bitpos != 0)
        {
          if (sign_shift_up <= bitpos)
            bitpos -= sign_shift_up, sign_shift_up = 0;
          shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
                                       NULL, true, OPTAB_LIB_WIDEN);
        }
    }
  else
    {
      unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
      bitpos %= BITS_PER_UNIT;

      /* When strict alignment is not required, we can just load directly
         from memory without masking.  If the remaining BITPOS offset is
         small enough, we may be able to do all operations in MODE as
         opposed to DMODE.  */
      if (bitpos + bitsize <= m_bitsize)
        dmode = mode;
      comb = adjust_address (op0, dmode, offset);

      if (sign_shift_up <= bitpos)
        bitpos -= sign_shift_up, sign_shift_up = 0;
      shift = GEN_INT (bitpos);
    }

  /* Shift down the double-word such that the requested value is at bit 0.  */
  if (shift != const0_rtx)
    comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
                                comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
  if (comb == NULL)
    goto fail;

  /* If the field exactly matches MODE, then all we need to do is return the
     lowpart.  Otherwise, shift to get the sign bits set properly.  */
  result = force_reg (mode, gen_lowpart (mode, comb));

  if (sign_shift_up)
    result = expand_simple_binop (mode, ASHIFT, result,
                                  GEN_INT (sign_shift_up),
                                  NULL_RTX, 0, OPTAB_LIB_WIDEN);
  if (sign_shift_dn)
    result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
                                  result, GEN_INT (sign_shift_dn),
                                  NULL_RTX, 0, OPTAB_LIB_WIDEN);

  return result;

 fail:
  delete_insns_since (start);
  return NULL;
}

/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    {
      unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
      if (0 && bitsize / unit > 2)
        {
          rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
                                                       unsignedp);
          if (tmp)
            return tmp;
        }
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         extract_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* Extract the parts in bit-counting order,
         whose meaning is determined by BYTES_PER_UNIT.
         OFFSET is in UNITs, and UNIT is in bits.
         extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
                                      offset * unit / BITS_PER_UNIT,
                                      thissize, thispos, 0, 1);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
        {
          if (bitsize != bitsdone)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_cst (NULL_TREE, bitsize - bitsdone),
                                 0, 1);
        }
      else
        {
          if (bitsdone != thissize)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_cst (NULL_TREE,
                                                bitsdone - thissize), 0, 1);
        }

      if (first)
        result = part;
      else
        /* Combine the parts with bitwise or.  This works
           because we extracted each part as an unsigned bit field.  */
        result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
                               OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
                         build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
                         NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
                       build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
                       NULL_RTX, 0);
}
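
/* For example, on a 32-bit word target a signed 20-bit field starting at
   bit 24 of its first word is assembled from an 8-bit piece at bit 24 and
   a 12-bit piece at bit 0 of the following word, IORed together and then
   sign-extended by a left/arithmetic-right shift pair of 12.  */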

/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
                            target, inc,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
                            target, dec,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
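
/* E.g., expand_inc (reg, const1_rtx) emits reg := reg + 1 in REG's mode;
   expand_dec is the analogous subtraction.  */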

/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
              tree amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_normal (amount);

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (GET_CODE (op1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
              (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
        op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
                       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
               && subreg_lowpart_p (op1))
        op1 = SUBREG_REG (op1);
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
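  /* E.g., x << 2 becomes t = x + x; t = t + t when two additions are
     cheaper than the target's shift by 2.  */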
  if (code == LSHIFT_EXPR
      && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_BITSIZE (mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode]
      && shift_cost[mode][INTVAL (op1)] != MAX_COST)
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
        {
          temp = force_reg (mode, shifted);
          shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
                                  unsignedp, OPTAB_LIB_WIDEN);
        }
      return shifted;
    }

  for (try = 0; temp == 0 && try < 3; try++)
    {
      enum optab_methods methods;

      if (try == 0)
        methods = OPTAB_DIRECT;
      else if (try == 1)
        methods = OPTAB_WIDEN;
      else
        methods = OPTAB_LIB_WIDEN;

      if (rotate)
        {
          /* Widening does not work for rotation.  */
          if (methods == OPTAB_WIDEN)
            continue;
          else if (methods == OPTAB_LIB_WIDEN)
            {
              /* If we have been unable to open-code this by a rotation,
                 do it as the IOR of two shifts.  I.e., to rotate A
                 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
                 where C is the bitsize of A.

                 It is theoretically possible that the target machine might
                 not be able to perform either shift and hence we would
                 be making two libcalls rather than just the one for the
                 shift (similarly if IOR could not be done).  We will allow
                 this extremely unlikely lossage to avoid complicating the
                 code below.  */
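              /* For a 32-bit mode, rotating left by 3 thus becomes
                 (A << 3) | ((unsigned) A >> 29).  */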

              rtx subtarget = target == shifted ? 0 : target;
              tree new_amount, other_amount;
              rtx temp1;
              tree type = TREE_TYPE (amount);
              if (GET_MODE (op1) != TYPE_MODE (type)
                  && GET_MODE (op1) != VOIDmode)
                op1 = convert_to_mode (TYPE_MODE (type), op1, 1);
              new_amount = make_tree (type, op1);
              other_amount
                = fold_build2 (MINUS_EXPR, type,
                               build_int_cst (type, GET_MODE_BITSIZE (mode)),
                               new_amount);

              shifted = force_reg (mode, shifted);

              temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
                                   mode, shifted, new_amount, 0, 1);
              temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
                                    mode, shifted, other_amount, subtarget, 1);
              return expand_binop (mode, ior_optab, temp, temp1, target,
                                   unsignedp, methods);
            }

          temp = expand_binop (mode,
                               left ? rotl_optab : rotr_optab,
                               shifted, op1, target, unsignedp, methods);
        }
      else if (unsignedp)
        temp = expand_binop (mode,
                             left ? ashl_optab : lshr_optab,
                             shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
         Also, if we are going to widen the operand, we can just as well
         use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
          && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
        {
          enum optab_methods methods1 = methods;

          /* If trying to widen a log shift to an arithmetic shift,
             don't accept an arithmetic shift of the same size.  */
          if (unsignedp)
            methods1 = OPTAB_MUST_WIDEN;

          /* Arithmetic shift */

          temp = expand_binop (mode,
                               left ? ashl_optab : ashr_optab,
                               shifted, op1, target, unsignedp, methods1);
        }

      /* We used to try extzv here for logical right shifts, but that was
         only useful for one machine, the VAX, and caused poor code
         generation there for lshrdi3, so the code was deleted and a
         define_expand for lshrsi3 was added to vax.md.  */
    }

  gcc_assert (temp);
  return temp;
}

enum alg_code {
  alg_unknown,
  alg_zero,
  alg_m, alg_shift,
  alg_add_t_m2,
  alg_sub_t_m2,
  alg_add_factor,
  alg_sub_factor,
  alg_add_t2_m,
  alg_sub_t2_m,
  alg_impossible
};

/* This structure holds the "cost" of a multiply sequence.  The
   "cost" field holds the total rtx_cost of every operator in the
   synthetic multiplication sequence, hence cost(a op b) is defined
   as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
   The "latency" field holds the minimum possible latency of the
   synthetic multiply, on a hypothetical infinitely parallel CPU.
   This is the critical path, or the maximum height, of the expression
   tree which is the sum of rtx_costs on the most expensive path from
   any leaf to the root.  Hence latency(a op b) is defined as zero for
   leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise.  */
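
/* For example, computing x*6 as two independent shifts (x<<1 and x<<2)
   followed by an add has cost = 2*shift + add, but latency = shift + add,
   since the two shifts can execute in parallel.  */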

struct mult_cost {
  short cost;     /* Total rtx_cost of the multiplication sequence.  */
  short latency;  /* The latency of the multiplication sequence.  */
};

/* This macro is used to compare a pointer to a mult_cost against a
   single integer "rtx_cost" value.  This is equivalent to the macro
   CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}.  */
#define MULT_COST_LESS(X,Y) ((X)->cost < (Y)    \
                             || ((X)->cost == (Y) && (X)->latency < (Y)))
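
/* Thus MULT_COST_LESS (X, 7) holds when X->cost < 7, or when
   X->cost == 7 and X->latency < 7.  */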

/* This macro is used to compare two pointers to mult_costs against
   each other.  The macro returns true if X is cheaper than Y.
   Currently, the cheaper of two mult_costs is the one with the
   lower "cost".  If "cost"s are tied, the lower latency is cheaper.  */
#define CHEAPER_MULT_COST(X,Y)  ((X)->cost < (Y)->cost          \
                                 || ((X)->cost == (Y)->cost     \
                                     && (X)->latency < (Y)->latency))

/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero             total := 0;
   alg_m                total := multiplicand;
   alg_shift            total := total * coeff;
   alg_add_t_m2         total := total + multiplicand * coeff;
   alg_sub_t_m2         total := total - multiplicand * coeff;
   alg_add_factor       total := total * coeff + total;
   alg_sub_factor       total := total * coeff - total;
   alg_add_t2_m         total := total * coeff + multiplicand;
   alg_sub_t2_m         total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */
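
/* For instance, a multiplication by 10 can be recorded as
   op[0] = alg_m (total = x), op[1] = alg_add_t2_m with log[1] = 2
   (total = total*4 + x = 5*x), and op[2] = alg_shift with log[2] = 1
   (total = 10*x).  */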

struct algorithm
{
  struct mult_cost cost;
  short ops;
  /* The sizes of the OP and LOG fields are not directly related to the
     word size, but the worst case arises when we have few consecutive
     ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add, ...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};

/* The entry for our multiplication cache/hash table.  */
struct alg_hash_entry {
  /* The number we are multiplying by.  */
  unsigned HOST_WIDE_INT t;

  /* The mode in which we are multiplying something by T.  */
  enum machine_mode mode;

  /* The best multiplication algorithm for t.  */
  enum alg_code alg;

  /* The cost of multiplication if ALG_CODE is not alg_impossible.
     Otherwise, the cost within which multiplication by T is
     impossible.  */
  struct mult_cost cost;
};

/* The number of cache/hash entries.  */
#if HOST_BITS_PER_WIDE_INT == 64
#define NUM_ALG_HASH_ENTRIES 1031
#else
#define NUM_ALG_HASH_ENTRIES 307
#endif
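
/* Both sizes are prime, which should help the simple modulo hash
   below spread entries evenly across the table.  */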

/* Each entry of ALG_HASH caches alg_code for some integer.  This is
   actually a hash table.  If we have a collision, the older
   entry is kicked out.  */
static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];

/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};

static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
                        const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
                                 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
                              const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
                                                 int, rtx *, int *, int *);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
                                       int, int);
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */

static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
            const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT q;
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (mode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost))
        return;
      else
        {
          alg_out->ops = 1;
          alg_out->cost.cost = zero_cost;
          alg_out->cost.latency = zero_cost;
          alg_out->op[0] = alg_zero;
          return;
        }
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = alloca (sizeof (struct algorithm));
  best_alg = alloca (sizeof (struct algorithm));
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  if (alg_hash[hash_index].t == t
      && alg_hash[hash_index].mode == mode
      && alg_hash[hash_index].alg != alg_unknown)
    {
      cache_alg = alg_hash[hash_index].alg;

      if (cache_alg == alg_impossible)
        {
          /* The cache tells us that it's impossible to synthesize
             multiplication by T within alg_hash[hash_index].cost.  */
          if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
            /* COST_LIMIT is at least as restrictive as the one
               recorded in the hash table, in which case we have no
               hope of synthesizing a multiplication.  Just
               return.  */
            return;

          /* If we get here, COST_LIMIT is less restrictive than the
             one recorded in the hash table, so we may be able to
             synthesize a multiplication.  Proceed as if we didn't
             have the cache entry.  */
        }
      else
        {
          if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
            /* The cached algorithm shows that this multiplication
               requires more cost than COST_LIMIT.  Just return.  This
               way, we don't clobber this cache entry with
               alg_impossible but retain useful information.  */
            return;

          cache_hit = true;

          switch (cache_alg)
            {
            case alg_shift:
              goto do_alg_shift;

            case alg_add_t_m2:
            case alg_sub_t_m2:
              goto do_alg_addsub_t_m2;

            case alg_add_factor:
            case alg_sub_factor:
              goto do_alg_addsub_factor;

            case alg_add_t2_m:
              goto do_alg_add_t2_m;

            case alg_sub_t2_m:
              goto do_alg_sub_t2_m;

            default:
              gcc_unreachable ();
            }
        }
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */
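  /* E.g., t = 40 (101000 in binary) has three low zero bits: synthesize
     a multiplication by 5, then shift the result left by 3.  */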

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);  /* m = number of low zero bits */
      if (m < maxm)
        {
          q = t >> m;
          /* The function expand_shift will choose between a shift and
             a sequence of additions, so the observed cost is given as
             MIN (m * add_cost[mode], shift_cost[mode][m]).  */
          op_cost = m * add_cost[mode];
          if (shift_cost[mode][m] < op_cost)
            op_cost = shift_cost[mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, q, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_shift;
            }
        }
      if (cache_hit)
        goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
        ;
      /* If T was -1, then W will be zero after the loop.  This is another
         case where T ends with ...111.  Handling this with (T + 1) and
         subtracting 1 produces slightly better code and makes algorithm
         selection much faster than treating it like the ...0111 case
         below.  */
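      /* E.g., t = 7 (...111) is handled as 8*x - x by the first arm
         below, while t = 5 (...101) is handled as 4*x + x by the
         second.  */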
      if (w == 0
          || (w > 2
              /* Reject the case where t is 3.
                 Thus we prefer addition in that case.  */
              && t != 3))
        {
          /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

          op_cost = add_cost[mode];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, t + 1, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_sub_t_m2;
            }
        }
      else
        {
          /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

          op_cost = add_cost[mode];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, t - 1, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_add_t_m2;
            }
        }
      if (cache_hit)
        goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shifts the result by m, and adds or subtracts the
     unshifted result.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */
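  /* E.g., t = 36 factors as 4 * (2**3 + 1): synthesize 4*x, then use
     alg_add_factor to form (4*x)*8 + 4*x = 36*x.  */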
2680
 
 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
          && (!cache_hit || cache_alg == alg_add_factor))
        {
          /* If the target has a cheap shift-and-add instruction use
             that in preference to a shift insn followed by an add insn.
             Assume that the shift-and-add is "atomic" with a latency
             equal to its cost, otherwise assume that on superscalar
             hardware the shift may be executed concurrently with the
             earlier steps in the algorithm.  */
          op_cost = add_cost[mode] + shift_cost[mode][m];
          if (shiftadd_cost[mode][m] < op_cost)
            {
              op_cost = shiftadd_cost[mode][m];
              op_latency = op_cost;
            }
          else
            op_latency = add_cost[mode];

          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_latency;
          synth_mult (alg_in, t / d, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_latency;
          if (alg_in->cost.latency < op_cost)
            alg_in->cost.latency = op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_factor;
            }
          /* Other factors will have been taken care of in the recursion.  */
          break;
        }

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
          && (!cache_hit || cache_alg == alg_sub_factor))
        {
          /* If the target has a cheap shift-and-subtract insn use
             that in preference to a shift insn followed by a sub insn.
             Assume that the shift-and-sub is "atomic" with a latency
             equal to its cost, otherwise assume that on superscalar
             hardware the shift may be executed concurrently with the
             earlier steps in the algorithm.  */
          op_cost = add_cost[mode] + shift_cost[mode][m];
          if (shiftsub_cost[mode][m] < op_cost)
            {
              op_cost = shiftsub_cost[mode][m];
              op_latency = op_cost;
            }
          else
            op_latency = add_cost[mode];

          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_latency;
          synth_mult (alg_in, t / d, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_latency;
          if (alg_in->cost.latency < op_cost)
            alg_in->cost.latency = op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_factor;
            }
          break;
        }
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
        {
          op_cost = shiftadd_cost[mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_t2_m;
            }
        }
      if (cache_hit)
        goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
        {
          op_cost = shiftsub_cost[mode][m];
          new_limit.cost = best_cost.cost - op_cost;
          new_limit.latency = best_cost.latency - op_cost;
          synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

          alg_in->cost.cost += op_cost;
          alg_in->cost.latency += op_cost;
          if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
            {
              struct algorithm *x;
              best_cost = alg_in->cost;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_t2_m;
            }
        }
      if (cache_hit)
        goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
         this case (that is, <T, MODE, COST_LIMIT>) so that next time
         we are asked to find an algorithm for T within the same or
         lower COST_LIMIT, we can immediately return to the
         caller.  */
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].alg = alg_impossible;
      alg_hash[hash_index].cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
      alg_hash[hash_index].cost.cost = best_cost.cost;
      alg_hash[hash_index].cost.latency = best_cost.latency;
    }
 
  /* If the sequence is too long for `struct algorithm' to record,
     make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
          alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
          alg_out->ops * sizeof *alg_out->log);
}
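
/* Illustrative standalone sketch (not part of GCC): the kind of shift/add
   sequence synth_mult discovers.  Multiplying by 53 needs no mul insn,
   using three alg_add_t2_m steps from the decomposition
   53 = (13 << 2) + 1, 13 = (3 << 2) + 1, 3 = (1 << 1) + 1.
   Guarded out so it does not affect this translation unit.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
mult53 (uint32_t x)
{
  uint32_t acc = x;            /* alg_m:        val_so_far = 1  */
  acc = (acc << 1) + x;        /* alg_add_t2_m: val_so_far = 3  */
  acc = (acc << 2) + x;        /* alg_add_t2_m: val_so_far = 13 */
  acc = (acc << 2) + x;        /* alg_add_t2_m: val_so_far = 53 */
  return acc;
}

int
main (void)
{
  uint32_t x;
  for (x = 0; x < 100000; x++)
    assert (mult53 (x) == 53u * x);
  return 0;
}
#endif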
 
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.
 
   Return true if the cheapest of these costs less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
                     struct algorithm *alg, enum mult_variant *variant,
                     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[mode];
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
    {
      op_cost = neg_cost[mode];
      if (MULT_COST_LESS (&alg->cost, mult_cost))
        {
          limit.cost = alg->cost.cost - op_cost;
          limit.latency = alg->cost.latency - op_cost;
        }
      else
        {
          limit.cost = mult_cost - op_cost;
          limit.latency = mult_cost - op_cost;
        }

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
        *alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost[mode];
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
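
/* Illustrative standalone sketch (not part of GCC) of the two fixup
   variants: negate_variant synthesizes x * -7 as -(x * 7), and
   add_variant synthesizes x * 7 from a sequence for 7 - 1 = 6 plus one
   final addition.  Guarded out; uses plain C99 types.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int32_t
mult_m7 (int32_t x)          /* negate_variant: -((x << 3) - x) */
{
  return -(int32_t) (((uint32_t) x << 3) - (uint32_t) x);
}

static int32_t
mult7 (int32_t x)            /* add_variant: (x * 6) + x */
{
  uint32_t t = ((uint32_t) x << 1) + (uint32_t) x;   /* 3x */
  return (int32_t) ((t << 1) + (uint32_t) x);        /* 6x + x */
}

int
main (void)
{
  int32_t x;
  for (x = -1000; x <= 1000; x++)
    {
      assert (mult_m7 (x) == -7 * x);
      assert (mult7 (x) == 7 * x);
    }
  return 0;
}
#endif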
 
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
                   rtx target, const struct algorithm *alg,
                   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over.
     For speed, but also for correctness when mem is volatile.  */
  if (MEM_P (op0))
    op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, const0_rtx);
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
        = (opno == alg->ops - 1 && target != 0 && variant != add_variant
           && !optimize)
          ? target : 0;
      rtx accum_target = optimize ? 0 : accum;

      switch (alg->op[opno])
        {
        case alg_shift:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                build_int_cst (NULL_TREE, log),
                                NULL_RTX, 0);
          val_so_far <<= log;
          break;

        case alg_add_t_m2:
          tem = expand_shift (LSHIFT_EXPR, mode, op0,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far += (HOST_WIDE_INT) 1 << log;
          break;

        case alg_sub_t_m2:
          tem = expand_shift (LSHIFT_EXPR, mode, op0,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far -= (HOST_WIDE_INT) 1 << log;
          break;

        case alg_add_t2_m:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                build_int_cst (NULL_TREE, log),
                                shift_subtarget,
                                0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
                                 add_target ? add_target : accum_target);
          val_so_far = (val_so_far << log) + 1;
          break;

        case alg_sub_t2_m:
          accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                build_int_cst (NULL_TREE, log),
                                shift_subtarget, 0);
          accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
                                 add_target ? add_target : accum_target);
          val_so_far = (val_so_far << log) - 1;
          break;

        case alg_add_factor:
          tem = expand_shift (LSHIFT_EXPR, mode, accum,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                 add_target ? add_target : accum_target);
          val_so_far += val_so_far << log;
          break;

        case alg_sub_factor:
          tem = expand_shift (LSHIFT_EXPR, mode, accum,
                              build_int_cst (NULL_TREE, log),
                              NULL_RTX, 0);
          accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
                                 (add_target
                                  ? add_target : (optimize ? 0 : tem)));
          val_so_far = (val_so_far << log) - val_so_far;
          break;

        default:
          gcc_unreachable ();
        }

      /* Write a REG_EQUAL note on the last insn so that we can cse
         multiplication sequences.  Note that if ACCUM is a SUBREG,
         we've set the inner register and must properly indicate
         that.  */

      tem = op0, nmode = mode;
      if (GET_CODE (accum) == SUBREG)
        {
          nmode = GET_MODE (SUBREG_REG (accum));
          tem = gen_lowpart (nmode, op0);
        }

      insn = get_last_insn ();
      set_unique_reg_note (insn, REG_EQUAL,
                           gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  val &= GET_MODE_MASK (mode);
  val_so_far &= GET_MODE_MASK (mode);
  gcc_assert (val == val_so_far);

  return accum;
}
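
/* Illustrative standalone sketch (not part of GCC): how val_so_far is
   updated for each operation kind above, checked on the sequence
   105 = 7 * 15 = (8 - 1) * (16 - 1), i.e. two alg_sub_factor steps.
   Guarded out.  */
#if 0
#include <assert.h>
#include <stdint.h>

enum op { OP_SHIFT, OP_ADD_T_M2, OP_SUB_T_M2, OP_ADD_T2_M, OP_SUB_T2_M,
          OP_ADD_FACTOR, OP_SUB_FACTOR };

static int64_t
step (int64_t v, enum op o, int log)
{
  switch (o)
    {
    case OP_SHIFT:      return v << log;                    /* alg_shift      */
    case OP_ADD_T_M2:   return v + ((int64_t) 1 << log);    /* alg_add_t_m2   */
    case OP_SUB_T_M2:   return v - ((int64_t) 1 << log);    /* alg_sub_t_m2   */
    case OP_ADD_T2_M:   return (v << log) + 1;              /* alg_add_t2_m   */
    case OP_SUB_T2_M:   return (v << log) - 1;              /* alg_sub_t2_m   */
    case OP_ADD_FACTOR: return v + (v << log);              /* alg_add_factor */
    case OP_SUB_FACTOR: return (v << log) - v;              /* alg_sub_factor */
    }
  return v;
}

int
main (void)
{
  int64_t v = 1;                    /* alg_m */
  v = step (v, OP_SUB_FACTOR, 3);   /* 8v - v  -> 7   */
  v = step (v, OP_SUB_FACTOR, 4);   /* 16v - v -> 105 */
  assert (v == 105);
  return 0;
}
#endif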
 
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
             int unsignedp)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  int max_cost;

  /* Handling const0_rtx here allows us to use zero as a rogue value for
     coeff below.  */
  if (op1 == const0_rtx)
    return const0_rtx;
  if (op1 == const1_rtx)
    return op0;
  if (op1 == constm1_rtx)
    return expand_unop (mode,
                        GET_MODE_CLASS (mode) == MODE_INT
                        && !unsignedp && flag_trapv
                        ? negv_optab : neg_optab,
                        op0, target, 0);

  /* These are the operations that are potentially turned into a sequence
     of shifts and additions.  */
  if (SCALAR_INT_MODE_P (mode)
      && (unsignedp || !flag_trapv))
    {
      HOST_WIDE_INT coeff = 0;
      rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
         less than or equal in size to `unsigned int' this doesn't matter.
         If the mode is larger than `unsigned int', then synth_mult works
         only if the constant value exactly fits in an `unsigned int' without
         any truncation.  This means that multiplying by negative values does
         not work; results are off by 2^32 on a 32 bit machine.  */

      if (GET_CODE (op1) == CONST_INT)
        {
          /* Attempt to handle multiplication of DImode values by negative
             coefficients, by performing the multiplication by a positive
             multiplier and then inverting the result.  */
          if (INTVAL (op1) < 0
              && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
            {
              /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
                 result is interpreted as an unsigned coefficient.
                 Exclude cost of op0 from max_cost to match the cost
                 calculation of the synth_mult.  */
              max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET)
                         - neg_cost[mode];
              if (max_cost > 0
                  && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
                                          &variant, max_cost))
                {
                  rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
                                                NULL_RTX, &algorithm,
                                                variant);
                  return expand_unop (mode, neg_optab, temp, target, 0);
                }
            }
          else coeff = INTVAL (op1);
        }
      else if (GET_CODE (op1) == CONST_DOUBLE)
        {
          /* If we are multiplying in DImode, it may still be a win
             to try to work with shifts and adds.  */
          if (CONST_DOUBLE_HIGH (op1) == 0)
            coeff = CONST_DOUBLE_LOW (op1);
          else if (CONST_DOUBLE_LOW (op1) == 0
                   && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
            {
              int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
                          + HOST_BITS_PER_WIDE_INT;
              return expand_shift (LSHIFT_EXPR, mode, op0,
                                   build_int_cst (NULL_TREE, shift),
                                   target, unsignedp);
            }
        }

      /* We used to test optimize here, on the grounds that it's better to
         produce a smaller program when -O is not used.  But this causes
         such a terrible slowdown sometimes that it seems better to always
         use synth_mult.  */
      if (coeff != 0)
        {
          /* Special case powers of two.  */
          if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
            return expand_shift (LSHIFT_EXPR, mode, op0,
                                 build_int_cst (NULL_TREE, floor_log2 (coeff)),
                                 target, unsignedp);

          /* Exclude cost of op0 from max_cost to match the cost
             calculation of the synth_mult.  */
          max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET);
          if (choose_mult_variant (mode, coeff, &algorithm, &variant,
                                   max_cost))
            return expand_mult_const (mode, op0, coeff, target,
                                      &algorithm, variant);
        }
    }

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* Expand x*2.0 as x+x.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
        {
          op0 = force_reg (GET_MODE (op0), op0);
          return expand_binop (mode, add_optab, op0, op0,
                               target, unsignedp, OPTAB_LIB_WIDEN);
        }
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode,
                      ! unsignedp
                      && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
                      ? smulv_optab : smul_optab,
                      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}
 
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (unsigned HOST_WIDE_INT x)
{
  return floor_log2 (x - 1) + 1;
}
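
/* Illustrative standalone sketch (not part of GCC): the identity above
   relies on floor_log2 (0) == -1, so ceil_log2 (1) == 0.  The local
   floor_log2_ below mimics GCC's floor_log2 for small inputs.
   Guarded out.  */
#if 0
#include <assert.h>

static int
floor_log2_ (unsigned long x)     /* position of the highest set bit */
{
  int n = -1;
  while (x != 0)
    x >>= 1, n++;
  return n;
}

static int
ceil_log2_ (unsigned long x)      /* smallest n with 2**n >= x */
{
  return floor_log2_ (x - 1) + 1;
}

int
main (void)
{
  assert (ceil_log2_ (1) == 0);
  assert (ceil_log2_ (8) == 3);
  assert (ceil_log2_ (9) == 4);
  return 0;
}
#endif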
 
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).
 
   PRECISION should be as small as possible so this function can choose the
   multiplier more freely.

   The rounded-up logarithm of D is placed in *LGUP_PTR.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

static
unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
                   rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
{
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;
 
  /* We could handle this with some effort, but this case is much
     better handled directly with a scc insn, so rely on the caller
     using that.  */
  gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mlow_lo, &mlow_hi, &dummy1, &dummy2);
 
  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  gcc_assert (!mhigh_hi || nh - d < d);
  gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
  /* Assert that mlow < mhigh.  */
  gcc_assert (mlow_hi < mhigh_hi
              || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
        break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = GEN_INT (mhigh_lo & mask);
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = GEN_INT (mhigh_lo);
      return mhigh_hi;
    }
}
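
/* Illustrative standalone sketch (not part of GCC): what the multiplier
   and post shift computed above buy you.  For d == 3 and N == 32 the
   N + 1 bit multiplier reduces to m = 0xAAAAAAAB with a post shift of 1,
   so x / 3 == (x * m) >> 33 on the 64-bit product.  The constants here
   are the well-known pair for d == 3; choose_multiplier derives them in
   general.  Guarded out.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
udiv3 (uint32_t x)
{
  return (uint32_t) (((uint64_t) x * 0xAAAAAAABu) >> 33);
}

int
main (void)
{
  uint32_t x;
  for (x = 0; x < 4000000; x++)
    assert (udiv3 (x) == x / 3);
  assert (udiv3 (0xFFFFFFFFu) == 0xFFFFFFFFu / 3);
  return 0;
}
#endif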
 
/* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */
 
static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
          ? ~(unsigned HOST_WIDE_INT) 0
          : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;         /* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
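
/* Illustrative standalone sketch (not part of GCC) of the iteration
   above, specialized to 32 bits: y = x is already an inverse of an odd
   x modulo 2^3, and each Newton/Hensel step y = y * (2 - x*y) doubles
   the number of correct low-order bits.  Guarded out.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
invert_mod2_32 (uint32_t x)       /* x must be odd */
{
  uint32_t y = x;
  int nbit;
  for (nbit = 3; nbit < 32; nbit *= 2)
    y = y * (2 - x * y);          /* uint32_t arithmetic is mod 2^32 */
  return y;
}

int
main (void)
{
  uint32_t x;
  for (x = 1; x < 200001; x += 2)
    assert ((uint32_t) (x * invert_mod2_32 (x)) == 1);
  return 0;
}
#endif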
 
/* Emit code to adjust ADJ_OPERAND after a multiplication of the wrong
   signedness of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.
 
   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
                             rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
                      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
                      NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
                      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
                      NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                          target);

  return target;
}
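
/* Illustrative standalone sketch (not part of GCC) of the adjustment
   above for a 32-bit mode: the unsigned and signed high parts of a
   32 x 32 product differ by exactly (op0 < 0 ? op1 : 0) +
   (op1 < 0 ? op0 : 0), which the shift-and-AND pair computes without
   branches.  Assumes >> on a negative int is an arithmetic shift, as
   on GCC targets.  Guarded out.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int32_t
smulhi_from_umulhi (int32_t a, int32_t b)
{
  uint32_t uhi = (uint32_t) (((uint64_t) (uint32_t) a
                              * (uint32_t) b) >> 32);
  uhi -= (uint32_t) ((a >> 31) & b);   /* subtract b when a < 0 */
  uhi -= (uint32_t) ((b >> 31) & a);   /* subtract a when b < 0 */
  return (int32_t) uhi;
}

int
main (void)
{
  int32_t a, b;
  for (a = -300; a <= 300; a++)
    for (b = -300; b <= 300; b++)
      assert (smulhi_from_umulhi (a, b)
              == (int32_t) (((int64_t) a * b) >> 32));
  return 0;
}
#endif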
 
/* Subroutine of expand_mult_highpart.  Return the MODE high part of OP.  */

static rtx
extract_high_half (enum machine_mode mode, rtx op)
{
  enum machine_mode wider_mode;

  if (mode == word_mode)
    return gen_highpart (mode, op);

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  op = expand_shift (RSHIFT_EXPR, wider_mode, op,
                     build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
  return convert_modes (mode, wider_mode, op, 0);
}

/* Like expand_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
                            rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);
 
  /* First, try a multiplication insn that generates only the needed high
     part of the product, with the signedness given by UNSIGNEDP.  */
  if (mul_highpart_cost[mode] < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
                          unsignedp, OPTAB_DIRECT);
      if (tem)
        return tem;
    }
 
  /* Second, the same as above, but with the opposite signedness; the
     result must then be adjusted after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
          + 4 * add_cost[mode] < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
                          unsignedp, OPTAB_DIRECT);
      if (tem)
        /* We used the wrong signedness.  Adjust the result.  */
        return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
                                            tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
      && mul_widen_cost[wider_mode] < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
                          unsignedp, OPTAB_WIDEN);
      if (tem)
        return extract_high_half (mode, tem);
    }
 
  /* Try widening the mode and performing a non-widening multiplication.  */
  if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
    {
      rtx insns, wop0, wop1;

      /* We need to widen the operands, for example to ensure the
         constant multiplier is correctly sign or zero extended.
         Use a sequence to clean up any instructions emitted by
         the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
                          unsignedp, OPTAB_WIDEN);
      insns = get_insns ();
      end_sequence ();

      if (tem)
        {
          emit_insn (insns);
          return extract_high_half (mode, tem);
        }
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
          + 4 * add_cost[mode] < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
                          NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
        {
          tem = extract_high_half (mode, tem);
          /* We used the wrong signedness.  Adjust the result.  */
          return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
                                              target, unsignedp);
        }
    }

  return 0;
}
 
/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
   putting the high half of the result in TARGET if that is convenient,
   and return where the result is.  If the operation cannot be performed,
   0 is returned.
 
   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

static rtx
expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
                      rtx target, int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx tem;

  gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
  /* We can't support modes wider than HOST_BITS_PER_WIDE_INT.  */
  gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expand_mult_highpart_optab (mode, op0, op1, target,
                                       unsignedp, max_cost);

  extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost[mode];
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
                           max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
         cheaper than the shift/add version.  */
      tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
                                        alg.cost.cost + extra_cost);
      if (tem)
        return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
        tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expand_mult_highpart_optab (mode, op0, op1, target,
                                     unsignedp, max_cost);
}
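
/* Illustrative standalone sketch (not part of GCC) of what
   expand_mult_highpart computes, for a 32-bit mode: the high half of
   the widening product, i.e. the part a plain 32-bit multiply throws
   away.  Guarded out.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
umulhi32 (uint32_t x, uint32_t y)
{
  return (uint32_t) (((uint64_t) x * y) >> 32);
}

int
main (void)
{
  /* 0xFFFFFFFF^2 == 0xFFFFFFFE00000001, so the high half is 0xFFFFFFFE.  */
  assert (umulhi32 (0xFFFFFFFFu, 0xFFFFFFFFu) == 0xFFFFFFFEu);
  assert (umulhi32 (0x10000u, 0x10000u) == 1u);
  return 0;
}
#endif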
 
 
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  unsigned HOST_WIDE_INT masklow, maskhigh;
  rtx result, temp, shift, label;
  int logd;

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST >= 2
      && !optimize_size)
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
                                      mode, 0, -1);
      if (signmask)
        {
          signmask = force_reg (mode, signmask);
          masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
          shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

          /* Use the rtx_cost of a LSHIFTRT instruction to determine
             which instruction sequence to use.  If logical right shifts
             are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
             use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

          temp = gen_rtx_LSHIFTRT (mode, result, shift);
          if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
              || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
            {
              temp = expand_binop (mode, xor_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, xor_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          else
            {
              signmask = expand_binop (mode, lshr_optab, signmask, shift,
                                       NULL_RTX, 1, OPTAB_LIB_WIDEN);
              signmask = force_reg (mode, signmask);

              temp = expand_binop (mode, add_optab, op0, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp = expand_binop (mode, sub_optab, temp, signmask,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
            }
          return temp;
        }
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */

  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
  if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
      maskhigh = -1;
    }
  else
    maskhigh = (HOST_WIDE_INT) -1
                 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);

  temp = expand_binop (mode, and_optab, op0,
                       immed_double_const (masklow, maskhigh, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  masklow = (HOST_WIDE_INT) -1 << logd;
  maskhigh = -1;
  temp = expand_binop (mode, ior_optab, temp,
                       immed_double_const (masklow, maskhigh, mode),
                       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
                       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);
  emit_label (label);
  return result;
}
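
/* Illustrative standalone sketch (not part of GCC) of the two branch-free
   sequences above for signed x % 8 (d == 8, logd == 3): the XOR/SUB form
   and the shifted-signmask ADD/AND/SUB form.  Assumes >> of a negative
   int is an arithmetic shift, as on GCC targets.  Guarded out.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int32_t
smod8_xor (int32_t x)
{
  int32_t s = x >> 31;               /* signmask: 0 or -1 */
  int32_t t = (x ^ s) - s;           /* |x| when x is negative */
  t &= 7;
  return (t ^ s) - s;                /* put the sign back */
}

static int32_t
smod8_add (int32_t x)
{
  uint32_t bias = (uint32_t) (x >> 31) >> (32 - 3);  /* 7 when x < 0 */
  return (int32_t) ((((uint32_t) x + bias) & 7) - bias);
}

int
main (void)
{
  int32_t x;
  for (x = -100000; x <= 100000; x++)
    {
      assert (smod8_xor (x) == x % 8);
      assert (smod8_add (x) == x % 8);
    }
  return 0;
}
#endif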
 
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  tree shift;
  int logd;

  logd = floor_log2 (d);
  shift = build_int_cst (NULL_TREE, logd);

  if (d == 2 && BRANCH_COST >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST >= 2)
    {
      rtx temp2;

      /* ??? emit_conditional_move forces a stack adjustment via
         compare_from_rtx so, if the sequence is discarded, it will
         be lost.  Do it now instead.  */
      do_pending_stack_adjust ();

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
                           NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
                                     mode, temp, temp2, mode, 0);
      if (temp2)
        {
          rtx seq = get_insns ();
          end_sequence ();
          emit_insn (seq);
          return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
        }
      end_sequence ();
    }
#endif

  if (BRANCH_COST >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost[mode][ushift] > COSTS_N_INSNS (1))
        temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
                             NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
        temp = expand_shift (RSHIFT_EXPR, mode, temp,
                             build_int_cst (NULL_TREE, ushift),
                             NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
                           0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, GEN_INT (d - 1));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
}
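
/* Illustrative standalone sketch (not part of GCC) of the branch-free
   sequence above for signed division by a power of two (d == 8):
   bias negative operands by d - 1 so that the arithmetic right shift
   rounds toward zero instead of toward minus infinity.  Assumes
   arithmetic >> of negative values, as on GCC targets.  Guarded out.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int32_t
sdiv8 (int32_t x)
{
  int32_t bias = (x >> 31) & 7;   /* 7 when x < 0, else 0 */
  return (x + bias) >> 3;
}

int
main (void)
{
  int32_t x;
  for (x = -100000; x <= 100000; x++)
    assert (sdiv8 (x) == x / 8);
  return 0;
}
#endif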
 
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */
/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
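
/* Illustrative standalone sketch (not part of GCC) of the remainder-by-3
   trick in the comment above: after a low-part multiply by 0x55555556,
   the top two bits of the 32-bit product are x mod 3, exactly for
   inputs up to 0x1fffffff.  Guarded out.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
urem3 (uint32_t x)                /* valid for x <= 0x1fffffff */
{
  return (x * 0x55555556u) >> 30;
}

int
main (void)
{
  uint32_t x;
  for (x = 0; x <= 0x1fffffffu; x += 997)   /* sample the valid range */
    assert (urem3 (x) == x % 3);
  assert (urem3 (0x1fffffffu) == 0x1fffffffu % 3);
  return 0;
}
#endif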
 
rtx
expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
               rtx op0, rtx op1, rtx target, int unsignedp)
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  static HOST_WIDE_INT ext_op1;

  op1_is_constant = GET_CODE (op1) == CONST_INT;
  if (op1_is_constant)
    {
      ext_op1 = INTVAL (op1);
      if (unsignedp)
        ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
                     || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
    }

  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */

  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;
 
  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (MEM_P (op0) && MEM_P (target))))
          || reg_mentioned_p (target, op1)
          || (MEM_P (op1) && MEM_P (target))))
    target = 0;

  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */

  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
            ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
        || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab1->handlers[compute_mode].libfunc
          || optab2->handlers[compute_mode].libfunc)
        break;

  /* If we still couldn't find a mode, use MODE, but expand_binop will
     probably die.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.   Then set the constant of the last
     divide.  */
  max_cost = unsignedp ? udiv_cost[compute_mode] : sdiv_cost[compute_mode];
  if (rem_flag && ! (last_div_const != 0 && op1_is_constant
                     && INTVAL (op1) == last_div_const))
    max_cost -= mul_cost[compute_mode] + add_cost[compute_mode];

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = GET_CODE (op1) == CONST_INT;
      op1_is_pow2 = (op1_is_constant
                     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                          || (! unsignedp
                              && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }
 
  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh;
                int pre_shift, post_shift;
                int dummy;
                rtx ml;
                unsigned HOST_WIDE_INT d = (INTVAL (op1)
                                            & GET_MODE_MASK (compute_mode));

                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder
                          = expand_binop (compute_mode, and_optab, op0,
                                          GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                          remainder, 1,
                                          OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             build_int_cst (NULL_TREE,
                                                            pre_shift),
                                             tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag (tquotient, GEU, op0, op1,
                                                    compute_mode, 1, 1);
                        if (quotient == 0)
                          goto fail1;
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = floor_log2 (d & -d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            gcc_assert (!mh);
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;

                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost
                              = (shift_cost[compute_mode][post_shift - 1]
                                 + shift_cost[compute_mode][1]
                                 + 2 * add_cost[compute_mode]);
                            t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (compute_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift
                              (RSHIFT_EXPR, compute_mode, t2,
                               build_int_cst (NULL_TREE, 1),
                               NULL_RTX,1);
                            t4 = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t4,
                               build_int_cst (NULL_TREE, post_shift - 1),
                               tquotient, 1);
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift
                              (RSHIFT_EXPR, compute_mode, op0,
                               build_int_cst (NULL_TREE, pre_shift),
                               NULL_RTX, 1);
                            extra_cost
                              = (shift_cost[compute_mode][pre_shift]
                                 + shift_cost[compute_mode][post_shift]);
                            t2 = expand_mult_highpart (compute_mode, t1, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
4125
                            if (t2 == 0)
4126
                              goto fail1;
4127
                            quotient = expand_shift
4128
                              (RSHIFT_EXPR, compute_mode, t2,
4129
                               build_int_cst (NULL_TREE, post_shift),
4130
                               tquotient, 1);
4131
                          }
4132
                      }
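                    /* Both branches above implement unsigned division by an
                       invariant integer via a high-part multiply (Granlund &
                       Montgomery, PLDI 1994).  For SImode N / 7,
                       choose_multiplier yields the 33-bit multiplier
                       0x124924925, so ML is 0x24924925, MH is nonzero, and
                       the first sequence runs with post_shift 3:
                           t1 = MULUH (N, 0x24924925)
                           Q  = (((N - t1) >> 1) + t1) >> 2
                       e.g. N == 100 gives t1 == 14 and Q == 14.  */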
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_UDIV (compute_mode, op0, op1));
              }
            else                /* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                rtx mlr;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = gen_int_mode (abs_d, compute_mode);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (compute_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                compute_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (rem_flag ? smod_pow2_cheap[compute_mode]
                                      : sdiv_pow2_cheap[compute_mode])
                         /* We assume the cheap metric is true if the
                            optab has an expander for this mode.  */
                         && (((rem_flag ? smod_optab : sdiv_optab)
                              ->handlers[compute_mode].insn_code
                              != CODE_FOR_nothing)
                             || (sdivmod_optab->handlers[compute_mode]
                                 .insn_code != CODE_FOR_nothing)))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
                  {
                    if (rem_flag)
                      {
                        remainder = expand_smod_pow2 (compute_mode, op0, d);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }

                    if (sdiv_pow2_cheap[compute_mode]
                        && ((sdiv_optab->handlers[compute_mode].insn_code
                             != CODE_FOR_nothing)
                            || (sdivmod_optab->handlers[compute_mode].insn_code
                                != CODE_FOR_nothing)))
                      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
                                                compute_mode, op0,
                                                gen_int_mode (abs_d,
                                                              compute_mode),
                                                NULL_RTX, 0);
                    else
                      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && (set = single_set (insn)) != 0
                            && SET_DEST (set) == quotient
                            && abs_d < ((unsigned HOST_WIDE_INT) 1
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_unique_reg_note (insn,
                                               REG_EQUAL,
                                               gen_rtx_DIV (compute_mode,
                                                            op0,
                                                            GEN_INT
                                                            (trunc_int_for_mode
                                                             (abs_d,
                                                              compute_mode))));

                        quotient = expand_unop (compute_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &mlr, &post_shift, &lgup);
                    ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
                    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost = (shift_cost[compute_mode][post_shift]
                                      + shift_cost[compute_mode][size - 1]
                                      + add_cost[compute_mode]);
                        t1 = expand_mult_highpart (compute_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t1,
                           build_int_cst (NULL_TREE, post_shift),
                           NULL_RTX, 0);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
                        mlr = gen_int_mode (ml, compute_mode);
                        extra_cost = (shift_cost[compute_mode][post_shift]
                                      + shift_cost[compute_mode][size - 1]
                                      + 2 * add_cost[compute_mode]);
                        t1 = expand_mult_highpart (compute_mode, op0, mlr,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t2,
                           build_int_cst (NULL_TREE, post_shift),
                           NULL_RTX, 0);
                        t4 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t4),
                                             tquotient);
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_DIV (compute_mode, op0, op1));
              }
            break;
          }
      fail1:
        delete_insns_since (last);
        break;
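        /* The signed constant paths above use the same multiplicative
           technique.  For SImode N / 3, choose_multiplier yields
           ML = 0x55555556 with post_shift 0, so the quotient is
               Q = MULSH (N, 0x55555556) - (N >> 31)
           e.g. N == -7 gives MULSH == -3 and N >> 31 == -1, hence
           Q == -2, the correctly truncated quotient.  */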

      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
      /* We will come here only for signed operations.  */
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            unsigned HOST_WIDE_INT mh;
            int pre_shift, lgup, post_shift;
            HOST_WIDE_INT d = INTVAL (op1);
            rtx ml;

            if (d > 0)
              {
                /* We could just as easily deal with negative constants here,
                   but it does not seem worth the trouble for GCC 2.6.  */
                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder = expand_binop (compute_mode, and_optab, op0,
                                                  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                                  remainder, 0, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift
                      (RSHIFT_EXPR, compute_mode, op0,
                       build_int_cst (NULL_TREE, pre_shift),
                       tquotient, 0);
                  }
                else
                  {
                    rtx t1, t2, t3, t4;

                    mh = choose_multiplier (d, size, size - 1,
                                            &ml, &post_shift, &lgup);
                    gcc_assert (!mh);

                    if (post_shift < BITS_PER_WORD
                        && size - 1 < BITS_PER_WORD)
                      {
                        t1 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        t2 = expand_binop (compute_mode, xor_optab, op0, t1,
                                           NULL_RTX, 0, OPTAB_WIDEN);
                        extra_cost = (shift_cost[compute_mode][post_shift]
                                      + shift_cost[compute_mode][size - 1]
                                      + 2 * add_cost[compute_mode]);
                        t3 = expand_mult_highpart (compute_mode, t2, ml,
                                                   NULL_RTX, 1,
                                                   max_cost - extra_cost);
                        if (t3 != 0)
                          {
                            t4 = expand_shift
                              (RSHIFT_EXPR, compute_mode, t3,
                               build_int_cst (NULL_TREE, post_shift),
                               NULL_RTX, 1);
                            quotient = expand_binop (compute_mode, xor_optab,
                                                     t4, t1, tquotient, 0,
                                                     OPTAB_WIDEN);
                          }
                      }
                  }
              }
            else
              {
                rtx nsign, t1, t2, t3, t4;
                t1 = force_operand (gen_rtx_PLUS (compute_mode,
                                                  op0, constm1_rtx), NULL_RTX);
                t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
                                   0, OPTAB_WIDEN);
                nsign = expand_shift
                  (RSHIFT_EXPR, compute_mode, t2,
                   build_int_cst (NULL_TREE, size - 1),
                   NULL_RTX, 0);
                t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
                                    NULL_RTX);
                t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
                                    NULL_RTX, 0);
                if (t4)
                  {
                    rtx t5;
                    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
                                      NULL_RTX, 0);
                    quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                            t4, t5),
                                              tquotient);
                  }
              }
          }

        if (quotient != 0)
          break;
        delete_insns_since (last);

        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the quotient
           or remainder to get floor rounding, once we have the remainder.
           Notice that we compute also the final remainder value here,
           and return the result right away.  */
        if (target == 0 || GET_MODE (target) != compute_mode)
          target = gen_reg_rtx (compute_mode);

        if (rem_flag)
          {
            remainder
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            quotient = gen_reg_rtx (compute_mode);
          }
        else
          {
            quotient
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
          }

        if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                 quotient, remainder, 0))
          {
            /* This could be computed with a branch-less sequence.
               Save that for later.  */
            rtx tem;
            rtx label = gen_label_rtx ();
            do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
            expand_dec (quotient, const1_rtx);
            expand_inc (remainder, op1);
            emit_label (label);
            return gen_lowpart (mode, rem_flag ? remainder : quotient);
          }

        /* No luck with division elimination or divmod.  Have to do it
           by conditionally adjusting op0 *and* the result.  */
        {
          rtx label1, label2, label3, label4, label5;
          rtx adjusted_op0;
          rtx tem;

          quotient = gen_reg_rtx (compute_mode);
          adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
          label1 = gen_label_rtx ();
          label2 = gen_label_rtx ();
          label3 = gen_label_rtx ();
          label4 = gen_label_rtx ();
          label5 = gen_label_rtx ();
          do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label1);
          expand_inc (adjusted_op0, const1_rtx);
          emit_jump_insn (gen_jump (label4));
          emit_barrier ();
          emit_label (label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label3);
          expand_dec (adjusted_op0, const1_rtx);
          emit_label (label4);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          expand_dec (quotient, const1_rtx);
          emit_label (label5);
        }
        break;
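        /* Floor rounding differs from truncation exactly when the
           remainder is nonzero and the operands have opposite signs,
           hence the XOR-based sign tests above; the adjustment is
           Q_floor == Q_trunc - 1 and R_floor == R_trunc + OP1.
           E.g. -7 / 2: truncation gives -3 rem -1, floor gives
           -4 rem 1.  */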

      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
        if (unsignedp)
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
              {
                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_cst (NULL_TREE, floor_log2 (d)),
                                   tquotient, 1);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2;
              rtx adjusted_op0, tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
                               compute_mode, label1);
              emit_move_insn (quotient, const0_rtx);
              emit_jump_insn (gen_jump (label2));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
                                  quotient, 1, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label2);
            }
          }
        else /* signed */
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                && INTVAL (op1) >= 0)
              {
                /* This is extremely similar to the code for the unsigned case
                   above.  For 2.7 we should merge these variants, but for
                   2.6.1 I don't want to touch the code for unsigned since that
                   gets used in C.  The signed case will only be used by other
                   languages (Ada).  */

                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_cst (NULL_TREE, floor_log2 (d)),
                                   tquotient, 0);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);
            if (rem_flag)
              {
                remainder = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx tem;
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2, label3, label4, label5;
              rtx adjusted_op0;
              rtx tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              label3 = gen_label_rtx ();
              label4 = gen_label_rtx ();
              label5 = gen_label_rtx ();
              do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                               compute_mode, label1);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              emit_jump_insn (gen_jump (label4));
              emit_barrier ();
              emit_label (label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                               compute_mode, label3);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label3);
              expand_inc (adjusted_op0, const1_rtx);
              emit_label (label4);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label5);
            }
          }
        break;
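        /* Ceiling rounding is the mirror adjustment: bump the quotient
           when the remainder is nonzero (and, for the signed case, the
           operands have the same sign), giving Q_ceil == Q_trunc + 1
           and R_ceil == R_trunc - OP1.  E.g. unsigned 7 / 3:
           truncation gives 2 rem 1, ceiling gives 3 rem -2, and
           7 == 3 * 3 - 2.  */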

      case EXACT_DIV_EXPR:
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            HOST_WIDE_INT d = INTVAL (op1);
            unsigned HOST_WIDE_INT ml;
            int pre_shift;
            rtx t1;

            pre_shift = floor_log2 (d & -d);
            ml = invert_mod2n (d >> pre_shift, size);
            t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                               build_int_cst (NULL_TREE, pre_shift),
                               NULL_RTX, unsignedp);
            quotient = expand_mult (compute_mode, t1,
                                    gen_int_mode (ml, compute_mode),
                                    NULL_RTX, 1);

            insn = get_last_insn ();
            set_unique_reg_note (insn,
                                 REG_EQUAL,
                                 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                                 compute_mode,
                                                 op0, op1));
          }
        break;
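        /* Exact division works because the odd part of the divisor is
           invertible mod 2**SIZE.  E.g. for SImode D == 3,
           invert_mod2n gives 0xAAAAAAAB, since 3 * 0xAAAAAAAB
           == 2**33 + 1 == 1 (mod 2**32); an exact multiple such as 21
           then satisfies 21 * 0xAAAAAAAB == 7 (mod 2**32).  */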

      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
        if (unsignedp)
          {
            rtx tem;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
                                         quotient, 1, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 1, OPTAB_LIB_WIDEN);
              }
            tem = plus_constant (op1, -1);
            tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                build_int_cst (NULL_TREE, 1),
                                NULL_RTX, 1);
            do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
            expand_inc (quotient, const1_rtx);
            expand_dec (remainder, op1);
            emit_label (label);
          }
        else
          {
            rtx abs_rem, abs_op1, tem, mask;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
                                         quotient, 0, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 0, OPTAB_LIB_WIDEN);
              }
            abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
            abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
            tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
                                build_int_cst (NULL_TREE, 1),
                                NULL_RTX, 1);
            do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                 build_int_cst (NULL_TREE, size - 1),
                                 NULL_RTX, 0);
            tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_inc (quotient, tem);
            tem = expand_binop (compute_mode, xor_optab, mask, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_dec (remainder, tem);
            emit_label (label);
          }
        return gen_lowpart (mode, rem_flag ? remainder : quotient);
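        /* Both branches above round to nearest with ties away from
           zero: the truncated quotient is bumped exactly when
           2 * abs (remainder) >= abs (OP1).  E.g. 7 / 2 becomes
           4 rem -1, while 7 / 3 stays 2 rem 1.  */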

      default:
        gcc_unreachable ();
      }

  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (rem_flag)
        {
          /* Try to produce the remainder without producing the quotient.
             If we seem to have a divmod pattern that does not require widening,
             don't try widening here.  We should really have a WIDEN argument
             to expand_twoval_binop, since what we'd really like to do here is
             1) try a mod insn in compute_mode
             2) try a divmod insn in compute_mode
             3) try a div insn in compute_mode and multiply-subtract to get
                remainder
             4) try the same things with widening allowed.  */
          remainder
            = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                 op0, op1, target,
                                 unsignedp,
                                 ((optab2->handlers[compute_mode].insn_code
                                   != CODE_FOR_nothing)
                                  ? OPTAB_DIRECT : OPTAB_WIDEN));
          if (remainder == 0)
            {
              /* No luck there.  Can we do remainder and divide at once
                 without a library call?  */
              remainder = gen_reg_rtx (compute_mode);
              if (! expand_twoval_binop ((unsignedp
                                          ? udivmod_optab
                                          : sdivmod_optab),
                                         op0, op1,
                                         NULL_RTX, remainder, unsignedp))
                remainder = 0;
            }

          if (remainder)
            return gen_lowpart (mode, remainder);
        }

      /* Produce the quotient.  Try a quotient insn, but not a library call.
         If we have a divmod in this mode, use it in preference to widening
         the div (for this test we assume it will not fail).  Note that optab2
         is set to whichever of the two optabs the call below will use.  */
      quotient
        = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
                             op0, op1, rem_flag ? NULL_RTX : target,
                             unsignedp,
                             ((optab2->handlers[compute_mode].insn_code
                               != CODE_FOR_nothing)
                              ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
        {
          /* No luck there.  Try a quotient-and-remainder insn,
             keeping the quotient alone.  */
          quotient = gen_reg_rtx (compute_mode);
          if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
                                     op0, op1,
                                     quotient, NULL_RTX, unsignedp))
            {
              quotient = 0;
              if (! rem_flag)
                /* Still no luck.  If we are not computing the remainder,
                   use a library call for the quotient.  */
                quotient = sign_expand_binop (compute_mode,
                                              udiv_optab, sdiv_optab,
                                              op0, op1, target,
                                              unsignedp, OPTAB_LIB_WIDEN);
            }
        }
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (quotient == 0)
        {
          /* No divide instruction either.  Use library for remainder.  */
          remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                         op0, op1, target,
                                         unsignedp, OPTAB_LIB_WIDEN);
          /* No remainder function.  Try a quotient-and-remainder
             function, keeping the remainder.  */
          if (!remainder)
            {
              remainder = gen_reg_rtx (compute_mode);
              if (!expand_twoval_binop_libfunc
                  (unsignedp ? udivmod_optab : sdivmod_optab,
                   op0, op1,
                   NULL_RTX, remainder,
                   unsignedp ? UMOD : MOD))
                remainder = NULL_RTX;
            }
        }
      else
        {
          /* We divided.  Now finish doing X - Y * (X / Y).  */
          remainder = expand_mult (compute_mode, quotient, op1,
                                   NULL_RTX, unsignedp);
          remainder = expand_binop (compute_mode, sub_optab, op0,
                                    remainder, target, unsignedp,
                                    OPTAB_LIB_WIDEN);
        }
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
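
/* As a concrete illustration of the multiply-highpart sequences built
   above, here is a minimal, self-contained sketch of the SImode N / 7
   code that the unsigned TRUNC_DIV_EXPR path produces on a 32-bit
   target, assuming a 64-bit unsigned long long is available for the
   widening multiply.  */

static inline unsigned int ATTRIBUTE_UNUSED
udiv7_sketch (unsigned int n)
{
  /* High half of the 64-bit product N * 0x24924925, as computed by
     expand_mult_highpart.  */
  unsigned int t1 = (unsigned int) (((unsigned long long) n
                                     * 0x24924925u) >> 32);
  /* Fix-up used because the exact multiplier needs 33 bits:
     Q = (((N - t1) >> 1) + t1) >> 2.  */
  unsigned int t2 = (n - t1) >> 1;
  return (t2 + t1) >> 2;
}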

/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is a VAR_DECL, if there is no obvious better choice.
   X may be an expression; however, we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      {
        HOST_WIDE_INT hi = 0;

        if (INTVAL (x) < 0
            && !(TYPE_UNSIGNED (type)
                 && (GET_MODE_BITSIZE (TYPE_MODE (type))
                     < HOST_BITS_PER_WIDE_INT)))
          hi = -1;

        t = build_int_cst_wide (type, INTVAL (x), hi);

        return t;
      }

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
        t = build_int_cst_wide (type,
                                CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
      else
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, x);
          t = build_real (type, d);
        }

      return t;

    case CONST_VECTOR:
      {
        int units = CONST_VECTOR_NUNITS (x);
        tree itype = TREE_TYPE (type);
        tree t = NULL_TREE;
        int i;

        /* Build a tree with vector elements.  */
        for (i = units - 1; i >= 0; --i)
          {
            rtx elt = CONST_VECTOR_ELT (x, i);
            t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
          }

        return build_vector (type, t);
      }

    case PLUS:
      return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case MINUS:
      return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case NEG:
      return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));

    case MULT:
      return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case ASHIFT:
      return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1)));

    case LSHIFTRT:
      t = lang_hooks.types.unsigned_type (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case ASHIFTRT:
      t = lang_hooks.types.signed_type (type);
      return fold_convert (type, build2 (RSHIFT_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (type, XEXP (x, 1))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = lang_hooks.types.signed_type (type);
      else
        t = type;

      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));
    case UDIV:
      t = lang_hooks.types.unsigned_type (type);
      return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
                                         make_tree (t, XEXP (x, 0)),
                                         make_tree (t, XEXP (x, 1))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
                                          GET_CODE (x) == ZERO_EXTEND);
      return fold_convert (type, make_tree (t, XEXP (x, 0)));

    case CONST:
      return make_tree (type, XEXP (x, 0));

    case SYMBOL_REF:
      t = SYMBOL_REF_DECL (x);
      if (t)
        return fold_convert (type, build_fold_addr_expr (t));
      /* else fall through.  */

    default:
      t = build_decl (VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
         ptr_mode.  So convert.  */
      if (POINTER_TYPE_P (type))
        x = convert_memory_address (TYPE_MODE (type), x);

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
         want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl_with_rtl.rtl = x;

      return t;
    }
}
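
/* For example, make_tree maps (plus:SI (reg:SI 100) (const_int 4))
   to PLUS_EXPR <D, 4>, where D is a VAR_DECL carrying register 100
   as its rtl.  */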

/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}

/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
                 enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum machine_mode target_mode = GET_MODE (target);
  rtx tem;
  rtx last = get_last_insn ();
  rtx pattern, comparison;

  if (unsignedp)
    code = unsigned_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
          && (op1 == const0_rtx || op1 == constm1_rtx))
        {
          rtx op00, op01, op0both;

          /* Do a logical OR or AND of the two words and compare the result.  */
          op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
          op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
          op0both = expand_binop (word_mode,
                                  op1 == const0_rtx ? ior_optab : and_optab,
                                  op00, op01, NULL_RTX, unsignedp, OPTAB_DIRECT);

          if (op0both != 0)
            return emit_store_flag (target, code, op0both, op1, word_mode,
                                    unsignedp, normalizep);
        }
      else if ((code == LT || code == GE) && op1 == const0_rtx)
        {
          rtx op0h;

          /* If testing the sign bit, can just test on high word.  */
          op0h = simplify_gen_subreg (word_mode, op0, mode,
                                      subreg_highpart_offset (word_mode, mode));
          return emit_store_flag (target, code, op0h, op1, word_mode,
                                  unsignedp, normalizep);
        }
    }
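  /* These double-word reductions rely on: X == 0 iff
     (XLOW | XHIGH) == 0, X == -1 iff (XLOW & XHIGH) == -1, and the
     sign of X being the sign of its high word, so one word_mode
     comparison always suffices.  */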

  /* From now on, we won't change CODE, so set ICODE now.  */
  icode = setcc_gen_code[(int) code];

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                  == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
    {
      subtarget = target;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            size_int (GET_MODE_BITSIZE (mode) - 1),
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
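  /* The sequence above exploits two's complement arithmetic: A < 0 is
     the logical shift (unsigned) A >> (SIZE - 1), and A >= 0 is
     (~A) >> (SIZE - 1); using an arithmetic shift instead yields the
     0 / -1 forms.  */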
5249
 
5250
  if (icode != CODE_FOR_nothing)
5251
    {
5252
      insn_operand_predicate_fn pred;
5253
 
5254
      /* We think we may be able to do this with a scc insn.  Emit the
5255
         comparison and then the scc insn.  */
5256
 
5257
      do_pending_stack_adjust ();
5258
      last = get_last_insn ();
5259
 
5260
      comparison
5261
        = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
5262
      if (CONSTANT_P (comparison))
5263
        {
5264
          switch (GET_CODE (comparison))
5265
            {
5266
            case CONST_INT:
5267
              if (comparison == const0_rtx)
5268
                return const0_rtx;
5269
              break;
5270
 
5271
#ifdef FLOAT_STORE_FLAG_VALUE
5272
            case CONST_DOUBLE:
5273
              if (comparison == CONST0_RTX (GET_MODE (comparison)))
5274
                return const0_rtx;
5275
              break;
5276
#endif
5277
            default:
5278
              gcc_unreachable ();
5279
            }
5280
 
5281
          if (normalizep == 1)
5282
            return const1_rtx;
5283
          if (normalizep == -1)
5284
            return constm1_rtx;
5285
          return const_true_rtx;
5286
        }
5287
 
5288
      /* The code of COMPARISON may not match CODE if compare_from_rtx
5289
         decided to swap its operands and reverse the original code.
5290
 
5291
         We know that compare_from_rtx returns either a CONST_INT or
5292
         a new comparison code, so it is safe to just extract the
5293
         code from COMPARISON.  */
5294
      code = GET_CODE (comparison);
5295
 
5296
      /* Get a reference to the target in the proper mode for this insn.  */
5297
      compare_mode = insn_data[(int) icode].operand[0].mode;
5298
      subtarget = target;
5299
      pred = insn_data[(int) icode].operand[0].predicate;
5300
      if (optimize || ! (*pred) (subtarget, compare_mode))
5301
        subtarget = gen_reg_rtx (compare_mode);
5302
 
5303
      pattern = GEN_FCN (icode) (subtarget);
5304
      if (pattern)
5305
        {
5306
          emit_insn (pattern);
5307
 
5308
          /* If we are converting to a wider mode, first convert to
5309
             TARGET_MODE, then normalize.  This produces better combining
5310
             opportunities on machines that have a SIGN_EXTRACT when we are
5311
             testing a single bit.  This mostly benefits the 68k.
5312
 
5313
             If STORE_FLAG_VALUE does not have the sign bit set when
5314
             interpreted in COMPARE_MODE, we can do this conversion as
5315
             unsigned, which is usually more efficient.  */
5316
          if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
5317
            {
5318
              convert_move (target, subtarget,
5319
                            (GET_MODE_BITSIZE (compare_mode)
5320
                             <= HOST_BITS_PER_WIDE_INT)
5321
                            && 0 == (STORE_FLAG_VALUE
5322
                                     & ((HOST_WIDE_INT) 1
5323
                                        << (GET_MODE_BITSIZE (compare_mode) -1))));
5324
              op0 = target;
5325
              compare_mode = target_mode;
5326
            }
5327
          else
5328
            op0 = subtarget;
5329
 
5330
          /* If we want to keep subexpressions around, don't reuse our
5331
             last target.  */
5332
 
5333
          if (optimize)
5334
            subtarget = 0;
5335
 
5336
          /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
5337
             we don't have to do anything.  */
5338
          if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5339
            ;
5340
          /* STORE_FLAG_VALUE might be the most negative number, so write
             the comparison this way to avoid a compile-time warning.  */
          else if (- normalizep == STORE_FLAG_VALUE)
            op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);

          /* We don't want to use STORE_FLAG_VALUE < 0 below since this
             makes it hard to use a value of just the sign bit due to
             ANSI integer constant typing rules.  */
          else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
                   && (STORE_FLAG_VALUE
                       & ((HOST_WIDE_INT) 1
                          << (GET_MODE_BITSIZE (compare_mode) - 1))))
            op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
                                size_int (GET_MODE_BITSIZE (compare_mode) - 1),
                                subtarget, normalizep == 1);
          else
            {
              gcc_assert (STORE_FLAG_VALUE & 1);

              op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
              if (normalizep == -1)
                op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
            }
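          /* A worked example of the normalization above: on a
             hypothetical target whose scc insn leaves 0x80000000 or 0
             in a 32-bit OP0 (STORE_FLAG_VALUE is the sign bit),

                 (unsigned) op0 >> 31   gives 1 or 0    (normalizep == 1)
                 op0 >> 31              gives -1 or 0   (normalizep == -1)

             while a target with STORE_FLAG_VALUE == 1 falls into the
             odd case and uses op0 & 1, negated if -1 is wanted.  */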
 
5364
          /* If we were converting to a smaller mode, do the
5365
             conversion now.  */
5366
          if (target_mode != compare_mode)
5367
            {
5368
              convert_move (target, op0, 0);
5369
              return target;
5370
            }
5371
          else
5372
            return op0;
5373
        }
5374
    }
5375
 
5376
  delete_insns_since (last);
5377
 
  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
               && (target_mode == mode)) ? target : NULL_RTX;

  /* If we reached here, we can't do this with a scc insn.  However, there
     are some comparisons that can be done directly.  For example, if
     this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */
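  /* In source terms the rewrite is (a sketch; the helper name is
     illustrative only):

         int eq (int a, int b) { return (a ^ b) == 0; }

     XOR of equal values is zero and of unequal values is nonzero, so
     the recursive call only has to compare against zero; subtraction
     has the same property when no XOR pattern is available.  */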
 
  if (BRANCH_COST > 0
      && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
      && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem == 0)
        delete_insns_since (last);
      return tem;
    }
 
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (BRANCH_COST == 0
      || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST <= 1 || (code != LE && code != GT))))
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
               && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                   == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
        ;
      else
        return 0;
    }

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */
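  /* Case check of the identity (assuming a 32-bit two's-complement
     int A): if A > 0, both A and A - 1 are nonnegative, so the IOR has
     a clear sign bit; if A == 0, then A - 1 == -1 sets it; if A < 0,
     A itself sets it.  In C terms:

         int le0 (int a) { return (a | (a - 1)) < 0; }
  */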
 
  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }
 
  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */
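  /* Case check (assuming a 32-bit two's-complement int A and the
     arithmetic right shift that the signed RSHIFT_EXPR below
     performs):  A >> 31 is 0 for A >= 0 and -1 for A < 0, so
     (A >> 31) - A is -A < 0 when A > 0, is 0 when A == 0, and is
     -(A + 1) >= 0 when A < 0; the sign bit of the result is therefore
     set exactly when A > 0.  In C terms:

         int gt0 (int a) { return ((a >> 31) - a) < 0; }
  */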
 
  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          size_int (GET_MODE_BITSIZE (mode) - 1),
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting
         one / negating.  */
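      /* A sketch of the ABS variant (assuming a 32-bit two's-complement
         int A; FFS and zero-extension behave analogously):

             abs (A)         is 0 iff A == 0, else positive
             abs (A) - 1     is -1 iff A == 0, else >= 0   (EQ)
             -abs (A)        is 0 iff A == 0, else < 0     (NE)

         and for A == INT_MIN the overflow of abs plus the overflow of
         the subtract/negate still lands the sign bit correctly, as the
         note above says.  A final shift then normalizes.  */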
 
      if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }

      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */
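      /* Case check (assuming a 32-bit two's-complement int A): for
         A != 0 at least one of A and -A is negative (for A == INT_MIN
         both are), so -A | A has the sign bit set exactly when A != 0.
         In C terms:

             int ne0 (int a) { return (-a | a) < 0; }
             int eq0 (int a) { return ~(-a | a) < 0; }
      */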
 
      if (tem == 0 && (code == NE || BRANCH_COST > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        size_int (GET_MODE_BITSIZE (mode) - 1),
                        subtarget, normalizep == 1);

  if (tem)
    {
      if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }
  else
    delete_insns_since (last);

  return tem;
}
 
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (normalizep == 0)
    normalizep = 1;

  /* If this failed, we have to do this with set/compare/jump/set code.  */
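  /* In source terms the fallback emits (a sketch; CODE stands for the
     comparison being stored):

         target = 1;
         if (op0 CODE op1)
           goto label;
         target = 0;
       label:;

     so TARGET ends up 1 exactly when the comparison holds.  */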
 
  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  emit_move_insn (target, const1_rtx);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
                           NULL_RTX, label);

  emit_move_insn (target, const0_rtx);
  emit_label (label);

  return target;
}
 
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.  This is
   now a thin wrapper around do_compare_rtx_and_jump.  */
 
static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
                 rtx label)
{
  int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
  do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
                           NULL_RTX, NULL_RTX, label);
}
