/* Source: open8_urisc/trunk/gnu/binutils/gas/config/tc-arm.c, rev 160
   (OpenCores Subversion, https://opencores.org/ocsvn/open8_urisc/open8_urisc/trunk).  */

/* tc-arm.c -- Assemble for the ARM
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
        Modified by David Taylor (dtaylor@armltd.co.uk)
        Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
        Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
        Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

#include "as.h"
#include <limits.h>
#include <stdarg.h>
#define  NO_RELOC 0
#include "safe-ctype.h"
#include "subsegs.h"
#include "obstack.h"

#include "opcode/arm.h"

#ifdef OBJ_ELF
#include "elf/arm.h"
#include "dw2gencfi.h"
#endif

#include "dwarf2dbg.h"

#ifdef OBJ_ELF
/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state.  */

static struct
{
  symbolS *       proc_start;
  symbolS *       table_entry;
  symbolS *       personality_routine;
  int             personality_index;
  /* The segment containing the function.  */
  segT            saved_seg;
  subsegT         saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int             opcode_count;
  int             opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT         frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT         pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT         fp_offset;
  int             fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned        fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned        sp_restored:1;
} unwind;

#endif /* OBJ_ELF */
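/* Editorial sketch (not part of the upstream source): the state above is
   driven by the ARM EHABI unwinding directives, typically written as

       .fnstart
       push    {r4, lr}
       .save   {r4, lr}
       ...
       .fnend

   with .save and related directives appending bytes to the opcodes buffer
   until .fnend emits the unwind table entry.  */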
81
 
82
/* Results from operand parsing worker functions.  */
83
 
84
typedef enum
85
{
86
  PARSE_OPERAND_SUCCESS,
87
  PARSE_OPERAND_FAIL,
88
  PARSE_OPERAND_FAIL_NO_BACKTRACK
89
} parse_operand_result;
90
 
91
enum arm_float_abi
92
{
93
  ARM_FLOAT_ABI_HARD,
94
  ARM_FLOAT_ABI_SOFTFP,
95
  ARM_FLOAT_ABI_SOFT
96
};
97
 
98
/* Types of processor to assemble for.  */
99
#ifndef CPU_DEFAULT
100
/* The code that was here used to select a default CPU depending on compiler
101
   pre-defines which were only present when doing native builds, thus
102
   changing gas' default behaviour depending upon the build host.
103
 
104
   If you have a target that requires a default CPU option then you
105
   should define CPU_DEFAULT here.  */
106
#endif
107
 
108
#ifndef FPU_DEFAULT
109
# ifdef TE_LINUX
110
#  define FPU_DEFAULT FPU_ARCH_FPA
111
# elif defined (TE_NetBSD)
112
#  ifdef OBJ_ELF
113
#   define FPU_DEFAULT FPU_ARCH_VFP     /* Soft-float, but VFP order.  */
114
#  else
115
    /* Legacy a.out format.  */
116
#   define FPU_DEFAULT FPU_ARCH_FPA     /* Soft-float, but FPA order.  */
117
#  endif
118
# elif defined (TE_VXWORKS)
119
#  define FPU_DEFAULT FPU_ARCH_VFP      /* Soft-float, VFP order.  */
120
# else
121
   /* For backwards compatibility, default to FPA.  */
122
#  define FPU_DEFAULT FPU_ARCH_FPA
123
# endif
124
#endif /* ifndef FPU_DEFAULT */
125
 
126
#define streq(a, b)           (strcmp (a, b) == 0)
127
 
128
static arm_feature_set cpu_variant;
129
static arm_feature_set arm_arch_used;
130
static arm_feature_set thumb_arch_used;
131
 
132
/* Flags stored in private area of BFD structure.  */
133
static int uses_apcs_26      = FALSE;
134
static int atpcs             = FALSE;
135
static int support_interwork = FALSE;
136
static int uses_apcs_float   = FALSE;
137
static int pic_code          = FALSE;
138
static int fix_v4bx          = FALSE;
139
/* Warn on using deprecated features.  */
140
static int warn_on_deprecated = TRUE;
141
 
142
 
143
/* Variables that we set while parsing command-line options.  Once all
144
   options have been read we re-process these values to set the real
145
   assembly flags.  */
146
static const arm_feature_set *legacy_cpu = NULL;
147
static const arm_feature_set *legacy_fpu = NULL;
148
 
149
static const arm_feature_set *mcpu_cpu_opt = NULL;
150
static const arm_feature_set *mcpu_fpu_opt = NULL;
151
static const arm_feature_set *march_cpu_opt = NULL;
152
static const arm_feature_set *march_fpu_opt = NULL;
153
static const arm_feature_set *mfpu_opt = NULL;
154
static const arm_feature_set *object_arch = NULL;
155
 
156
/* Constants for known architecture features.  */
157
static const arm_feature_set fpu_default = FPU_DEFAULT;
158
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
 
167
#ifdef CPU_DEFAULT
168
static const arm_feature_set cpu_default = CPU_DEFAULT;
169
#endif
170
 
171
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179
static const arm_feature_set arm_ext_v4t_5 =
180
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188
static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190
static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191
static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192
static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197
static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198
static const arm_feature_set arm_ext_m =
199
  ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
200
static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
201
static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
202
static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
203
static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
204
static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
205
 
206
static const arm_feature_set arm_arch_any = ARM_ANY;
207
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
208
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
209
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
210
static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
211
 
212
static const arm_feature_set arm_cext_iwmmxt2 =
213
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
214
static const arm_feature_set arm_cext_iwmmxt =
215
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
216
static const arm_feature_set arm_cext_xscale =
217
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
218
static const arm_feature_set arm_cext_maverick =
219
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
220
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
221
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
222
static const arm_feature_set fpu_vfp_ext_v1xd =
223
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
224
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
225
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
226
static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
227
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
228
static const arm_feature_set fpu_vfp_ext_d32 =
229
  ARM_FEATURE (0, FPU_VFP_EXT_D32);
230
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
231
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
232
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
233
static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
234
static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
235
static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
236
 
237
static int mfloat_abi_opt = -1;
238
/* Record user cpu selection for object attributes.  */
239
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
240
/* Must be long enough to hold any of the names in arm_cpus.  */
241
static char selected_cpu_name[16];
242
 
243
/* Return TRUE if no CPU was selected on the command line.  */
244
static bfd_boolean
245
no_cpu_selected (void)
246
{
247
  return selected_cpu.core == arm_arch_none.core
248
    && selected_cpu.coproc == arm_arch_none.coproc;
249
}
250
 
251
#ifdef OBJ_ELF
252
# ifdef EABI_DEFAULT
253
static int meabi_flags = EABI_DEFAULT;
254
# else
255
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
256
# endif
257
 
258
static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
259
 
260
bfd_boolean
261
arm_is_eabi (void)
262
{
263
  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
264
}
265
#endif
266
 
267
#ifdef OBJ_ELF
268
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
269
symbolS * GOT_symbol;
270
#endif
271
 
272
/* 0: assemble for ARM,
273
   1: assemble for Thumb,
274
   2: assemble for Thumb even though target CPU does not support thumb
275
      instructions.  */
276
static int thumb_mode = 0;
277
/* A value distinct from the possible values for thumb_mode that we
278
   can use to record whether thumb_mode has been copied into the
279
   tc_frag_data field of a frag.  */
280
#define MODE_RECORDED (1 << 4)
281
 
282
/* Specifies the intrinsic IT insn behavior mode.  */
283
enum implicit_it_mode
284
{
285
  IMPLICIT_IT_MODE_NEVER  = 0x00,
286
  IMPLICIT_IT_MODE_ARM    = 0x01,
287
  IMPLICIT_IT_MODE_THUMB  = 0x02,
288
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
289
};
290
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
291
 
292
/* If unified_syntax is true, we are processing the new unified
293
   ARM/Thumb syntax.  Important differences from the old ARM mode:
294
 
295
     - Immediate operands do not require a # prefix.
296
     - Conditional affixes always appear at the end of the
297
       instruction.  (For backward compatibility, those instructions
298
       that formerly had them in the middle, continue to accept them
299
       there.)
300
     - The IT instruction may appear, and if it does is validated
301
       against subsequent conditional affixes.  It does not generate
302
       machine code.
303
 
304
   Important differences from the old Thumb mode:
305
 
306
     - Immediate operands do not require a # prefix.
307
     - Most of the V6T2 instructions are only available in unified mode.
308
     - The .N and .W suffixes are recognized and honored (it is an error
309
       if they cannot be honored).
310
     - All instructions set the flags if and only if they have an 's' affix.
311
     - Conditional affixes may be used.  They are validated against
312
       preceding IT instructions.  Unlike ARM mode, you cannot use a
313
       conditional affix except in the scope of an IT instruction.  */
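/* Editorial sketch (not part of the upstream source): with ".syntax unified"
   the conditional affix follows any 's' suffix, so the pre-UAL ARM spelling
   "addeqs r0, r0, #1" is written "addseq r0, r0, #1", the '#' before the
   immediate is optional, and in Thumb code the conditional form is only
   accepted inside a preceding IT block (e.g. after "it eq").  */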
314
 
315
static bfd_boolean unified_syntax = FALSE;
316
 
317
enum neon_el_type
318
{
319
  NT_invtype,
320
  NT_untyped,
321
  NT_integer,
322
  NT_float,
323
  NT_poly,
324
  NT_signed,
325
  NT_unsigned
326
};
327
 
328
struct neon_type_el
329
{
330
  enum neon_el_type type;
331
  unsigned size;
332
};
333
 
334
#define NEON_MAX_TYPE_ELS 4
335
 
336
struct neon_type
337
{
338
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
339
  unsigned elems;
340
};
341
 
342
enum it_instruction_type
343
{
344
   OUTSIDE_IT_INSN,
345
   INSIDE_IT_INSN,
346
   INSIDE_IT_LAST_INSN,
347
   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
348
                              if inside, should be the last one.  */
349
   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
350
                              i.e. BKPT and NOP.  */
351
   IT_INSN                 /* The IT insn has been parsed.  */
352
};
353
 
354
struct arm_it
355
{
356
  const char *  error;
357
  unsigned long instruction;
358
  int           size;
359
  int           size_req;
360
  int           cond;
361
  /* "uncond_value" is set to the value in place of the conditional field in
362
     unconditional versions of the instruction, or -1 if nothing is
363
     appropriate.  */
364
  int           uncond_value;
365
  struct neon_type vectype;
366
  /* This does not indicate an actual NEON instruction, only that
367
     the mnemonic accepts neon-style type suffixes.  */
368
  int           is_neon;
369
  /* Set to the opcode if the instruction needs relaxation.
370
     Zero if the instruction is not relaxed.  */
371
  unsigned long relax;
372
  struct
373
  {
374
    bfd_reloc_code_real_type type;
375
    expressionS              exp;
376
    int                      pc_rel;
377
  } reloc;
378
 
379
  enum it_instruction_type it_insn_type;
380
 
381
  struct
382
  {
383
    unsigned reg;
384
    signed int imm;
385
    struct neon_type_el vectype;
386
    unsigned present    : 1;  /* Operand present.  */
387
    unsigned isreg      : 1;  /* Operand was a register.  */
388
    unsigned immisreg   : 1;  /* .imm field is a second register.  */
389
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
390
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
391
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
392
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
393
       instructions. This allows us to disambiguate ARM <-> vector insns.  */
394
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
395
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
396
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
397
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
398
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
399
    unsigned writeback  : 1;  /* Operand has trailing !  */
400
    unsigned preind     : 1;  /* Preindexed address.  */
401
    unsigned postind    : 1;  /* Postindexed address.  */
402
    unsigned negative   : 1;  /* Index register was negated.  */
403
    unsigned shifted    : 1;  /* Shift applied to operation.  */
404
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
405
  } operands[6];
406
};
407
 
408
static struct arm_it inst;
409
 
410
#define NUM_FLOAT_VALS 8
411
 
412
const char * fp_const[] =
413
{
414
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
415
};
416
 
417
/* Number of littlenums required to hold an extended precision number.  */
418
#define MAX_LITTLENUMS 6
419
 
420
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
421
 
422
#define FAIL    (-1)
423
#define SUCCESS (0)
424
 
425
#define SUFF_S 1
426
#define SUFF_D 2
427
#define SUFF_E 3
428
#define SUFF_P 4
429
 
430
#define CP_T_X   0x00008000
431
#define CP_T_Y   0x00400000
432
 
433
#define CONDS_BIT        0x00100000
434
#define LOAD_BIT         0x00100000
435
 
436
#define DOUBLE_LOAD_FLAG 0x00000001
437
 
438
struct asm_cond
439
{
440
  const char *   template_name;
441
  unsigned long  value;
442
};
443
 
444
#define COND_ALWAYS 0xE
445
 
446
struct asm_psr
447
{
448
  const char *   template_name;
449
  unsigned long  field;
450
};
451
 
452
struct asm_barrier_opt
453
{
454
  const char *   template_name;
455
  unsigned long  value;
456
};
457
 
458
/* The bit that distinguishes CPSR and SPSR.  */
459
#define SPSR_BIT   (1 << 22)
460
 
461
/* The individual PSR flag bits.  */
462
#define PSR_c   (1 << 16)
463
#define PSR_x   (1 << 17)
464
#define PSR_s   (1 << 18)
465
#define PSR_f   (1 << 19)
466
 
467
struct reloc_entry
468
{
469
  char *                    name;
470
  bfd_reloc_code_real_type  reloc;
471
};
472
 
473
enum vfp_reg_pos
474
{
475
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
476
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
477
};
478
 
479
enum vfp_ldstm_type
480
{
481
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
482
};
483
 
484
/* Bits for DEFINED field in neon_typed_alias.  */
485
#define NTA_HASTYPE  1
486
#define NTA_HASINDEX 2
487
 
488
struct neon_typed_alias
489
{
490
  unsigned char        defined;
491
  unsigned char        index;
492
  struct neon_type_el  eltype;
493
};
494
 
495
/* ARM register categories.  This includes coprocessor numbers and various
496
   architecture extensions' registers.  */
497
enum arm_reg_type
498
{
499
  REG_TYPE_RN,
500
  REG_TYPE_CP,
501
  REG_TYPE_CN,
502
  REG_TYPE_FN,
503
  REG_TYPE_VFS,
504
  REG_TYPE_VFD,
505
  REG_TYPE_NQ,
506
  REG_TYPE_VFSD,
507
  REG_TYPE_NDQ,
508
  REG_TYPE_NSDQ,
509
  REG_TYPE_VFC,
510
  REG_TYPE_MVF,
511
  REG_TYPE_MVD,
512
  REG_TYPE_MVFX,
513
  REG_TYPE_MVDX,
514
  REG_TYPE_MVAX,
515
  REG_TYPE_DSPSC,
516
  REG_TYPE_MMXWR,
517
  REG_TYPE_MMXWC,
518
  REG_TYPE_MMXWCG,
519
  REG_TYPE_XSCALE,
520
  REG_TYPE_RNB
521
};
522
 
523
/* Structure for a hash table entry for a register.
524
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
525
   information which states whether a vector type or index is specified (for a
526
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
527
struct reg_entry
528
{
529
  const char *               name;
530
  unsigned int               number;
531
  unsigned char              type;
532
  unsigned char              builtin;
533
  struct neon_typed_alias *  neon;
534
};
535
 
536
/* Diagnostics used when we don't get a register of the expected type.  */
537
const char * const reg_expected_msgs[] =
538
{
539
  N_("ARM register expected"),
540
  N_("bad or missing co-processor number"),
541
  N_("co-processor register expected"),
542
  N_("FPA register expected"),
543
  N_("VFP single precision register expected"),
544
  N_("VFP/Neon double precision register expected"),
545
  N_("Neon quad precision register expected"),
546
  N_("VFP single or double precision register expected"),
547
  N_("Neon double or quad precision register expected"),
548
  N_("VFP single, double or Neon quad precision register expected"),
549
  N_("VFP system register expected"),
550
  N_("Maverick MVF register expected"),
551
  N_("Maverick MVD register expected"),
552
  N_("Maverick MVFX register expected"),
553
  N_("Maverick MVDX register expected"),
554
  N_("Maverick MVAX register expected"),
555
  N_("Maverick DSPSC register expected"),
556
  N_("iWMMXt data register expected"),
557
  N_("iWMMXt control register expected"),
558
  N_("iWMMXt scalar register expected"),
559
  N_("XScale accumulator register expected"),
560
};
561
 
562
/* Some well known registers that we refer to directly elsewhere.  */
563
#define REG_SP  13
564
#define REG_LR  14
565
#define REG_PC  15
566
 
567
/* ARM instructions take 4 bytes in the object file, Thumb instructions
568
   take 2:  */
569
#define INSN_SIZE       4
570
 
571
struct asm_opcode
572
{
573
  /* Basic string to match.  */
574
  const char * template_name;
575
 
576
  /* Parameters to instruction.  */
577
  unsigned int operands[8];
578
 
579
  /* Conditional tag - see opcode_lookup.  */
580
  unsigned int tag : 4;
581
 
582
  /* Basic instruction code.  */
583
  unsigned int avalue : 28;
584
 
585
  /* Thumb-format instruction code.  */
586
  unsigned int tvalue;
587
 
588
  /* Which architecture variant provides this instruction.  */
589
  const arm_feature_set * avariant;
590
  const arm_feature_set * tvariant;
591
 
592
  /* Function to call to encode instruction in ARM format.  */
593
  void (* aencode) (void);
594
 
595
  /* Function to call to encode instruction in Thumb format.  */
596
  void (* tencode) (void);
597
};
598
 
599
/* Defines for various bits that we will want to toggle.  */
600
#define INST_IMMEDIATE  0x02000000
601
#define OFFSET_REG      0x02000000
602
#define HWOFFSET_IMM    0x00400000
603
#define SHIFT_BY_REG    0x00000010
604
#define PRE_INDEX       0x01000000
605
#define INDEX_UP        0x00800000
606
#define WRITE_BACK      0x00200000
607
#define LDM_TYPE_2_OR_3 0x00400000
608
#define CPSI_MMOD       0x00020000
609
 
610
#define LITERAL_MASK    0xf000f000
611
#define OPCODE_MASK     0xfe1fffff
612
#define V4_STR_BIT      0x00000020
613
 
614
#define T2_SUBS_PC_LR   0xf3de8f00
615
 
616
#define DATA_OP_SHIFT   21
617
 
618
#define T2_OPCODE_MASK  0xfe1fffff
619
#define T2_DATA_OP_SHIFT 21
620
 
621
/* Codes to distinguish the arithmetic instructions.  */
622
#define OPCODE_AND      0
623
#define OPCODE_EOR      1
624
#define OPCODE_SUB      2
625
#define OPCODE_RSB      3
626
#define OPCODE_ADD      4
627
#define OPCODE_ADC      5
628
#define OPCODE_SBC      6
629
#define OPCODE_RSC      7
630
#define OPCODE_TST      8
631
#define OPCODE_TEQ      9
632
#define OPCODE_CMP      10
633
#define OPCODE_CMN      11
634
#define OPCODE_ORR      12
635
#define OPCODE_MOV      13
636
#define OPCODE_BIC      14
637
#define OPCODE_MVN      15
638
 
639
#define T2_OPCODE_AND   0
640
#define T2_OPCODE_BIC   1
641
#define T2_OPCODE_ORR   2
642
#define T2_OPCODE_ORN   3
643
#define T2_OPCODE_EOR   4
644
#define T2_OPCODE_ADD   8
645
#define T2_OPCODE_ADC   10
646
#define T2_OPCODE_SBC   11
647
#define T2_OPCODE_SUB   13
648
#define T2_OPCODE_RSB   14
649
 
650
#define T_OPCODE_MUL 0x4340
651
#define T_OPCODE_TST 0x4200
652
#define T_OPCODE_CMN 0x42c0
653
#define T_OPCODE_NEG 0x4240
654
#define T_OPCODE_MVN 0x43c0
655
 
656
#define T_OPCODE_ADD_R3 0x1800
657
#define T_OPCODE_SUB_R3 0x1a00
658
#define T_OPCODE_ADD_HI 0x4400
659
#define T_OPCODE_ADD_ST 0xb000
660
#define T_OPCODE_SUB_ST 0xb080
661
#define T_OPCODE_ADD_SP 0xa800
662
#define T_OPCODE_ADD_PC 0xa000
663
#define T_OPCODE_ADD_I8 0x3000
664
#define T_OPCODE_SUB_I8 0x3800
665
#define T_OPCODE_ADD_I3 0x1c00
666
#define T_OPCODE_SUB_I3 0x1e00
667
 
668
#define T_OPCODE_ASR_R  0x4100
669
#define T_OPCODE_LSL_R  0x4080
670
#define T_OPCODE_LSR_R  0x40c0
671
#define T_OPCODE_ROR_R  0x41c0
672
#define T_OPCODE_ASR_I  0x1000
673
#define T_OPCODE_LSL_I  0x0000
674
#define T_OPCODE_LSR_I  0x0800
675
 
676
#define T_OPCODE_MOV_I8 0x2000
677
#define T_OPCODE_CMP_I8 0x2800
678
#define T_OPCODE_CMP_LR 0x4280
679
#define T_OPCODE_MOV_HR 0x4600
680
#define T_OPCODE_CMP_HR 0x4500
681
 
682
#define T_OPCODE_LDR_PC 0x4800
683
#define T_OPCODE_LDR_SP 0x9800
684
#define T_OPCODE_STR_SP 0x9000
685
#define T_OPCODE_LDR_IW 0x6800
686
#define T_OPCODE_STR_IW 0x6000
687
#define T_OPCODE_LDR_IH 0x8800
688
#define T_OPCODE_STR_IH 0x8000
689
#define T_OPCODE_LDR_IB 0x7800
690
#define T_OPCODE_STR_IB 0x7000
691
#define T_OPCODE_LDR_RW 0x5800
692
#define T_OPCODE_STR_RW 0x5000
693
#define T_OPCODE_LDR_RH 0x5a00
694
#define T_OPCODE_STR_RH 0x5200
695
#define T_OPCODE_LDR_RB 0x5c00
696
#define T_OPCODE_STR_RB 0x5400
697
 
698
#define T_OPCODE_PUSH   0xb400
699
#define T_OPCODE_POP    0xbc00
700
 
701
#define T_OPCODE_BRANCH 0xe000
702
 
703
#define THUMB_SIZE      2       /* Size of thumb instruction.  */
704
#define THUMB_PP_PC_LR 0x0100
705
#define THUMB_LOAD_BIT 0x0800
706
#define THUMB2_LOAD_BIT 0x00100000
707
 
708
#define BAD_ARGS        _("bad arguments to instruction")
709
#define BAD_SP          _("r13 not allowed here")
710
#define BAD_PC          _("r15 not allowed here")
711
#define BAD_COND        _("instruction cannot be conditional")
712
#define BAD_OVERLAP     _("registers may not be the same")
713
#define BAD_HIREG       _("lo register required")
714
#define BAD_THUMB32     _("instruction not supported in Thumb16 mode")
715
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
716
#define BAD_BRANCH      _("branch must be last instruction in IT block")
717
#define BAD_NOT_IT      _("instruction not allowed in IT block")
718
#define BAD_FPU         _("selected FPU does not support instruction")
719
#define BAD_OUT_IT      _("thumb conditional instruction should be in IT block")
720
#define BAD_IT_COND     _("incorrect condition in IT block")
721
#define BAD_IT_IT       _("IT falling in the range of a previous IT block")
722
#define MISSING_FNSTART _("missing .fnstart before unwinding directive")
723
#define BAD_PC_ADDRESSING \
724
        _("cannot use register index with PC-relative addressing")
725
#define BAD_PC_WRITEBACK \
726
        _("cannot use writeback with PC-relative addressing")
727 160 khays
#define BAD_RANGE     _("branch out of range")
728 16 khays
 
729
static struct hash_control * arm_ops_hsh;
730
static struct hash_control * arm_cond_hsh;
731
static struct hash_control * arm_shift_hsh;
732
static struct hash_control * arm_psr_hsh;
733
static struct hash_control * arm_v7m_psr_hsh;
734
static struct hash_control * arm_reg_hsh;
735
static struct hash_control * arm_reloc_hsh;
736
static struct hash_control * arm_barrier_opt_hsh;
737
 
738
/* Stuff needed to resolve the label ambiguity
739
   As:
740
     ...
741
     label:   <insn>
742
   may differ from:
743
     ...
744
     label:
745
              <insn>  */
746
 
747
symbolS *  last_label_seen;
748
static int label_is_thumb_function_name = FALSE;
749
 
750
/* Literal pool structure.  Held on a per-section
751
   and per-sub-section basis.  */
752
 
753
#define MAX_LITERAL_POOL_SIZE 1024
754
typedef struct literal_pool
755
{
756
  expressionS            literals [MAX_LITERAL_POOL_SIZE];
757
  unsigned int           next_free_entry;
758
  unsigned int           id;
759
  symbolS *              symbol;
760
  segT                   section;
761
  subsegT                sub_section;
762 160 khays
#ifdef OBJ_ELF
763
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
764
#endif
765 16 khays
  struct literal_pool *  next;
766
} literal_pool;
767
 
768
/* Pointer to a linked list of literal pools.  */
769
literal_pool * list_of_pools = NULL;
770
 
771
#ifdef OBJ_ELF
772
#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
773
#else
774
static struct current_it now_it;
775
#endif
776
 
777
static inline int
778
now_it_compatible (int cond)
779
{
780
  return (cond & ~1) == (now_it.cc & ~1);
781
}
782
 
783
static inline int
784
conditional_insn (void)
785
{
786
  return inst.cond != COND_ALWAYS;
787
}
788
 
789
static int in_it_block (void);
790
 
791
static int handle_it_state (void);
792
 
793
static void force_automatic_it_block_close (void);
794
 
795
static void it_fsm_post_encode (void);
796
 
797
#define set_it_insn_type(type)                  \
798
  do                                            \
799
    {                                           \
800
      inst.it_insn_type = type;                 \
801
      if (handle_it_state () == FAIL)           \
802
        return;                                 \
803
    }                                           \
804
  while (0)
805
 
806
#define set_it_insn_type_nonvoid(type, failret) \
807
  do                                            \
808
    {                                           \
809
      inst.it_insn_type = type;                 \
810
      if (handle_it_state () == FAIL)           \
811
        return failret;                         \
812
    }                                           \
813
  while(0)
814
 
815
#define set_it_insn_type_last()                         \
816
  do                                                    \
817
    {                                                   \
818
      if (inst.cond == COND_ALWAYS)                     \
819
        set_it_insn_type (IF_INSIDE_IT_LAST_INSN);      \
820
      else                                              \
821
        set_it_insn_type (INSIDE_IT_LAST_INSN);         \
822
    }                                                   \
823
  while (0)
824
 
825
/* Pure syntax.  */
826
 
827
/* This array holds the chars that always start a comment.  If the
828
   pre-processor is disabled, these aren't very useful.  */
829
const char comment_chars[] = "@";
830
 
831
/* This array holds the chars that only start a comment at the beginning of
832
   a line.  If the line seems to have the form '# 123 filename'
833
   .line and .file directives will appear in the pre-processed output.  */
834
/* Note that input_file.c hand checks for '#' at the beginning of the
835
   first line of the input file.  This is because the compiler outputs
836
   #NO_APP at the beginning of its output.  */
837
/* Also note that comments like this one will always work.  */
838
const char line_comment_chars[] = "#";
839
 
840
const char line_separator_chars[] = ";";
841
 
842
/* Chars that can be used to separate mant
843
   from exp in floating point numbers.  */
844
const char EXP_CHARS[] = "eE";
845
 
846
/* Chars that mean this number is a floating point constant.  */
847
/* As in 0f12.456  */
848
/* or    0d1.2345e12  */
849
 
850
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
851
 
852
/* Prefix characters that indicate the start of an immediate
853
   value.  */
854
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
855
 
856
/* Separator character handling.  */
857
 
858
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
859
 
860
static inline int
861
skip_past_char (char ** str, char c)
862
{
863
  if (**str == c)
864
    {
865
      (*str)++;
866
      return SUCCESS;
867
    }
868
  else
869
    return FAIL;
870
}
871
 
872
#define skip_past_comma(str) skip_past_char (str, ',')
873
 
874
/* Arithmetic expressions (possibly involving symbols).  */
875
 
876
/* Return TRUE if anything in the expression is a bignum.  */
877
 
878
static int
879
walk_no_bignums (symbolS * sp)
880
{
881
  if (symbol_get_value_expression (sp)->X_op == O_big)
882
    return 1;
883
 
884
  if (symbol_get_value_expression (sp)->X_add_symbol)
885
    {
886
      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
887
              || (symbol_get_value_expression (sp)->X_op_symbol
888
                  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
889
    }
890
 
891
  return 0;
892
}
893
 
894
static int in_my_get_expression = 0;
895
 
896
/* Third argument to my_get_expression.  */
897
#define GE_NO_PREFIX 0
898
#define GE_IMM_PREFIX 1
899
#define GE_OPT_PREFIX 2
900
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
901
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
902
#define GE_OPT_PREFIX_BIG 3
903
 
904
static int
905
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
906
{
907
  char * save_in;
908
  segT   seg;
909
 
910
  /* In unified syntax, all prefixes are optional.  */
911
  if (unified_syntax)
912
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
913
                  : GE_OPT_PREFIX;
914
 
915
  switch (prefix_mode)
916
    {
917
    case GE_NO_PREFIX: break;
918
    case GE_IMM_PREFIX:
919
      if (!is_immediate_prefix (**str))
920
        {
921
          inst.error = _("immediate expression requires a # prefix");
922
          return FAIL;
923
        }
924
      (*str)++;
925
      break;
926
    case GE_OPT_PREFIX:
927
    case GE_OPT_PREFIX_BIG:
928
      if (is_immediate_prefix (**str))
929
        (*str)++;
930
      break;
931
    default: abort ();
932
    }
933
 
934
  memset (ep, 0, sizeof (expressionS));
935
 
936
  save_in = input_line_pointer;
937
  input_line_pointer = *str;
938
  in_my_get_expression = 1;
939
  seg = expression (ep);
940
  in_my_get_expression = 0;
941
 
942
  if (ep->X_op == O_illegal || ep->X_op == O_absent)
943
    {
944
      /* We found a bad or missing expression in md_operand().  */
945
      *str = input_line_pointer;
946
      input_line_pointer = save_in;
947
      if (inst.error == NULL)
948
        inst.error = (ep->X_op == O_absent
949
                      ? _("missing expression") :_("bad expression"));
950
      return 1;
951
    }
952
 
953
#ifdef OBJ_AOUT
954
  if (seg != absolute_section
955
      && seg != text_section
956
      && seg != data_section
957
      && seg != bss_section
958
      && seg != undefined_section)
959
    {
960
      inst.error = _("bad segment");
961
      *str = input_line_pointer;
962
      input_line_pointer = save_in;
963
      return 1;
964
    }
965
#else
966
  (void) seg;
967
#endif
968
 
969
  /* Get rid of any bignums now, so that we don't generate an error for which
970
     we can't establish a line number later on.  Big numbers are never valid
971
     in instructions, which is where this routine is always called.  */
972
  if (prefix_mode != GE_OPT_PREFIX_BIG
973
      && (ep->X_op == O_big
974
          || (ep->X_add_symbol
975
              && (walk_no_bignums (ep->X_add_symbol)
976
                  || (ep->X_op_symbol
977
                      && walk_no_bignums (ep->X_op_symbol))))))
978
    {
979
      inst.error = _("invalid constant");
980
      *str = input_line_pointer;
981
      input_line_pointer = save_in;
982
      return 1;
983
    }
984
 
985
  *str = input_line_pointer;
986
  input_line_pointer = save_in;
987
  return 0;
988
}
989
 
990
/* Turn a string in input_line_pointer into a floating point constant
991
   of type TYPE, and store the appropriate bytes in *LITP.  The number
992
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
993
   returned, or NULL on OK.
994
 
995
   Note that fp constants aren't represented in the normal way on the ARM.
996
   In big endian mode, things are as expected.  However, in little endian
997
   mode fp constants are big-endian word-wise, and little-endian byte-wise
998
   within the words.  For example, (double) 1.1 in big endian mode is
999
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1000
   the byte sequence 99 99 f1 3f 9a 99 99 99.
1001
 
1002
   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
1003
 
1004
char *
1005
md_atof (int type, char * litP, int * sizeP)
1006
{
1007
  int prec;
1008
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
1009
  char *t;
1010
  int i;
1011
 
1012
  switch (type)
1013
    {
1014
    case 'f':
1015
    case 'F':
1016
    case 's':
1017
    case 'S':
1018
      prec = 2;
1019
      break;
1020
 
1021
    case 'd':
1022
    case 'D':
1023
    case 'r':
1024
    case 'R':
1025
      prec = 4;
1026
      break;
1027
 
1028
    case 'x':
1029
    case 'X':
1030
      prec = 5;
1031
      break;
1032
 
1033
    case 'p':
1034
    case 'P':
1035
      prec = 5;
1036
      break;
1037
 
1038
    default:
1039
      *sizeP = 0;
1040
      return _("Unrecognized or unsupported floating point constant");
1041
    }
1042
 
1043
  t = atof_ieee (input_line_pointer, type, words);
1044
  if (t)
1045
    input_line_pointer = t;
1046
  *sizeP = prec * sizeof (LITTLENUM_TYPE);
1047
 
1048
  if (target_big_endian)
1049
    {
1050
      for (i = 0; i < prec; i++)
1051
        {
1052
          md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1053
          litP += sizeof (LITTLENUM_TYPE);
1054
        }
1055
    }
1056
  else
1057
    {
1058
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1059
        for (i = prec - 1; i >= 0; i--)
1060
          {
1061
            md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1062
            litP += sizeof (LITTLENUM_TYPE);
1063
          }
1064
      else
1065
        /* For a 4 byte float the order of elements in `words' is 1 0.
1066
           For an 8 byte float the order is 1 0 3 2.  */
1067
        for (i = 0; i < prec; i += 2)
1068
          {
1069
            md_number_to_chars (litP, (valueT) words[i + 1],
1070
                                sizeof (LITTLENUM_TYPE));
1071
            md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1072
                                (valueT) words[i], sizeof (LITTLENUM_TYPE));
1073
            litP += 2 * sizeof (LITTLENUM_TYPE);
1074
          }
1075
    }
1076
 
1077
  return NULL;
1078
}
1079
 
1080
/* We handle all bad expressions here, so that we can report the faulty
1081
   instruction in the error message.  */
1082
void
1083
md_operand (expressionS * exp)
1084
{
1085
  if (in_my_get_expression)
1086
    exp->X_op = O_illegal;
1087
}
1088
 
1089
/* Immediate values.  */
1090
 
1091
/* Generic immediate-value read function for use in directives.
1092
   Accepts anything that 'expression' can fold to a constant.
1093
   *val receives the number.  */
1094
#ifdef OBJ_ELF
1095
static int
1096
immediate_for_directive (int *val)
1097
{
1098
  expressionS exp;
1099
  exp.X_op = O_illegal;
1100
 
1101
  if (is_immediate_prefix (*input_line_pointer))
1102
    {
1103
      input_line_pointer++;
1104
      expression (&exp);
1105
    }
1106
 
1107
  if (exp.X_op != O_constant)
1108
    {
1109
      as_bad (_("expected #constant"));
1110
      ignore_rest_of_line ();
1111
      return FAIL;
1112
    }
1113
  *val = exp.X_add_number;
1114
  return SUCCESS;
1115
}
1116
#endif
1117
 
1118
/* Register parsing.  */
1119
 
1120
/* Generic register parser.  CCP points to what should be the
1121
   beginning of a register name.  If it is indeed a valid register
1122
   name, advance CCP over it and return the reg_entry structure;
1123
   otherwise return NULL.  Does not issue diagnostics.  */
1124
 
1125
static struct reg_entry *
1126
arm_reg_parse_multi (char **ccp)
1127
{
1128
  char *start = *ccp;
1129
  char *p;
1130
  struct reg_entry *reg;
1131
 
1132
#ifdef REGISTER_PREFIX
1133
  if (*start != REGISTER_PREFIX)
1134
    return NULL;
1135
  start++;
1136
#endif
1137
#ifdef OPTIONAL_REGISTER_PREFIX
1138
  if (*start == OPTIONAL_REGISTER_PREFIX)
1139
    start++;
1140
#endif
1141
 
1142
  p = start;
1143
  if (!ISALPHA (*p) || !is_name_beginner (*p))
1144
    return NULL;
1145
 
1146
  do
1147
    p++;
1148
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1149
 
1150
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1151
 
1152
  if (!reg)
1153
    return NULL;
1154
 
1155
  *ccp = p;
1156
  return reg;
1157
}
1158
 
1159
static int
1160
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1161
                    enum arm_reg_type type)
1162
{
1163
  /* Alternative syntaxes are accepted for a few register classes.  */
1164
  switch (type)
1165
    {
1166
    case REG_TYPE_MVF:
1167
    case REG_TYPE_MVD:
1168
    case REG_TYPE_MVFX:
1169
    case REG_TYPE_MVDX:
1170
      /* Generic coprocessor register names are allowed for these.  */
1171
      if (reg && reg->type == REG_TYPE_CN)
1172
        return reg->number;
1173
      break;
1174
 
1175
    case REG_TYPE_CP:
1176
      /* For backward compatibility, a bare number is valid here.  */
1177
      {
1178
        unsigned long processor = strtoul (start, ccp, 10);
1179
        if (*ccp != start && processor <= 15)
1180
          return processor;
1181
      }
1182
 
1183
    case REG_TYPE_MMXWC:
1184
      /* WC includes WCG.  ??? I'm not sure this is true for all
1185
         instructions that take WC registers.  */
1186
      if (reg && reg->type == REG_TYPE_MMXWCG)
1187
        return reg->number;
1188
      break;
1189
 
1190
    default:
1191
      break;
1192
    }
1193
 
1194
  return FAIL;
1195
}
1196
 
1197
/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1198
   return value is the register number or FAIL.  */
1199
 
1200
static int
1201
arm_reg_parse (char **ccp, enum arm_reg_type type)
1202
{
1203
  char *start = *ccp;
1204
  struct reg_entry *reg = arm_reg_parse_multi (ccp);
1205
  int ret;
1206
 
1207
  /* Do not allow a scalar (reg+index) to parse as a register.  */
1208
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1209
    return FAIL;
1210
 
1211
  if (reg && reg->type == type)
1212
    return reg->number;
1213
 
1214
  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1215
    return ret;
1216
 
1217
  *ccp = start;
1218
  return FAIL;
1219
}
1220
 
1221
/* Parse a Neon type specifier. *STR should point at the leading '.'
1222
   character. Does no verification at this stage that the type fits the opcode
1223
   properly. E.g.,
1224
 
1225
     .i32.i32.s16
1226
     .s32.f32
1227
     .u16
1228
 
1229
   Can all be legally parsed by this function.
1230
 
1231
   Fills in neon_type struct pointer with parsed information, and updates STR
1232
   to point after the parsed type specifier. Returns SUCCESS if this was a legal
1233
   type, FAIL if not.  */
1234
 
1235
static int
1236
parse_neon_type (struct neon_type *type, char **str)
1237
{
1238
  char *ptr = *str;
1239
 
1240
  if (type)
1241
    type->elems = 0;
1242
 
1243
  while (type->elems < NEON_MAX_TYPE_ELS)
1244
    {
1245
      enum neon_el_type thistype = NT_untyped;
1246
      unsigned thissize = -1u;
1247
 
1248
      if (*ptr != '.')
1249
        break;
1250
 
1251
      ptr++;
1252
 
1253
      /* Just a size without an explicit type.  */
1254
      if (ISDIGIT (*ptr))
1255
        goto parsesize;
1256
 
1257
      switch (TOLOWER (*ptr))
1258
        {
1259
        case 'i': thistype = NT_integer; break;
1260
        case 'f': thistype = NT_float; break;
1261
        case 'p': thistype = NT_poly; break;
1262
        case 's': thistype = NT_signed; break;
1263
        case 'u': thistype = NT_unsigned; break;
1264
        case 'd':
1265
          thistype = NT_float;
1266
          thissize = 64;
1267
          ptr++;
1268
          goto done;
1269
        default:
1270
          as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1271
          return FAIL;
1272
        }
1273
 
1274
      ptr++;
1275
 
1276
      /* .f is an abbreviation for .f32.  */
1277
      if (thistype == NT_float && !ISDIGIT (*ptr))
1278
        thissize = 32;
1279
      else
1280
        {
1281
        parsesize:
1282
          thissize = strtoul (ptr, &ptr, 10);
1283
 
1284
          if (thissize != 8 && thissize != 16 && thissize != 32
1285
              && thissize != 64)
1286
            {
1287
              as_bad (_("bad size %d in type specifier"), thissize);
1288
              return FAIL;
1289
            }
1290
        }
1291
 
1292
      done:
1293
      if (type)
1294
        {
1295
          type->el[type->elems].type = thistype;
1296
          type->el[type->elems].size = thissize;
1297
          type->elems++;
1298
        }
1299
    }
1300
 
1301
  /* Empty/missing type is not a successful parse.  */
1302
  if (type->elems == 0)
1303
    return FAIL;
1304
 
1305
  *str = ptr;
1306
 
1307
  return SUCCESS;
1308
}
1309
 
1310
/* Errors may be set multiple times during parsing or bit encoding
1311
   (particularly in the Neon bits), but usually the earliest error which is set
1312
   will be the most meaningful. Avoid overwriting it with later (cascading)
1313
   errors by calling this function.  */
1314
 
1315
static void
1316
first_error (const char *err)
1317
{
1318
  if (!inst.error)
1319
    inst.error = err;
1320
}
1321
 
1322
/* Parse a single type, e.g. ".s32", leading period included.  */
1323
static int
1324
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1325
{
1326
  char *str = *ccp;
1327
  struct neon_type optype;
1328
 
1329
  if (*str == '.')
1330
    {
1331
      if (parse_neon_type (&optype, &str) == SUCCESS)
1332
        {
1333
          if (optype.elems == 1)
1334
            *vectype = optype.el[0];
1335
          else
1336
            {
1337
              first_error (_("only one type should be specified for operand"));
1338
              return FAIL;
1339
            }
1340
        }
1341
      else
1342
        {
1343
          first_error (_("vector type expected"));
1344
          return FAIL;
1345
        }
1346
    }
1347
  else
1348
    return FAIL;
1349
 
1350
  *ccp = str;
1351
 
1352
  return SUCCESS;
1353
}
1354
 
1355
/* Special meanings for indices (which have a range of 0-7), which will fit into
1356
   a 4-bit integer.  */
1357
 
1358
#define NEON_ALL_LANES          15
1359
#define NEON_INTERLEAVE_LANES   14
1360
 
1361
/* Parse either a register or a scalar, with an optional type. Return the
1362
   register number, and optionally fill in the actual type of the register
1363
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1364
   type/index information in *TYPEINFO.  */
1365
 
1366
static int
1367
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1368
                           enum arm_reg_type *rtype,
1369
                           struct neon_typed_alias *typeinfo)
1370
{
1371
  char *str = *ccp;
1372
  struct reg_entry *reg = arm_reg_parse_multi (&str);
1373
  struct neon_typed_alias atype;
1374
  struct neon_type_el parsetype;
1375
 
1376
  atype.defined = 0;
1377
  atype.index = -1;
1378
  atype.eltype.type = NT_invtype;
1379
  atype.eltype.size = -1;
1380
 
1381
  /* Try alternate syntax for some types of register. Note these are mutually
1382
     exclusive with the Neon syntax extensions.  */
1383
  if (reg == NULL)
1384
    {
1385
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1386
      if (altreg != FAIL)
1387
        *ccp = str;
1388
      if (typeinfo)
1389
        *typeinfo = atype;
1390
      return altreg;
1391
    }
1392
 
1393
  /* Undo polymorphism when a set of register types may be accepted.  */
1394
  if ((type == REG_TYPE_NDQ
1395
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1396
      || (type == REG_TYPE_VFSD
1397
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1398
      || (type == REG_TYPE_NSDQ
1399
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1400
              || reg->type == REG_TYPE_NQ))
1401
      || (type == REG_TYPE_MMXWC
1402
          && (reg->type == REG_TYPE_MMXWCG)))
1403
    type = (enum arm_reg_type) reg->type;
1404
 
1405
  if (type != reg->type)
1406
    return FAIL;
1407
 
1408
  if (reg->neon)
1409
    atype = *reg->neon;
1410
 
1411
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1412
    {
1413
      if ((atype.defined & NTA_HASTYPE) != 0)
1414
        {
1415
          first_error (_("can't redefine type for operand"));
1416
          return FAIL;
1417
        }
1418
      atype.defined |= NTA_HASTYPE;
1419
      atype.eltype = parsetype;
1420
    }
1421
 
1422
  if (skip_past_char (&str, '[') == SUCCESS)
1423
    {
1424
      if (type != REG_TYPE_VFD)
1425
        {
1426
          first_error (_("only D registers may be indexed"));
1427
          return FAIL;
1428
        }
1429
 
1430
      if ((atype.defined & NTA_HASINDEX) != 0)
1431
        {
1432
          first_error (_("can't change index for operand"));
1433
          return FAIL;
1434
        }
1435
 
1436
      atype.defined |= NTA_HASINDEX;
1437
 
1438
      if (skip_past_char (&str, ']') == SUCCESS)
1439
        atype.index = NEON_ALL_LANES;
1440
      else
1441
        {
1442
          expressionS exp;
1443
 
1444
          my_get_expression (&exp, &str, GE_NO_PREFIX);
1445
 
1446
          if (exp.X_op != O_constant)
1447
            {
1448
              first_error (_("constant expression required"));
1449
              return FAIL;
1450
            }
1451
 
1452
          if (skip_past_char (&str, ']') == FAIL)
1453
            return FAIL;
1454
 
1455
          atype.index = exp.X_add_number;
1456
        }
1457
    }
1458
 
1459
  if (typeinfo)
1460
    *typeinfo = atype;
1461
 
1462
  if (rtype)
1463
    *rtype = type;
1464
 
1465
  *ccp = str;
1466
 
1467
  return reg->number;
1468
}
1469
 
1470
/* Like arm_reg_parse, but allow the following extra features:
1471
    - If RTYPE is non-zero, return the (possibly restricted) type of the
1472
      register (e.g. Neon double or quad reg when either has been requested).
1473
    - If this is a Neon vector type with additional type information, fill
1474
      in the struct pointed to by VECTYPE (if non-NULL).
1475
   This function will fault on encountering a scalar.  */
1476
 
1477
static int
1478
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1479
                     enum arm_reg_type *rtype, struct neon_type_el *vectype)
1480
{
1481
  struct neon_typed_alias atype;
1482
  char *str = *ccp;
1483
  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1484
 
1485
  if (reg == FAIL)
1486
    return FAIL;
1487
 
1488
  /* Do not allow regname(... to parse as a register.  */
1489
  if (*str == '(')
1490
    return FAIL;
1491
 
1492
  /* Do not allow a scalar (reg+index) to parse as a register.  */
1493
  if ((atype.defined & NTA_HASINDEX) != 0)
1494
    {
1495
      first_error (_("register operand expected, but got scalar"));
1496
      return FAIL;
1497
    }
1498
 
1499
  if (vectype)
1500
    *vectype = atype.eltype;
1501
 
1502
  *ccp = str;
1503
 
1504
  return reg;
1505
}
1506
 
1507
#define NEON_SCALAR_REG(X)      ((X) >> 4)
1508
#define NEON_SCALAR_INDEX(X)    ((X) & 15)
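/* Editorial note (not part of the upstream source): parse_scalar below packs
   a scalar as (register * 16 + lane), so "d3[2]" yields 50, and then
   NEON_SCALAR_REG (50) == 3 and NEON_SCALAR_INDEX (50) == 2.  */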
1509
 
1510
/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1511
   have enough information to be able to do a good job bounds-checking. So, we
1512
   just do easy checks here, and do further checks later.  */
1513
 
1514
static int
1515
parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1516
{
1517
  int reg;
1518
  char *str = *ccp;
1519
  struct neon_typed_alias atype;
1520
 
1521
  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1522
 
1523
  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1524
    return FAIL;
1525
 
1526
  if (atype.index == NEON_ALL_LANES)
1527
    {
1528
      first_error (_("scalar must have an index"));
1529
      return FAIL;
1530
    }
1531
  else if (atype.index >= 64 / elsize)
1532
    {
1533
      first_error (_("scalar index out of range"));
1534
      return FAIL;
1535
    }
1536
 
1537
  if (type)
1538
    *type = atype.eltype;
1539
 
1540
  *ccp = str;
1541
 
1542
  return reg * 16 + atype.index;
1543
}
1544
 
1545
/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
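/* Editorial example (not part of the upstream source): the bitmask has one
   bit per register, so "{r0, r2-r4}" parses to
   (1 << 0) | (1 << 2) | (1 << 3) | (1 << 4) == 0x1d.  */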
1546
 
1547
static long
1548
parse_reg_list (char ** strp)
1549
{
1550
  char * str = * strp;
1551
  long   range = 0;
1552
  int    another_range;
1553
 
1554
  /* We come back here if we get ranges concatenated by '+' or '|'.  */
1555
  do
1556
    {
1557
      another_range = 0;
1558
 
1559
      if (*str == '{')
1560
        {
1561
          int in_range = 0;
1562
          int cur_reg = -1;
1563
 
1564
          str++;
1565
          do
1566
            {
1567
              int reg;
1568
 
1569
              if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1570
                {
1571
                  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1572
                  return FAIL;
1573
                }
1574
 
1575
              if (in_range)
1576
                {
1577
                  int i;
1578
 
1579
                  if (reg <= cur_reg)
1580
                    {
1581
                      first_error (_("bad range in register list"));
1582
                      return FAIL;
1583
                    }
1584
 
1585
                  for (i = cur_reg + 1; i < reg; i++)
1586
                    {
1587
                      if (range & (1 << i))
1588
                        as_tsktsk
1589
                          (_("Warning: duplicated register (r%d) in register list"),
1590
                           i);
1591
                      else
1592
                        range |= 1 << i;
1593
                    }
1594
                  in_range = 0;
1595
                }
1596
 
1597
              if (range & (1 << reg))
1598
                as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1599
                           reg);
1600
              else if (reg <= cur_reg)
1601
                as_tsktsk (_("Warning: register range not in ascending order"));
1602
 
1603
              range |= 1 << reg;
1604
              cur_reg = reg;
1605
            }
1606
          while (skip_past_comma (&str) != FAIL
1607
                 || (in_range = 1, *str++ == '-'));
1608
          str--;
1609
 
1610
          if (*str++ != '}')
1611
            {
1612
              first_error (_("missing `}'"));
1613
              return FAIL;
1614
            }
1615
        }
1616
      else
1617
        {
1618
          expressionS exp;
1619
 
1620
          if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1621
            return FAIL;
1622
 
1623
          if (exp.X_op == O_constant)
1624
            {
1625
              if (exp.X_add_number
1626
                  != (exp.X_add_number & 0x0000ffff))
1627
                {
1628
                  inst.error = _("invalid register mask");
1629
                  return FAIL;
1630
                }
1631
 
1632
              if ((range & exp.X_add_number) != 0)
1633
                {
1634
                  int regno = range & exp.X_add_number;
1635
 
1636
                  regno &= -regno;
1637
                  regno = (1 << regno) - 1;
1638
                  as_tsktsk
1639
                    (_("Warning: duplicated register (r%d) in register list"),
1640
                     regno);
1641
                }
1642
 
1643
              range |= exp.X_add_number;
1644
            }
1645
          else
1646
            {
1647
              if (inst.reloc.type != 0)
1648
                {
1649
                  inst.error = _("expression too complex");
1650
                  return FAIL;
1651
                }
1652
 
1653
              memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1654
              inst.reloc.type = BFD_RELOC_ARM_MULTI;
1655
              inst.reloc.pc_rel = 0;
1656
            }
1657
        }
1658
 
1659
      if (*str == '|' || *str == '+')
1660
        {
1661
          str++;
1662
          another_range = 1;
1663
        }
1664
    }
1665
  while (another_range);
1666
 
1667
  *strp = str;
1668
  return range;
1669
}
1670
 
1671
/* Types of registers in a list.  */
1672
 
1673
enum reg_list_els
1674
{
1675
  REGLIST_VFP_S,
1676
  REGLIST_VFP_D,
1677
  REGLIST_NEON_D
1678
};
1679
 
1680
/* Parse a VFP register list.  If the string is invalid return FAIL.
1681
   Otherwise return the number of registers, and set PBASE to the first
1682
   register.  Parses registers of type ETYPE.
1683
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1684
     - Q registers can be used to specify pairs of D registers
1685
     - { } can be omitted from around a singleton register list
1686
         FIXME: This is not implemented, as it would require backtracking in
1687
         some cases, e.g.:
1688
           vtbl.8 d3,d4,d5
1689
         This could be done (the meaning isn't really ambiguous), but doesn't
1690
         fit in well with the current parsing framework.
1691
     - 32 D registers may be used (also true for VFPv3).
1692
   FIXME: Types are ignored in these register lists, which is probably a
1693
   bug.  */
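/* Illustrative examples (editor's note, not part of the original source):
   given the operand string "{d0-d3}" the parser below returns a count of 4
   with *PBASE set to 0; "{s4, s5, s6}" returns 3 with *PBASE set to 4.  A
   list with a gap, e.g. "{d0, d2}", fails the final contiguity check and is
   rejected with "non-contiguous register range".  */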
1694
 
1695
static int
1696
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1697
{
1698
  char *str = *ccp;
1699
  int base_reg;
1700
  int new_base;
1701
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
1702
  int max_regs = 0;
1703
  int count = 0;
1704
  int warned = 0;
1705
  unsigned long mask = 0;
1706
  int i;
1707
 
1708
  if (*str != '{')
1709
    {
1710
      inst.error = _("expecting {");
1711
      return FAIL;
1712
    }
1713
 
1714
  str++;
1715
 
1716
  switch (etype)
1717
    {
1718
    case REGLIST_VFP_S:
1719
      regtype = REG_TYPE_VFS;
1720
      max_regs = 32;
1721
      break;
1722
 
1723
    case REGLIST_VFP_D:
1724
      regtype = REG_TYPE_VFD;
1725
      break;
1726
 
1727
    case REGLIST_NEON_D:
1728
      regtype = REG_TYPE_NDQ;
1729
      break;
1730
    }
1731
 
1732
  if (etype != REGLIST_VFP_S)
1733
    {
1734
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
1735
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1736
        {
1737
          max_regs = 32;
1738
          if (thumb_mode)
1739
            ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1740
                                    fpu_vfp_ext_d32);
1741
          else
1742
            ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1743
                                    fpu_vfp_ext_d32);
1744
        }
1745
      else
1746
        max_regs = 16;
1747
    }
1748
 
1749
  base_reg = max_regs;
1750
 
1751
  do
1752
    {
1753
      int setmask = 1, addregs = 1;
1754
 
1755
      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1756
 
1757
      if (new_base == FAIL)
1758
        {
1759
          first_error (_(reg_expected_msgs[regtype]));
1760
          return FAIL;
1761
        }
1762
 
1763
      if (new_base >= max_regs)
1764
        {
1765
          first_error (_("register out of range in list"));
1766
          return FAIL;
1767
        }
1768
 
1769
      /* Note: a value of 2 * n is returned for the register Q<n>.  */
1770
      if (regtype == REG_TYPE_NQ)
1771
        {
1772
          setmask = 3;
1773
          addregs = 2;
1774
        }
1775
 
1776
      if (new_base < base_reg)
1777
        base_reg = new_base;
1778
 
1779
      if (mask & (setmask << new_base))
1780
        {
1781
          first_error (_("invalid register list"));
1782
          return FAIL;
1783
        }
1784
 
1785
      if ((mask >> new_base) != 0 && ! warned)
1786
        {
1787
          as_tsktsk (_("register list not in ascending order"));
1788
          warned = 1;
1789
        }
1790
 
1791
      mask |= setmask << new_base;
1792
      count += addregs;
1793
 
1794
      if (*str == '-') /* We have the start of a range expression */
1795
        {
1796
          int high_range;
1797
 
1798
          str++;
1799
 
1800
          if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1801
              == FAIL)
1802
            {
1803
              inst.error = gettext (reg_expected_msgs[regtype]);
1804
              return FAIL;
1805
            }
1806
 
1807
          if (high_range >= max_regs)
1808
            {
1809
              first_error (_("register out of range in list"));
1810
              return FAIL;
1811
            }
1812
 
1813
          if (regtype == REG_TYPE_NQ)
1814
            high_range = high_range + 1;
1815
 
1816
          if (high_range <= new_base)
1817
            {
1818
              inst.error = _("register range not in ascending order");
1819
              return FAIL;
1820
            }
1821
 
1822
          for (new_base += addregs; new_base <= high_range; new_base += addregs)
1823
            {
1824
              if (mask & (setmask << new_base))
1825
                {
1826
                  inst.error = _("invalid register list");
1827
                  return FAIL;
1828
                }
1829
 
1830
              mask |= setmask << new_base;
1831
              count += addregs;
1832
            }
1833
        }
1834
    }
1835
  while (skip_past_comma (&str) != FAIL);
1836
 
1837
  str++;
1838
 
1839
  /* Sanity check -- should have raised a parse error above.  */
1840
  if (count == 0 || count > max_regs)
1841
    abort ();
1842
 
1843
  *pbase = base_reg;
1844
 
1845
  /* Final test -- the registers must be consecutive.  */
1846
  mask >>= base_reg;
1847
  for (i = 0; i < count; i++)
1848
    {
1849
      if ((mask & (1u << i)) == 0)
1850
        {
1851
          inst.error = _("non-contiguous register range");
1852
          return FAIL;
1853
        }
1854
    }
1855
 
1856
  *ccp = str;
1857
 
1858
  return count;
1859
}
1860
 
1861
/* True if two alias types are the same.  */
1862
 
1863
static bfd_boolean
1864
neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1865
{
1866
  if (!a && !b)
1867
    return TRUE;
1868
 
1869
  if (!a || !b)
1870
    return FALSE;
1871
 
1872
  if (a->defined != b->defined)
1873
    return FALSE;
1874
 
1875
  if ((a->defined & NTA_HASTYPE) != 0
1876
      && (a->eltype.type != b->eltype.type
1877
          || a->eltype.size != b->eltype.size))
1878
    return FALSE;
1879
 
1880
  if ((a->defined & NTA_HASINDEX) != 0
1881
      && (a->index != b->index))
1882
    return FALSE;
1883
 
1884
  return TRUE;
1885
}
1886
 
1887
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1888
   The base register is put in *PBASE.
1889
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1890
   the return value.
1891
   The register stride (minus one) is put in bit 4 of the return value.
1892
   Bits [6:5] encode the list length (minus one).
1893
   The type of the list elements is put in *ELTYPE, if non-NULL.  */
1894
 
1895
#define NEON_LANE(X)            ((X) & 0xf)
1896
#define NEON_REG_STRIDE(X)      ((((X) >> 4) & 1) + 1)
1897
#define NEON_REGLIST_LENGTH(X)  ((((X) >> 5) & 3) + 1)
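/* Illustrative sketch (editor's note, not part of the original source):
   for the element list "{d0[2], d2[2]}" the parser below sees lane 2, a
   register stride of 2 and a list length of 2, so it returns
   2 | ((2 - 1) << 4) | ((2 - 1) << 5) == 0x32, which the macros above
   decode as shown here.  */
#if 0   /* Example only; never compiled.  */
static void
example_decode_neon_list_return (void)
{
  int ret = 0x32;
  gas_assert (NEON_LANE (ret) == 2);
  gas_assert (NEON_REG_STRIDE (ret) == 2);
  gas_assert (NEON_REGLIST_LENGTH (ret) == 2);
}
#endif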
1898
 
1899
static int
1900
parse_neon_el_struct_list (char **str, unsigned *pbase,
1901
                           struct neon_type_el *eltype)
1902
{
1903
  char *ptr = *str;
1904
  int base_reg = -1;
1905
  int reg_incr = -1;
1906
  int count = 0;
1907
  int lane = -1;
1908
  int leading_brace = 0;
1909
  enum arm_reg_type rtype = REG_TYPE_NDQ;
1910
  const char *const incr_error = _("register stride must be 1 or 2");
1911
  const char *const type_error = _("mismatched element/structure types in list");
1912
  struct neon_typed_alias firsttype;
1913
 
1914
  if (skip_past_char (&ptr, '{') == SUCCESS)
1915
    leading_brace = 1;
1916
 
1917
  do
1918
    {
1919
      struct neon_typed_alias atype;
1920
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1921
 
1922
      if (getreg == FAIL)
1923
        {
1924
          first_error (_(reg_expected_msgs[rtype]));
1925
          return FAIL;
1926
        }
1927
 
1928
      if (base_reg == -1)
1929
        {
1930
          base_reg = getreg;
1931
          if (rtype == REG_TYPE_NQ)
1932
            {
1933
              reg_incr = 1;
1934
            }
1935
          firsttype = atype;
1936
        }
1937
      else if (reg_incr == -1)
1938
        {
1939
          reg_incr = getreg - base_reg;
1940
          if (reg_incr < 1 || reg_incr > 2)
1941
            {
1942
              first_error (_(incr_error));
1943
              return FAIL;
1944
            }
1945
        }
1946
      else if (getreg != base_reg + reg_incr * count)
1947
        {
1948
          first_error (_(incr_error));
1949
          return FAIL;
1950
        }
1951
 
1952
      if (! neon_alias_types_same (&atype, &firsttype))
1953
        {
1954
          first_error (_(type_error));
1955
          return FAIL;
1956
        }
1957
 
1958
      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1959
         modes.  */
1960
      if (ptr[0] == '-')
1961
        {
1962
          struct neon_typed_alias htype;
1963
          int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1964
          if (lane == -1)
1965
            lane = NEON_INTERLEAVE_LANES;
1966
          else if (lane != NEON_INTERLEAVE_LANES)
1967
            {
1968
              first_error (_(type_error));
1969
              return FAIL;
1970
            }
1971
          if (reg_incr == -1)
1972
            reg_incr = 1;
1973
          else if (reg_incr != 1)
1974
            {
1975
              first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1976
              return FAIL;
1977
            }
1978
          ptr++;
1979
          hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1980
          if (hireg == FAIL)
1981
            {
1982
              first_error (_(reg_expected_msgs[rtype]));
1983
              return FAIL;
1984
            }
1985
          if (! neon_alias_types_same (&htype, &firsttype))
1986
            {
1987
              first_error (_(type_error));
1988
              return FAIL;
1989
            }
1990
          count += hireg + dregs - getreg;
1991
          continue;
1992
        }
1993
 
1994
      /* If we're using Q registers, we can't use [] or [n] syntax.  */
1995
      if (rtype == REG_TYPE_NQ)
1996
        {
1997
          count += 2;
1998
          continue;
1999
        }
2000
 
2001
      if ((atype.defined & NTA_HASINDEX) != 0)
2002
        {
2003
          if (lane == -1)
2004
            lane = atype.index;
2005
          else if (lane != atype.index)
2006
            {
2007
              first_error (_(type_error));
2008
              return FAIL;
2009
            }
2010
        }
2011
      else if (lane == -1)
2012
        lane = NEON_INTERLEAVE_LANES;
2013
      else if (lane != NEON_INTERLEAVE_LANES)
2014
        {
2015
          first_error (_(type_error));
2016
          return FAIL;
2017
        }
2018
      count++;
2019
    }
2020
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2021
 
2022
  /* No lane set by [x]. We must be interleaving structures.  */
2023
  if (lane == -1)
2024
    lane = NEON_INTERLEAVE_LANES;
2025
 
2026
  /* Sanity check.  */
2027
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2028
      || (count > 1 && reg_incr == -1))
2029
    {
2030
      first_error (_("error parsing element/structure list"));
2031
      return FAIL;
2032
    }
2033
 
2034
  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2035
    {
2036
      first_error (_("expected }"));
2037
      return FAIL;
2038
    }
2039
 
2040
  if (reg_incr == -1)
2041
    reg_incr = 1;
2042
 
2043
  if (eltype)
2044
    *eltype = firsttype.eltype;
2045
 
2046
  *pbase = base_reg;
2047
  *str = ptr;
2048
 
2049
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2050
}
2051
 
2052
/* Parse an explicit relocation suffix on an expression.  This is
2053
   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
2054
   arm_reloc_hsh contains no entries, so this function can only
2055
   succeed if there is no () after the word.  Returns -1 on error,
2056
   BFD_RELOC_UNUSED if there wasn't any suffix.  */
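/* Illustrative example (editor's note, not part of the original source):
   on ELF targets a directive such as

       .word sym(GOT)

   reaches this parser with the input pointing at "(GOT)"; the word between
   the parentheses is looked up in arm_reloc_hsh and the matching BFD reloc
   code is returned.  The exact set of accepted names depends on the reloc
   table built elsewhere in this file.  */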
2057
static int
2058
parse_reloc (char **str)
2059
{
2060
  struct reloc_entry *r;
2061
  char *p, *q;
2062
 
2063
  if (**str != '(')
2064
    return BFD_RELOC_UNUSED;
2065
 
2066
  p = *str + 1;
2067
  q = p;
2068
 
2069
  while (*q && *q != ')' && *q != ',')
2070
    q++;
2071
  if (*q != ')')
2072
    return -1;
2073
 
2074
  if ((r = (struct reloc_entry *)
2075
       hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2076
    return -1;
2077
 
2078
  *str = q + 1;
2079
  return r->reloc;
2080
}
2081
 
2082
/* Directives: register aliases.  */
2083
 
2084
static struct reg_entry *
2085
insert_reg_alias (char *str, unsigned number, int type)
2086
{
2087
  struct reg_entry *new_reg;
2088
  const char *name;
2089
 
2090
  if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2091
    {
2092
      if (new_reg->builtin)
2093
        as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2094
 
2095
      /* Only warn about a redefinition if it's not defined as the
2096
         same register.  */
2097
      else if (new_reg->number != number || new_reg->type != type)
2098
        as_warn (_("ignoring redefinition of register alias '%s'"), str);
2099
 
2100
      return NULL;
2101
    }
2102
 
2103
  name = xstrdup (str);
2104
  new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2105
 
2106
  new_reg->name = name;
2107
  new_reg->number = number;
2108
  new_reg->type = type;
2109
  new_reg->builtin = FALSE;
2110
  new_reg->neon = NULL;
2111
 
2112
  if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2113
    abort ();
2114
 
2115
  return new_reg;
2116
}
2117
 
2118
static void
2119
insert_neon_reg_alias (char *str, int number, int type,
2120
                       struct neon_typed_alias *atype)
2121
{
2122
  struct reg_entry *reg = insert_reg_alias (str, number, type);
2123
 
2124
  if (!reg)
2125
    {
2126
      first_error (_("attempt to redefine typed alias"));
2127
      return;
2128
    }
2129
 
2130
  if (atype)
2131
    {
2132
      reg->neon = (struct neon_typed_alias *)
2133
          xmalloc (sizeof (struct neon_typed_alias));
2134
      *reg->neon = *atype;
2135
    }
2136
}
2137
 
2138
/* Look for the .req directive.  This is of the form:
2139
 
2140
        new_register_name .req existing_register_name
2141
 
2142
   If we find one, or if it looks sufficiently like one that we want to
2143
   handle any error here, return TRUE.  Otherwise return FALSE.  */
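/* Illustrative example (editor's note, not part of the original source):

       acc .req r1

   creates the alias "acc" for r1; the code below then also tries to create
   the all-uppercase "ACC" and all-lowercase "acc" spellings, skipping any
   spelling that is identical to the one already created.  */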
2144
 
2145
static bfd_boolean
2146
create_register_alias (char * newname, char *p)
2147
{
2148
  struct reg_entry *old;
2149
  char *oldname, *nbuf;
2150
  size_t nlen;
2151
 
2152
  /* The input scrubber ensures that whitespace after the mnemonic is
2153
     collapsed to single spaces.  */
2154
  oldname = p;
2155
  if (strncmp (oldname, " .req ", 6) != 0)
2156
    return FALSE;
2157
 
2158
  oldname += 6;
2159
  if (*oldname == '\0')
2160
    return FALSE;
2161
 
2162
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2163
  if (!old)
2164
    {
2165
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2166
      return TRUE;
2167
    }
2168
 
2169
  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2170
     the desired alias name, and p points to its end.  If not, then
2171
     the desired alias name is in the global original_case_string.  */
2172
#ifdef TC_CASE_SENSITIVE
2173
  nlen = p - newname;
2174
#else
2175
  newname = original_case_string;
2176
  nlen = strlen (newname);
2177
#endif
2178
 
2179
  nbuf = (char *) alloca (nlen + 1);
2180
  memcpy (nbuf, newname, nlen);
2181
  nbuf[nlen] = '\0';
2182
 
2183
  /* Create aliases under the new name as stated; an all-lowercase
2184
     version of the new name; and an all-uppercase version of the new
2185
     name.  */
2186
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2187
    {
2188
      for (p = nbuf; *p; p++)
2189
        *p = TOUPPER (*p);
2190
 
2191
      if (strncmp (nbuf, newname, nlen))
2192
        {
2193
          /* If this attempt to create an additional alias fails, do not bother
2194
             trying to create the all-lower case alias.  We will fail and issue
2195
             a second, duplicate error message.  This situation arises when the
2196
             programmer does something like:
2197
               foo .req r0
2198
               Foo .req r1
2199
             The second .req creates the "Foo" alias but then fails to create
2200
             the artificial FOO alias because it has already been created by the
2201
             first .req.  */
2202
          if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2203
            return TRUE;
2204
        }
2205
 
2206
      for (p = nbuf; *p; p++)
2207
        *p = TOLOWER (*p);
2208
 
2209
      if (strncmp (nbuf, newname, nlen))
2210
        insert_reg_alias (nbuf, old->number, old->type);
2211
    }
2212
 
2213
  return TRUE;
2214
}
2215
 
2216
/* Create a Neon typed/indexed register alias using directives, e.g.:
2217
     X .dn d5.s32[1]
2218
     Y .qn 6.s16
2219
     Z .dn d7
2220
     T .dn Z[0]
2221
   These typed registers can be used instead of the types specified after the
2222
   Neon mnemonic, so long as all operands given have types. Types can also be
2223
   specified directly, e.g.:
2224
     vadd d0.s32, d1.s32, d2.s32  */
2225
 
2226
static bfd_boolean
2227
create_neon_reg_alias (char *newname, char *p)
2228
{
2229
  enum arm_reg_type basetype;
2230
  struct reg_entry *basereg;
2231
  struct reg_entry mybasereg;
2232
  struct neon_type ntype;
2233
  struct neon_typed_alias typeinfo;
2234
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
2235
  int namelen;
2236
 
2237
  typeinfo.defined = 0;
2238
  typeinfo.eltype.type = NT_invtype;
2239
  typeinfo.eltype.size = -1;
2240
  typeinfo.index = -1;
2241
 
2242
  nameend = p;
2243
 
2244
  if (strncmp (p, " .dn ", 5) == 0)
2245
    basetype = REG_TYPE_VFD;
2246
  else if (strncmp (p, " .qn ", 5) == 0)
2247
    basetype = REG_TYPE_NQ;
2248
  else
2249
    return FALSE;
2250
 
2251
  p += 5;
2252
 
2253
  if (*p == '\0')
2254
    return FALSE;
2255
 
2256
  basereg = arm_reg_parse_multi (&p);
2257
 
2258
  if (basereg && basereg->type != basetype)
2259
    {
2260
      as_bad (_("bad type for register"));
2261
      return FALSE;
2262
    }
2263
 
2264
  if (basereg == NULL)
2265
    {
2266
      expressionS exp;
2267
      /* Try parsing as an integer.  */
2268
      my_get_expression (&exp, &p, GE_NO_PREFIX);
2269
      if (exp.X_op != O_constant)
2270
        {
2271
          as_bad (_("expression must be constant"));
2272
          return FALSE;
2273
        }
2274
      basereg = &mybasereg;
2275
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2276
                                                  : exp.X_add_number;
2277
      basereg->neon = 0;
2278
    }
2279
 
2280
  if (basereg->neon)
2281
    typeinfo = *basereg->neon;
2282
 
2283
  if (parse_neon_type (&ntype, &p) == SUCCESS)
2284
    {
2285
      /* We got a type.  */
2286
      if (typeinfo.defined & NTA_HASTYPE)
2287
        {
2288
          as_bad (_("can't redefine the type of a register alias"));
2289
          return FALSE;
2290
        }
2291
 
2292
      typeinfo.defined |= NTA_HASTYPE;
2293
      if (ntype.elems != 1)
2294
        {
2295
          as_bad (_("you must specify a single type only"));
2296
          return FALSE;
2297
        }
2298
      typeinfo.eltype = ntype.el[0];
2299
    }
2300
 
2301
  if (skip_past_char (&p, '[') == SUCCESS)
2302
    {
2303
      expressionS exp;
2304
      /* We got a scalar index.  */
2305
 
2306
      if (typeinfo.defined & NTA_HASINDEX)
2307
        {
2308
          as_bad (_("can't redefine the index of a scalar alias"));
2309
          return FALSE;
2310
        }
2311
 
2312
      my_get_expression (&exp, &p, GE_NO_PREFIX);
2313
 
2314
      if (exp.X_op != O_constant)
2315
        {
2316
          as_bad (_("scalar index must be constant"));
2317
          return FALSE;
2318
        }
2319
 
2320
      typeinfo.defined |= NTA_HASINDEX;
2321
      typeinfo.index = exp.X_add_number;
2322
 
2323
      if (skip_past_char (&p, ']') == FAIL)
2324
        {
2325
          as_bad (_("expecting ]"));
2326
          return FALSE;
2327
        }
2328
    }
2329
 
2330
  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2331
     the desired alias name, and p points to its end.  If not, then
2332
     the desired alias name is in the global original_case_string.  */
2333
#ifdef TC_CASE_SENSITIVE
2334
  namelen = nameend - newname;
2335
#else
2336
  newname = original_case_string;
2337
  namelen = strlen (newname);
2338
#endif
2339
 
2340
  namebuf = (char *) alloca (namelen + 1);
2341
  strncpy (namebuf, newname, namelen);
2342
  namebuf[namelen] = '\0';
2343
 
2344
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
2345
                         typeinfo.defined != 0 ? &typeinfo : NULL);
2346
 
2347
  /* Insert name in all uppercase.  */
2348
  for (p = namebuf; *p; p++)
2349
    *p = TOUPPER (*p);
2350
 
2351
  if (strncmp (namebuf, newname, namelen))
2352
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2353
                           typeinfo.defined != 0 ? &typeinfo : NULL);
2354
 
2355
  /* Insert name in all lowercase.  */
2356
  for (p = namebuf; *p; p++)
2357
    *p = TOLOWER (*p);
2358
 
2359
  if (strncmp (namebuf, newname, namelen))
2360
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2361
                           typeinfo.defined != 0 ? &typeinfo : NULL);
2362
 
2363
  return TRUE;
2364
}
2365
 
2366
/* Should never be called, as .req goes between the alias and the
2367
   register name, not at the beginning of the line.  */
2368
 
2369
static void
2370
s_req (int a ATTRIBUTE_UNUSED)
2371
{
2372
  as_bad (_("invalid syntax for .req directive"));
2373
}
2374
 
2375
static void
2376
s_dn (int a ATTRIBUTE_UNUSED)
2377
{
2378
  as_bad (_("invalid syntax for .dn directive"));
2379
}
2380
 
2381
static void
2382
s_qn (int a ATTRIBUTE_UNUSED)
2383
{
2384
  as_bad (_("invalid syntax for .qn directive"));
2385
}
2386
 
2387
/* The .unreq directive deletes an alias which was previously defined
2388
   by .req.  For example:
2389
 
2390
       my_alias .req r11
2391
       .unreq my_alias    */
2392
 
2393
static void
2394
s_unreq (int a ATTRIBUTE_UNUSED)
2395
{
2396
  char * name;
2397
  char saved_char;
2398
 
2399
  name = input_line_pointer;
2400
 
2401
  while (*input_line_pointer != 0
2402
         && *input_line_pointer != ' '
2403
         && *input_line_pointer != '\n')
2404
    ++input_line_pointer;
2405
 
2406
  saved_char = *input_line_pointer;
2407
  *input_line_pointer = 0;
2408
 
2409
  if (!*name)
2410
    as_bad (_("invalid syntax for .unreq directive"));
2411
  else
2412
    {
2413
      struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2414
                                                              name);
2415
 
2416
      if (!reg)
2417
        as_bad (_("unknown register alias '%s'"), name);
2418
      else if (reg->builtin)
2419
        as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2420
                 name);
2421
      else
2422
        {
2423
          char * p;
2424
          char * nbuf;
2425
 
2426
          hash_delete (arm_reg_hsh, name, FALSE);
2427
          free ((char *) reg->name);
2428
          if (reg->neon)
2429
            free (reg->neon);
2430
          free (reg);
2431
 
2432
          /* Also locate the all upper case and all lower case versions.
2433
             Do not complain if we cannot find one or the other as it
2434
             was probably deleted above.  */
2435
 
2436
          nbuf = strdup (name);
2437
          for (p = nbuf; *p; p++)
2438
            *p = TOUPPER (*p);
2439
          reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2440
          if (reg)
2441
            {
2442
              hash_delete (arm_reg_hsh, nbuf, FALSE);
2443
              free ((char *) reg->name);
2444
              if (reg->neon)
2445
                free (reg->neon);
2446
              free (reg);
2447
            }
2448
 
2449
          for (p = nbuf; *p; p++)
2450
            *p = TOLOWER (*p);
2451
          reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2452
          if (reg)
2453
            {
2454
              hash_delete (arm_reg_hsh, nbuf, FALSE);
2455
              free ((char *) reg->name);
2456
              if (reg->neon)
2457
                free (reg->neon);
2458
              free (reg);
2459
            }
2460
 
2461
          free (nbuf);
2462
        }
2463
    }
2464
 
2465
  *input_line_pointer = saved_char;
2466
  demand_empty_rest_of_line ();
2467
}
2468
 
2469
/* Directives: Instruction set selection.  */
2470
 
2471
#ifdef OBJ_ELF
2472
/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2473
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2474
   Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2475
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
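/* Illustrative example (editor's note, not part of the original source):
   assembling

       .text
       add   r0, r0, r1        @ ARM code   -> "$a" emitted at offset 0
       .word 0x12345678        @ data       -> "$d" emitted at offset 4
       .thumb
       adds  r0, r0, r1        @ Thumb code -> "$t" emitted at offset 8

   produces one mapping symbol per state transition; consumers such as
   disassemblers and linkers use them to tell code from data.  */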
2476
 
2477
/* Create a new mapping symbol for the transition to STATE.  */
2478
 
2479
static void
2480
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2481
{
2482
  symbolS * symbolP;
2483
  const char * symname;
2484
  int type;
2485
 
2486
  switch (state)
2487
    {
2488
    case MAP_DATA:
2489
      symname = "$d";
2490
      type = BSF_NO_FLAGS;
2491
      break;
2492
    case MAP_ARM:
2493
      symname = "$a";
2494
      type = BSF_NO_FLAGS;
2495
      break;
2496
    case MAP_THUMB:
2497
      symname = "$t";
2498
      type = BSF_NO_FLAGS;
2499
      break;
2500
    default:
2501
      abort ();
2502
    }
2503
 
2504
  symbolP = symbol_new (symname, now_seg, value, frag);
2505
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2506
 
2507
  switch (state)
2508
    {
2509
    case MAP_ARM:
2510
      THUMB_SET_FUNC (symbolP, 0);
2511
      ARM_SET_THUMB (symbolP, 0);
2512
      ARM_SET_INTERWORK (symbolP, support_interwork);
2513
      break;
2514
 
2515
    case MAP_THUMB:
2516
      THUMB_SET_FUNC (symbolP, 1);
2517
      ARM_SET_THUMB (symbolP, 1);
2518
      ARM_SET_INTERWORK (symbolP, support_interwork);
2519
      break;
2520
 
2521
    case MAP_DATA:
2522
    default:
2523
      break;
2524
    }
2525
 
2526
  /* Save the mapping symbols for future reference.  Also check that
2527
     we do not place two mapping symbols at the same offset within a
2528
     frag.  We'll handle overlap between frags in
2529
     check_mapping_symbols.
2530
 
2531
     If .fill or another data-filling directive generates zero-sized data,
2532
     the mapping symbol for the following code will have the same value
2533
     as the one generated for the data filling directive.  In this case,
2534
     we replace the old symbol with the new one at the same address.  */
2535
  if (value == 0)
2536
    {
2537
      if (frag->tc_frag_data.first_map != NULL)
2538
        {
2539
          know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2540
          symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2541
        }
2542
      frag->tc_frag_data.first_map = symbolP;
2543
    }
2544
  if (frag->tc_frag_data.last_map != NULL)
2545
    {
2546
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2547
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2548
        symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2549
    }
2550
  frag->tc_frag_data.last_map = symbolP;
2551
}
2552
 
2553
/* We must sometimes convert a region marked as code to data during
2554
   code alignment, if an odd number of bytes have to be padded.  The
2555
   code mapping symbol is pushed to an aligned address.  */
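/* Illustrative example (editor's note, not part of the original source):
   if 2 bytes of padding are needed to 4-byte align ARM code that follows
   Thumb code, the padding is marked with a "$d" mapping symbol at its start
   and the code mapping symbol is emitted BYTES later, at the aligned
   address.  */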
2556
 
2557
static void
2558
insert_data_mapping_symbol (enum mstate state,
2559
                            valueT value, fragS *frag, offsetT bytes)
2560
{
2561
  /* If there was already a mapping symbol, remove it.  */
2562
  if (frag->tc_frag_data.last_map != NULL
2563
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2564
    {
2565
      symbolS *symp = frag->tc_frag_data.last_map;
2566
 
2567
      if (value == 0)
2568
        {
2569
          know (frag->tc_frag_data.first_map == symp);
2570
          frag->tc_frag_data.first_map = NULL;
2571
        }
2572
      frag->tc_frag_data.last_map = NULL;
2573
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2574
    }
2575
 
2576
  make_mapping_symbol (MAP_DATA, value, frag);
2577
  make_mapping_symbol (state, value + bytes, frag);
2578
}
2579
 
2580
static void mapping_state_2 (enum mstate state, int max_chars);
2581
 
2582
/* Set the mapping state to STATE.  Only call this when about to
2583
   emit some STATE bytes to the file.  */
2584
 
2585
void
2586
mapping_state (enum mstate state)
2587
{
2588
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2589
 
2590
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
2591
 
2592
  if (mapstate == state)
2593
    /* The mapping symbol has already been emitted.
2594
       There is nothing else to do.  */
2595
    return;
2596 160 khays
 
2597
  if (state == MAP_ARM || state == MAP_THUMB)
2598
    /*  PR gas/12931
2599
        All ARM instructions require 4-byte alignment.
2600
        (Almost) all Thumb instructions require 2-byte alignment.
2601
 
2602
        When emitting instructions into any section, mark the section
2603
        appropriately.
2604
 
2605
        Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2606
        but themselves require 2-byte alignment; this applies to some
2607
        PC-relative forms.  However, these cases will involve implicit
2608
        literal pool generation or an explicit .align >=2, both of
2609
        which will cause the section to be marked with sufficient
2610
        alignment.  Thus, we don't handle those cases here.  */
2611
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2612
 
2613
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2614 16 khays
    /* This case will be evaluated later in the next else.  */
2615
    return;
2616
  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2617
          || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2618
    {
2619
      /* Only add the symbol if the offset is > 0:
2620
         if we're at the first frag, check its size > 0;
2621
         if we're not at the first frag, then for sure
2622
            the offset is > 0.  */
2623
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2624
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2625
 
2626
      if (add_symbol)
2627
        make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2628
    }
2629
 
2630
  mapping_state_2 (state, 0);
2631
#undef TRANSITION
2632
}
2633
 
2634
/* Same as mapping_state, but MAX_CHARS bytes have already been
2635
   allocated.  Put the mapping symbol that far back.  */
2636
 
2637
static void
2638
mapping_state_2 (enum mstate state, int max_chars)
2639
{
2640
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2641
 
2642
  if (!SEG_NORMAL (now_seg))
2643
    return;
2644
 
2645
  if (mapstate == state)
2646
    /* The mapping symbol has already been emitted.
2647
       There is nothing else to do.  */
2648
    return;
2649
 
2650
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2651
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2652
}
2653
#else
2654
#define mapping_state(x) ((void)0)
2655
#define mapping_state_2(x, y) ((void)0)
2656
#endif
2657
 
2658
/* Find the real, Thumb encoded start of a Thumb function.  */
2659
 
2660
#ifdef OBJ_COFF
2661
static symbolS *
2662
find_real_start (symbolS * symbolP)
2663
{
2664
  char *       real_start;
2665
  const char * name = S_GET_NAME (symbolP);
2666
  symbolS *    new_target;
2667
 
2668
  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
2669
#define STUB_NAME ".real_start_of"
2670
 
2671
  if (name == NULL)
2672
    abort ();
2673
 
2674
  /* The compiler may generate BL instructions to local labels because
2675
     it needs to perform a branch to a far away location. These labels
2676
     do not have a corresponding ".real_start_of" label.  We check
2677
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2678
     the ".real_start_of" convention for nonlocal branches.  */
2679
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
2680
    return symbolP;
2681
 
2682
  real_start = ACONCAT ((STUB_NAME, name, NULL));
2683
  new_target = symbol_find (real_start);
2684
 
2685
  if (new_target == NULL)
2686
    {
2687
      as_warn (_("Failed to find real start of function: %s\n"), name);
2688
      new_target = symbolP;
2689
    }
2690
 
2691
  return new_target;
2692
}
2693
#endif
2694
 
2695
static void
2696
opcode_select (int width)
2697
{
2698
  switch (width)
2699
    {
2700
    case 16:
2701
      if (! thumb_mode)
2702
        {
2703
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2704
            as_bad (_("selected processor does not support THUMB opcodes"));
2705
 
2706
          thumb_mode = 1;
2707
          /* No need to force the alignment, since we will have been
2708
             coming from ARM mode, which is word-aligned.  */
2709
          record_alignment (now_seg, 1);
2710
        }
2711
      break;
2712
 
2713
    case 32:
2714
      if (thumb_mode)
2715
        {
2716
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2717
            as_bad (_("selected processor does not support ARM opcodes"));
2718
 
2719
          thumb_mode = 0;
2720
 
2721
          if (!need_pass_2)
2722
            frag_align (2, 0, 0);
2723
 
2724
          record_alignment (now_seg, 1);
2725
        }
2726
      break;
2727
 
2728
    default:
2729
      as_bad (_("invalid instruction size selected (%d)"), width);
2730
    }
2731
}
2732
 
2733
static void
2734
s_arm (int ignore ATTRIBUTE_UNUSED)
2735
{
2736
  opcode_select (32);
2737
  demand_empty_rest_of_line ();
2738
}
2739
 
2740
static void
2741
s_thumb (int ignore ATTRIBUTE_UNUSED)
2742
{
2743
  opcode_select (16);
2744
  demand_empty_rest_of_line ();
2745
}
2746
 
2747
static void
2748
s_code (int unused ATTRIBUTE_UNUSED)
2749
{
2750
  int temp;
2751
 
2752
  temp = get_absolute_expression ();
2753
  switch (temp)
2754
    {
2755
    case 16:
2756
    case 32:
2757
      opcode_select (temp);
2758
      break;
2759
 
2760
    default:
2761
      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2762
    }
2763
}
2764
 
2765
static void
2766
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2767
{
2768
  /* If we are not already in thumb mode go into it, EVEN if
2769
     the target processor does not support thumb instructions.
2770
     This is used by gcc/config/arm/lib1funcs.asm for example
2771
     to compile interworking support functions even if the
2772
     target processor should not support interworking.  */
2773
  if (! thumb_mode)
2774
    {
2775
      thumb_mode = 2;
2776
      record_alignment (now_seg, 1);
2777
    }
2778
 
2779
  demand_empty_rest_of_line ();
2780
}
2781
 
2782
static void
2783
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2784
{
2785
  s_thumb (0);
2786
 
2787
  /* The following label is the name/address of the start of a Thumb function.
2788
     We need to know this for the interworking support.  */
2789
  label_is_thumb_function_name = TRUE;
2790
}
2791
 
2792
/* Perform a .set directive, but also mark the alias as
2793
   being a thumb function.  */
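/* Illustrative example (editor's note, not part of the original source):

       .thumb_set foo, bar

   behaves like ".set foo, bar" but additionally marks "foo" as a Thumb
   function, so that interworking code treats calls to it correctly.  */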
2794
 
2795
static void
2796
s_thumb_set (int equiv)
2797
{
2798
  /* XXX the following is a duplicate of the code for s_set() in read.c
2799
     We cannot just call that code as we need to get at the symbol that
2800
     is created.  */
2801
  char *    name;
2802
  char      delim;
2803
  char *    end_name;
2804
  symbolS * symbolP;
2805
 
2806
  /* Especial apologies for the random logic:
2807
     This just grew, and could be parsed much more simply!
2808
     Dean - in haste.  */
2809
  name      = input_line_pointer;
2810
  delim     = get_symbol_end ();
2811
  end_name  = input_line_pointer;
2812
  *end_name = delim;
2813
 
2814
  if (*input_line_pointer != ',')
2815
    {
2816
      *end_name = 0;
2817
      as_bad (_("expected comma after name \"%s\""), name);
2818
      *end_name = delim;
2819
      ignore_rest_of_line ();
2820
      return;
2821
    }
2822
 
2823
  input_line_pointer++;
2824
  *end_name = 0;
2825
 
2826
  if (name[0] == '.' && name[1] == '\0')
2827
    {
2828
      /* XXX - this should not happen to .thumb_set.  */
2829
      abort ();
2830
    }
2831
 
2832
  if ((symbolP = symbol_find (name)) == NULL
2833
      && (symbolP = md_undefined_symbol (name)) == NULL)
2834
    {
2835
#ifndef NO_LISTING
2836
      /* When doing symbol listings, play games with dummy fragments living
2837
         outside the normal fragment chain to record the file and line info
2838
         for this symbol.  */
2839
      if (listing & LISTING_SYMBOLS)
2840
        {
2841
          extern struct list_info_struct * listing_tail;
2842
          fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2843
 
2844
          memset (dummy_frag, 0, sizeof (fragS));
2845
          dummy_frag->fr_type = rs_fill;
2846
          dummy_frag->line = listing_tail;
2847
          symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2848
          dummy_frag->fr_symbol = symbolP;
2849
        }
2850
      else
2851
#endif
2852
        symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2853
 
2854
#ifdef OBJ_COFF
2855
      /* "set" symbols are local unless otherwise specified.  */
2856
      SF_SET_LOCAL (symbolP);
2857
#endif /* OBJ_COFF  */
2858
    }                           /* Make a new symbol.  */
2859
 
2860
  symbol_table_insert (symbolP);
2861
 
2862
  * end_name = delim;
2863
 
2864
  if (equiv
2865
      && S_IS_DEFINED (symbolP)
2866
      && S_GET_SEGMENT (symbolP) != reg_section)
2867
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2868
 
2869
  pseudo_set (symbolP);
2870
 
2871
  demand_empty_rest_of_line ();
2872
 
2873
  /* XXX Now we come to the Thumb specific bit of code.  */
2874
 
2875
  THUMB_SET_FUNC (symbolP, 1);
2876
  ARM_SET_THUMB (symbolP, 1);
2877
#if defined OBJ_ELF || defined OBJ_COFF
2878
  ARM_SET_INTERWORK (symbolP, support_interwork);
2879
#endif
2880
}
2881
 
2882
/* Directives: Mode selection.  */
2883
 
2884
/* .syntax [unified|divided] - choose the new unified syntax
2885
   (same for Arm and Thumb encoding, modulo slight differences in what
2886
   can be represented) or the old divergent syntax for each mode.  */
2887
static void
2888
s_syntax (int unused ATTRIBUTE_UNUSED)
2889
{
2890
  char *name, delim;
2891
 
2892
  name = input_line_pointer;
2893
  delim = get_symbol_end ();
2894
 
2895
  if (!strcasecmp (name, "unified"))
2896
    unified_syntax = TRUE;
2897
  else if (!strcasecmp (name, "divided"))
2898
    unified_syntax = FALSE;
2899
  else
2900
    {
2901
      as_bad (_("unrecognized syntax mode \"%s\""), name);
2902
      return;
2903
    }
2904
  *input_line_pointer = delim;
2905
  demand_empty_rest_of_line ();
2906
}
2907
 
2908
/* Directives: sectioning and alignment.  */
2909
 
2910
/* Same as s_align_ptwo but align 0 => align 2.  */
2911
 
2912
static void
2913
s_align (int unused ATTRIBUTE_UNUSED)
2914
{
2915
  int temp;
2916
  bfd_boolean fill_p;
2917
  long temp_fill;
2918
  long max_alignment = 15;
2919
 
2920
  temp = get_absolute_expression ();
2921
  if (temp > max_alignment)
2922
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2923
  else if (temp < 0)
2924
    {
2925
      as_bad (_("alignment negative. 0 assumed."));
2926
      temp = 0;
2927
    }
2928
 
2929
  if (*input_line_pointer == ',')
2930
    {
2931
      input_line_pointer++;
2932
      temp_fill = get_absolute_expression ();
2933
      fill_p = TRUE;
2934
    }
2935
  else
2936
    {
2937
      fill_p = FALSE;
2938
      temp_fill = 0;
2939
    }
2940
 
2941
  if (!temp)
2942
    temp = 2;
2943
 
2944
  /* Only make a frag if we HAVE to.  */
2945
  if (temp && !need_pass_2)
2946
    {
2947
      if (!fill_p && subseg_text_p (now_seg))
2948
        frag_align_code (temp, 0);
2949
      else
2950
        frag_align (temp, (int) temp_fill, 0);
2951
    }
2952
  demand_empty_rest_of_line ();
2953
 
2954
  record_alignment (now_seg, temp);
2955
}
2956
 
2957
static void
2958
s_bss (int ignore ATTRIBUTE_UNUSED)
2959
{
2960
  /* We don't support putting frags in the BSS segment; we fake it by
2961
     marking in_bss, then looking at s_skip for clues.  */
2962
  subseg_set (bss_section, 0);
2963
  demand_empty_rest_of_line ();
2964
 
2965
#ifdef md_elf_section_change_hook
2966
  md_elf_section_change_hook ();
2967
#endif
2968
}
2969
 
2970
static void
2971
s_even (int ignore ATTRIBUTE_UNUSED)
2972
{
2973
  /* Never make frag if expect extra pass.  */
2974
  if (!need_pass_2)
2975
    frag_align (1, 0, 0);
2976
 
2977
  record_alignment (now_seg, 1);
2978
 
2979
  demand_empty_rest_of_line ();
2980
}
2981
 
2982
/* Directives: Literal pools.  */
2983
 
2984
static literal_pool *
2985
find_literal_pool (void)
2986
{
2987
  literal_pool * pool;
2988
 
2989
  for (pool = list_of_pools; pool != NULL; pool = pool->next)
2990
    {
2991
      if (pool->section == now_seg
2992
          && pool->sub_section == now_subseg)
2993
        break;
2994
    }
2995
 
2996
  return pool;
2997
}
2998
 
2999
static literal_pool *
3000
find_or_make_literal_pool (void)
3001
{
3002
  /* Next literal pool ID number.  */
3003
  static unsigned int latest_pool_num = 1;
3004
  literal_pool *      pool;
3005
 
3006
  pool = find_literal_pool ();
3007
 
3008
  if (pool == NULL)
3009
    {
3010
      /* Create a new pool.  */
3011
      pool = (literal_pool *) xmalloc (sizeof (* pool));
3012
      if (! pool)
3013
        return NULL;
3014
 
3015
      pool->next_free_entry = 0;
3016
      pool->section         = now_seg;
3017
      pool->sub_section     = now_subseg;
3018
      pool->next            = list_of_pools;
3019
      pool->symbol          = NULL;
3020
 
3021
      /* Add it to the list.  */
3022
      list_of_pools = pool;
3023
    }
3024
 
3025
  /* New pools, and emptied pools, will have a NULL symbol.  */
3026
  if (pool->symbol == NULL)
3027
    {
3028
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3029
                                    (valueT) 0, &zero_address_frag);
3030
      pool->id = latest_pool_num ++;
3031
    }
3032
 
3033
  /* Done.  */
3034
  return pool;
3035
}
3036
 
3037
/* Add the literal in the global 'inst'
3038
   structure to the relevant literal pool.  */
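/* Illustrative example (editor's note, not part of the original source):
   a pseudo-instruction such as "ldr r0, =0x12345678" cannot encode the
   constant directly, so its value is appended to the current section's
   literal pool here; the instruction's relocation is rewritten to reference
   the pool symbol plus (entry index * 4), and the data itself is emitted
   later when s_ltorg (".ltorg") flushes the pool.  */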
3039
 
3040
static int
3041
add_to_lit_pool (void)
3042
{
3043
  literal_pool * pool;
3044
  unsigned int entry;
3045
 
3046
  pool = find_or_make_literal_pool ();
3047
 
3048
  /* Check if this literal value is already in the pool.  */
3049
  for (entry = 0; entry < pool->next_free_entry; entry ++)
3050
    {
3051
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3052
          && (inst.reloc.exp.X_op == O_constant)
3053
          && (pool->literals[entry].X_add_number
3054
              == inst.reloc.exp.X_add_number)
3055
          && (pool->literals[entry].X_unsigned
3056
              == inst.reloc.exp.X_unsigned))
3057
        break;
3058
 
3059
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3060
          && (inst.reloc.exp.X_op == O_symbol)
3061
          && (pool->literals[entry].X_add_number
3062
              == inst.reloc.exp.X_add_number)
3063
          && (pool->literals[entry].X_add_symbol
3064
              == inst.reloc.exp.X_add_symbol)
3065
          && (pool->literals[entry].X_op_symbol
3066
              == inst.reloc.exp.X_op_symbol))
3067
        break;
3068
    }
3069
 
3070
  /* Do we need to create a new entry?  */
3071
  if (entry == pool->next_free_entry)
3072
    {
3073
      if (entry >= MAX_LITERAL_POOL_SIZE)
3074
        {
3075
          inst.error = _("literal pool overflow");
3076
          return FAIL;
3077
        }
3078
 
3079
      pool->literals[entry] = inst.reloc.exp;
3080 160 khays
#ifdef OBJ_ELF
3081
      /* PR ld/12974: Record the location of the first source line to reference
3082
         this entry in the literal pool.  If it turns out during linking that the
3083
         symbol does not exist we will be able to give an accurate line number for
3084
         the (first use of the) missing reference.  */
3085
      if (debug_type == DEBUG_DWARF2)
3086
        dwarf2_where (pool->locs + entry);
3087
#endif
3088 16 khays
      pool->next_free_entry += 1;
3089
    }
3090
 
3091
  inst.reloc.exp.X_op         = O_symbol;
3092
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
3093
  inst.reloc.exp.X_add_symbol = pool->symbol;
3094
 
3095
  return SUCCESS;
3096
}
3097
 
3098
/* Can't use symbol_new here, so have to create a symbol and then at
3099
   a later date assign it a value.  That's what these functions do.  */
3100
 
3101
static void
3102
symbol_locate (symbolS *    symbolP,
3103
               const char * name,       /* It is copied, the caller can modify.  */
3104
               segT         segment,    /* Segment identifier (SEG_<something>).  */
3105
               valueT       valu,       /* Symbol value.  */
3106
               fragS *      frag)       /* Associated fragment.  */
3107
{
3108
  unsigned int name_length;
3109
  char * preserved_copy_of_name;
3110
 
3111
  name_length = strlen (name) + 1;   /* +1 for \0.  */
3112
  obstack_grow (&notes, name, name_length);
3113
  preserved_copy_of_name = (char *) obstack_finish (&notes);
3114
 
3115
#ifdef tc_canonicalize_symbol_name
3116
  preserved_copy_of_name =
3117
    tc_canonicalize_symbol_name (preserved_copy_of_name);
3118
#endif
3119
 
3120
  S_SET_NAME (symbolP, preserved_copy_of_name);
3121
 
3122
  S_SET_SEGMENT (symbolP, segment);
3123
  S_SET_VALUE (symbolP, valu);
3124
  symbol_clear_list_pointers (symbolP);
3125
 
3126
  symbol_set_frag (symbolP, frag);
3127
 
3128
  /* Link to end of symbol chain.  */
3129
  {
3130
    extern int symbol_table_frozen;
3131
 
3132
    if (symbol_table_frozen)
3133
      abort ();
3134
  }
3135
 
3136
  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3137
 
3138
  obj_symbol_new_hook (symbolP);
3139
 
3140
#ifdef tc_symbol_new_hook
3141
  tc_symbol_new_hook (symbolP);
3142
#endif
3143
 
3144
#ifdef DEBUG_SYMS
3145
  verify_symbol_chain (symbol_rootP, symbol_lastP);
3146
#endif /* DEBUG_SYMS  */
3147
}
3148
 
3149
 
3150
static void
3151
s_ltorg (int ignored ATTRIBUTE_UNUSED)
3152
{
3153
  unsigned int entry;
3154
  literal_pool * pool;
3155
  char sym_name[20];
3156
 
3157
  pool = find_literal_pool ();
3158
  if (pool == NULL
3159
      || pool->symbol == NULL
3160
      || pool->next_free_entry == 0)
3161
    return;
3162
 
3163
  mapping_state (MAP_DATA);
3164
 
3165
  /* Align the pool, as it will be accessed with word loads.
3166
     Only make a frag if we have to.  */
3167
  if (!need_pass_2)
3168
    frag_align (2, 0, 0);
3169
 
3170
  record_alignment (now_seg, 2);
3171
 
3172
  sprintf (sym_name, "$$lit_\002%x", pool->id);
3173
 
3174
  symbol_locate (pool->symbol, sym_name, now_seg,
3175
                 (valueT) frag_now_fix (), frag_now);
3176
  symbol_table_insert (pool->symbol);
3177
 
3178
  ARM_SET_THUMB (pool->symbol, thumb_mode);
3179
 
3180
#if defined OBJ_COFF || defined OBJ_ELF
3181
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
3182
#endif
3183
 
3184
  for (entry = 0; entry < pool->next_free_entry; entry ++)
3185 160 khays
    {
3186
#ifdef OBJ_ELF
3187
      if (debug_type == DEBUG_DWARF2)
3188
        dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3189
#endif
3190
      /* First output the expression in the instruction to the pool.  */
3191
      emit_expr (&(pool->literals[entry]), 4); /* .word  */
3192
    }
3193 16 khays
 
3194
  /* Mark the pool as empty.  */
3195
  pool->next_free_entry = 0;
3196
  pool->symbol = NULL;
3197
}
3198
 
3199
#ifdef OBJ_ELF
3200
/* Forward declarations for functions below, in the MD interface
3201
   section.  */
3202
static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3203
static valueT create_unwind_entry (int);
3204
static void start_unwind_section (const segT, int);
3205
static void add_unwind_opcode (valueT, int);
3206
static void flush_pending_unwind (void);
3207
 
3208
/* Directives: Data.  */
3209
 
3210
static void
3211
s_arm_elf_cons (int nbytes)
3212
{
3213
  expressionS exp;
3214
 
3215
#ifdef md_flush_pending_output
3216
  md_flush_pending_output ();
3217
#endif
3218
 
3219
  if (is_it_end_of_statement ())
3220
    {
3221
      demand_empty_rest_of_line ();
3222
      return;
3223
    }
3224
 
3225
#ifdef md_cons_align
3226
  md_cons_align (nbytes);
3227
#endif
3228
 
3229
  mapping_state (MAP_DATA);
3230
  do
3231
    {
3232
      int reloc;
3233
      char *base = input_line_pointer;
3234
 
3235
      expression (& exp);
3236
 
3237
      if (exp.X_op != O_symbol)
3238
        emit_expr (&exp, (unsigned int) nbytes);
3239
      else
3240
        {
3241
          char *before_reloc = input_line_pointer;
3242
          reloc = parse_reloc (&input_line_pointer);
3243
          if (reloc == -1)
3244
            {
3245
              as_bad (_("unrecognized relocation suffix"));
3246
              ignore_rest_of_line ();
3247
              return;
3248
            }
3249
          else if (reloc == BFD_RELOC_UNUSED)
3250
            emit_expr (&exp, (unsigned int) nbytes);
3251
          else
3252
            {
3253
              reloc_howto_type *howto = (reloc_howto_type *)
3254
                  bfd_reloc_type_lookup (stdoutput,
3255
                                         (bfd_reloc_code_real_type) reloc);
3256
              int size = bfd_get_reloc_size (howto);
3257
 
3258
              if (reloc == BFD_RELOC_ARM_PLT32)
3259
                {
3260
                  as_bad (_("(plt) is only valid on branch targets"));
3261
                  reloc = BFD_RELOC_UNUSED;
3262
                  size = 0;
3263
                }
3264
 
3265
              if (size > nbytes)
3266
                as_bad (_("%s relocations do not fit in %d bytes"),
3267
                        howto->name, nbytes);
3268
              else
3269
                {
3270
                  /* We've parsed an expression stopping at O_symbol.
3271
                     But there may be more expression left now that we
3272
                     have parsed the relocation marker.  Parse it again.
3273
                     XXX Surely there is a cleaner way to do this.  */
3274
                  char *p = input_line_pointer;
3275
                  int offset;
3276
                  char *save_buf = (char *) alloca (input_line_pointer - base);
3277
                  memcpy (save_buf, base, input_line_pointer - base);
3278
                  memmove (base + (input_line_pointer - before_reloc),
3279
                           base, before_reloc - base);
3280
 
3281
                  input_line_pointer = base + (input_line_pointer-before_reloc);
3282
                  expression (&exp);
3283
                  memcpy (base, save_buf, p - base);
3284
 
3285
                  offset = nbytes - size;
3286
                  p = frag_more ((int) nbytes);
3287
                  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3288
                               size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3289
                }
3290
            }
3291
        }
3292
    }
3293
  while (*input_line_pointer++ == ',');
3294
 
3295
  /* Put terminator back into stream.  */
3296
  input_line_pointer --;
3297
  demand_empty_rest_of_line ();
3298
}
3299
 
3300
/* Emit an expression containing a 32-bit thumb instruction.
3301
   Implementation based on put_thumb32_insn.  */
3302
 
3303
static void
3304
emit_thumb32_expr (expressionS * exp)
3305
{
3306
  expressionS exp_high = *exp;
3307
 
3308
  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3309
  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3310
  exp->X_add_number &= 0xffff;
3311
  emit_expr (exp, (unsigned int) THUMB_SIZE);
3312
}
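/* Illustrative example (editor's note, not part of the original source):
   for the constant 0xf3af8000 (a 32-bit Thumb-2 encoding) the function
   above emits the high halfword 0xf3af first and then the low halfword
   0x8000, matching the halfword ordering used for Thumb-2 instructions.  */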
3313
 
3314
/*  Guess the instruction size based on the opcode.  */
3315
 
3316
static int
3317
thumb_insn_size (int opcode)
3318
{
3319
  if ((unsigned int) opcode < 0xe800u)
3320
    return 2;
3321
  else if ((unsigned int) opcode >= 0xe8000000u)
3322
    return 4;
3323
  else
3324
    return 0;
3325
}
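/* Illustrative sketch (editor's note, not part of the original source):
   opcodes below 0xe800 are 16-bit Thumb encodings, values of at least
   0xe8000000 are 32-bit Thumb-2 encodings, and anything in between is
   ambiguous (size 0), forcing the user to write .inst.n or .inst.w.  */
#if 0   /* Example only; never compiled.  */
static void
example_thumb_insn_size (void)
{
  gas_assert (thumb_insn_size (0x4668) == 2);        /* 16-bit encoding.  */
  gas_assert (thumb_insn_size (0xf000b800) == 4);    /* 32-bit encoding.  */
  gas_assert (thumb_insn_size (0xe800) == 0);        /* Ambiguous.  */
}
#endif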
3326
 
3327
static bfd_boolean
3328
emit_insn (expressionS *exp, int nbytes)
3329
{
3330
  int size = 0;
3331
 
3332
  if (exp->X_op == O_constant)
3333
    {
3334
      size = nbytes;
3335
 
3336
      if (size == 0)
3337
        size = thumb_insn_size (exp->X_add_number);
3338
 
3339
      if (size != 0)
3340
        {
3341
          if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3342
            {
3343
              as_bad (_(".inst.n operand too big. "\
3344
                        "Use .inst.w instead"));
3345
              size = 0;
3346
            }
3347
          else
3348
            {
3349
              if (now_it.state == AUTOMATIC_IT_BLOCK)
3350
                set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3351
              else
3352
                set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3353
 
3354
              if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3355
                emit_thumb32_expr (exp);
3356
              else
3357
                emit_expr (exp, (unsigned int) size);
3358
 
3359
              it_fsm_post_encode ();
3360
            }
3361
        }
3362
      else
3363
        as_bad (_("cannot determine Thumb instruction size. "   \
3364
                  "Use .inst.n/.inst.w instead"));
3365
    }
3366
  else
3367
    as_bad (_("constant expression required"));
3368
 
3369
  return (size != 0);
3370
}
3371
 
3372
/* Like s_arm_elf_cons but do not use md_cons_align and
3373
   set the mapping state to MAP_ARM/MAP_THUMB.  */
3374
 
3375
static void
3376
s_arm_elf_inst (int nbytes)
3377
{
3378
  if (is_it_end_of_statement ())
3379
    {
3380
      demand_empty_rest_of_line ();
3381
      return;
3382
    }
3383
 
3384
  /* Calling mapping_state () here will not change ARM/THUMB,
3385
     but will ensure we are not in the DATA state.  */
3386
 
3387
  if (thumb_mode)
3388
    mapping_state (MAP_THUMB);
3389
  else
3390
    {
3391
      if (nbytes != 0)
3392
        {
3393
          as_bad (_("width suffixes are invalid in ARM mode"));
3394
          ignore_rest_of_line ();
3395
          return;
3396
        }
3397
 
3398
      nbytes = 4;
3399
 
3400
      mapping_state (MAP_ARM);
3401
    }
3402
 
3403
  do
3404
    {
3405
      expressionS exp;
3406
 
3407
      expression (& exp);
3408
 
3409
      if (! emit_insn (& exp, nbytes))
3410
        {
3411
          ignore_rest_of_line ();
3412
          return;
3413
        }
3414
    }
3415
  while (*input_line_pointer++ == ',');
3416
 
3417
  /* Put terminator back into stream.  */
3418
  input_line_pointer --;
3419
  demand_empty_rest_of_line ();
3420
}
3421
 
3422
/* Parse a .rel31 directive.  */
3423
 
3424
static void
3425
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3426
{
3427
  expressionS exp;
3428
  char *p;
3429
  valueT highbit;
3430
 
3431
  highbit = 0;
3432
  if (*input_line_pointer == '1')
3433
    highbit = 0x80000000;
3434
  else if (*input_line_pointer != '0')
3435
    as_bad (_("expected 0 or 1"));
3436
 
3437
  input_line_pointer++;
3438
  if (*input_line_pointer != ',')
3439
    as_bad (_("missing comma"));
3440
  input_line_pointer++;
3441
 
3442
#ifdef md_flush_pending_output
3443
  md_flush_pending_output ();
3444
#endif
3445
 
3446
#ifdef md_cons_align
3447
  md_cons_align (4);
3448
#endif
3449
 
3450
  mapping_state (MAP_DATA);
3451
 
3452
  expression (&exp);
3453
 
3454
  p = frag_more (4);
3455
  md_number_to_chars (p, highbit, 4);
3456
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3457
               BFD_RELOC_ARM_PREL31);
3458
 
3459
  demand_empty_rest_of_line ();
3460
}
3461
 
3462
/* Directives: AEABI stack-unwind tables.  */
3463
 
3464
/* Parse an unwind_fnstart directive.  Simply records the current location.  */
3465
 
3466
static void
3467
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3468
{
3469
  demand_empty_rest_of_line ();
3470
  if (unwind.proc_start)
3471
    {
3472
      as_bad (_("duplicate .fnstart directive"));
3473
      return;
3474
    }
3475
 
3476
  /* Mark the start of the function.  */
3477
  unwind.proc_start = expr_build_dot ();
3478
 
3479
  /* Reset the rest of the unwind info.  */
3480
  unwind.opcode_count = 0;
3481
  unwind.table_entry = NULL;
3482
  unwind.personality_routine = NULL;
3483
  unwind.personality_index = -1;
3484
  unwind.frame_size = 0;
3485
  unwind.fp_offset = 0;
3486
  unwind.fp_reg = REG_SP;
3487
  unwind.fp_used = 0;
3488
  unwind.sp_restored = 0;
3489
}
3490
 
3491
 
3492
/* Parse a handlerdata directive.  Creates the exception handling table entry
3493
   for the function.  */
3494
 
3495
static void
3496
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3497
{
3498
  demand_empty_rest_of_line ();
3499
  if (!unwind.proc_start)
3500
    as_bad (MISSING_FNSTART);
3501
 
3502
  if (unwind.table_entry)
3503
    as_bad (_("duplicate .handlerdata directive"));
3504
 
3505
  create_unwind_entry (1);
3506
}
3507
 
3508
/* Parse an unwind_fnend directive.  Generates the index table entry.  */
3509
 
3510
static void
3511
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3512
{
3513
  long where;
3514
  char *ptr;
3515
  valueT val;
3516
  unsigned int marked_pr_dependency;
3517
 
3518
  demand_empty_rest_of_line ();
3519
 
3520
  if (!unwind.proc_start)
3521
    {
3522
      as_bad (_(".fnend directive without .fnstart"));
3523
      return;
3524
    }
3525
 
3526
  /* Add eh table entry.  */
3527
  if (unwind.table_entry == NULL)
3528
    val = create_unwind_entry (0);
3529
  else
3530
    val = 0;
3531
 
3532
  /* Add index table entry.  This is two words.  */
3533
  start_unwind_section (unwind.saved_seg, 1);
3534
  frag_align (2, 0, 0);
3535
  record_alignment (now_seg, 2);
3536
 
3537
  ptr = frag_more (8);
3538
  where = frag_now_fix () - 8;
3539
 
3540
  /* Self relative offset of the function start.  */
3541
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3542
           BFD_RELOC_ARM_PREL31);
3543
 
3544
  /* Indicate dependency on EHABI-defined personality routines to the
3545
     linker, if it hasn't been done already.  */
3546
  marked_pr_dependency
3547
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3548
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
3549
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
3550
    {
3551
      static const char *const name[] =
3552
        {
3553
          "__aeabi_unwind_cpp_pr0",
3554
          "__aeabi_unwind_cpp_pr1",
3555
          "__aeabi_unwind_cpp_pr2"
3556
        };
3557
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3558
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3559
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3560
        |= 1 << unwind.personality_index;
3561
    }
3562
 
3563
  if (val)
3564
    /* Inline exception table entry.  */
3565
    md_number_to_chars (ptr + 4, val, 4);
3566
  else
3567
    /* Self relative offset of the table entry.  */
3568
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3569
             BFD_RELOC_ARM_PREL31);
3570
 
3571
  /* Restore the original section.  */
3572
  subseg_set (unwind.saved_seg, unwind.saved_subseg);
3573
 
3574
  unwind.proc_start = NULL;
3575
}
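
/* Illustrative layout of the directives these handlers expect (register
   choices are hypothetical):

       .fnstart
       push    {r4, lr}
       .save   {r4, lr}
       ...
       .fnend

   .fnend emits an 8-byte index table entry: a PREL31 word referring back
   to the .fnstart location, followed by either an inline unwind word or
   a PREL31 reference to the table entry built by create_unwind_entry.  */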
3576
 
3577
 
3578
/* Parse an unwind_cantunwind directive.  */
3579
 
3580
static void
3581
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3582
{
3583
  demand_empty_rest_of_line ();
3584
  if (!unwind.proc_start)
3585
    as_bad (MISSING_FNSTART);
3586
 
3587
  if (unwind.personality_routine || unwind.personality_index != -1)
3588
    as_bad (_("personality routine specified for cantunwind frame"));
3589
 
3590
  unwind.personality_index = -2;
3591
}
3592
 
3593
 
3594
/* Parse a personalityindex directive.  */
3595
 
3596
static void
3597
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3598
{
3599
  expressionS exp;
3600
 
3601
  if (!unwind.proc_start)
3602
    as_bad (MISSING_FNSTART);
3603
 
3604
  if (unwind.personality_routine || unwind.personality_index != -1)
3605
    as_bad (_("duplicate .personalityindex directive"));
3606
 
3607
  expression (&exp);
3608
 
3609
  if (exp.X_op != O_constant
3610
      || exp.X_add_number < 0 || exp.X_add_number > 15)
3611
    {
3612
      as_bad (_("bad personality routine number"));
3613
      ignore_rest_of_line ();
3614
      return;
3615
    }
3616
 
3617
  unwind.personality_index = exp.X_add_number;
3618
 
3619
  demand_empty_rest_of_line ();
3620
}
3621
 
3622
 
3623
/* Parse a personality directive.  */
3624
 
3625
static void
3626
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3627
{
3628
  char *name, *p, c;
3629
 
3630
  if (!unwind.proc_start)
3631
    as_bad (MISSING_FNSTART);
3632
 
3633
  if (unwind.personality_routine || unwind.personality_index != -1)
3634
    as_bad (_("duplicate .personality directive"));
3635
 
3636
  name = input_line_pointer;
3637
  c = get_symbol_end ();
3638
  p = input_line_pointer;
3639
  unwind.personality_routine = symbol_find_or_make (name);
3640
  *p = c;
3641
  demand_empty_rest_of_line ();
3642
}
3643
 
3644
 
3645
/* Parse a directive saving core registers.  */
3646
 
3647
static void
3648
s_arm_unwind_save_core (void)
3649
{
3650
  valueT op;
3651
  long range;
3652
  int n;
3653
 
3654
  range = parse_reg_list (&input_line_pointer);
3655
  if (range == FAIL)
3656
    {
3657
      as_bad (_("expected register list"));
3658
      ignore_rest_of_line ();
3659
      return;
3660
    }
3661
 
3662
  demand_empty_rest_of_line ();
3663
 
3664
  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3665
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
3666
     ip because it is clobbered by calls.  */
3667
  if (unwind.sp_restored && unwind.fp_reg == 12
3668
      && (range & 0x3000) == 0x1000)
3669
    {
3670
      unwind.opcode_count--;
3671
      unwind.sp_restored = 0;
3672
      range = (range | 0x2000) & ~0x1000;
3673
      unwind.pending_offset = 0;
3674
    }
3675
 
3676
  /* Pop r4-r15.  */
3677
  if (range & 0xfff0)
3678
    {
3679
      /* See if we can use the short opcodes.  These pop a block of up to 8
3680
         registers starting with r4, plus maybe r14.  */
3681
      for (n = 0; n < 8; n++)
3682
        {
3683
          /* Break at the first non-saved register.      */
3684
          if ((range & (1 << (n + 4))) == 0)
3685
            break;
3686
        }
3687
      /* See if there are any other bits set.  */
3688
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3689
        {
3690
          /* Use the long form.  */
3691
          op = 0x8000 | ((range >> 4) & 0xfff);
3692
          add_unwind_opcode (op, 2);
3693
        }
3694
      else
3695
        {
3696
          /* Use the short form.  */
3697
          if (range & 0x4000)
3698
            op = 0xa8; /* Pop r14.      */
3699
          else
3700
            op = 0xa0; /* Do not pop r14.  */
3701
          op |= (n - 1);
3702
          add_unwind_opcode (op, 1);
3703
        }
3704
    }
3705
 
3706
  /* Pop r0-r3.  */
3707
  if (range & 0xf)
3708
    {
3709
      op = 0xb100 | (range & 0xf);
3710
      add_unwind_opcode (op, 2);
3711
    }
3712
 
3713
  /* Record the number of bytes pushed.  */
3714
  for (n = 0; n < 16; n++)
3715
    {
3716
      if (range & (1 << n))
3717
        unwind.frame_size += 4;
3718
    }
3719
}
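
/* Worked example (derived from the code above, not from the sources that
   use it): ".save {r4-r7, lr}" yields range == 0x40f0; the registers
   from r4 are contiguous (n == 4) and r14 is present, so the single
   short-form byte 0xa8 | (4 - 1) == 0xab is added.  ".save {r4, r7}" is
   not contiguous, so the long form 0x8000 | 0x0009 is used instead.
   Either way frame_size grows by 4 bytes per saved register.  */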
3720
 
3721
 
3722
/* Parse a directive saving FPA registers.  */
3723
 
3724
static void
3725
s_arm_unwind_save_fpa (int reg)
3726
{
3727
  expressionS exp;
3728
  int num_regs;
3729
  valueT op;
3730
 
3731
  /* Get the number of registers to transfer.  */
3732
  if (skip_past_comma (&input_line_pointer) != FAIL)
3733
    expression (&exp);
3734
  else
3735
    exp.X_op = O_illegal;
3736
 
3737
  if (exp.X_op != O_constant)
3738
    {
3739
      as_bad (_("expected , <constant>"));
3740
      ignore_rest_of_line ();
3741
      return;
3742
    }
3743
 
3744
  num_regs = exp.X_add_number;
3745
 
3746
  if (num_regs < 1 || num_regs > 4)
3747
    {
3748
      as_bad (_("number of registers must be in the range [1:4]"));
3749
      ignore_rest_of_line ();
3750
      return;
3751
    }
3752
 
3753
  demand_empty_rest_of_line ();
3754
 
3755
  if (reg == 4)
3756
    {
3757
      /* Short form.  */
3758
      op = 0xb4 | (num_regs - 1);
3759
      add_unwind_opcode (op, 1);
3760
    }
3761
  else
3762
    {
3763
      /* Long form.  */
3764
      op = 0xc800 | (reg << 4) | (num_regs - 1);
3765
      add_unwind_opcode (op, 2);
3766
    }
3767
  unwind.frame_size += num_regs * 12;
3768
}
3769
 
3770
 
3771
/* Parse a directive saving VFP registers for ARMv6 and above.  */
3772
 
3773
static void
3774
s_arm_unwind_save_vfp_armv6 (void)
3775
{
3776
  int count;
3777
  unsigned int start;
3778
  valueT op;
3779
  int num_vfpv3_regs = 0;
3780
  int num_regs_below_16;
3781
 
3782
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3783
  if (count == FAIL)
3784
    {
3785
      as_bad (_("expected register list"));
3786
      ignore_rest_of_line ();
3787
      return;
3788
    }
3789
 
3790
  demand_empty_rest_of_line ();
3791
 
3792
  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3793
     than FSTMX/FLDMX-style ones).  */
3794
 
3795
  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
3796
  if (start >= 16)
3797
    num_vfpv3_regs = count;
3798
  else if (start + count > 16)
3799
    num_vfpv3_regs = start + count - 16;
3800
 
3801
  if (num_vfpv3_regs > 0)
3802
    {
3803
      int start_offset = start > 16 ? start - 16 : 0;
3804
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3805
      add_unwind_opcode (op, 2);
3806
    }
3807
 
3808
  /* Generate opcode for registers numbered in the range 0 .. 15.  */
3809
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3810
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
3811
  if (num_regs_below_16 > 0)
3812
    {
3813
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3814
      add_unwind_opcode (op, 2);
3815
    }
3816
 
3817
  unwind.frame_size += count * 8;
3818
}
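
/* Worked example (derived from the code above): ".vsave {d8-d15}" gives
   start == 8 and count == 8 with no VFPv3 registers, so one opcode
   0xc900 | (8 << 4) | (8 - 1) == 0xc987 is added and frame_size grows by
   64 bytes.  ".vsave {d16-d17}" instead takes the 0xc800 form with a
   start offset of 0 and a count of 2.  */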
3819
 
3820
 
3821
/* Parse a directive saving VFP registers for pre-ARMv6.  */
3822
 
3823
static void
3824
s_arm_unwind_save_vfp (void)
3825
{
3826
  int count;
3827
  unsigned int reg;
3828
  valueT op;
3829
 
3830
  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3831
  if (count == FAIL)
3832
    {
3833
      as_bad (_("expected register list"));
3834
      ignore_rest_of_line ();
3835
      return;
3836
    }
3837
 
3838
  demand_empty_rest_of_line ();
3839
 
3840
  if (reg == 8)
3841
    {
3842
      /* Short form.  */
3843
      op = 0xb8 | (count - 1);
3844
      add_unwind_opcode (op, 1);
3845
    }
3846
  else
3847
    {
3848
      /* Long form.  */
3849
      op = 0xb300 | (reg << 4) | (count - 1);
3850
      add_unwind_opcode (op, 2);
3851
    }
3852
  unwind.frame_size += count * 8 + 4;
3853
}
3854
 
3855
 
3856
/* Parse a directive saving iWMMXt data registers.  */
3857
 
3858
static void
3859
s_arm_unwind_save_mmxwr (void)
3860
{
3861
  int reg;
3862
  int hi_reg;
3863
  int i;
3864
  unsigned mask = 0;
3865
  valueT op;
3866
 
3867
  if (*input_line_pointer == '{')
3868
    input_line_pointer++;
3869
 
3870
  do
3871
    {
3872
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3873
 
3874
      if (reg == FAIL)
3875
        {
3876
          as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3877
          goto error;
3878
        }
3879
 
3880
      if (mask >> reg)
3881
        as_tsktsk (_("register list not in ascending order"));
3882
      mask |= 1 << reg;
3883
 
3884
      if (*input_line_pointer == '-')
3885
        {
3886
          input_line_pointer++;
3887
          hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3888
          if (hi_reg == FAIL)
3889
            {
3890
              as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3891
              goto error;
3892
            }
3893
          else if (reg >= hi_reg)
3894
            {
3895
              as_bad (_("bad register range"));
3896
              goto error;
3897
            }
3898
          for (; reg < hi_reg; reg++)
3899
            mask |= 1 << reg;
3900
        }
3901
    }
3902
  while (skip_past_comma (&input_line_pointer) != FAIL);
3903
 
3904
  if (*input_line_pointer == '}')
3905
    input_line_pointer++;
3906
 
3907
  demand_empty_rest_of_line ();
3908
 
3909
  /* Generate any deferred opcodes because we're going to be looking at
3910
     the list.  */
3911
  flush_pending_unwind ();
3912
 
3913
  for (i = 0; i < 16; i++)
3914
    {
3915
      if (mask & (1 << i))
3916
        unwind.frame_size += 8;
3917
    }
3918
 
3919
  /* Attempt to combine with a previous opcode.  We do this because gcc
3920
     likes to output separate unwind directives for a single block of
3921
     registers.  */
3922
  if (unwind.opcode_count > 0)
3923
    {
3924
      i = unwind.opcodes[unwind.opcode_count - 1];
3925
      if ((i & 0xf8) == 0xc0)
3926
        {
3927
          i &= 7;
3928
          /* Only merge if the blocks are contiguous.  */
3929
          if (i < 6)
3930
            {
3931
              if ((mask & 0xfe00) == (1 << 9))
3932
                {
3933
                  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3934
                  unwind.opcode_count--;
3935
                }
3936
            }
3937
          else if (i == 6 && unwind.opcode_count >= 2)
3938
            {
3939
              i = unwind.opcodes[unwind.opcode_count - 2];
3940
              reg = i >> 4;
3941
              i &= 0xf;
3942
 
3943
              op = 0xffff << (reg - 1);
3944
              if (reg > 0
3945
                  && ((mask & op) == (1u << (reg - 1))))
3946
                {
3947
                  op = (1 << (reg + i + 1)) - 1;
3948
                  op &= ~((1 << reg) - 1);
3949
                  mask |= op;
3950
                  unwind.opcode_count -= 2;
3951
                }
3952
            }
3953
        }
3954
    }
3955
 
3956
  hi_reg = 15;
3957
  /* We want to generate opcodes in the order the registers have been
3958
     saved, i.e. descending order.  */
3959
  for (reg = 15; reg >= -1; reg--)
3960
    {
3961
      /* Save registers in blocks.  */
3962
      if (reg < 0
3963
          || !(mask & (1 << reg)))
3964
        {
3965
          /* We found an unsaved reg.  Generate opcodes to save the
3966
             preceding block.   */
3967
          if (reg != hi_reg)
3968
            {
3969
              if (reg == 9)
3970
                {
3971
                  /* Short form.  */
3972
                  op = 0xc0 | (hi_reg - 10);
3973
                  add_unwind_opcode (op, 1);
3974
                }
3975
              else
3976
                {
3977
                  /* Long form.  */
3978
                  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3979
                  add_unwind_opcode (op, 2);
3980
                }
3981
            }
3982
          hi_reg = reg - 1;
3983
        }
3984
    }
3985
 
3986
  return;
3987
error:
3988
  ignore_rest_of_line ();
3989
}
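
/* Worked example (derived from the code above): ".save {wr10-wr12}" sets
   mask == 0x1c00 and adds 24 bytes to frame_size; because the block
   starts at wr10 (the first unsaved register below it is wr9), the short
   form 0xc0 | (12 - 10) == 0xc2 covers the whole list in one byte.  */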
3990
 
3991
static void
3992
s_arm_unwind_save_mmxwcg (void)
3993
{
3994
  int reg;
3995
  int hi_reg;
3996
  unsigned mask = 0;
3997
  valueT op;
3998
 
3999
  if (*input_line_pointer == '{')
4000
    input_line_pointer++;
4001
 
4002
  do
4003
    {
4004
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4005
 
4006
      if (reg == FAIL)
4007
        {
4008
          as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4009
          goto error;
4010
        }
4011
 
4012
      reg -= 8;
4013
      if (mask >> reg)
4014
        as_tsktsk (_("register list not in ascending order"));
4015
      mask |= 1 << reg;
4016
 
4017
      if (*input_line_pointer == '-')
4018
        {
4019
          input_line_pointer++;
4020
          hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4021
          if (hi_reg == FAIL)
4022
            {
4023
              as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4024
              goto error;
4025
            }
4026
          else if (reg >= hi_reg)
4027
            {
4028
              as_bad (_("bad register range"));
4029
              goto error;
4030
            }
4031
          for (; reg < hi_reg; reg++)
4032
            mask |= 1 << reg;
4033
        }
4034
    }
4035
  while (skip_past_comma (&input_line_pointer) != FAIL);
4036
 
4037
  if (*input_line_pointer == '}')
4038
    input_line_pointer++;
4039
 
4040
  demand_empty_rest_of_line ();
4041
 
4042
  /* Generate any deferred opcodes because we're going to be looking at
4043
     the list.  */
4044
  flush_pending_unwind ();
4045
 
4046
  for (reg = 0; reg < 16; reg++)
4047
    {
4048
      if (mask & (1 << reg))
4049
        unwind.frame_size += 4;
4050
    }
4051
  op = 0xc700 | mask;
4052
  add_unwind_opcode (op, 2);
4053
  return;
4054
error:
4055
  ignore_rest_of_line ();
4056
}
4057
 
4058
 
4059
/* Parse an unwind_save directive.
4060
   If the argument is non-zero, this is a .vsave directive.  */
4061
 
4062
static void
4063
s_arm_unwind_save (int arch_v6)
4064
{
4065
  char *peek;
4066
  struct reg_entry *reg;
4067
  bfd_boolean had_brace = FALSE;
4068
 
4069
  if (!unwind.proc_start)
4070
    as_bad (MISSING_FNSTART);
4071
 
4072
  /* Figure out what sort of save we have.  */
4073
  peek = input_line_pointer;
4074
 
4075
  if (*peek == '{')
4076
    {
4077
      had_brace = TRUE;
4078
      peek++;
4079
    }
4080
 
4081
  reg = arm_reg_parse_multi (&peek);
4082
 
4083
  if (!reg)
4084
    {
4085
      as_bad (_("register expected"));
4086
      ignore_rest_of_line ();
4087
      return;
4088
    }
4089
 
4090
  switch (reg->type)
4091
    {
4092
    case REG_TYPE_FN:
4093
      if (had_brace)
4094
        {
4095
          as_bad (_("FPA .unwind_save does not take a register list"));
4096
          ignore_rest_of_line ();
4097
          return;
4098
        }
4099
      input_line_pointer = peek;
4100
      s_arm_unwind_save_fpa (reg->number);
4101
      return;
4102
 
4103
    case REG_TYPE_RN:     s_arm_unwind_save_core ();   return;
4104
    case REG_TYPE_VFD:
4105
      if (arch_v6)
4106
        s_arm_unwind_save_vfp_armv6 ();
4107
      else
4108
        s_arm_unwind_save_vfp ();
4109
      return;
4110
    case REG_TYPE_MMXWR:  s_arm_unwind_save_mmxwr ();  return;
4111
    case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4112
 
4113
    default:
4114
      as_bad (_(".unwind_save does not support this kind of register"));
4115
      ignore_rest_of_line ();
4116
    }
4117
}
4118
 
4119
 
4120
/* Parse an unwind_movsp directive.  */
4121
 
4122
static void
4123
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4124
{
4125
  int reg;
4126
  valueT op;
4127
  int offset;
4128
 
4129
  if (!unwind.proc_start)
4130
    as_bad (MISSING_FNSTART);
4131
 
4132
  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4133
  if (reg == FAIL)
4134
    {
4135
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4136
      ignore_rest_of_line ();
4137
      return;
4138
    }
4139
 
4140
  /* Optional constant.  */
4141
  if (skip_past_comma (&input_line_pointer) != FAIL)
4142
    {
4143
      if (immediate_for_directive (&offset) == FAIL)
4144
        return;
4145
    }
4146
  else
4147
    offset = 0;
4148
 
4149
  demand_empty_rest_of_line ();
4150
 
4151
  if (reg == REG_SP || reg == REG_PC)
4152
    {
4153
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4154
      return;
4155
    }
4156
 
4157
  if (unwind.fp_reg != REG_SP)
4158
    as_bad (_("unexpected .unwind_movsp directive"));
4159
 
4160
  /* Generate opcode to restore the value.  */
4161
  op = 0x90 | reg;
4162
  add_unwind_opcode (op, 1);
4163
 
4164
  /* Record the information for later.  */
4165
  unwind.fp_reg = reg;
4166
  unwind.fp_offset = unwind.frame_size - offset;
4167
  unwind.sp_restored = 1;
4168
}
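
/* For illustration: ".movsp ip" adds the single opcode 0x90 | 12 == 0x9c
   and records ip in fp_reg with sp_restored set, which is what lets a
   following ".save {..., ip, ...}" be folded into a save of sp by
   s_arm_unwind_save_core above.  */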
4169
 
4170
/* Parse an unwind_pad directive.  */
4171
 
4172
static void
4173
s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4174
{
4175
  int offset;
4176
 
4177
  if (!unwind.proc_start)
4178
    as_bad (MISSING_FNSTART);
4179
 
4180
  if (immediate_for_directive (&offset) == FAIL)
4181
    return;
4182
 
4183
  if (offset & 3)
4184
    {
4185
      as_bad (_("stack increment must be multiple of 4"));
4186
      ignore_rest_of_line ();
4187
      return;
4188
    }
4189
 
4190
  /* Don't generate any opcodes, just record the details for later.  */
4191
  unwind.frame_size += offset;
4192
  unwind.pending_offset += offset;
4193
 
4194
  demand_empty_rest_of_line ();
4195
}
4196
 
4197
/* Parse an unwind_setfp directive.  */
4198
 
4199
static void
4200
s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4201
{
4202
  int sp_reg;
4203
  int fp_reg;
4204
  int offset;
4205
 
4206
  if (!unwind.proc_start)
4207
    as_bad (MISSING_FNSTART);
4208
 
4209
  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4210
  if (skip_past_comma (&input_line_pointer) == FAIL)
4211
    sp_reg = FAIL;
4212
  else
4213
    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4214
 
4215
  if (fp_reg == FAIL || sp_reg == FAIL)
4216
    {
4217
      as_bad (_("expected <reg>, <reg>"));
4218
      ignore_rest_of_line ();
4219
      return;
4220
    }
4221
 
4222
  /* Optional constant.  */
4223
  if (skip_past_comma (&input_line_pointer) != FAIL)
4224
    {
4225
      if (immediate_for_directive (&offset) == FAIL)
4226
        return;
4227
    }
4228
  else
4229
    offset = 0;
4230
 
4231
  demand_empty_rest_of_line ();
4232
 
4233
  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4234
    {
4235
      as_bad (_("register must be either sp or set by a previous"
4236
                "unwind_movsp directive"));
4237
      return;
4238
    }
4239
 
4240
  /* Don't generate any opcodes, just record the information for later.  */
4241
  unwind.fp_reg = fp_reg;
4242
  unwind.fp_used = 1;
4243
  if (sp_reg == REG_SP)
4244
    unwind.fp_offset = unwind.frame_size - offset;
4245
  else
4246
    unwind.fp_offset -= offset;
4247
}
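
/* For illustration: after "add fp, sp, #8" the matching directive
   ".setfp fp, sp, #8" records fp_reg = fp and
   fp_offset = frame_size - 8; no opcode is emitted here, the values are
   consumed later when the unwind opcodes are finalized.  */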
4248
 
4249
/* Parse an unwind_raw directive.  */
4250
 
4251
static void
4252
s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4253
{
4254
  expressionS exp;
4255
  /* This is an arbitrary limit.         */
4256
  unsigned char op[16];
4257
  int count;
4258
 
4259
  if (!unwind.proc_start)
4260
    as_bad (MISSING_FNSTART);
4261
 
4262
  expression (&exp);
4263
  if (exp.X_op == O_constant
4264
      && skip_past_comma (&input_line_pointer) != FAIL)
4265
    {
4266
      unwind.frame_size += exp.X_add_number;
4267
      expression (&exp);
4268
    }
4269
  else
4270
    exp.X_op = O_illegal;
4271
 
4272
  if (exp.X_op != O_constant)
4273
    {
4274
      as_bad (_("expected <offset>, <opcode>"));
4275
      ignore_rest_of_line ();
4276
      return;
4277
    }
4278
 
4279
  count = 0;
4280
 
4281
  /* Parse the opcode.  */
4282
  for (;;)
4283
    {
4284
      if (count >= 16)
4285
        {
4286
          as_bad (_("unwind opcode too long"));
4287
          ignore_rest_of_line ();
          return;
4288
        }
4289
      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4290
        {
4291
          as_bad (_("invalid unwind opcode"));
4292
          ignore_rest_of_line ();
4293
          return;
4294
        }
4295
      op[count++] = exp.X_add_number;
4296
 
4297
      /* Parse the next byte.  */
4298
      if (skip_past_comma (&input_line_pointer) == FAIL)
4299
        break;
4300
 
4301
      expression (&exp);
4302
    }
4303
 
4304
  /* Add the opcode bytes in reverse order.  */
4305
  while (count--)
4306
    add_unwind_opcode (op[count], 1);
4307
 
4308
  demand_empty_rest_of_line ();
4309
}
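
/* For illustration: ".unwind_raw 4, 0xb1, 0x01" credits 4 bytes to
   frame_size and then passes the raw bytes 0xb1, 0x01 to
   add_unwind_opcode (last byte first, as the loop above shows);
   0xb1 0x01 happens to be the same "pop {r0}" encoding
   s_arm_unwind_save_core would emit.  */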
4310
 
4311
 
4312
/* Parse a .eabi_attribute directive.  */
4313
 
4314
static void
4315
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4316
{
4317
  int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4318
 
4319
  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4320
    attributes_set_explicitly[tag] = 1;
4321
}
4322
 
4323
/* Emit a tls fix for the symbol.  */
4324
 
4325
static void
4326
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4327
{
4328
  char *p;
4329
  expressionS exp;
4330
#ifdef md_flush_pending_output
4331
  md_flush_pending_output ();
4332
#endif
4333
 
4334
#ifdef md_cons_align
4335
  md_cons_align (4);
4336
#endif
4337
 
4338
  /* Since we're just labelling the code, there's no need to define a
4339
     mapping symbol.  */
4340
  expression (&exp);
4341
  p = obstack_next_free (&frchain_now->frch_obstack);
4342
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4343
               thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4344
               : BFD_RELOC_ARM_TLS_DESCSEQ);
4345
}
4346
#endif /* OBJ_ELF */
4347
 
4348
static void s_arm_arch (int);
4349
static void s_arm_object_arch (int);
4350
static void s_arm_cpu (int);
4351
static void s_arm_fpu (int);
4352
static void s_arm_arch_extension (int);
4353
 
4354
#ifdef TE_PE
4355
 
4356
static void
4357
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4358
{
4359
  expressionS exp;
4360
 
4361
  do
4362
    {
4363
      expression (&exp);
4364
      if (exp.X_op == O_symbol)
4365
        exp.X_op = O_secrel;
4366
 
4367
      emit_expr (&exp, 4);
4368
    }
4369
  while (*input_line_pointer++ == ',');
4370
 
4371
  input_line_pointer--;
4372
  demand_empty_rest_of_line ();
4373
}
4374
#endif /* TE_PE */
4375
 
4376
/* This table describes all the machine specific pseudo-ops the assembler
4377
   has to support.  The fields are:
4378
     pseudo-op name without dot
4379
     function to call to execute this pseudo-op
4380
     Integer arg to pass to the function.  */
4381
 
4382
const pseudo_typeS md_pseudo_table[] =
4383
{
4384
  /* Never called because '.req' does not start a line.  */
4385
  { "req",         s_req,         0 },
4386
  /* Following two are likewise never called.  */
4387
  { "dn",          s_dn,          0 },
4388
  { "qn",          s_qn,          0 },
4389
  { "unreq",       s_unreq,       0 },
4390
  { "bss",         s_bss,         0 },
4391
  { "align",       s_align,       0 },
4392
  { "arm",         s_arm,         0 },
4393
  { "thumb",       s_thumb,       0 },
4394
  { "code",        s_code,        0 },
4395
  { "force_thumb", s_force_thumb, 0 },
4396
  { "thumb_func",  s_thumb_func,  0 },
4397
  { "thumb_set",   s_thumb_set,   0 },
4398
  { "even",        s_even,        0 },
4399
  { "ltorg",       s_ltorg,       0 },
4400
  { "pool",        s_ltorg,       0 },
4401
  { "syntax",      s_syntax,      0 },
4402
  { "cpu",         s_arm_cpu,     0 },
4403
  { "arch",        s_arm_arch,    0 },
4404
  { "object_arch", s_arm_object_arch,   0 },
4405
  { "fpu",         s_arm_fpu,     0 },
4406
  { "arch_extension", s_arm_arch_extension, 0 },
4407
#ifdef OBJ_ELF
4408
  { "word",             s_arm_elf_cons, 4 },
4409
  { "long",             s_arm_elf_cons, 4 },
4410
  { "inst.n",           s_arm_elf_inst, 2 },
4411
  { "inst.w",           s_arm_elf_inst, 4 },
4412
  { "inst",             s_arm_elf_inst, 0 },
4413
  { "rel31",            s_arm_rel31,      0 },
4414
  { "fnstart",          s_arm_unwind_fnstart,   0 },
4415
  { "fnend",            s_arm_unwind_fnend,     0 },
4416
  { "cantunwind",       s_arm_unwind_cantunwind, 0 },
4417
  { "personality",      s_arm_unwind_personality, 0 },
4418
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
4419
  { "handlerdata",      s_arm_unwind_handlerdata, 0 },
4420
  { "save",             s_arm_unwind_save,      0 },
4421
  { "vsave",            s_arm_unwind_save,      1 },
4422
  { "movsp",            s_arm_unwind_movsp,     0 },
4423
  { "pad",              s_arm_unwind_pad,       0 },
4424
  { "setfp",            s_arm_unwind_setfp,     0 },
4425
  { "unwind_raw",       s_arm_unwind_raw,       0 },
4426
  { "eabi_attribute",   s_arm_eabi_attribute,   0 },
4427
  { "tlsdescseq",       s_arm_tls_descseq,      0 },
4428
#else
4429
  { "word",        cons, 4},
4430
 
4431
  /* These are used for dwarf.  */
4432
  {"2byte", cons, 2},
4433
  {"4byte", cons, 4},
4434
  {"8byte", cons, 8},
4435
  /* These are used for dwarf2.  */
4436
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4437
  { "loc",  dwarf2_directive_loc,  0 },
4438
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4439
#endif
4440
  { "extend",      float_cons, 'x' },
4441
  { "ldouble",     float_cons, 'x' },
4442
  { "packed",      float_cons, 'p' },
4443
#ifdef TE_PE
4444
  {"secrel32", pe_directive_secrel, 0},
4445
#endif
4446
  { 0, 0, 0 }
4447
};
4448
 
4449
/* Parser functions used exclusively in instruction operands.  */
4450
 
4451
/* Generic immediate-value read function for use in insn parsing.
4452
   STR points to the beginning of the immediate (the leading #);
4453
   VAL receives the value; if the value is outside [MIN, MAX]
4454
   issue an error.  PREFIX_OPT is true if the immediate prefix is
4455
   optional.  */
4456
 
4457
static int
4458
parse_immediate (char **str, int *val, int min, int max,
4459
                 bfd_boolean prefix_opt)
4460
{
4461
  expressionS exp;
4462
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4463
  if (exp.X_op != O_constant)
4464
    {
4465
      inst.error = _("constant expression required");
4466
      return FAIL;
4467
    }
4468
 
4469
  if (exp.X_add_number < min || exp.X_add_number > max)
4470
    {
4471
      inst.error = _("immediate value out of range");
4472
      return FAIL;
4473
    }
4474
 
4475
  *val = exp.X_add_number;
4476
  return SUCCESS;
4477
}
4478
 
4479
/* Less-generic immediate-value read function with the possibility of loading a
4480
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4481
   instructions. Puts the result directly in inst.operands[i].  */
4482
 
4483
static int
4484
parse_big_immediate (char **str, int i)
4485
{
4486
  expressionS exp;
4487
  char *ptr = *str;
4488
 
4489
  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4490
 
4491
  if (exp.X_op == O_constant)
4492
    {
4493
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4494
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
4495
         O_constant.  We have to be careful not to break compilation for
4496
         32-bit X_add_number, though.  */
4497
      if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4498
        {
4499
          /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
4500
          inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4501
          inst.operands[i].regisimm = 1;
4502
        }
4503
    }
4504
  else if (exp.X_op == O_big
4505
           && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
4506
    {
4507
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4508
 
4509
      /* Bignums have their least significant bits in
4510
         generic_bignum[0]. Make sure we put 32 bits in imm and
4511
         32 bits in reg,  in a (hopefully) portable way.  */
4512
      gas_assert (parts != 0);
4513
 
4514
      /* Make sure that the number is not too big.
4515
         PR 11972: Bignums can now be sign-extended to the
4516
         size of a .octa so check that the out of range bits
4517
         are all zero or all one.  */
4518
      if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
4519
        {
4520
          LITTLENUM_TYPE m = -1;
4521
 
4522
          if (generic_bignum[parts * 2] != 0
4523
              && generic_bignum[parts * 2] != m)
4524
            return FAIL;
4525
 
4526
          for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
4527
            if (generic_bignum[j] != generic_bignum[j-1])
4528
              return FAIL;
4529
        }
4530
 
4531
      inst.operands[i].imm = 0;
4532
      for (j = 0; j < parts; j++, idx++)
4533
        inst.operands[i].imm |= generic_bignum[idx]
4534
                                << (LITTLENUM_NUMBER_OF_BITS * j);
4535
      inst.operands[i].reg = 0;
4536
      for (j = 0; j < parts; j++, idx++)
4537
        inst.operands[i].reg |= generic_bignum[idx]
4538
                                << (LITTLENUM_NUMBER_OF_BITS * j);
4539
      inst.operands[i].regisimm = 1;
4540
    }
4541
  else
4542
    return FAIL;
4543
 
4544
  *str = ptr;
4545
 
4546
  return SUCCESS;
4547
}
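
/* For illustration: an operand such as "#0x00ff00ff00ff00ff" (as
   accepted by VMOV.I64) is split here: the low 32 bits go to
   inst.operands[i].imm, the high 32 bits to inst.operands[i].reg, and
   regisimm records that the pair holds one 64-bit immediate.  On a
   32-bit host the value arrives as an O_big bignum; on a 64-bit host the
   O_constant path performs the same split.  */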
4548
 
4549
/* Returns the pseudo-register number of an FPA immediate constant,
4550
   or FAIL if there isn't a valid constant here.  */
4551
 
4552
static int
4553
parse_fpa_immediate (char ** str)
4554
{
4555
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4556
  char *         save_in;
4557
  expressionS    exp;
4558
  int            i;
4559
  int            j;
4560
 
4561
  /* First try to match exact strings; this guarantees that some
4562
     formats will work even for cross assembly.  */
4563
 
4564
  for (i = 0; fp_const[i]; i++)
4565
    {
4566
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4567
        {
4568
          char *start = *str;
4569
 
4570
          *str += strlen (fp_const[i]);
4571
          if (is_end_of_line[(unsigned char) **str])
4572
            return i + 8;
4573
          *str = start;
4574
        }
4575
    }
4576
 
4577
  /* Just because we didn't get a match doesn't mean that the constant
4578
     isn't valid, just that it is in a format that we don't
4579
     automatically recognize.  Try parsing it with the standard
4580
     expression routines.  */
4581
 
4582
  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4583
 
4584
  /* Look for a raw floating point number.  */
4585
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4586
      && is_end_of_line[(unsigned char) *save_in])
4587
    {
4588
      for (i = 0; i < NUM_FLOAT_VALS; i++)
4589
        {
4590
          for (j = 0; j < MAX_LITTLENUMS; j++)
4591
            {
4592
              if (words[j] != fp_values[i][j])
4593
                break;
4594
            }
4595
 
4596
          if (j == MAX_LITTLENUMS)
4597
            {
4598
              *str = save_in;
4599
              return i + 8;
4600
            }
4601
        }
4602
    }
4603
 
4604
  /* Try to parse a more complex expression; this will probably fail
4605
     unless the code uses a floating point prefix (e.g. "0f").  */
4606
  save_in = input_line_pointer;
4607
  input_line_pointer = *str;
4608
  if (expression (&exp) == absolute_section
4609
      && exp.X_op == O_big
4610
      && exp.X_add_number < 0)
4611
    {
4612
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4613
         Ditto for 15.  */
4614
      if (gen_to_words (words, 5, (long) 15) == 0)
4615
        {
4616
          for (i = 0; i < NUM_FLOAT_VALS; i++)
4617
            {
4618
              for (j = 0; j < MAX_LITTLENUMS; j++)
4619
                {
4620
                  if (words[j] != fp_values[i][j])
4621
                    break;
4622
                }
4623
 
4624
              if (j == MAX_LITTLENUMS)
4625
                {
4626
                  *str = input_line_pointer;
4627
                  input_line_pointer = save_in;
4628
                  return i + 8;
4629
                }
4630
            }
4631
        }
4632
    }
4633
 
4634
  *str = input_line_pointer;
4635
  input_line_pointer = save_in;
4636
  inst.error = _("invalid FPA immediate expression");
4637
  return FAIL;
4638
}
4639
 
4640
/* Returns 1 if a number has "quarter-precision" float format
4641
   0baBbbbbbc defgh000 00000000 00000000.  */
4642
 
4643
static int
4644
is_quarter_float (unsigned imm)
4645
{
4646
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4647
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4648
}
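
/* Worked example (derived from the test above): 1.0f is 0x3f800000; the
   low 19 bits are clear and the exponent bits match the 0x3e000000
   pattern selected when bit 29 is set, so it is accepted.  64.0f
   (0x42800000) fails the exponent check, which matches the fact that the
   representable range tops out at 31.0.  */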
4649
 
4650
/* Parse an 8-bit "quarter-precision" floating point number of the form:
4651
   0baBbbbbbc defgh000 00000000 00000000.
4652
   The zero and minus-zero cases need special handling, since they can't be
4653
   encoded in the "quarter-precision" float format, but can nonetheless be
4654
   loaded as integer constants.  */
4655
 
4656
static unsigned
4657
parse_qfloat_immediate (char **ccp, int *immed)
4658
{
4659
  char *str = *ccp;
4660
  char *fpnum;
4661
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4662
  int found_fpchar = 0;
4663
 
4664
  skip_past_char (&str, '#');
4665
 
4666
  /* We must not accidentally parse an integer as a floating-point number. Make
4667
     sure that the value we parse is not an integer by checking for special
4668
     characters '.' or 'e'.
4669
     FIXME: This is a horrible hack, but doing better is tricky because type
4670
     information isn't in a very usable state at parse time.  */
4671
  fpnum = str;
4672
  skip_whitespace (fpnum);
4673
 
4674
  if (strncmp (fpnum, "0x", 2) == 0)
4675
    return FAIL;
4676
  else
4677
    {
4678
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4679
        if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4680
          {
4681
            found_fpchar = 1;
4682
            break;
4683
          }
4684
 
4685
      if (!found_fpchar)
4686
        return FAIL;
4687
    }
4688
 
4689
  if ((str = atof_ieee (str, 's', words)) != NULL)
4690
    {
4691
      unsigned fpword = 0;
4692
      int i;
4693
 
4694
      /* Our FP word must be 32 bits (single-precision FP).  */
4695
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4696
        {
4697
          fpword <<= LITTLENUM_NUMBER_OF_BITS;
4698
          fpword |= words[i];
4699
        }
4700
 
4701
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4702
        *immed = fpword;
4703
      else
4704
        return FAIL;
4705
 
4706
      *ccp = str;
4707
 
4708
      return SUCCESS;
4709
    }
4710
 
4711
  return FAIL;
4712
}
4713
 
4714
/* Shift operands.  */
4715
enum shift_kind
4716
{
4717
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4718
};
4719
 
4720
struct asm_shift_name
4721
{
4722
  const char      *name;
4723
  enum shift_kind  kind;
4724
};
4725
 
4726
/* Third argument to parse_shift.  */
4727
enum parse_shift_mode
4728
{
4729
  NO_SHIFT_RESTRICT,            /* Any kind of shift is accepted.  */
4730
  SHIFT_IMMEDIATE,              /* Shift operand must be an immediate.  */
4731
  SHIFT_LSL_OR_ASR_IMMEDIATE,   /* Shift must be LSL or ASR immediate.  */
4732
  SHIFT_ASR_IMMEDIATE,          /* Shift must be ASR immediate.  */
4733
  SHIFT_LSL_IMMEDIATE,          /* Shift must be LSL immediate.  */
4734
};
4735
 
4736
/* Parse a <shift> specifier on an ARM data processing instruction.
4737
   This has three forms:
4738
 
4739
     (LSL|LSR|ASL|ASR|ROR) Rs
4740
     (LSL|LSR|ASL|ASR|ROR) #imm
4741
     RRX
4742
 
4743
   Note that ASL is assimilated to LSL in the instruction encoding, and
4744
   RRX to ROR #0 (which cannot be written as such).  */
4745
 
4746
static int
4747
parse_shift (char **str, int i, enum parse_shift_mode mode)
4748
{
4749
  const struct asm_shift_name *shift_name;
4750
  enum shift_kind shift;
4751
  char *s = *str;
4752
  char *p = s;
4753
  int reg;
4754
 
4755
  for (p = *str; ISALPHA (*p); p++)
4756
    ;
4757
 
4758
  if (p == *str)
4759
    {
4760
      inst.error = _("shift expression expected");
4761
      return FAIL;
4762
    }
4763
 
4764
  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4765
                                                            p - *str);
4766
 
4767
  if (shift_name == NULL)
4768
    {
4769
      inst.error = _("shift expression expected");
4770
      return FAIL;
4771
    }
4772
 
4773
  shift = shift_name->kind;
4774
 
4775
  switch (mode)
4776
    {
4777
    case NO_SHIFT_RESTRICT:
4778
    case SHIFT_IMMEDIATE:   break;
4779
 
4780
    case SHIFT_LSL_OR_ASR_IMMEDIATE:
4781
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4782
        {
4783
          inst.error = _("'LSL' or 'ASR' required");
4784
          return FAIL;
4785
        }
4786
      break;
4787
 
4788
    case SHIFT_LSL_IMMEDIATE:
4789
      if (shift != SHIFT_LSL)
4790
        {
4791
          inst.error = _("'LSL' required");
4792
          return FAIL;
4793
        }
4794
      break;
4795
 
4796
    case SHIFT_ASR_IMMEDIATE:
4797
      if (shift != SHIFT_ASR)
4798
        {
4799
          inst.error = _("'ASR' required");
4800
          return FAIL;
4801
        }
4802
      break;
4803
 
4804
    default: abort ();
4805
    }
4806
 
4807
  if (shift != SHIFT_RRX)
4808
    {
4809
      /* Whitespace can appear here if the next thing is a bare digit.  */
4810
      skip_whitespace (p);
4811
 
4812
      if (mode == NO_SHIFT_RESTRICT
4813
          && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4814
        {
4815
          inst.operands[i].imm = reg;
4816
          inst.operands[i].immisreg = 1;
4817
        }
4818
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4819
        return FAIL;
4820
    }
4821
  inst.operands[i].shift_kind = shift;
4822
  inst.operands[i].shifted = 1;
4823
  *str = p;
4824
  return SUCCESS;
4825
}
4826
 
4827
/* Parse a <shifter_operand> for an ARM data processing instruction:
4828
 
4829
      #<immediate>
4830
      #<immediate>, <rotate>
4831
      <Rm>
4832
      <Rm>, <shift>
4833
 
4834
   where <shift> is defined by parse_shift above, and <rotate> is a
4835
   multiple of 2 between 0 and 30.  Validation of immediate operands
4836
   is deferred to md_apply_fix.  */
4837
 
4838
static int
4839
parse_shifter_operand (char **str, int i)
4840
{
4841
  int value;
4842
  expressionS exp;
4843
 
4844
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4845
    {
4846
      inst.operands[i].reg = value;
4847
      inst.operands[i].isreg = 1;
4848
 
4849
      /* parse_shift will override this if appropriate */
4850
      inst.reloc.exp.X_op = O_constant;
4851
      inst.reloc.exp.X_add_number = 0;
4852
 
4853
      if (skip_past_comma (str) == FAIL)
4854
        return SUCCESS;
4855
 
4856
      /* Shift operation on register.  */
4857
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
4858
    }
4859
 
4860
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4861
    return FAIL;
4862
 
4863
  if (skip_past_comma (str) == SUCCESS)
4864
    {
4865
      /* #x, y -- ie explicit rotation by Y.  */
4866
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
4867
        return FAIL;
4868
 
4869
      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4870
        {
4871
          inst.error = _("constant expression expected");
4872
          return FAIL;
4873
        }
4874
 
4875
      value = exp.X_add_number;
4876
      if (value < 0 || value > 30 || value % 2 != 0)
4877
        {
4878
          inst.error = _("invalid rotation");
4879
          return FAIL;
4880
        }
4881
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4882
        {
4883
          inst.error = _("invalid constant");
4884
          return FAIL;
4885
        }
4886
 
4887
      /* Convert to decoded value.  md_apply_fix will put it back.  */
4888
      inst.reloc.exp.X_add_number
4889
        = (((inst.reloc.exp.X_add_number << (32 - value))
4890
            | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4891
    }
4892
 
4893
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4894
  inst.reloc.pc_rel = 0;
4895
  return SUCCESS;
4896
}
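
/* Worked example (derived from the code above): an operand written
   "#255, 30" stores 255 rotated right by 30, i.e. 0x3fc, in
   inst.reloc.exp.X_add_number; md_apply_fix later re-derives the 8-bit
   constant and rotation when it encodes the instruction.  */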
4897
 
4898
/* Group relocation information.  Each entry in the table contains the
4899
   textual name of the relocation as may appear in assembler source
4900
   and must end with a colon.
4901
   Along with this textual name are the relocation codes to be used if
4902
   the corresponding instruction is an ALU instruction (ADD or SUB only),
4903
   an LDR, an LDRS, or an LDC.  */
4904
 
4905
struct group_reloc_table_entry
4906
{
4907
  const char *name;
4908
  int alu_code;
4909
  int ldr_code;
4910
  int ldrs_code;
4911
  int ldc_code;
4912
};
4913
 
4914
typedef enum
4915
{
4916
  /* Varieties of non-ALU group relocation.  */
4917
 
4918
  GROUP_LDR,
4919
  GROUP_LDRS,
4920
  GROUP_LDC
4921
} group_reloc_type;
4922
 
4923
static struct group_reloc_table_entry group_reloc_table[] =
4924
  { /* Program counter relative: */
4925
    { "pc_g0_nc",
4926
      BFD_RELOC_ARM_ALU_PC_G0_NC,       /* ALU */
4927
      0,                         /* LDR */
4928
      0,                         /* LDRS */
4929
      0 },                       /* LDC */
4930
    { "pc_g0",
4931
      BFD_RELOC_ARM_ALU_PC_G0,          /* ALU */
4932
      BFD_RELOC_ARM_LDR_PC_G0,          /* LDR */
4933
      BFD_RELOC_ARM_LDRS_PC_G0,         /* LDRS */
4934
      BFD_RELOC_ARM_LDC_PC_G0 },        /* LDC */
4935
    { "pc_g1_nc",
4936
      BFD_RELOC_ARM_ALU_PC_G1_NC,       /* ALU */
4937
      0,                         /* LDR */
4938
      0,                         /* LDRS */
4939
      0 },                       /* LDC */
4940
    { "pc_g1",
4941
      BFD_RELOC_ARM_ALU_PC_G1,          /* ALU */
4942
      BFD_RELOC_ARM_LDR_PC_G1,          /* LDR */
4943
      BFD_RELOC_ARM_LDRS_PC_G1,         /* LDRS */
4944
      BFD_RELOC_ARM_LDC_PC_G1 },        /* LDC */
4945
    { "pc_g2",
4946
      BFD_RELOC_ARM_ALU_PC_G2,          /* ALU */
4947
      BFD_RELOC_ARM_LDR_PC_G2,          /* LDR */
4948
      BFD_RELOC_ARM_LDRS_PC_G2,         /* LDRS */
4949
      BFD_RELOC_ARM_LDC_PC_G2 },        /* LDC */
4950
    /* Section base relative */
4951
    { "sb_g0_nc",
4952
      BFD_RELOC_ARM_ALU_SB_G0_NC,       /* ALU */
4953
      0,                         /* LDR */
4954
      0,                         /* LDRS */
4955
      0 },                       /* LDC */
4956
    { "sb_g0",
4957
      BFD_RELOC_ARM_ALU_SB_G0,          /* ALU */
4958
      BFD_RELOC_ARM_LDR_SB_G0,          /* LDR */
4959
      BFD_RELOC_ARM_LDRS_SB_G0,         /* LDRS */
4960
      BFD_RELOC_ARM_LDC_SB_G0 },        /* LDC */
4961
    { "sb_g1_nc",
4962
      BFD_RELOC_ARM_ALU_SB_G1_NC,       /* ALU */
4963
      0,                         /* LDR */
4964
      0,                         /* LDRS */
4965
      0 },                       /* LDC */
4966
    { "sb_g1",
4967
      BFD_RELOC_ARM_ALU_SB_G1,          /* ALU */
4968
      BFD_RELOC_ARM_LDR_SB_G1,          /* LDR */
4969
      BFD_RELOC_ARM_LDRS_SB_G1,         /* LDRS */
4970
      BFD_RELOC_ARM_LDC_SB_G1 },        /* LDC */
4971
    { "sb_g2",
4972
      BFD_RELOC_ARM_ALU_SB_G2,          /* ALU */
4973
      BFD_RELOC_ARM_LDR_SB_G2,          /* LDR */
4974
      BFD_RELOC_ARM_LDRS_SB_G2,         /* LDRS */
4975
      BFD_RELOC_ARM_LDC_SB_G2 } };      /* LDC */
4976
 
4977
/* Given the address of a pointer pointing to the textual name of a group
4978
   relocation as may appear in assembler source, attempt to find its details
4979
   in group_reloc_table.  The pointer will be updated to the character after
4980
   the trailing colon.  On failure, FAIL will be returned; SUCCESS
4981
   otherwise.  On success, *entry will be updated to point at the relevant
4982
   group_reloc_table entry. */
4983
 
4984
static int
4985
find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4986
{
4987
  unsigned int i;
4988
  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4989
    {
4990
      int length = strlen (group_reloc_table[i].name);
4991
 
4992
      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4993
          && (*str)[length] == ':')
4994
        {
4995
          *out = &group_reloc_table[i];
4996
          *str += (length + 1);
4997
          return SUCCESS;
4998
        }
4999
    }
5000
 
5001
  return FAIL;
5002
}
5003
 
5004
/* Parse a <shifter_operand> for an ARM data processing instruction
5005
   (as for parse_shifter_operand) where group relocations are allowed:
5006
 
5007
      #<immediate>
5008
      #<immediate>, <rotate>
5009
      #:<group_reloc>:<expression>
5010
      <Rm>
5011
      <Rm>, <shift>
5012
 
5013
   where <group_reloc> is one of the strings defined in group_reloc_table.
5014
   The hashes are optional.
5015
 
5016
   Everything else is as for parse_shifter_operand.  */
5017
 
5018
static parse_operand_result
5019
parse_shifter_operand_group_reloc (char **str, int i)
5020
{
5021
  /* Determine if we have the sequence of characters #: or just :
5022
     coming next.  If we do, then we check for a group relocation.
5023
     If we don't, punt the whole lot to parse_shifter_operand.  */
5024
 
5025
  if (((*str)[0] == '#' && (*str)[1] == ':')
5026
      || (*str)[0] == ':')
5027
    {
5028
      struct group_reloc_table_entry *entry;
5029
 
5030
      if ((*str)[0] == '#')
5031
        (*str) += 2;
5032
      else
5033
        (*str)++;
5034
 
5035
      /* Try to parse a group relocation.  Anything else is an error.  */
5036
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
5037
        {
5038
          inst.error = _("unknown group relocation");
5039
          return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5040
        }
5041
 
5042
      /* We now have the group relocation table entry corresponding to
5043
         the name in the assembler source.  Next, we parse the expression.  */
5044
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5045
        return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5046
 
5047
      /* Record the relocation type (always the ALU variant here).  */
5048
      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5049
      gas_assert (inst.reloc.type != 0);
5050
 
5051
      return PARSE_OPERAND_SUCCESS;
5052
    }
5053
  else
5054
    return parse_shifter_operand (str, i) == SUCCESS
5055
           ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5056
 
5057
  /* Never reached.  */
5058
}
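
/* For illustration (the symbol name is hypothetical): in
   "add r0, r0, #:pc_g0_nc:(foo)" the "#:" is skipped here,
   find_group_reloc_table_entry consumes "pc_g0_nc:", the remaining
   "(foo)" is parsed as the expression, and the ALU code
   BFD_RELOC_ARM_ALU_PC_G0_NC is recorded in inst.reloc.type.  */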
5059
 
5060
/* Parse a Neon alignment expression.  Information is written to
5061
   inst.operands[i].  We assume the initial ':' has been skipped.
5062
 
5063
   align        .imm = align << 8, .immisalign=1, .preind=0  */
5064
static parse_operand_result
5065
parse_neon_alignment (char **str, int i)
5066
{
5067
  char *p = *str;
5068
  expressionS exp;
5069
 
5070
  my_get_expression (&exp, &p, GE_NO_PREFIX);
5071
 
5072
  if (exp.X_op != O_constant)
5073
    {
5074
      inst.error = _("alignment must be constant");
5075
      return PARSE_OPERAND_FAIL;
5076
    }
5077
 
5078
  inst.operands[i].imm = exp.X_add_number << 8;
5079
  inst.operands[i].immisalign = 1;
5080
  /* Alignments are not pre-indexes.  */
5081
  inst.operands[i].preind = 0;
5082
 
5083
  *str = p;
5084
  return PARSE_OPERAND_SUCCESS;
5085
}
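
/* For illustration: a Neon address written "[r0:64]" (or the older
   "[r0,:64]") reaches this point with the ':' already skipped; 64 is
   stored as inst.operands[i].imm = 64 << 8 with immisalign set, and the
   caller then checks for ']', writeback and any index register.  */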
5086
 
5087
/* Parse all forms of an ARM address expression.  Information is written
5088
   to inst.operands[i] and/or inst.reloc.
5089
 
5090
   Preindexed addressing (.preind=1):
5091
 
5092
   [Rn, #offset]       .reg=Rn .reloc.exp=offset
5093
   [Rn, +/-Rm]         .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5094
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5095
                       .shift_kind=shift .reloc.exp=shift_imm
5096
 
5097
   These three may have a trailing ! which causes .writeback to be set also.
5098
 
5099
   Postindexed addressing (.postind=1, .writeback=1):
5100
 
5101
   [Rn], #offset       .reg=Rn .reloc.exp=offset
5102
   [Rn], +/-Rm         .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5103
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5104
                       .shift_kind=shift .reloc.exp=shift_imm
5105
 
5106
   Unindexed addressing (.preind=0, .postind=0):
5107
 
5108
   [Rn], {option}      .reg=Rn .imm=option .immisreg=0
5109
 
5110
   Other:
5111
 
5112
   [Rn]{!}             shorthand for [Rn,#0]{!}
5113
   =immediate          .isreg=0 .reloc.exp=immediate
5114
   label               .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5115
 
5116
  It is the caller's responsibility to check for addressing modes not
5117
  supported by the instruction, and to set inst.reloc.type.  */
5118
 
5119
static parse_operand_result
5120
parse_address_main (char **str, int i, int group_relocations,
5121
                    group_reloc_type group_type)
5122
{
5123
  char *p = *str;
5124
  int reg;
5125
 
5126
  if (skip_past_char (&p, '[') == FAIL)
5127
    {
5128
      if (skip_past_char (&p, '=') == FAIL)
5129
        {
5130
          /* Bare address - translate to PC-relative offset.  */
5131
          inst.reloc.pc_rel = 1;
5132
          inst.operands[i].reg = REG_PC;
5133
          inst.operands[i].isreg = 1;
5134
          inst.operands[i].preind = 1;
5135
        }
5136
      /* Otherwise a load-constant pseudo op, no special treatment needed here.  */
5137
 
5138
      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5139
        return PARSE_OPERAND_FAIL;
5140
 
5141
      *str = p;
5142
      return PARSE_OPERAND_SUCCESS;
5143
    }
5144
 
5145
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5146
    {
5147
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5148
      return PARSE_OPERAND_FAIL;
5149
    }
5150
  inst.operands[i].reg = reg;
5151
  inst.operands[i].isreg = 1;
5152
 
5153
  if (skip_past_comma (&p) == SUCCESS)
5154
    {
5155
      inst.operands[i].preind = 1;
5156
 
5157
      if (*p == '+') p++;
5158
      else if (*p == '-') p++, inst.operands[i].negative = 1;
5159
 
5160
      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5161
        {
5162
          inst.operands[i].imm = reg;
5163
          inst.operands[i].immisreg = 1;
5164
 
5165
          if (skip_past_comma (&p) == SUCCESS)
5166
            if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5167
              return PARSE_OPERAND_FAIL;
5168
        }
5169
      else if (skip_past_char (&p, ':') == SUCCESS)
5170
        {
5171
          /* FIXME: '@' should be used here, but it's filtered out by generic
5172
             code before we get to see it here. This may be subject to
5173
             change.  */
5174
          parse_operand_result result = parse_neon_alignment (&p, i);
5175
 
5176
          if (result != PARSE_OPERAND_SUCCESS)
5177
            return result;
5178
        }
5179
      else
5180
        {
5181
          if (inst.operands[i].negative)
5182
            {
5183
              inst.operands[i].negative = 0;
5184
              p--;
5185
            }
5186
 
5187
          if (group_relocations
5188
              && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5189
            {
5190
              struct group_reloc_table_entry *entry;
5191
 
5192
              /* Skip over the #: or : sequence.  */
5193
              if (*p == '#')
5194
                p += 2;
5195
              else
5196
                p++;
5197
 
5198
              /* Try to parse a group relocation.  Anything else is an
5199
                 error.  */
5200
              if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5201
                {
5202
                  inst.error = _("unknown group relocation");
5203
                  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5204
                }
5205
 
5206
              /* We now have the group relocation table entry corresponding to
5207
                 the name in the assembler source.  Next, we parse the
5208
                 expression.  */
5209
              if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5210
                return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5211
 
5212
              /* Record the relocation type.  */
5213
              switch (group_type)
5214
                {
5215
                  case GROUP_LDR:
5216
                    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5217
                    break;
5218
 
5219
                  case GROUP_LDRS:
5220
                    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5221
                    break;
5222
 
5223
                  case GROUP_LDC:
5224
                    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5225
                    break;
5226
 
5227
                  default:
5228
                    gas_assert (0);
5229
                }
5230
 
5231
              if (inst.reloc.type == 0)
5232
                {
5233
                  inst.error = _("this group relocation is not allowed on this instruction");
5234
                  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5235
                }
5236
            }
5237
          else
5238
            {
5239
              char *q = p;
5240
              if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5241
                return PARSE_OPERAND_FAIL;
5242
              /* If the offset is 0, find out if it's a +0 or -0.  */
5243
              if (inst.reloc.exp.X_op == O_constant
5244
                  && inst.reloc.exp.X_add_number == 0)
5245
                {
5246
                  skip_whitespace (q);
5247
                  if (*q == '#')
5248
                    {
5249
                      q++;
5250
                      skip_whitespace (q);
5251
                    }
5252
                  if (*q == '-')
5253
                    inst.operands[i].negative = 1;
5254
                }
5255
            }
5256
        }
5257
    }
5258
  else if (skip_past_char (&p, ':') == SUCCESS)
5259
    {
5260
      /* FIXME: '@' should be used here, but it's filtered out by generic code
5261
         before we get to see it here. This may be subject to change.  */
5262
      parse_operand_result result = parse_neon_alignment (&p, i);
5263
 
5264
      if (result != PARSE_OPERAND_SUCCESS)
5265
        return result;
5266
    }
5267
 
5268
  if (skip_past_char (&p, ']') == FAIL)
5269
    {
5270
      inst.error = _("']' expected");
5271
      return PARSE_OPERAND_FAIL;
5272
    }
5273
 
5274
  if (skip_past_char (&p, '!') == SUCCESS)
5275
    inst.operands[i].writeback = 1;
5276
 
5277
  else if (skip_past_comma (&p) == SUCCESS)
5278
    {
5279
      if (skip_past_char (&p, '{') == SUCCESS)
5280
        {
5281
          /* [Rn], {expr} - unindexed, with option */
5282
          if (parse_immediate (&p, &inst.operands[i].imm,
5283
                               0, 255, TRUE) == FAIL)
5284
            return PARSE_OPERAND_FAIL;
5285
 
5286
          if (skip_past_char (&p, '}') == FAIL)
5287
            {
5288
              inst.error = _("'}' expected at end of 'option' field");
5289
              return PARSE_OPERAND_FAIL;
5290
            }
5291
          if (inst.operands[i].preind)
5292
            {
5293
              inst.error = _("cannot combine index with option");
5294
              return PARSE_OPERAND_FAIL;
5295
            }
5296
          *str = p;
5297
          return PARSE_OPERAND_SUCCESS;
5298
        }
5299
      else
5300
        {
5301
          inst.operands[i].postind = 1;
5302
          inst.operands[i].writeback = 1;
5303
 
5304
          if (inst.operands[i].preind)
5305
            {
5306
              inst.error = _("cannot combine pre- and post-indexing");
5307
              return PARSE_OPERAND_FAIL;
5308
            }
5309
 
5310
          if (*p == '+') p++;
5311
          else if (*p == '-') p++, inst.operands[i].negative = 1;
5312
 
5313
          if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5314
            {
5315
              /* We might be using the immediate for alignment already. If we
5316
                 are, OR the register number into the low-order bits.  */
5317
              if (inst.operands[i].immisalign)
5318
                inst.operands[i].imm |= reg;
5319
              else
5320
                inst.operands[i].imm = reg;
5321
              inst.operands[i].immisreg = 1;
5322
 
5323
              if (skip_past_comma (&p) == SUCCESS)
5324
                if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5325
                  return PARSE_OPERAND_FAIL;
5326
            }
5327
          else
5328
            {
5329
              char *q = p;
5330
              if (inst.operands[i].negative)
5331
                {
5332
                  inst.operands[i].negative = 0;
5333
                  p--;
5334
                }
5335
              if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5336
                return PARSE_OPERAND_FAIL;
5337
              /* If the offset is 0, find out if it's a +0 or -0.  */
5338
              if (inst.reloc.exp.X_op == O_constant
5339
                  && inst.reloc.exp.X_add_number == 0)
5340
                {
5341
                  skip_whitespace (q);
5342
                  if (*q == '#')
5343
                    {
5344
                      q++;
5345
                      skip_whitespace (q);
5346
                    }
5347
                  if (*q == '-')
5348
                    inst.operands[i].negative = 1;
5349
                }
5350
            }
5351
        }
5352
    }
5353
 
5354
  /* If at this point neither .preind nor .postind is set, we have a
5355
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
5356
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5357
    {
5358
      inst.operands[i].preind = 1;
5359
      inst.reloc.exp.X_op = O_constant;
5360
      inst.reloc.exp.X_add_number = 0;
5361
    }
5362
  *str = p;
5363
  return PARSE_OPERAND_SUCCESS;
5364
}
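/* Illustrative examples of addresses accepted by parse_address_main above
   (assumed typical GAS ARM syntax, not part of the original source):

       [r0]                  bare base, shorthand for [r0, #0]
       [r0, #4]              pre-indexed immediate offset
       [r0, #-4]!            pre-indexed with writeback
       [r0, r1, lsl #2]      register offset with shift
       [r0], #4              post-indexed immediate offset
       [r0], r1              post-indexed register offset
       [r0], {4}             unindexed, coprocessor 'option' field (0..255)
       [r0, :64]             element/structure alignment for Neon loads/stores  */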
5365
 
5366
static int
5367
parse_address (char **str, int i)
5368
{
5369
  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5370
         ? SUCCESS : FAIL;
5371
}
5372
 
5373
static parse_operand_result
5374
parse_address_group_reloc (char **str, int i, group_reloc_type type)
5375
{
5376
  return parse_address_main (str, i, 1, type);
5377
}
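/* Illustrative example of a group-relocation address (assumed syntax; the
   relocation names come from the group_reloc_table defined elsewhere in
   this file):

       ldr   r0, [r1, #:pc_g1:(some_symbol)]

   The ":name:" prefix selects the table entry, and GROUP_LDR, GROUP_LDRS or
   GROUP_LDC picks the relocation variant for the instruction class.  */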
5378
 
5379
/* Parse an operand for a MOVW or MOVT instruction.  */
5380
static int
5381
parse_half (char **str)
5382
{
5383
  char * p;
5384
 
5385
  p = *str;
5386
  skip_past_char (&p, '#');
5387
  if (strncasecmp (p, ":lower16:", 9) == 0)
5388
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
5389
  else if (strncasecmp (p, ":upper16:", 9) == 0)
5390
    inst.reloc.type = BFD_RELOC_ARM_MOVT;
5391
 
5392
  if (inst.reloc.type != BFD_RELOC_UNUSED)
5393
    {
5394
      p += 9;
5395
      skip_whitespace (p);
5396
    }
5397
 
5398
  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5399
    return FAIL;
5400
 
5401
  if (inst.reloc.type == BFD_RELOC_UNUSED)
5402
    {
5403
      if (inst.reloc.exp.X_op != O_constant)
5404
        {
5405
          inst.error = _("constant expression expected");
5406
          return FAIL;
5407
        }
5408
      if (inst.reloc.exp.X_add_number < 0
5409
          || inst.reloc.exp.X_add_number > 0xffff)
5410
        {
5411
          inst.error = _("immediate value out of range");
5412
          return FAIL;
5413
        }
5414
    }
5415
  *str = p;
5416
  return SUCCESS;
5417
}
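/* Illustrative examples for parse_half (assumed GAS syntax, not part of the
   original source):

       movw  r0, #:lower16:some_symbol    selects BFD_RELOC_ARM_MOVW
       movt  r0, #:upper16:some_symbol    selects BFD_RELOC_ARM_MOVT
       movw  r1, #0x1234                  plain constant, must be 0..0xffff  */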
5418
 
5419
/* Miscellaneous. */
5420
 
5421
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
5422
   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
5423
static int
5424
parse_psr (char **str, bfd_boolean lhs)
5425
{
5426
  char *p;
5427
  unsigned long psr_field;
5428
  const struct asm_psr *psr;
5429
  char *start;
5430
  bfd_boolean is_apsr = FALSE;
5431
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5432
 
5433
  /* PR gas/12698:  If the user has specified -march=all then m_profile will
5434
     be TRUE, but we want to ignore it in this case as we are building for any
5435
     CPU type, including non-m variants.  */
5436
  if (selected_cpu.core == arm_arch_any.core)
5437
    m_profile = FALSE;
5438
 
5439
  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
5440
     feature for ease of use and backwards compatibility.  */
5441
  p = *str;
5442
  if (strncasecmp (p, "SPSR", 4) == 0)
5443
    {
5444
      if (m_profile)
5445
        goto unsupported_psr;
5446
 
5447
      psr_field = SPSR_BIT;
5448
    }
5449
  else if (strncasecmp (p, "CPSR", 4) == 0)
5450
    {
5451
      if (m_profile)
5452
        goto unsupported_psr;
5453
 
5454
      psr_field = 0;
5455
    }
5456
  else if (strncasecmp (p, "APSR", 4) == 0)
5457
    {
5458
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5459
         and ARMv7-R architecture CPUs.  */
5460
      is_apsr = TRUE;
5461
      psr_field = 0;
5462
    }
5463
  else if (m_profile)
5464
    {
5465
      start = p;
5466
      do
5467
        p++;
5468
      while (ISALNUM (*p) || *p == '_');
5469
 
5470
      if (strncasecmp (start, "iapsr", 5) == 0
5471
          || strncasecmp (start, "eapsr", 5) == 0
5472
          || strncasecmp (start, "xpsr", 4) == 0
5473
          || strncasecmp (start, "psr", 3) == 0)
5474
        p = start + strcspn (start, "rR") + 1;
5475
 
5476
      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5477
                                                  p - start);
5478
 
5479
      if (!psr)
5480
        return FAIL;
5481
 
5482
      /* If APSR is being written, a bitfield may be specified.  Note that
5483
         APSR itself is handled above.  */
5484
      if (psr->field <= 3)
5485
        {
5486
          psr_field = psr->field;
5487
          is_apsr = TRUE;
5488
          goto check_suffix;
5489
        }
5490
 
5491
      *str = p;
5492
      /* M-profile MSR instructions have the mask field set to "10", except
5493
         *PSR variants which modify APSR, which may use a different mask (and
5494
         have been handled already).  Do that by setting the PSR_f field
5495
         here.  */
5496
      return psr->field | (lhs ? PSR_f : 0);
5497
    }
5498
  else
5499
    goto unsupported_psr;
5500
 
5501
  p += 4;
5502
check_suffix:
5503
  if (*p == '_')
5504
    {
5505
      /* A suffix follows.  */
5506
      p++;
5507
      start = p;
5508
 
5509
      do
5510
        p++;
5511
      while (ISALNUM (*p) || *p == '_');
5512
 
5513
      if (is_apsr)
5514
        {
5515
          /* APSR uses a notation for bits, rather than fields.  */
5516
          unsigned int nzcvq_bits = 0;
5517
          unsigned int g_bit = 0;
5518
          char *bit;
5519
 
5520
          for (bit = start; bit != p; bit++)
5521
            {
5522
              switch (TOLOWER (*bit))
5523
                {
5524
                case 'n':
5525
                  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5526
                  break;
5527
 
5528
                case 'z':
5529
                  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5530
                  break;
5531
 
5532
                case 'c':
5533
                  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5534
                  break;
5535
 
5536
                case 'v':
5537
                  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5538
                  break;
5539
 
5540
                case 'q':
5541
                  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5542
                  break;
5543
 
5544
                case 'g':
5545
                  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5546
                  break;
5547
 
5548
                default:
5549
                  inst.error = _("unexpected bit specified after APSR");
5550
                  return FAIL;
5551
                }
5552
            }
5553
 
5554
          if (nzcvq_bits == 0x1f)
5555
            psr_field |= PSR_f;
5556
 
5557
          if (g_bit == 0x1)
5558
            {
5559
              if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5560
                {
5561
                  inst.error = _("selected processor does not "
5562
                                 "support DSP extension");
5563
                  return FAIL;
5564
                }
5565
 
5566
              psr_field |= PSR_s;
5567
            }
5568
 
5569
          if ((nzcvq_bits & 0x20) != 0
5570
              || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5571
              || (g_bit & 0x2) != 0)
5572
            {
5573
              inst.error = _("bad bitmask specified after APSR");
5574
              return FAIL;
5575
            }
5576
        }
5577
      else
5578
        {
5579
          psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5580
                                                      p - start);
5581
          if (!psr)
5582
            goto error;
5583
 
5584
          psr_field |= psr->field;
5585
        }
5586
    }
5587
  else
5588
    {
5589
      if (ISALNUM (*p))
5590
        goto error;    /* Garbage after "[CS]PSR".  */
5591
 
5592
      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
5593
         is deprecated, but allow it anyway.  */
5594
      if (is_apsr && lhs)
5595
        {
5596
          psr_field |= PSR_f;
5597
          as_tsktsk (_("writing to APSR without specifying a bitmask is "
5598
                       "deprecated"));
5599
        }
5600
      else if (!m_profile)
5601
        /* These bits are never right for M-profile devices: don't set them
5602
           (only code paths which read/write APSR reach here).  */
5603
        psr_field |= (PSR_c | PSR_f);
5604
    }
5605
  *str = p;
5606
  return psr_field;
5607
 
5608
 unsupported_psr:
5609
  inst.error = _("selected processor does not support requested special "
5610
                 "purpose register");
5611
  return FAIL;
5612
 
5613
 error:
5614
  inst.error = _("flag for {c}psr instruction expected");
5615
  return FAIL;
5616
}
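/* Illustrative examples of PSR operands handled above (assumed syntax; the
   M-profile names are whatever arm_v7m_psr_hsh was populated with elsewhere
   in this file):

       msr  CPSR_fc, r0        A/R-profile field mask ('f' and 'c')
       msr  SPSR_cxsf, r1      SPSR with all four fields
       msr  APSR_nzcvq, r2     APSR bit notation (n, z, c, v, q, g)
       msr  APSR_g, r3         'g' requires the DSP extension
       mrs  r0, PRIMASK        M-profile special register (assumed name)  */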
5617
 
5618
/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
5619
   value suitable for splatting into the AIF field of the instruction.  */
5620
 
5621
static int
5622
parse_cps_flags (char **str)
5623
{
5624
  int val = 0;
5625
  int saw_a_flag = 0;
5626
  char *s = *str;
5627
 
5628
  for (;;)
5629
    switch (*s++)
5630
      {
5631
      case '\0': case ',':
5632
        goto done;
5633
 
5634
      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5635
      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5636
      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5637
 
5638
      default:
5639
        inst.error = _("unrecognized CPS flag");
5640
        return FAIL;
5641
      }
5642
 
5643
 done:
5644
  if (saw_a_flag == 0)
5645
    {
5646
      inst.error = _("missing CPS flags");
5647
      return FAIL;
5648
    }
5649
 
5650
  *str = s - 1;
5651
  return val;
5652
}
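/* Illustrative examples (assumed syntax): the flag letters map onto the AIF
   field as a=0x4, i=0x2, f=0x1, so

       cpsie if      returns 0x3 (enable IRQ and FIQ)
       cpsid aif     returns 0x7 (disable all three)  */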
5653
 
5654
/* Parse an endian specifier ("BE" or "LE", case insensitive);
5655
   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
5656
 
5657
static int
5658
parse_endian_specifier (char **str)
5659
{
5660
  int little_endian;
5661
  char *s = *str;
5662
 
5663
  if (strncasecmp (s, "BE", 2))
5664
    little_endian = 0;
5665
  else if (strncasecmp (s, "LE", 2))
5666
    little_endian = 1;
5667
  else
5668
    {
5669
      inst.error = _("valid endian specifiers are be or le");
5670
      return FAIL;
5671
    }
5672
 
5673
  if (ISALNUM (s[2]) || s[2] == '_')
5674
    {
5675
      inst.error = _("valid endian specifiers are be or le");
5676
      return FAIL;
5677
    }
5678
 
5679
  *str = s + 2;
5680
  return little_endian;
5681
}
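/* Illustrative examples (assumed syntax); the specifier is matched
   case-insensitively:

       setend be
       setend le  */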
5682
 
5683
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
5684
   suitable for poking into the rotate field of an sxt or sxta
5685
   instruction, or FAIL on error.  */
5686
 
5687
static int
5688
parse_ror (char **str)
5689
{
5690
  int rot;
5691
  char *s = *str;
5692
 
5693
  if (strncasecmp (s, "ROR", 3) == 0)
5694
    s += 3;
5695
  else
5696
    {
5697
      inst.error = _("missing rotation field after comma");
5698
      return FAIL;
5699
    }
5700
 
5701
  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5702
    return FAIL;
5703
 
5704
  switch (rot)
5705
    {
5706
    case  0: *str = s; return 0x0;
5707
    case  8: *str = s; return 0x1;
5708
    case 16: *str = s; return 0x2;
5709
    case 24: *str = s; return 0x3;
5710
 
5711
    default:
5712
      inst.error = _("rotation can only be 0, 8, 16, or 24");
5713
      return FAIL;
5714
    }
5715
}
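/* Illustrative examples (assumed syntax): the rotation must be 0, 8, 16 or
   24 and is returned as the 2-bit encoding 0..3, e.g.

       sxtb  r0, r1, ror #8
       uxtah r2, r3, r4, ror #16  */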
5716
 
5717
/* Parse a conditional code (from conds[] below).  The value returned is in the
5718
   range 0 .. 14, or FAIL.  */
5719
static int
5720
parse_cond (char **str)
5721
{
5722
  char *q;
5723
  const struct asm_cond *c;
5724
  int n;
5725
  /* Condition codes are always 2 characters, so matching up to
5726
     3 characters is sufficient.  */
5727
  char cond[3];
5728
 
5729
  q = *str;
5730
  n = 0;
5731
  while (ISALPHA (*q) && n < 3)
5732
    {
5733
      cond[n] = TOLOWER (*q);
5734
      q++;
5735
      n++;
5736
    }
5737
 
5738
  c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5739
  if (!c)
5740
    {
5741
      inst.error = _("condition required");
5742
      return FAIL;
5743
    }
5744
 
5745
  *str = q;
5746
  return c->value;
5747
}
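/* Illustrative examples (assumed syntax): the codes looked up in
   arm_cond_hsh are the usual ARM two-letter conditions (eq, ne, cs, cc, mi,
   pl, vs, vc, hi, ls, ge, lt, gt, le, al), e.g.

       addeq r0, r1, r2
       bne   some_label  */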
5748
 
5749
/* Parse an option for a barrier instruction.  Returns the encoding for the
5750
   option, or FAIL.  */
5751
static int
5752
parse_barrier (char **str)
5753
{
5754
  char *p, *q;
5755
  const struct asm_barrier_opt *o;
5756
 
5757
  p = q = *str;
5758
  while (ISALPHA (*q))
5759
    q++;
5760
 
5761
  o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5762
                                                    q - p);
5763
  if (!o)
5764
    return FAIL;
5765
 
5766
  *str = q;
5767
  return o->value;
5768
}
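/* Illustrative examples (assumed syntax; the option names are whatever
   arm_barrier_opt_hsh was populated with elsewhere in this file):

       dmb  sy
       dsb  ish
       isb  sy      ISB accepts only SY (0xf); see po_barrier_or_imm below  */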
5769
 
5770
/* Parse the operands of a table branch instruction.  Similar to a memory
5771
   operand.  */
5772
static int
5773
parse_tb (char **str)
5774
{
5775
  char * p = *str;
5776
  int reg;
5777
 
5778
  if (skip_past_char (&p, '[') == FAIL)
5779
    {
5780
      inst.error = _("'[' expected");
5781
      return FAIL;
5782
    }
5783
 
5784
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5785
    {
5786
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5787
      return FAIL;
5788
    }
5789
  inst.operands[0].reg = reg;
5790
 
5791
  if (skip_past_comma (&p) == FAIL)
5792
    {
5793
      inst.error = _("',' expected");
5794
      return FAIL;
5795
    }
5796
 
5797
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5798
    {
5799
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5800
      return FAIL;
5801
    }
5802
  inst.operands[0].imm = reg;
5803
 
5804
  if (skip_past_comma (&p) == SUCCESS)
5805
    {
5806
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5807
        return FAIL;
5808
      if (inst.reloc.exp.X_add_number != 1)
5809
        {
5810
          inst.error = _("invalid shift");
5811
          return FAIL;
5812
        }
5813
      inst.operands[0].shifted = 1;
5814
    }
5815
 
5816
  if (skip_past_char (&p, ']') == FAIL)
5817
    {
5818
      inst.error = _("']' expected");
5819
      return FAIL;
5820
    }
5821
  *str = p;
5822
  return SUCCESS;
5823
}
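/* Illustrative examples (assumed syntax):

       tbb  [r0, r1]             byte-offset table branch
       tbh  [r0, r1, lsl #1]     halfword table; LSL #1 is the only shift
                                 accepted, per the check above  */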
5824
 
5825
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5826
   information on the types the operands can take and how they are encoded.
5827
   Up to four operands may be read; this function handles setting the
5828
   ".present" field for each read operand itself.
5829
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5830
   else returns FAIL.  */
5831
 
5832
static int
5833
parse_neon_mov (char **str, int *which_operand)
5834
{
5835
  int i = *which_operand, val;
5836
  enum arm_reg_type rtype;
5837
  char *ptr = *str;
5838
  struct neon_type_el optype;
5839
 
5840
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5841
    {
5842
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
5843
      inst.operands[i].reg = val;
5844
      inst.operands[i].isscalar = 1;
5845
      inst.operands[i].vectype = optype;
5846
      inst.operands[i++].present = 1;
5847
 
5848
      if (skip_past_comma (&ptr) == FAIL)
5849
        goto wanted_comma;
5850
 
5851
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5852
        goto wanted_arm;
5853
 
5854
      inst.operands[i].reg = val;
5855
      inst.operands[i].isreg = 1;
5856
      inst.operands[i].present = 1;
5857
    }
5858
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5859
           != FAIL)
5860
    {
5861
      /* Cases 0, 1, 2, 3, 5 (D only).  */
5862
      if (skip_past_comma (&ptr) == FAIL)
5863
        goto wanted_comma;
5864
 
5865
      inst.operands[i].reg = val;
5866
      inst.operands[i].isreg = 1;
5867
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5868
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5869
      inst.operands[i].isvec = 1;
5870
      inst.operands[i].vectype = optype;
5871
      inst.operands[i++].present = 1;
5872
 
5873
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5874
        {
5875
          /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5876
             Case 13: VMOV <Sd>, <Rm>  */
5877
          inst.operands[i].reg = val;
5878
          inst.operands[i].isreg = 1;
5879
          inst.operands[i].present = 1;
5880
 
5881
          if (rtype == REG_TYPE_NQ)
5882
            {
5883
              first_error (_("can't use Neon quad register here"));
5884
              return FAIL;
5885
            }
5886
          else if (rtype != REG_TYPE_VFS)
5887
            {
5888
              i++;
5889
              if (skip_past_comma (&ptr) == FAIL)
5890
                goto wanted_comma;
5891
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5892
                goto wanted_arm;
5893
              inst.operands[i].reg = val;
5894
              inst.operands[i].isreg = 1;
5895
              inst.operands[i].present = 1;
5896
            }
5897
        }
5898
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5899
                                           &optype)) != FAIL)
5900
        {
5901
          /* Case 0: VMOV<c><q> <Qd>, <Qm>
5902
             Case 1: VMOV<c><q> <Dd>, <Dm>
5903
             Case 8: VMOV.F32 <Sd>, <Sm>
5904
             Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
5905
 
5906
          inst.operands[i].reg = val;
5907
          inst.operands[i].isreg = 1;
5908
          inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5909
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5910
          inst.operands[i].isvec = 1;
5911
          inst.operands[i].vectype = optype;
5912
          inst.operands[i].present = 1;
5913
 
5914
          if (skip_past_comma (&ptr) == SUCCESS)
5915
            {
5916
              /* Case 15.  */
5917
              i++;
5918
 
5919
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5920
                goto wanted_arm;
5921
 
5922
              inst.operands[i].reg = val;
5923
              inst.operands[i].isreg = 1;
5924
              inst.operands[i++].present = 1;
5925
 
5926
              if (skip_past_comma (&ptr) == FAIL)
5927
                goto wanted_comma;
5928
 
5929
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5930
                goto wanted_arm;
5931
 
5932
              inst.operands[i].reg = val;
5933
              inst.operands[i].isreg = 1;
5934
              inst.operands[i++].present = 1;
5935
            }
5936
        }
5937
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5938
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5939
             Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5940
             Case 10: VMOV.F32 <Sd>, #<imm>
5941
             Case 11: VMOV.F64 <Dd>, #<imm>  */
5942
        inst.operands[i].immisfloat = 1;
5943
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
5944
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5945
             Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
5946
        ;
5947
      else
5948
        {
5949
          first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5950
          return FAIL;
5951
        }
5952
    }
5953
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5954
    {
5955
      /* Cases 6, 7.  */
5956
      inst.operands[i].reg = val;
5957
      inst.operands[i].isreg = 1;
5958
      inst.operands[i++].present = 1;
5959
 
5960
      if (skip_past_comma (&ptr) == FAIL)
5961
        goto wanted_comma;
5962
 
5963
      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5964
        {
5965
          /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
5966
          inst.operands[i].reg = val;
5967
          inst.operands[i].isscalar = 1;
5968
          inst.operands[i].present = 1;
5969
          inst.operands[i].vectype = optype;
5970
        }
5971
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5972
        {
5973
          /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
5974
          inst.operands[i].reg = val;
5975
          inst.operands[i].isreg = 1;
5976
          inst.operands[i++].present = 1;
5977
 
5978
          if (skip_past_comma (&ptr) == FAIL)
5979
            goto wanted_comma;
5980
 
5981
          if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5982
              == FAIL)
5983
            {
5984
              first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5985
              return FAIL;
5986
            }
5987
 
5988
          inst.operands[i].reg = val;
5989
          inst.operands[i].isreg = 1;
5990
          inst.operands[i].isvec = 1;
5991
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5992
          inst.operands[i].vectype = optype;
5993
          inst.operands[i].present = 1;
5994
 
5995
          if (rtype == REG_TYPE_VFS)
5996
            {
5997
              /* Case 14.  */
5998
              i++;
5999
              if (skip_past_comma (&ptr) == FAIL)
6000
                goto wanted_comma;
6001
              if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6002
                                              &optype)) == FAIL)
6003
                {
6004
                  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6005
                  return FAIL;
6006
                }
6007
              inst.operands[i].reg = val;
6008
              inst.operands[i].isreg = 1;
6009
              inst.operands[i].isvec = 1;
6010
              inst.operands[i].issingle = 1;
6011
              inst.operands[i].vectype = optype;
6012
              inst.operands[i].present = 1;
6013
            }
6014
        }
6015
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6016
               != FAIL)
6017
        {
6018
          /* Case 13.  */
6019
          inst.operands[i].reg = val;
6020
          inst.operands[i].isreg = 1;
6021
          inst.operands[i].isvec = 1;
6022
          inst.operands[i].issingle = 1;
6023
          inst.operands[i].vectype = optype;
6024
          inst.operands[i++].present = 1;
6025
        }
6026
    }
6027
  else
6028
    {
6029
      first_error (_("parse error"));
6030
      return FAIL;
6031
    }
6032
 
6033
  /* Successfully parsed the operands. Update args.  */
6034
  *which_operand = i;
6035
  *str = ptr;
6036
  return SUCCESS;
6037
 
6038
 wanted_comma:
6039
  first_error (_("expected comma"));
6040
  return FAIL;
6041
 
6042
 wanted_arm:
6043
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6044
  return FAIL;
6045
}
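/* Illustrative examples of the VMOV forms parsed above (assumed syntax; the
   case numbers refer to the comments in this function and in do_neon_mov):

       vmov.32  d0[1], r2          case 4:  scalar from core register
       vmov     d0, r2, r3         case 5:  doubleword from two core registers
       vmov.f32 s0, s1             case 8:  single from single
       vmov.i32 q0, #0x12          case 2:  quad from immediate
       vmov.32  r0, d1[0]          case 6:  core register from scalar
       vmov     r0, r1, d2         case 7:  two core registers from doubleword
       vmov     s0, s1, r2, r3     case 15: two singles and two core registers  */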
6046
 
6047
/* Use this macro when the operand constraints are different
6048
   for ARM and THUMB (e.g. ldrd).  */
6049
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6050
        ((arm_operand) | ((thumb_operand) << 16))
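/* For illustration (not part of the original source): an operand defined as
   MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp) keeps the ARM code in the
   low 16 bits and the Thumb code in the high 16 bits; parse_operands below
   unpacks whichever half matches the instruction set being assembled.  */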
6051
 
6052
/* Matcher codes for parse_operands.  */
6053
enum operand_parse_code
6054
{
6055
  OP_stop,      /* end of line */
6056
 
6057
  OP_RR,        /* ARM register */
6058
  OP_RRnpc,     /* ARM register, not r15 */
6059
  OP_RRnpcsp,   /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6060
  OP_RRnpcb,    /* ARM register, not r15, in square brackets */
6061
  OP_RRnpctw,   /* ARM register, not r15 in Thumb-state or with writeback,
6062
                   optional trailing ! */
6063
  OP_RRw,       /* ARM register, not r15, optional trailing ! */
6064
  OP_RCP,       /* Coprocessor number */
6065
  OP_RCN,       /* Coprocessor register */
6066
  OP_RF,        /* FPA register */
6067
  OP_RVS,       /* VFP single precision register */
6068
  OP_RVD,       /* VFP double precision register (0..15) */
6069
  OP_RND,       /* Neon double precision register (0..31) */
6070
  OP_RNQ,       /* Neon quad precision register */
6071
  OP_RVSD,      /* VFP single or double precision register */
6072
  OP_RNDQ,      /* Neon double or quad precision register */
6073
  OP_RNSDQ,     /* Neon single, double or quad precision register */
6074
  OP_RNSC,      /* Neon scalar D[X] */
6075
  OP_RVC,       /* VFP control register */
6076
  OP_RMF,       /* Maverick F register */
6077
  OP_RMD,       /* Maverick D register */
6078
  OP_RMFX,      /* Maverick FX register */
6079
  OP_RMDX,      /* Maverick DX register */
6080
  OP_RMAX,      /* Maverick AX register */
6081
  OP_RMDS,      /* Maverick DSPSC register */
6082
  OP_RIWR,      /* iWMMXt wR register */
6083
  OP_RIWC,      /* iWMMXt wC register */
6084
  OP_RIWG,      /* iWMMXt wCG register */
6085
  OP_RXA,       /* XScale accumulator register */
6086
 
6087
  OP_REGLST,    /* ARM register list */
6088
  OP_VRSLST,    /* VFP single-precision register list */
6089
  OP_VRDLST,    /* VFP double-precision register list */
6090
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
6091
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
6092
  OP_NSTRLST,   /* Neon element/structure list */
6093
 
6094
  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
6095
  OP_RVSD_I0,   /* VFP S or D reg, or immediate zero.  */
6096
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
6097
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
6098
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
6099
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
6100
  OP_VMOV,      /* Neon VMOV operands.  */
6101
  OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN.  */
6102
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
6103
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
6104
 
6105
  OP_I0,        /* immediate zero */
6106
  OP_I7,        /* immediate value 0 .. 7 */
6107
  OP_I15,       /*                 0 .. 15 */
6108
  OP_I16,       /*                 1 .. 16 */
6109
  OP_I16z,      /*                 0 .. 16 */
6110
  OP_I31,       /*                 0 .. 31 */
6111
  OP_I31w,      /*                 0 .. 31, optional trailing ! */
6112
  OP_I32,       /*                 1 .. 32 */
6113
  OP_I32z,      /*                 0 .. 32 */
6114
  OP_I63,       /*                 0 .. 63 */
6115
  OP_I63s,      /*               -64 .. 63 */
6116
  OP_I64,       /*                 1 .. 64 */
6117
  OP_I64z,      /*                 0 .. 64 */
6118
  OP_I255,      /*                 0 .. 255 */
6119
 
6120
  OP_I4b,       /* immediate, prefix optional, 1 .. 4 */
6121
  OP_I7b,       /*                             0 .. 7 */
6122
  OP_I15b,      /*                             0 .. 15 */
6123
  OP_I31b,      /*                             0 .. 31 */
6124
 
6125
  OP_SH,        /* shifter operand */
6126
  OP_SHG,       /* shifter operand with possible group relocation */
6127
  OP_ADDR,      /* Memory address expression (any mode) */
6128
  OP_ADDRGLDR,  /* Mem addr expr (any mode) with possible LDR group reloc */
6129
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6130
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
6131
  OP_EXP,       /* arbitrary expression */
6132
  OP_EXPi,      /* same, with optional immediate prefix */
6133
  OP_EXPr,      /* same, with optional relocation suffix */
6134
  OP_HALF,      /* 0 .. 65535 or low/high reloc.  */
6135
 
6136
  OP_CPSF,      /* CPS flags */
6137
  OP_ENDI,      /* Endianness specifier */
6138
  OP_wPSR,      /* CPSR/SPSR/APSR mask for msr (writing).  */
6139
  OP_rPSR,      /* CPSR/SPSR/APSR mask for msr (reading).  */
6140
  OP_COND,      /* conditional code */
6141
  OP_TB,        /* Table branch.  */
6142
 
6143
  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
6144
 
6145
  OP_RRnpc_I0,  /* ARM register or literal 0 */
6146
  OP_RR_EXr,    /* ARM register or expression with opt. reloc suff. */
6147
  OP_RR_EXi,    /* ARM register or expression with imm prefix */
6148
  OP_RF_IF,     /* FPA register or immediate */
6149
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
6150
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6151
 
6152
  /* Optional operands.  */
6153
  OP_oI7b,       /* immediate, prefix optional, 0 .. 7 */
6154
  OP_oI31b,      /*                             0 .. 31 */
6155
  OP_oI32b,      /*                             1 .. 32 */
6156 160 khays
  OP_oI32z,      /*                             0 .. 32 */
6157 16 khays
  OP_oIffffb,    /*                             0 .. 65535 */
6158
  OP_oI255c,     /*       curly-brace enclosed, 0 .. 255 */
6159
 
6160
  OP_oRR,        /* ARM register */
6161
  OP_oRRnpc,     /* ARM register, not the PC */
6162
  OP_oRRnpcsp,   /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6163
  OP_oRRw,       /* ARM register, not r15, optional trailing ! */
6164
  OP_oRND,       /* Optional Neon double precision register */
6165
  OP_oRNQ,       /* Optional Neon quad precision register */
6166
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
6167
  OP_oRNSDQ,     /* Optional single, double or quad precision vector register */
6168
  OP_oSHll,      /* LSL immediate */
6169
  OP_oSHar,      /* ASR immediate */
6170
  OP_oSHllar,    /* LSL or ASR immediate */
6171
  OP_oROR,       /* ROR 0/8/16/24 */
6172
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
6173
 
6174
  /* Some pre-defined mixed (ARM/THUMB) operands.  */
6175
  OP_RR_npcsp           = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6176
  OP_RRnpc_npcsp        = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6177
  OP_oRRnpc_npcsp       = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6178
 
6179
  OP_FIRST_OPTIONAL = OP_oI7b
6180
};
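/* Illustrative note (not part of the original source): codes at or after
   OP_FIRST_OPTIONAL are optional.  If one of them fails to parse,
   parse_operands backtracks to the saved position and marks the operand as
   absent, so an instruction with an optional trailing rotation, for example,
   can be written either with or without ", ror #8".  */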
6181
 
6182
/* Generic instruction operand parser.  This does no encoding and no
6183
   semantic validation; it merely squirrels values away in the inst
6184
   structure.  Returns SUCCESS or FAIL depending on whether the
6185
   specified grammar matched.  */
6186
static int
6187
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6188
{
6189
  unsigned const int *upat = pattern;
6190
  char *backtrack_pos = 0;
6191
  const char *backtrack_error = 0;
6192
  int i, val, backtrack_index = 0;
6193
  enum arm_reg_type rtype;
6194
  parse_operand_result result;
6195
  unsigned int op_parse_code;
6196
 
6197
#define po_char_or_fail(chr)                    \
6198
  do                                            \
6199
    {                                           \
6200
      if (skip_past_char (&str, chr) == FAIL)   \
6201
        goto bad_args;                          \
6202
    }                                           \
6203
  while (0)
6204
 
6205
#define po_reg_or_fail(regtype)                                 \
6206
  do                                                            \
6207
    {                                                           \
6208
      val = arm_typed_reg_parse (& str, regtype, & rtype,       \
6209
                                 & inst.operands[i].vectype);   \
6210
      if (val == FAIL)                                          \
6211
        {                                                       \
6212
          first_error (_(reg_expected_msgs[regtype]));          \
6213
          goto failure;                                         \
6214
        }                                                       \
6215
      inst.operands[i].reg = val;                               \
6216
      inst.operands[i].isreg = 1;                               \
6217
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);         \
6218
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);      \
6219
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS           \
6220
                             || rtype == REG_TYPE_VFD           \
6221
                             || rtype == REG_TYPE_NQ);          \
6222
    }                                                           \
6223
  while (0)
6224
 
6225
#define po_reg_or_goto(regtype, label)                          \
6226
  do                                                            \
6227
    {                                                           \
6228
      val = arm_typed_reg_parse (& str, regtype, & rtype,       \
6229
                                 & inst.operands[i].vectype);   \
6230
      if (val == FAIL)                                          \
6231
        goto label;                                             \
6232
                                                                \
6233
      inst.operands[i].reg = val;                               \
6234
      inst.operands[i].isreg = 1;                               \
6235
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);         \
6236
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);      \
6237
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS           \
6238
                             || rtype == REG_TYPE_VFD           \
6239
                             || rtype == REG_TYPE_NQ);          \
6240
    }                                                           \
6241
  while (0)
6242
 
6243
#define po_imm_or_fail(min, max, popt)                          \
6244
  do                                                            \
6245
    {                                                           \
6246
      if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6247
        goto failure;                                           \
6248
      inst.operands[i].imm = val;                               \
6249
    }                                                           \
6250
  while (0)
6251
 
6252
#define po_scalar_or_goto(elsz, label)                                  \
6253
  do                                                                    \
6254
    {                                                                   \
6255
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);     \
6256
      if (val == FAIL)                                                  \
6257
        goto label;                                                     \
6258
      inst.operands[i].reg = val;                                       \
6259
      inst.operands[i].isscalar = 1;                                    \
6260
    }                                                                   \
6261
  while (0)
6262
 
6263
#define po_misc_or_fail(expr)                   \
6264
  do                                            \
6265
    {                                           \
6266
      if (expr)                                 \
6267
        goto failure;                           \
6268
    }                                           \
6269
  while (0)
6270
 
6271
#define po_misc_or_fail_no_backtrack(expr)              \
6272
  do                                                    \
6273
    {                                                   \
6274
      result = expr;                                    \
6275
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)    \
6276
        backtrack_pos = 0;                               \
6277
      if (result != PARSE_OPERAND_SUCCESS)              \
6278
        goto failure;                                   \
6279
    }                                                   \
6280
  while (0)
6281
 
6282
#define po_barrier_or_imm(str)                             \
6283
  do                                                       \
6284
    {                                                      \
6285
      val = parse_barrier (&str);                          \
6286
      if (val == FAIL)                                     \
6287
        {                                                  \
6288
          if (ISALPHA (*str))                              \
6289
              goto failure;                                \
6290
          else                                             \
6291
              goto immediate;                              \
6292
        }                                                  \
6293
      else                                                 \
6294
        {                                                  \
6295
          if ((inst.instruction & 0xf0) == 0x60            \
6296
              && val != 0xf)                               \
6297
            {                                              \
6298
               /* ISB can only take SY as an option.  */   \
6299
               inst.error = _("invalid barrier type");     \
6300
               goto failure;                               \
6301
            }                                              \
6302
        }                                                  \
6303
    }                                                      \
6304
  while (0)
6305
 
6306
  skip_whitespace (str);
6307
 
6308
  for (i = 0; upat[i] != OP_stop; i++)
6309
    {
6310
      op_parse_code = upat[i];
6311
      if (op_parse_code >= 1<<16)
6312
        op_parse_code = thumb ? (op_parse_code >> 16)
6313
                                : (op_parse_code & ((1<<16)-1));
6314
 
6315
      if (op_parse_code >= OP_FIRST_OPTIONAL)
6316
        {
6317
          /* Remember where we are in case we need to backtrack.  */
6318
          gas_assert (!backtrack_pos);
6319
          backtrack_pos = str;
6320
          backtrack_error = inst.error;
6321
          backtrack_index = i;
6322
        }
6323
 
6324
      if (i > 0 && (i > 1 || inst.operands[0].present))
6325
        po_char_or_fail (',');
6326
 
6327
      switch (op_parse_code)
6328
        {
6329
          /* Registers */
6330
        case OP_oRRnpc:
6331
        case OP_oRRnpcsp:
6332
        case OP_RRnpc:
6333
        case OP_RRnpcsp:
6334
        case OP_oRR:
6335
        case OP_RR:    po_reg_or_fail (REG_TYPE_RN);      break;
6336
        case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);      break;
6337
        case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);      break;
6338
        case OP_RF:    po_reg_or_fail (REG_TYPE_FN);      break;
6339
        case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);     break;
6340
        case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);     break;
6341
        case OP_oRND:
6342
        case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);     break;
6343
        case OP_RVC:
6344
          po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6345
          break;
6346
          /* Also accept generic coprocessor regs for unknown registers.  */
6347
          coproc_reg:
6348
          po_reg_or_fail (REG_TYPE_CN);
6349
          break;
6350
        case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);     break;
6351
        case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);     break;
6352
        case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);    break;
6353
        case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);    break;
6354
        case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);    break;
6355
        case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);   break;
6356
        case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);   break;
6357
        case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);   break;
6358
        case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
6359
        case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
6360
        case OP_oRNQ:
6361
        case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
6362
        case OP_oRNDQ:
6363
        case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
6364
        case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
6365
        case OP_oRNSDQ:
6366
        case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
6367
 
6368
        /* Neon scalar. Using an element size of 8 means that some invalid
6369
           scalars are accepted here, so deal with those in later code.  */
6370
        case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
6371
 
6372
        case OP_RNDQ_I0:
6373
          {
6374
            po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6375
            break;
6376
            try_imm0:
6377
            po_imm_or_fail (0, 0, TRUE);
6378
          }
6379
          break;
6380
 
6381
        case OP_RVSD_I0:
6382
          po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6383
          break;
6384
 
6385
        case OP_RR_RNSC:
6386
          {
6387
            po_scalar_or_goto (8, try_rr);
6388
            break;
6389
            try_rr:
6390
            po_reg_or_fail (REG_TYPE_RN);
6391
          }
6392
          break;
6393
 
6394
        case OP_RNSDQ_RNSC:
6395
          {
6396
            po_scalar_or_goto (8, try_nsdq);
6397
            break;
6398
            try_nsdq:
6399
            po_reg_or_fail (REG_TYPE_NSDQ);
6400
          }
6401
          break;
6402
 
6403
        case OP_RNDQ_RNSC:
6404
          {
6405
            po_scalar_or_goto (8, try_ndq);
6406
            break;
6407
            try_ndq:
6408
            po_reg_or_fail (REG_TYPE_NDQ);
6409
          }
6410
          break;
6411
 
6412
        case OP_RND_RNSC:
6413
          {
6414
            po_scalar_or_goto (8, try_vfd);
6415
            break;
6416
            try_vfd:
6417
            po_reg_or_fail (REG_TYPE_VFD);
6418
          }
6419
          break;
6420
 
6421
        case OP_VMOV:
6422
          /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6423
             not careful then bad things might happen.  */
6424
          po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6425
          break;
6426
 
6427
        case OP_RNDQ_Ibig:
6428
          {
6429
            po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6430
            break;
6431
            try_immbig:
6432
            /* There's a possibility of getting a 64-bit immediate here, so
6433
               we need special handling.  */
6434
            if (parse_big_immediate (&str, i) == FAIL)
6435
              {
6436
                inst.error = _("immediate value is out of range");
6437
                goto failure;
6438
              }
6439
          }
6440
          break;
6441
 
6442
        case OP_RNDQ_I63b:
6443
          {
6444
            po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6445
            break;
6446
            try_shimm:
6447
            po_imm_or_fail (0, 63, TRUE);
6448
          }
6449
          break;
6450
 
6451
        case OP_RRnpcb:
6452
          po_char_or_fail ('[');
6453
          po_reg_or_fail  (REG_TYPE_RN);
6454
          po_char_or_fail (']');
6455
          break;
6456
 
6457
        case OP_RRnpctw:
6458
        case OP_RRw:
6459
        case OP_oRRw:
6460
          po_reg_or_fail (REG_TYPE_RN);
6461
          if (skip_past_char (&str, '!') == SUCCESS)
6462
            inst.operands[i].writeback = 1;
6463
          break;
6464
 
6465
          /* Immediates */
6466
        case OP_I7:      po_imm_or_fail (  0,       7, FALSE);   break;
6467
        case OP_I15:     po_imm_or_fail (  0,      15, FALSE);   break;
6468
        case OP_I16:     po_imm_or_fail (  1,     16, FALSE);   break;
6469
        case OP_I16z:    po_imm_or_fail (  0,     16, FALSE);   break;
6470
        case OP_I31:     po_imm_or_fail (  0,      31, FALSE);   break;
6471
        case OP_I32:     po_imm_or_fail (  1,     32, FALSE);   break;
6472
        case OP_I32z:    po_imm_or_fail (  0,     32, FALSE);   break;
6473
        case OP_I63s:    po_imm_or_fail (-64,     63, FALSE);   break;
6474
        case OP_I63:     po_imm_or_fail (  0,     63, FALSE);   break;
6475
        case OP_I64:     po_imm_or_fail (  1,     64, FALSE);   break;
6476
        case OP_I64z:    po_imm_or_fail (  0,     64, FALSE);   break;
6477
        case OP_I255:    po_imm_or_fail (  0,     255, FALSE);   break;
6478
 
6479
        case OP_I4b:     po_imm_or_fail (  1,      4, TRUE);    break;
6480
        case OP_oI7b:
6481
        case OP_I7b:     po_imm_or_fail (  0,       7, TRUE);    break;
6482
        case OP_I15b:    po_imm_or_fail (  0,      15, TRUE);    break;
6483
        case OP_oI31b:
6484
        case OP_I31b:    po_imm_or_fail (  0,      31, TRUE);    break;
6485
        case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
6486 160 khays
        case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
6487 16 khays
        case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);     break;
6488
 
6489
          /* Immediate variants */
6490
        case OP_oI255c:
6491
          po_char_or_fail ('{');
6492
          po_imm_or_fail (0, 255, TRUE);
6493
          po_char_or_fail ('}');
6494
          break;
6495
 
6496
        case OP_I31w:
6497
          /* The expression parser chokes on a trailing !, so we have
6498
             to find it first and zap it.  */
6499
          {
6500
            char *s = str;
6501
            while (*s && *s != ',')
6502
              s++;
6503
            if (s[-1] == '!')
6504
              {
6505
                s[-1] = '\0';
6506
                inst.operands[i].writeback = 1;
6507
              }
6508
            po_imm_or_fail (0, 31, TRUE);
6509
            if (str == s - 1)
6510
              str = s;
6511
          }
6512
          break;
6513
 
6514
          /* Expressions */
6515
        case OP_EXPi:   EXPi:
6516
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6517
                                              GE_OPT_PREFIX));
6518
          break;
6519
 
6520
        case OP_EXP:
6521
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6522
                                              GE_NO_PREFIX));
6523
          break;
6524
 
6525
        case OP_EXPr:   EXPr:
6526
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6527
                                              GE_NO_PREFIX));
6528
          if (inst.reloc.exp.X_op == O_symbol)
6529
            {
6530
              val = parse_reloc (&str);
6531
              if (val == -1)
6532
                {
6533
                  inst.error = _("unrecognized relocation suffix");
6534
                  goto failure;
6535
                }
6536
              else if (val != BFD_RELOC_UNUSED)
6537
                {
6538
                  inst.operands[i].imm = val;
6539
                  inst.operands[i].hasreloc = 1;
6540
                }
6541
            }
6542
          break;
6543
 
6544
          /* Operand for MOVW or MOVT.  */
6545
        case OP_HALF:
6546
          po_misc_or_fail (parse_half (&str));
6547
          break;
6548
 
6549
          /* Register or expression.  */
6550
        case OP_RR_EXr:   po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6551
        case OP_RR_EXi:   po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6552
 
6553
          /* Register or immediate.  */
6554
        case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
6555
        I0:               po_imm_or_fail (0, 0, FALSE);         break;
6556
 
6557
        case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
6558
        IF:
6559
          if (!is_immediate_prefix (*str))
6560
            goto bad_args;
6561
          str++;
6562
          val = parse_fpa_immediate (&str);
6563
          if (val == FAIL)
6564
            goto failure;
6565
          /* FPA immediates are encoded as registers 8-15.
6566
             parse_fpa_immediate has already applied the offset.  */
6567
          inst.operands[i].reg = val;
6568
          inst.operands[i].isreg = 1;
6569
          break;
6570
 
6571
        case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6572
        I32z:             po_imm_or_fail (0, 32, FALSE);   break;
6573
 
6574
          /* Two kinds of register.  */
6575
        case OP_RIWR_RIWC:
6576
          {
6577
            struct reg_entry *rege = arm_reg_parse_multi (&str);
6578
            if (!rege
6579
                || (rege->type != REG_TYPE_MMXWR
6580
                    && rege->type != REG_TYPE_MMXWC
6581
                    && rege->type != REG_TYPE_MMXWCG))
6582
              {
6583
                inst.error = _("iWMMXt data or control register expected");
6584
                goto failure;
6585
              }
6586
            inst.operands[i].reg = rege->number;
6587
            inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6588
          }
6589
          break;
6590
 
6591
        case OP_RIWC_RIWG:
6592
          {
6593
            struct reg_entry *rege = arm_reg_parse_multi (&str);
6594
            if (!rege
6595
                || (rege->type != REG_TYPE_MMXWC
6596
                    && rege->type != REG_TYPE_MMXWCG))
6597
              {
6598
                inst.error = _("iWMMXt control register expected");
6599
                goto failure;
6600
              }
6601
            inst.operands[i].reg = rege->number;
6602
            inst.operands[i].isreg = 1;
6603
          }
6604
          break;
6605
 
6606
          /* Misc */
6607
        case OP_CPSF:    val = parse_cps_flags (&str);          break;
6608
        case OP_ENDI:    val = parse_endian_specifier (&str);   break;
6609
        case OP_oROR:    val = parse_ror (&str);                break;
6610
        case OP_COND:    val = parse_cond (&str);               break;
6611
        case OP_oBARRIER_I15:
6612
          po_barrier_or_imm (str); break;
6613
          immediate:
6614
          if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6615
            goto failure;
6616
          break;
6617
 
6618
        case OP_wPSR:
6619
        case OP_rPSR:
6620
          po_reg_or_goto (REG_TYPE_RNB, try_psr);
6621
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6622
            {
6623
              inst.error = _("Banked registers are not available with this "
6624
                             "architecture.");
6625
              goto failure;
6626
            }
6627
          break;
6628
          try_psr:
6629
          val = parse_psr (&str, op_parse_code == OP_wPSR);
6630
          break;
6631
 
6632
        case OP_APSR_RR:
6633
          po_reg_or_goto (REG_TYPE_RN, try_apsr);
6634
          break;
6635
          try_apsr:
6636
          /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
6637
             instruction).  */
6638
          if (strncasecmp (str, "APSR_", 5) == 0)
6639
            {
6640
              unsigned found = 0;
6641
              str += 5;
6642
              while (found < 15)
6643
                switch (*str++)
6644
                  {
6645
                  case 'c': found = (found & 1) ? 16 : found | 1; break;
6646
                  case 'n': found = (found & 2) ? 16 : found | 2; break;
6647
                  case 'z': found = (found & 4) ? 16 : found | 4; break;
6648
                  case 'v': found = (found & 8) ? 16 : found | 8; break;
6649
                  default: found = 16;
6650
                  }
6651
              if (found != 15)
6652
                goto failure;
6653
              inst.operands[i].isvec = 1;
6654
              /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
6655
              inst.operands[i].reg = REG_PC;
6656
            }
6657
          else
6658
            goto failure;
6659
          break;
6660
 
6661
        case OP_TB:
6662
          po_misc_or_fail (parse_tb (&str));
6663
          break;
6664
 
6665
          /* Register lists.  */
6666
        case OP_REGLST:
6667
          val = parse_reg_list (&str);
6668
          if (*str == '^')
6669
            {
6670
              inst.operands[1].writeback = 1;
6671
              str++;
6672
            }
6673
          break;
6674
 
6675
        case OP_VRSLST:
6676
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
6677
          break;
6678
 
6679
        case OP_VRDLST:
6680
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
6681
          break;
6682
 
6683
        case OP_VRSDLST:
6684
          /* Allow Q registers too.  */
6685
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6686
                                    REGLIST_NEON_D);
6687
          if (val == FAIL)
6688
            {
6689
              inst.error = NULL;
6690
              val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6691
                                        REGLIST_VFP_S);
6692
              inst.operands[i].issingle = 1;
6693
            }
6694
          break;
6695
 
6696
        case OP_NRDLST:
6697
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6698
                                    REGLIST_NEON_D);
6699
          break;
6700
 
6701
        case OP_NSTRLST:
6702
          val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
6703
                                           &inst.operands[i].vectype);
6704
          break;
6705
 
6706
          /* Addressing modes */
6707
        case OP_ADDR:
6708
          po_misc_or_fail (parse_address (&str, i));
6709
          break;
6710
 
6711
        case OP_ADDRGLDR:
6712
          po_misc_or_fail_no_backtrack (
6713
            parse_address_group_reloc (&str, i, GROUP_LDR));
6714
          break;
6715
 
6716
        case OP_ADDRGLDRS:
6717
          po_misc_or_fail_no_backtrack (
6718
            parse_address_group_reloc (&str, i, GROUP_LDRS));
6719
          break;
6720
 
6721
        case OP_ADDRGLDC:
6722
          po_misc_or_fail_no_backtrack (
6723
            parse_address_group_reloc (&str, i, GROUP_LDC));
6724
          break;
6725
 
6726
        case OP_SH:
6727
          po_misc_or_fail (parse_shifter_operand (&str, i));
6728
          break;
6729
 
6730
        case OP_SHG:
6731
          po_misc_or_fail_no_backtrack (
6732
            parse_shifter_operand_group_reloc (&str, i));
6733
          break;
6734
 
6735
        case OP_oSHll:
6736
          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6737
          break;
6738
 
6739
        case OP_oSHar:
6740
          po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6741
          break;
6742
 
6743
        case OP_oSHllar:
6744
          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6745
          break;
6746
 
6747
        default:
6748
          as_fatal (_("unhandled operand code %d"), op_parse_code);
6749
        }
6750
 
6751
      /* Various value-based sanity checks and shared operations.  We
6752
         do not signal immediate failures for the register constraints;
6753
         this allows a syntax error to take precedence.  */
6754
      switch (op_parse_code)
6755
        {
6756
        case OP_oRRnpc:
6757
        case OP_RRnpc:
6758
        case OP_RRnpcb:
6759
        case OP_RRw:
6760
        case OP_oRRw:
6761
        case OP_RRnpc_I0:
6762
          if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6763
            inst.error = BAD_PC;
6764
          break;
6765
 
6766
        case OP_oRRnpcsp:
6767
        case OP_RRnpcsp:
6768
          if (inst.operands[i].isreg)
6769
            {
6770
              if (inst.operands[i].reg == REG_PC)
6771
                inst.error = BAD_PC;
6772
              else if (inst.operands[i].reg == REG_SP)
6773
                inst.error = BAD_SP;
6774
            }
6775
          break;
6776
 
6777
        case OP_RRnpctw:
6778
          if (inst.operands[i].isreg
6779
              && inst.operands[i].reg == REG_PC
6780
              && (inst.operands[i].writeback || thumb))
6781
            inst.error = BAD_PC;
6782
          break;
6783
 
6784
        case OP_CPSF:
6785
        case OP_ENDI:
6786
        case OP_oROR:
6787
        case OP_wPSR:
6788
        case OP_rPSR:
6789
        case OP_COND:
6790
        case OP_oBARRIER_I15:
6791
        case OP_REGLST:
6792
        case OP_VRSLST:
6793
        case OP_VRDLST:
6794
        case OP_VRSDLST:
6795
        case OP_NRDLST:
6796
        case OP_NSTRLST:
6797
          if (val == FAIL)
6798
            goto failure;
6799
          inst.operands[i].imm = val;
6800
          break;
6801
 
6802
        default:
6803
          break;
6804
        }
6805
 
6806
      /* If we get here, this operand was successfully parsed.  */
6807
      inst.operands[i].present = 1;
6808
      continue;
6809
 
6810
    bad_args:
6811
      inst.error = BAD_ARGS;
6812
 
6813
    failure:
6814
      if (!backtrack_pos)
6815
        {
6816
          /* The parse routine should already have set inst.error, but set a
6817
             default here just in case.  */
6818
          if (!inst.error)
6819
            inst.error = _("syntax error");
6820
          return FAIL;
6821
        }
6822
 
6823
      /* Do not backtrack over a trailing optional argument that
6824
         absorbed some text.  We will only fail again, with the
6825
         'garbage following instruction' error message, which is
6826
         probably less helpful than the current one.  */
6827
      if (backtrack_index == i && backtrack_pos != str
6828
          && upat[i+1] == OP_stop)
6829
        {
6830
          if (!inst.error)
6831
            inst.error = _("syntax error");
6832
          return FAIL;
6833
        }
6834
 
6835
      /* Try again, skipping the optional argument at backtrack_pos.  */
6836
      str = backtrack_pos;
6837
      inst.error = backtrack_error;
6838
      inst.operands[backtrack_index].present = 0;
6839
      i = backtrack_index;
6840
      backtrack_pos = 0;
6841
    }
6842
 
6843
  /* Check that we have parsed all the arguments.  */
6844
  if (*str != '\0' && !inst.error)
6845
    inst.error = _("garbage following instruction");
6846
 
6847
  return inst.error ? FAIL : SUCCESS;
6848
}
6849
 
6850
#undef po_char_or_fail
6851
#undef po_reg_or_fail
6852
#undef po_reg_or_goto
6853
#undef po_imm_or_fail
6854
#undef po_scalar_or_fail
6855
#undef po_barrier_or_imm
6856
 
6857
/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err)                   \
  do                                            \
    {                                           \
      if (expr)                                 \
        {                                       \
          inst.error = err;                     \
          return;                               \
        }                                       \
    }                                           \
  while (0)
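
/* Typical use, as in the encoding functions below:

     constraint (Rd == REG_PC, BAD_PC);

   which records the diagnostic in inst.error and returns early from the
   calling encoder.  */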

/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.  */
#define reject_bad_reg(reg)                             \
  do                                                    \
   if (reg == REG_SP || reg == REG_PC)                  \
     {                                                  \
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;  \
       return;                                          \
     }                                                  \
  while (0)

/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  */
#define warn_deprecated_sp(reg)                 \
  do                                            \
    if (warn_on_deprecated && reg == REG_SP)    \
       as_warn (_("use of r13 is deprecated")); \
  while (0)

/* Functions for operand encoding.  ARM, then Thumb.  */

#define rotate_left(v, n) (v << n | v >> (32 - n))
6892
 
6893
/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int a, i;

  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

  return FAIL;
}
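
/* For example, 0x0000f000 is representable: rotating it left by 20 bits
   yields 0x0f, so the function returns 0x0f | (20 << 7) == 0xa0f, i.e. an
   8-bit constant of 0x0f with a rotate-right-by-20 field.  A value such as
   0x00012345 spans more than eight significant bits under every even
   rotation and therefore yields FAIL.  */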

/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */
static unsigned int
encode_thumb32_immediate (unsigned int val)
{
  unsigned int a, i;

  if (val <= 0xff)
    return val;

  for (i = 1; i <= 24; i++)
    {
      a = val >> i;
      if ((val & ~(0xff << i)) == 0)
        return ((val >> i) & 0x7f) | ((32 - i) << 7);
    }

  a = val & 0xff;
  if (val == ((a << 16) | a))
    return 0x100 | a;
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return 0x300 | a;

  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);

  return FAIL;
}
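
/* For example, 0x00ab0000 encodes as 0x82b (the byte 0xab placed at bits
   23:16 by the rotation field), and 0x00ff00ff matches the "00XY00XY"
   replication pattern, encoding as 0x1ff.  A value that fits none of the
   modified-immediate patterns yields FAIL.  */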
6937
/* Encode a VFP SP or DP register number into inst.instruction.  */
6938
 
6939
static void
6940
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6941
{
6942
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6943
      && reg > 15)
6944
    {
6945
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6946
        {
6947
          if (thumb_mode)
6948
            ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6949
                                    fpu_vfp_ext_d32);
6950
          else
6951
            ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6952
                                    fpu_vfp_ext_d32);
6953
        }
6954
      else
6955
        {
6956
          first_error (_("D register out of range for selected VFP version"));
6957
          return;
6958
        }
6959
    }
6960
 
6961
  switch (pos)
6962
    {
6963
    case VFP_REG_Sd:
6964
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6965
      break;
6966
 
6967
    case VFP_REG_Sn:
6968
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6969
      break;
6970
 
6971
    case VFP_REG_Sm:
6972
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6973
      break;
6974
 
6975
    case VFP_REG_Dd:
6976
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6977
      break;
6978
 
6979
    case VFP_REG_Dn:
6980
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6981
      break;
6982
 
6983
    case VFP_REG_Dm:
6984
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6985
      break;
6986
 
6987
    default:
6988
      abort ();
6989
    }
6990
}
6991
 
6992
/* Encode a <shift> in an ARM-format instruction.  The immediate,
6993
   if any, is handled by md_apply_fix.   */
6994
static void
6995
encode_arm_shift (int i)
6996
{
6997
  if (inst.operands[i].shift_kind == SHIFT_RRX)
6998
    inst.instruction |= SHIFT_ROR << 5;
6999
  else
7000
    {
7001
      inst.instruction |= inst.operands[i].shift_kind << 5;
7002
      if (inst.operands[i].immisreg)
7003
        {
7004
          inst.instruction |= SHIFT_BY_REG;
7005
          inst.instruction |= inst.operands[i].imm << 8;
7006
        }
7007
      else
7008
        inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7009
    }
7010
}
7011
 
7012
static void
7013
encode_arm_shifter_operand (int i)
7014
{
7015
  if (inst.operands[i].isreg)
7016
    {
7017
      inst.instruction |= inst.operands[i].reg;
7018
      encode_arm_shift (i);
7019
    }
7020
  else
7021
    inst.instruction |= INST_IMMEDIATE;
7022
}
7023
 
7024
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
7025
static void
7026
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7027
{
7028
  gas_assert (inst.operands[i].isreg);
7029
  inst.instruction |= inst.operands[i].reg << 16;
7030
 
7031
  if (inst.operands[i].preind)
7032
    {
7033
      if (is_t)
7034
        {
7035
          inst.error = _("instruction does not accept preindexed addressing");
7036
          return;
7037
        }
7038
      inst.instruction |= PRE_INDEX;
7039
      if (inst.operands[i].writeback)
7040
        inst.instruction |= WRITE_BACK;
7041
 
7042
    }
7043
  else if (inst.operands[i].postind)
7044
    {
7045
      gas_assert (inst.operands[i].writeback);
7046
      if (is_t)
7047
        inst.instruction |= WRITE_BACK;
7048
    }
7049
  else /* unindexed - only for coprocessor */
7050
    {
7051
      inst.error = _("instruction does not accept unindexed addressing");
7052
      return;
7053
    }
7054
 
7055
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7056
      && (((inst.instruction & 0x000f0000) >> 16)
7057
          == ((inst.instruction & 0x0000f000) >> 12)))
7058
    as_warn ((inst.instruction & LOAD_BIT)
7059
             ? _("destination register same as write-back base")
7060
             : _("source register same as write-back base"));
7061
}
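
/* For example, "ldr r0, [r0], #4" post-indexes the base while also loading
   into it, so the base/destination comparison above fires and the
   "destination register same as write-back base" warning is given.  */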
7062
 
7063
/* inst.operands[i] was set up by parse_address.  Encode it into an
7064
   ARM-format mode 2 load or store instruction.  If is_t is true,
7065
   reject forms that cannot be used with a T instruction (i.e. not
7066
   post-indexed).  */
7067
static void
7068
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7069
{
7070
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7071
 
7072
  encode_arm_addr_mode_common (i, is_t);
7073
 
7074
  if (inst.operands[i].immisreg)
7075
    {
7076
      constraint ((inst.operands[i].imm == REG_PC
7077
                   || (is_pc && inst.operands[i].writeback)),
7078
                  BAD_PC_ADDRESSING);
7079
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
7080
      inst.instruction |= inst.operands[i].imm;
7081
      if (!inst.operands[i].negative)
7082
        inst.instruction |= INDEX_UP;
7083
      if (inst.operands[i].shifted)
7084
        {
7085
          if (inst.operands[i].shift_kind == SHIFT_RRX)
7086
            inst.instruction |= SHIFT_ROR << 5;
7087
          else
7088
            {
7089
              inst.instruction |= inst.operands[i].shift_kind << 5;
7090
              inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7091
            }
7092
        }
7093
    }
7094
  else /* immediate offset in inst.reloc */
7095
    {
7096
      if (is_pc && !inst.reloc.pc_rel)
7097
        {
7098
          const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7099
 
7100
          /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
7101
             cannot use PC in addressing.
7102
             PC cannot be used in writeback addressing, either.  */
7103
          constraint ((is_t || inst.operands[i].writeback),
7104
                      BAD_PC_ADDRESSING);
7105
 
7106
          /* Use of PC in str is deprecated for ARMv7.  */
7107
          if (warn_on_deprecated
7108
              && !is_load
7109
              && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7110
            as_warn (_("use of PC in this instruction is deprecated"));
7111
        }
7112
 
7113
      if (inst.reloc.type == BFD_RELOC_UNUSED)
7114
        {
7115
          /* Prefer + for zero encoded value.  */
7116
          if (!inst.operands[i].negative)
7117
            inst.instruction |= INDEX_UP;
7118
          inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7119
        }
7120
    }
7121
}
7122
 
7123
/* inst.operands[i] was set up by parse_address.  Encode it into an
7124
   ARM-format mode 3 load or store instruction.  Reject forms that
7125
   cannot be used with such instructions.  If is_t is true, reject
7126
   forms that cannot be used with a T instruction (i.e. not
7127
   post-indexed).  */
7128
static void
7129
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7130
{
7131
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
7132
    {
7133
      inst.error = _("instruction does not accept scaled register index");
7134
      return;
7135
    }
7136
 
7137
  encode_arm_addr_mode_common (i, is_t);
7138
 
7139
  if (inst.operands[i].immisreg)
7140
    {
7141
      constraint ((inst.operands[i].imm == REG_PC
7142
                   || inst.operands[i].reg == REG_PC),
7143
                  BAD_PC_ADDRESSING);
7144
      inst.instruction |= inst.operands[i].imm;
7145
      if (!inst.operands[i].negative)
7146
        inst.instruction |= INDEX_UP;
7147
    }
7148
  else /* immediate offset in inst.reloc */
7149
    {
7150
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7151
                   && inst.operands[i].writeback),
7152
                  BAD_PC_WRITEBACK);
7153
      inst.instruction |= HWOFFSET_IMM;
7154
      if (inst.reloc.type == BFD_RELOC_UNUSED)
7155
        {
7156
          /* Prefer + for zero encoded value.  */
7157
          if (!inst.operands[i].negative)
7158
            inst.instruction |= INDEX_UP;
7159
 
7160
          inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7161
        }
7162
    }
7163
}
7164
 
7165
/* inst.operands[i] was set up by parse_address.  Encode it into an
7166
   ARM-format instruction.  Reject all forms which cannot be encoded
7167
   into a coprocessor load/store instruction.  If wb_ok is false,
7168
   reject use of writeback; if unind_ok is false, reject use of
7169
   unindexed addressing.  If reloc_override is not 0, use it instead
7170
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7171
   (in which case it is preserved).  */
7172
 
7173
static int
7174
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
7175
{
7176
  inst.instruction |= inst.operands[i].reg << 16;
7177
 
7178
  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
7179
 
7180
  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
7181
    {
7182
      gas_assert (!inst.operands[i].writeback);
7183
      if (!unind_ok)
7184
        {
7185
          inst.error = _("instruction does not support unindexed addressing");
7186
          return FAIL;
7187
        }
7188
      inst.instruction |= inst.operands[i].imm;
7189
      inst.instruction |= INDEX_UP;
7190
      return SUCCESS;
7191
    }
7192
 
7193
  if (inst.operands[i].preind)
7194
    inst.instruction |= PRE_INDEX;
7195
 
7196
  if (inst.operands[i].writeback)
7197
    {
7198
      if (inst.operands[i].reg == REG_PC)
7199
        {
7200
          inst.error = _("pc may not be used with write-back");
7201
          return FAIL;
7202
        }
7203
      if (!wb_ok)
7204
        {
7205
          inst.error = _("instruction does not support writeback");
7206
          return FAIL;
7207
        }
7208
      inst.instruction |= WRITE_BACK;
7209
    }
7210
 
7211
  if (reloc_override)
7212
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
7213
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
7214
            || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
7215
           && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
7216
    {
7217
      if (thumb_mode)
7218
        inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
7219
      else
7220
        inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
7221
    }
7222
 
7223
  /* Prefer + for zero encoded value.  */
7224
  if (!inst.operands[i].negative)
7225
    inst.instruction |= INDEX_UP;
7226
 
7227
  return SUCCESS;
7228
}
7229
 
7230
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
7231
   Determine whether it can be performed with a move instruction; if
7232
   it can, convert inst.instruction to that move instruction and
7233
   return TRUE; if it can't, convert inst.instruction to a literal-pool
7234
   load and return FALSE.  If this is not a valid thing to do in the
7235
   current context, set inst.error and return TRUE.
7236
 
7237
   inst.operands[i] describes the destination register.  */
7238
 
7239
static bfd_boolean
7240
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
7241
{
7242
  unsigned long tbit;
7243
 
7244
  if (thumb_p)
7245
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7246
  else
7247
    tbit = LOAD_BIT;
7248
 
7249
  if ((inst.instruction & tbit) == 0)
7250
    {
7251
      inst.error = _("invalid pseudo operation");
7252
      return TRUE;
7253
    }
7254
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
7255
    {
7256
      inst.error = _("constant expression expected");
7257
      return TRUE;
7258
    }
7259
  if (inst.reloc.exp.X_op == O_constant)
7260
    {
7261
      if (thumb_p)
7262
        {
7263
          if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
7264
            {
7265
              /* This can be done with a mov(1) instruction.  */
7266
              inst.instruction  = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7267
              inst.instruction |= inst.reloc.exp.X_add_number;
7268
              return TRUE;
7269
            }
7270
        }
7271
      else
7272
        {
7273
          int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
7274
          if (value != FAIL)
7275
            {
7276
              /* This can be done with a mov instruction.  */
7277
              inst.instruction &= LITERAL_MASK;
7278
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7279
              inst.instruction |= value & 0xfff;
7280
              return TRUE;
7281
            }
7282
 
7283
          value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
7284
          if (value != FAIL)
7285
            {
7286
              /* This can be done with a mvn instruction.  */
7287
              inst.instruction &= LITERAL_MASK;
7288
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7289
              inst.instruction |= value & 0xfff;
7290
              return TRUE;
7291
            }
7292
        }
7293
    }
7294
 
7295
  if (add_to_lit_pool () == FAIL)
7296
    {
7297
      inst.error = _("literal pool insertion failed");
7298
      return TRUE;
7299
    }
7300
  inst.operands[1].reg = REG_PC;
7301
  inst.operands[1].isreg = 1;
7302
  inst.operands[1].preind = 1;
7303
  inst.reloc.pc_rel = 1;
7304
  inst.reloc.type = (thumb_p
7305
                     ? BFD_RELOC_ARM_THUMB_OFFSET
7306
                     : (mode_3
7307
                        ? BFD_RELOC_ARM_HWLITERAL
7308
                        : BFD_RELOC_ARM_LITERAL));
7309
  return FALSE;
7310
}
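
/* Thus, in ARM state, "ldr r0, =0xff" collapses to "mov r0, #0xff" and
   "ldr r0, =0xffffff00" to "mvn r0, #0xff", while a constant such as
   0x12345678, which is not a rotated 8-bit value either directly or
   inverted, is dropped into the literal pool and loaded PC-relative.  */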
7311
 
7312
/* Functions for instruction encoding, sorted by sub-architecture.
7313
   First some generics; their names are taken from the conventional
7314
   bit positions for register arguments in ARM format instructions.  */
7315
 
7316
static void
7317
do_noargs (void)
7318
{
7319
}
7320
 
7321
static void
7322
do_rd (void)
7323
{
7324
  inst.instruction |= inst.operands[0].reg << 12;
7325
}
7326
 
7327
static void
7328
do_rd_rm (void)
7329
{
7330
  inst.instruction |= inst.operands[0].reg << 12;
7331
  inst.instruction |= inst.operands[1].reg;
7332
}
7333
 
7334
static void
7335
do_rd_rn (void)
7336
{
7337
  inst.instruction |= inst.operands[0].reg << 12;
7338
  inst.instruction |= inst.operands[1].reg << 16;
7339
}
7340
 
7341
static void
7342
do_rn_rd (void)
7343
{
7344
  inst.instruction |= inst.operands[0].reg << 16;
7345
  inst.instruction |= inst.operands[1].reg << 12;
7346
}
7347
 
7348
static void
7349
do_rd_rm_rn (void)
7350
{
7351
  unsigned Rn = inst.operands[2].reg;
7352
  /* Enforce restrictions on SWP instruction.  */
7353
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
7354
    {
7355
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
7356
                  _("Rn must not overlap other operands"));
7357
 
7358
      /* SWP{b} is deprecated for ARMv6* and ARMv7.  */
7359
      if (warn_on_deprecated
7360
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7361
        as_warn (_("swp{b} use is deprecated for this architecture"));
7362
 
7363
    }
7364
  inst.instruction |= inst.operands[0].reg << 12;
7365
  inst.instruction |= inst.operands[1].reg;
7366
  inst.instruction |= Rn << 16;
7367
}
7368
 
7369
static void
7370
do_rd_rn_rm (void)
7371
{
7372
  inst.instruction |= inst.operands[0].reg << 12;
7373
  inst.instruction |= inst.operands[1].reg << 16;
7374
  inst.instruction |= inst.operands[2].reg;
7375
}
7376
 
7377
static void
7378
do_rm_rd_rn (void)
7379
{
7380
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
7381
  constraint (((inst.reloc.exp.X_op != O_constant
7382
                && inst.reloc.exp.X_op != O_illegal)
7383
               || inst.reloc.exp.X_add_number != 0),
7384
              BAD_ADDR_MODE);
7385
  inst.instruction |= inst.operands[0].reg;
7386
  inst.instruction |= inst.operands[1].reg << 12;
7387
  inst.instruction |= inst.operands[2].reg << 16;
7388
}
7389
 
7390
static void
7391
do_imm0 (void)
7392
{
7393
  inst.instruction |= inst.operands[0].imm;
7394
}
7395
 
7396
static void
7397
do_rd_cpaddr (void)
7398
{
7399
  inst.instruction |= inst.operands[0].reg << 12;
7400
  encode_arm_cp_address (1, TRUE, TRUE, 0);
7401
}
7402
 
7403
/* ARM instructions, in alphabetical order by function name (except
7404
   that wrapper functions appear immediately after the function they
7405
   wrap).  */
7406
 
7407
/* This is a pseudo-op of the form "adr rd, label" to be converted
7408
   into a relative address of the form "add rd, pc, #label-.-8".  */
7409
 
7410
static void
7411
do_adr (void)
7412
{
7413
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
7414
 
7415
  /* Frag hacking will turn this into a sub instruction if the offset turns
7416
     out to be negative.  */
7417
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7418
  inst.reloc.pc_rel = 1;
7419
  inst.reloc.exp.X_add_number -= 8;
7420
}
7421
 
7422
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
     add rd, pc, #low(label-.-8)
     add rd, rd, #high(label-.-8)  */
7426
 
7427
static void
7428
do_adrl (void)
7429
{
7430
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
7431
 
7432
  /* Frag hacking will turn this into a sub instruction if the offset turns
7433
     out to be negative.  */
7434
  inst.reloc.type              = BFD_RELOC_ARM_ADRL_IMMEDIATE;
7435
  inst.reloc.pc_rel            = 1;
7436
  inst.size                    = INSN_SIZE * 2;
7437
  inst.reloc.exp.X_add_number -= 8;
7438
}
7439
 
7440
static void
7441
do_arit (void)
7442
{
7443
  if (!inst.operands[1].present)
7444
    inst.operands[1].reg = inst.operands[0].reg;
7445
  inst.instruction |= inst.operands[0].reg << 12;
7446
  inst.instruction |= inst.operands[1].reg << 16;
7447
  encode_arm_shifter_operand (2);
7448
}
7449
 
7450
static void
do_barrier (void)
{
  if (inst.operands[0].present)
    {
      constraint ((inst.instruction & 0xf0) != 0x40
                  && (inst.operands[0].imm > 0xf
                      || inst.operands[0].imm < 0x0),
                  _("bad barrier type"));
      inst.instruction |= inst.operands[0].imm;
    }
  else
    inst.instruction |= 0xf;
}
7464
 
7465
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
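
/* For example, "bfc r0, #4, #8" clears bits 11:4 of r0: the operands give
   lsb = 4 and width = 8, so msb is 12 and the encoding carries lsb = 4 in
   bits 11:7 and msb - 1 = 11 in bits 20:16.  */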
7476
 
7477
static void
7478
do_bfi (void)
7479
{
7480
  unsigned int msb;
7481
 
7482
  /* #0 in second position is alternative syntax for bfc, which is
7483
     the same instruction but with REG_PC in the Rm field.  */
7484
  if (!inst.operands[1].isreg)
7485
    inst.operands[1].reg = REG_PC;
7486
 
7487
  msb = inst.operands[2].imm + inst.operands[3].imm;
7488
  constraint (msb > 32, _("bit-field extends past end of register"));
7489
  /* The instruction encoding stores the LSB and MSB,
7490
     not the LSB and width.  */
7491
  inst.instruction |= inst.operands[0].reg << 12;
7492
  inst.instruction |= inst.operands[1].reg;
7493
  inst.instruction |= inst.operands[2].imm << 7;
7494
  inst.instruction |= (msb - 1) << 16;
7495
}
7496
 
7497
static void
7498
do_bfx (void)
7499
{
7500
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7501
              _("bit-field extends past end of register"));
7502
  inst.instruction |= inst.operands[0].reg << 12;
7503
  inst.instruction |= inst.operands[1].reg;
7504
  inst.instruction |= inst.operands[2].imm << 7;
7505
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
7506
}
7507
 
7508
/* ARM V5 breakpoint instruction (argument parse)
7509
     BKPT <16 bit unsigned immediate>
7510
     Instruction is not conditional.
7511
        The bit pattern given in insns[] has the COND_ALWAYS condition,
7512
        and it is an error if the caller tried to override that.  */
7513
 
7514
static void
7515
do_bkpt (void)
7516
{
7517
  /* Top 12 of 16 bits to bits 19:8.  */
7518
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7519
 
7520
  /* Bottom 4 of 16 bits to bits 3:0.  */
7521
  inst.instruction |= inst.operands[0].imm & 0xf;
7522
}
7523
 
7524
static void
7525
encode_branch (int default_reloc)
7526
{
7527
  if (inst.operands[0].hasreloc)
7528
    {
7529
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
7530
                  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
7531
                  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
7532
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
7533
        ? BFD_RELOC_ARM_PLT32
7534
        : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
7535
    }
7536
  else
7537
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
7538
  inst.reloc.pc_rel = 1;
7539
}
7540
 
7541
static void
7542
do_branch (void)
7543
{
7544
#ifdef OBJ_ELF
7545
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7546
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7547
  else
7548
#endif
7549
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7550
}
7551
 
7552
static void
7553
do_bl (void)
7554
{
7555
#ifdef OBJ_ELF
7556
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7557
    {
7558
      if (inst.cond == COND_ALWAYS)
7559
        encode_branch (BFD_RELOC_ARM_PCREL_CALL);
7560
      else
7561
        encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7562
    }
7563
  else
7564
#endif
7565
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7566
}
7567
 
7568
/* ARM V5 branch-link-exchange instruction (argument parse)
7569
     BLX <target_addr>          ie BLX(1)
7570
     BLX{<condition>} <Rm>      ie BLX(2)
7571
   Unfortunately, there are two different opcodes for this mnemonic.
7572
   So, the insns[].value is not used, and the code here zaps values
7573
        into inst.instruction.
7574
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
7575
 
7576
static void
7577
do_blx (void)
7578
{
7579
  if (inst.operands[0].isreg)
7580
    {
7581
      /* Arg is a register; the opcode provided by insns[] is correct.
7582
         It is not illegal to do "blx pc", just useless.  */
7583
      if (inst.operands[0].reg == REG_PC)
7584
        as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7585
 
7586
      inst.instruction |= inst.operands[0].reg;
7587
    }
7588
  else
7589
    {
7590
      /* Arg is an address; this instruction cannot be executed
7591
         conditionally, and the opcode must be adjusted.
7592
         We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
         where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
7594
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
7595
      inst.instruction = 0xfa000000;
7596
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
7597
    }
7598
}
7599
 
7600
static void
7601
do_bx (void)
7602
{
7603
  bfd_boolean want_reloc;
7604
 
7605
  if (inst.operands[0].reg == REG_PC)
7606
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7607
 
7608
  inst.instruction |= inst.operands[0].reg;
7609
  /* Output R_ARM_V4BX relocations if this is an EABI object that looks
     like it is for ARMv4t or earlier.  */
7611
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
7612
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
7613
      want_reloc = TRUE;
7614
 
7615
#ifdef OBJ_ELF
7616
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
7617
#endif
7618
    want_reloc = FALSE;
7619
 
7620
  if (want_reloc)
7621
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
7622
}
7623
 
7624
 
7625
/* ARM v5TEJ.  Jump to Jazelle code.  */
7626
 
7627
static void
7628
do_bxj (void)
7629
{
7630
  if (inst.operands[0].reg == REG_PC)
7631
    as_tsktsk (_("use of r15 in bxj is not really useful"));
7632
 
7633
  inst.instruction |= inst.operands[0].reg;
7634
}
7635
 
7636
/* Co-processor data operation:
7637
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7638
      CDP2      <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}  */
7639
static void
7640
do_cdp (void)
7641
{
7642
  inst.instruction |= inst.operands[0].reg << 8;
7643
  inst.instruction |= inst.operands[1].imm << 20;
7644
  inst.instruction |= inst.operands[2].reg << 12;
7645
  inst.instruction |= inst.operands[3].reg << 16;
7646
  inst.instruction |= inst.operands[4].reg;
7647
  inst.instruction |= inst.operands[5].imm << 5;
7648
}
7649
 
7650
static void
7651
do_cmp (void)
7652
{
7653
  inst.instruction |= inst.operands[0].reg << 16;
7654
  encode_arm_shifter_operand (1);
7655
}
7656
 
7657
/* Transfer between coprocessor and ARM registers.
7658
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7659
   MRC2
7660
   MCR{cond}
7661
   MCR2
7662
 
7663
   No special properties.  */
7664
 
7665
static void
7666
do_co_reg (void)
7667
{
7668
  unsigned Rd;
7669
 
7670
  Rd = inst.operands[2].reg;
7671
  if (thumb_mode)
7672
    {
7673
      if (inst.instruction == 0xee000010
7674
          || inst.instruction == 0xfe000010)
7675
        /* MCR, MCR2  */
7676
        reject_bad_reg (Rd);
7677
      else
7678
        /* MRC, MRC2  */
7679
        constraint (Rd == REG_SP, BAD_SP);
7680
    }
7681
  else
7682
    {
7683
      /* MCR */
7684
      if (inst.instruction == 0xe000010)
7685
        constraint (Rd == REG_PC, BAD_PC);
7686
    }
7687
 
7688
 
7689
  inst.instruction |= inst.operands[0].reg << 8;
7690
  inst.instruction |= inst.operands[1].imm << 21;
7691
  inst.instruction |= Rd << 12;
7692
  inst.instruction |= inst.operands[3].reg << 16;
7693
  inst.instruction |= inst.operands[4].reg;
7694
  inst.instruction |= inst.operands[5].imm << 5;
7695
}
7696
 
7697
/* Transfer between coprocessor register and pair of ARM registers.
7698
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7699
   MCRR2
7700
   MRRC{cond}
7701
   MRRC2
7702
 
7703
   Two XScale instructions are special cases of these:
7704
 
7705
     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7706
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7707
 
7708
   Result unpredictable if Rd or Rn is R15.  */
7709
 
7710
static void
7711
do_co_reg2c (void)
7712
{
7713
  unsigned Rd, Rn;
7714
 
7715
  Rd = inst.operands[2].reg;
7716
  Rn = inst.operands[3].reg;
7717
 
7718
  if (thumb_mode)
7719
    {
7720
      reject_bad_reg (Rd);
7721
      reject_bad_reg (Rn);
7722
    }
7723
  else
7724
    {
7725
      constraint (Rd == REG_PC, BAD_PC);
7726
      constraint (Rn == REG_PC, BAD_PC);
7727
    }
7728
 
7729
  inst.instruction |= inst.operands[0].reg << 8;
7730
  inst.instruction |= inst.operands[1].imm << 4;
7731
  inst.instruction |= Rd << 12;
7732
  inst.instruction |= Rn << 16;
7733
  inst.instruction |= inst.operands[4].reg;
7734
}
7735
 
7736
static void
7737
do_cpsi (void)
7738
{
7739
  inst.instruction |= inst.operands[0].imm << 6;
7740
  if (inst.operands[1].present)
7741
    {
7742
      inst.instruction |= CPSI_MMOD;
7743
      inst.instruction |= inst.operands[1].imm;
7744
    }
7745
}
7746
 
7747
static void
7748
do_dbg (void)
7749
{
7750
  inst.instruction |= inst.operands[0].imm;
7751
}
7752
 
7753
static void
7754
do_div (void)
7755
{
7756
  unsigned Rd, Rn, Rm;
7757
 
7758
  Rd = inst.operands[0].reg;
7759
  Rn = (inst.operands[1].present
7760
        ? inst.operands[1].reg : Rd);
7761
  Rm = inst.operands[2].reg;
7762
 
7763
  constraint ((Rd == REG_PC), BAD_PC);
7764
  constraint ((Rn == REG_PC), BAD_PC);
7765
  constraint ((Rm == REG_PC), BAD_PC);
7766
 
7767
  inst.instruction |= Rd << 16;
7768
  inst.instruction |= Rn << 0;
7769
  inst.instruction |= Rm << 8;
7770
}
7771
 
7772
static void
7773
do_it (void)
7774
{
7775
  /* There is no IT instruction in ARM mode.  We
7776
     process it to do the validation as if in
7777
     thumb mode, just in case the code gets
7778
     assembled for thumb using the unified syntax.  */
7779
 
7780
  inst.size = 0;
7781
  if (unified_syntax)
7782
    {
7783
      set_it_insn_type (IT_INSN);
7784
      now_it.mask = (inst.instruction & 0xf) | 0x10;
7785
      now_it.cc = inst.operands[0].imm;
7786
    }
7787
}
7788
 
7789
static void
7790
do_ldmstm (void)
7791
{
7792
  int base_reg = inst.operands[0].reg;
7793
  int range = inst.operands[1].imm;
7794
 
7795
  inst.instruction |= base_reg << 16;
7796
  inst.instruction |= range;
7797
 
7798
  if (inst.operands[1].writeback)
7799
    inst.instruction |= LDM_TYPE_2_OR_3;
7800
 
7801
  if (inst.operands[0].writeback)
7802
    {
7803
      inst.instruction |= WRITE_BACK;
7804
      /* Check for unpredictable uses of writeback.  */
7805
      if (inst.instruction & LOAD_BIT)
7806
        {
7807
          /* Not allowed in LDM type 2.  */
7808
          if ((inst.instruction & LDM_TYPE_2_OR_3)
7809
              && ((range & (1 << REG_PC)) == 0))
7810
            as_warn (_("writeback of base register is UNPREDICTABLE"));
7811
          /* Only allowed if base reg not in list for other types.  */
7812
          else if (range & (1 << base_reg))
7813
            as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7814
        }
7815
      else /* STM.  */
7816
        {
7817
          /* Not allowed for type 2.  */
7818
          if (inst.instruction & LDM_TYPE_2_OR_3)
7819
            as_warn (_("writeback of base register is UNPREDICTABLE"));
7820
          /* Only allowed if base reg not in list, or first in list.  */
7821
          else if ((range & (1 << base_reg))
7822
                   && (range & ((1 << base_reg) - 1)))
7823
            as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7824
        }
7825
    }
7826
}
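
/* For example, "ldmia r0!, {r0, r1}" loads into a base register that is
   also being written back, so the UNPREDICTABLE warning above is issued;
   "stmia r1!, {r0, r1}" warns because the base register appears in the
   list but is not the lowest register in it.  */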
7827
 
7828
/* ARMv5TE load-consecutive (argument parse)
7829
   Mode is like LDRH.
7830
 
7831
     LDRccD R, mode
7832
     STRccD R, mode.  */
7833
 
7834
static void
7835
do_ldrd (void)
7836
{
7837
  constraint (inst.operands[0].reg % 2 != 0,
7838 148 khays
              _("first transfer register must be even"));
7839 16 khays
  constraint (inst.operands[1].present
7840
              && inst.operands[1].reg != inst.operands[0].reg + 1,
7841 148 khays
              _("can only transfer two consecutive registers"));
7842 16 khays
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7843
  constraint (!inst.operands[2].isreg, _("'[' expected"));
7844
 
7845
  if (!inst.operands[1].present)
7846
    inst.operands[1].reg = inst.operands[0].reg + 1;
7847
 
7848 148 khays
  /* encode_arm_addr_mode_3 will diagnose overlap between the base
7849
     register and the first register written; we have to diagnose
7850
     overlap between the base and the second register written here.  */
7851 16 khays
 
7852 148 khays
  if (inst.operands[2].reg == inst.operands[1].reg
7853
      && (inst.operands[2].writeback || inst.operands[2].postind))
7854
    as_warn (_("base register written back, and overlaps "
7855
               "second transfer register"));
7856 16 khays
 
7857 148 khays
  if (!(inst.instruction & V4_STR_BIT))
7858
    {
7859 16 khays
      /* For an index-register load, the index register must not overlap the
7860 148 khays
        destination (even if not write-back).  */
7861
      if (inst.operands[2].immisreg
7862
              && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
7863
              || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
7864
        as_warn (_("index register overlaps transfer register"));
7865 16 khays
    }
7866
  inst.instruction |= inst.operands[0].reg << 12;
7867
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
7868
}
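
/* For example, "ldrd r0, r1, [r2]" is accepted (and "ldrd r0, [r2]" implies
   r1 as the second transfer register), whereas "ldrd r1, r2, [r3]" is
   rejected because the first transfer register must be even.  */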
7869
 
7870
static void
7871
do_ldrex (void)
7872
{
7873
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
7874
              || inst.operands[1].postind || inst.operands[1].writeback
7875
              || inst.operands[1].immisreg || inst.operands[1].shifted
7876
              || inst.operands[1].negative
7877
              /* This can arise if the programmer has written
7878
                   strex rN, rM, foo
7879
                 or if they have mistakenly used a register name as the last
                 operand, e.g.:
                   strex rN, rM, rX
                 It is very difficult to distinguish between these two cases
                 because "rX" might actually be a label, i.e. the register
                 name has been occluded by a symbol of the same name.  So we
7885
                 just generate a general 'bad addressing mode' type error
7886
                 message and leave it up to the programmer to discover the
7887
                 true cause and fix their mistake.  */
7888
              || (inst.operands[1].reg == REG_PC),
7889
              BAD_ADDR_MODE);
7890
 
7891
  constraint (inst.reloc.exp.X_op != O_constant
7892
              || inst.reloc.exp.X_add_number != 0,
7893
              _("offset must be zero in ARM encoding"));
7894
 
7895
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
7896
 
7897
  inst.instruction |= inst.operands[0].reg << 12;
7898
  inst.instruction |= inst.operands[1].reg << 16;
7899
  inst.reloc.type = BFD_RELOC_UNUSED;
7900
}
7901
 
7902
static void
7903
do_ldrexd (void)
7904
{
7905
  constraint (inst.operands[0].reg % 2 != 0,
7906
              _("even register required"));
7907
  constraint (inst.operands[1].present
7908
              && inst.operands[1].reg != inst.operands[0].reg + 1,
7909
              _("can only load two consecutive registers"));
7910
  /* If op 1 were present and equal to PC, this function wouldn't
7911
     have been called in the first place.  */
7912
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7913
 
7914
  inst.instruction |= inst.operands[0].reg << 12;
7915
  inst.instruction |= inst.operands[2].reg << 16;
7916
}
7917
 
7918
static void
7919
do_ldst (void)
7920
{
7921
  inst.instruction |= inst.operands[0].reg << 12;
7922
  if (!inst.operands[1].isreg)
7923
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7924
      return;
7925
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7926
}
7927
 
7928
static void
7929
do_ldstt (void)
7930
{
7931
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
7932
     reject [Rn,...].  */
7933
  if (inst.operands[1].preind)
7934
    {
7935
      constraint (inst.reloc.exp.X_op != O_constant
7936
                  || inst.reloc.exp.X_add_number != 0,
7937
                  _("this instruction requires a post-indexed address"));
7938
 
7939
      inst.operands[1].preind = 0;
7940
      inst.operands[1].postind = 1;
7941
      inst.operands[1].writeback = 1;
7942
    }
7943
  inst.instruction |= inst.operands[0].reg << 12;
7944
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7945
}
7946
 
7947
/* Halfword and signed-byte load/store operations.  */
7948
 
7949
static void
7950
do_ldstv4 (void)
7951
{
7952
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
7953
  inst.instruction |= inst.operands[0].reg << 12;
7954
  if (!inst.operands[1].isreg)
7955
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7956
      return;
7957
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7958
}
7959
 
7960
static void
7961
do_ldsttv4 (void)
7962
{
7963
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
7964
     reject [Rn,...].  */
7965
  if (inst.operands[1].preind)
7966
    {
7967
      constraint (inst.reloc.exp.X_op != O_constant
7968
                  || inst.reloc.exp.X_add_number != 0,
7969
                  _("this instruction requires a post-indexed address"));
7970
 
7971
      inst.operands[1].preind = 0;
7972
      inst.operands[1].postind = 1;
7973
      inst.operands[1].writeback = 1;
7974
    }
7975
  inst.instruction |= inst.operands[0].reg << 12;
7976
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7977
}
7978
 
7979
/* Co-processor register load/store.
7980
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>  */
7981
static void
7982
do_lstc (void)
7983
{
7984
  inst.instruction |= inst.operands[0].reg << 8;
7985
  inst.instruction |= inst.operands[1].reg << 12;
7986
  encode_arm_cp_address (2, TRUE, TRUE, 0);
7987
}
7988
 
7989
static void
7990
do_mlas (void)
7991
{
7992
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
7993
  if (inst.operands[0].reg == inst.operands[1].reg
7994
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7995
      && !(inst.instruction & 0x00400000))
7996
    as_tsktsk (_("Rd and Rm should be different in mla"));
7997
 
7998
  inst.instruction |= inst.operands[0].reg << 16;
7999
  inst.instruction |= inst.operands[1].reg;
8000
  inst.instruction |= inst.operands[2].reg << 8;
8001
  inst.instruction |= inst.operands[3].reg << 12;
8002
}
8003
 
8004
static void
8005
do_mov (void)
8006
{
8007
  inst.instruction |= inst.operands[0].reg << 12;
8008
  encode_arm_shifter_operand (1);
8009
}
8010
 
8011
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
8012
static void
8013
do_mov16 (void)
8014
{
8015
  bfd_vma imm;
8016
  bfd_boolean top;
8017
 
8018
  top = (inst.instruction & 0x00400000) != 0;
8019
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
              _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
              _(":upper16: not allowed in this instruction"));
8023
  inst.instruction |= inst.operands[0].reg << 12;
8024
  if (inst.reloc.type == BFD_RELOC_UNUSED)
8025
    {
8026
      imm = inst.reloc.exp.X_add_number;
8027
      /* The value is in two pieces: 0:11, 16:19.  */
8028
      inst.instruction |= (imm & 0x00000fff);
8029
      inst.instruction |= (imm & 0x0000f000) << 4;
8030
    }
8031
}
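
/* For example, "movw r0, #0x1234" puts 0x234 into bits 11:0 and 0x1 into
   bits 19:16 of the encoding; the core reassembles them as the 16-bit
   value 0x1234 in the low half of r0.  */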
8032
 
8033
static void do_vfp_nsyn_opcode (const char *);
8034
 
8035
static int
8036
do_vfp_nsyn_mrs (void)
8037
{
8038
  if (inst.operands[0].isvec)
8039
    {
8040
      if (inst.operands[1].reg != 1)
8041
        first_error (_("operand 1 must be FPSCR"));
8042
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8043
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8044
      do_vfp_nsyn_opcode ("fmstat");
8045
    }
8046
  else if (inst.operands[1].isvec)
8047
    do_vfp_nsyn_opcode ("fmrx");
8048
  else
8049
    return FAIL;
8050
 
8051
  return SUCCESS;
8052
}
8053
 
8054
static int
8055
do_vfp_nsyn_msr (void)
8056
{
8057
  if (inst.operands[0].isvec)
8058
    do_vfp_nsyn_opcode ("fmxr");
8059
  else
8060
    return FAIL;
8061
 
8062
  return SUCCESS;
8063
}
8064
 
8065
static void
8066
do_vmrs (void)
8067
{
8068
  unsigned Rt = inst.operands[0].reg;
8069
 
8070
  if (thumb_mode && inst.operands[0].reg == REG_SP)
8071
    {
8072
      inst.error = BAD_SP;
8073
      return;
8074
    }
8075
 
8076
  /* APSR_ sets isvec. All other refs to PC are illegal.  */
8077
  if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
8078
    {
8079
      inst.error = BAD_PC;
8080
      return;
8081
    }
8082
 
8083
  if (inst.operands[1].reg != 1)
8084
    first_error (_("operand 1 must be FPSCR"));
8085
 
8086
  inst.instruction |= (Rt << 12);
8087
}
8088
 
8089
static void
8090
do_vmsr (void)
8091
{
8092
  unsigned Rt = inst.operands[1].reg;
8093
 
8094
  if (thumb_mode)
8095
    reject_bad_reg (Rt);
8096
  else if (Rt == REG_PC)
8097
    {
8098
      inst.error = BAD_PC;
8099
      return;
8100
    }
8101
 
8102
  if (inst.operands[0].reg != 1)
8103
    first_error (_("operand 0 must be FPSCR"));
8104
 
8105
  inst.instruction |= (Rt << 12);
8106
}
8107
 
8108
static void
8109
do_mrs (void)
8110
{
8111
  unsigned br;
8112
 
8113
  if (do_vfp_nsyn_mrs () == SUCCESS)
8114
    return;
8115
 
8116
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8117
  inst.instruction |= inst.operands[0].reg << 12;
8118
 
8119
  if (inst.operands[1].isreg)
8120
    {
8121
      br = inst.operands[1].reg;
8122
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
8123
        as_bad (_("bad register for mrs"));
8124
    }
8125
  else
8126
    {
8127
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
8128
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
8129
                  != (PSR_c|PSR_f),
8130
                  _("'APSR', 'CPSR' or 'SPSR' expected"));
8131
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
8132
    }
8133
 
8134
  inst.instruction |= br;
8135
}
8136
 
8137
/* Two possible forms:
8138
      "{C|S}PSR_<field>, Rm",
8139
      "{C|S}PSR_f, #expression".  */
8140
 
8141
static void
8142
do_msr (void)
8143
{
8144
  if (do_vfp_nsyn_msr () == SUCCESS)
8145
    return;
8146
 
8147
  inst.instruction |= inst.operands[0].imm;
8148
  if (inst.operands[1].isreg)
8149
    inst.instruction |= inst.operands[1].reg;
8150
  else
8151
    {
8152
      inst.instruction |= INST_IMMEDIATE;
8153
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8154
      inst.reloc.pc_rel = 0;
8155
    }
8156
}
8157
 
8158
static void
8159
do_mul (void)
8160
{
8161
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8162
 
8163
  if (!inst.operands[2].present)
8164
    inst.operands[2].reg = inst.operands[0].reg;
8165
  inst.instruction |= inst.operands[0].reg << 16;
8166
  inst.instruction |= inst.operands[1].reg;
8167
  inst.instruction |= inst.operands[2].reg << 8;
8168
 
8169
  if (inst.operands[0].reg == inst.operands[1].reg
8170
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8171
    as_tsktsk (_("Rd and Rm should be different in mul"));
8172
}
8173
 
8174
/* Long Multiply Parser
8175
   UMULL RdLo, RdHi, Rm, Rs
8176
   SMULL RdLo, RdHi, Rm, Rs
8177
   UMLAL RdLo, RdHi, Rm, Rs
8178
   SMLAL RdLo, RdHi, Rm, Rs.  */
8179
 
8180
static void
8181
do_mull (void)
8182
{
8183
  inst.instruction |= inst.operands[0].reg << 12;
8184
  inst.instruction |= inst.operands[1].reg << 16;
8185
  inst.instruction |= inst.operands[2].reg;
8186
  inst.instruction |= inst.operands[3].reg << 8;
8187
 
8188
  /* rdhi and rdlo must be different.  */
8189
  if (inst.operands[0].reg == inst.operands[1].reg)
8190
    as_tsktsk (_("rdhi and rdlo must be different"));
8191
 
8192
  /* rdhi, rdlo and rm must all be different before armv6.  */
8193
  if ((inst.operands[0].reg == inst.operands[2].reg
8194
      || inst.operands[1].reg == inst.operands[2].reg)
8195
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8196
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8197
}
8198
 
8199
static void
8200
do_nop (void)
8201
{
8202
  if (inst.operands[0].present
8203
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8204
    {
8205
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
8206
      inst.instruction &= 0xf0000000;
8207
      inst.instruction |= 0x0320f000;
8208
      if (inst.operands[0].present)
8209
        inst.instruction |= inst.operands[0].imm;
8210
    }
8211
}
8212
 
8213
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8214
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8215
   Condition defaults to COND_ALWAYS.
8216
   Error if Rd, Rn or Rm are R15.  */
8217
 
8218
static void
8219
do_pkhbt (void)
8220
{
8221
  inst.instruction |= inst.operands[0].reg << 12;
8222
  inst.instruction |= inst.operands[1].reg << 16;
8223
  inst.instruction |= inst.operands[2].reg;
8224
  if (inst.operands[3].present)
8225
    encode_arm_shift (3);
8226
}
8227
 
8228
/* ARM V6 PKHTB (Argument Parse).  */
8229
 
8230
static void
8231
do_pkhtb (void)
8232
{
8233
  if (!inst.operands[3].present)
8234
    {
8235
      /* If the shift specifier is omitted, turn the instruction
8236
         into pkhbt rd, rm, rn. */
8237
      inst.instruction &= 0xfff00010;
8238
      inst.instruction |= inst.operands[0].reg << 12;
8239
      inst.instruction |= inst.operands[1].reg;
8240
      inst.instruction |= inst.operands[2].reg << 16;
8241
    }
8242
  else
8243
    {
8244
      inst.instruction |= inst.operands[0].reg << 12;
8245
      inst.instruction |= inst.operands[1].reg << 16;
8246
      inst.instruction |= inst.operands[2].reg;
8247
      encode_arm_shift (3);
8248
    }
8249
}
8250
 
8251
/* ARMv5TE: Preload-Cache
8252
   MP Extensions: Preload for write
8253
 
8254
    PLD(W) <addr_mode>
8255
 
8256
  Syntactically, like LDR with B=1, W=0, L=1.  */
8257
 
8258
static void
8259
do_pld (void)
8260
{
8261
  constraint (!inst.operands[0].isreg,
8262
              _("'[' expected after PLD mnemonic"));
8263
  constraint (inst.operands[0].postind,
8264
              _("post-indexed expression used in preload instruction"));
8265
  constraint (inst.operands[0].writeback,
8266
              _("writeback used in preload instruction"));
8267
  constraint (!inst.operands[0].preind,
8268
              _("unindexed addressing used in preload instruction"));
8269
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8270
}
8271
 
8272
/* ARMv7: PLI <addr_mode>  */
8273
static void
8274
do_pli (void)
8275
{
8276
  constraint (!inst.operands[0].isreg,
8277
              _("'[' expected after PLI mnemonic"));
8278
  constraint (inst.operands[0].postind,
8279
              _("post-indexed expression used in preload instruction"));
8280
  constraint (inst.operands[0].writeback,
8281
              _("writeback used in preload instruction"));
8282
  constraint (!inst.operands[0].preind,
8283
              _("unindexed addressing used in preload instruction"));
8284
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8285
  inst.instruction &= ~PRE_INDEX;
8286
}
8287
 
8288
static void
8289
do_push_pop (void)
8290
{
8291
  inst.operands[1] = inst.operands[0];
8292
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8293
  inst.operands[0].isreg = 1;
8294
  inst.operands[0].writeback = 1;
8295
  inst.operands[0].reg = REG_SP;
8296
  do_ldmstm ();
8297
}
8298
 
8299
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8300
   word at the specified address and the following word
8301
   respectively.
8302
   Unconditionally executed.
8303
   Error if Rn is R15.  */
8304
 
8305
static void
8306
do_rfe (void)
8307
{
8308
  inst.instruction |= inst.operands[0].reg << 16;
8309
  if (inst.operands[0].writeback)
8310
    inst.instruction |= WRITE_BACK;
8311
}
8312
 
8313
/* ARM V6 ssat (argument parse).  */
8314
 
8315
static void
8316
do_ssat (void)
8317
{
8318
  inst.instruction |= inst.operands[0].reg << 12;
8319
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
8320
  inst.instruction |= inst.operands[2].reg;
8321
 
8322
  if (inst.operands[3].present)
8323
    encode_arm_shift (3);
8324
}
8325
 
8326
/* ARM V6 usat (argument parse).  */
8327
 
8328
static void
8329
do_usat (void)
8330
{
8331
  inst.instruction |= inst.operands[0].reg << 12;
8332
  inst.instruction |= inst.operands[1].imm << 16;
8333
  inst.instruction |= inst.operands[2].reg;
8334
 
8335
  if (inst.operands[3].present)
8336
    encode_arm_shift (3);
8337
}
8338
 
8339
/* ARM V6 ssat16 (argument parse).  */
8340
 
8341
static void
8342
do_ssat16 (void)
8343
{
8344
  inst.instruction |= inst.operands[0].reg << 12;
8345
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
8346
  inst.instruction |= inst.operands[2].reg;
8347
}
8348
 
8349
static void
8350
do_usat16 (void)
8351
{
8352
  inst.instruction |= inst.operands[0].reg << 12;
8353
  inst.instruction |= inst.operands[1].imm << 16;
8354
  inst.instruction |= inst.operands[2].reg;
8355
}
8356
 
8357
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
8358
   preserving the other bits.
8359
 
8360
   setend <endian_specifier>, where <endian_specifier> is either
8361
   BE or LE.  */
8362
 
8363
static void
8364
do_setend (void)
8365
{
8366
  if (inst.operands[0].imm)
8367
    inst.instruction |= 0x200;
8368
}
8369
 
8370
static void
8371
do_shift (void)
8372
{
8373
  unsigned int Rm = (inst.operands[1].present
8374
                     ? inst.operands[1].reg
8375
                     : inst.operands[0].reg);
8376
 
8377
  inst.instruction |= inst.operands[0].reg << 12;
8378
  inst.instruction |= Rm;
8379
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
8380
    {
8381
      inst.instruction |= inst.operands[2].reg << 8;
8382
      inst.instruction |= SHIFT_BY_REG;
8383 148 khays
      /* PR 12854: Error on extraneous shifts.  */
8384
      constraint (inst.operands[2].shifted,
8385
                  _("extraneous shift as part of operand to shift insn"));
8386 16 khays
    }
8387
  else
8388
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
8389
}
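
/* For example, "lsl r0, r1, r2" takes the register-shift path above, while
   "lsl r0, r1, #4" leaves the immediate to be applied by the
   BFD_RELOC_ARM_SHIFT_IMM fixup.  */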
8390
 
8391
static void
8392
do_smc (void)
8393
{
8394
  inst.reloc.type = BFD_RELOC_ARM_SMC;
8395
  inst.reloc.pc_rel = 0;
8396
}
8397
 
8398
static void
8399
do_hvc (void)
8400
{
8401
  inst.reloc.type = BFD_RELOC_ARM_HVC;
8402
  inst.reloc.pc_rel = 0;
8403
}
8404
 
8405
static void
8406
do_swi (void)
8407
{
8408
  inst.reloc.type = BFD_RELOC_ARM_SWI;
8409
  inst.reloc.pc_rel = 0;
8410
}
8411
 
8412
/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8413
   SMLAxy{cond} Rd,Rm,Rs,Rn
8414
   SMLAWy{cond} Rd,Rm,Rs,Rn
8415
   Error if any register is R15.  */
8416
 
8417
static void
8418
do_smla (void)
8419
{
8420
  inst.instruction |= inst.operands[0].reg << 16;
8421
  inst.instruction |= inst.operands[1].reg;
8422
  inst.instruction |= inst.operands[2].reg << 8;
8423
  inst.instruction |= inst.operands[3].reg << 12;
8424
}
8425
 
8426
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8427
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8428
   Error if any register is R15.
8429
   Warning if Rdlo == Rdhi.  */
8430
 
8431
static void
8432
do_smlal (void)
8433
{
8434
  inst.instruction |= inst.operands[0].reg << 12;
8435
  inst.instruction |= inst.operands[1].reg << 16;
8436
  inst.instruction |= inst.operands[2].reg;
8437
  inst.instruction |= inst.operands[3].reg << 8;
8438
 
8439
  if (inst.operands[0].reg == inst.operands[1].reg)
8440
    as_tsktsk (_("rdhi and rdlo must be different"));
8441
}
8442
 
8443
/* ARM V5E (El Segundo) signed-multiply (argument parse)
8444
   SMULxy{cond} Rd,Rm,Rs
8445
   Error if any register is R15.  */
8446
 
8447
static void
8448
do_smul (void)
8449
{
8450
  inst.instruction |= inst.operands[0].reg << 16;
8451
  inst.instruction |= inst.operands[1].reg;
8452
  inst.instruction |= inst.operands[2].reg << 8;
8453
}
8454
 
8455
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
8456
   the same for both ARM and Thumb-2.  */
8457
 
8458
static void
8459
do_srs (void)
8460
{
8461
  int reg;
8462
 
8463
  if (inst.operands[0].present)
8464
    {
8465
      reg = inst.operands[0].reg;
8466
      constraint (reg != REG_SP, _("SRS base register must be r13"));
8467
    }
8468
  else
8469
    reg = REG_SP;
8470
 
8471
  inst.instruction |= reg << 16;
8472
  inst.instruction |= inst.operands[1].imm;
8473
  if (inst.operands[0].writeback || inst.operands[1].writeback)
8474
    inst.instruction |= WRITE_BACK;
8475
}
8476
 
8477
/* ARM V6 strex (argument parse).  */
8478
 
8479
static void
8480
do_strex (void)
8481
{
8482
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8483
              || inst.operands[2].postind || inst.operands[2].writeback
8484
              || inst.operands[2].immisreg || inst.operands[2].shifted
8485
              || inst.operands[2].negative
8486
              /* See comment in do_ldrex().  */
8487
              || (inst.operands[2].reg == REG_PC),
8488
              BAD_ADDR_MODE);
8489
 
8490
  constraint (inst.operands[0].reg == inst.operands[1].reg
8491
              || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8492
 
8493
  constraint (inst.reloc.exp.X_op != O_constant
8494
              || inst.reloc.exp.X_add_number != 0,
8495
              _("offset must be zero in ARM encoding"));
8496
 
8497
  inst.instruction |= inst.operands[0].reg << 12;
8498
  inst.instruction |= inst.operands[1].reg;
8499
  inst.instruction |= inst.operands[2].reg << 16;
8500
  inst.reloc.type = BFD_RELOC_UNUSED;
8501
}
8502
 
8503
static void
8504 160 khays
do_t_strexbh (void)
8505
{
8506
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8507
              || inst.operands[2].postind || inst.operands[2].writeback
8508
              || inst.operands[2].immisreg || inst.operands[2].shifted
8509
              || inst.operands[2].negative,
8510
              BAD_ADDR_MODE);
8511
 
8512
  constraint (inst.operands[0].reg == inst.operands[1].reg
8513
              || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8514
 
8515
  do_rm_rd_rn ();
8516
}
8517
 
8518
static void
8519 16 khays
do_strexd (void)
8520
{
8521
  constraint (inst.operands[1].reg % 2 != 0,
8522
              _("even register required"));
8523
  constraint (inst.operands[2].present
8524
              && inst.operands[2].reg != inst.operands[1].reg + 1,
8525
              _("can only store two consecutive registers"));
8526
  /* If op 2 were present and equal to PC, this function wouldn't
8527
     have been called in the first place.  */
8528
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
8529
 
8530
  constraint (inst.operands[0].reg == inst.operands[1].reg
8531
              || inst.operands[0].reg == inst.operands[1].reg + 1
8532
              || inst.operands[0].reg == inst.operands[3].reg,
8533
              BAD_OVERLAP);
8534
 
8535
  inst.instruction |= inst.operands[0].reg << 12;
8536
  inst.instruction |= inst.operands[1].reg;
8537
  inst.instruction |= inst.operands[3].reg << 16;
8538
}
8539
 
8540
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8541
   extends it to 32-bits, and adds the result to a value in another
8542
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
8543
   before extracting the 16-bit value.
8544
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8545
   Condition defaults to COND_ALWAYS.
8546
   Error if any register uses R15.  */
8547
 
8548
static void
8549
do_sxtah (void)
8550
{
8551
  inst.instruction |= inst.operands[0].reg << 12;
8552
  inst.instruction |= inst.operands[1].reg << 16;
8553
  inst.instruction |= inst.operands[2].reg;
8554
  inst.instruction |= inst.operands[3].imm << 10;
8555
}
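/* Illustrative note (not from the original source): the value ORed in at
   bits 11:10 above is the two-bit rotate field, which selects a rotation
   of the source by 0, 8, 16 or 24 bits; e.g. a field value of 2
   corresponds to "ror #16".  The shift by 10 assumes the operand parser
   has already reduced the written rotation to this 0-3 range.  */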
8556
 
8557
/* ARM V6 SXTH.
8558
 
8559
   SXTH{<cond>} <Rd>, <Rm>{, <rotation>}
8560
   Condition defaults to COND_ALWAYS.
8561
   Error if any register uses R15.  */
8562
 
8563
static void
8564
do_sxth (void)
8565
{
8566
  inst.instruction |= inst.operands[0].reg << 12;
8567
  inst.instruction |= inst.operands[1].reg;
8568
  inst.instruction |= inst.operands[2].imm << 10;
8569
}
8570
 
8571
/* VFP instructions.  In a logical order: SP variant first, monad
8572
   before dyad, arithmetic then move then load/store.  */
8573
 
8574
static void
8575
do_vfp_sp_monadic (void)
8576
{
8577
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8578
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8579
}
8580
 
8581
static void
8582
do_vfp_sp_dyadic (void)
8583
{
8584
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8585
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8586
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8587
}
8588
 
8589
static void
8590
do_vfp_sp_compare_z (void)
8591
{
8592
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8593
}
8594
 
8595
static void
8596
do_vfp_dp_sp_cvt (void)
8597
{
8598
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8599
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8600
}
8601
 
8602
static void
8603
do_vfp_sp_dp_cvt (void)
8604
{
8605
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8606
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8607
}
8608
 
8609
static void
8610
do_vfp_reg_from_sp (void)
8611
{
8612
  inst.instruction |= inst.operands[0].reg << 12;
8613
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8614
}
8615
 
8616
static void
8617
do_vfp_reg2_from_sp2 (void)
8618
{
8619
  constraint (inst.operands[2].imm != 2,
8620
              _("only two consecutive VFP SP registers allowed here"));
8621
  inst.instruction |= inst.operands[0].reg << 12;
8622
  inst.instruction |= inst.operands[1].reg << 16;
8623
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8624
}
8625
 
8626
static void
8627
do_vfp_sp_from_reg (void)
8628
{
8629
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
8630
  inst.instruction |= inst.operands[1].reg << 12;
8631
}
8632
 
8633
static void
8634
do_vfp_sp2_from_reg2 (void)
8635
{
8636
  constraint (inst.operands[0].imm != 2,
8637
              _("only two consecutive VFP SP registers allowed here"));
8638
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
8639
  inst.instruction |= inst.operands[1].reg << 12;
8640
  inst.instruction |= inst.operands[2].reg << 16;
8641
}
8642
 
8643
static void
8644
do_vfp_sp_ldst (void)
8645
{
8646
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8647
  encode_arm_cp_address (1, FALSE, TRUE, 0);
8648
}
8649
 
8650
static void
8651
do_vfp_dp_ldst (void)
8652
{
8653
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8654
  encode_arm_cp_address (1, FALSE, TRUE, 0);
8655
}
8656
 
8657
 
8658
static void
8659
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8660
{
8661
  if (inst.operands[0].writeback)
8662
    inst.instruction |= WRITE_BACK;
8663
  else
8664
    constraint (ldstm_type != VFP_LDSTMIA,
8665
                _("this addressing mode requires base-register writeback"));
8666
  inst.instruction |= inst.operands[0].reg << 16;
8667
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8668
  inst.instruction |= inst.operands[1].imm;
8669
}
8670
 
8671
static void
8672
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8673
{
8674
  int count;
8675
 
8676
  if (inst.operands[0].writeback)
8677
    inst.instruction |= WRITE_BACK;
8678
  else
8679
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8680
                _("this addressing mode requires base-register writeback"));
8681
 
8682
  inst.instruction |= inst.operands[0].reg << 16;
8683
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8684
 
8685
  count = inst.operands[1].imm << 1;
8686
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8687
    count += 1;
8688
 
8689
  inst.instruction |= count;
8690
}
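/* Worked example (illustrative, not from the original source): assuming
   inst.operands[1].imm is the number of D registers in the list, as the
   "<< 1" above implies, "fldmiax r0!, {d0-d3}" gives count = 4*2 + 1 = 9
   words, the extra word being the FLDMX/FSTMX format word.  */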
8691
 
8692
static void
8693
do_vfp_sp_ldstmia (void)
8694
{
8695
  vfp_sp_ldstm (VFP_LDSTMIA);
8696
}
8697
 
8698
static void
8699
do_vfp_sp_ldstmdb (void)
8700
{
8701
  vfp_sp_ldstm (VFP_LDSTMDB);
8702
}
8703
 
8704
static void
8705
do_vfp_dp_ldstmia (void)
8706
{
8707
  vfp_dp_ldstm (VFP_LDSTMIA);
8708
}
8709
 
8710
static void
8711
do_vfp_dp_ldstmdb (void)
8712
{
8713
  vfp_dp_ldstm (VFP_LDSTMDB);
8714
}
8715
 
8716
static void
8717
do_vfp_xp_ldstmia (void)
8718
{
8719
  vfp_dp_ldstm (VFP_LDSTMIAX);
8720
}
8721
 
8722
static void
8723
do_vfp_xp_ldstmdb (void)
8724
{
8725
  vfp_dp_ldstm (VFP_LDSTMDBX);
8726
}
8727
 
8728
static void
8729
do_vfp_dp_rd_rm (void)
8730
{
8731
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8732
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8733
}
8734
 
8735
static void
8736
do_vfp_dp_rn_rd (void)
8737
{
8738
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
8739
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8740
}
8741
 
8742
static void
8743
do_vfp_dp_rd_rn (void)
8744
{
8745
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8746
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8747
}
8748
 
8749
static void
8750
do_vfp_dp_rd_rn_rm (void)
8751
{
8752
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8753
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8754
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
8755
}
8756
 
8757
static void
8758
do_vfp_dp_rd (void)
8759
{
8760
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8761
}
8762
 
8763
static void
8764
do_vfp_dp_rm_rd_rn (void)
8765
{
8766
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
8767
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8768
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
8769
}
8770
 
8771
/* VFPv3 instructions.  */
8772
static void
8773
do_vfp_sp_const (void)
8774
{
8775
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8776
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8777
  inst.instruction |= (inst.operands[1].imm & 0x0f);
8778
}
8779
 
8780
static void
8781
do_vfp_dp_const (void)
8782
{
8783
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8784
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8785
  inst.instruction |= (inst.operands[1].imm & 0x0f);
8786
}
8787
 
8788
static void
8789
vfp_conv (int srcsize)
8790
{
8791 160 khays
  int immbits = srcsize - inst.operands[1].imm;
8792
 
8793
  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
8794
    {
8795
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
8796
         i.e. immbits must be in range 0 - 16.  */
8797
      inst.error = _("immediate value out of range, expected range [0, 16]");
8798
      return;
8799
    }
8800
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
8801
    {
8802
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
8803
         i.e. immbits must be in range 0 - 31.  */
8804
      inst.error = _("immediate value out of range, expected range [1, 32]");
8805
      return;
8806
    }
8807
 
8808 16 khays
  inst.instruction |= (immbits & 1) << 5;
8809
  inst.instruction |= (immbits >> 1);
8810
}
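/* Worked example (illustrative, not from the original source): for a
   32-bit fixed-point conversion with 16 fraction bits, immbits = 32 - 16
   = 16, so bit 5 of the encoding receives 0 (16 & 1) and the low field
   receives 8 (16 >> 1).  */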
8811
 
8812
static void
8813
do_vfp_sp_conv_16 (void)
8814
{
8815
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8816
  vfp_conv (16);
8817
}
8818
 
8819
static void
8820
do_vfp_dp_conv_16 (void)
8821
{
8822
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8823
  vfp_conv (16);
8824
}
8825
 
8826
static void
8827
do_vfp_sp_conv_32 (void)
8828
{
8829
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8830
  vfp_conv (32);
8831
}
8832
 
8833
static void
8834
do_vfp_dp_conv_32 (void)
8835
{
8836
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8837
  vfp_conv (32);
8838
}
8839
 
8840
/* FPA instructions.  Also in a logical order.  */
8841
 
8842
static void
8843
do_fpa_cmp (void)
8844
{
8845
  inst.instruction |= inst.operands[0].reg << 16;
8846
  inst.instruction |= inst.operands[1].reg;
8847
}
8848
 
8849
static void
8850
do_fpa_ldmstm (void)
8851
{
8852
  inst.instruction |= inst.operands[0].reg << 12;
8853
  switch (inst.operands[1].imm)
8854
    {
8855
    case 1: inst.instruction |= CP_T_X;          break;
8856
    case 2: inst.instruction |= CP_T_Y;          break;
8857
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
8858
    case 4:                                      break;
8859
    default: abort ();
8860
    }
8861
 
8862
  if (inst.instruction & (PRE_INDEX | INDEX_UP))
8863
    {
8864
      /* The instruction specified "ea" or "fd", so we can only accept
8865
         [Rn]{!}.  The instruction does not really support stacking or
8866
         unstacking, so we have to emulate these by setting appropriate
8867
         bits and offsets.  */
8868
      constraint (inst.reloc.exp.X_op != O_constant
8869
                  || inst.reloc.exp.X_add_number != 0,
8870
                  _("this instruction does not support indexing"));
8871
 
8872
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
8873
        inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
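      /* Illustrative note (not from the original source): each FPA
         register image in the transfer occupies 12 bytes, so e.g. a
         four-register "fd"/"ea" form is emulated with an offset of
         12 * 4 = 48, negated below when INDEX_UP is clear.  */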
8874
 
8875
      if (!(inst.instruction & INDEX_UP))
8876
        inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
8877
 
8878
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
8879
        {
8880
          inst.operands[2].preind = 0;
8881
          inst.operands[2].postind = 1;
8882
        }
8883
    }
8884
 
8885
  encode_arm_cp_address (2, TRUE, TRUE, 0);
8886
}
8887
 
8888
/* iWMMXt instructions: strictly in alphabetical order.  */
8889
 
8890
static void
8891
do_iwmmxt_tandorc (void)
8892
{
8893
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
8894
}
8895
 
8896
static void
8897
do_iwmmxt_textrc (void)
8898
{
8899
  inst.instruction |= inst.operands[0].reg << 12;
8900
  inst.instruction |= inst.operands[1].imm;
8901
}
8902
 
8903
static void
8904
do_iwmmxt_textrm (void)
8905
{
8906
  inst.instruction |= inst.operands[0].reg << 12;
8907
  inst.instruction |= inst.operands[1].reg << 16;
8908
  inst.instruction |= inst.operands[2].imm;
8909
}
8910
 
8911
static void
8912
do_iwmmxt_tinsr (void)
8913
{
8914
  inst.instruction |= inst.operands[0].reg << 16;
8915
  inst.instruction |= inst.operands[1].reg << 12;
8916
  inst.instruction |= inst.operands[2].imm;
8917
}
8918
 
8919
static void
8920
do_iwmmxt_tmia (void)
8921
{
8922
  inst.instruction |= inst.operands[0].reg << 5;
8923
  inst.instruction |= inst.operands[1].reg;
8924
  inst.instruction |= inst.operands[2].reg << 12;
8925
}
8926
 
8927
static void
8928
do_iwmmxt_waligni (void)
8929
{
8930
  inst.instruction |= inst.operands[0].reg << 12;
8931
  inst.instruction |= inst.operands[1].reg << 16;
8932
  inst.instruction |= inst.operands[2].reg;
8933
  inst.instruction |= inst.operands[3].imm << 20;
8934
}
8935
 
8936
static void
8937
do_iwmmxt_wmerge (void)
8938
{
8939
  inst.instruction |= inst.operands[0].reg << 12;
8940
  inst.instruction |= inst.operands[1].reg << 16;
8941
  inst.instruction |= inst.operands[2].reg;
8942
  inst.instruction |= inst.operands[3].imm << 21;
8943
}
8944
 
8945
static void
8946
do_iwmmxt_wmov (void)
8947
{
8948
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
8949
  inst.instruction |= inst.operands[0].reg << 12;
8950
  inst.instruction |= inst.operands[1].reg << 16;
8951
  inst.instruction |= inst.operands[1].reg;
8952
}
8953
 
8954
static void
8955
do_iwmmxt_wldstbh (void)
8956
{
8957
  int reloc;
8958
  inst.instruction |= inst.operands[0].reg << 12;
8959
  if (thumb_mode)
8960
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
8961
  else
8962
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
8963
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
8964
}
8965
 
8966
static void
8967
do_iwmmxt_wldstw (void)
8968
{
8969
  /* RIWR_RIWC clears .isreg for a control register.  */
8970
  if (!inst.operands[0].isreg)
8971
    {
8972
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
8973
      inst.instruction |= 0xf0000000;
8974
    }
8975
 
8976
  inst.instruction |= inst.operands[0].reg << 12;
8977
  encode_arm_cp_address (1, TRUE, TRUE, 0);
8978
}
8979
 
8980
static void
8981
do_iwmmxt_wldstd (void)
8982
{
8983
  inst.instruction |= inst.operands[0].reg << 12;
8984
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
8985
      && inst.operands[1].immisreg)
8986
    {
8987
      inst.instruction &= ~0x1a000ff;
8988
      inst.instruction |= (0xf << 28);
8989
      if (inst.operands[1].preind)
8990
        inst.instruction |= PRE_INDEX;
8991
      if (!inst.operands[1].negative)
8992
        inst.instruction |= INDEX_UP;
8993
      if (inst.operands[1].writeback)
8994
        inst.instruction |= WRITE_BACK;
8995
      inst.instruction |= inst.operands[1].reg << 16;
8996
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
8997
      inst.instruction |= inst.operands[1].imm;
8998
    }
8999
  else
9000
    encode_arm_cp_address (1, TRUE, FALSE, 0);
9001
}
9002
 
9003
static void
9004
do_iwmmxt_wshufh (void)
9005
{
9006
  inst.instruction |= inst.operands[0].reg << 12;
9007
  inst.instruction |= inst.operands[1].reg << 16;
9008
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9009
  inst.instruction |= (inst.operands[2].imm & 0x0f);
9010
}
9011
 
9012
static void
9013
do_iwmmxt_wzero (void)
9014
{
9015
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
9016
  inst.instruction |= inst.operands[0].reg;
9017
  inst.instruction |= inst.operands[0].reg << 12;
9018
  inst.instruction |= inst.operands[0].reg << 16;
9019
}
9020
 
9021
static void
9022
do_iwmmxt_wrwrwr_or_imm5 (void)
9023
{
9024
  if (inst.operands[2].isreg)
9025
    do_rd_rn_rm ();
9026
  else {
9027
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9028
                _("immediate operand requires iWMMXt2"));
9029
    do_rd_rn ();
9030
    if (inst.operands[2].imm == 0)
9031
      {
9032
        switch ((inst.instruction >> 20) & 0xf)
9033
          {
9034
          case 4:
9035
          case 5:
9036
          case 6:
9037
          case 7:
9038
            /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
9039
            inst.operands[2].imm = 16;
9040
            inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9041
            break;
9042
          case 8:
9043
          case 9:
9044
          case 10:
9045
          case 11:
9046
            /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
9047
            inst.operands[2].imm = 32;
9048
            inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9049
            break;
9050
          case 12:
9051
          case 13:
9052
          case 14:
9053
          case 15:
9054
            {
9055
              /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
9056
              unsigned long wrn;
9057
              wrn = (inst.instruction >> 16) & 0xf;
9058
              inst.instruction &= 0xff0fff0f;
9059
              inst.instruction |= wrn;
9060
              /* Bail out here; the instruction is now assembled.  */
9061
              return;
9062
            }
9063
          }
9064
      }
9065
    /* Map 32 -> 0, etc.  */
9066
    inst.operands[2].imm &= 0x1f;
9067
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
9068
  }
9069
}
9070
 
9071
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
9072
   operations first, then control, shift, and load/store.  */
9073
 
9074
/* Insns like "foo X,Y,Z".  */
9075
 
9076
static void
9077
do_mav_triple (void)
9078
{
9079
  inst.instruction |= inst.operands[0].reg << 16;
9080
  inst.instruction |= inst.operands[1].reg;
9081
  inst.instruction |= inst.operands[2].reg << 12;
9082
}
9083
 
9084
/* Insns like "foo W,X,Y,Z".
9085
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
9086
 
9087
static void
9088
do_mav_quad (void)
9089
{
9090
  inst.instruction |= inst.operands[0].reg << 5;
9091
  inst.instruction |= inst.operands[1].reg << 12;
9092
  inst.instruction |= inst.operands[2].reg << 16;
9093
  inst.instruction |= inst.operands[3].reg;
9094
}
9095
 
9096
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
9097
static void
9098
do_mav_dspsc (void)
9099
{
9100
  inst.instruction |= inst.operands[1].reg << 12;
9101
}
9102
 
9103
/* Maverick shift immediate instructions.
9104
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9105
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
9106
 
9107
static void
9108
do_mav_shift (void)
9109
{
9110
  int imm = inst.operands[2].imm;
9111
 
9112
  inst.instruction |= inst.operands[0].reg << 12;
9113
  inst.instruction |= inst.operands[1].reg << 16;
9114
 
9115
  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9116
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
9117
     Bit 4 should be 0.  */
9118
  imm = (imm & 0xf) | ((imm & 0x70) << 1);
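  /* Worked example (illustrative, not from the original source): for an
     immediate of 0x25 (0b0100101), (0x25 & 0xf) = 0x05 and
     ((0x25 & 0x70) << 1) = 0x40, giving 0x45: bits 0-3 and 5-7 filled,
     bit 4 left clear.  */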
9119
 
9120
  inst.instruction |= imm;
9121
}
9122
 
9123
/* XScale instructions.  Also sorted arithmetic before move.  */
9124
 
9125
/* Xscale multiply-accumulate (argument parse)
9126
     MIAcc   acc0,Rm,Rs
9127
     MIAPHcc acc0,Rm,Rs
9128
     MIAxycc acc0,Rm,Rs.  */
9129
 
9130
static void
9131
do_xsc_mia (void)
9132
{
9133
  inst.instruction |= inst.operands[1].reg;
9134
  inst.instruction |= inst.operands[2].reg << 12;
9135
}
9136
 
9137
/* Xscale move-accumulator-register (argument parse)
9138
 
9139
     MARcc   acc0,RdLo,RdHi.  */
9140
 
9141
static void
9142
do_xsc_mar (void)
9143
{
9144
  inst.instruction |= inst.operands[1].reg << 12;
9145
  inst.instruction |= inst.operands[2].reg << 16;
9146
}
9147
 
9148
/* Xscale move-register-accumulator (argument parse)
9149
 
9150
     MRAcc   RdLo,RdHi,acc0.  */
9151
 
9152
static void
9153
do_xsc_mra (void)
9154
{
9155
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9156
  inst.instruction |= inst.operands[0].reg << 12;
9157
  inst.instruction |= inst.operands[1].reg << 16;
9158
}
9159
 
9160
/* Encoding functions relevant only to Thumb.  */
9161
 
9162
/* inst.operands[i] is a shifted-register operand; encode
9163
   it into inst.instruction in the format used by Thumb32.  */
9164
 
9165
static void
9166
encode_thumb32_shifted_operand (int i)
9167
{
9168
  unsigned int value = inst.reloc.exp.X_add_number;
9169
  unsigned int shift = inst.operands[i].shift_kind;
9170
 
9171
  constraint (inst.operands[i].immisreg,
9172
              _("shift by register not allowed in thumb mode"));
9173
  inst.instruction |= inst.operands[i].reg;
9174
  if (shift == SHIFT_RRX)
9175
    inst.instruction |= SHIFT_ROR << 4;
9176
  else
9177
    {
9178
      constraint (inst.reloc.exp.X_op != O_constant,
9179
                  _("expression too complex"));
9180
 
9181
      constraint (value > 32
9182
                  || (value == 32 && (shift == SHIFT_LSL
9183
                                      || shift == SHIFT_ROR)),
9184
                  _("shift expression is too large"));
9185
 
9186
      if (value == 0)
9187
        shift = SHIFT_LSL;
9188
      else if (value == 32)
9189
        value = 0;
9190
 
9191
      inst.instruction |= shift << 4;
9192
      inst.instruction |= (value & 0x1c) << 10;
9193
      inst.instruction |= (value & 0x03) << 6;
9194
    }
9195
}
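/* Illustrative note (not from the original source): the immediate shift
   amount is split into the usual Thumb-2 imm3:imm2 fields, e.g. a shift
   of 5 places (5 & 0x1c) >> 2 = 1 in bits 14:12 and (5 & 0x03) = 1 in
   bits 7:6, exactly as the two OR operations above encode it.  */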
9196
 
9197
 
9198
/* inst.operands[i] was set up by parse_address.  Encode it into a
9199
   Thumb32 format load or store instruction.  Reject forms that cannot
9200
   be used with such instructions.  If is_t is true, reject forms that
9201
   cannot be used with a T instruction; if is_d is true, reject forms
9202
   that cannot be used with a D instruction.  If it is a store insn,
9203
   reject PC in Rn.  */
9204
 
9205
static void
9206
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9207
{
9208
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
9209
 
9210
  constraint (!inst.operands[i].isreg,
9211
              _("Instruction does not support =N addresses"));
9212
 
9213
  inst.instruction |= inst.operands[i].reg << 16;
9214
  if (inst.operands[i].immisreg)
9215
    {
9216
      constraint (is_pc, BAD_PC_ADDRESSING);
9217
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
9218
      constraint (inst.operands[i].negative,
9219
                  _("Thumb does not support negative register indexing"));
9220
      constraint (inst.operands[i].postind,
9221
                  _("Thumb does not support register post-indexing"));
9222
      constraint (inst.operands[i].writeback,
9223
                  _("Thumb does not support register indexing with writeback"));
9224
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
9225
                  _("Thumb supports only LSL in shifted register indexing"));
9226
 
9227
      inst.instruction |= inst.operands[i].imm;
9228
      if (inst.operands[i].shifted)
9229
        {
9230
          constraint (inst.reloc.exp.X_op != O_constant,
9231
                      _("expression too complex"));
9232
          constraint (inst.reloc.exp.X_add_number < 0
9233
                      || inst.reloc.exp.X_add_number > 3,
9234
                      _("shift out of range"));
9235
          inst.instruction |= inst.reloc.exp.X_add_number << 4;
9236
        }
9237
      inst.reloc.type = BFD_RELOC_UNUSED;
9238
    }
9239
  else if (inst.operands[i].preind)
9240
    {
9241
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
9242
      constraint (is_t && inst.operands[i].writeback,
9243
                  _("cannot use writeback with this instruction"));
9244
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
9245
                  && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);
9246
 
9247
      if (is_d)
9248
        {
9249
          inst.instruction |= 0x01000000;
9250
          if (inst.operands[i].writeback)
9251
            inst.instruction |= 0x00200000;
9252
        }
9253
      else
9254
        {
9255
          inst.instruction |= 0x00000c00;
9256
          if (inst.operands[i].writeback)
9257
            inst.instruction |= 0x00000100;
9258
        }
9259
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9260
    }
9261
  else if (inst.operands[i].postind)
9262
    {
9263
      gas_assert (inst.operands[i].writeback);
9264
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
9265
      constraint (is_t, _("cannot use post-indexing with this instruction"));
9266
 
9267
      if (is_d)
9268
        inst.instruction |= 0x00200000;
9269
      else
9270
        inst.instruction |= 0x00000900;
9271
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9272
    }
9273
  else /* unindexed - only for coprocessor */
9274
    inst.error = _("instruction does not accept unindexed addressing");
9275
}
9276
 
9277
/* Table of Thumb instructions which exist in both 16- and 32-bit
9278
   encodings (the latter only in post-V6T2 cores).  The index is the
9279
   value used in the insns table below.  When there is more than one
9280
   possible 16-bit encoding for the instruction, this table always
9281
   holds variant (1).
9282
   Also contains several pseudo-instructions used during relaxation.  */
9283
#define T16_32_TAB                              \
9284
  X(_adc,   4140, eb400000),                    \
9285
  X(_adcs,  4140, eb500000),                    \
9286
  X(_add,   1c00, eb000000),                    \
9287
  X(_adds,  1c00, eb100000),                    \
9288
  X(_addi,  0000, f1000000),                    \
9289
  X(_addis, 0000, f1100000),                    \
9290
  X(_add_pc,000f, f20f0000),                    \
9291
  X(_add_sp,000d, f10d0000),                    \
9292
  X(_adr,   000f, f20f0000),                    \
9293
  X(_and,   4000, ea000000),                    \
9294
  X(_ands,  4000, ea100000),                    \
9295
  X(_asr,   1000, fa40f000),                    \
9296
  X(_asrs,  1000, fa50f000),                    \
9297
  X(_b,     e000, f000b000),                    \
9298
  X(_bcond, d000, f0008000),                    \
9299
  X(_bic,   4380, ea200000),                    \
9300
  X(_bics,  4380, ea300000),                    \
9301
  X(_cmn,   42c0, eb100f00),                    \
9302
  X(_cmp,   2800, ebb00f00),                    \
9303
  X(_cpsie, b660, f3af8400),                    \
9304
  X(_cpsid, b670, f3af8600),                    \
9305
  X(_cpy,   4600, ea4f0000),                    \
9306
  X(_dec_sp,80dd, f1ad0d00),                    \
9307
  X(_eor,   4040, ea800000),                    \
9308
  X(_eors,  4040, ea900000),                    \
9309
  X(_inc_sp,00dd, f10d0d00),                    \
9310
  X(_ldmia, c800, e8900000),                    \
9311
  X(_ldr,   6800, f8500000),                    \
9312
  X(_ldrb,  7800, f8100000),                    \
9313
  X(_ldrh,  8800, f8300000),                    \
9314
  X(_ldrsb, 5600, f9100000),                    \
9315
  X(_ldrsh, 5e00, f9300000),                    \
9316
  X(_ldr_pc,4800, f85f0000),                    \
9317
  X(_ldr_pc2,4800, f85f0000),                   \
9318
  X(_ldr_sp,9800, f85d0000),                    \
9319
  X(_lsl,   0000, fa00f000),                    \
9320
  X(_lsls,  0000, fa10f000),                    \
9321
  X(_lsr,   0800, fa20f000),                    \
9322
  X(_lsrs,  0800, fa30f000),                    \
9323
  X(_mov,   2000, ea4f0000),                    \
9324
  X(_movs,  2000, ea5f0000),                    \
9325
  X(_mul,   4340, fb00f000),                     \
9326
  X(_muls,  4340, ffffffff), /* no 32b muls */  \
9327
  X(_mvn,   43c0, ea6f0000),                    \
9328
  X(_mvns,  43c0, ea7f0000),                    \
9329
  X(_neg,   4240, f1c00000), /* rsb #0 */       \
9330
  X(_negs,  4240, f1d00000), /* rsbs #0 */      \
9331
  X(_orr,   4300, ea400000),                    \
9332
  X(_orrs,  4300, ea500000),                    \
9333
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */        \
9334
  X(_push,  b400, e92d0000), /* stmdb sp!,... */        \
9335
  X(_rev,   ba00, fa90f080),                    \
9336
  X(_rev16, ba40, fa90f090),                    \
9337
  X(_revsh, bac0, fa90f0b0),                    \
9338
  X(_ror,   41c0, fa60f000),                    \
9339
  X(_rors,  41c0, fa70f000),                    \
9340
  X(_sbc,   4180, eb600000),                    \
9341
  X(_sbcs,  4180, eb700000),                    \
9342
  X(_stmia, c000, e8800000),                    \
9343
  X(_str,   6000, f8400000),                    \
9344
  X(_strb,  7000, f8000000),                    \
9345
  X(_strh,  8000, f8200000),                    \
9346
  X(_str_sp,9000, f84d0000),                    \
9347
  X(_sub,   1e00, eba00000),                    \
9348
  X(_subs,  1e00, ebb00000),                    \
9349
  X(_subi,  8000, f1a00000),                    \
9350
  X(_subis, 8000, f1b00000),                    \
9351
  X(_sxtb,  b240, fa4ff080),                    \
9352
  X(_sxth,  b200, fa0ff080),                    \
9353
  X(_tst,   4200, ea100f00),                    \
9354
  X(_uxtb,  b2c0, fa5ff080),                    \
9355
  X(_uxth,  b280, fa1ff080),                    \
9356
  X(_nop,   bf00, f3af8000),                    \
9357
  X(_yield, bf10, f3af8001),                    \
9358
  X(_wfe,   bf20, f3af8002),                    \
9359
  X(_wfi,   bf30, f3af8003),                    \
9360
  X(_sev,   bf40, f3af8004),
9361
 
9362
/* To catch errors in encoding functions, the codes are all offset by
9363
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
9364
   as 16-bit instructions.  */
9365
#define X(a,b,c) T_MNEM##a
9366
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
9367
#undef X
9368
 
9369
#define X(a,b,c) 0x##b
9370
static const unsigned short thumb_op16[] = { T16_32_TAB };
9371
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
9372
#undef X
9373
 
9374
#define X(a,b,c) 0x##c
9375
static const unsigned int thumb_op32[] = { T16_32_TAB };
9376
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
9377
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
9378
#undef X
9379
#undef T16_32_TAB
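/* Illustrative note (not from the original source): each X() row above is
   expanded three times, once as a T_MNEM_xxx enumerator and once into
   each opcode table.  Taking the X(_add, 1c00, eb000000) row,
   THUMB_OP16 (T_MNEM_add) yields 0x1c00 and THUMB_OP32 (T_MNEM_add)
   yields 0xeb000000.  */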
9380
 
9381
/* Thumb instruction encoders, in alphabetical order.  */
9382
 
9383
/* ADDW or SUBW.  */
9384
 
9385
static void
9386
do_t_add_sub_w (void)
9387
{
9388
  int Rd, Rn;
9389
 
9390
  Rd = inst.operands[0].reg;
9391
  Rn = inst.operands[1].reg;
9392
 
9393
  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9394
     is the SP-{plus,minus}-immediate form of the instruction.  */
9395
  if (Rn == REG_SP)
9396
    constraint (Rd == REG_PC, BAD_PC);
9397
  else
9398
    reject_bad_reg (Rd);
9399
 
9400
  inst.instruction |= (Rn << 16) | (Rd << 8);
9401
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9402
}
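/* Illustrative note (not from the original source): BFD_RELOC_ARM_T32_IMM12
   corresponds to the plain 12-bit unsigned immediate of ADDW/SUBW, so e.g.
   "addw r1, r2, #4095" is representable here, while larger constants must
   use other encodings.  */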
9403
 
9404
/* Parse an add or subtract instruction.  We get here with inst.instruction
9405
   equalling any of T_MNEM_add, adds, sub, or subs.  */
9406
 
9407
static void
9408
do_t_add_sub (void)
9409
{
9410
  int Rd, Rs, Rn;
9411
 
9412
  Rd = inst.operands[0].reg;
9413
  Rs = (inst.operands[1].present
9414
        ? inst.operands[1].reg    /* Rd, Rs, foo */
9415
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9416
 
9417
  if (Rd == REG_PC)
9418
    set_it_insn_type_last ();
9419
 
9420
  if (unified_syntax)
9421
    {
9422
      bfd_boolean flags;
9423
      bfd_boolean narrow;
9424
      int opcode;
9425
 
9426
      flags = (inst.instruction == T_MNEM_adds
9427
               || inst.instruction == T_MNEM_subs);
9428
      if (flags)
9429
        narrow = !in_it_block ();
9430
      else
9431
        narrow = in_it_block ();
9432
      if (!inst.operands[2].isreg)
9433
        {
9434
          int add;
9435
 
9436
          constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9437
 
9438
          add = (inst.instruction == T_MNEM_add
9439
                 || inst.instruction == T_MNEM_adds);
9440
          opcode = 0;
9441
          if (inst.size_req != 4)
9442
            {
9443
              /* Attempt to use a narrow opcode, with relaxation if
9444
                 appropriate.  */
9445
              if (Rd == REG_SP && Rs == REG_SP && !flags)
9446
                opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
9447
              else if (Rd <= 7 && Rs == REG_SP && add && !flags)
9448
                opcode = T_MNEM_add_sp;
9449
              else if (Rd <= 7 && Rs == REG_PC && add && !flags)
9450
                opcode = T_MNEM_add_pc;
9451
              else if (Rd <= 7 && Rs <= 7 && narrow)
9452
                {
9453
                  if (flags)
9454
                    opcode = add ? T_MNEM_addis : T_MNEM_subis;
9455
                  else
9456
                    opcode = add ? T_MNEM_addi : T_MNEM_subi;
9457
                }
9458
              if (opcode)
9459
                {
9460
                  inst.instruction = THUMB_OP16(opcode);
9461
                  inst.instruction |= (Rd << 4) | Rs;
9462
                  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9463
                  if (inst.size_req != 2)
9464
                    inst.relax = opcode;
9465
                }
9466
              else
9467
                constraint (inst.size_req == 2, BAD_HIREG);
9468
            }
9469
          if (inst.size_req == 4
9470
              || (inst.size_req != 2 && !opcode))
9471
            {
9472
              if (Rd == REG_PC)
9473
                {
9474
                  constraint (add, BAD_PC);
9475
                  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
9476
                             _("only SUBS PC, LR, #const allowed"));
9477
                  constraint (inst.reloc.exp.X_op != O_constant,
9478
                              _("expression too complex"));
9479
                  constraint (inst.reloc.exp.X_add_number < 0
9480
                              || inst.reloc.exp.X_add_number > 0xff,
9481
                             _("immediate value out of range"));
9482
                  inst.instruction = T2_SUBS_PC_LR
9483
                                     | inst.reloc.exp.X_add_number;
9484
                  inst.reloc.type = BFD_RELOC_UNUSED;
9485
                  return;
9486
                }
9487
              else if (Rs == REG_PC)
9488
                {
9489
                  /* Always use addw/subw.  */
9490
                  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
9491
                  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9492
                }
9493
              else
9494
                {
9495
                  inst.instruction = THUMB_OP32 (inst.instruction);
9496
                  inst.instruction = (inst.instruction & 0xe1ffffff)
9497
                                     | 0x10000000;
9498
                  if (flags)
9499
                    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9500
                  else
9501
                    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
9502
                }
9503
              inst.instruction |= Rd << 8;
9504
              inst.instruction |= Rs << 16;
9505
            }
9506
        }
9507
      else
9508
        {
9509 160 khays
          unsigned int value = inst.reloc.exp.X_add_number;
9510
          unsigned int shift = inst.operands[2].shift_kind;
9511
 
9512 16 khays
          Rn = inst.operands[2].reg;
9513
          /* See if we can do this with a 16-bit instruction.  */
9514
          if (!inst.operands[2].shifted && inst.size_req != 4)
9515
            {
9516
              if (Rd > 7 || Rs > 7 || Rn > 7)
9517
                narrow = FALSE;
9518
 
9519
              if (narrow)
9520
                {
9521
                  inst.instruction = ((inst.instruction == T_MNEM_adds
9522
                                       || inst.instruction == T_MNEM_add)
9523
                                      ? T_OPCODE_ADD_R3
9524
                                      : T_OPCODE_SUB_R3);
9525
                  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9526
                  return;
9527
                }
9528
 
9529
              if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
9530
                {
9531
                  /* Thumb-1 cores (except v6-M) require at least one high
9532
                     register in a narrow non-flag-setting add.  */
9533
                  if (Rd > 7 || Rn > 7
9534
                      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
9535
                      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
9536
                    {
9537
                      if (Rd == Rn)
9538
                        {
9539
                          Rn = Rs;
9540
                          Rs = Rd;
9541
                        }
9542
                      inst.instruction = T_OPCODE_ADD_HI;
9543
                      inst.instruction |= (Rd & 8) << 4;
9544
                      inst.instruction |= (Rd & 7);
9545
                      inst.instruction |= Rn << 3;
9546
                      return;
9547
                    }
9548
                }
9549
            }
9550
 
9551
          constraint (Rd == REG_PC, BAD_PC);
9552
          constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9553
          constraint (Rs == REG_PC, BAD_PC);
9554
          reject_bad_reg (Rn);
9555
 
9556
          /* If we get here, it can't be done in 16 bits.  */
9557
          constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
9558
                      _("shift must be constant"));
9559
          inst.instruction = THUMB_OP32 (inst.instruction);
9560
          inst.instruction |= Rd << 8;
9561
          inst.instruction |= Rs << 16;
9562 160 khays
          constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
9563
                      _("shift value over 3 not allowed in thumb mode"));
9564
          constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
9565
                      _("only LSL shift allowed in thumb mode"));
9566 16 khays
          encode_thumb32_shifted_operand (2);
9567
        }
9568
    }
9569
  else
9570
    {
9571
      constraint (inst.instruction == T_MNEM_adds
9572
                  || inst.instruction == T_MNEM_subs,
9573
                  BAD_THUMB32);
9574
 
9575
      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
9576
        {
9577
          constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
9578
                      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
9579
                      BAD_HIREG);
9580
 
9581
          inst.instruction = (inst.instruction == T_MNEM_add
9582
                              ? 0x0000 : 0x8000);
9583
          inst.instruction |= (Rd << 4) | Rs;
9584
          inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9585
          return;
9586
        }
9587
 
9588
      Rn = inst.operands[2].reg;
9589
      constraint (inst.operands[2].shifted, _("unshifted register required"));
9590
 
9591
      /* We now have Rd, Rs, and Rn set to registers.  */
9592
      if (Rd > 7 || Rs > 7 || Rn > 7)
9593
        {
9594
          /* Can't do this for SUB.      */
9595
          constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
9596
          inst.instruction = T_OPCODE_ADD_HI;
9597
          inst.instruction |= (Rd & 8) << 4;
9598
          inst.instruction |= (Rd & 7);
9599
          if (Rs == Rd)
9600
            inst.instruction |= Rn << 3;
9601
          else if (Rn == Rd)
9602
            inst.instruction |= Rs << 3;
9603
          else
9604
            constraint (1, _("dest must overlap one source register"));
9605
        }
9606
      else
9607
        {
9608
          inst.instruction = (inst.instruction == T_MNEM_add
9609
                              ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
9610
          inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9611
        }
9612
    }
9613
}
9614
 
9615
static void
9616
do_t_adr (void)
9617
{
9618
  unsigned Rd;
9619
 
9620
  Rd = inst.operands[0].reg;
9621
  reject_bad_reg (Rd);
9622
 
9623
  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
9624
    {
9625
      /* Defer to section relaxation.  */
9626
      inst.relax = inst.instruction;
9627
      inst.instruction = THUMB_OP16 (inst.instruction);
9628
      inst.instruction |= Rd << 4;
9629
    }
9630
  else if (unified_syntax && inst.size_req != 2)
9631
    {
9632
      /* Generate a 32-bit opcode.  */
9633
      inst.instruction = THUMB_OP32 (inst.instruction);
9634
      inst.instruction |= Rd << 8;
9635
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
9636
      inst.reloc.pc_rel = 1;
9637
    }
9638
  else
9639
    {
9640
      /* Generate a 16-bit opcode.  */
9641
      inst.instruction = THUMB_OP16 (inst.instruction);
9642
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9643
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
9644
      inst.reloc.pc_rel = 1;
9645
 
9646
      inst.instruction |= Rd << 4;
9647
    }
9648
}
9649
 
9650
/* Arithmetic instructions for which there is just one 16-bit
9651
   instruction encoding, and it allows only two low registers.
9652
   For maximal compatibility with ARM syntax, we allow three register
9653
   operands even when Thumb-32 instructions are not available, as long
9654
   as the first two are identical.  For instance, both "sbc r0,r1" and
9655
   "sbc r0,r0,r1" are allowed.  */
9656
static void
9657
do_t_arit3 (void)
9658
{
9659
  int Rd, Rs, Rn;
9660
 
9661
  Rd = inst.operands[0].reg;
9662
  Rs = (inst.operands[1].present
9663
        ? inst.operands[1].reg    /* Rd, Rs, foo */
9664
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9665
  Rn = inst.operands[2].reg;
9666
 
9667
  reject_bad_reg (Rd);
9668
  reject_bad_reg (Rs);
9669
  if (inst.operands[2].isreg)
9670
    reject_bad_reg (Rn);
9671
 
9672
  if (unified_syntax)
9673
    {
9674
      if (!inst.operands[2].isreg)
9675
        {
9676
          /* For an immediate, we always generate a 32-bit opcode;
9677
             section relaxation will shrink it later if possible.  */
9678
          inst.instruction = THUMB_OP32 (inst.instruction);
9679
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9680
          inst.instruction |= Rd << 8;
9681
          inst.instruction |= Rs << 16;
9682
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9683
        }
9684
      else
9685
        {
9686
          bfd_boolean narrow;
9687
 
9688
          /* See if we can do this with a 16-bit instruction.  */
9689
          if (THUMB_SETS_FLAGS (inst.instruction))
9690
            narrow = !in_it_block ();
9691
          else
9692
            narrow = in_it_block ();
9693
 
9694
          if (Rd > 7 || Rn > 7 || Rs > 7)
9695
            narrow = FALSE;
9696
          if (inst.operands[2].shifted)
9697
            narrow = FALSE;
9698
          if (inst.size_req == 4)
9699
            narrow = FALSE;
9700
 
9701
          if (narrow
9702
              && Rd == Rs)
9703
            {
9704
              inst.instruction = THUMB_OP16 (inst.instruction);
9705
              inst.instruction |= Rd;
9706
              inst.instruction |= Rn << 3;
9707
              return;
9708
            }
9709
 
9710
          /* If we get here, it can't be done in 16 bits.  */
9711
          constraint (inst.operands[2].shifted
9712
                      && inst.operands[2].immisreg,
9713
                      _("shift must be constant"));
9714
          inst.instruction = THUMB_OP32 (inst.instruction);
9715
          inst.instruction |= Rd << 8;
9716
          inst.instruction |= Rs << 16;
9717
          encode_thumb32_shifted_operand (2);
9718
        }
9719
    }
9720
  else
9721
    {
9722
      /* On its face this is a lie - the instruction does set the
9723
         flags.  However, the only supported mnemonic in this mode
9724
         says it doesn't.  */
9725
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9726
 
9727
      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9728
                  _("unshifted register required"));
9729
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9730
      constraint (Rd != Rs,
9731
                  _("dest and source1 must be the same register"));
9732
 
9733
      inst.instruction = THUMB_OP16 (inst.instruction);
9734
      inst.instruction |= Rd;
9735
      inst.instruction |= Rn << 3;
9736
    }
9737
}
9738
 
9739
/* Similarly, but for instructions where the arithmetic operation is
9740
   commutative, so we can allow either of them to be different from
9741
   the destination operand in a 16-bit instruction.  For instance, all
9742
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9743
   accepted.  */
9744
static void
9745
do_t_arit3c (void)
9746
{
9747
  int Rd, Rs, Rn;
9748
 
9749
  Rd = inst.operands[0].reg;
9750
  Rs = (inst.operands[1].present
9751
        ? inst.operands[1].reg    /* Rd, Rs, foo */
9752
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9753
  Rn = inst.operands[2].reg;
9754
 
9755
  reject_bad_reg (Rd);
9756
  reject_bad_reg (Rs);
9757
  if (inst.operands[2].isreg)
9758
    reject_bad_reg (Rn);
9759
 
9760
  if (unified_syntax)
9761
    {
9762
      if (!inst.operands[2].isreg)
9763
        {
9764
          /* For an immediate, we always generate a 32-bit opcode;
9765
             section relaxation will shrink it later if possible.  */
9766
          inst.instruction = THUMB_OP32 (inst.instruction);
9767
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9768
          inst.instruction |= Rd << 8;
9769
          inst.instruction |= Rs << 16;
9770
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9771
        }
9772
      else
9773
        {
9774
          bfd_boolean narrow;
9775
 
9776
          /* See if we can do this with a 16-bit instruction.  */
9777
          if (THUMB_SETS_FLAGS (inst.instruction))
9778
            narrow = !in_it_block ();
9779
          else
9780
            narrow = in_it_block ();
9781
 
9782
          if (Rd > 7 || Rn > 7 || Rs > 7)
9783
            narrow = FALSE;
9784
          if (inst.operands[2].shifted)
9785
            narrow = FALSE;
9786
          if (inst.size_req == 4)
9787
            narrow = FALSE;
9788
 
9789
          if (narrow)
9790
            {
9791
              if (Rd == Rs)
9792
                {
9793
                  inst.instruction = THUMB_OP16 (inst.instruction);
9794
                  inst.instruction |= Rd;
9795
                  inst.instruction |= Rn << 3;
9796
                  return;
9797
                }
9798
              if (Rd == Rn)
9799
                {
9800
                  inst.instruction = THUMB_OP16 (inst.instruction);
9801
                  inst.instruction |= Rd;
9802
                  inst.instruction |= Rs << 3;
9803
                  return;
9804
                }
9805
            }
9806
 
9807
          /* If we get here, it can't be done in 16 bits.  */
9808
          constraint (inst.operands[2].shifted
9809
                      && inst.operands[2].immisreg,
9810
                      _("shift must be constant"));
9811
          inst.instruction = THUMB_OP32 (inst.instruction);
9812
          inst.instruction |= Rd << 8;
9813
          inst.instruction |= Rs << 16;
9814
          encode_thumb32_shifted_operand (2);
9815
        }
9816
    }
9817
  else
9818
    {
9819
      /* On its face this is a lie - the instruction does set the
9820
         flags.  However, the only supported mnemonic in this mode
9821
         says it doesn't.  */
9822
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9823
 
9824
      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9825
                  _("unshifted register required"));
9826
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9827
 
9828
      inst.instruction = THUMB_OP16 (inst.instruction);
9829
      inst.instruction |= Rd;
9830
 
9831
      if (Rd == Rs)
9832
        inst.instruction |= Rn << 3;
9833
      else if (Rd == Rn)
9834
        inst.instruction |= Rs << 3;
9835
      else
9836
        constraint (1, _("dest must overlap one source register"));
9837
    }
9838
}
9839
 
9840
static void
9841
do_t_barrier (void)
9842
{
9843
  if (inst.operands[0].present)
9844
    {
9845
      constraint ((inst.instruction & 0xf0) != 0x40
9846
                  && (inst.operands[0].imm > 0xf
                      || inst.operands[0].imm < 0x0),
9848
                  _("bad barrier type"));
9849
      inst.instruction |= inst.operands[0].imm;
9850
    }
9851
  else
9852
    inst.instruction |= 0xf;
9853
}
9854
 
9855
static void
9856
do_t_bfc (void)
9857
{
9858
  unsigned Rd;
9859
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9860
  constraint (msb > 32, _("bit-field extends past end of register"));
9861
  /* The instruction encoding stores the LSB and MSB,
9862
     not the LSB and width.  */
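  /* Worked example (illustrative, not from the original source): for
     "bfc r0, #4, #8", lsb = 4 and width = 8, so msb = 12 and the field
     written below is msb - 1 = 11, the index of the highest bit
     cleared.  */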
9863
  Rd = inst.operands[0].reg;
9864
  reject_bad_reg (Rd);
9865
  inst.instruction |= Rd << 8;
9866
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
9867
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
9868
  inst.instruction |= msb - 1;
9869
}
9870
 
9871
static void
9872
do_t_bfi (void)
9873
{
9874
  int Rd, Rn;
9875
  unsigned int msb;
9876
 
9877
  Rd = inst.operands[0].reg;
9878
  reject_bad_reg (Rd);
9879
 
9880
  /* #0 in second position is alternative syntax for bfc, which is
9881
     the same instruction but with REG_PC in the Rm field.  */
9882
  if (!inst.operands[1].isreg)
9883
    Rn = REG_PC;
9884
  else
9885
    {
9886
      Rn = inst.operands[1].reg;
9887
      reject_bad_reg (Rn);
9888
    }
9889
 
9890
  msb = inst.operands[2].imm + inst.operands[3].imm;
9891
  constraint (msb > 32, _("bit-field extends past end of register"));
9892
  /* The instruction encoding stores the LSB and MSB,
9893
     not the LSB and width.  */
9894
  inst.instruction |= Rd << 8;
9895
  inst.instruction |= Rn << 16;
9896
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9897
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9898
  inst.instruction |= msb - 1;
9899
}
9900
 
9901
static void
9902
do_t_bfx (void)
9903
{
9904
  unsigned Rd, Rn;
9905
 
9906
  Rd = inst.operands[0].reg;
9907
  Rn = inst.operands[1].reg;
9908
 
9909
  reject_bad_reg (Rd);
9910
  reject_bad_reg (Rn);
9911
 
9912
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9913
              _("bit-field extends past end of register"));
9914
  inst.instruction |= Rd << 8;
9915
  inst.instruction |= Rn << 16;
9916
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9917
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9918
  inst.instruction |= inst.operands[3].imm - 1;
9919
}
9920
 
9921
/* ARM V5 Thumb BLX (argument parse)
9922
        BLX <target_addr>       which is BLX(1)
9923
        BLX <Rm>                which is BLX(2)
9924
   Unfortunately, there are two different opcodes for this mnemonic.
9925
   So, the insns[].value is not used, and the code here zaps values
9926
        into inst.instruction.
9927
 
9928
   ??? How to take advantage of the additional two bits of displacement
9929
   available in Thumb32 mode?  Need new relocation?  */
9930
 
9931
static void
9932
do_t_blx (void)
9933
{
9934
  set_it_insn_type_last ();
9935
 
9936
  if (inst.operands[0].isreg)
9937
    {
9938
      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9939
      /* We have a register, so this is BLX(2).  */
9940
      inst.instruction |= inst.operands[0].reg << 3;
9941
    }
9942
  else
9943
    {
9944
      /* No register.  This must be BLX(1).  */
9945
      inst.instruction = 0xf000e800;
9946
      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
9947
    }
9948
}
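/* Illustrative note (not from the original source): "blx r3" takes the
   register branch above, placing Rm in bits 6:3 of the 16-bit encoding,
   while "blx some_label" (an arbitrary label) falls through to the
   0xf000e800 immediate form with a BFD_RELOC_THUMB_PCREL_BLX reloc.  */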
9949
 
9950
static void
9951
do_t_branch (void)
9952
{
9953
  int opcode;
9954
  int cond;
9955
  int reloc;
9956
 
9957
  cond = inst.cond;
9958
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
9959
 
9960
  if (in_it_block ())
9961
    {
9962
      /* Conditional branches inside IT blocks are encoded as unconditional
9963
         branches.  */
9964
      cond = COND_ALWAYS;
9965
    }
9966
  else
9967
    cond = inst.cond;
9968
 
9969
  if (cond != COND_ALWAYS)
9970
    opcode = T_MNEM_bcond;
9971
  else
9972
    opcode = inst.instruction;
9973
 
9974
  if (unified_syntax
9975
      && (inst.size_req == 4
9976
          || (inst.size_req != 2
9977
              && (inst.operands[0].hasreloc
9978
                  || inst.reloc.exp.X_op == O_constant))))
9979
    {
9980
      inst.instruction = THUMB_OP32(opcode);
9981
      if (cond == COND_ALWAYS)
9982
        reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
9983
      else
9984
        {
9985
          gas_assert (cond != 0xF);
9986
          inst.instruction |= cond << 22;
9987
          reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
9988
        }
9989
    }
9990
  else
9991
    {
9992
      inst.instruction = THUMB_OP16(opcode);
9993
      if (cond == COND_ALWAYS)
9994
        reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
9995
      else
9996
        {
9997
          inst.instruction |= cond << 8;
9998
          reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
9999
        }
10000
      /* Allow section relaxation.  */
10001
      if (unified_syntax && inst.size_req != 2)
10002
        inst.relax = opcode;
10003
    }
10004
  inst.reloc.type = reloc;
10005
  inst.reloc.pc_rel = 1;
10006
}
10007
 
10008
static void
10009
do_t_bkpt (void)
10010
{
10011
  constraint (inst.cond != COND_ALWAYS,
10012
              _("instruction is always unconditional"));
10013
  if (inst.operands[0].present)
10014
    {
10015
      constraint (inst.operands[0].imm > 255,
10016
                  _("immediate value out of range"));
10017
      inst.instruction |= inst.operands[0].imm;
10018
      set_it_insn_type (NEUTRAL_IT_INSN);
10019
    }
10020
}
10021
 
10022
static void
10023
do_t_branch23 (void)
10024
{
10025
  set_it_insn_type_last ();
10026
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10027
 
10028
  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10029
     this file.  We used to simply ignore the PLT reloc type here --
10030
     the branch encoding is now needed to deal with TLSCALL relocs.
10031
     So if we see a PLT reloc now, put it back to how it used to be to
10032
     keep the preexisting behaviour.  */
10033
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10034
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10035
 
10036
#if defined(OBJ_COFF)
10037
  /* If the destination of the branch is a defined symbol which does not have
10038
     the THUMB_FUNC attribute, then we must be calling a function which has
10039
     the (interfacearm) attribute.  We look for the Thumb entry point to that
10040
     function and change the branch to refer to that function instead.  */
10041
  if (   inst.reloc.exp.X_op == O_symbol
10042
      && inst.reloc.exp.X_add_symbol != NULL
10043
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10044
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10045
    inst.reloc.exp.X_add_symbol =
10046
      find_real_start (inst.reloc.exp.X_add_symbol);
10047
#endif
10048
}
10049
 
10050
static void
10051
do_t_bx (void)
10052
{
10053
  set_it_insn_type_last ();
10054
  inst.instruction |= inst.operands[0].reg << 3;
10055
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
10056
     should cause the alignment to be checked once it is known.  This is
10057
     because BX PC only works if the instruction is word aligned.  */
10058
}
10059
 
10060
static void
10061
do_t_bxj (void)
10062
{
10063
  int Rm;
10064
 
10065
  set_it_insn_type_last ();
10066
  Rm = inst.operands[0].reg;
10067
  reject_bad_reg (Rm);
10068
  inst.instruction |= Rm << 16;
10069
}
10070
 
10071
static void
10072
do_t_clz (void)
10073
{
10074
  unsigned Rd;
10075
  unsigned Rm;
10076
 
10077
  Rd = inst.operands[0].reg;
10078
  Rm = inst.operands[1].reg;
10079
 
10080
  reject_bad_reg (Rd);
10081
  reject_bad_reg (Rm);
10082
 
10083
  inst.instruction |= Rd << 8;
10084
  inst.instruction |= Rm << 16;
10085
  inst.instruction |= Rm;
10086
}
10087
 
10088
static void
10089
do_t_cps (void)
10090
{
10091
  set_it_insn_type (OUTSIDE_IT_INSN);
10092
  inst.instruction |= inst.operands[0].imm;
10093
}
10094
 
10095
static void
10096
do_t_cpsi (void)
10097
{
10098
  set_it_insn_type (OUTSIDE_IT_INSN);
10099
  if (unified_syntax
10100
      && (inst.operands[1].present || inst.size_req == 4)
10101
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
10102
    {
10103
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
10104
      inst.instruction = 0xf3af8000;
10105
      inst.instruction |= imod << 9;
10106
      inst.instruction |= inst.operands[0].imm << 5;
10107
      if (inst.operands[1].present)
10108
        inst.instruction |= 0x100 | inst.operands[1].imm;
10109
    }
10110
  else
10111
    {
10112
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
10113
                  && (inst.operands[0].imm & 4),
10114
                  _("selected processor does not support 'A' form "
10115
                    "of this instruction"));
10116
      constraint (inst.operands[1].present || inst.size_req == 4,
10117
                  _("Thumb does not support the 2-argument "
10118
                    "form of this instruction"));
10119
      inst.instruction |= inst.operands[0].imm;
10120
    }
10121
}
10122
 
10123
/* THUMB CPY instruction (argument parse).  */
10124
 
10125
static void
10126
do_t_cpy (void)
10127
{
10128
  if (inst.size_req == 4)
10129
    {
10130
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
10131
      inst.instruction |= inst.operands[0].reg << 8;
10132
      inst.instruction |= inst.operands[1].reg;
10133
    }
10134
  else
10135
    {
10136
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10137
      inst.instruction |= (inst.operands[0].reg & 0x7);
10138
      inst.instruction |= inst.operands[1].reg << 3;
10139
    }
10140
}
10141
 
10142
static void
10143
do_t_cbz (void)
10144
{
10145
  set_it_insn_type (OUTSIDE_IT_INSN);
10146
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
10147
  inst.instruction |= inst.operands[0].reg;
10148
  inst.reloc.pc_rel = 1;
10149
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10150
}
10151
 
10152
static void
10153
do_t_dbg (void)
10154
{
10155
  inst.instruction |= inst.operands[0].imm;
10156
}
10157
 
10158
static void
10159
do_t_div (void)
10160
{
10161
  unsigned Rd, Rn, Rm;
10162
 
10163
  Rd = inst.operands[0].reg;
10164
  Rn = (inst.operands[1].present
10165
        ? inst.operands[1].reg : Rd);
10166
  Rm = inst.operands[2].reg;
10167
 
10168
  reject_bad_reg (Rd);
10169
  reject_bad_reg (Rn);
10170
  reject_bad_reg (Rm);
10171
 
10172
  inst.instruction |= Rd << 8;
10173
  inst.instruction |= Rn << 16;
10174
  inst.instruction |= Rm;
10175
}
10176
 
10177
static void
10178
do_t_hint (void)
10179
{
10180
  if (unified_syntax && inst.size_req == 4)
10181
    inst.instruction = THUMB_OP32 (inst.instruction);
10182
  else
10183
    inst.instruction = THUMB_OP16 (inst.instruction);
10184
}
10185
 
10186
static void
10187
do_t_it (void)
10188
{
10189
  unsigned int cond = inst.operands[0].imm;
10190
 
10191
  set_it_insn_type (IT_INSN);
10192
  now_it.mask = (inst.instruction & 0xf) | 0x10;
10193
  now_it.cc = cond;
10194
 
10195
  /* If the condition is a negative condition, invert the mask.  */
10196
  if ((cond & 0x1) == 0x0)
10197
    {
10198
      unsigned int mask = inst.instruction & 0x000f;
10199
 
10200
      if ((mask & 0x7) == 0)
10201
        /* no conversion needed */;
10202
      else if ((mask & 0x3) == 0)
10203
        mask ^= 0x8;
10204
      else if ((mask & 0x1) == 0)
10205
        mask ^= 0xC;
10206
      else
10207
        mask ^= 0xE;
10208
 
10209
      inst.instruction &= 0xfff0;
10210
      inst.instruction |= mask;
10211
    }
10212
 
10213
  inst.instruction |= cond << 4;
10214
}
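/* Worked example of the mask inversion above (illustrative; it assumes
   the opcode table stores IT masks that are valid for odd-numbered
   conditions, which is what the inversion implies):

       ite eq
         cond = 0x0 (EQ), table mask = 0x4 (two-instruction block)
         cond<0> == 0 and (mask & 0x3) == 0, so mask ^= 0x8 -> 0xc
         final: 0xbf00 | (0x0 << 4) | 0xc = 0xbf0c, i.e. each 'E' slot
         ends up as ~cond<0>, as the architecture requires.  */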
10215
 
10216
/* Helper function used for both push/pop and ldm/stm.  */
10217
static void
10218
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
10219
{
10220
  bfd_boolean load;
10221
 
10222
  load = (inst.instruction & (1 << 20)) != 0;
10223
 
10224
  if (mask & (1 << 13))
10225
    inst.error =  _("SP not allowed in register list");
10226
 
10227
  if ((mask & (1 << base)) != 0
10228
      && writeback)
10229
    inst.error = _("having the base register in the register list when "
10230
                   "using write back is UNPREDICTABLE");
10231
 
10232
  if (load)
10233
    {
10234
      if (mask & (1 << 15))
10235
        {
10236
          if (mask & (1 << 14))
10237
            inst.error = _("LR and PC should not both be in register list");
10238
          else
10239
            set_it_insn_type_last ();
10240
        }
10241
    }
10242
  else
10243
    {
10244
      if (mask & (1 << 15))
10245
        inst.error = _("PC not allowed in register list");
10246
    }
10247
 
10248
  if ((mask & (mask - 1)) == 0)
10249
    {
10250
      /* Single register transfers implemented as str/ldr.  */
10251
      if (writeback)
10252
        {
10253
          if (inst.instruction & (1 << 23))
10254
            inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
10255
          else
10256
            inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
10257
        }
10258
      else
10259
        {
10260
          if (inst.instruction & (1 << 23))
10261
            inst.instruction = 0x00800000; /* ia -> [base] */
10262
          else
10263
            inst.instruction = 0x00000c04; /* db -> [base, #-4] */
10264
        }
10265
 
10266
      inst.instruction |= 0xf8400000;
10267
      if (load)
10268
        inst.instruction |= 0x00100000;
10269
 
10270
      mask = ffs (mask) - 1;
10271
      mask <<= 12;
10272
    }
10273
  else if (writeback)
10274
    inst.instruction |= WRITE_BACK;
10275
 
10276
  inst.instruction |= mask;
10277
  inst.instruction |= base << 16;
10278
}
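/* Illustration of the single-register conversion above (the final word
   assumes the standard T32 post-indexed LDR encoding):

       ldmia.w r0!, {r5}      mask = 0x0020, writeback, IA, load
         -> rewritten as a post-indexed load, ldr.w r5, [r0], #4:
            0x00000b04 | 0xf8400000 | 0x00100000 | (5 << 12) | (0 << 16)
            = 0xf8505b04.  */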
10279
 
10280
static void
10281
do_t_ldmstm (void)
10282
{
10283
  /* This really doesn't seem worth it.  */
10284
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10285
              _("expression too complex"));
10286
  constraint (inst.operands[1].writeback,
10287
              _("Thumb load/store multiple does not support {reglist}^"));
10288
 
10289
  if (unified_syntax)
10290
    {
10291
      bfd_boolean narrow;
10292
      unsigned mask;
10293
 
10294
      narrow = FALSE;
10295
      /* See if we can use a 16-bit instruction.  */
10296
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
10297
          && inst.size_req != 4
10298
          && !(inst.operands[1].imm & ~0xff))
10299
        {
10300
          mask = 1 << inst.operands[0].reg;
10301
 
10302
          if (inst.operands[0].reg <= 7)
10303
            {
10304
              if (inst.instruction == T_MNEM_stmia
10305
                  ? inst.operands[0].writeback
10306
                  : (inst.operands[0].writeback
10307
                     == !(inst.operands[1].imm & mask)))
10308
                {
10309
                  if (inst.instruction == T_MNEM_stmia
10310
                      && (inst.operands[1].imm & mask)
10311
                      && (inst.operands[1].imm & (mask - 1)))
10312
                    as_warn (_("value stored for r%d is UNKNOWN"),
10313
                             inst.operands[0].reg);
10314
 
10315
                  inst.instruction = THUMB_OP16 (inst.instruction);
10316
                  inst.instruction |= inst.operands[0].reg << 8;
10317
                  inst.instruction |= inst.operands[1].imm;
10318
                  narrow = TRUE;
10319
                }
10320
              else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10321
                {
10322
                  /* This means 1 register in reg list, in one of 3 situations:
10323
                     1. Instruction is stmia, but without writeback.
10324
                     2. ldmia without writeback, but with Rn not in
10325
                        reglist.
10326
                     3. ldmia with writeback, but with Rn in reglist.
10327
                     Case 3 is UNPREDICTABLE behaviour, so we handle
10328
                     cases 1 and 2, which can be converted into a 16-bit
10329
                     str or ldr. The SP cases are handled below.  */
10330
                  unsigned long opcode;
10331
                  /* First, record an error for Case 3.  */
10332
                  if (inst.operands[1].imm & mask
10333
                      && inst.operands[0].writeback)
10334
                    inst.error =
10335
                        _("having the base register in the register list when "
10336
                          "using write back is UNPREDICTABLE");
10337
 
10338
                  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
10339
                                                             : T_MNEM_ldr);
10340
                  inst.instruction = THUMB_OP16 (opcode);
10341
                  inst.instruction |= inst.operands[0].reg << 3;
10342
                  inst.instruction |= (ffs (inst.operands[1].imm)-1);
10343
                  narrow = TRUE;
10344
                }
10345
            }
10346
          else if (inst.operands[0].reg == REG_SP)
10347
            {
10348
              if (inst.operands[0].writeback)
10349
                {
10350
                  inst.instruction =
10351
                        THUMB_OP16 (inst.instruction == T_MNEM_stmia
10352
                                    ? T_MNEM_push : T_MNEM_pop);
10353
                  inst.instruction |= inst.operands[1].imm;
10354
                  narrow = TRUE;
10355
                }
10356
              else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10357
                {
10358
                  inst.instruction =
10359
                        THUMB_OP16 (inst.instruction == T_MNEM_stmia
10360
                                    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
10361
                  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
10362
                  narrow = TRUE;
10363
                }
10364
            }
10365
        }
10366
 
10367
      if (!narrow)
10368
        {
10369
          if (inst.instruction < 0xffff)
10370
            inst.instruction = THUMB_OP32 (inst.instruction);
10371
 
10372
          encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
10373
                                inst.operands[0].writeback);
10374
        }
10375
    }
10376
  else
10377
    {
10378
      constraint (inst.operands[0].reg > 7
10379
                  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
10380
      constraint (inst.instruction != T_MNEM_ldmia
10381
                  && inst.instruction != T_MNEM_stmia,
10382
                  _("Thumb-2 instruction only valid in unified syntax"));
10383
      if (inst.instruction == T_MNEM_stmia)
10384
        {
10385
          if (!inst.operands[0].writeback)
10386
            as_warn (_("this instruction will write back the base register"));
10387
          if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
10388
              && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
10389
            as_warn (_("value stored for r%d is UNKNOWN"),
10390
                     inst.operands[0].reg);
10391
        }
10392
      else
10393
        {
10394
          if (!inst.operands[0].writeback
10395
              && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
10396
            as_warn (_("this instruction will write back the base register"));
10397
          else if (inst.operands[0].writeback
10398
                   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
10399
            as_warn (_("this instruction will not write back the base register"));
10400
        }
10401
 
10402
      inst.instruction = THUMB_OP16 (inst.instruction);
10403
      inst.instruction |= inst.operands[0].reg << 8;
10404
      inst.instruction |= inst.operands[1].imm;
10405
    }
10406
}
10407
 
10408
static void
10409
do_t_ldrex (void)
10410
{
10411
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
10412
              || inst.operands[1].postind || inst.operands[1].writeback
10413
              || inst.operands[1].immisreg || inst.operands[1].shifted
10414
              || inst.operands[1].negative,
10415
              BAD_ADDR_MODE);
10416
 
10417
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
10418
 
10419
  inst.instruction |= inst.operands[0].reg << 12;
10420
  inst.instruction |= inst.operands[1].reg << 16;
10421
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10422
}
10423
 
10424
static void
10425
do_t_ldrexd (void)
10426
{
10427
  if (!inst.operands[1].present)
10428
    {
10429
      constraint (inst.operands[0].reg == REG_LR,
10430
                  _("r14 not allowed as first register "
10431
                    "when second register is omitted"));
10432
      inst.operands[1].reg = inst.operands[0].reg + 1;
10433
    }
10434
  constraint (inst.operands[0].reg == inst.operands[1].reg,
10435
              BAD_OVERLAP);
10436
 
10437
  inst.instruction |= inst.operands[0].reg << 12;
10438
  inst.instruction |= inst.operands[1].reg << 8;
10439
  inst.instruction |= inst.operands[2].reg << 16;
10440
}
10441
 
10442
static void
10443
do_t_ldst (void)
10444
{
10445
  unsigned long opcode;
10446
  int Rn;
10447
 
10448
  if (inst.operands[0].isreg
10449
      && !inst.operands[0].preind
10450
      && inst.operands[0].reg == REG_PC)
10451
    set_it_insn_type_last ();
10452
 
10453
  opcode = inst.instruction;
10454
  if (unified_syntax)
10455
    {
10456
      if (!inst.operands[1].isreg)
10457
        {
10458
          if (opcode <= 0xffff)
10459
            inst.instruction = THUMB_OP32 (opcode);
10460
          if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10461
            return;
10462
        }
10463
      if (inst.operands[1].isreg
10464
          && !inst.operands[1].writeback
10465
          && !inst.operands[1].shifted && !inst.operands[1].postind
10466
          && !inst.operands[1].negative && inst.operands[0].reg <= 7
10467
          && opcode <= 0xffff
10468
          && inst.size_req != 4)
10469
        {
10470
          /* Insn may have a 16-bit form.  */
10471
          Rn = inst.operands[1].reg;
10472
          if (inst.operands[1].immisreg)
10473
            {
10474
              inst.instruction = THUMB_OP16 (opcode);
10475
              /* [Rn, Rm] (register offset) */
10476
              if (Rn <= 7 && inst.operands[1].imm <= 7)
10477
                goto op16;
10478
              else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
10479
                reject_bad_reg (inst.operands[1].imm);
10480
            }
10481
          else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
10482
                    && opcode != T_MNEM_ldrsb)
10483
                   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
10484
                   || (Rn == REG_SP && opcode == T_MNEM_str))
10485
            {
10486
              /* [Rn, #const] */
10487
              if (Rn > 7)
10488
                {
10489
                  if (Rn == REG_PC)
10490
                    {
10491
                      if (inst.reloc.pc_rel)
10492
                        opcode = T_MNEM_ldr_pc2;
10493
                      else
10494
                        opcode = T_MNEM_ldr_pc;
10495
                    }
10496
                  else
10497
                    {
10498
                      if (opcode == T_MNEM_ldr)
10499
                        opcode = T_MNEM_ldr_sp;
10500
                      else
10501
                        opcode = T_MNEM_str_sp;
10502
                    }
10503
                  inst.instruction = inst.operands[0].reg << 8;
10504
                }
10505
              else
10506
                {
10507
                  inst.instruction = inst.operands[0].reg;
10508
                  inst.instruction |= inst.operands[1].reg << 3;
10509
                }
10510
              inst.instruction |= THUMB_OP16 (opcode);
10511
              if (inst.size_req == 2)
10512
                inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10513
              else
10514
                inst.relax = opcode;
10515
              return;
10516
            }
10517
        }
10518
      /* Definitely a 32-bit variant.  */
10519
 
10520
      /* Warning for Erratum 752419.  */
10521
      if (opcode == T_MNEM_ldr
10522
          && inst.operands[0].reg == REG_SP
10523
          && inst.operands[1].writeback == 1
10524
          && !inst.operands[1].immisreg)
10525
        {
10526
          if (no_cpu_selected ()
10527
              || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
10528
                  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
10529
                  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
10530
            as_warn (_("This instruction may be unpredictable "
10531
                       "if executed on M-profile cores "
10532
                       "with interrupts enabled."));
10533
        }
10534
 
10535
      /* Do some validations regarding addressing modes.  */
10536
      if (inst.operands[1].immisreg && opcode != T_MNEM_ldr
10537
          && opcode != T_MNEM_str)
10538
        reject_bad_reg (inst.operands[1].imm);
10539
 
10540
      inst.instruction = THUMB_OP32 (opcode);
10541
      inst.instruction |= inst.operands[0].reg << 12;
10542
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
10543
      return;
10544
    }
10545
 
10546
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
10547
 
10548
  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
10549
    {
10550
      /* Only [Rn,Rm] is acceptable.  */
10551
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
10552
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
10553
                  || inst.operands[1].postind || inst.operands[1].shifted
10554
                  || inst.operands[1].negative,
10555
                  _("Thumb does not support this addressing mode"));
10556
      inst.instruction = THUMB_OP16 (inst.instruction);
10557
      goto op16;
10558
    }
10559
 
10560
  inst.instruction = THUMB_OP16 (inst.instruction);
10561
  if (!inst.operands[1].isreg)
10562
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10563
      return;
10564
 
10565
  constraint (!inst.operands[1].preind
10566
              || inst.operands[1].shifted
10567
              || inst.operands[1].writeback,
10568
              _("Thumb does not support this addressing mode"));
10569
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
10570
    {
10571
      constraint (inst.instruction & 0x0600,
10572
                  _("byte or halfword not valid for base register"));
10573
      constraint (inst.operands[1].reg == REG_PC
10574
                  && !(inst.instruction & THUMB_LOAD_BIT),
10575
                  _("r15 based store not allowed"));
10576
      constraint (inst.operands[1].immisreg,
10577
                  _("invalid base register for register offset"));
10578
 
10579
      if (inst.operands[1].reg == REG_PC)
10580
        inst.instruction = T_OPCODE_LDR_PC;
10581
      else if (inst.instruction & THUMB_LOAD_BIT)
10582
        inst.instruction = T_OPCODE_LDR_SP;
10583
      else
10584
        inst.instruction = T_OPCODE_STR_SP;
10585
 
10586
      inst.instruction |= inst.operands[0].reg << 8;
10587
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10588
      return;
10589
    }
10590
 
10591
  constraint (inst.operands[1].reg > 7, BAD_HIREG);
10592
  if (!inst.operands[1].immisreg)
10593
    {
10594
      /* Immediate offset.  */
10595
      inst.instruction |= inst.operands[0].reg;
10596
      inst.instruction |= inst.operands[1].reg << 3;
10597
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10598
      return;
10599
    }
10600
 
10601
  /* Register offset.  */
10602
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
10603
  constraint (inst.operands[1].negative,
10604
              _("Thumb does not support this addressing mode"));
10605
 
10606
 op16:
10607
  switch (inst.instruction)
10608
    {
10609
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
10610
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
10611
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
10612
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
10613
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
10614
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
10615
    case 0x5600 /* ldrsb */:
10616
    case 0x5e00 /* ldrsh */: break;
10617
    default: abort ();
10618
    }
10619
 
10620
  inst.instruction |= inst.operands[0].reg;
10621
  inst.instruction |= inst.operands[1].reg << 3;
10622
  inst.instruction |= inst.operands[1].imm << 6;
10623
}
10624
 
10625
static void
10626
do_t_ldstd (void)
10627
{
10628
  if (!inst.operands[1].present)
10629
    {
10630
      inst.operands[1].reg = inst.operands[0].reg + 1;
10631
      constraint (inst.operands[0].reg == REG_LR,
10632
                  _("r14 not allowed here"));
10633
    }
10634
  inst.instruction |= inst.operands[0].reg << 12;
10635
  inst.instruction |= inst.operands[1].reg << 8;
10636
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10637
}
10638
 
10639
static void
10640
do_t_ldstt (void)
10641
{
10642
  inst.instruction |= inst.operands[0].reg << 12;
10643
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10644
}
10645
 
10646
static void
10647
do_t_mla (void)
10648
{
10649
  unsigned Rd, Rn, Rm, Ra;
10650
 
10651
  Rd = inst.operands[0].reg;
10652
  Rn = inst.operands[1].reg;
10653
  Rm = inst.operands[2].reg;
10654
  Ra = inst.operands[3].reg;
10655
 
10656
  reject_bad_reg (Rd);
10657
  reject_bad_reg (Rn);
10658
  reject_bad_reg (Rm);
10659
  reject_bad_reg (Ra);
10660
 
10661
  inst.instruction |= Rd << 8;
10662
  inst.instruction |= Rn << 16;
10663
  inst.instruction |= Rm;
10664
  inst.instruction |= Ra << 12;
10665
}
10666
 
10667
static void
10668
do_t_mlal (void)
10669
{
10670
  unsigned RdLo, RdHi, Rn, Rm;
10671
 
10672
  RdLo = inst.operands[0].reg;
10673
  RdHi = inst.operands[1].reg;
10674
  Rn = inst.operands[2].reg;
10675
  Rm = inst.operands[3].reg;
10676
 
10677
  reject_bad_reg (RdLo);
10678
  reject_bad_reg (RdHi);
10679
  reject_bad_reg (Rn);
10680
  reject_bad_reg (Rm);
10681
 
10682
  inst.instruction |= RdLo << 12;
10683
  inst.instruction |= RdHi << 8;
10684
  inst.instruction |= Rn << 16;
10685
  inst.instruction |= Rm;
10686
}
10687
 
10688
static void
10689
do_t_mov_cmp (void)
10690
{
10691
  unsigned Rn, Rm;
10692
 
10693
  Rn = inst.operands[0].reg;
10694
  Rm = inst.operands[1].reg;
10695
 
10696
  if (Rn == REG_PC)
10697
    set_it_insn_type_last ();
10698
 
10699
  if (unified_syntax)
10700
    {
10701
      int r0off = (inst.instruction == T_MNEM_mov
10702
                   || inst.instruction == T_MNEM_movs) ? 8 : 16;
10703
      unsigned long opcode;
10704
      bfd_boolean narrow;
10705
      bfd_boolean low_regs;
10706
 
10707
      low_regs = (Rn <= 7 && Rm <= 7);
10708
      opcode = inst.instruction;
10709
      if (in_it_block ())
10710
        narrow = opcode != T_MNEM_movs;
10711
      else
10712
        narrow = opcode != T_MNEM_movs || low_regs;
10713
      if (inst.size_req == 4
10714
          || inst.operands[1].shifted)
10715
        narrow = FALSE;
10716
 
10717
      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
10718
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
10719
          && !inst.operands[1].shifted
10720
          && Rn == REG_PC
10721
          && Rm == REG_LR)
10722
        {
10723
          inst.instruction = T2_SUBS_PC_LR;
10724
          return;
10725
        }
10726
 
10727
      if (opcode == T_MNEM_cmp)
10728
        {
10729
          constraint (Rn == REG_PC, BAD_PC);
10730
          if (narrow)
10731
            {
10732
              /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10733
                 but valid.  */
10734
              warn_deprecated_sp (Rm);
10735
              /* R15 was documented as a valid choice for Rm in ARMv6,
10736
                 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
10737
                 tools reject R15, so we do too.  */
10738
              constraint (Rm == REG_PC, BAD_PC);
10739
            }
10740
          else
10741
            reject_bad_reg (Rm);
10742
        }
10743
      else if (opcode == T_MNEM_mov
10744
               || opcode == T_MNEM_movs)
10745
        {
10746
          if (inst.operands[1].isreg)
10747
            {
10748
              if (opcode == T_MNEM_movs)
10749
                {
10750
                  reject_bad_reg (Rn);
10751
                  reject_bad_reg (Rm);
10752
                }
10753
              else if (narrow)
10754
                {
10755
                  /* This is mov.n.  */
10756
                  if ((Rn == REG_SP || Rn == REG_PC)
10757
                      && (Rm == REG_SP || Rm == REG_PC))
10758
                    {
10759
                      as_warn (_("Use of r%u as a source register is "
10760
                                 "deprecated when r%u is the destination "
10761
                                 "register."), Rm, Rn);
10762
                    }
10763
                }
10764
              else
10765
                {
10766
                  /* This is mov.w.  */
10767
                  constraint (Rn == REG_PC, BAD_PC);
10768
                  constraint (Rm == REG_PC, BAD_PC);
10769
                  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
10770
                }
10771
            }
10772
          else
10773
            reject_bad_reg (Rn);
10774
        }
10775
 
10776
      if (!inst.operands[1].isreg)
10777
        {
10778
          /* Immediate operand.  */
10779
          if (!in_it_block () && opcode == T_MNEM_mov)
10780
            narrow = 0;
10781
          if (low_regs && narrow)
10782
            {
10783
              inst.instruction = THUMB_OP16 (opcode);
10784
              inst.instruction |= Rn << 8;
10785
              if (inst.size_req == 2)
10786
                inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10787
              else
10788
                inst.relax = opcode;
10789
            }
10790
          else
10791
            {
10792
              inst.instruction = THUMB_OP32 (inst.instruction);
10793
              inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10794
              inst.instruction |= Rn << r0off;
10795
              inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10796
            }
10797
        }
10798
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
10799
               && (inst.instruction == T_MNEM_mov
10800
                   || inst.instruction == T_MNEM_movs))
10801
        {
10802
          /* Register shifts are encoded as separate shift instructions.  */
10803
          bfd_boolean flags = (inst.instruction == T_MNEM_movs);
10804
 
10805
          if (in_it_block ())
10806
            narrow = !flags;
10807
          else
10808
            narrow = flags;
10809
 
10810
          if (inst.size_req == 4)
10811
            narrow = FALSE;
10812
 
10813
          if (!low_regs || inst.operands[1].imm > 7)
10814
            narrow = FALSE;
10815
 
10816
          if (Rn != Rm)
10817
            narrow = FALSE;
10818
 
10819
          switch (inst.operands[1].shift_kind)
10820
            {
10821
            case SHIFT_LSL:
10822
              opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
10823
              break;
10824
            case SHIFT_ASR:
10825
              opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
10826
              break;
10827
            case SHIFT_LSR:
10828
              opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
10829
              break;
10830
            case SHIFT_ROR:
10831
              opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
10832
              break;
10833
            default:
10834
              abort ();
10835
            }
10836
 
10837
          inst.instruction = opcode;
10838
          if (narrow)
10839
            {
10840
              inst.instruction |= Rn;
10841
              inst.instruction |= inst.operands[1].imm << 3;
10842
            }
10843
          else
10844
            {
10845
              if (flags)
10846
                inst.instruction |= CONDS_BIT;
10847
 
10848
              inst.instruction |= Rn << 8;
10849
              inst.instruction |= Rm << 16;
10850
              inst.instruction |= inst.operands[1].imm;
10851
            }
10852
        }
10853
      else if (!narrow)
10854
        {
10855
          /* Some mov with immediate shift have narrow variants.
10856
             Register shifts are handled above.  */
10857
          if (low_regs && inst.operands[1].shifted
10858
              && (inst.instruction == T_MNEM_mov
10859
                  || inst.instruction == T_MNEM_movs))
10860
            {
10861
              if (in_it_block ())
10862
                narrow = (inst.instruction == T_MNEM_mov);
10863
              else
10864
                narrow = (inst.instruction == T_MNEM_movs);
10865
            }
10866
 
10867
          if (narrow)
10868
            {
10869
              switch (inst.operands[1].shift_kind)
10870
                {
10871
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10872
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10873
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10874
                default: narrow = FALSE; break;
10875
                }
10876
            }
10877
 
10878
          if (narrow)
10879
            {
10880
              inst.instruction |= Rn;
10881
              inst.instruction |= Rm << 3;
10882
              inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10883
            }
10884
          else
10885
            {
10886
              inst.instruction = THUMB_OP32 (inst.instruction);
10887
              inst.instruction |= Rn << r0off;
10888
              encode_thumb32_shifted_operand (1);
10889
            }
10890
        }
10891
      else
10892
        switch (inst.instruction)
10893
          {
10894
          case T_MNEM_mov:
10895
            inst.instruction = T_OPCODE_MOV_HR;
10896
            inst.instruction |= (Rn & 0x8) << 4;
10897
            inst.instruction |= (Rn & 0x7);
10898
            inst.instruction |= Rm << 3;
10899
            break;
10900
 
10901
          case T_MNEM_movs:
10902
            /* We know we have low registers at this point.
10903
               Generate LSLS Rd, Rs, #0.  */
10904
            inst.instruction = T_OPCODE_LSL_I;
10905
            inst.instruction |= Rn;
10906
            inst.instruction |= Rm << 3;
10907
            break;
10908
 
10909
          case T_MNEM_cmp:
10910
            if (low_regs)
10911
              {
10912
                inst.instruction = T_OPCODE_CMP_LR;
10913
                inst.instruction |= Rn;
10914
                inst.instruction |= Rm << 3;
10915
              }
10916
            else
10917
              {
10918
                inst.instruction = T_OPCODE_CMP_HR;
10919
                inst.instruction |= (Rn & 0x8) << 4;
10920
                inst.instruction |= (Rn & 0x7);
10921
                inst.instruction |= Rm << 3;
10922
              }
10923
            break;
10924
          }
10925
      return;
10926
    }
10927
 
10928
  inst.instruction = THUMB_OP16 (inst.instruction);
10929
 
10930
  /* PR 10443: Do not silently ignore shifted operands.  */
10931
  constraint (inst.operands[1].shifted,
10932
              _("shifts in CMP/MOV instructions are only supported in unified syntax"));
10933
 
10934
  if (inst.operands[1].isreg)
10935
    {
10936
      if (Rn < 8 && Rm < 8)
10937
        {
10938
          /* A move of two lowregs is encoded as ADD Rd, Rs, #0
10939
             since a MOV instruction produces unpredictable results.  */
10940
          if (inst.instruction == T_OPCODE_MOV_I8)
10941
            inst.instruction = T_OPCODE_ADD_I3;
10942
          else
10943
            inst.instruction = T_OPCODE_CMP_LR;
10944
 
10945
          inst.instruction |= Rn;
10946
          inst.instruction |= Rm << 3;
10947
        }
10948
      else
10949
        {
10950
          if (inst.instruction == T_OPCODE_MOV_I8)
10951
            inst.instruction = T_OPCODE_MOV_HR;
10952
          else
10953
            inst.instruction = T_OPCODE_CMP_HR;
10954
          do_t_cpy ();
10955
        }
10956
    }
10957
  else
10958
    {
10959
      constraint (Rn > 7,
10960
                  _("only lo regs allowed with immediate"));
10961
      inst.instruction |= Rn << 8;
10962
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10963
    }
10964
}
10965
 
10966
static void
10967
do_t_mov16 (void)
10968
{
10969
  unsigned Rd;
10970
  bfd_vma imm;
10971
  bfd_boolean top;
10972
 
10973
  top = (inst.instruction & 0x00800000) != 0;
10974
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
10975
    {
10976
      constraint (top, _(":lower16: not allowed in this instruction"));
10977
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
10978
    }
10979
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
10980
    {
10981
      constraint (!top, _(":upper16: not allowed in this instruction"));
10982
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
10983
    }
10984
 
10985
  Rd = inst.operands[0].reg;
10986
  reject_bad_reg (Rd);
10987
 
10988
  inst.instruction |= Rd << 8;
10989
  if (inst.reloc.type == BFD_RELOC_UNUSED)
10990
    {
10991
      imm = inst.reloc.exp.X_add_number;
10992
      inst.instruction |= (imm & 0xf000) << 4;
10993
      inst.instruction |= (imm & 0x0800) << 15;
10994
      inst.instruction |= (imm & 0x0700) << 4;
10995
      inst.instruction |= (imm & 0x00ff);
10996
    }
10997
}
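/* Worked example of the immediate splitting above, for a constant that
   is already resolved (illustrative; the MOVW base opcode comes from
   the opcode table):

       movw r0, #0x1234
         (imm & 0xf000) << 4  = 0x00010000   (imm4)
         (imm & 0x0800) << 15 = 0            (i)
         (imm & 0x0700) << 4  = 0x00002000   (imm3)
         (imm & 0x00ff)       = 0x34         (imm8)
       giving 0xf2412034 with the usual 0xf2400000 MOVW base.  */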
10998
 
10999
static void
11000
do_t_mvn_tst (void)
11001
{
11002
  unsigned Rn, Rm;
11003
 
11004
  Rn = inst.operands[0].reg;
11005
  Rm = inst.operands[1].reg;
11006
 
11007
  if (inst.instruction == T_MNEM_cmp
11008
      || inst.instruction == T_MNEM_cmn)
11009
    constraint (Rn == REG_PC, BAD_PC);
11010
  else
11011
    reject_bad_reg (Rn);
11012
  reject_bad_reg (Rm);
11013
 
11014
  if (unified_syntax)
11015
    {
11016
      int r0off = (inst.instruction == T_MNEM_mvn
11017
                   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11018
      bfd_boolean narrow;
11019
 
11020
      if (inst.size_req == 4
11021
          || inst.instruction > 0xffff
11022
          || inst.operands[1].shifted
11023
          || Rn > 7 || Rm > 7)
11024
        narrow = FALSE;
11025
      else if (inst.instruction == T_MNEM_cmn)
11026
        narrow = TRUE;
11027
      else if (THUMB_SETS_FLAGS (inst.instruction))
11028
        narrow = !in_it_block ();
11029
      else
11030
        narrow = in_it_block ();
11031
 
11032
      if (!inst.operands[1].isreg)
11033
        {
11034
          /* For an immediate, we always generate a 32-bit opcode;
11035
             section relaxation will shrink it later if possible.  */
11036
          if (inst.instruction < 0xffff)
11037
            inst.instruction = THUMB_OP32 (inst.instruction);
11038
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11039
          inst.instruction |= Rn << r0off;
11040
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11041
        }
11042
      else
11043
        {
11044
          /* See if we can do this with a 16-bit instruction.  */
11045
          if (narrow)
11046
            {
11047
              inst.instruction = THUMB_OP16 (inst.instruction);
11048
              inst.instruction |= Rn;
11049
              inst.instruction |= Rm << 3;
11050
            }
11051
          else
11052
            {
11053
              constraint (inst.operands[1].shifted
11054
                          && inst.operands[1].immisreg,
11055
                          _("shift must be constant"));
11056
              if (inst.instruction < 0xffff)
11057
                inst.instruction = THUMB_OP32 (inst.instruction);
11058
              inst.instruction |= Rn << r0off;
11059
              encode_thumb32_shifted_operand (1);
11060
            }
11061
        }
11062
    }
11063
  else
11064
    {
11065
      constraint (inst.instruction > 0xffff
11066
                  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
11067
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
11068
                  _("unshifted register required"));
11069
      constraint (Rn > 7 || Rm > 7,
11070
                  BAD_HIREG);
11071
 
11072
      inst.instruction = THUMB_OP16 (inst.instruction);
11073
      inst.instruction |= Rn;
11074
      inst.instruction |= Rm << 3;
11075
    }
11076
}
11077
 
11078
static void
11079
do_t_mrs (void)
11080
{
11081
  unsigned Rd;
11082
 
11083
  if (do_vfp_nsyn_mrs () == SUCCESS)
11084
    return;
11085
 
11086
  Rd = inst.operands[0].reg;
11087
  reject_bad_reg (Rd);
11088
  inst.instruction |= Rd << 8;
11089
 
11090
  if (inst.operands[1].isreg)
11091
    {
11092
      unsigned br = inst.operands[1].reg;
11093
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
11094
        as_bad (_("bad register for mrs"));
11095
 
11096
      inst.instruction |= br & (0xf << 16);
11097
      inst.instruction |= (br & 0x300) >> 4;
11098
      inst.instruction |= (br & SPSR_BIT) >> 2;
11099
    }
11100
  else
11101
    {
11102
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11103
 
11104
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11105
        constraint (flags != 0, _("selected processor does not support "
11106
                    "requested special purpose register"));
11107
      else
11108
        /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11109
           devices).  */
11110
        constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
11111
                    _("'APSR', 'CPSR' or 'SPSR' expected"));
11112
 
11113
      inst.instruction |= (flags & SPSR_BIT) >> 2;
11114
      inst.instruction |= inst.operands[1].imm & 0xff;
11115
      inst.instruction |= 0xf0000;
11116
    }
11117
}
11118
 
11119
static void
11120
do_t_msr (void)
11121
{
11122
  int flags;
11123
  unsigned Rn;
11124
 
11125
  if (do_vfp_nsyn_msr () == SUCCESS)
11126
    return;
11127
 
11128
  constraint (!inst.operands[1].isreg,
11129
              _("Thumb encoding does not support an immediate here"));
11130
 
11131
  if (inst.operands[0].isreg)
11132
    flags = (int)(inst.operands[0].reg);
11133
  else
11134
    flags = inst.operands[0].imm;
11135
 
11136
  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11137
    {
11138
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11139
 
11140
      constraint ((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11141
                   && (bits & ~(PSR_s | PSR_f)) != 0)
11142
                  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11143
                      && bits != PSR_f),
11144
                  _("selected processor does not support requested special "
11145
                    "purpose register"));
11146
    }
11147
  else
11148
     constraint ((flags & 0xff) != 0, _("selected processor does not support "
11149
                 "requested special purpose register"));
11150
 
11151
  Rn = inst.operands[1].reg;
11152
  reject_bad_reg (Rn);
11153
 
11154
  inst.instruction |= (flags & SPSR_BIT) >> 2;
11155
  inst.instruction |= (flags & 0xf0000) >> 8;
11156
  inst.instruction |= (flags & 0x300) >> 4;
11157
  inst.instruction |= (flags & 0xff);
11158
  inst.instruction |= Rn << 16;
11159
}
11160
 
11161
static void
11162
do_t_mul (void)
11163
{
11164
  bfd_boolean narrow;
11165
  unsigned Rd, Rn, Rm;
11166
 
11167
  if (!inst.operands[2].present)
11168
    inst.operands[2].reg = inst.operands[0].reg;
11169
 
11170
  Rd = inst.operands[0].reg;
11171
  Rn = inst.operands[1].reg;
11172
  Rm = inst.operands[2].reg;
11173
 
11174
  if (unified_syntax)
11175
    {
11176
      if (inst.size_req == 4
11177
          || (Rd != Rn
11178
              && Rd != Rm)
11179
          || Rn > 7
11180
          || Rm > 7)
11181
        narrow = FALSE;
11182
      else if (inst.instruction == T_MNEM_muls)
11183
        narrow = !in_it_block ();
11184
      else
11185
        narrow = in_it_block ();
11186
    }
11187
  else
11188
    {
11189
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
11190
      constraint (Rn > 7 || Rm > 7,
11191
                  BAD_HIREG);
11192
      narrow = TRUE;
11193
    }
11194
 
11195
  if (narrow)
11196
    {
11197
      /* 16-bit MULS/Conditional MUL.  */
11198
      inst.instruction = THUMB_OP16 (inst.instruction);
11199
      inst.instruction |= Rd;
11200
 
11201
      if (Rd == Rn)
11202
        inst.instruction |= Rm << 3;
11203
      else if (Rd == Rm)
11204
        inst.instruction |= Rn << 3;
11205
      else
11206
        constraint (1, _("dest must overlap one source register"));
11207
    }
11208
  else
11209
    {
11210
      constraint (inst.instruction != T_MNEM_mul,
11211
                  _("Thumb-2 MUL must not set flags"));
11212
      /* 32-bit MUL.  */
11213
      inst.instruction = THUMB_OP32 (inst.instruction);
11214
      inst.instruction |= Rd << 8;
11215
      inst.instruction |= Rn << 16;
11216
      inst.instruction |= Rm << 0;
11217
 
11218
      reject_bad_reg (Rd);
11219
      reject_bad_reg (Rn);
11220
      reject_bad_reg (Rm);
11221
    }
11222
}
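/* Examples of the narrow/wide selection above (illustrative):

       muls r0, r1      (outside an IT block; Rm defaults to Rd)
         -> 16-bit MULS: Rd == Rm, so Rn goes into bits <5:3>,
            i.e. 0x4348 with the usual 0x4340 MUL (T1) base
       mul r0, r1, r2   (Rd overlaps neither source)
         -> 32-bit MUL via THUMB_OP32, with Rd << 8, Rn << 16, Rm.  */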
11223
 
11224
static void
11225
do_t_mull (void)
11226
{
11227
  unsigned RdLo, RdHi, Rn, Rm;
11228
 
11229
  RdLo = inst.operands[0].reg;
11230
  RdHi = inst.operands[1].reg;
11231
  Rn = inst.operands[2].reg;
11232
  Rm = inst.operands[3].reg;
11233
 
11234
  reject_bad_reg (RdLo);
11235
  reject_bad_reg (RdHi);
11236
  reject_bad_reg (Rn);
11237
  reject_bad_reg (Rm);
11238
 
11239
  inst.instruction |= RdLo << 12;
11240
  inst.instruction |= RdHi << 8;
11241
  inst.instruction |= Rn << 16;
11242
  inst.instruction |= Rm;
11243
 
11244
  if (RdLo == RdHi)
11245
    as_tsktsk (_("rdhi and rdlo must be different"));
11246
}
11247
 
11248
static void
11249
do_t_nop (void)
11250
{
11251
  set_it_insn_type (NEUTRAL_IT_INSN);
11252
 
11253
  if (unified_syntax)
11254
    {
11255
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
11256
        {
11257
          inst.instruction = THUMB_OP32 (inst.instruction);
11258
          inst.instruction |= inst.operands[0].imm;
11259
        }
11260
      else
11261
        {
11262
          /* PR9722: Check for Thumb2 availability before
11263
             generating a thumb2 nop instruction.  */
11264
          if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
11265
            {
11266
              inst.instruction = THUMB_OP16 (inst.instruction);
11267
              inst.instruction |= inst.operands[0].imm << 4;
11268
            }
11269
          else
11270
            inst.instruction = 0x46c0;
11271
        }
11272
    }
11273
  else
11274
    {
11275
      constraint (inst.operands[0].present,
11276
                  _("Thumb does not support NOP with hints"));
11277
      inst.instruction = 0x46c0;
11278
    }
11279
}
11280
 
11281
static void
11282
do_t_neg (void)
11283
{
11284
  if (unified_syntax)
11285
    {
11286
      bfd_boolean narrow;
11287
 
11288
      if (THUMB_SETS_FLAGS (inst.instruction))
11289
        narrow = !in_it_block ();
11290
      else
11291
        narrow = in_it_block ();
11292
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11293
        narrow = FALSE;
11294
      if (inst.size_req == 4)
11295
        narrow = FALSE;
11296
 
11297
      if (!narrow)
11298
        {
11299
          inst.instruction = THUMB_OP32 (inst.instruction);
11300
          inst.instruction |= inst.operands[0].reg << 8;
11301
          inst.instruction |= inst.operands[1].reg << 16;
11302
        }
11303
      else
11304
        {
11305
          inst.instruction = THUMB_OP16 (inst.instruction);
11306
          inst.instruction |= inst.operands[0].reg;
11307
          inst.instruction |= inst.operands[1].reg << 3;
11308
        }
11309
    }
11310
  else
11311
    {
11312
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
11313
                  BAD_HIREG);
11314
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11315
 
11316
      inst.instruction = THUMB_OP16 (inst.instruction);
11317
      inst.instruction |= inst.operands[0].reg;
11318
      inst.instruction |= inst.operands[1].reg << 3;
11319
    }
11320
}
11321
 
11322
static void
11323
do_t_orn (void)
11324
{
11325
  unsigned Rd, Rn;
11326
 
11327
  Rd = inst.operands[0].reg;
11328
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
11329
 
11330
  reject_bad_reg (Rd);
11331
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
11332
  reject_bad_reg (Rn);
11333
 
11334
  inst.instruction |= Rd << 8;
11335
  inst.instruction |= Rn << 16;
11336
 
11337
  if (!inst.operands[2].isreg)
11338
    {
11339
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11340
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11341
    }
11342
  else
11343
    {
11344
      unsigned Rm;
11345
 
11346
      Rm = inst.operands[2].reg;
11347
      reject_bad_reg (Rm);
11348
 
11349
      constraint (inst.operands[2].shifted
11350
                  && inst.operands[2].immisreg,
11351
                  _("shift must be constant"));
11352
      encode_thumb32_shifted_operand (2);
11353
    }
11354
}
11355
 
11356
static void
11357
do_t_pkhbt (void)
11358
{
11359
  unsigned Rd, Rn, Rm;
11360
 
11361
  Rd = inst.operands[0].reg;
11362
  Rn = inst.operands[1].reg;
11363
  Rm = inst.operands[2].reg;
11364
 
11365
  reject_bad_reg (Rd);
11366
  reject_bad_reg (Rn);
11367
  reject_bad_reg (Rm);
11368
 
11369
  inst.instruction |= Rd << 8;
11370
  inst.instruction |= Rn << 16;
11371
  inst.instruction |= Rm;
11372
  if (inst.operands[3].present)
11373
    {
11374
      unsigned int val = inst.reloc.exp.X_add_number;
11375
      constraint (inst.reloc.exp.X_op != O_constant,
11376
                  _("expression too complex"));
11377
      inst.instruction |= (val & 0x1c) << 10;
11378
      inst.instruction |= (val & 0x03) << 6;
11379
    }
11380
}
11381
 
11382
static void
11383
do_t_pkhtb (void)
11384
{
11385
  if (!inst.operands[3].present)
11386
    {
11387
      unsigned Rtmp;
11388
 
11389
      inst.instruction &= ~0x00000020;
11390
 
11391
      /* PR 10168.  Swap the Rm and Rn registers.  */
11392
      Rtmp = inst.operands[1].reg;
11393
      inst.operands[1].reg = inst.operands[2].reg;
11394
      inst.operands[2].reg = Rtmp;
11395
    }
11396
  do_t_pkhbt ();
11397
}
11398
 
11399
static void
11400
do_t_pld (void)
11401
{
11402
  if (inst.operands[0].immisreg)
11403
    reject_bad_reg (inst.operands[0].imm);
11404
 
11405
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
11406
}
11407
 
11408
static void
11409
do_t_push_pop (void)
11410
{
11411
  unsigned mask;
11412
 
11413
  constraint (inst.operands[0].writeback,
11414
              _("push/pop do not support {reglist}^"));
11415
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11416
              _("expression too complex"));
11417
 
11418
  mask = inst.operands[0].imm;
11419
  if ((mask & ~0xff) == 0)
11420
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
11421
  else if ((inst.instruction == T_MNEM_push
11422
            && (mask & ~0xff) == 1 << REG_LR)
11423
           || (inst.instruction == T_MNEM_pop
11424
               && (mask & ~0xff) == 1 << REG_PC))
11425
    {
11426
      inst.instruction = THUMB_OP16 (inst.instruction);
11427
      inst.instruction |= THUMB_PP_PC_LR;
11428
      inst.instruction |= mask & 0xff;
11429
    }
11430
  else if (unified_syntax)
11431
    {
11432
      inst.instruction = THUMB_OP32 (inst.instruction);
11433
      encode_thumb2_ldmstm (13, mask, TRUE);
11434
    }
11435
  else
11436
    {
11437
      inst.error = _("invalid register list to push/pop instruction");
11438
      return;
11439
    }
11440
}
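/* Examples of the three paths above (illustrative; 0xb400 is the
   architectural 16-bit PUSH base and 0x0100 the PC/LR bit):

       push {r0-r3}       mask 0x000f, fits in 8 bits -> 0xb40f
       push {r4-r7, lr}   mask 0x40f0 -> 0xb400 | 0x0100 | 0xf0 = 0xb5f0
       push {r4, r8}      any other high register -> 32-bit STMDB sp!
                          via encode_thumb2_ldmstm (13, mask, TRUE).  */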
11441
 
11442
static void
11443
do_t_rbit (void)
11444
{
11445
  unsigned Rd, Rm;
11446
 
11447
  Rd = inst.operands[0].reg;
11448
  Rm = inst.operands[1].reg;
11449
 
11450
  reject_bad_reg (Rd);
11451
  reject_bad_reg (Rm);
11452
 
11453
  inst.instruction |= Rd << 8;
11454
  inst.instruction |= Rm << 16;
11455
  inst.instruction |= Rm;
11456
}
11457
 
11458
static void
11459
do_t_rev (void)
11460
{
11461
  unsigned Rd, Rm;
11462
 
11463
  Rd = inst.operands[0].reg;
11464
  Rm = inst.operands[1].reg;
11465
 
11466
  reject_bad_reg (Rd);
11467
  reject_bad_reg (Rm);
11468
 
11469
  if (Rd <= 7 && Rm <= 7
11470
      && inst.size_req != 4)
11471
    {
11472
      inst.instruction = THUMB_OP16 (inst.instruction);
11473
      inst.instruction |= Rd;
11474
      inst.instruction |= Rm << 3;
11475
    }
11476
  else if (unified_syntax)
11477
    {
11478
      inst.instruction = THUMB_OP32 (inst.instruction);
11479
      inst.instruction |= Rd << 8;
11480
      inst.instruction |= Rm << 16;
11481
      inst.instruction |= Rm;
11482
    }
11483
  else
11484
    inst.error = BAD_HIREG;
11485
}
11486
 
11487
static void
11488
do_t_rrx (void)
11489
{
11490
  unsigned Rd, Rm;
11491
 
11492
  Rd = inst.operands[0].reg;
11493
  Rm = inst.operands[1].reg;
11494
 
11495
  reject_bad_reg (Rd);
11496
  reject_bad_reg (Rm);
11497
 
11498
  inst.instruction |= Rd << 8;
11499
  inst.instruction |= Rm;
11500
}
11501
 
11502
static void
11503
do_t_rsb (void)
11504
{
11505
  unsigned Rd, Rs;
11506
 
11507
  Rd = inst.operands[0].reg;
11508
  Rs = (inst.operands[1].present
11509
        ? inst.operands[1].reg    /* Rd, Rs, foo */
11510
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
11511
 
11512
  reject_bad_reg (Rd);
11513
  reject_bad_reg (Rs);
11514
  if (inst.operands[2].isreg)
11515
    reject_bad_reg (inst.operands[2].reg);
11516
 
11517
  inst.instruction |= Rd << 8;
11518
  inst.instruction |= Rs << 16;
11519
  if (!inst.operands[2].isreg)
11520
    {
11521
      bfd_boolean narrow;
11522
 
11523
      if ((inst.instruction & 0x00100000) != 0)
11524
        narrow = !in_it_block ();
11525
      else
11526
        narrow = in_it_block ();
11527
 
11528
      if (Rd > 7 || Rs > 7)
11529
        narrow = FALSE;
11530
 
11531
      if (inst.size_req == 4 || !unified_syntax)
11532
        narrow = FALSE;
11533
 
11534
      if (inst.reloc.exp.X_op != O_constant
11535
          || inst.reloc.exp.X_add_number != 0)
11536
        narrow = FALSE;
11537
 
11538
      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
11539
         relaxation, but it doesn't seem worth the hassle.  */
11540
      if (narrow)
11541
        {
11542
          inst.reloc.type = BFD_RELOC_UNUSED;
11543
          inst.instruction = THUMB_OP16 (T_MNEM_negs);
11544
          inst.instruction |= Rs << 3;
11545
          inst.instruction |= Rd;
11546
        }
11547
      else
11548
        {
11549
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11550
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11551
        }
11552
    }
11553
  else
11554
    encode_thumb32_shifted_operand (2);
11555
}
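/* Example of the "rsb #0 -> neg" shortcut above (illustrative):

       rsbs r0, r1, #0    (outside an IT block, low regs, constant 0)
         -> 16-bit NEGS: THUMB_OP16 (T_MNEM_negs) | (1 << 3) | 0,
            i.e. 0x4248 with the usual 0x4240 NEG base.  */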
11556
 
11557
static void
11558
do_t_setend (void)
11559
{
11560
  set_it_insn_type (OUTSIDE_IT_INSN);
11561
  if (inst.operands[0].imm)
11562
    inst.instruction |= 0x8;
11563
}
11564
 
11565
static void
11566
do_t_shift (void)
11567
{
11568
  if (!inst.operands[1].present)
11569
    inst.operands[1].reg = inst.operands[0].reg;
11570
 
11571
  if (unified_syntax)
11572
    {
11573
      bfd_boolean narrow;
11574
      int shift_kind;
11575
 
11576
      switch (inst.instruction)
11577
        {
11578
        case T_MNEM_asr:
11579
        case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
11580
        case T_MNEM_lsl:
11581
        case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
11582
        case T_MNEM_lsr:
11583
        case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
11584
        case T_MNEM_ror:
11585
        case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
11586
        default: abort ();
11587
        }
11588
 
11589
      if (THUMB_SETS_FLAGS (inst.instruction))
11590
        narrow = !in_it_block ();
11591
      else
11592
        narrow = in_it_block ();
11593
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11594
        narrow = FALSE;
11595
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
11596
        narrow = FALSE;
11597
      if (inst.operands[2].isreg
11598
          && (inst.operands[1].reg != inst.operands[0].reg
11599
              || inst.operands[2].reg > 7))
11600
        narrow = FALSE;
11601
      if (inst.size_req == 4)
11602
        narrow = FALSE;
11603
 
11604
      reject_bad_reg (inst.operands[0].reg);
11605
      reject_bad_reg (inst.operands[1].reg);
11606
 
11607
      if (!narrow)
11608
        {
11609
          if (inst.operands[2].isreg)
11610
            {
11611
              reject_bad_reg (inst.operands[2].reg);
11612
              inst.instruction = THUMB_OP32 (inst.instruction);
11613
              inst.instruction |= inst.operands[0].reg << 8;
11614
              inst.instruction |= inst.operands[1].reg << 16;
11615
              inst.instruction |= inst.operands[2].reg;
11616 148 khays
 
11617
              /* PR 12854: Error on extraneous shifts.  */
11618
              constraint (inst.operands[2].shifted,
11619
                          _("extraneous shift as part of operand to shift insn"));
11620 16 khays
            }
11621
          else
11622
            {
11623
              inst.operands[1].shifted = 1;
11624
              inst.operands[1].shift_kind = shift_kind;
11625
              inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
11626
                                             ? T_MNEM_movs : T_MNEM_mov);
11627
              inst.instruction |= inst.operands[0].reg << 8;
11628
              encode_thumb32_shifted_operand (1);
11629
              /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
11630
              inst.reloc.type = BFD_RELOC_UNUSED;
11631
            }
11632
        }
11633
      else
11634
        {
11635
          if (inst.operands[2].isreg)
11636
            {
11637
              switch (shift_kind)
11638
                {
11639
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
11640
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
11641
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
11642
                case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
11643
                default: abort ();
11644
                }
11645
 
11646
              inst.instruction |= inst.operands[0].reg;
11647
              inst.instruction |= inst.operands[2].reg << 3;
11648 148 khays
 
11649
              /* PR 12854: Error on extraneous shifts.  */
11650
              constraint (inst.operands[2].shifted,
11651
                          _("extraneous shift as part of operand to shift insn"));
11652 16 khays
            }
11653
          else
11654
            {
11655
              switch (shift_kind)
11656
                {
11657
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11658
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11659
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11660
                default: abort ();
11661
                }
11662
              inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11663
              inst.instruction |= inst.operands[0].reg;
11664
              inst.instruction |= inst.operands[1].reg << 3;
11665
            }
11666
        }
11667
    }
11668
  else
11669
    {
11670
      constraint (inst.operands[0].reg > 7
11671
                  || inst.operands[1].reg > 7, BAD_HIREG);
11672
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11673
 
11674
      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
11675
        {
11676
          constraint (inst.operands[2].reg > 7, BAD_HIREG);
11677
          constraint (inst.operands[0].reg != inst.operands[1].reg,
11678
                      _("source1 and dest must be same register"));
11679
 
11680
          switch (inst.instruction)
11681
            {
11682
            case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
11683
            case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
11684
            case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
11685
            case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
11686
            default: abort ();
11687
            }
11688
 
11689
          inst.instruction |= inst.operands[0].reg;
11690
          inst.instruction |= inst.operands[2].reg << 3;
11691 148 khays
 
11692
          /* PR 12854: Error on extraneous shifts.  */
11693
          constraint (inst.operands[2].shifted,
11694
                      _("extraneous shift as part of operand to shift insn"));
11695 16 khays
        }
11696
      else
11697
        {
11698
          switch (inst.instruction)
11699
            {
11700
            case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
11701
            case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
11702
            case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
11703
            case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
11704
            default: abort ();
11705
            }
11706
          inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11707
          inst.instruction |= inst.operands[0].reg;
11708
          inst.instruction |= inst.operands[1].reg << 3;
11709
        }
11710
    }
11711
}
11712
 
11713
static void
11714
do_t_simd (void)
11715
{
11716
  unsigned Rd, Rn, Rm;
11717
 
11718
  Rd = inst.operands[0].reg;
11719
  Rn = inst.operands[1].reg;
11720
  Rm = inst.operands[2].reg;
11721
 
11722
  reject_bad_reg (Rd);
11723
  reject_bad_reg (Rn);
11724
  reject_bad_reg (Rm);
11725
 
11726
  inst.instruction |= Rd << 8;
11727
  inst.instruction |= Rn << 16;
11728
  inst.instruction |= Rm;
11729
}
11730
 
11731
static void
11732
do_t_simd2 (void)
11733
{
11734
  unsigned Rd, Rn, Rm;
11735
 
11736
  Rd = inst.operands[0].reg;
11737
  Rm = inst.operands[1].reg;
11738
  Rn = inst.operands[2].reg;
11739
 
11740
  reject_bad_reg (Rd);
11741
  reject_bad_reg (Rn);
11742
  reject_bad_reg (Rm);
11743
 
11744
  inst.instruction |= Rd << 8;
11745
  inst.instruction |= Rn << 16;
11746
  inst.instruction |= Rm;
11747
}
11748
 
11749
static void
11750
do_t_smc (void)
11751
{
11752
  unsigned int value = inst.reloc.exp.X_add_number;
11753
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
11754
              _("SMC is not permitted on this architecture"));
11755
  constraint (inst.reloc.exp.X_op != O_constant,
11756
              _("expression too complex"));
11757
  inst.reloc.type = BFD_RELOC_UNUSED;
11758
  inst.instruction |= (value & 0xf000) >> 12;
11759
  inst.instruction |= (value & 0x0ff0);
11760
  inst.instruction |= (value & 0x000f) << 16;
11761
}
11762
 
11763
static void
11764
do_t_hvc (void)
11765
{
11766
  unsigned int value = inst.reloc.exp.X_add_number;
11767
 
11768
  inst.reloc.type = BFD_RELOC_UNUSED;
11769
  inst.instruction |= (value & 0x0fff);
11770
  inst.instruction |= (value & 0xf000) << 4;
11771
}
11772
 
11773
static void
11774
do_t_ssat_usat (int bias)
11775
{
11776
  unsigned Rd, Rn;
11777
 
11778
  Rd = inst.operands[0].reg;
11779
  Rn = inst.operands[2].reg;
11780
 
11781
  reject_bad_reg (Rd);
11782
  reject_bad_reg (Rn);
11783
 
11784
  inst.instruction |= Rd << 8;
11785
  inst.instruction |= inst.operands[1].imm - bias;
11786
  inst.instruction |= Rn << 16;
11787
 
11788
  if (inst.operands[3].present)
11789
    {
11790
      offsetT shift_amount = inst.reloc.exp.X_add_number;
11791
 
11792
      inst.reloc.type = BFD_RELOC_UNUSED;
11793
 
11794
      constraint (inst.reloc.exp.X_op != O_constant,
11795
                  _("expression too complex"));
11796
 
11797
      if (shift_amount != 0)
11798
        {
11799
          constraint (shift_amount > 31,
11800
                      _("shift expression is too large"));
11801
 
11802
          if (inst.operands[3].shift_kind == SHIFT_ASR)
11803
            inst.instruction |= 0x00200000;  /* sh bit.  */
11804
 
11805
          inst.instruction |= (shift_amount & 0x1c) << 10;
11806
          inst.instruction |= (shift_amount & 0x03) << 6;
11807
        }
11808
    }
11809
}
11810
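/* For illustration: the saturate instructions above keep the shift amount in
   a split imm3:imm2 field, so a shift of, say, 12 (binary 01100) is placed
   with (12 & 0x1c) << 10 and (12 & 0x03) << 6, giving imm3 = 011 and
   imm2 = 00 in the encoded instruction.  */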
 
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}

static void
do_t_ssat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= Rn << 16;
}

static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
              || inst.operands[2].postind || inst.operands[2].writeback
              || inst.operands[2].immisreg || inst.operands[2].shifted
              || inst.operands[2].negative,
              BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}

static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
              || inst.operands[0].reg == inst.operands[2].reg
              || inst.operands[0].reg == inst.operands[3].reg,
              BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}

static void
do_t_sxtah (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= inst.operands[3].imm << 4;
}

static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
        inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
                  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}

static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
          /* This only applies to the v6m however, not later architectures.  */
          && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
        as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
 
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
              _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
              _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}

static void
do_t_usat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= Rn << 16;
}

/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB                                    \
  X(vabd,       0x0000700, 0x1200d00, N_INV),           \
  X(vmax,       0x0000600, 0x0000f00, N_INV),           \
  X(vmin,       0x0000610, 0x0200f00, N_INV),           \
  X(vpadd,      0x0000b10, 0x1000d00, N_INV),           \
  X(vpmax,      0x0000a00, 0x1000f00, N_INV),           \
  X(vpmin,      0x0000a10, 0x1200f00, N_INV),           \
  X(vadd,       0x0000800, 0x0000d00, N_INV),           \
  X(vsub,       0x1000800, 0x0200d00, N_INV),           \
  X(vceq,       0x1000810, 0x0000e00, 0x1b10100),       \
  X(vcge,       0x0000310, 0x1000e00, 0x1b10080),       \
  X(vcgt,       0x0000300, 0x1200e00, 0x1b10000),       \
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */        \
  X(vclt,       0x0000300, 0x1200e00, 0x1b10200),       \
  X(vcle,       0x0000310, 0x1000e00, 0x1b10180),       \
  X(vfma,       N_INV, 0x0000c10, N_INV),               \
  X(vfms,       N_INV, 0x0200c10, N_INV),               \
  X(vmla,       0x0000900, 0x0000d10, 0x0800040),       \
  X(vmls,       0x1000900, 0x0200d10, 0x0800440),       \
  X(vmul,       0x0000910, 0x1000d10, 0x0800840),       \
  X(vmull,      0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,      0x0800800, N_INV,     0x0800240),       \
  X(vmlsl,      0x0800a00, N_INV,     0x0800640),       \
  X(vqdmlal,    0x0800900, N_INV,     0x0800340),       \
  X(vqdmlsl,    0x0800b00, N_INV,     0x0800740),       \
  X(vqdmull,    0x0800d00, N_INV,     0x0800b40),       \
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),       \
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),       \
  X(vshl,       0x0000400, N_INV,     0x0800510),       \
  X(vqshl,      0x0000410, N_INV,     0x0800710),       \
  X(vand,       0x0000110, N_INV,     0x0800030),       \
  X(vbic,       0x0100110, N_INV,     0x0800030),       \
  X(veor,       0x1000110, N_INV,     N_INV),           \
  X(vorn,       0x0300110, N_INV,     0x0800010),       \
  X(vorr,       0x0200110, N_INV,     0x0800010),       \
  X(vmvn,       0x1b00580, N_INV,     0x0800030),       \
  X(vshll,      0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,       0x0000000, 0x0800000, N_INV),           \
  X(vld2,       0x0200100, 0x0a00100, 0x0a00d00),       \
  X(vst2,       0x0000100, 0x0800100, N_INV),           \
  X(vld3,       0x0200200, 0x0a00200, 0x0a00e00),       \
  X(vst3,       0x0000200, 0x0800200, N_INV),           \
  X(vld4,       0x0200300, 0x0a00300, 0x0a00f00),       \
  X(vst4,       0x0000300, 0x0800300, N_INV),           \
  X(vmovn,      0x1b20200, N_INV,     N_INV),           \
  X(vtrn,       0x1b20080, N_INV,     N_INV),           \
  X(vqmovn,     0x1b20200, N_INV,     N_INV),           \
  X(vqmovun,    0x1b20240, N_INV,     N_INV),           \
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),           \
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),           \
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),           \
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),           \
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),           \
  X(vcmp,       0xeb40a40, 0xeb40b40, N_INV),           \
  X(vcmpz,      0xeb50a40, 0xeb50b40, N_INV),           \
  X(vcmpe,      0xeb40ac0, 0xeb40bc0, N_INV),           \
  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV)

enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))

#define NEON_ENCODE(type, inst)                                 \
  do                                                            \
    {                                                           \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1;                                         \
    }                                                           \
  while (0)
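/* For example, once a mnemonic has been looked up as N_MNEM_vadd,
   NEON_ENCODE (INTEGER, inst) rewrites the opcode from the table above,
   roughly:

     inst.instruction = neon_enc_tab[N_MNEM_vadd].integer;
     inst.is_neon = 1;

   which selects the 0x0000800 integer encoding of VADD.  */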
 
#define check_neon_suffixes                                             \
  do                                                                    \
    {                                                                   \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)       \
        {                                                               \
          as_bad (_("invalid neon suffix for non neon instruction"));   \
          return;                                                       \
        }                                                               \
    }                                                                   \
  while (0)

/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF                  \
  X(3, (D, D, D), DOUBLE),              \
  X(3, (Q, Q, Q), QUAD),                \
  X(3, (D, D, I), DOUBLE),              \
  X(3, (Q, Q, I), QUAD),                \
  X(3, (D, D, S), DOUBLE),              \
  X(3, (Q, Q, S), QUAD),                \
  X(2, (D, D), DOUBLE),                 \
  X(2, (Q, Q), QUAD),                   \
  X(2, (D, S), DOUBLE),                 \
  X(2, (Q, S), QUAD),                   \
  X(2, (D, R), DOUBLE),                 \
  X(2, (Q, R), QUAD),                   \
  X(2, (D, I), DOUBLE),                 \
  X(2, (Q, I), QUAD),                   \
  X(3, (D, L, D), DOUBLE),              \
  X(2, (D, Q), MIXED),                  \
  X(2, (Q, D), MIXED),                  \
  X(3, (D, Q, I), MIXED),               \
  X(3, (Q, D, I), MIXED),               \
  X(3, (Q, D, D), MIXED),               \
  X(3, (D, Q, Q), MIXED),               \
  X(3, (Q, Q, D), MIXED),               \
  X(3, (Q, D, S), MIXED),               \
  X(3, (D, Q, S), MIXED),               \
  X(4, (D, D, D, I), DOUBLE),           \
  X(4, (Q, Q, Q, I), QUAD),             \
  X(2, (F, F), SINGLE),                 \
  X(3, (F, F, F), SINGLE),              \
  X(2, (F, I), SINGLE),                 \
  X(2, (F, D), MIXED),                  \
  X(2, (D, F), MIXED),                  \
  X(3, (F, F, I), MIXED),               \
  X(4, (R, R, F, F), SINGLE),           \
  X(4, (F, F, R, R), SINGLE),           \
  X(3, (D, R, R), DOUBLE),              \
  X(3, (R, R, D), DOUBLE),              \
  X(2, (S, R), SINGLE),                 \
  X(2, (R, S), SINGLE),                 \
  X(2, (F, R), SINGLE),                 \
  X(2, (R, F), SINGLE)

#define S2(A,B)         NS_##A##B
#define S3(A,B,C)       NS_##A##B##C
#define S4(A,B,C,D)     NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};
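/* For example, the X(3, (D, D, D), DOUBLE) entry above expands here via
   S3 to the enumerator NS_DDD; the tables below record its class
   (SC_DOUBLE) and its per-operand element kinds (SE_D, SE_D, SE_D).  */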
 
#undef X
#undef S2
#undef S3
#undef S4

enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};
 
/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  32,
  64,
  128,
  0,
  32,
  32,
  0
};
 
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)         { SE_##A, SE_##B }
#define S3(A,B,C)       { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)     { SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4

/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_F64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
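/* For example, an operand list of (N_EQK, N_EQK, N_SU_32 | N_KEY) accepts
   .s8/.s16/.s32/.u8/.u16/.u32 on the key (third) operand and requires the
   other two operands to match its type and size, whereas N_EQK | N_DBL
   would instead demand an operand twice the key's width.  */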
 
/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)

/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
        {
          if (!inst.operands[j].present)
            {
              matches = 0;
              break;
            }

          switch (neon_shape_tab[shape].el[j])
            {
            case SE_F:
              if (!(inst.operands[j].isreg
                    && inst.operands[j].isvec
                    && inst.operands[j].issingle
                    && !inst.operands[j].isquad))
                matches = 0;
              break;

            case SE_D:
              if (!(inst.operands[j].isreg
                    && inst.operands[j].isvec
                    && !inst.operands[j].isquad
                    && !inst.operands[j].issingle))
                matches = 0;
              break;

            case SE_R:
              if (!(inst.operands[j].isreg
                    && !inst.operands[j].isvec))
                matches = 0;
              break;

            case SE_Q:
              if (!(inst.operands[j].isreg
                    && inst.operands[j].isvec
                    && inst.operands[j].isquad
                    && !inst.operands[j].issingle))
                matches = 0;
              break;

            case SE_I:
              if (!(!inst.operands[j].isreg
                    && !inst.operands[j].isscalar))
                matches = 0;
              break;

            case SE_S:
              if (!(!inst.operands[j].isreg
                    && inst.operands[j].isscalar))
                matches = 0;
              break;

            case SE_L:
              break;
            }
          if (!matches)
            break;
        }
      if (matches)
        break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}

/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}

static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
                       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      if ((typebits & N_HLF) != 0)
        *g_size /= 2;
      else if ((typebits & N_DBL) != 0)
        *g_size *= 2;
      if ((typebits & N_SGN) != 0)
        *g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
        *g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
        *g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
        *g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
        *g_type = NT_untyped;
    }
}

/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  gas_assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}

/* Convert Neon type and size into compact bitmask representation.  */

static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  switch (type)
    {
    case NT_untyped:
      switch (size)
        {
        case 8:  return N_8;
        case 16: return N_16;
        case 32: return N_32;
        case 64: return N_64;
        default: ;
        }
      break;

    case NT_integer:
      switch (size)
        {
        case 8:  return N_I8;
        case 16: return N_I16;
        case 32: return N_I32;
        case 64: return N_I64;
        default: ;
        }
      break;

    case NT_float:
      switch (size)
        {
        case 16: return N_F16;
        case 32: return N_F32;
        case 64: return N_F64;
        default: ;
        }
      break;

    case NT_poly:
      switch (size)
        {
        case 8:  return N_P8;
        case 16: return N_P16;
        default: ;
        }
      break;

    case NT_signed:
      switch (size)
        {
        case 8:  return N_S8;
        case 16: return N_S16;
        case 32: return N_S32;
        case 64: return N_S64;
        default: ;
        }
      break;

    case NT_unsigned:
      switch (size)
        {
        case 8:  return N_U8;
        case 16: return N_U16;
        case 32: return N_U32;
        case 64: return N_U64;
        default: ;
        }
      break;

    default: ;
    }

  return N_UTYP;
}

/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
                     enum neon_type_mask mask)
{
  if ((mask & N_EQK) != 0)
    return FAIL;

  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
    *size = 64;
  else
    return FAIL;

  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F32 | N_F64)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}

/* Modify a bitmask of allowed types. This is only needed for type
   relaxation.  */

static unsigned
modify_types_allowed (unsigned allowed, unsigned mods)
{
  unsigned size;
  enum neon_el_type type;
  unsigned destmask;
  int i;

  destmask = 0;

  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
    {
      if (el_type_of_type_chk (&type, &size,
                               (enum neon_type_mask) (allowed & i)) == SUCCESS)
        {
          neon_modify_type_size (mods, &type, &size);
          destmask |= type_chk_of_el_type (type, size);
        }
    }

  return destmask;
}

/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
        {
          va_end (ap);
          return badtype;
        }
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
        key_el = i;
    }
  va_end (ap);

  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
        {
          first_error (_("types specified in both the mnemonic and operands"));
          return badtype;
        }

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
        if (j != key_el)
          inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
         after each operand. We allow some flexibility here; as long as the
         "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
        if (inst.operands[j].vectype.type != NT_invtype)
          inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
        {
          for (j = 0; j < els; j++)
            if (inst.operands[j].vectype.type == NT_invtype)
              inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                      types[j]);
        }
      else
        {
          first_error (_("operand types can't be inferred"));
          return badtype;
        }
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
        {
          unsigned thisarg = types[i];
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
          enum neon_el_type g_type = inst.vectype.el[i].type;
          unsigned g_size = inst.vectype.el[i].size;

          /* Decay more-specific signed & unsigned types to sign-insensitive
             integer types if sign-specific variants are unavailable.  */
          if ((g_type == NT_signed || g_type == NT_unsigned)
              && (types_allowed & N_SU_ALL) == 0)
            g_type = NT_integer;

          /* If only untyped args are allowed, decay any more specific types to
             them. Some instructions only care about signs for some element
             sizes, so handle that properly.  */
          if ((g_size == 8 && (types_allowed & N_8) != 0)
              || (g_size == 16 && (types_allowed & N_16) != 0)
              || (g_size == 32 && (types_allowed & N_32) != 0)
              || (g_size == 64 && (types_allowed & N_64) != 0))
            g_type = NT_untyped;

          if (pass == 0)
            {
              if ((thisarg & N_KEY) != 0)
                {
                  k_type = g_type;
                  k_size = g_size;
                  key_allowed = thisarg & ~N_KEY;
                }
            }
          else
            {
              if ((thisarg & N_VFP) != 0)
                {
                  enum neon_shape_el regshape;
                  unsigned regwidth, match;

                  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
                  if (ns == NS_NULL)
                    {
                      first_error (_("invalid instruction shape"));
                      return badtype;
                    }
                  regshape = neon_shape_tab[ns].el[i];
                  regwidth = neon_shape_el_size[regshape];

                  /* In VFP mode, operands must match register widths. If we
                     have a key operand, use its width, else use the width of
                     the current operand.  */
                  if (k_size != -1u)
                    match = k_size;
                  else
                    match = g_size;

                  if (regwidth != match)
                    {
                      first_error (_("operand size must match register width"));
                      return badtype;
                    }
                }

              if ((thisarg & N_EQK) == 0)
                {
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);

                  if ((given_type & types_allowed) == 0)
                    {
                      first_error (_("bad type in Neon instruction"));
                      return badtype;
                    }
                }
              else
                {
                  enum neon_el_type mod_k_type = k_type;
                  unsigned mod_k_size = k_size;
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
                  if (g_type != mod_k_type || g_size != mod_k_size)
                    {
                      first_error (_("inconsistent types in Neon instruction"));
                      return badtype;
                    }
                }
            }
        }
    }

  return inst.vectype.el[key_el];
}

/* Neon-style VFP instruction forwarding.  */

/* Thumb VFP instructions have 0xE in the condition field.  */

static void
do_vfp_cond_or_thumb (void)
{
  inst.is_neon = 1;

  if (thumb_mode)
    inst.instruction |= 0xe0000000;
  else
    inst.instruction |= inst.cond << 28;
}

/* Look up and encode a simple mnemonic, for use as a helper function for the
   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
   etc.  It is assumed that operand parsing has already been done, and that the
   operands are in the form expected by the given opcode (this isn't necessarily
   the same as the form in which they were parsed, hence some massaging must
   take place before this function is called).
   Checks current arch version against that in the looked-up opcode.  */

static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  if (!opcode)
    abort ();

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
                thumb_mode ? *opcode->tvariant : *opcode->avariant),
              _(BAD_FPU));

  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}

static void
do_vfp_nsyn_add_sub (enum neon_shape rs)
{
  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;

  if (rs == NS_FFF)
    {
      if (is_add)
        do_vfp_nsyn_opcode ("fadds");
      else
        do_vfp_nsyn_opcode ("fsubs");
    }
  else
    {
      if (is_add)
        do_vfp_nsyn_opcode ("faddd");
      else
        do_vfp_nsyn_opcode ("fsubd");
    }
}

/* Check operand types to see if this is a VFP instruction, and if so call
   PFN ().  */

static int
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
{
  enum neon_shape rs;
  struct neon_type_el et;

  switch (args)
    {
    case 2:
      rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      et = neon_check_type (2, rs,
        N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
      break;

    case 3:
      rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
      et = neon_check_type (3, rs,
        N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
      break;

    default:
      abort ();
    }

  if (et.type != NT_invtype)
    {
      pfn (rs);
      return SUCCESS;
    }

  inst.error = NULL;
  return FAIL;
}

static void
do_vfp_nsyn_mla_mls (enum neon_shape rs)
{
  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;

  if (rs == NS_FFF)
    {
      if (is_mla)
        do_vfp_nsyn_opcode ("fmacs");
      else
        do_vfp_nsyn_opcode ("fnmacs");
    }
  else
    {
      if (is_mla)
        do_vfp_nsyn_opcode ("fmacd");
      else
        do_vfp_nsyn_opcode ("fnmacd");
    }
}

static void
do_vfp_nsyn_fma_fms (enum neon_shape rs)
{
  int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;

  if (rs == NS_FFF)
    {
      if (is_fma)
        do_vfp_nsyn_opcode ("ffmas");
      else
        do_vfp_nsyn_opcode ("ffnmas");
    }
  else
    {
      if (is_fma)
        do_vfp_nsyn_opcode ("ffmad");
      else
        do_vfp_nsyn_opcode ("ffnmad");
    }
}

static void
do_vfp_nsyn_mul (enum neon_shape rs)
{
  if (rs == NS_FFF)
    do_vfp_nsyn_opcode ("fmuls");
  else
    do_vfp_nsyn_opcode ("fmuld");
}

static void
do_vfp_nsyn_abs_neg (enum neon_shape rs)
{
  int is_neg = (inst.instruction & 0x80) != 0;
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);

  if (rs == NS_FF)
    {
      if (is_neg)
        do_vfp_nsyn_opcode ("fnegs");
      else
        do_vfp_nsyn_opcode ("fabss");
    }
  else
    {
      if (is_neg)
        do_vfp_nsyn_opcode ("fnegd");
      else
        do_vfp_nsyn_opcode ("fabsd");
    }
}

/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
   insns belong to Neon, and are handled elsewhere.  */

static void
do_vfp_nsyn_ldm_stm (int is_dbmode)
{
  int is_ldm = (inst.instruction & (1 << 20)) != 0;
  if (is_ldm)
    {
      if (is_dbmode)
        do_vfp_nsyn_opcode ("fldmdbs");
      else
        do_vfp_nsyn_opcode ("fldmias");
    }
  else
    {
      if (is_dbmode)
        do_vfp_nsyn_opcode ("fstmdbs");
      else
        do_vfp_nsyn_opcode ("fstmias");
    }
}

static void
do_vfp_nsyn_sqrt (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FF)
    do_vfp_nsyn_opcode ("fsqrts");
  else
    do_vfp_nsyn_opcode ("fsqrtd");
}

static void
do_vfp_nsyn_div (void)
{
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
    N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FFF)
    do_vfp_nsyn_opcode ("fdivs");
  else
    do_vfp_nsyn_opcode ("fdivd");
}

static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
    N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FFF)
    {
      NEON_ENCODE (SINGLE, inst);
      do_vfp_sp_dyadic ();
    }
  else
    {
      NEON_ENCODE (DOUBLE, inst);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();
}

static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
        {
          NEON_ENCODE (SINGLE, inst);
          do_vfp_sp_monadic ();
        }
      else
        {
          NEON_ENCODE (DOUBLE, inst);
          do_vfp_dp_rd_rm ();
        }
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
        {
        case N_MNEM_vcmp:
          inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
          break;
        case N_MNEM_vcmpe:
          inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
          break;
        default:
          abort ();
        }

      if (rs == NS_FI)
        {
          NEON_ENCODE (SINGLE, inst);
          do_vfp_sp_compare_z ();
        }
      else
        {
          NEON_ENCODE (DOUBLE, inst);
          do_vfp_dp_rd ();
        }
    }
  do_vfp_cond_or_thumb ();
}

static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = REG_SP;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}

static void
do_vfp_nsyn_push (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fstmdbs");
  else
    do_vfp_nsyn_opcode ("fstmdbd");
}

static void
do_vfp_nsyn_pop (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fldmias");
  else
    do_vfp_nsyn_opcode ("fldmiad");
}

/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */

static void
neon_dp_fixup (struct arm_it* insn)
{
  unsigned int i = insn->instruction;
  insn->is_neon = 1;

  if (thumb_mode)
    {
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
      if (i & (1 << 24))
        i |= 1 << 28;

      i &= ~(1 << 24);

      i |= 0xef000000;
    }
  else
    i |= 0xf2000000;

  insn->instruction = i;
}

/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
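/* For example, neon_logbits (8) == 0 and neon_logbits (32) == 2, the value
   written into the two-bit size fields by the encoders below.  */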
 
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
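/* For example, Neon register D17 is encoded with LOW4 (17) == 1 in the
   four-bit Vd/Vn/Vm field and HI1 (17) == 1 in the corresponding D/N/M
   extension bit.  */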
 
13145
/* Encode insns with bit pattern:
13146
 
13147
  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
13148
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
13149
 
13150
  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13151
  different meaning for some instruction.  */
13152
 
13153
static void
13154
neon_three_same (int isquad, int ubit, int size)
13155
{
13156
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13157
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13158
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13159
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13160
  inst.instruction |= LOW4 (inst.operands[2].reg);
13161
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13162
  inst.instruction |= (isquad != 0) << 6;
13163
  inst.instruction |= (ubit != 0) << 24;
13164
  if (size != -1)
13165
    inst.instruction |= neon_logbits (size) << 20;
13166
 
13167
  neon_dp_fixup (&inst);
13168
}
13169
 
13170
/* Encode instructions of the form:
13171
 
13172
  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
13173
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
13174
 
13175
  Don't write size if SIZE == -1.  */
13176
 
13177
static void
13178
neon_two_same (int qbit, int ubit, int size)
13179
{
13180
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13181
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13182
  inst.instruction |= LOW4 (inst.operands[1].reg);
13183
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13184
  inst.instruction |= (qbit != 0) << 6;
13185
  inst.instruction |= (ubit != 0) << 24;
13186
 
13187
  if (size != -1)
13188
    inst.instruction |= neon_logbits (size) << 18;
13189
 
13190
  neon_dp_fixup (&inst);
13191
}
13192
 
13193
/* Neon instruction encoders, in approximate order of appearance.  */
13194
 
13195
static void
13196
do_neon_dyadic_i_su (void)
13197
{
13198
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13199
  struct neon_type_el et = neon_check_type (3, rs,
13200
    N_EQK, N_EQK, N_SU_32 | N_KEY);
13201
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13202
}
13203
 
13204
static void
13205
do_neon_dyadic_i64_su (void)
13206
{
13207
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13208
  struct neon_type_el et = neon_check_type (3, rs,
13209
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
13210
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13211
}
13212
 
13213
static void
13214
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
13215
                unsigned immbits)
13216
{
13217
  unsigned size = et.size >> 3;
13218
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13219
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13220
  inst.instruction |= LOW4 (inst.operands[1].reg);
13221
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13222
  inst.instruction |= (isquad != 0) << 6;
13223
  inst.instruction |= immbits << 16;
13224
  inst.instruction |= (size >> 3) << 7;
13225
  inst.instruction |= (size & 0x7) << 19;
13226
  if (write_ubit)
13227
    inst.instruction |= (uval != 0) << 24;
13228
 
13229
  neon_dp_fixup (&inst);
13230
}
13231
 
13232
static void
13233
do_neon_shl_imm (void)
13234
{
13235
  if (!inst.operands[2].isreg)
13236
    {
13237
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13238
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
13239
      NEON_ENCODE (IMMED, inst);
13240
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
13241
    }
13242
  else
13243
    {
13244
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13245
      struct neon_type_el et = neon_check_type (3, rs,
13246
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13247
      unsigned int tmp;
13248
 
13249
      /* VSHL/VQSHL 3-register variants have syntax such as:
13250
           vshl.xx Dd, Dm, Dn
13251
         whereas other 3-register operations encoded by neon_three_same have
13252
         syntax like:
13253
           vadd.xx Dd, Dn, Dm
13254
         (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
13255
         here.  */
13256
      tmp = inst.operands[2].reg;
13257
      inst.operands[2].reg = inst.operands[1].reg;
13258
      inst.operands[1].reg = tmp;
13259
      NEON_ENCODE (INTEGER, inst);
13260
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13261
    }
13262
}
13263
 
13264
static void
13265
do_neon_qshl_imm (void)
13266
{
13267
  if (!inst.operands[2].isreg)
13268
    {
13269
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13270
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13271
 
13272
      NEON_ENCODE (IMMED, inst);
13273
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13274
                      inst.operands[2].imm);
13275
    }
13276
  else
13277
    {
13278
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13279
      struct neon_type_el et = neon_check_type (3, rs,
13280
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13281
      unsigned int tmp;
13282
 
13283
      /* See note in do_neon_shl_imm.  */
13284
      tmp = inst.operands[2].reg;
13285
      inst.operands[2].reg = inst.operands[1].reg;
13286
      inst.operands[1].reg = tmp;
13287
      NEON_ENCODE (INTEGER, inst);
13288
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13289
    }
13290
}
13291
 
13292
static void
13293
do_neon_rshl (void)
13294
{
13295
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13296
  struct neon_type_el et = neon_check_type (3, rs,
13297
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
13298
  unsigned int tmp;
13299
 
13300
  tmp = inst.operands[2].reg;
13301
  inst.operands[2].reg = inst.operands[1].reg;
13302
  inst.operands[1].reg = tmp;
13303
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13304
}
13305
 
13306
static int
13307
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
13308
{
13309
  /* Handle .I8 pseudo-instructions.  */
13310
  if (size == 8)
13311
    {
13312
      /* Unfortunately, this will make everything apart from zero out-of-range.
13313
         FIXME: is this the intended semantics?  There doesn't seem much point in
13314
         accepting .I8 if so.  */
13315
      immediate |= immediate << 8;
13316
      size = 16;
13317
    }
13318
 
13319
  if (size >= 32)
13320
    {
13321
      if (immediate == (immediate & 0x000000ff))
13322
        {
13323
          *immbits = immediate;
13324
          return 0x1;
13325
        }
13326
      else if (immediate == (immediate & 0x0000ff00))
13327
        {
13328
          *immbits = immediate >> 8;
13329
          return 0x3;
13330
        }
13331
      else if (immediate == (immediate & 0x00ff0000))
13332
        {
13333
          *immbits = immediate >> 16;
13334
          return 0x5;
13335
        }
13336
      else if (immediate == (immediate & 0xff000000))
13337
        {
13338
          *immbits = immediate >> 24;
13339
          return 0x7;
13340
        }
13341
      if ((immediate & 0xffff) != (immediate >> 16))
13342
        goto bad_immediate;
13343
      immediate &= 0xffff;
13344
    }
13345
 
13346
  if (immediate == (immediate & 0x000000ff))
13347
    {
13348
      *immbits = immediate;
13349
      return 0x9;
13350
    }
13351
  else if (immediate == (immediate & 0x0000ff00))
13352
    {
13353
      *immbits = immediate >> 8;
13354
      return 0xb;
13355
    }
13356
 
13357
  bad_immediate:
13358
  first_error (_("immediate value out of range"));
13359
  return FAIL;
13360
}
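/* Worked example (illustrative): "vbic.i32" with immediate 0x4300 matches
   the (immediate & 0x0000ff00) case above, so *IMMBITS becomes 0x43 and the
   function returns cmode 0x3, the "value shifted left by 8 bits in each
   32-bit element" form of the immediate VBIC/VORR encoding.  An immediate
   such as 0x00010001 falls through the 32-bit cases, passes the
   repeated-halfword check and is then accepted as a 16-bit immediate
   (cmode 0x9, *IMMBITS = 0x01).  */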
13361
 
13362
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
13363
   A, B, C, D.  */
13364
 
13365
static int
13366
neon_bits_same_in_bytes (unsigned imm)
13367
{
13368
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
13369
         && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
13370
         && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
13371
         && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
13372
}
13373
 
13374
/* For an immediate of the above form, return 0bABCD.  */
13375
 
13376
static unsigned
13377
neon_squash_bits (unsigned imm)
13378
{
13379
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
13380
         | ((imm & 0x01000000) >> 21);
13381
}
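/* Illustrative example: 0x00ff00ff has bytes 0x00, 0xff, 0x00, 0xff, so
   neon_bits_same_in_bytes accepts it and neon_squash_bits returns 0b0101
   (bit 3 taken from byte 3 down to bit 0 from byte 0).  Two squashed halves
   are later combined by neon_cmode_for_move_imm into the 8-bit immediate of
   the 64-bit "cmode 0xe, op = 1" VMOV form.  */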
13382
 
13383
/* Compress quarter-float representation to 0b...000 abcdefgh.  */
13384
 
13385
static unsigned
13386
neon_qfloat_bits (unsigned imm)
13387
{
13388
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
13389
}
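/* For instance (illustrative): 1.0f has the single-precision bit pattern
   0x3f800000.  The expression above keeps the sign bit and bits 25:19 (the
   low three exponent bits plus the top four mantissa bits), giving the
   8-bit immediate 0x70 used when encoding "vmov.f32 <Dd>, #1.0".  */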
13390
 
13391
/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
13392
   the instruction. *OP is passed as the initial value of the op field, and
13393
   may be set to a different value depending on the constant (i.e.
13394
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
13395
   MVN).  If the immediate looks like a repeated pattern then also
13396
   try smaller element sizes.  */
13397
 
13398
static int
13399
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
13400
                         unsigned *immbits, int *op, int size,
13401
                         enum neon_el_type type)
13402
{
13403
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
13404
     float.  */
13405
  if (type == NT_float && !float_p)
13406
    return FAIL;
13407
 
13408
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
13409
    {
13410
      if (size != 32 || *op == 1)
13411
        return FAIL;
13412
      *immbits = neon_qfloat_bits (immlo);
13413
      return 0xf;
13414
    }
13415
 
13416
  if (size == 64)
13417
    {
13418
      if (neon_bits_same_in_bytes (immhi)
13419
          && neon_bits_same_in_bytes (immlo))
13420
        {
13421
          if (*op == 1)
13422
            return FAIL;
13423
          *immbits = (neon_squash_bits (immhi) << 4)
13424
                     | neon_squash_bits (immlo);
13425
          *op = 1;
13426
          return 0xe;
13427
        }
13428
 
13429
      if (immhi != immlo)
13430
        return FAIL;
13431
    }
13432
 
13433
  if (size >= 32)
13434
    {
13435
      if (immlo == (immlo & 0x000000ff))
13436
        {
13437
          *immbits = immlo;
13438
          return 0x0;
13439
        }
13440
      else if (immlo == (immlo & 0x0000ff00))
13441
        {
13442
          *immbits = immlo >> 8;
13443
          return 0x2;
13444
        }
13445
      else if (immlo == (immlo & 0x00ff0000))
13446
        {
13447
          *immbits = immlo >> 16;
13448
          return 0x4;
13449
        }
13450
      else if (immlo == (immlo & 0xff000000))
13451
        {
13452
          *immbits = immlo >> 24;
13453
          return 0x6;
13454
        }
13455
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
13456
        {
13457
          *immbits = (immlo >> 8) & 0xff;
13458
          return 0xc;
13459
        }
13460
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
13461
        {
13462
          *immbits = (immlo >> 16) & 0xff;
13463
          return 0xd;
13464
        }
13465
 
13466
      if ((immlo & 0xffff) != (immlo >> 16))
13467
        return FAIL;
13468
      immlo &= 0xffff;
13469
    }
13470
 
13471
  if (size >= 16)
13472
    {
13473
      if (immlo == (immlo & 0x000000ff))
13474
        {
13475
          *immbits = immlo;
13476
          return 0x8;
13477
        }
13478
      else if (immlo == (immlo & 0x0000ff00))
13479
        {
13480
          *immbits = immlo >> 8;
13481
          return 0xa;
13482
        }
13483
 
13484
      if ((immlo & 0xff) != (immlo >> 8))
13485
        return FAIL;
13486
      immlo &= 0xff;
13487
    }
13488
 
13489
  if (immlo == (immlo & 0x000000ff))
13490
    {
13491
      /* Don't allow MVN with 8-bit immediate.  */
13492
      if (*op == 1)
13493
        return FAIL;
13494
      *immbits = immlo;
13495
      return 0xe;
13496
    }
13497
 
13498
  return FAIL;
13499
}
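/* Worked example (illustrative): "vmov.i32 <Dd>, #0x5600" reaches the
   size >= 32 block with IMMLO = 0x5600, matches the (immlo & 0x0000ff00)
   case and returns cmode 0x2 with *IMMBITS = 0x56, i.e. 0x00005600
   replicated into every 32-bit element.  When no cmode matches,
   neon_move_immediate (below) inverts the immediate with neon_invert_size
   and retries with the opposite VMOV/VMVN encoding.  */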
13500
 
13501
/* Write immediate bits [7:0] to the following locations:
13502
 
13503
  |28/24|23     19|18 16|15                    4|3     0|
13504
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13505
 
13506
  This function is used by VMOV/VMVN/VORR/VBIC.  */
13507
 
13508
static void
13509
neon_write_immbits (unsigned immbits)
13510
{
13511
  inst.instruction |= immbits & 0xf;
13512
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
13513
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
13514
}
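/* For example (illustrative): IMMBITS = 0xb5 (binary 1011 0101) places
   efgh = 0101 in instruction bits 3:0, bcd = 011 in bits 18:16, and a = 1
   in bit 24 (the 28/24 position of the diagram above).  */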
13515
 
13516
/* Invert low-order SIZE bits of XHI:XLO.  */
13517
 
13518
static void
13519
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
13520
{
13521
  unsigned immlo = xlo ? *xlo : 0;
13522
  unsigned immhi = xhi ? *xhi : 0;
13523
 
13524
  switch (size)
13525
    {
13526
    case 8:
13527
      immlo = (~immlo) & 0xff;
13528
      break;
13529
 
13530
    case 16:
13531
      immlo = (~immlo) & 0xffff;
13532
      break;
13533
 
13534
    case 64:
13535
      immhi = (~immhi) & 0xffffffff;
13536
      /* fall through.  */
13537
 
13538
    case 32:
13539
      immlo = (~immlo) & 0xffffffff;
13540
      break;
13541
 
13542
    default:
13543
      abort ();
13544
    }
13545
 
13546
  if (xlo)
13547
    *xlo = immlo;
13548
 
13549
  if (xhi)
13550
    *xhi = immhi;
13551
}
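/* This helper is what lets the assembler accept VAND/VORN with an immediate
   even though the hardware only has immediate forms of VBIC and VORR: e.g.
   (illustrative) "vand.i32 <Dd>, #0xffffff00" is inverted here to
   0x000000ff and then encoded via the VBIC immediate path in do_neon_logic
   below.  neon_move_immediate also uses it to retry a failed VMOV immediate
   as VMVN (and vice versa).  */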
13552
 
13553
static void
13554
do_neon_logic (void)
13555
{
13556
  if (inst.operands[2].present && inst.operands[2].isreg)
13557
    {
13558
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13559
      neon_check_type (3, rs, N_IGNORE_TYPE);
13560
      /* U bit and size field were set as part of the bitmask.  */
13561
      NEON_ENCODE (INTEGER, inst);
13562
      neon_three_same (neon_quad (rs), 0, -1);
13563
    }
13564
  else
13565
    {
13566
      const int three_ops_form = (inst.operands[2].present
13567
                                  && !inst.operands[2].isreg);
13568
      const int immoperand = (three_ops_form ? 2 : 1);
13569
      enum neon_shape rs = (three_ops_form
13570
                            ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
13571
                            : neon_select_shape (NS_DI, NS_QI, NS_NULL));
13572
      struct neon_type_el et = neon_check_type (2, rs,
13573
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13574
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
13575
      unsigned immbits;
13576
      int cmode;
13577
 
13578
      if (et.type == NT_invtype)
13579
        return;
13580
 
13581
      if (three_ops_form)
13582
        constraint (inst.operands[0].reg != inst.operands[1].reg,
13583
                    _("first and second operands shall be the same register"));
13584
 
13585
      NEON_ENCODE (IMMED, inst);
13586
 
13587
      immbits = inst.operands[immoperand].imm;
13588
      if (et.size == 64)
13589
        {
13590
          /* .i64 is a pseudo-op, so the immediate must be a repeating
13591
             pattern.  */
13592
          if (immbits != (inst.operands[immoperand].regisimm ?
13593
                          inst.operands[immoperand].reg : 0))
13594
            {
13595
              /* Set immbits to an invalid constant.  */
13596
              immbits = 0xdeadbeef;
13597
            }
13598
        }
13599
 
13600
      switch (opcode)
13601
        {
13602
        case N_MNEM_vbic:
13603
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13604
          break;
13605
 
13606
        case N_MNEM_vorr:
13607
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13608
          break;
13609
 
13610
        case N_MNEM_vand:
13611
          /* Pseudo-instruction for VBIC.  */
13612
          neon_invert_size (&immbits, 0, et.size);
13613
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13614
          break;
13615
 
13616
        case N_MNEM_vorn:
13617
          /* Pseudo-instruction for VORR.  */
13618
          neon_invert_size (&immbits, 0, et.size);
13619
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13620
          break;
13621
 
13622
        default:
13623
          abort ();
13624
        }
13625
 
13626
      if (cmode == FAIL)
13627
        return;
13628
 
13629
      inst.instruction |= neon_quad (rs) << 6;
13630
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13631
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13632
      inst.instruction |= cmode << 8;
13633
      neon_write_immbits (immbits);
13634
 
13635
      neon_dp_fixup (&inst);
13636
    }
13637
}
13638
 
13639
static void
13640
do_neon_bitfield (void)
13641
{
13642
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13643
  neon_check_type (3, rs, N_IGNORE_TYPE);
13644
  neon_three_same (neon_quad (rs), 0, -1);
13645
}
13646
 
13647
static void
13648
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
13649
                  unsigned destbits)
13650
{
13651
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13652
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
13653
                                            types | N_KEY);
13654
  if (et.type == NT_float)
13655
    {
13656
      NEON_ENCODE (FLOAT, inst);
13657
      neon_three_same (neon_quad (rs), 0, -1);
13658
    }
13659
  else
13660
    {
13661
      NEON_ENCODE (INTEGER, inst);
13662
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
13663
    }
13664
}
13665
 
13666
static void
13667
do_neon_dyadic_if_su (void)
13668
{
13669
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13670
}
13671
 
13672
static void
13673
do_neon_dyadic_if_su_d (void)
13674
{
13675
  /* This version only allows D registers, but that constraint is enforced during
13676
     operand parsing so we don't need to do anything extra here.  */
13677
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13678
}
13679
 
13680
static void
13681
do_neon_dyadic_if_i_d (void)
13682
{
13683
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
13684
     affected if we specify unsigned args.  */
13685
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13686
}
13687
 
13688
enum vfp_or_neon_is_neon_bits
13689
{
13690
  NEON_CHECK_CC = 1,
13691
  NEON_CHECK_ARCH = 2
13692
};
13693
 
13694
/* Call this function if an instruction that may have belonged to either the VFP
13695
   or Neon instruction set has turned out to be a Neon instruction (due to the
13696
   operand types involved, etc.).  We have to check and/or fix up a couple of
13697
   things:
13698
 
13699
     - Make sure the user hasn't attempted to make a Neon instruction
13700
       conditional.
13701
     - Alter the value in the condition code field if necessary.
13702
     - Make sure that the arch supports Neon instructions.
13703
 
13704
   Which of these operations take place depends on bits from enum
13705
   vfp_or_neon_is_neon_bits.
13706
 
13707
   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13708
   current instruction's condition is COND_ALWAYS, the condition field is
13709
   changed to inst.uncond_value. This is necessary because instructions shared
13710
   between VFP and Neon may be conditional for the VFP variants only, and the
13711
   unconditional Neon version must have, e.g., 0xF in the condition field.  */
13712
 
13713
static int
13714
vfp_or_neon_is_neon (unsigned check)
13715
{
13716
  /* Conditions are always legal in Thumb mode (IT blocks).  */
13717
  if (!thumb_mode && (check & NEON_CHECK_CC))
13718
    {
13719
      if (inst.cond != COND_ALWAYS)
13720
        {
13721
          first_error (_(BAD_COND));
13722
          return FAIL;
13723
        }
13724
      if (inst.uncond_value != -1)
13725
        inst.instruction |= inst.uncond_value << 28;
13726
    }
13727
 
13728
  if ((check & NEON_CHECK_ARCH)
13729
      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
13730
    {
13731
      first_error (_(BAD_FPU));
13732
      return FAIL;
13733
    }
13734
 
13735
  return SUCCESS;
13736
}
13737
 
13738
static void
13739
do_neon_addsub_if_i (void)
13740
{
13741
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
13742
    return;
13743
 
13744
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13745
    return;
13746
 
13747
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
13748
     affected if we specify unsigned args.  */
13749
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
13750
}
13751
 
13752
/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13753
   result to be:
13754
     V<op> A,B     (A is operand 0, B is operand 2)
13755
   to mean:
13756
     V<op> A,B,A
13757
   not:
13758
     V<op> A,B,B
13759
   so handle that case specially.  */
13760
 
13761
static void
13762
neon_exchange_operands (void)
13763
{
13764
  void *scratch = alloca (sizeof (inst.operands[0]));
13765
  if (inst.operands[1].present)
13766
    {
13767
      /* Swap operands[1] and operands[2].  */
13768
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13769
      inst.operands[1] = inst.operands[2];
13770
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
13771
    }
13772
  else
13773
    {
13774
      inst.operands[1] = inst.operands[2];
13775
      inst.operands[2] = inst.operands[0];
13776
    }
13777
}
13778
 
13779
static void
13780
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
13781
{
13782
  if (inst.operands[2].isreg)
13783
    {
13784
      if (invert)
13785
        neon_exchange_operands ();
13786
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
13787
    }
13788
  else
13789
    {
13790
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13791
      struct neon_type_el et = neon_check_type (2, rs,
13792
        N_EQK | N_SIZ, immtypes | N_KEY);
13793
 
13794
      NEON_ENCODE (IMMED, inst);
13795
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13796
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13797
      inst.instruction |= LOW4 (inst.operands[1].reg);
13798
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13799
      inst.instruction |= neon_quad (rs) << 6;
13800
      inst.instruction |= (et.type == NT_float) << 10;
13801
      inst.instruction |= neon_logbits (et.size) << 18;
13802
 
13803
      neon_dp_fixup (&inst);
13804
    }
13805
}
13806
 
13807
static void
13808
do_neon_cmp (void)
13809
{
13810
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
13811
}
13812
 
13813
static void
13814
do_neon_cmp_inv (void)
13815
{
13816
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
13817
}
13818
 
13819
static void
13820
do_neon_ceq (void)
13821
{
13822
  neon_compare (N_IF_32, N_IF_32, FALSE);
13823
}
13824
 
13825
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
13826
   scalars, which are encoded in 5 bits, M : Rm.
13827
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13828
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13829
   index in M.  */
13830
 
13831
static unsigned
13832
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
13833
{
13834
  unsigned regno = NEON_SCALAR_REG (scalar);
13835
  unsigned elno = NEON_SCALAR_INDEX (scalar);
13836
 
13837
  switch (elsize)
13838
    {
13839
    case 16:
13840
      if (regno > 7 || elno > 3)
13841
        goto bad_scalar;
13842
      return regno | (elno << 3);
13843
 
13844
    case 32:
13845
      if (regno > 15 || elno > 1)
13846
        goto bad_scalar;
13847
      return regno | (elno << 4);
13848
 
13849
    default:
13850
    bad_scalar:
13851
      first_error (_("scalar out of range for multiply instruction"));
13852
    }
13853
 
13854
  return 0;
13855
}
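/* Illustrative example: the 16-bit scalar D5[2] has REGNO = 5 and ELNO = 2,
   so this returns 5 | (2 << 3) = 0x15.  neon_mul_mac below then emits the
   low four bits as Rm and the fifth bit as M, matching the "register in
   Rm[2:0], index in M:Rm[3]" layout described above.  */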
13856
 
13857
/* Encode multiply / multiply-accumulate scalar instructions.  */
13858
 
13859
static void
13860
neon_mul_mac (struct neon_type_el et, int ubit)
13861
{
13862
  unsigned scalar;
13863
 
13864
  /* Give a more helpful error message if we have an invalid type.  */
13865
  if (et.type == NT_invtype)
13866
    return;
13867
 
13868
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
13869
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13870
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13871
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13872
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13873
  inst.instruction |= LOW4 (scalar);
13874
  inst.instruction |= HI1 (scalar) << 5;
13875
  inst.instruction |= (et.type == NT_float) << 8;
13876
  inst.instruction |= neon_logbits (et.size) << 20;
13877
  inst.instruction |= (ubit != 0) << 24;
13878
 
13879
  neon_dp_fixup (&inst);
13880
}
13881
 
13882
static void
13883
do_neon_mac_maybe_scalar (void)
13884
{
13885
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
13886
    return;
13887
 
13888
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13889
    return;
13890
 
13891
  if (inst.operands[2].isscalar)
13892
    {
13893
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13894
      struct neon_type_el et = neon_check_type (3, rs,
13895
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
13896
      NEON_ENCODE (SCALAR, inst);
13897
      neon_mul_mac (et, neon_quad (rs));
13898
    }
13899
  else
13900
    {
13901
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
13902
         affected if we specify unsigned args.  */
13903
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13904
    }
13905
}
13906
 
13907
static void
13908
do_neon_fmac (void)
13909
{
13910
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
13911
    return;
13912
 
13913
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13914
    return;
13915
 
13916
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13917
}
13918
 
13919
static void
13920
do_neon_tst (void)
13921
{
13922
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13923
  struct neon_type_el et = neon_check_type (3, rs,
13924
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
13925
  neon_three_same (neon_quad (rs), 0, et.size);
13926
}
13927
 
13928
/* VMUL with 3 registers allows the P8 type. The scalar version supports the
13929
   same types as the MAC equivalents. The polynomial type for this instruction
13930
   is encoded the same as the integer type.  */
13931
 
13932
static void
13933
do_neon_mul (void)
13934
{
13935
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
13936
    return;
13937
 
13938
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13939
    return;
13940
 
13941
  if (inst.operands[2].isscalar)
13942
    do_neon_mac_maybe_scalar ();
13943
  else
13944
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
13945
}
13946
 
13947
static void
13948
do_neon_qdmulh (void)
13949
{
13950
  if (inst.operands[2].isscalar)
13951
    {
13952
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13953
      struct neon_type_el et = neon_check_type (3, rs,
13954
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13955
      NEON_ENCODE (SCALAR, inst);
13956
      neon_mul_mac (et, neon_quad (rs));
13957
    }
13958
  else
13959
    {
13960
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13961
      struct neon_type_el et = neon_check_type (3, rs,
13962
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13963
      NEON_ENCODE (INTEGER, inst);
13964
      /* The U bit (rounding) comes from bit mask.  */
13965
      neon_three_same (neon_quad (rs), 0, et.size);
13966
    }
13967
}
13968
 
13969
static void
13970
do_neon_fcmp_absolute (void)
13971
{
13972
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13973
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
13974
  /* Size field comes from bit mask.  */
13975
  neon_three_same (neon_quad (rs), 1, -1);
13976
}
13977
 
13978
static void
13979
do_neon_fcmp_absolute_inv (void)
13980
{
13981
  neon_exchange_operands ();
13982
  do_neon_fcmp_absolute ();
13983
}
13984
 
13985
static void
13986
do_neon_step (void)
13987
{
13988
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13989
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
13990
  neon_three_same (neon_quad (rs), 0, -1);
13991
}
13992
 
13993
static void
13994
do_neon_abs_neg (void)
13995
{
13996
  enum neon_shape rs;
13997
  struct neon_type_el et;
13998
 
13999
  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14000
    return;
14001
 
14002
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14003
    return;
14004
 
14005
  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14006
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14007
 
14008
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14009
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14010
  inst.instruction |= LOW4 (inst.operands[1].reg);
14011
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14012
  inst.instruction |= neon_quad (rs) << 6;
14013
  inst.instruction |= (et.type == NT_float) << 10;
14014
  inst.instruction |= neon_logbits (et.size) << 18;
14015
 
14016
  neon_dp_fixup (&inst);
14017
}
14018
 
14019
static void
14020
do_neon_sli (void)
14021
{
14022
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14023
  struct neon_type_el et = neon_check_type (2, rs,
14024
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14025
  int imm = inst.operands[2].imm;
14026
  constraint (imm < 0 || (unsigned)imm >= et.size,
14027
              _("immediate out of range for insert"));
14028
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14029
}
14030
 
14031
static void
14032
do_neon_sri (void)
14033
{
14034
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14035
  struct neon_type_el et = neon_check_type (2, rs,
14036
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14037
  int imm = inst.operands[2].imm;
14038
  constraint (imm < 1 || (unsigned)imm > et.size,
14039
              _("immediate out of range for insert"));
14040
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14041
}
14042
 
14043
static void
14044
do_neon_qshlu_imm (void)
14045
{
14046
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14047
  struct neon_type_el et = neon_check_type (2, rs,
14048
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14049
  int imm = inst.operands[2].imm;
14050
  constraint (imm < 0 || (unsigned)imm >= et.size,
14051
              _("immediate out of range for shift"));
14052
  /* Only encodes the 'U present' variant of the instruction.
14053
     In this case, signed types have OP (bit 8) set to 0.
14054
     Unsigned types have OP set to 1.  */
14055
  inst.instruction |= (et.type == NT_unsigned) << 8;
14056
  /* The rest of the bits are the same as other immediate shifts.  */
14057
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14058
}
14059
 
14060
static void
14061
do_neon_qmovn (void)
14062
{
14063
  struct neon_type_el et = neon_check_type (2, NS_DQ,
14064
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14065
  /* Saturating move where operands can be signed or unsigned, and the
14066
     destination has the same signedness.  */
14067
  NEON_ENCODE (INTEGER, inst);
14068
  if (et.type == NT_unsigned)
14069
    inst.instruction |= 0xc0;
14070
  else
14071
    inst.instruction |= 0x80;
14072
  neon_two_same (0, 1, et.size / 2);
14073
}
14074
 
14075
static void
14076
do_neon_qmovun (void)
14077
{
14078
  struct neon_type_el et = neon_check_type (2, NS_DQ,
14079
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14080
  /* Saturating move with unsigned results. Operands must be signed.  */
14081
  NEON_ENCODE (INTEGER, inst);
14082
  neon_two_same (0, 1, et.size / 2);
14083
}
14084
 
14085
static void
14086
do_neon_rshift_sat_narrow (void)
14087
{
14088
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
14089
     or unsigned. If operands are unsigned, results must also be unsigned.  */
14090
  struct neon_type_el et = neon_check_type (2, NS_DQI,
14091
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14092
  int imm = inst.operands[2].imm;
14093
  /* This gets the bounds check, size encoding and immediate bits calculation
14094
     right.  */
14095
  et.size /= 2;
14096
 
14097
  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14098
     VQMOVN.I<size> <Dd>, <Qm>.  */
14099
  if (imm == 0)
14100
    {
14101
      inst.operands[2].present = 0;
14102
      inst.instruction = N_MNEM_vqmovn;
14103
      do_neon_qmovn ();
14104
      return;
14105
    }
14106
 
14107
  constraint (imm < 1 || (unsigned)imm > et.size,
14108
              _("immediate out of range"));
14109
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
14110
}
14111
 
14112
static void
14113
do_neon_rshift_sat_narrow_u (void)
14114
{
14115
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
14116
     or unsigned. If operands are unsigned, results must also be unsigned.  */
14117
  struct neon_type_el et = neon_check_type (2, NS_DQI,
14118
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14119
  int imm = inst.operands[2].imm;
14120
  /* This gets the bounds check, size encoding and immediate bits calculation
14121
     right.  */
14122
  et.size /= 2;
14123
 
14124
  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14125
     VQMOVUN.I<size> <Dd>, <Qm>.  */
14126
  if (imm == 0)
14127
    {
14128
      inst.operands[2].present = 0;
14129
      inst.instruction = N_MNEM_vqmovun;
14130
      do_neon_qmovun ();
14131
      return;
14132
    }
14133
 
14134
  constraint (imm < 1 || (unsigned)imm > et.size,
14135
              _("immediate out of range"));
14136
  /* FIXME: The manual is kind of unclear about what value U should have in
14137
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14138
     must be 1.  */
14139
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
14140
}
14141
 
14142
static void
14143
do_neon_movn (void)
14144
{
14145
  struct neon_type_el et = neon_check_type (2, NS_DQ,
14146
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14147
  NEON_ENCODE (INTEGER, inst);
14148
  neon_two_same (0, 1, et.size / 2);
14149
}
14150
 
14151
static void
14152
do_neon_rshift_narrow (void)
14153
{
14154
  struct neon_type_el et = neon_check_type (2, NS_DQI,
14155
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14156
  int imm = inst.operands[2].imm;
14157
  /* This gets the bounds check, size encoding and immediate bits calculation
14158
     right.  */
14159
  et.size /= 2;
14160
 
14161
  /* If immediate is zero then we are a pseudo-instruction for
14162
     VMOVN.I<size> <Dd>, <Qm>  */
14163
  if (imm == 0)
14164
    {
14165
      inst.operands[2].present = 0;
14166
      inst.instruction = N_MNEM_vmovn;
14167
      do_neon_movn ();
14168
      return;
14169
    }
14170
 
14171
  constraint (imm < 1 || (unsigned)imm > et.size,
14172
              _("immediate out of range for narrowing operation"));
14173
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
14174
}
14175
 
14176
static void
14177
do_neon_shll (void)
14178
{
14179
  /* FIXME: Type checking when lengthening.  */
14180
  struct neon_type_el et = neon_check_type (2, NS_QDI,
14181
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
14182
  unsigned imm = inst.operands[2].imm;
14183
 
14184
  if (imm == et.size)
14185
    {
14186
      /* Maximum shift variant.  */
14187
      NEON_ENCODE (INTEGER, inst);
14188
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14189
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14190
      inst.instruction |= LOW4 (inst.operands[1].reg);
14191
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14192
      inst.instruction |= neon_logbits (et.size) << 18;
14193
 
14194
      neon_dp_fixup (&inst);
14195
    }
14196
  else
14197
    {
14198
      /* A more-specific type check for non-max versions.  */
14199
      et = neon_check_type (2, NS_QDI,
14200
        N_EQK | N_DBL, N_SU_32 | N_KEY);
14201
      NEON_ENCODE (IMMED, inst);
14202
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
14203
    }
14204
}
14205
 
14206
/* Check the various types for the VCVT instruction, and return which version
14207
   the current instruction is.  */
14208
 
14209
static int
14210
neon_cvt_flavour (enum neon_shape rs)
14211
{
14212
#define CVT_VAR(C,X,Y)                                                  \
14213
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y));       \
14214
  if (et.type != NT_invtype)                                            \
14215
    {                                                                   \
14216
      inst.error = NULL;                                                \
14217
      return (C);                                                       \
14218
    }
14219
  struct neon_type_el et;
14220
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
14221
                        || rs == NS_FF) ? N_VFP : 0;
14222
  /* The instruction versions which take an immediate take one register
14223
     argument, which is extended to the width of the full register. Thus the
14224
     "source" and "destination" registers must have the same width.  Hack that
14225
     here by making the size equal to the key (wider, in this case) operand.  */
14226
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
14227
 
14228
  CVT_VAR (0, N_S32, N_F32);
14229
  CVT_VAR (1, N_U32, N_F32);
14230
  CVT_VAR (2, N_F32, N_S32);
14231
  CVT_VAR (3, N_F32, N_U32);
14232
  /* Half-precision conversions.  */
14233
  CVT_VAR (4, N_F32, N_F16);
14234
  CVT_VAR (5, N_F16, N_F32);
14235
 
14236
  whole_reg = N_VFP;
14237
 
14238
  /* VFP instructions.  */
14239
  CVT_VAR (6, N_F32, N_F64);
14240
  CVT_VAR (7, N_F64, N_F32);
14241
  CVT_VAR (8, N_S32, N_F64 | key);
14242
  CVT_VAR (9, N_U32, N_F64 | key);
14243
  CVT_VAR (10, N_F64 | key, N_S32);
14244
  CVT_VAR (11, N_F64 | key, N_U32);
14245
  /* VFP instructions with bitshift.  */
14246
  CVT_VAR (12, N_F32 | key, N_S16);
14247
  CVT_VAR (13, N_F32 | key, N_U16);
14248
  CVT_VAR (14, N_F64 | key, N_S16);
14249
  CVT_VAR (15, N_F64 | key, N_U16);
14250
  CVT_VAR (16, N_S16, N_F32 | key);
14251
  CVT_VAR (17, N_U16, N_F32 | key);
14252
  CVT_VAR (18, N_S16, N_F64 | key);
14253
  CVT_VAR (19, N_U16, N_F64 | key);
14254
 
14255
  return -1;
14256
#undef CVT_VAR
14257
}
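/* The CVT_VAR probes above simply try each legal type combination in turn
   and return the index of the first one the operands satisfy; the flavour
   is therefore just a row number into the opname tables used below.  For
   example (illustrative), "vcvt.s32.f32 d0, d1" matches CVT_VAR (0, N_S32,
   N_F32) and stays on the Neon encoding path, while the double-precision
   forms match index 6 or above, which do_neon_cvt_1 hands to
   do_vfp_nsyn_cvt.  */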
14258
 
14259
/* Neon-syntax VFP conversions.  */
14260
 
14261
static void
14262
do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
14263
{
14264
  const char *opname = 0;
14265
 
14266
  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14267
    {
14268
      /* Conversions with immediate bitshift.  */
14269
      const char *enc[] =
14270
        {
14271
          "ftosls",
14272
          "ftouls",
14273
          "fsltos",
14274
          "fultos",
14275
          NULL,
14276
          NULL,
14277
          NULL,
14278
          NULL,
14279
          "ftosld",
14280
          "ftould",
14281
          "fsltod",
14282
          "fultod",
14283
          "fshtos",
14284
          "fuhtos",
14285
          "fshtod",
14286
          "fuhtod",
14287
          "ftoshs",
14288
          "ftouhs",
14289
          "ftoshd",
14290
          "ftouhd"
14291
        };
14292
 
14293
      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14294
        {
14295
          opname = enc[flavour];
14296
          constraint (inst.operands[0].reg != inst.operands[1].reg,
14297
                      _("operands 0 and 1 must be the same register"));
14298
          inst.operands[1] = inst.operands[2];
14299
          memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14300
        }
14301
    }
14302
  else
14303
    {
14304
      /* Conversions without bitshift.  */
14305
      const char *enc[] =
14306
        {
14307
          "ftosis",
14308
          "ftouis",
14309
          "fsitos",
14310
          "fuitos",
14311
          "NULL",
14312
          "NULL",
14313
          "fcvtsd",
14314
          "fcvtds",
14315
          "ftosid",
14316
          "ftouid",
14317
          "fsitod",
14318
          "fuitod"
14319
        };
14320
 
14321
      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14322
        opname = enc[flavour];
14323
    }
14324
 
14325
  if (opname)
14326
    do_vfp_nsyn_opcode (opname);
14327
}
14328
 
14329
static void
14330
do_vfp_nsyn_cvtz (void)
14331
{
14332
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
14333
  int flavour = neon_cvt_flavour (rs);
14334
  const char *enc[] =
14335
    {
14336
      "ftosizs",
14337
      "ftouizs",
14338
      NULL,
14339
      NULL,
14340
      NULL,
14341
      NULL,
14342
      NULL,
14343
      NULL,
14344
      "ftosizd",
14345
      "ftouizd"
14346
    };
14347
 
14348
  if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
14349
    do_vfp_nsyn_opcode (enc[flavour]);
14350
}
14351
 
14352
static void
14353
do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED)
14354
{
14355
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
14356
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
14357
  int flavour = neon_cvt_flavour (rs);
14358
 
14359
  /* PR11109: Handle round-to-zero for VCVT conversions.  */
14360
  if (round_to_zero
14361
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
14362
      && (flavour == 0 || flavour == 1 || flavour == 8 || flavour == 9)
14363
      && (rs == NS_FD || rs == NS_FF))
14364
    {
14365
      do_vfp_nsyn_cvtz ();
14366
      return;
14367
    }
14368
 
14369
  /* VFP rather than Neon conversions.  */
14370
  if (flavour >= 6)
14371
    {
14372
      do_vfp_nsyn_cvt (rs, flavour);
14373
      return;
14374
    }
14375
 
14376
  switch (rs)
14377
    {
14378
    case NS_DDI:
14379
    case NS_QQI:
14380
      {
14381
        unsigned immbits;
14382
        unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
14383
 
14384
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14385
          return;
14386
 
14387
        /* Fixed-point conversion with #0 immediate is encoded as an
14388
           integer conversion.  */
14389
        if (inst.operands[2].present && inst.operands[2].imm == 0)
14390
          goto int_encode;
14391
        immbits = 32 - inst.operands[2].imm;
14392
        NEON_ENCODE (IMMED, inst);
14393
        if (flavour != -1)
14394
          inst.instruction |= enctab[flavour];
14395
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14396
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14397
        inst.instruction |= LOW4 (inst.operands[1].reg);
14398
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14399
        inst.instruction |= neon_quad (rs) << 6;
14400
        inst.instruction |= 1 << 21;
14401
        inst.instruction |= immbits << 16;
14402
 
14403
        neon_dp_fixup (&inst);
14404
      }
14405
      break;
14406
 
14407
    case NS_DD:
14408
    case NS_QQ:
14409
    int_encode:
14410
      {
14411
        unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
14412
 
14413
        NEON_ENCODE (INTEGER, inst);
14414
 
14415
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14416
          return;
14417
 
14418
        if (flavour != -1)
14419
          inst.instruction |= enctab[flavour];
14420
 
14421
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14422
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14423
        inst.instruction |= LOW4 (inst.operands[1].reg);
14424
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14425
        inst.instruction |= neon_quad (rs) << 6;
14426
        inst.instruction |= 2 << 18;
14427
 
14428
        neon_dp_fixup (&inst);
14429
      }
14430
      break;
14431
 
14432
    /* Half-precision conversions for Advanced SIMD -- neon.  */
14433
    case NS_QD:
14434
    case NS_DQ:
14435
 
14436
      if ((rs == NS_DQ)
14437
          && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
14438
        {
14439
          as_bad (_("operand size must match register width"));
14440
          break;
14441
        }
14442
 
14443
      if ((rs == NS_QD)
14444
          && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
14445
        {
14446
          as_bad (_("operand size must match register width"));
14447
          break;
14448
        }
14449
 
14450
      if (rs == NS_DQ)
14451
        inst.instruction = 0x3b60600;
14452
      else
14453
        inst.instruction = 0x3b60700;
14454
 
14455
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14456
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14457
      inst.instruction |= LOW4 (inst.operands[1].reg);
14458
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14459
      neon_dp_fixup (&inst);
14460
      break;
14461
 
14462
    default:
14463
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
14464
      do_vfp_nsyn_cvt (rs, flavour);
14465
    }
14466
}
14467
 
14468
static void
14469
do_neon_cvtr (void)
14470
{
14471
  do_neon_cvt_1 (FALSE);
14472
}
14473
 
14474
static void
14475
do_neon_cvt (void)
14476
{
14477
  do_neon_cvt_1 (TRUE);
14478
}
14479
 
14480
static void
14481
do_neon_cvtb (void)
14482
{
14483
  inst.instruction = 0xeb20a40;
14484
 
14485
  /* The sizes are attached to the mnemonic.  */
14486
  if (inst.vectype.el[0].type != NT_invtype
14487
      && inst.vectype.el[0].size == 16)
14488
    inst.instruction |= 0x00010000;
14489
 
14490
  /* Programmer's syntax: the sizes are attached to the operands.  */
14491
  else if (inst.operands[0].vectype.type != NT_invtype
14492
           && inst.operands[0].vectype.size == 16)
14493
    inst.instruction |= 0x00010000;
14494
 
14495
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
14496
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
14497
  do_vfp_cond_or_thumb ();
14498
}
14499
 
14500
 
14501
static void
14502
do_neon_cvtt (void)
14503
{
14504
  do_neon_cvtb ();
14505
  inst.instruction |= 0x80;
14506
}
14507
 
14508
static void
14509
neon_move_immediate (void)
14510
{
14511
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
14512
  struct neon_type_el et = neon_check_type (2, rs,
14513
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14514
  unsigned immlo, immhi = 0, immbits;
14515
  int op, cmode, float_p;
14516
 
14517
  constraint (et.type == NT_invtype,
14518
              _("operand size must be specified for immediate VMOV"));
14519
 
14520
  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
14521
  op = (inst.instruction & (1 << 5)) != 0;
14522
 
14523
  immlo = inst.operands[1].imm;
14524
  if (inst.operands[1].regisimm)
14525
    immhi = inst.operands[1].reg;
14526
 
14527
  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
14528
              _("immediate has bits set outside the operand size"));
14529
 
14530
  float_p = inst.operands[1].immisfloat;
14531
 
14532
  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
14533
                                        et.size, et.type)) == FAIL)
14534
    {
14535
      /* Invert relevant bits only.  */
14536
      neon_invert_size (&immlo, &immhi, et.size);
14537
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
14538
         with one or the other; those cases are caught by
14539
         neon_cmode_for_move_imm.  */
14540
      op = !op;
14541
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
14542
                                            &op, et.size, et.type)) == FAIL)
14543
        {
14544
          first_error (_("immediate out of range"));
14545
          return;
14546
        }
14547
    }
14548
 
14549
  inst.instruction &= ~(1 << 5);
14550
  inst.instruction |= op << 5;
14551
 
14552
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14553
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14554
  inst.instruction |= neon_quad (rs) << 6;
14555
  inst.instruction |= cmode << 8;
14556
 
14557
  neon_write_immbits (immbits);
14558
}
14559
 
14560
static void
14561
do_neon_mvn (void)
14562
{
14563
  if (inst.operands[1].isreg)
14564
    {
14565
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14566
 
14567
      NEON_ENCODE (INTEGER, inst);
14568
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14569
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14570
      inst.instruction |= LOW4 (inst.operands[1].reg);
14571
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14572
      inst.instruction |= neon_quad (rs) << 6;
14573
    }
14574
  else
14575
    {
14576
      NEON_ENCODE (IMMED, inst);
14577
      neon_move_immediate ();
14578
    }
14579
 
14580
  neon_dp_fixup (&inst);
14581
}
14582
 
14583
/* Encode instructions of form:
14584
 
14585
  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
14586
  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
14587
 
14588
static void
14589
neon_mixed_length (struct neon_type_el et, unsigned size)
14590
{
14591
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14592
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14593
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14594
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14595
  inst.instruction |= LOW4 (inst.operands[2].reg);
14596
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14597
  inst.instruction |= (et.type == NT_unsigned) << 24;
14598
  inst.instruction |= neon_logbits (size) << 20;
14599
 
14600
  neon_dp_fixup (&inst);
14601
}
14602
 
14603
static void
14604
do_neon_dyadic_long (void)
14605
{
14606
  /* FIXME: Type checking for lengthening op.  */
14607
  struct neon_type_el et = neon_check_type (3, NS_QDD,
14608
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
14609
  neon_mixed_length (et, et.size);
14610
}
14611
 
14612
static void
14613
do_neon_abal (void)
14614
{
14615
  struct neon_type_el et = neon_check_type (3, NS_QDD,
14616
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
14617
  neon_mixed_length (et, et.size);
14618
}
14619
 
14620
static void
14621
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
14622
{
14623
  if (inst.operands[2].isscalar)
14624
    {
14625
      struct neon_type_el et = neon_check_type (3, NS_QDS,
14626
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
14627
      NEON_ENCODE (SCALAR, inst);
14628
      neon_mul_mac (et, et.type == NT_unsigned);
14629
    }
14630
  else
14631
    {
14632
      struct neon_type_el et = neon_check_type (3, NS_QDD,
14633
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
14634
      NEON_ENCODE (INTEGER, inst);
14635
      neon_mixed_length (et, et.size);
14636
    }
14637
}
14638
 
14639
static void
14640
do_neon_mac_maybe_scalar_long (void)
14641
{
14642
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
14643
}
14644
 
14645
static void
14646
do_neon_dyadic_wide (void)
14647
{
14648
  struct neon_type_el et = neon_check_type (3, NS_QQD,
14649
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
14650
  neon_mixed_length (et, et.size);
14651
}
14652
 
14653
static void
14654
do_neon_dyadic_narrow (void)
14655
{
14656
  struct neon_type_el et = neon_check_type (3, NS_QDD,
14657
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
14658
  /* Operand sign is unimportant, and the U bit is part of the opcode,
14659
     so force the operand type to integer.  */
14660
  et.type = NT_integer;
14661
  neon_mixed_length (et, et.size / 2);
14662
}
14663
 
14664
static void
14665
do_neon_mul_sat_scalar_long (void)
14666
{
14667
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
14668
}
14669
 
14670
static void
14671
do_neon_vmull (void)
14672
{
14673
  if (inst.operands[2].isscalar)
14674
    do_neon_mac_maybe_scalar_long ();
14675
  else
14676
    {
14677
      struct neon_type_el et = neon_check_type (3, NS_QDD,
14678
        N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
14679
      if (et.type == NT_poly)
14680
        NEON_ENCODE (POLY, inst);
14681
      else
14682
        NEON_ENCODE (INTEGER, inst);
14683
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
14684
         zero. Should be OK as-is.  */
14685
      neon_mixed_length (et, et.size);
14686
    }
14687
}
14688
 
14689
static void
14690
do_neon_ext (void)
14691
{
14692
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
14693
  struct neon_type_el et = neon_check_type (3, rs,
14694
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14695
  unsigned imm = (inst.operands[3].imm * et.size) / 8;
14696
 
14697
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
14698
              _("shift out of range"));
14699
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14700
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14701
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14702
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14703
  inst.instruction |= LOW4 (inst.operands[2].reg);
14704
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14705
  inst.instruction |= neon_quad (rs) << 6;
14706
  inst.instruction |= imm << 8;
14707
 
14708
  neon_dp_fixup (&inst);
14709
}
14710
 
14711
static void
14712
do_neon_rev (void)
14713
{
14714
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14715
  struct neon_type_el et = neon_check_type (2, rs,
14716
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
14717
  unsigned op = (inst.instruction >> 7) & 3;
14718
  /* N (width of reversed regions) is encoded as part of the bitmask. We
14719
     extract it here to check that the elements to be reversed are smaller.
14720
     Otherwise we'd get a reserved instruction.  */
14721
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
14722
  gas_assert (elsize != 0);
14723
  constraint (et.size >= elsize,
14724
              _("elements must be smaller than reversal region"));
14725
  neon_two_same (neon_quad (rs), 1, et.size);
14726
}
14727
 
14728
static void
14729
do_neon_dup (void)
14730
{
14731
  if (inst.operands[1].isscalar)
14732
    {
14733
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
14734
      struct neon_type_el et = neon_check_type (2, rs,
14735
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
14736
      unsigned sizebits = et.size >> 3;
14737
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
14738
      int logsize = neon_logbits (et.size);
14739
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
14740
 
14741
      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
14742
        return;
14743
 
14744
      NEON_ENCODE (SCALAR, inst);
14745
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14746
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14747
      inst.instruction |= LOW4 (dm);
14748
      inst.instruction |= HI1 (dm) << 5;
14749
      inst.instruction |= neon_quad (rs) << 6;
14750
      inst.instruction |= x << 17;
14751
      inst.instruction |= sizebits << 16;
14752
 
14753
      neon_dp_fixup (&inst);
14754
    }
14755
  else
14756
    {
14757
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
14758
      struct neon_type_el et = neon_check_type (2, rs,
14759
        N_8 | N_16 | N_32 | N_KEY, N_EQK);
14760
      /* Duplicate ARM register to lanes of vector.  */
14761
      NEON_ENCODE (ARMREG, inst);
14762
      switch (et.size)
14763
        {
14764
        case 8:  inst.instruction |= 0x400000; break;
14765
        case 16: inst.instruction |= 0x000020; break;
14766
        case 32: inst.instruction |= 0x000000; break;
14767
        default: break;
14768
        }
14769
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
14770
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
14771
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
14772
      inst.instruction |= neon_quad (rs) << 21;
14773
      /* The encoding for this instruction is identical for the ARM and Thumb
14774
         variants, except for the condition field.  */
14775
      do_vfp_cond_or_thumb ();
14776
    }
14777
}
14778
 
14779
/* VMOV has particularly many variations. It can be one of:
14780
     0. VMOV<c><q> <Qd>, <Qm>
14781
     1. VMOV<c><q> <Dd>, <Dm>
14782
   (Register operations, which are VORR with Rm = Rn.)
14783
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
14784
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
14785
   (Immediate loads.)
14786
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14787
   (ARM register to scalar.)
14788
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14789
   (Two ARM registers to vector.)
14790
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14791
   (Scalar to ARM register.)
14792
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14793
   (Vector to two ARM registers.)
14794
     8. VMOV.F32 <Sd>, <Sm>
14795
     9. VMOV.F64 <Dd>, <Dm>
14796
   (VFP register moves.)
14797
    10. VMOV.F32 <Sd>, #imm
14798
    11. VMOV.F64 <Dd>, #imm
14799
   (VFP float immediate load.)
14800
    12. VMOV <Rd>, <Sm>
14801
   (VFP single to ARM reg.)
14802
    13. VMOV <Sd>, <Rm>
14803
   (ARM reg to VFP single.)
14804
    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14805
   (Two ARM regs to two VFP singles.)
14806
    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14807
   (Two VFP singles to two ARM regs.)
14808
 
14809
   These cases can be disambiguated using neon_select_shape, except cases 1/9
14810
   and 3/11 which depend on the operand type too.
14811
 
14812
   All the encoded bits are hardcoded by this function.
14813
 
14814
   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14815
   Cases 5, 7 may be used with VFPv2 and above.
14816
 
14817
   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14818
   can specify a type where it doesn't make sense to, and it is ignored).  */
14819
 
14820
static void
14821
do_neon_mov (void)
14822
{
14823
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
14824
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
14825
    NS_NULL);
14826
  struct neon_type_el et;
14827
  const char *ldconst = 0;
14828
 
14829
  switch (rs)
14830
    {
14831
    case NS_DD:  /* case 1/9.  */
14832
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
14833
      /* It is not an error here if no type is given.  */
14834
      inst.error = NULL;
14835
      if (et.type == NT_float && et.size == 64)
14836
        {
14837
          do_vfp_nsyn_opcode ("fcpyd");
14838
          break;
14839
        }
14840
      /* fall through.  */
14841
 
14842
    case NS_QQ:  /* case 0/1.  */
14843
      {
14844
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14845
          return;
14846
        /* The architecture manual I have doesn't explicitly state which
14847
           value the U bit should have for register->register moves, but
14848
           the equivalent VORR instruction has U = 0, so do that.  */
14849
        inst.instruction = 0x0200110;
14850
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14851
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14852
        inst.instruction |= LOW4 (inst.operands[1].reg);
14853
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14854
        inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14855
        inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14856
        inst.instruction |= neon_quad (rs) << 6;
14857
 
14858
        neon_dp_fixup (&inst);
14859
      }
14860
      break;
14861
 
14862
    case NS_DI:  /* case 3/11.  */
14863
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
14864
      inst.error = NULL;
14865
      if (et.type == NT_float && et.size == 64)
14866
        {
14867
          /* case 11 (fconstd).  */
14868
          ldconst = "fconstd";
14869
          goto encode_fconstd;
14870
        }
14871
      /* fall through.  */
14872
 
14873
    case NS_QI:  /* case 2/3.  */
14874
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14875
        return;
14876
      inst.instruction = 0x0800010;
14877
      neon_move_immediate ();
14878
      neon_dp_fixup (&inst);
14879
      break;
14880
 
14881
    case NS_SR:  /* case 4.  */
14882
      {
14883
        unsigned bcdebits = 0;
14884
        int logsize;
14885
        unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
14886
        unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
14887
 
14888
        et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
14889
        logsize = neon_logbits (et.size);
14890
 
14891
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
14892
                    _(BAD_FPU));
14893
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
14894
                    && et.size != 32, _(BAD_FPU));
14895
        constraint (et.type == NT_invtype, _("bad type for scalar"));
14896
        constraint (x >= 64 / et.size, _("scalar index out of range"));
14897
 
14898
        switch (et.size)
14899
          {
14900
          case 8:  bcdebits = 0x8; break;
14901
          case 16: bcdebits = 0x1; break;
14902
          case 32: bcdebits = 0x0; break;
14903
          default: ;
14904
          }
14905
 
14906
        bcdebits |= x << logsize;
14907
 
14908
        inst.instruction = 0xe000b10;
14909
        do_vfp_cond_or_thumb ();
14910
        inst.instruction |= LOW4 (dn) << 16;
14911
        inst.instruction |= HI1 (dn) << 7;
14912
        inst.instruction |= inst.operands[1].reg << 12;
14913
        inst.instruction |= (bcdebits & 3) << 5;
14914
        inst.instruction |= (bcdebits >> 2) << 21;
14915
      }
14916
      break;
14917
 
14918
    case NS_DRR:  /* case 5 (fmdrr).  */
14919
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
14920
                  _(BAD_FPU));
14921
 
14922
      inst.instruction = 0xc400b10;
14923
      do_vfp_cond_or_thumb ();
14924
      inst.instruction |= LOW4 (inst.operands[0].reg);
14925
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
14926
      inst.instruction |= inst.operands[1].reg << 12;
14927
      inst.instruction |= inst.operands[2].reg << 16;
14928
      break;
14929
 
14930
    case NS_RS:  /* case 6.  */
14931
      {
14932
        unsigned logsize;
14933
        unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
14934
        unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
14935
        unsigned abcdebits = 0;
14936
 
14937
        et = neon_check_type (2, NS_NULL,
14938
                              N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
14939
        logsize = neon_logbits (et.size);
14940
 
14941
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
14942
                    _(BAD_FPU));
14943
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
14944
                    && et.size != 32, _(BAD_FPU));
14945
        constraint (et.type == NT_invtype, _("bad type for scalar"));
14946
        constraint (x >= 64 / et.size, _("scalar index out of range"));
14947
 
14948
        switch (et.size)
14949
          {
14950
          case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
14951
          case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
14952
          case 32: abcdebits = 0x00; break;
14953
          default: ;
14954
          }
14955
 
14956
        abcdebits |= x << logsize;
14957
        inst.instruction = 0xe100b10;
14958
        do_vfp_cond_or_thumb ();
14959
        inst.instruction |= LOW4 (dn) << 16;
14960
        inst.instruction |= HI1 (dn) << 7;
14961
        inst.instruction |= inst.operands[0].reg << 12;
14962
        inst.instruction |= (abcdebits & 3) << 5;
14963
        inst.instruction |= (abcdebits >> 2) << 21;
14964
      }
14965
      break;
14966
 
14967
    case NS_RRD:  /* case 7 (fmrrd).  */
14968
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
14969
                  _(BAD_FPU));
14970
 
14971
      inst.instruction = 0xc500b10;
14972
      do_vfp_cond_or_thumb ();
14973
      inst.instruction |= inst.operands[0].reg << 12;
14974
      inst.instruction |= inst.operands[1].reg << 16;
14975
      inst.instruction |= LOW4 (inst.operands[2].reg);
14976
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14977
      break;
14978
 
14979
    case NS_FF:  /* case 8 (fcpys).  */
14980
      do_vfp_nsyn_opcode ("fcpys");
14981
      break;
14982
 
14983
    case NS_FI:  /* case 10 (fconsts).  */
14984
      ldconst = "fconsts";
14985
      encode_fconstd:
14986
      if (is_quarter_float (inst.operands[1].imm))
14987
        {
14988
          inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
14989
          do_vfp_nsyn_opcode (ldconst);
14990
        }
14991
      else
14992
        first_error (_("immediate out of range"));
14993
      break;
14994
 
14995
    case NS_RF:  /* case 12 (fmrs).  */
14996
      do_vfp_nsyn_opcode ("fmrs");
14997
      break;
14998
 
14999
    case NS_FR:  /* case 13 (fmsr).  */
15000
      do_vfp_nsyn_opcode ("fmsr");
15001
      break;
15002
 
15003
    /* The encoders for the fmrrs and fmsrr instructions expect three operands
15004
       (one of which is a list), but we have parsed four.  Do some fiddling to
15005
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15006
       expect.  */
15007
    case NS_RRFF:  /* case 14 (fmrrs).  */
15008
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
15009
                  _("VFP registers must be adjacent"));
15010
      inst.operands[2].imm = 2;
15011
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15012
      do_vfp_nsyn_opcode ("fmrrs");
15013
      break;
15014
 
15015
    case NS_FFRR:  /* case 15 (fmsrr).  */
15016
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
15017
                  _("VFP registers must be adjacent"));
15018
      inst.operands[1] = inst.operands[2];
15019
      inst.operands[2] = inst.operands[3];
15020
      inst.operands[0].imm = 2;
15021
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15022
      do_vfp_nsyn_opcode ("fmsrr");
15023
      break;
15024
 
15025
    default:
15026
      abort ();
15027
    }
15028
}
15029
 
15030
static void
15031
do_neon_rshift_round_imm (void)
15032
{
15033
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15034
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15035
  int imm = inst.operands[2].imm;
15036
 
15037
  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
15038
  if (imm == 0)
15039
    {
15040
      inst.operands[2].present = 0;
15041
      do_neon_mov ();
15042
      return;
15043
    }
15044
 
15045
  constraint (imm < 1 || (unsigned)imm > et.size,
15046
              _("immediate out of range for shift"));
15047
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15048
                  et.size - imm);
15049
}
15050
 
15051
static void
15052
do_neon_movl (void)
15053
{
15054
  struct neon_type_el et = neon_check_type (2, NS_QD,
15055
    N_EQK | N_DBL, N_SU_32 | N_KEY);
15056
  unsigned sizebits = et.size >> 3;
15057
  inst.instruction |= sizebits << 19;
15058
  neon_two_same (0, et.type == NT_unsigned, -1);
15059
}
15060
 
15061
static void
15062
do_neon_trn (void)
15063
{
15064
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15065
  struct neon_type_el et = neon_check_type (2, rs,
15066
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
15067
  NEON_ENCODE (INTEGER, inst);
15068
  neon_two_same (neon_quad (rs), 1, et.size);
15069
}
15070
 
15071
static void
15072
do_neon_zip_uzp (void)
15073
{
15074
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15075
  struct neon_type_el et = neon_check_type (2, rs,
15076
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
15077
  if (rs == NS_DD && et.size == 32)
15078
    {
15079
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
15080
      inst.instruction = N_MNEM_vtrn;
15081
      do_neon_trn ();
15082
      return;
15083
    }
15084
  neon_two_same (neon_quad (rs), 1, et.size);
15085
}
15086
 
15087
static void
15088
do_neon_sat_abs_neg (void)
15089
{
15090
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15091
  struct neon_type_el et = neon_check_type (2, rs,
15092
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15093
  neon_two_same (neon_quad (rs), 1, et.size);
15094
}
15095
 
15096
static void
15097
do_neon_pair_long (void)
15098
{
15099
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15100
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15101
  /* Unsigned is encoded in the OP field (bit 7) for these instructions.  */
15102
  inst.instruction |= (et.type == NT_unsigned) << 7;
15103
  neon_two_same (neon_quad (rs), 1, et.size);
15104
}
15105
 
15106
static void
15107
do_neon_recip_est (void)
15108
{
15109
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15110
  struct neon_type_el et = neon_check_type (2, rs,
15111
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15112
  inst.instruction |= (et.type == NT_float) << 8;
15113
  neon_two_same (neon_quad (rs), 1, et.size);
15114
}
15115
 
15116
static void
15117
do_neon_cls (void)
15118
{
15119
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15120
  struct neon_type_el et = neon_check_type (2, rs,
15121
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15122
  neon_two_same (neon_quad (rs), 1, et.size);
15123
}
15124
 
15125
static void
15126
do_neon_clz (void)
15127
{
15128
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15129
  struct neon_type_el et = neon_check_type (2, rs,
15130
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15131
  neon_two_same (neon_quad (rs), 1, et.size);
15132
}
15133
 
15134
static void
15135
do_neon_cnt (void)
15136
{
15137
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15138
  struct neon_type_el et = neon_check_type (2, rs,
15139
    N_EQK | N_INT, N_8 | N_KEY);
15140
  neon_two_same (neon_quad (rs), 1, et.size);
15141
}
15142
 
15143
static void
15144
do_neon_swp (void)
15145
{
15146
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15147
  neon_two_same (neon_quad (rs), 1, -1);
15148
}
15149
 
15150
static void
15151
do_neon_tbl_tbx (void)
15152
{
15153
  unsigned listlenbits;
15154
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
15155
 
15156
  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
15157
    {
15158
      first_error (_("bad list length for table lookup"));
15159
      return;
15160
    }
15161
 
15162
  listlenbits = inst.operands[1].imm - 1;
15163
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15164
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15165
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15166
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15167
  inst.instruction |= LOW4 (inst.operands[2].reg);
15168
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15169
  inst.instruction |= listlenbits << 8;
15170
 
15171
  neon_dp_fixup (&inst);
15172
}
15173
 
15174
static void
15175
do_neon_ldm_stm (void)
15176
{
15177
  /* P, U and L bits are part of bitmask.  */
15178
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
15179
  unsigned offsetbits = inst.operands[1].imm * 2;
15180
 
15181
  if (inst.operands[1].issingle)
15182
    {
15183
      do_vfp_nsyn_ldm_stm (is_dbmode);
15184
      return;
15185
    }
15186
 
15187
  constraint (is_dbmode && !inst.operands[0].writeback,
15188
              _("writeback (!) must be used for VLDMDB and VSTMDB"));
15189
 
15190
  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15191
              _("register list must contain at least 1 and at most 16 "
15192
                "registers"));
15193
 
15194
  inst.instruction |= inst.operands[0].reg << 16;
15195
  inst.instruction |= inst.operands[0].writeback << 21;
15196
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15197
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;
15198
 
15199
  inst.instruction |= offsetbits;
15200
 
15201
  do_vfp_cond_or_thumb ();
15202
}
15203
 
15204
static void
15205
do_neon_ldr_str (void)
15206
{
15207
  int is_ldr = (inst.instruction & (1 << 20)) != 0;
15208
 
15209
  /* Use of PC in vstr in ARM mode is deprecated in ARMv7,
15210
     and is UNPREDICTABLE in Thumb mode.  */
15211
  if (!is_ldr
15212
      && inst.operands[1].reg == REG_PC
15213
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
15214
    {
15215
      if (!thumb_mode && warn_on_deprecated)
15216
        as_warn (_("Use of PC here is deprecated"));
15217
      else
15218
        inst.error = _("Use of PC here is UNPREDICTABLE");
15219
    }
15220
 
15221
  if (inst.operands[0].issingle)
15222
    {
15223
      if (is_ldr)
15224
        do_vfp_nsyn_opcode ("flds");
15225
      else
15226
        do_vfp_nsyn_opcode ("fsts");
15227
    }
15228
  else
15229
    {
15230
      if (is_ldr)
15231
        do_vfp_nsyn_opcode ("fldd");
15232
      else
15233
        do_vfp_nsyn_opcode ("fstd");
15234
    }
15235
}
15236
 
15237
/* "interleave" version also handles non-interleaving register VLD1/VST1
15238
   instructions.  */
15239
 
15240
static void
15241
do_neon_ld_st_interleave (void)
15242
{
15243
  struct neon_type_el et = neon_check_type (1, NS_NULL,
15244
                                            N_8 | N_16 | N_32 | N_64);
15245
  unsigned alignbits = 0;
15246
  unsigned idx;
15247
  /* The bits in this table go:
15248
     0: register stride of one (0) or two (1)
15249
     1,2: register list length, minus one (1, 2, 3, 4).
15250
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
15251
     We use -1 for invalid entries.  */
15252
  const int typetable[] =
15253
    {
15254
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
15255
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
15256
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
15257
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
15258
    };
15259
  int typebits;
15260
 
15261
  if (et.type == NT_invtype)
15262
    return;
15263
 
15264
  if (inst.operands[1].immisalign)
15265
    switch (inst.operands[1].imm >> 8)
15266
      {
15267
      case 64: alignbits = 1; break;
15268
      case 128:
15269
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
15270
            && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15271
          goto bad_alignment;
15272
        alignbits = 2;
15273
        break;
15274
      case 256:
15275
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15276
          goto bad_alignment;
15277
        alignbits = 3;
15278
        break;
15279
      default:
15280
      bad_alignment:
15281
        first_error (_("bad alignment"));
15282
        return;
15283
      }
15284
 
15285
  inst.instruction |= alignbits << 4;
15286
  inst.instruction |= neon_logbits (et.size) << 6;
15287
 
15288
  /* Bits [4:6] of the immediate in a list specifier encode register stride
15289
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
15290
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
15291
     up the right value for "type" in a table based on this value and the given
15292
     list style, then stick it back.  */
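  /* Worked example: "vld2.16 {d0,d1}, [r0]" has a list length of 2 and a
     register stride of 1, so ((inst.operands[0].imm >> 4) & 7) is 2, and
     <n> == 2 contributes (((inst.instruction >> 8) & 3) << 3) == 8, giving
     idx == 10 and typetable[10] == 0x8, the two-register VLD2/VST2 "type"
     value.  */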
15293
  idx = ((inst.operands[0].imm >> 4) & 7)
15294
        | (((inst.instruction >> 8) & 3) << 3);
15295
 
15296
  typebits = typetable[idx];
15297
 
15298
  constraint (typebits == -1, _("bad list type for instruction"));
15299
 
15300
  inst.instruction &= ~0xf00;
15301
  inst.instruction |= typebits << 8;
15302
}
15303
 
15304
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
15305
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
15306
   otherwise. The variable arguments are a list of pairs of legal (size, align)
15307
   values, terminated with -1.  */
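
/* For instance, the VLD2/VST2 lane case below passes the pairs (8,16),
   (16,32) and (32,64), so a .16 element list with 32-bit alignment is
   accepted and sets the alignment bit, while an operand without an
   alignment specifier simply leaves *DO_ALIGN clear.  */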
15308
 
15309
static int
15310
neon_alignment_bit (int size, int align, int *do_align, ...)
15311
{
15312
  va_list ap;
15313
  int result = FAIL, thissize, thisalign;
15314
 
15315
  if (!inst.operands[1].immisalign)
15316
    {
15317
      *do_align = 0;
15318
      return SUCCESS;
15319
    }
15320
 
15321
  va_start (ap, do_align);
15322
 
15323
  do
15324
    {
15325
      thissize = va_arg (ap, int);
15326
      if (thissize == -1)
15327
        break;
15328
      thisalign = va_arg (ap, int);
15329
 
15330
      if (size == thissize && align == thisalign)
15331
        result = SUCCESS;
15332
    }
15333
  while (result != SUCCESS);
15334
 
15335
  va_end (ap);
15336
 
15337
  if (result == SUCCESS)
15338
    *do_align = 1;
15339
  else
15340
    first_error (_("unsupported alignment for instruction"));
15341
 
15342
  return result;
15343
}
15344
 
15345
static void
15346
do_neon_ld_st_lane (void)
15347
{
15348
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15349
  int align_good, do_align = 0;
15350
  int logsize = neon_logbits (et.size);
15351
  int align = inst.operands[1].imm >> 8;
15352
  int n = (inst.instruction >> 8) & 3;
15353
  int max_el = 64 / et.size;
15354
 
15355
  if (et.type == NT_invtype)
15356
    return;
15357
 
15358
  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
15359
              _("bad list length"));
15360
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
15361
              _("scalar index out of range"));
15362
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
15363
              && et.size == 8,
15364
              _("stride of 2 unavailable when element size is 8"));
15365
 
15366
  switch (n)
15367
    {
15368
    case 0:  /* VLD1 / VST1.  */
15369
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
15370
                                       32, 32, -1);
15371
      if (align_good == FAIL)
15372
        return;
15373
      if (do_align)
15374
        {
15375
          unsigned alignbits = 0;
15376
          switch (et.size)
15377
            {
15378
            case 16: alignbits = 0x1; break;
15379
            case 32: alignbits = 0x3; break;
15380
            default: ;
15381
            }
15382
          inst.instruction |= alignbits << 4;
15383
        }
15384
      break;
15385
 
15386
    case 1:  /* VLD2 / VST2.  */
15387
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
15388
                                       32, 64, -1);
15389
      if (align_good == FAIL)
15390
        return;
15391
      if (do_align)
15392
        inst.instruction |= 1 << 4;
15393
      break;
15394
 
15395
    case 2:  /* VLD3 / VST3.  */
15396
      constraint (inst.operands[1].immisalign,
15397
                  _("can't use alignment with this instruction"));
15398
      break;
15399
 
15400
    case 3:  /* VLD4 / VST4.  */
15401
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15402
                                       16, 64, 32, 64, 32, 128, -1);
15403
      if (align_good == FAIL)
15404
        return;
15405
      if (do_align)
15406
        {
15407
          unsigned alignbits = 0;
15408
          switch (et.size)
15409
            {
15410
            case 8:  alignbits = 0x1; break;
15411
            case 16: alignbits = 0x1; break;
15412
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
15413
            default: ;
15414
            }
15415
          inst.instruction |= alignbits << 4;
15416
        }
15417
      break;
15418
 
15419
    default: ;
15420
    }
15421
 
15422
  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
15423
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15424
    inst.instruction |= 1 << (4 + logsize);
15425
 
15426
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
15427
  inst.instruction |= logsize << 10;
15428
}
15429
 
15430
/* Encode single n-element structure to all lanes VLD<n> instructions.  */
15431
 
15432
static void
15433
do_neon_ld_dup (void)
15434
{
15435
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15436
  int align_good, do_align = 0;
15437
 
15438
  if (et.type == NT_invtype)
15439
    return;
15440
 
15441
  switch ((inst.instruction >> 8) & 3)
15442
    {
15443
    case 0:  /* VLD1.  */
15444
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
15445
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15446
                                       &do_align, 16, 16, 32, 32, -1);
15447
      if (align_good == FAIL)
15448
        return;
15449
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
15450
        {
15451
        case 1: break;
15452
        case 2: inst.instruction |= 1 << 5; break;
15453
        default: first_error (_("bad list length")); return;
15454
        }
15455
      inst.instruction |= neon_logbits (et.size) << 6;
15456
      break;
15457
 
15458
    case 1:  /* VLD2.  */
15459
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15460
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
15461
      if (align_good == FAIL)
15462
        return;
15463
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
15464
                  _("bad list length"));
15465
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15466
        inst.instruction |= 1 << 5;
15467
      inst.instruction |= neon_logbits (et.size) << 6;
15468
      break;
15469
 
15470
    case 2:  /* VLD3.  */
15471
      constraint (inst.operands[1].immisalign,
15472
                  _("can't use alignment with this instruction"));
15473
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
15474
                  _("bad list length"));
15475
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15476
        inst.instruction |= 1 << 5;
15477
      inst.instruction |= neon_logbits (et.size) << 6;
15478
      break;
15479
 
15480
    case 3:  /* VLD4.  */
15481
      {
15482
        int align = inst.operands[1].imm >> 8;
15483
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15484
                                         16, 64, 32, 64, 32, 128, -1);
15485
        if (align_good == FAIL)
15486
          return;
15487
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
15488
                    _("bad list length"));
15489
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15490
          inst.instruction |= 1 << 5;
15491
        if (et.size == 32 && align == 128)
15492
          inst.instruction |= 0x3 << 6;
15493
        else
15494
          inst.instruction |= neon_logbits (et.size) << 6;
15495
      }
15496
      break;
15497
 
15498
    default: ;
15499
    }
15500
 
15501
  inst.instruction |= do_align << 4;
15502
}
15503
 
15504
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
15505
   apart from bits [11:4]).  */
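
/* For example, "vld1.32 {d0}, [r0]" parses as NEON_INTERLEAVE_LANES and is
   encoded by do_neon_ld_st_interleave, "vld1.32 {d0[]}, [r0]" parses as
   NEON_ALL_LANES and goes through do_neon_ld_dup, and "vld1.32 {d0[1]}, [r0]"
   takes the per-lane path in do_neon_ld_st_lane.  */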
15506
 
15507
static void
15508
do_neon_ldx_stx (void)
15509
{
15510
  if (inst.operands[1].isreg)
15511
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);
15512
 
15513
  switch (NEON_LANE (inst.operands[0].imm))
15514
    {
15515
    case NEON_INTERLEAVE_LANES:
15516
      NEON_ENCODE (INTERLV, inst);
15517
      do_neon_ld_st_interleave ();
15518
      break;
15519
 
15520
    case NEON_ALL_LANES:
15521
      NEON_ENCODE (DUP, inst);
15522
      do_neon_ld_dup ();
15523
      break;
15524
 
15525
    default:
15526
      NEON_ENCODE (LANE, inst);
15527
      do_neon_ld_st_lane ();
15528
    }
15529
 
15530
  /* L bit comes from bit mask.  */
15531
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15532
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15533
  inst.instruction |= inst.operands[1].reg << 16;
15534
 
15535
  if (inst.operands[1].postind)
15536
    {
15537
      int postreg = inst.operands[1].imm & 0xf;
15538
      constraint (!inst.operands[1].immisreg,
15539
                  _("post-index must be a register"));
15540
      constraint (postreg == 0xd || postreg == 0xf,
15541
                  _("bad register for post-index"));
15542
      inst.instruction |= postreg;
15543
    }
15544
  else if (inst.operands[1].writeback)
15545
    {
15546
      inst.instruction |= 0xd;
15547
    }
15548
  else
15549
    inst.instruction |= 0xf;
15550
 
15551
  if (thumb_mode)
15552
    inst.instruction |= 0xf9000000;
15553
  else
15554
    inst.instruction |= 0xf4000000;
15555
}
15556
 
15557
/* Overall per-instruction processing.  */
15558
 
15559
/* We need to be able to fix up arbitrary expressions in some statements.
15560
   This is so that we can handle symbols that are an arbitrary distance from
15561
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
15562
   which returns part of an address in a form which will be valid for
15563
   a data instruction.  We do this by pushing the expression into a symbol
15564
   in the expr_section, and creating a fix for that.  */
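
/* For example, a pc-relative fixup whose expression folds to the constant
   0x8000 is converted below into a fixup against an absolute-section symbol
   named "*ABS*0x8000", so that the object file has a concrete symbol to
   relocate against.  */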
15565
 
15566
static void
15567
fix_new_arm (fragS *       frag,
15568
             int           where,
15569
             short int     size,
15570
             expressionS * exp,
15571
             int           pc_rel,
15572
             int           reloc)
15573
{
15574
  fixS *           new_fix;
15575
 
15576
  switch (exp->X_op)
15577
    {
15578
    case O_constant:
15579
      if (pc_rel)
15580
        {
15581
          /* Create an absolute valued symbol, so we have something to
15582
             refer to in the object file.  Unfortunately for us, gas's
15583
             generic expression parsing will already have folded out
15584
             any use of .set foo/.type foo %function that may have
15585
             been used to set type information of the target location,
15586
             that's being specified symbolically.  We have to presume
15587
             the user knows what they are doing.  */
15588
          char name[16 + 8];
15589
          symbolS *symbol;
15590
 
15591
          sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
15592
 
15593
          symbol = symbol_find_or_make (name);
15594
          S_SET_SEGMENT (symbol, absolute_section);
15595
          symbol_set_frag (symbol, &zero_address_frag);
15596
          S_SET_VALUE (symbol, exp->X_add_number);
15597
          exp->X_op = O_symbol;
15598
          exp->X_add_symbol = symbol;
15599
          exp->X_add_number = 0;
15600
        }
15601
      /* FALLTHROUGH */
15602
    case O_symbol:
15603
    case O_add:
15604
    case O_subtract:
15605
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
15606
                             (enum bfd_reloc_code_real) reloc);
15607
      break;
15608
 
15609
    default:
15610
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
15611
                                  pc_rel, (enum bfd_reloc_code_real) reloc);
15612
      break;
15613
    }
15614
 
15615
  /* Mark whether the fix is to a THUMB instruction, or an ARM
15616
     instruction.  */
15617
  new_fix->tc_fix_data = thumb_mode;
15618
}
15619
 
15620
/* Create a frag for an instruction requiring relaxation.  */
15621
static void
15622
output_relax_insn (void)
15623
{
15624
  char * to;
15625
  symbolS *sym;
15626
  int offset;
15627
 
15628
  /* The size of the instruction is unknown, so tie the debug info to the
15629
     start of the instruction.  */
15630
  dwarf2_emit_insn (0);
15631
 
15632
  switch (inst.reloc.exp.X_op)
15633
    {
15634
    case O_symbol:
15635
      sym = inst.reloc.exp.X_add_symbol;
15636
      offset = inst.reloc.exp.X_add_number;
15637
      break;
15638
    case O_constant:
15639
      sym = NULL;
15640
      offset = inst.reloc.exp.X_add_number;
15641
      break;
15642
    default:
15643
      sym = make_expr_symbol (&inst.reloc.exp);
15644
      offset = 0;
15645
      break;
15646
    }
15647
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
15648
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
15649
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
15650
}
15651
 
15652
/* Write a 32-bit thumb instruction to buf.  */
15653
static void
15654
put_thumb32_insn (char * buf, unsigned long insn)
15655
{
15656
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
15657
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
15658
}
15659
 
15660
static void
15661
output_inst (const char * str)
15662
{
15663
  char * to = NULL;
15664
 
15665
  if (inst.error)
15666
    {
15667
      as_bad ("%s -- `%s'", inst.error, str);
15668
      return;
15669
    }
15670
  if (inst.relax)
15671
    {
15672
      output_relax_insn ();
15673
      return;
15674
    }
15675
  if (inst.size == 0)
15676
    return;
15677
 
15678
  to = frag_more (inst.size);
15679
  /* PR 9814: Record the thumb mode into the current frag so that we know
15680
     what type of NOP padding to use, if necessary.  We override any previous
15681
     setting so that if the mode has changed then the NOPS that we use will
15682
     match the encoding of the last instruction in the frag.  */
15683
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
15684
 
15685
  if (thumb_mode && (inst.size > THUMB_SIZE))
15686
    {
15687
      gas_assert (inst.size == (2 * THUMB_SIZE));
15688
      put_thumb32_insn (to, inst.instruction);
15689
    }
15690
  else if (inst.size > INSN_SIZE)
15691
    {
15692
      gas_assert (inst.size == (2 * INSN_SIZE));
15693
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
15694
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
15695
    }
15696
  else
15697
    md_number_to_chars (to, inst.instruction, inst.size);
15698
 
15699
  if (inst.reloc.type != BFD_RELOC_UNUSED)
15700
    fix_new_arm (frag_now, to - frag_now->fr_literal,
15701
                 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
15702
                 inst.reloc.type);
15703
 
15704
  dwarf2_emit_insn (inst.size);
15705
}
15706
 
15707
static char *
15708
output_it_inst (int cond, int mask, char * to)
15709
{
15710
  unsigned long instruction = 0xbf00;
15711
 
15712
  mask &= 0xf;
15713
  instruction |= mask;
15714
  instruction |= cond << 4;
15715
 
15716
  if (to == NULL)
15717
    {
15718
      to = frag_more (2);
15719
#ifdef OBJ_ELF
15720
      dwarf2_emit_insn (2);
15721
#endif
15722
    }
15723
 
15724
  md_number_to_chars (to, instruction, 2);
15725
 
15726
  return to;
15727
}
15728
 
15729
/* Tag values used in struct asm_opcode's tag field.  */
15730
enum opcode_tag
15731
{
15732
  OT_unconditional,     /* Instruction cannot be conditionalized.
15733
                           The ARM condition field is still 0xE.  */
15734
  OT_unconditionalF,    /* Instruction cannot be conditionalized
15735
                           and carries 0xF in its ARM condition field.  */
15736
  OT_csuffix,           /* Instruction takes a conditional suffix.  */
15737
  OT_csuffixF,          /* Some forms of the instruction take a conditional
15738
                           suffix, others place 0xF where the condition field
15739
                           would be.  */
15740
  OT_cinfix3,           /* Instruction takes a conditional infix,
15741
                           beginning at character index 3.  (In
15742
                           unified mode, it becomes a suffix.)  */
15743
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
15744
                            tsts, cmps, cmns, and teqs. */
15745
  OT_cinfix3_legacy,    /* Legacy instruction takes a conditional infix at
15746
                           character index 3, even in unified mode.  Used for
15747
                           legacy instructions where suffix and infix forms
15748
                           may be ambiguous.  */
15749
  OT_csuf_or_in3,       /* Instruction takes either a conditional
15750
                           suffix or an infix at character index 3.  */
15751
  OT_odd_infix_unc,     /* This is the unconditional variant of an
15752
                           instruction that takes a conditional infix
15753
                           at an unusual position.  In unified mode,
15754
                           this variant will accept a suffix.  */
15755
  OT_odd_infix_0        /* Values greater than or equal to OT_odd_infix_0
15756
                           are the conditional variants of instructions that
15757
                           take conditional infixes in unusual positions.
15758
                           The infix appears at character index
15759
                           (tag - OT_odd_infix_0).  These are not accepted
15760
                           in unified mode.  */
15761
};
15762
 
15763
/* Subroutine of md_assemble, responsible for looking up the primary
15764
   opcode from the mnemonic the user wrote.  STR points to the
15765
   beginning of the mnemonic.
15766
 
15767
   This is not simply a hash table lookup, because of conditional
15768
   variants.  Most instructions have conditional variants, which are
15769
   expressed with a _conditional affix_ to the mnemonic.  If we were
15770
   to encode each conditional variant as a literal string in the opcode
15771
   table, it would have approximately 20,000 entries.
15772
 
15773
   Most mnemonics take this affix as a suffix, and in unified syntax,
15774
   'most' is upgraded to 'all'.  However, in the divided syntax, some
15775
   instructions take the affix as an infix, notably the s-variants of
15776
   the arithmetic instructions.  Of those instructions, all but six
15777
   have the infix appear after the third character of the mnemonic.
15778
 
15779
   Accordingly, the algorithm for looking up primary opcodes given
15780
   an identifier is:
15781
 
15782
   1. Look up the identifier in the opcode table.
15783
      If we find a match, go to step U.
15784
 
15785
   2. Look up the last two characters of the identifier in the
15786
      conditions table.  If we find a match, look up the first N-2
15787
      characters of the identifier in the opcode table.  If we
15788
      find a match, go to step CE.
15789
 
15790
   3. Look up the fourth and fifth characters of the identifier in
15791
      the conditions table.  If we find a match, extract those
15792
      characters from the identifier, and look up the remaining
15793
      characters in the opcode table.  If we find a match, go
15794
      to step CM.
15795
 
15796
   4. Fail.
15797
 
15798
   U. Examine the tag field of the opcode structure, in case this is
15799
      one of the six instructions with its conditional infix in an
15800
      unusual place.  If it is, the tag tells us where to find the
15801
      infix; look it up in the conditions table and set inst.cond
15802
      accordingly.  Otherwise, this is an unconditional instruction.
15803
      Again set inst.cond accordingly.  Return the opcode structure.
15804
 
15805
  CE. Examine the tag field to make sure this is an instruction that
15806
      should receive a conditional suffix.  If it is not, fail.
15807
      Otherwise, set inst.cond from the suffix we already looked up,
15808
      and return the opcode structure.
15809
 
15810
  CM. Examine the tag field to make sure this is an instruction that
15811
      should receive a conditional infix after the third character.
15812
      If it is not, fail.  Otherwise, undo the edits to the current
15813
      line of input and proceed as for case CE.  */
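
/* For example, "addeq" fails the whole-string lookup (step 1), but its last
   two characters match the condition table and "add" matches the opcode
   table, so it is resolved by step CE.  In the divided syntax, "addeqs" is
   only found by step 3/CM: "eq" is extracted from character index 3 and the
   remaining "adds" is looked up as the mnemonic.  */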
15814
 
15815
static const struct asm_opcode *
15816
opcode_lookup (char **str)
15817
{
15818
  char *end, *base;
15819
  char *affix;
15820
  const struct asm_opcode *opcode;
15821
  const struct asm_cond *cond;
15822
  char save[2];
15823
 
15824
  /* Scan up to the end of the mnemonic, which must end in white space,
15825
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
15826
  for (base = end = *str; *end != '\0'; end++)
15827
    if (*end == ' ' || *end == '.')
15828
      break;
15829
 
15830
  if (end == base)
15831
    return NULL;
15832
 
15833
  /* Handle a possible width suffix and/or Neon type suffix.  */
15834
  if (end[0] == '.')
15835
    {
15836
      int offset = 2;
15837
 
15838
      /* The .w and .n suffixes are only valid if the unified syntax is in
15839
         use.  */
15840
      if (unified_syntax && end[1] == 'w')
15841
        inst.size_req = 4;
15842
      else if (unified_syntax && end[1] == 'n')
15843
        inst.size_req = 2;
15844
      else
15845
        offset = 0;
15846
 
15847
      inst.vectype.elems = 0;
15848
 
15849
      *str = end + offset;
15850
 
15851
      if (end[offset] == '.')
15852
        {
15853
          /* See if we have a Neon type suffix (possible in either unified or
15854
             non-unified ARM syntax mode).  */
15855
          if (parse_neon_type (&inst.vectype, str) == FAIL)
15856
            return NULL;
15857
        }
15858
      else if (end[offset] != '\0' && end[offset] != ' ')
15859
        return NULL;
15860
    }
15861
  else
15862
    *str = end;
15863
 
15864
  /* Look for unaffixed or special-case affixed mnemonic.  */
15865
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
15866
                                                    end - base);
15867
  if (opcode)
15868
    {
15869
      /* step U */
15870
      if (opcode->tag < OT_odd_infix_0)
15871
        {
15872
          inst.cond = COND_ALWAYS;
15873
          return opcode;
15874
        }
15875
 
15876
      if (warn_on_deprecated && unified_syntax)
15877
        as_warn (_("conditional infixes are deprecated in unified syntax"));
15878
      affix = base + (opcode->tag - OT_odd_infix_0);
15879
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
15880
      gas_assert (cond);
15881
 
15882
      inst.cond = cond->value;
15883
      return opcode;
15884
    }
15885
 
15886
  /* Cannot have a conditional suffix on a mnemonic of less than two
15887
     characters.  */
15888
  if (end - base < 3)
15889
    return NULL;
15890
 
15891
  /* Look for suffixed mnemonic.  */
15892
  affix = end - 2;
15893
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
15894
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
15895
                                                    affix - base);
15896
  if (opcode && cond)
15897
    {
15898
      /* step CE */
15899
      switch (opcode->tag)
15900
        {
15901
        case OT_cinfix3_legacy:
15902
          /* Ignore conditional suffixes matched on infix only mnemonics.  */
15903
          break;
15904
 
15905
        case OT_cinfix3:
15906
        case OT_cinfix3_deprecated:
15907
        case OT_odd_infix_unc:
15908
          if (!unified_syntax)
15909
            return 0;
15910
          /* else fall through */
15911
 
15912
        case OT_csuffix:
15913
        case OT_csuffixF:
15914
        case OT_csuf_or_in3:
15915
          inst.cond = cond->value;
15916
          return opcode;
15917
 
15918
        case OT_unconditional:
15919
        case OT_unconditionalF:
15920
          if (thumb_mode)
15921
            inst.cond = cond->value;
15922
          else
15923
            {
15924
              /* Delayed diagnostic.  */
15925
              inst.error = BAD_COND;
15926
              inst.cond = COND_ALWAYS;
15927
            }
15928
          return opcode;
15929
 
15930
        default:
15931
          return NULL;
15932
        }
15933
    }
15934
 
15935
  /* Cannot have a usual-position infix on a mnemonic of less than
15936
     six characters (five would be a suffix).  */
15937
  if (end - base < 6)
15938
    return NULL;
15939
 
15940
  /* Look for infixed mnemonic in the usual position.  */
15941
  affix = base + 3;
15942
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
15943
  if (!cond)
15944
    return NULL;
15945
 
15946
  memcpy (save, affix, 2);
15947
  memmove (affix, affix + 2, (end - affix) - 2);
15948
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
15949
                                                    (end - base) - 2);
15950
  memmove (affix + 2, affix, (end - affix) - 2);
15951
  memcpy (affix, save, 2);
15952
 
15953
  if (opcode
15954
      && (opcode->tag == OT_cinfix3
15955
          || opcode->tag == OT_cinfix3_deprecated
15956
          || opcode->tag == OT_csuf_or_in3
15957
          || opcode->tag == OT_cinfix3_legacy))
15958
    {
15959
      /* Step CM.  */
15960
      if (warn_on_deprecated && unified_syntax
15961
          && (opcode->tag == OT_cinfix3
15962
              || opcode->tag == OT_cinfix3_deprecated))
15963
        as_warn (_("conditional infixes are deprecated in unified syntax"));
15964
 
15965
      inst.cond = cond->value;
15966
      return opcode;
15967
    }
15968
 
15969
  return NULL;
15970
}
15971
 
15972
/* This function generates an initial IT instruction, leaving its block
15973
   virtually open for the new instructions. Eventually,
15974
   the mask will be updated by now_it_add_mask () each time
15975
   a new instruction needs to be included in the IT block.
15976
   Finally, the block is closed with close_automatic_it_block ().
15977
   The block closure can be requested either from md_assemble (),
15978
   a tencode (), or due to a label hook.  */
15979
 
15980
static void
15981
new_automatic_it_block (int cond)
15982
{
15983
  now_it.state = AUTOMATIC_IT_BLOCK;
15984
  now_it.mask = 0x18;
15985
  now_it.cc = cond;
15986
  now_it.block_length = 1;
15987
  mapping_state (MAP_THUMB);
15988
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
15989
}
15990
 
15991
/* Close an automatic IT block.
15992
   See comments in new_automatic_it_block ().  */
15993
 
15994
static void
15995
close_automatic_it_block (void)
15996
{
15997
  now_it.mask = 0x10;
15998
  now_it.block_length = 0;
15999
}
16000
 
16001
/* Update the mask of the current automatically-generated IT
16002
   instruction. See comments in new_automatic_it_block ().  */
16003
 
16004
static void
16005
now_it_add_mask (int cond)
16006
{
16007
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
16008
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
16009
                                              | ((bitvalue) << (nbit)))
16010
  const int resulting_bit = (cond & 1);
16011
 
16012
  now_it.mask &= 0xf;
16013
  now_it.mask = SET_BIT_VALUE (now_it.mask,
16014
                                   resulting_bit,
16015
                                  (5 - now_it.block_length));
16016
  now_it.mask = SET_BIT_VALUE (now_it.mask,
16017
                                   1,
16018
                                   ((5 - now_it.block_length) - 1) );
16019
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);
16020
 
16021
#undef CLEAR_BIT
16022
#undef SET_BIT_VALUE
16023
}
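
/* Illustrative trace of the mask handling above, for cond 0x0 (EQ):
   new_automatic_it_block emits 0xbf08 ("IT EQ", mask 0x18 truncated to 0x8).
   Merging a second EQ instruction (block_length == 2) clears bit 3 and sets
   bit 2, patching the IT to 0xbf04 ("ITT EQ"); a third EQ instruction
   yields 0xbf02 ("ITTT EQ").  */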
16024
 
16025
/* The IT blocks handling machinery is accessed through these functions:
16026
     it_fsm_pre_encode ()               from md_assemble ()
16027
     set_it_insn_type ()                optional, from the tencode functions
16028
     set_it_insn_type_last ()           ditto
16029
     in_it_block ()                     ditto
16030
     it_fsm_post_encode ()              from md_assemble ()
16031
     force_automatic_it_block_close ()  from label handling functions
16032
 
16033
   Rationale:
16034
     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
16035
        initializing the IT insn type with a generic initial value depending
16036
        on the inst.condition.
16037
     2) During the tencode function, two things may happen:
16038
        a) The tencode function overrides the IT insn type by
16039
           calling either set_it_insn_type (type) or set_it_insn_type_last ().
16040
        b) The tencode function queries the IT block state by
16041
           calling in_it_block () (i.e. to determine narrow/not narrow mode).
16042
 
16043
        Both set_it_insn_type and in_it_block run the internal FSM state
16044
        handling function (handle_it_state), because: a) setting the IT insn
16045
        type may result in an invalid state (exiting the function),
16046
        and b) querying the state requires the FSM to be updated.
16047
        Specifically we want to avoid creating an IT block for conditional
16048
        branches, so it_fsm_pre_encode is actually a guess and we can't
16049
        determine whether an IT block is required until the tencode () routine
16050
        has decided what type of instruction this actually is.
16051
        Because of this, if set_it_insn_type and in_it_block have to be used,
16052
        set_it_insn_type has to be called first.
16053
 
16054
        set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
16055
        determines the insn IT type depending on the inst.cond code.
16056
        When a tencode () routine encodes an instruction that can be
16057
        either outside an IT block, or, in the case of being inside, has to be
16058
        the last one, set_it_insn_type_last () will determine the proper
16059
        IT instruction type based on the inst.cond code. Otherwise,
16060
        set_it_insn_type can be called for overriding that logic or
16061
        for covering other cases.
16062
 
16063
        Calling handle_it_state () may not transition the IT block state to
16064
        OUTSIDE_IT_BLOCK immediately, since the (current) state could be
16065
        still queried. Instead, if the FSM determines that the state should
16066
        be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
16067
        after the tencode () function: that's what it_fsm_post_encode () does.
16068
 
16069
        Since in_it_block () calls the state handling function to get an
16070
        updated state, an error may occur (due to invalid insns combination).
16071
        In that case, inst.error is set.
16072
        Therefore, inst.error has to be checked after the execution of
16073
        the tencode () routine.
16074
 
16075
     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
16076
        any pending state change (if any) that didn't take place in
16077
        handle_it_state () as explained above.  */
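
/* Concretely: assembling a conditional Thumb instruction such as
   "addeq r0, r0, #1" outside any IT block, with implicit IT generation
   enabled, makes it_fsm_pre_encode mark it INSIDE_IT_INSN; handle_it_state
   then opens an automatic IT block ("IT EQ") around it, and
   it_fsm_post_encode leaves that block open so that following EQ
   instructions can be merged into it.  */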
16078
 
16079
static void
16080
it_fsm_pre_encode (void)
16081
{
16082
  if (inst.cond != COND_ALWAYS)
16083
    inst.it_insn_type = INSIDE_IT_INSN;
16084
  else
16085
    inst.it_insn_type = OUTSIDE_IT_INSN;
16086
 
16087
  now_it.state_handled = 0;
16088
}
16089
 
16090
/* IT state FSM handling function.  */
16091
 
16092
static int
16093
handle_it_state (void)
16094
{
16095
  now_it.state_handled = 1;
16096
 
16097
  switch (now_it.state)
16098
    {
16099
    case OUTSIDE_IT_BLOCK:
16100
      switch (inst.it_insn_type)
16101
        {
16102
        case OUTSIDE_IT_INSN:
16103
          break;
16104
 
16105
        case INSIDE_IT_INSN:
16106
        case INSIDE_IT_LAST_INSN:
16107
          if (thumb_mode == 0)
16108
            {
16109
              if (unified_syntax
16110
                  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
16111
                as_tsktsk (_("Warning: conditional outside an IT block"\
16112
                             " for Thumb."));
16113
            }
16114
          else
16115
            {
16116
              if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
16117
                  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
16118
                {
16119
                  /* Automatically generate the IT instruction.  */
16120
                  new_automatic_it_block (inst.cond);
16121
                  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
16122
                    close_automatic_it_block ();
16123
                }
16124
              else
16125
                {
16126
                  inst.error = BAD_OUT_IT;
16127
                  return FAIL;
16128
                }
16129
            }
16130
          break;
16131
 
16132
        case IF_INSIDE_IT_LAST_INSN:
16133
        case NEUTRAL_IT_INSN:
16134
          break;
16135
 
16136
        case IT_INSN:
16137
          now_it.state = MANUAL_IT_BLOCK;
16138
          now_it.block_length = 0;
16139
          break;
16140
        }
16141
      break;
16142
 
16143
    case AUTOMATIC_IT_BLOCK:
16144
      /* Three things may happen now:
16145
         a) We should increment current it block size;
16146
         b) We should close current it block (closing insn or 4 insns);
16147
         c) We should close current it block and start a new one (due
16148
         to incompatible conditions or
16149
         4 insns-length block reached).  */
16150
 
16151
      switch (inst.it_insn_type)
16152
        {
16153
        case OUTSIDE_IT_INSN:
16154
          /* The closure of the block shall happen immediately,
16155
             so any in_it_block () call reports the block as closed.  */
16156
          force_automatic_it_block_close ();
16157
          break;
16158
 
16159
        case INSIDE_IT_INSN:
16160
        case INSIDE_IT_LAST_INSN:
16161
        case IF_INSIDE_IT_LAST_INSN:
16162
          now_it.block_length++;
16163
 
16164
          if (now_it.block_length > 4
16165
              || !now_it_compatible (inst.cond))
16166
            {
16167
              force_automatic_it_block_close ();
16168
              if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
16169
                new_automatic_it_block (inst.cond);
16170
            }
16171
          else
16172
            {
16173
              now_it_add_mask (inst.cond);
16174
            }
16175
 
16176
          if (now_it.state == AUTOMATIC_IT_BLOCK
16177
              && (inst.it_insn_type == INSIDE_IT_LAST_INSN
16178
                  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
16179
            close_automatic_it_block ();
16180
          break;
16181
 
16182
        case NEUTRAL_IT_INSN:
16183
          now_it.block_length++;
16184
 
16185
          if (now_it.block_length > 4)
16186
            force_automatic_it_block_close ();
16187
          else
16188
            now_it_add_mask (now_it.cc & 1);
16189
          break;
16190
 
16191
        case IT_INSN:
16192
          close_automatic_it_block ();
16193
          now_it.state = MANUAL_IT_BLOCK;
16194
          break;
16195
        }
16196
      break;
16197
 
16198
    case MANUAL_IT_BLOCK:
16199
      {
16200
        /* Check conditional suffixes.  */
16201
        const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
16202
        int is_last;
16203
        now_it.mask <<= 1;
16204
        now_it.mask &= 0x1f;
16205
        is_last = (now_it.mask == 0x10);
16206
 
16207
        switch (inst.it_insn_type)
16208
          {
16209
          case OUTSIDE_IT_INSN:
16210
            inst.error = BAD_NOT_IT;
16211
            return FAIL;
16212
 
16213
          case INSIDE_IT_INSN:
16214
            if (cond != inst.cond)
16215
              {
16216
                inst.error = BAD_IT_COND;
16217
                return FAIL;
16218
              }
16219
            break;
16220
 
16221
          case INSIDE_IT_LAST_INSN:
16222
          case IF_INSIDE_IT_LAST_INSN:
16223
            if (cond != inst.cond)
16224
              {
16225
                inst.error = BAD_IT_COND;
16226
                return FAIL;
16227
              }
16228
            if (!is_last)
16229
              {
16230
                inst.error = BAD_BRANCH;
16231
                return FAIL;
16232
              }
16233
            break;
16234
 
16235
          case NEUTRAL_IT_INSN:
16236
            /* The BKPT instruction is unconditional even in an IT block.  */
16237
            break;
16238
 
16239
          case IT_INSN:
16240
            inst.error = BAD_IT_IT;
16241
            return FAIL;
16242
          }
16243
      }
16244
      break;
16245
    }
16246
 
16247
  return SUCCESS;
16248
}
16249
 
16250
static void
16251
it_fsm_post_encode (void)
16252
{
16253
  int is_last;
16254
 
16255
  if (!now_it.state_handled)
16256
    handle_it_state ();
16257
 
16258
  is_last = (now_it.mask == 0x10);
16259
  if (is_last)
16260
    {
16261
      now_it.state = OUTSIDE_IT_BLOCK;
16262
      now_it.mask = 0;
16263
    }
16264
}
16265
 
16266
static void
16267
force_automatic_it_block_close (void)
16268
{
16269
  if (now_it.state == AUTOMATIC_IT_BLOCK)
16270
    {
16271
      close_automatic_it_block ();
16272
      now_it.state = OUTSIDE_IT_BLOCK;
16273
      now_it.mask = 0;
16274
    }
16275
}
16276
 
16277
static int
16278
in_it_block (void)
16279
{
16280
  if (!now_it.state_handled)
16281
    handle_it_state ();
16282
 
16283
  return now_it.state != OUTSIDE_IT_BLOCK;
16284
}
16285
 
16286
void
16287
md_assemble (char *str)
16288
{
16289
  char *p = str;
16290
  const struct asm_opcode * opcode;
16291
 
16292
  /* Align the previous label if needed.  */
16293
  if (last_label_seen != NULL)
16294
    {
16295
      symbol_set_frag (last_label_seen, frag_now);
16296
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
16297
      S_SET_SEGMENT (last_label_seen, now_seg);
16298
    }
16299
 
16300
  memset (&inst, '\0', sizeof (inst));
16301
  inst.reloc.type = BFD_RELOC_UNUSED;
16302
 
16303
  opcode = opcode_lookup (&p);
16304
  if (!opcode)
16305
    {
16306
      /* It wasn't an instruction, but it might be a register alias of
16307
         the form alias .req reg, or a Neon .dn/.qn directive.  */
16308
      if (! create_register_alias (str, p)
16309
          && ! create_neon_reg_alias (str, p))
16310
        as_bad (_("bad instruction `%s'"), str);
16311
 
16312
      return;
16313
    }
16314
 
16315
  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
16316
    as_warn (_("s suffix on comparison instruction is deprecated"));
16317
 
16318
  /* The value which unconditional instructions should have in place of the
16319
     condition field.  */
16320
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
16321
 
16322
  if (thumb_mode)
16323
    {
16324
      arm_feature_set variant;
16325
 
16326
      variant = cpu_variant;
16327
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
16328
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
16329
        ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
16330
      /* Check that this instruction is supported for this CPU.  */
16331
      if (!opcode->tvariant
16332
          || (thumb_mode == 1
16333
              && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
16334
        {
16335
          as_bad (_("selected processor does not support Thumb mode `%s'"), str);
16336
          return;
16337
        }
16338
      if (inst.cond != COND_ALWAYS && !unified_syntax
16339
          && opcode->tencode != do_t_branch)
16340
        {
16341
          as_bad (_("Thumb does not support conditional execution"));
16342
          return;
16343
        }
16344
 
16345
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
16346
        {
16347
          if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
16348
              && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
16349
                   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
16350
            {
16351
              /* Two things are addressed here.
16352
                 1) Implicitly require narrow instructions on Thumb-1.
16353
                    This avoids relaxation accidentally introducing Thumb-2
16354
                     instructions.
16355
                 2) Reject wide instructions in non Thumb-2 cores.  */
16356
              if (inst.size_req == 0)
16357
                inst.size_req = 2;
16358
              else if (inst.size_req == 4)
16359
                {
16360
                  as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
16361
                  return;
16362
                }
16363
            }
16364
        }
16365
 
16366
      inst.instruction = opcode->tvalue;
16367
 
16368
      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
16369
        {
16370
          /* Prepare the it_insn_type for those encodings that don't set
16371
             it.  */
16372
          it_fsm_pre_encode ();
16373
 
16374
          opcode->tencode ();
16375
 
16376
          it_fsm_post_encode ();
16377
        }
16378
 
16379
      if (!(inst.error || inst.relax))
16380
        {
16381
          gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
16382
          inst.size = (inst.instruction > 0xffff ? 4 : 2);
16383
          if (inst.size_req && inst.size_req != inst.size)
16384
            {
16385
              as_bad (_("cannot honor width suffix -- `%s'"), str);
16386
              return;
16387
            }
16388
        }
16389
 
16390
      /* Something has gone badly wrong if we try to relax a fixed size
16391
         instruction.  */
16392
      gas_assert (inst.size_req == 0 || !inst.relax);
16393
 
16394
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16395
                              *opcode->tvariant);
16396
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
16397
         set those bits when Thumb-2 32-bit instructions are seen, i.e.
16398
         anything other than bl/blx and v6-M instructions.
16399
         This is overly pessimistic for relaxable instructions.  */
16400
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
16401
           || inst.relax)
16402
          && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
16403
               || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
16404
        ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16405
                                arm_ext_v6t2);
16406
 
16407
      check_neon_suffixes;
16408
 
16409
      if (!inst.error)
16410
        {
16411
          mapping_state (MAP_THUMB);
16412
        }
16413
    }
16414
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
16415
    {
16416
      bfd_boolean is_bx;
16417
 
16418
      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
16419
      is_bx = (opcode->aencode == do_bx);
16420
 
16421
      /* Check that this instruction is supported for this CPU.  */
16422
      if (!(is_bx && fix_v4bx)
16423
          && !(opcode->avariant &&
16424
               ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
16425
        {
16426
          as_bad (_("selected processor does not support ARM mode `%s'"), str);
16427
          return;
16428
        }
16429
      if (inst.size_req)
16430
        {
16431
          as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
16432
          return;
16433
        }
16434
 
16435
      inst.instruction = opcode->avalue;
16436
      if (opcode->tag == OT_unconditionalF)
16437
        inst.instruction |= 0xF << 28;
16438
      else
16439
        inst.instruction |= inst.cond << 28;
16440
      inst.size = INSN_SIZE;
16441
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
16442
        {
16443
          it_fsm_pre_encode ();
16444
          opcode->aencode ();
16445
          it_fsm_post_encode ();
16446
        }
16447
      /* Arm mode bx is marked as both v4T and v5 because it's still required
16448
         on a hypothetical non-thumb v5 core.  */
16449
      if (is_bx)
16450
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
16451
      else
16452
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
16453
                                *opcode->avariant);
16454
 
16455
      check_neon_suffixes;
16456
 
16457
      if (!inst.error)
16458
        {
16459
          mapping_state (MAP_ARM);
16460
        }
16461
    }
16462
  else
16463
    {
16464
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
16465
                "-- `%s'"), str);
16466
      return;
16467
    }
16468
  output_inst (str);
16469
}
16470
 
16471
static void
16472
check_it_blocks_finished (void)
16473
{
16474
#ifdef OBJ_ELF
16475
  asection *sect;
16476
 
16477
  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
16478
    if (seg_info (sect)->tc_segment_info_data.current_it.state
16479
        == MANUAL_IT_BLOCK)
16480
      {
16481
        as_warn (_("section '%s' finished with an open IT block."),
16482
                 sect->name);
16483
      }
16484
#else
16485
  if (now_it.state == MANUAL_IT_BLOCK)
16486
    as_warn (_("file finished with an open IT block."));
16487
#endif
16488
}
16489
 
16490
/* Various frobbings of labels and their addresses.  */
16491
 
16492
void
16493
arm_start_line_hook (void)
16494
{
16495
  last_label_seen = NULL;
16496
}
16497
 
16498
void
16499
arm_frob_label (symbolS * sym)
16500
{
16501
  last_label_seen = sym;
16502
 
16503
  ARM_SET_THUMB (sym, thumb_mode);
16504
 
16505
#if defined OBJ_COFF || defined OBJ_ELF
16506
  ARM_SET_INTERWORK (sym, support_interwork);
16507
#endif
16508
 
16509
  force_automatic_it_block_close ();
16510
 
16511
  /* Note - do not allow local symbols (.Lxxx) to be labelled
16512
     as Thumb functions.  This is because these labels, whilst
16513
     they exist inside Thumb code, are not the entry points for
16514
     possible ARM->Thumb calls.  Also, these labels can be used
16515
     as part of a computed goto or switch statement.  For example, gcc
16516
     can generate code that looks like this:
16517
 
16518
                ldr  r2, [pc, .Laaa]
16519
                lsl  r3, r3, #2
16520
                ldr  r2, [r3, r2]
16521
                mov  pc, r2
16522
 
16523
       .Lbbb:  .word .Lxxx
16524
       .Lccc:  .word .Lyyy
16525
       ..etc...
16526
       .Laaa:   .word .Lbbb
16527
 
16528
     The first instruction loads the address of the jump table.
16529
     The second instruction converts a table index into a byte offset.
16530
     The third instruction gets the jump address out of the table.
16531
     The fourth instruction performs the jump.
16532
 
16533
     If the address stored at .Laaa is that of a symbol which has the
16534
     Thumb_Func bit set, then the linker will arrange for this address
16535
     to have the bottom bit set, which in turn would mean that the
16536
     address computation performed by the third instruction would end
16537
     up with the bottom bit set.  Since the ARM is capable of unaligned
16538
     word loads, the instruction would then load the incorrect address
16539
     out of the jump table, and chaos would ensue.  */
16540
  if (label_is_thumb_function_name
16541
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
16542
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
16543
    {
16544
      /* When the address of a Thumb function is taken the bottom
16545
         bit of that address should be set.  This will allow
16546
         interworking between Arm and Thumb functions to work
16547
         correctly.  */
16548
 
16549
      THUMB_SET_FUNC (sym, 1);
16550
 
16551
      label_is_thumb_function_name = FALSE;
16552
    }
16553
 
16554
  dwarf2_emit_label (sym);
16555
}
16556
 
16557
bfd_boolean
16558
arm_data_in_code (void)
16559
{
16560
  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
16561
    {
16562
      *input_line_pointer = '/';
16563
      input_line_pointer += 5;
16564
      *input_line_pointer = 0;
16565
      return TRUE;
16566
    }
16567
 
16568
  return FALSE;
16569
}
16570
 
16571
char *
16572
arm_canonicalize_symbol_name (char * name)
16573
{
16574
  int len;
16575
 
16576
  if (thumb_mode && (len = strlen (name)) > 5
16577
      && streq (name + len - 5, "/data"))
16578
    *(name + len - 5) = 0;
16579
 
16580
  return name;
16581
}
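/* Illustrative note (not part of the original source): the two routines
   above cooperate on a single marker.  In Thumb mode, arm_data_in_code
   recognizes "data:" immediately after the current character, overwrites
   that character with '/', steps past "data:" and NUL-terminates there,
   so the symbol being scanned ends up named "<label>/data".  Later,
   arm_canonicalize_symbol_name strips a trailing "/data", mapping the
   decorated name back to the bare label.  */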
16582
 
16583
/* Table of all register names defined by default.  The user can
16584
   define additional names with .req.  Note that all register names
16585
   should appear in both upper and lowercase variants.  Some registers
16586
   also have mixed-case names.  */
16587
 
16588
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
16589
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
16590
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
16591
#define REGSET(p,t) \
16592
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
16593
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
16594
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
16595
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
16596
#define REGSETH(p,t) \
16597
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
16598
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
16599
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
16600
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
16601
#define REGSET2(p,t) \
16602
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
16603
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
16604
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
16605
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
16606
#define SPLRBANK(base,bank,t) \
16607
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
16608
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
16609
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
16610
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
16611
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
16612
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
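/* Illustrative note (not part of the original source): each REGDEF above
   yields a reg_entry initializer such as { "r0", 0, REG_TYPE_RN, TRUE, 0 },
   so a single REGSET(r, RN) expands to the sixteen entries r0..r15.
   REGNUM2 stores 2 * n as the register number, so REGSET2(q, NQ) gives
   "q1" the encoded value 2 -- presumably because each Q register overlays
   two D registers.  */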
16613
 
16614
static const struct reg_entry reg_names[] =
16615
{
16616
  /* ARM integer registers.  */
16617
  REGSET(r, RN), REGSET(R, RN),
16618
 
16619
  /* ATPCS synonyms.  */
16620
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
16621
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
16622
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
16623
 
16624
  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
16625
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
16626
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
16627
 
16628
  /* Well-known aliases.  */
16629
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
16630
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
16631
 
16632
  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
16633
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
16634
 
16635
  /* Coprocessor numbers.  */
16636
  REGSET(p, CP), REGSET(P, CP),
16637
 
16638
  /* Coprocessor register numbers.  The "cr" variants are for backward
16639
     compatibility.  */
16640
  REGSET(c,  CN), REGSET(C, CN),
16641
  REGSET(cr, CN), REGSET(CR, CN),
16642
 
16643
  /* ARM banked registers.  */
16644
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
16645
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
16646
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
16647
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
16648
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
16649
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
16650
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
16651
 
16652
  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
16653
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
16654
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
16655
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
16656
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
16657
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(SP_fiq,512|(13<<16),RNB),
16658
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
16659
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
16660
 
16661
  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
16662
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
16663
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
16664
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
16665
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
16666
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
16667
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
16668
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
16669
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
16670
 
16671
  /* FPA registers.  */
16672
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
16673
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
16674
 
16675
  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
16676
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
16677
 
16678
  /* VFP SP registers.  */
16679
  REGSET(s,VFS),  REGSET(S,VFS),
16680
  REGSETH(s,VFS), REGSETH(S,VFS),
16681
 
16682
  /* VFP DP Registers.  */
16683
  REGSET(d,VFD),  REGSET(D,VFD),
16684
  /* Extra Neon DP registers.  */
16685
  REGSETH(d,VFD), REGSETH(D,VFD),
16686
 
16687
  /* Neon QP registers.  */
16688
  REGSET2(q,NQ),  REGSET2(Q,NQ),
16689
 
16690
  /* VFP control registers.  */
16691
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
16692
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
16693
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
16694
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
16695
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
16696
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
16697
 
16698
  /* Maverick DSP coprocessor registers.  */
16699
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
16700
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
16701
 
16702
  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
16703
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
16704
  REGDEF(dspsc,0,DSPSC),
16705
 
16706
  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
16707
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
16708
  REGDEF(DSPSC,0,DSPSC),
16709
 
16710
  /* iWMMXt data registers - p0, c0-15.  */
16711
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
16712
 
16713
  /* iWMMXt control registers - p1, c0-3.  */
16714
  REGDEF(wcid,  0,MMXWC),  REGDEF(wCID,   0,MMXWC),  REGDEF(WCID,  0,MMXWC),
16715
  REGDEF(wcon,  1,MMXWC),  REGDEF(wCon,  1,MMXWC),  REGDEF(WCON,  1,MMXWC),
16716
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
16717
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
16718
 
16719
  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
16720
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
16721
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
16722
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
16723
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
16724
 
16725
  /* XScale accumulator registers.  */
16726
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
16727
};
16728
#undef REGDEF
16729
#undef REGNUM
16730
#undef REGSET
16731
 
16732
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
16733
   within psr_required_here.  */
16734
static const struct asm_psr psrs[] =
16735
{
16736
  /* Backward compatibility notation.  Note that "all" is no longer
16737
     truly all possible PSR bits.  */
16738
  {"all",  PSR_c | PSR_f},
16739
  {"flg",  PSR_f},
16740
  {"ctl",  PSR_c},
16741
 
16742
  /* Individual flags.  */
16743
  {"f",    PSR_f},
16744
  {"c",    PSR_c},
16745
  {"x",    PSR_x},
16746
  {"s",    PSR_s},
16747
 
16748
  /* Combinations of flags.  */
16749
  {"fs",   PSR_f | PSR_s},
16750
  {"fx",   PSR_f | PSR_x},
16751
  {"fc",   PSR_f | PSR_c},
16752
  {"sf",   PSR_s | PSR_f},
16753
  {"sx",   PSR_s | PSR_x},
16754
  {"sc",   PSR_s | PSR_c},
16755
  {"xf",   PSR_x | PSR_f},
16756
  {"xs",   PSR_x | PSR_s},
16757
  {"xc",   PSR_x | PSR_c},
16758
  {"cf",   PSR_c | PSR_f},
16759
  {"cs",   PSR_c | PSR_s},
16760
  {"cx",   PSR_c | PSR_x},
16761
  {"fsx",  PSR_f | PSR_s | PSR_x},
16762
  {"fsc",  PSR_f | PSR_s | PSR_c},
16763
  {"fxs",  PSR_f | PSR_x | PSR_s},
16764
  {"fxc",  PSR_f | PSR_x | PSR_c},
16765
  {"fcs",  PSR_f | PSR_c | PSR_s},
16766
  {"fcx",  PSR_f | PSR_c | PSR_x},
16767
  {"sfx",  PSR_s | PSR_f | PSR_x},
16768
  {"sfc",  PSR_s | PSR_f | PSR_c},
16769
  {"sxf",  PSR_s | PSR_x | PSR_f},
16770
  {"sxc",  PSR_s | PSR_x | PSR_c},
16771
  {"scf",  PSR_s | PSR_c | PSR_f},
16772
  {"scx",  PSR_s | PSR_c | PSR_x},
16773
  {"xfs",  PSR_x | PSR_f | PSR_s},
16774
  {"xfc",  PSR_x | PSR_f | PSR_c},
16775
  {"xsf",  PSR_x | PSR_s | PSR_f},
16776
  {"xsc",  PSR_x | PSR_s | PSR_c},
16777
  {"xcf",  PSR_x | PSR_c | PSR_f},
16778
  {"xcs",  PSR_x | PSR_c | PSR_s},
16779
  {"cfs",  PSR_c | PSR_f | PSR_s},
16780
  {"cfx",  PSR_c | PSR_f | PSR_x},
16781
  {"csf",  PSR_c | PSR_s | PSR_f},
16782
  {"csx",  PSR_c | PSR_s | PSR_x},
16783
  {"cxf",  PSR_c | PSR_x | PSR_f},
16784
  {"cxs",  PSR_c | PSR_x | PSR_s},
16785
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
16786
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
16787
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
16788
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
16789
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
16790
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
16791
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
16792
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
16793
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
16794
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
16795
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
16796
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
16797
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
16798
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
16799
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
16800
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
16801
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
16802
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
16803
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
16804
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
16805
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
16806
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
16807
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
16808
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
16809
};
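/* Illustrative note (not part of the original source): the table above
   simply enumerates every ordering of the f, s, x and c flag characters,
   so any permutation names the same field mask -- e.g. "fc" and "cf"
   both resolve to PSR_f | PSR_c.  */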
16810
 
16811
/* Table of V7M psr names.  */
16812
static const struct asm_psr v7m_psrs[] =
16813
{
16814
  {"apsr",        0 }, {"APSR",          0 },
16815
  {"iapsr",       1 }, {"IAPSR",        1 },
16816
  {"eapsr",       2 }, {"EAPSR",        2 },
16817
  {"psr",         3 }, {"PSR",          3 },
16818
  {"xpsr",        3 }, {"XPSR",         3 }, {"xPSR",     3 },
16819
  {"ipsr",        5 }, {"IPSR",         5 },
16820
  {"epsr",        6 }, {"EPSR",         6 },
16821
  {"iepsr",       7 }, {"IEPSR",        7 },
16822
  {"msp",         8 }, {"MSP",          8 },
16823
  {"psp",         9 }, {"PSP",          9 },
16824
  {"primask",     16}, {"PRIMASK",      16},
16825
  {"basepri",     17}, {"BASEPRI",      17},
16826
  {"basepri_max", 18}, {"BASEPRI_MAX",  18},
16827
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
16828
  {"faultmask",   19}, {"FAULTMASK",    19},
16829
  {"control",     20}, {"CONTROL",      20}
16830
};
16831
 
16832
/* Table of all shift-in-operand names.  */
16833
static const struct asm_shift_name shift_names [] =
16834
{
16835
  { "asl", SHIFT_LSL },  { "ASL", SHIFT_LSL },
16836
  { "lsl", SHIFT_LSL },  { "LSL", SHIFT_LSL },
16837
  { "lsr", SHIFT_LSR },  { "LSR", SHIFT_LSR },
16838
  { "asr", SHIFT_ASR },  { "ASR", SHIFT_ASR },
16839
  { "ror", SHIFT_ROR },  { "ROR", SHIFT_ROR },
16840
  { "rrx", SHIFT_RRX },  { "RRX", SHIFT_RRX }
16841
};
16842
 
16843
/* Table of all explicit relocation names.  */
16844
#ifdef OBJ_ELF
16845
static struct reloc_entry reloc_names[] =
16846
{
16847
  { "got",     BFD_RELOC_ARM_GOT32   },  { "GOT",     BFD_RELOC_ARM_GOT32   },
16848
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },  { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
16849
  { "plt",     BFD_RELOC_ARM_PLT32   },  { "PLT",     BFD_RELOC_ARM_PLT32   },
16850
  { "target1", BFD_RELOC_ARM_TARGET1 },  { "TARGET1", BFD_RELOC_ARM_TARGET1 },
16851
  { "target2", BFD_RELOC_ARM_TARGET2 },  { "TARGET2", BFD_RELOC_ARM_TARGET2 },
16852
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },  { "SBREL",   BFD_RELOC_ARM_SBREL32 },
16853
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
16854
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
16855
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
16856
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
16857
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
16858
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
16859
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
16860
        { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
16861
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
16862
        { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
16863
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
16864
        { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
16865
};
16866
#endif
16867
 
16868
/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
16869
static const struct asm_cond conds[] =
16870
{
16871
  {"eq", 0x0},
16872
  {"ne", 0x1},
16873
  {"cs", 0x2}, {"hs", 0x2},
16874
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
16875
  {"mi", 0x4},
16876
  {"pl", 0x5},
16877
  {"vs", 0x6},
16878
  {"vc", 0x7},
16879
  {"hi", 0x8},
16880
  {"ls", 0x9},
16881
  {"ge", 0xa},
16882
  {"lt", 0xb},
16883
  {"gt", 0xc},
16884
  {"le", 0xd},
16885
  {"al", 0xe}
16886
};
16887
 
16888
static struct asm_barrier_opt barrier_opt_names[] =
16889
{
16890
  { "sy",    0xf }, { "SY",    0xf },
16891
  { "un",    0x7 }, { "UN",    0x7 },
16892
  { "st",    0xe }, { "ST",    0xe },
16893
  { "unst",  0x6 }, { "UNST",  0x6 },
16894
  { "ish",   0xb }, { "ISH",   0xb },
16895
  { "sh",    0xb }, { "SH",    0xb },
16896
  { "ishst", 0xa }, { "ISHST", 0xa },
16897
  { "shst",  0xa }, { "SHST",  0xa },
16898
  { "nsh",   0x7 }, { "NSH",   0x7 },
16899
  { "nshst", 0x6 }, { "NSHST", 0x6 },
16900
  { "osh",   0x3 }, { "OSH",   0x3 },
16901
  { "oshst", 0x2 }, { "OSHST", 0x2 }
16902
};
16903
 
16904
/* Table of ARM-format instructions.    */
16905
 
16906
/* Macros for gluing together operand strings.  N.B. In all cases
16907
   other than OPS0, the trailing OP_stop comes from default
16908
   zero-initialization of the unspecified elements of the array.  */
16909
#define OPS0()            { OP_stop, }
16910
#define OPS1(a)           { OP_##a, }
16911
#define OPS2(a,b)         { OP_##a,OP_##b, }
16912
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
16913
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
16914
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
16915
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
16916
 
16917
/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
16918
   This is useful when mixing operands for ARM and THUMB, i.e. using the
16919
   MIX_ARM_THUMB_OPERANDS macro.
16920
   In order to use these macros, prefix the number of operands with _
16921
   e.g. _3.  */
16922
#define OPS_1(a)           { a, }
16923
#define OPS_2(a,b)         { a,b, }
16924
#define OPS_3(a,b,c)       { a,b,c, }
16925
#define OPS_4(a,b,c,d)     { a,b,c,d, }
16926
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
16927
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
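/* Illustrative note (not part of the original source): OPS3 (RR, oRR, SH)
   expands to the initializer { OP_RR, OP_oRR, OP_SH, }; the remaining
   slots of the fixed-size operand array are left as OP_stop by the
   zero-initialization described above.  The underscore forms would be
   written OPS_3 (OP_RR, OP_oRR, OP_SH) to build the same initializer when
   ARM and Thumb operands are mixed explicitly.  */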
16928
 
16929
/* These macros abstract out the exact format of the mnemonic table and
16930
   save some repeated characters.  */
16931
 
16932
/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
16933
#define TxCE(mnem, op, top, nops, ops, ae, te) \
16934
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
16935
    THUMB_VARIANT, do_##ae, do_##te }
16936
 
16937
/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
16938
   a T_MNEM_xyz enumerator.  */
16939
#define TCE(mnem, aop, top, nops, ops, ae, te) \
16940
      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
16941
#define tCE(mnem, aop, top, nops, ops, ae, te) \
16942
      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
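/* Illustrative note (not part of the original source): once ARM_VARIANT
   and THUMB_VARIANT have been #defined (as is done just before the
   insns[] table below), an entry such as
       tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c)
   expands, roughly, to
       { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000,
         T_MNEM_and, ARM_VARIANT, THUMB_VARIANT, do_arit, do_t_arit3c }
   i.e. the ARM opcode is a hex literal while the Thumb field is a
   T_MNEM_xyz enumerator, exactly as the TCE/tCE comment describes.  */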
16943
 
16944
/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
16945
   infix after the third character.  */
16946
#define TxC3(mnem, op, top, nops, ops, ae, te) \
16947
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
16948
    THUMB_VARIANT, do_##ae, do_##te }
16949
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
16950
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
16951
    THUMB_VARIANT, do_##ae, do_##te }
16952
#define TC3(mnem, aop, top, nops, ops, ae, te) \
16953
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
16954
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
16955
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
16956
#define tC3(mnem, aop, top, nops, ops, ae, te) \
16957
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16958
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
16959
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16960
 
16961
/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
16962
   appear in the condition table.  */
16963
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te)   \
16964
  { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
16965
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
16966
 
16967
#define TxCM(m1, m2, op, top, nops, ops, ae, te)        \
16968
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te),       \
16969
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te),       \
16970
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te),       \
16971
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te),       \
16972
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te),       \
16973
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te),       \
16974
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te),       \
16975
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te),       \
16976
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te),       \
16977
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te),       \
16978
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te),       \
16979
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te),       \
16980
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te),       \
16981
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te),       \
16982
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te),       \
16983
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te),       \
16984
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te),       \
16985
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te),       \
16986
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
16987
 
16988
#define TCM(m1,m2, aop, top, nops, ops, ae, te)         \
16989
      TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
16990
#define tCM(m1,m2, aop, top, nops, ops, ae, te)         \
16991
      TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
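/* Illustrative note (not part of the original source): the TxCM_ family
   pastes the condition between the two mnemonic halves, so an entry like
   tCM("ld","sh", ...) in the table below generates "ldsh", "ldeqsh",
   "ldnesh", ... "ldalsh" -- one table row per condition -- with the
   OT_odd_infix_* tag recording where the condition was spliced in.  */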
16992
 
16993
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
16994
   field is still 0xE.  Many of the Thumb variants can be executed
16995
   conditionally, so this is checked separately.  */
16996
#define TUE(mnem, op, top, nops, ops, ae, te)                           \
16997
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
16998
    THUMB_VARIANT, do_##ae, do_##te }
16999
 
17000
/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
17001
   condition code field.  */
17002
#define TUF(mnem, op, top, nops, ops, ae, te)                           \
17003
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
17004
    THUMB_VARIANT, do_##ae, do_##te }
17005
 
17006
/* ARM-only variants of all the above.  */
17007
#define CE(mnem,  op, nops, ops, ae)    \
17008
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17009
 
17010
#define C3(mnem, op, nops, ops, ae)     \
17011
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17012
 
17013
/* Legacy mnemonics that always have conditional infix after the third
17014
   character.  */
17015
#define CL(mnem, op, nops, ops, ae)     \
17016
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17017
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17018
 
17019
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
17020
#define cCE(mnem,  op, nops, ops, ae)   \
17021
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17022
 
17023
/* Legacy coprocessor instructions where conditional infix and conditional
17024
   suffix are ambiguous.  For consistency this includes all FPA instructions,
17025
   not just the potentially ambiguous ones.  */
17026
#define cCL(mnem, op, nops, ops, ae)    \
17027
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17028
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17029
 
17030
/* Coprocessor, takes either a suffix or a position-3 infix
17031
   (for an FPA corner case). */
17032
#define C3E(mnem, op, nops, ops, ae) \
17033
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
17034
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17035
 
17036
#define xCM_(m1, m2, m3, op, nops, ops, ae)     \
17037
  { m1 #m2 m3, OPS##nops ops, \
17038
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17039
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17040
 
17041
#define CM(m1, m2, op, nops, ops, ae)   \
17042
  xCM_ (m1,   , m2, op, nops, ops, ae), \
17043
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
17044
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
17045
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
17046
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
17047
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
17048
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
17049
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
17050
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
17051
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
17052
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
17053
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
17054
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
17055
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
17056
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
17057
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
17058
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
17059
  xCM_ (m1, le, m2, op, nops, ops, ae), \
17060
  xCM_ (m1, al, m2, op, nops, ops, ae)
17061
 
17062
#define UE(mnem, op, nops, ops, ae)     \
17063
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17064
 
17065
#define UF(mnem, op, nops, ops, ae)     \
17066
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17067
 
17068
/* Neon data-processing. ARM versions are unconditional with cond=0xf.
17069
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
17070
   use the same encoding function for each.  */
17071
#define NUF(mnem, op, nops, ops, enc)                                   \
17072
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,            \
17073
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17074
 
17075
/* Neon data processing, version which indirects through neon_enc_tab for
17076
   the various overloaded versions of opcodes.  */
17077
#define nUF(mnem, op, nops, ops, enc)                                   \
17078
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,    \
17079
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17080
 
17081
/* Neon insn with conditional suffix for the ARM version, non-overloaded
17082
   version.  */
17083
#define NCE_tag(mnem, op, nops, ops, enc, tag)                          \
17084
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,             \
17085
    THUMB_VARIANT, do_##enc, do_##enc }
17086
 
17087
#define NCE(mnem, op, nops, ops, enc)                                   \
17088
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17089
 
17090
#define NCEF(mnem, op, nops, ops, enc)                                  \
17091
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17092
 
17093
/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
17094
#define nCE_tag(mnem, op, nops, ops, enc, tag)                          \
17095
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,          \
17096
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17097
 
17098
#define nCE(mnem, op, nops, ops, enc)                                   \
17099
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17100
 
17101
#define nCEF(mnem, op, nops, ops, enc)                                  \
17102
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
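/* Illustrative note (not part of the original source): the upper-case
   Neon macros (NUF, NCE, NCEF) take a literal hex encoding, used via
   0x##op for both the ARM and Thumb fields, while the lower-case forms
   (nUF, nCE, nCEF) pass an N_MNEM_xyz enumerator that the encoding
   function resolves through neon_enc_tab, so one mnemonic can cover
   several overloaded encodings.  */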
17103
 
17104
#define do_0 0
17105
 
17106
static const struct asm_opcode insns[] =
17107
{
17108
#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions.  */
17109
#define THUMB_VARIANT &arm_ext_v4t
17110
 tCE("and",     0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
17111
 tC3("ands",    0100000, _ands,    3, (RR, oRR, SH), arit, t_arit3c),
17112
 tCE("eor",     0200000, _eor,     3, (RR, oRR, SH), arit, t_arit3c),
17113
 tC3("eors",    0300000, _eors,    3, (RR, oRR, SH), arit, t_arit3c),
17114
 tCE("sub",     0400000, _sub,     3, (RR, oRR, SH), arit, t_add_sub),
17115
 tC3("subs",    0500000, _subs,    3, (RR, oRR, SH), arit, t_add_sub),
17116
 tCE("add",     0800000, _add,     3, (RR, oRR, SHG), arit, t_add_sub),
17117
 tC3("adds",    0900000, _adds,    3, (RR, oRR, SHG), arit, t_add_sub),
17118
 tCE("adc",     0a00000, _adc,     3, (RR, oRR, SH), arit, t_arit3c),
17119
 tC3("adcs",    0b00000, _adcs,    3, (RR, oRR, SH), arit, t_arit3c),
17120
 tCE("sbc",     0c00000, _sbc,     3, (RR, oRR, SH), arit, t_arit3),
17121
 tC3("sbcs",    0d00000, _sbcs,    3, (RR, oRR, SH), arit, t_arit3),
17122
 tCE("orr",     1800000, _orr,     3, (RR, oRR, SH), arit, t_arit3c),
17123
 tC3("orrs",    1900000, _orrs,    3, (RR, oRR, SH), arit, t_arit3c),
17124
 tCE("bic",     1c00000, _bic,     3, (RR, oRR, SH), arit, t_arit3),
17125
 tC3("bics",    1d00000, _bics,    3, (RR, oRR, SH), arit, t_arit3),
17126
 
17127
 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
17128
    for setting PSR flag bits.  They are obsolete in V6 and do not
17129
    have Thumb equivalents. */
17130
 tCE("tst",     1100000, _tst,     2, (RR, SH),      cmp,  t_mvn_tst),
17131
 tC3w("tsts",   1100000, _tst,     2, (RR, SH),      cmp,  t_mvn_tst),
17132
  CL("tstp",    110f000,           2, (RR, SH),      cmp),
17133
 tCE("cmp",     1500000, _cmp,     2, (RR, SH),      cmp,  t_mov_cmp),
17134
 tC3w("cmps",   1500000, _cmp,     2, (RR, SH),      cmp,  t_mov_cmp),
17135
  CL("cmpp",    150f000,           2, (RR, SH),      cmp),
17136
 tCE("cmn",     1700000, _cmn,     2, (RR, SH),      cmp,  t_mvn_tst),
17137
 tC3w("cmns",   1700000, _cmn,     2, (RR, SH),      cmp,  t_mvn_tst),
17138
  CL("cmnp",    170f000,           2, (RR, SH),      cmp),
17139
 
17140
 tCE("mov",     1a00000, _mov,     2, (RR, SH),      mov,  t_mov_cmp),
17141
 tC3("movs",    1b00000, _movs,    2, (RR, SH),      mov,  t_mov_cmp),
17142
 tCE("mvn",     1e00000, _mvn,     2, (RR, SH),      mov,  t_mvn_tst),
17143
 tC3("mvns",    1f00000, _mvns,    2, (RR, SH),      mov,  t_mvn_tst),
17144
 
17145
 tCE("ldr",     4100000, _ldr,     2, (RR, ADDRGLDR),ldst, t_ldst),
17146
 tC3("ldrb",    4500000, _ldrb,    2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17147
 tCE("str",     4000000, _str,     _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
17148
                                                                OP_RRnpc),
17149
                                        OP_ADDRGLDR),ldst, t_ldst),
17150
 tC3("strb",    4400000, _strb,    2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17151
 
17152
 tCE("stm",     8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17153
 tC3("stmia",   8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17154
 tC3("stmea",   8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17155
 tCE("ldm",     8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17156
 tC3("ldmia",   8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17157
 tC3("ldmfd",   8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17158
 
17159
 TCE("swi",     f000000, df00,     1, (EXPi),        swi, t_swi),
17160
 TCE("svc",     f000000, df00,     1, (EXPi),        swi, t_swi),
17161
 tCE("b",       a000000, _b,       1, (EXPr),        branch, t_branch),
17162
 TCE("bl",      b000000, f000f800, 1, (EXPr),        bl, t_branch23),
17163
 
17164
  /* Pseudo ops.  */
17165
 tCE("adr",     28f0000, _adr,     2, (RR, EXP),     adr,  t_adr),
17166
  C3(adrl,      28f0000,           2, (RR, EXP),     adrl),
17167
 tCE("nop",     1a00000, _nop,     1, (oI255c),      nop,  t_nop),
17168
 
17169
  /* Thumb-compatibility pseudo ops.  */
17170
 tCE("lsl",     1a00000, _lsl,     3, (RR, oRR, SH), shift, t_shift),
17171
 tC3("lsls",    1b00000, _lsls,    3, (RR, oRR, SH), shift, t_shift),
17172
 tCE("lsr",     1a00020, _lsr,     3, (RR, oRR, SH), shift, t_shift),
17173
 tC3("lsrs",    1b00020, _lsrs,    3, (RR, oRR, SH), shift, t_shift),
17174
 tCE("asr",     1a00040, _asr,     3, (RR, oRR, SH), shift, t_shift),
17175
 tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
17176
 tCE("ror",     1a00060, _ror,     3, (RR, oRR, SH), shift, t_shift),
17177
 tC3("rors",    1b00060, _rors,    3, (RR, oRR, SH), shift, t_shift),
17178
 tCE("neg",     2600000, _neg,     2, (RR, RR),      rd_rn, t_neg),
17179
 tC3("negs",    2700000, _negs,    2, (RR, RR),      rd_rn, t_neg),
17180
 tCE("push",    92d0000, _push,     1, (REGLST),             push_pop, t_push_pop),
17181
 tCE("pop",     8bd0000, _pop,     1, (REGLST),      push_pop, t_push_pop),
17182
 
17183
 /* These may simplify to neg.  */
17184
 TCE("rsb",     0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
17185
 TC3("rsbs",    0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
17186
 
17187
#undef  THUMB_VARIANT
17188
#define THUMB_VARIANT  & arm_ext_v6
17189
 
17190
 TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
17191
 
17192
 /* V1 instructions with no Thumb analogue prior to V6T2.  */
17193
#undef  THUMB_VARIANT
17194
#define THUMB_VARIANT  & arm_ext_v6t2
17195
 
17196
 TCE("teq",     1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
17197
 TC3w("teqs",   1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
17198
  CL("teqp",    130f000,           2, (RR, SH),      cmp),
17199
 
17200
 TC3("ldrt",    4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17201
 TC3("ldrbt",   4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17202
 TC3("strt",    4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
17203
 TC3("strbt",   4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17204
 
17205
 TC3("stmdb",   9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17206
 TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17207
 
17208
 TC3("ldmdb",   9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17209
 TC3("ldmea",   9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17210
 
17211
 /* V1 instructions with no Thumb analogue at all.  */
17212
  CE("rsc",     0e00000,           3, (RR, oRR, SH), arit),
17213
  C3(rscs,      0f00000,           3, (RR, oRR, SH), arit),
17214
 
17215
  C3(stmib,     9800000,           2, (RRw, REGLST), ldmstm),
17216
  C3(stmfa,     9800000,           2, (RRw, REGLST), ldmstm),
17217
  C3(stmda,     8000000,           2, (RRw, REGLST), ldmstm),
17218
  C3(stmed,     8000000,           2, (RRw, REGLST), ldmstm),
17219
  C3(ldmib,     9900000,           2, (RRw, REGLST), ldmstm),
17220
  C3(ldmed,     9900000,           2, (RRw, REGLST), ldmstm),
17221
  C3(ldmda,     8100000,           2, (RRw, REGLST), ldmstm),
17222
  C3(ldmfa,     8100000,           2, (RRw, REGLST), ldmstm),
17223
 
17224
#undef  ARM_VARIANT
17225
#define ARM_VARIANT    & arm_ext_v2     /* ARM 2 - multiplies.  */
17226
#undef  THUMB_VARIANT
17227
#define THUMB_VARIANT  & arm_ext_v4t
17228
 
17229
 tCE("mul",     0000090, _mul,     3, (RRnpc, RRnpc, oRR), mul, t_mul),
17230
 tC3("muls",    0100090, _muls,    3, (RRnpc, RRnpc, oRR), mul, t_mul),
17231
 
17232
#undef  THUMB_VARIANT
17233
#define THUMB_VARIANT  & arm_ext_v6t2
17234
 
17235
 TCE("mla",     0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17236
  C3(mlas,      0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
17237
 
17238
  /* Generic coprocessor instructions.  */
17239
 TCE("cdp",     e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
17240
 TCE("ldc",     c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17241
 TC3("ldcl",    c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17242
 TCE("stc",     c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17243
 TC3("stcl",    c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17244
 TCE("mcr",     e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
17245
 TCE("mrc",     e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
17246
 
17247
#undef  ARM_VARIANT
17248
#define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
17249
 
17250
  CE("swp",     1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17251
  C3(swpb,      1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17252
 
17253
#undef  ARM_VARIANT
17254
#define ARM_VARIANT    & arm_ext_v3     /* ARM 6 Status register instructions.  */
17255
#undef  THUMB_VARIANT
17256
#define THUMB_VARIANT  & arm_ext_msr
17257
 
17258
 TCE("mrs",     1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
17259
 TCE("msr",     120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
17260
 
17261
#undef  ARM_VARIANT
17262
#define ARM_VARIANT    & arm_ext_v3m     /* ARM 7M long multiplies.  */
17263
#undef  THUMB_VARIANT
17264
#define THUMB_VARIANT  & arm_ext_v6t2
17265
 
17266
 TCE("smull",   0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17267
  CM("smull","s",       0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17268
 TCE("umull",   0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17269
  CM("umull","s",       0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17270
 TCE("smlal",   0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17271
  CM("smlal","s",       0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17272
 TCE("umlal",   0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17273
  CM("umlal","s",       0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17274
 
17275
#undef  ARM_VARIANT
17276
#define ARM_VARIANT    & arm_ext_v4     /* ARM Architecture 4.  */
17277
#undef  THUMB_VARIANT
17278
#define THUMB_VARIANT  & arm_ext_v4t
17279
 
17280
 tC3("ldrh",    01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17281
 tC3("strh",    00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17282
 tC3("ldrsh",   01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17283
 tC3("ldrsb",   01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17284
 tCM("ld","sh", 01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17285
 tCM("ld","sb", 01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17286
 
17287
#undef  ARM_VARIANT
17288
#define ARM_VARIANT  & arm_ext_v4t_5
17289
 
17290
  /* ARM Architecture 4T.  */
17291
  /* Note: bx (and blx) are required on V5, even if the processor does
17292
     not support Thumb.  */
17293
 TCE("bx",      12fff10, 4700, 1, (RR), bx, t_bx),
17294
 
17295
#undef  ARM_VARIANT
17296
#define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.     */
17297
#undef  THUMB_VARIANT
17298
#define THUMB_VARIANT  & arm_ext_v5t
17299
 
17300
  /* Note: blx has 2 variants; the .value coded here is for
17301
     BLX(2).  Only this variant has conditional execution.  */
17302
 TCE("blx",     12fff30, 4780, 1, (RR_EXr),                         blx,  t_blx),
17303
 TUE("bkpt",    1200070, be00, 1, (oIffffb),                        bkpt, t_bkpt),
17304
 
17305
#undef  THUMB_VARIANT
17306
#define THUMB_VARIANT  & arm_ext_v6t2
17307
 
17308
 TCE("clz",     16f0f10, fab0f080, 2, (RRnpc, RRnpc),                   rd_rm,  t_clz),
17309
 TUF("ldc2",    c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17310
 TUF("ldc2l",   c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),                     lstc,   lstc),
17311
 TUF("stc2",    c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17312
 TUF("stc2l",   c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),                     lstc,   lstc),
17313
 TUF("cdp2",    e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
17314
 TUF("mcr2",    e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
17315
 TUF("mrc2",    e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
17316
 
17317
#undef  ARM_VARIANT
17318
#define ARM_VARIANT  & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
17319
#undef THUMB_VARIANT
17320
#define THUMB_VARIANT &arm_ext_v5exp
17321
 
17322
 TCE("smlabb",  1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17323
 TCE("smlatb",  10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17324
 TCE("smlabt",  10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17325
 TCE("smlatt",  10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17326
 
17327
 TCE("smlawb",  1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17328
 TCE("smlawt",  12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17329
 
17330
 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17331
 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17332
 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17333
 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17334
 
17335
 TCE("smulbb",  1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17336
 TCE("smultb",  16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17337
 TCE("smulbt",  16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17338
 TCE("smultt",  16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17339
 
17340
 TCE("smulwb",  12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17341
 TCE("smulwt",  12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17342
 
17343
 TCE("qadd",    1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17344
 TCE("qdadd",   1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17345
 TCE("qsub",    1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17346
 TCE("qdsub",   1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17347
 
17348
#undef  ARM_VARIANT
17349
#define ARM_VARIANT  & arm_ext_v5e /*  ARM Architecture 5TE.  */
17350
#undef THUMB_VARIANT
17351
#define THUMB_VARIANT &arm_ext_v6t2
17352
 
17353
 TUF("pld",     450f000, f810f000, 1, (ADDR),                pld,  t_pld),
17354
 TC3("ldrd",    00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
17355
     ldrd, t_ldstd),
17356
 TC3("strd",    00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
17357
                                       ADDRGLDRS), ldrd, t_ldstd),
17358
 
17359
 TCE("mcrr",    c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17360
 TCE("mrrc",    c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17361
 
17362
#undef  ARM_VARIANT
17363
#define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
17364
 
17365
 TCE("bxj",     12fff20, f3c08f00, 1, (RR),                       bxj, t_bxj),
17366
 
17367
#undef  ARM_VARIANT
17368
#define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
17369
#undef  THUMB_VARIANT
17370
#define THUMB_VARIANT  & arm_ext_v6
17371
 
17372
 TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
17373
 TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
17374
 tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
17375
 tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
17376
 tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
17377
 tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17378
 tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17379
 tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17380
 tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17381
 TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
17382
 
17383
#undef  THUMB_VARIANT
17384
#define THUMB_VARIANT  & arm_ext_v6t2
17385
 
17386
 TCE("ldrex",   1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),        ldrex, t_ldrex),
17387
 TCE("strex",   1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17388
                                      strex,  t_strex),
17389
 TUF("mcrr2",   c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17390
 TUF("mrrc2",   c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17391
 
17392
 TCE("ssat",    6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
17393
 TCE("usat",    6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
17394
 
17395
/*  ARM V6 not included in V7M.  */
17396
#undef  THUMB_VARIANT
17397
#define THUMB_VARIANT  & arm_ext_v6_notm
17398
 TUF("rfeia",   8900a00, e990c000, 1, (RRw),                       rfe, rfe),
17399
  UF(rfeib,     9900a00,           1, (RRw),                       rfe),
17400
  UF(rfeda,     8100a00,           1, (RRw),                       rfe),
17401
 TUF("rfedb",   9100a00, e810c000, 1, (RRw),                       rfe, rfe),
17402
 TUF("rfefd",   8900a00, e990c000, 1, (RRw),                       rfe, rfe),
17403
  UF(rfefa,     9900a00,           1, (RRw),                       rfe),
17404
  UF(rfeea,     8100a00,           1, (RRw),                       rfe),
17405
 TUF("rfeed",   9100a00, e810c000, 1, (RRw),                       rfe, rfe),
17406
 TUF("srsia",   8c00500, e980c000, 2, (oRRw, I31w),                srs,  srs),
17407
  UF(srsib,     9c00500,           2, (oRRw, I31w),                srs),
17408
  UF(srsda,     8400500,           2, (oRRw, I31w),                srs),
17409
 TUF("srsdb",   9400500, e800c000, 2, (oRRw, I31w),                srs,  srs),
17410
 
17411
/*  ARM V6 not included in V7M (e.g. integer SIMD).  */
17412
#undef  THUMB_VARIANT
17413
#define THUMB_VARIANT  & arm_ext_v6_dsp
17414
 TUF("cps",     1020000, f3af8100, 1, (I31b),                     imm0, t_cps),
17415
 TCE("pkhbt",   6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
17416
 TCE("pkhtb",   6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
17417
 TCE("qadd16",  6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17418
 TCE("qadd8",   6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17419
 TCE("qasx",    6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17420
 /* Old name for QASX.  */
17421
 TCE("qaddsubx",        6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17422
 TCE("qsax",    6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17423
 /* Old name for QSAX.  */
17424
 TCE("qsubaddx",        6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17425
 TCE("qsub16",  6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17426
 TCE("qsub8",   6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17427
 TCE("sadd16",  6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17428
 TCE("sadd8",   6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17429
 TCE("sasx",    6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17430
 /* Old name for SASX.  */
17431
 TCE("saddsubx",        6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17432
 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17433
 TCE("shadd8",  6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17434
 TCE("shasx",     6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17435
 /* Old name for SHASX.  */
17436
 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17437
 TCE("shsax",      6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),    rd_rn_rm, t_simd),
17438
 /* Old name for SHSAX.  */
17439
 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17440
 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17441
 TCE("shsub8",  6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17442
 TCE("ssax",    6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17443
 /* Old name for SSAX.  */
17444
 TCE("ssubaddx",        6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17445
 TCE("ssub16",  6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17446
 TCE("ssub8",   6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17447
 TCE("uadd16",  6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17448
 TCE("uadd8",   6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17449
 TCE("uasx",    6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17450
 /* Old name for UASX.  */
17451
 TCE("uaddsubx",        6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17452
 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17453
 TCE("uhadd8",  6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17454
 TCE("uhasx",     6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17455
 /* Old name for UHASX.  */
17456
 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17457
 TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17458
 /* Old name for UHSAX.  */
17459
 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17460
 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17461
 TCE("uhsub8",  6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17462
 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17463
 TCE("uqadd8",  6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17464
 TCE("uqasx",     6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17465
 /* Old name for UQASX.  */
17466
 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17467
 TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17468
 /* Old name for UQSAX.  */
17469
 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17470
 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17471
 TCE("uqsub8",  6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17472
 TCE("usub16",  6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17473
 TCE("usax",    6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17474
 /* Old name for USAX.  */
17475
 TCE("usubaddx",        6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17476
 TCE("usub8",   6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17477
 TCE("sxtah",   6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17478
 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17479
 TCE("sxtab",   6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17480
 TCE("sxtb16",  68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),        sxth,  t_sxth),
17481
 TCE("uxtah",   6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17482
 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17483
 TCE("uxtab",   6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17484
 TCE("uxtb16",  6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),        sxth,  t_sxth),
17485
 TCE("sel",     6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17486
 TCE("smlad",   7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17487
 TCE("smladx",  7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17488
 TCE("smlald",  7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17489
 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17490
 TCE("smlsd",   7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17491
 TCE("smlsdx",  7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17492
 TCE("smlsld",  7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17493
 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17494
 TCE("smmla",   7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17495
 TCE("smmlar",  7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17496
 TCE("smmls",   75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17497
 TCE("smmlsr",  75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17498
 TCE("smmul",   750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17499
 TCE("smmulr",  750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17500
 TCE("smuad",   700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17501
 TCE("smuadx",  700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17502
 TCE("smusd",   700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17503
 TCE("smusdx",  700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17504
 TCE("ssat16",  6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),         ssat16, t_ssat16),
17505
 TCE("umaal",   0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
17506
 TCE("usad8",   780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),       smul,   t_simd),
17507
 TCE("usada8",  7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
17508
 TCE("usat16",  6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),         usat16, t_usat16),
17509
 
17510
#undef  ARM_VARIANT
17511
#define ARM_VARIANT   & arm_ext_v6k
17512
#undef  THUMB_VARIANT
17513
#define THUMB_VARIANT & arm_ext_v6k
17514
 
17515
 tCE("yield",   320f001, _yield,    0, (), noargs, t_hint),
17516
 tCE("wfe",     320f002, _wfe,      0, (), noargs, t_hint),
17517
 tCE("wfi",     320f003, _wfi,      0, (), noargs, t_hint),
17518
 tCE("sev",     320f004, _sev,      0, (), noargs, t_hint),
17519
 
17520
#undef  THUMB_VARIANT
17521
#define THUMB_VARIANT  & arm_ext_v6_notm
17522
 TCE("ldrexd",  1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
                                      ldrexd, t_ldrexd),
17524
 TCE("strexd",  1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
                                       RRnpcb), strexd, t_strexd),
17526
 
17527
#undef  THUMB_VARIANT
17528
#define THUMB_VARIANT  & arm_ext_v6t2
17529
 TCE("ldrexb",  1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
     rd_rn,  rd_rn),
17531
 TCE("ldrexh",  1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
     rd_rn,  rd_rn),
17533
 TCE("strexb",  1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
     strex, t_strexbh),
 TCE("strexh",  1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
     strex, t_strexbh),
 TUF("clrex",   57ff01f, f3bf8f2f, 0, (),                              noargs, noargs),
17538
 
17539
#undef  ARM_VARIANT
17540
#define ARM_VARIANT    & arm_ext_sec
17541
#undef THUMB_VARIANT
17542
#define THUMB_VARIANT  & arm_ext_sec
17543
 
17544
 TCE("smc",     1600070, f7f08000, 1, (EXPi), smc, t_smc),
17545
 
17546
#undef  ARM_VARIANT
17547
#define ARM_VARIANT    & arm_ext_virt
17548
#undef  THUMB_VARIANT
17549
#define THUMB_VARIANT    & arm_ext_virt
17550
 
17551
 TCE("hvc",     1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
17552
 TCE("eret",    160006e, f3de8f00, 0, (), noargs, noargs),
17553
 
17554
#undef  ARM_VARIANT
17555
#define ARM_VARIANT  & arm_ext_v6t2
17556
#undef  THUMB_VARIANT
17557
#define THUMB_VARIANT  & arm_ext_v6t2
17558
 
17559
 TCE("bfc",     7c0001f, f36f0000, 3, (RRnpc, I31, I32),           bfc, t_bfc),
17560
 TCE("bfi",     7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
17561
 TCE("sbfx",    7a00050, f3400000, 4, (RR, RR, I31, I32),          bfx, t_bfx),
17562
 TCE("ubfx",    7e00050, f3c00000, 4, (RR, RR, I31, I32),          bfx, t_bfx),
17563
 
17564
 TCE("mls",     0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17565
 TCE("movw",    3000000, f2400000, 2, (RRnpc, HALF),                mov16, t_mov16),
17566
 TCE("movt",    3400000, f2c00000, 2, (RRnpc, HALF),                mov16, t_mov16),
17567
 TCE("rbit",    6ff0f30, fa90f0a0, 2, (RR, RR),                     rd_rm, t_rbit),
17568
 
17569
 TC3("ldrht",   03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17570
 TC3("ldrsht",  03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17571
 TC3("ldrsbt",  03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17572
 TC3("strht",   02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17573
 
17574
 /* Thumb-only instructions.  */
17575
#undef ARM_VARIANT
17576
#define ARM_VARIANT NULL
17577
  TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
17578
  TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
17579
 
17580
 /* ARM does not really have an IT instruction, so always allow it.
    The opcode is copied from Thumb in order to allow warnings in
    -mimplicit-it=[never | arm] modes.  */
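  /* Illustrative sketch only (not part of this table): in Thumb-2 UAL
     source an IT block written by the programmer looks roughly like

	 ite   eq
	 addeq r0, r0, #1
	 subne r0, r0, #1

     Accepting the same mnemonic in ARM state lets the assembler keep its
     -mimplicit-it bookkeeping consistent across both instruction sets.  */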
17583
#undef  ARM_VARIANT
17584
#define ARM_VARIANT  & arm_ext_v1
17585
 
17586
 TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
17587
 TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
17588
 TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
17589
 TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
17590
 TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
17591
 TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
17592
 TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
17593
 TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
17594
 TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
17595
 TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
17596
 TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
17597
 TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
17598
 TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
17599
 TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
17600
 TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
17601
 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
17602
 TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
17603
 TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
17604
 
17605
 /* Thumb2 only instructions.  */
17606
#undef  ARM_VARIANT
17607
#define ARM_VARIANT  NULL
17608
 
17609
 TCE("addw",    0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17610
 TCE("subw",    0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17611
 TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
17612
 TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
17613
 TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
17614
 TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
17615
 
17616
 /* Hardware division instructions.  */
17617
#undef  ARM_VARIANT
17618
#define ARM_VARIANT    & arm_ext_adiv
17619
#undef  THUMB_VARIANT
17620
#define THUMB_VARIANT  & arm_ext_div
17621
 
17622
 TCE("sdiv",    710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
17623
 TCE("udiv",    730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
17624
 
17625
 /* ARM V6M/V7 instructions.  */
17626
#undef  ARM_VARIANT
17627
#define ARM_VARIANT    & arm_ext_barrier
17628
#undef  THUMB_VARIANT
17629
#define THUMB_VARIANT  & arm_ext_barrier
17630
 
17631
 TUF("dmb",     57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier,  t_barrier),
17632
 TUF("dsb",     57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier,  t_barrier),
17633
 TUF("isb",     57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier,  t_barrier),
17634
 
17635
 /* ARM V7 instructions.  */
17636
#undef  ARM_VARIANT
17637
#define ARM_VARIANT    & arm_ext_v7
17638
#undef  THUMB_VARIANT
17639
#define THUMB_VARIANT  & arm_ext_v7
17640
 
17641
 TUF("pli",     450f000, f910f000, 1, (ADDR),     pli,      t_pld),
17642
 TCE("dbg",     320f0f0, f3af80f0, 1, (I15),      dbg,      t_dbg),
17643
 
17644
#undef ARM_VARIANT
17645
#define ARM_VARIANT    & arm_ext_mp
17646
#undef THUMB_VARIANT
17647
#define THUMB_VARIANT  & arm_ext_mp
17648
 
17649
 TUF("pldw",    410f000, f830f000, 1, (ADDR),   pld,    t_pld),
17650
 
17651
#undef  ARM_VARIANT
17652
#define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
17653
 
17654
 cCE("wfs",     e200110, 1, (RR),            rd),
17655
 cCE("rfs",     e300110, 1, (RR),            rd),
17656
 cCE("wfc",     e400110, 1, (RR),            rd),
17657
 cCE("rfc",     e500110, 1, (RR),            rd),
17658
 
17659
 cCL("ldfs",    c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17660
 cCL("ldfd",    c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17661
 cCL("ldfe",    c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17662
 cCL("ldfp",    c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17663
 
17664
 cCL("stfs",    c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17665
 cCL("stfd",    c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17666
 cCL("stfe",    c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17667
 cCL("stfp",    c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17668
 
17669
 cCL("mvfs",    e008100, 2, (RF, RF_IF),     rd_rm),
17670
 cCL("mvfsp",   e008120, 2, (RF, RF_IF),     rd_rm),
17671
 cCL("mvfsm",   e008140, 2, (RF, RF_IF),     rd_rm),
17672
 cCL("mvfsz",   e008160, 2, (RF, RF_IF),     rd_rm),
17673
 cCL("mvfd",    e008180, 2, (RF, RF_IF),     rd_rm),
17674
 cCL("mvfdp",   e0081a0, 2, (RF, RF_IF),     rd_rm),
17675
 cCL("mvfdm",   e0081c0, 2, (RF, RF_IF),     rd_rm),
17676
 cCL("mvfdz",   e0081e0, 2, (RF, RF_IF),     rd_rm),
17677
 cCL("mvfe",    e088100, 2, (RF, RF_IF),     rd_rm),
17678
 cCL("mvfep",   e088120, 2, (RF, RF_IF),     rd_rm),
17679
 cCL("mvfem",   e088140, 2, (RF, RF_IF),     rd_rm),
17680
 cCL("mvfez",   e088160, 2, (RF, RF_IF),     rd_rm),
17681
 
17682
 cCL("mnfs",    e108100, 2, (RF, RF_IF),     rd_rm),
17683
 cCL("mnfsp",   e108120, 2, (RF, RF_IF),     rd_rm),
17684
 cCL("mnfsm",   e108140, 2, (RF, RF_IF),     rd_rm),
17685
 cCL("mnfsz",   e108160, 2, (RF, RF_IF),     rd_rm),
17686
 cCL("mnfd",    e108180, 2, (RF, RF_IF),     rd_rm),
17687
 cCL("mnfdp",   e1081a0, 2, (RF, RF_IF),     rd_rm),
17688
 cCL("mnfdm",   e1081c0, 2, (RF, RF_IF),     rd_rm),
17689
 cCL("mnfdz",   e1081e0, 2, (RF, RF_IF),     rd_rm),
17690
 cCL("mnfe",    e188100, 2, (RF, RF_IF),     rd_rm),
17691
 cCL("mnfep",   e188120, 2, (RF, RF_IF),     rd_rm),
17692
 cCL("mnfem",   e188140, 2, (RF, RF_IF),     rd_rm),
17693
 cCL("mnfez",   e188160, 2, (RF, RF_IF),     rd_rm),
17694
 
17695
 cCL("abss",    e208100, 2, (RF, RF_IF),     rd_rm),
17696
 cCL("abssp",   e208120, 2, (RF, RF_IF),     rd_rm),
17697
 cCL("abssm",   e208140, 2, (RF, RF_IF),     rd_rm),
17698
 cCL("abssz",   e208160, 2, (RF, RF_IF),     rd_rm),
17699
 cCL("absd",    e208180, 2, (RF, RF_IF),     rd_rm),
17700
 cCL("absdp",   e2081a0, 2, (RF, RF_IF),     rd_rm),
17701
 cCL("absdm",   e2081c0, 2, (RF, RF_IF),     rd_rm),
17702
 cCL("absdz",   e2081e0, 2, (RF, RF_IF),     rd_rm),
17703
 cCL("abse",    e288100, 2, (RF, RF_IF),     rd_rm),
17704
 cCL("absep",   e288120, 2, (RF, RF_IF),     rd_rm),
17705
 cCL("absem",   e288140, 2, (RF, RF_IF),     rd_rm),
17706
 cCL("absez",   e288160, 2, (RF, RF_IF),     rd_rm),
17707
 
17708
 cCL("rnds",    e308100, 2, (RF, RF_IF),     rd_rm),
17709
 cCL("rndsp",   e308120, 2, (RF, RF_IF),     rd_rm),
17710
 cCL("rndsm",   e308140, 2, (RF, RF_IF),     rd_rm),
17711
 cCL("rndsz",   e308160, 2, (RF, RF_IF),     rd_rm),
17712
 cCL("rndd",    e308180, 2, (RF, RF_IF),     rd_rm),
17713
 cCL("rnddp",   e3081a0, 2, (RF, RF_IF),     rd_rm),
17714
 cCL("rnddm",   e3081c0, 2, (RF, RF_IF),     rd_rm),
17715
 cCL("rnddz",   e3081e0, 2, (RF, RF_IF),     rd_rm),
17716
 cCL("rnde",    e388100, 2, (RF, RF_IF),     rd_rm),
17717
 cCL("rndep",   e388120, 2, (RF, RF_IF),     rd_rm),
17718
 cCL("rndem",   e388140, 2, (RF, RF_IF),     rd_rm),
17719
 cCL("rndez",   e388160, 2, (RF, RF_IF),     rd_rm),
17720
 
17721
 cCL("sqts",    e408100, 2, (RF, RF_IF),     rd_rm),
17722
 cCL("sqtsp",   e408120, 2, (RF, RF_IF),     rd_rm),
17723
 cCL("sqtsm",   e408140, 2, (RF, RF_IF),     rd_rm),
17724
 cCL("sqtsz",   e408160, 2, (RF, RF_IF),     rd_rm),
17725
 cCL("sqtd",    e408180, 2, (RF, RF_IF),     rd_rm),
17726
 cCL("sqtdp",   e4081a0, 2, (RF, RF_IF),     rd_rm),
17727
 cCL("sqtdm",   e4081c0, 2, (RF, RF_IF),     rd_rm),
17728
 cCL("sqtdz",   e4081e0, 2, (RF, RF_IF),     rd_rm),
17729
 cCL("sqte",    e488100, 2, (RF, RF_IF),     rd_rm),
17730
 cCL("sqtep",   e488120, 2, (RF, RF_IF),     rd_rm),
17731
 cCL("sqtem",   e488140, 2, (RF, RF_IF),     rd_rm),
17732
 cCL("sqtez",   e488160, 2, (RF, RF_IF),     rd_rm),
17733
 
17734
 cCL("logs",    e508100, 2, (RF, RF_IF),     rd_rm),
17735
 cCL("logsp",   e508120, 2, (RF, RF_IF),     rd_rm),
17736
 cCL("logsm",   e508140, 2, (RF, RF_IF),     rd_rm),
17737
 cCL("logsz",   e508160, 2, (RF, RF_IF),     rd_rm),
17738
 cCL("logd",    e508180, 2, (RF, RF_IF),     rd_rm),
17739
 cCL("logdp",   e5081a0, 2, (RF, RF_IF),     rd_rm),
17740
 cCL("logdm",   e5081c0, 2, (RF, RF_IF),     rd_rm),
17741
 cCL("logdz",   e5081e0, 2, (RF, RF_IF),     rd_rm),
17742
 cCL("loge",    e588100, 2, (RF, RF_IF),     rd_rm),
17743
 cCL("logep",   e588120, 2, (RF, RF_IF),     rd_rm),
17744
 cCL("logem",   e588140, 2, (RF, RF_IF),     rd_rm),
17745
 cCL("logez",   e588160, 2, (RF, RF_IF),     rd_rm),
17746
 
17747
 cCL("lgns",    e608100, 2, (RF, RF_IF),     rd_rm),
17748
 cCL("lgnsp",   e608120, 2, (RF, RF_IF),     rd_rm),
17749
 cCL("lgnsm",   e608140, 2, (RF, RF_IF),     rd_rm),
17750
 cCL("lgnsz",   e608160, 2, (RF, RF_IF),     rd_rm),
17751
 cCL("lgnd",    e608180, 2, (RF, RF_IF),     rd_rm),
17752
 cCL("lgndp",   e6081a0, 2, (RF, RF_IF),     rd_rm),
17753
 cCL("lgndm",   e6081c0, 2, (RF, RF_IF),     rd_rm),
17754
 cCL("lgndz",   e6081e0, 2, (RF, RF_IF),     rd_rm),
17755
 cCL("lgne",    e688100, 2, (RF, RF_IF),     rd_rm),
17756
 cCL("lgnep",   e688120, 2, (RF, RF_IF),     rd_rm),
17757
 cCL("lgnem",   e688140, 2, (RF, RF_IF),     rd_rm),
17758
 cCL("lgnez",   e688160, 2, (RF, RF_IF),     rd_rm),
17759
 
17760
 cCL("exps",    e708100, 2, (RF, RF_IF),     rd_rm),
17761
 cCL("expsp",   e708120, 2, (RF, RF_IF),     rd_rm),
17762
 cCL("expsm",   e708140, 2, (RF, RF_IF),     rd_rm),
17763
 cCL("expsz",   e708160, 2, (RF, RF_IF),     rd_rm),
17764
 cCL("expd",    e708180, 2, (RF, RF_IF),     rd_rm),
17765
 cCL("expdp",   e7081a0, 2, (RF, RF_IF),     rd_rm),
17766
 cCL("expdm",   e7081c0, 2, (RF, RF_IF),     rd_rm),
17767
 cCL("expdz",   e7081e0, 2, (RF, RF_IF),     rd_rm),
17768
 cCL("expe",    e788100, 2, (RF, RF_IF),     rd_rm),
17769
 cCL("expep",   e788120, 2, (RF, RF_IF),     rd_rm),
17770
 cCL("expem",   e788140, 2, (RF, RF_IF),     rd_rm),
17771
 cCL("expdz",   e788160, 2, (RF, RF_IF),     rd_rm),
17772
 
17773
 cCL("sins",    e808100, 2, (RF, RF_IF),     rd_rm),
17774
 cCL("sinsp",   e808120, 2, (RF, RF_IF),     rd_rm),
17775
 cCL("sinsm",   e808140, 2, (RF, RF_IF),     rd_rm),
17776
 cCL("sinsz",   e808160, 2, (RF, RF_IF),     rd_rm),
17777
 cCL("sind",    e808180, 2, (RF, RF_IF),     rd_rm),
17778
 cCL("sindp",   e8081a0, 2, (RF, RF_IF),     rd_rm),
17779
 cCL("sindm",   e8081c0, 2, (RF, RF_IF),     rd_rm),
17780
 cCL("sindz",   e8081e0, 2, (RF, RF_IF),     rd_rm),
17781
 cCL("sine",    e888100, 2, (RF, RF_IF),     rd_rm),
17782
 cCL("sinep",   e888120, 2, (RF, RF_IF),     rd_rm),
17783
 cCL("sinem",   e888140, 2, (RF, RF_IF),     rd_rm),
17784
 cCL("sinez",   e888160, 2, (RF, RF_IF),     rd_rm),
17785
 
17786
 cCL("coss",    e908100, 2, (RF, RF_IF),     rd_rm),
17787
 cCL("cossp",   e908120, 2, (RF, RF_IF),     rd_rm),
17788
 cCL("cossm",   e908140, 2, (RF, RF_IF),     rd_rm),
17789
 cCL("cossz",   e908160, 2, (RF, RF_IF),     rd_rm),
17790
 cCL("cosd",    e908180, 2, (RF, RF_IF),     rd_rm),
17791
 cCL("cosdp",   e9081a0, 2, (RF, RF_IF),     rd_rm),
17792
 cCL("cosdm",   e9081c0, 2, (RF, RF_IF),     rd_rm),
17793
 cCL("cosdz",   e9081e0, 2, (RF, RF_IF),     rd_rm),
17794
 cCL("cose",    e988100, 2, (RF, RF_IF),     rd_rm),
17795
 cCL("cosep",   e988120, 2, (RF, RF_IF),     rd_rm),
17796
 cCL("cosem",   e988140, 2, (RF, RF_IF),     rd_rm),
17797
 cCL("cosez",   e988160, 2, (RF, RF_IF),     rd_rm),
17798
 
17799
 cCL("tans",    ea08100, 2, (RF, RF_IF),     rd_rm),
17800
 cCL("tansp",   ea08120, 2, (RF, RF_IF),     rd_rm),
17801
 cCL("tansm",   ea08140, 2, (RF, RF_IF),     rd_rm),
17802
 cCL("tansz",   ea08160, 2, (RF, RF_IF),     rd_rm),
17803
 cCL("tand",    ea08180, 2, (RF, RF_IF),     rd_rm),
17804
 cCL("tandp",   ea081a0, 2, (RF, RF_IF),     rd_rm),
17805
 cCL("tandm",   ea081c0, 2, (RF, RF_IF),     rd_rm),
17806
 cCL("tandz",   ea081e0, 2, (RF, RF_IF),     rd_rm),
17807
 cCL("tane",    ea88100, 2, (RF, RF_IF),     rd_rm),
17808
 cCL("tanep",   ea88120, 2, (RF, RF_IF),     rd_rm),
17809
 cCL("tanem",   ea88140, 2, (RF, RF_IF),     rd_rm),
17810
 cCL("tanez",   ea88160, 2, (RF, RF_IF),     rd_rm),
17811
 
17812
 cCL("asns",    eb08100, 2, (RF, RF_IF),     rd_rm),
17813
 cCL("asnsp",   eb08120, 2, (RF, RF_IF),     rd_rm),
17814
 cCL("asnsm",   eb08140, 2, (RF, RF_IF),     rd_rm),
17815
 cCL("asnsz",   eb08160, 2, (RF, RF_IF),     rd_rm),
17816
 cCL("asnd",    eb08180, 2, (RF, RF_IF),     rd_rm),
17817
 cCL("asndp",   eb081a0, 2, (RF, RF_IF),     rd_rm),
17818
 cCL("asndm",   eb081c0, 2, (RF, RF_IF),     rd_rm),
17819
 cCL("asndz",   eb081e0, 2, (RF, RF_IF),     rd_rm),
17820
 cCL("asne",    eb88100, 2, (RF, RF_IF),     rd_rm),
17821
 cCL("asnep",   eb88120, 2, (RF, RF_IF),     rd_rm),
17822
 cCL("asnem",   eb88140, 2, (RF, RF_IF),     rd_rm),
17823
 cCL("asnez",   eb88160, 2, (RF, RF_IF),     rd_rm),
17824
 
17825
 cCL("acss",    ec08100, 2, (RF, RF_IF),     rd_rm),
17826
 cCL("acssp",   ec08120, 2, (RF, RF_IF),     rd_rm),
17827
 cCL("acssm",   ec08140, 2, (RF, RF_IF),     rd_rm),
17828
 cCL("acssz",   ec08160, 2, (RF, RF_IF),     rd_rm),
17829
 cCL("acsd",    ec08180, 2, (RF, RF_IF),     rd_rm),
17830
 cCL("acsdp",   ec081a0, 2, (RF, RF_IF),     rd_rm),
17831
 cCL("acsdm",   ec081c0, 2, (RF, RF_IF),     rd_rm),
17832
 cCL("acsdz",   ec081e0, 2, (RF, RF_IF),     rd_rm),
17833
 cCL("acse",    ec88100, 2, (RF, RF_IF),     rd_rm),
17834
 cCL("acsep",   ec88120, 2, (RF, RF_IF),     rd_rm),
17835
 cCL("acsem",   ec88140, 2, (RF, RF_IF),     rd_rm),
17836
 cCL("acsez",   ec88160, 2, (RF, RF_IF),     rd_rm),
17837
 
17838
 cCL("atns",    ed08100, 2, (RF, RF_IF),     rd_rm),
17839
 cCL("atnsp",   ed08120, 2, (RF, RF_IF),     rd_rm),
17840
 cCL("atnsm",   ed08140, 2, (RF, RF_IF),     rd_rm),
17841
 cCL("atnsz",   ed08160, 2, (RF, RF_IF),     rd_rm),
17842
 cCL("atnd",    ed08180, 2, (RF, RF_IF),     rd_rm),
17843
 cCL("atndp",   ed081a0, 2, (RF, RF_IF),     rd_rm),
17844
 cCL("atndm",   ed081c0, 2, (RF, RF_IF),     rd_rm),
17845
 cCL("atndz",   ed081e0, 2, (RF, RF_IF),     rd_rm),
17846
 cCL("atne",    ed88100, 2, (RF, RF_IF),     rd_rm),
17847
 cCL("atnep",   ed88120, 2, (RF, RF_IF),     rd_rm),
17848
 cCL("atnem",   ed88140, 2, (RF, RF_IF),     rd_rm),
17849
 cCL("atnez",   ed88160, 2, (RF, RF_IF),     rd_rm),
17850
 
17851
 cCL("urds",    ee08100, 2, (RF, RF_IF),     rd_rm),
17852
 cCL("urdsp",   ee08120, 2, (RF, RF_IF),     rd_rm),
17853
 cCL("urdsm",   ee08140, 2, (RF, RF_IF),     rd_rm),
17854
 cCL("urdsz",   ee08160, 2, (RF, RF_IF),     rd_rm),
17855
 cCL("urdd",    ee08180, 2, (RF, RF_IF),     rd_rm),
17856
 cCL("urddp",   ee081a0, 2, (RF, RF_IF),     rd_rm),
17857
 cCL("urddm",   ee081c0, 2, (RF, RF_IF),     rd_rm),
17858
 cCL("urddz",   ee081e0, 2, (RF, RF_IF),     rd_rm),
17859
 cCL("urde",    ee88100, 2, (RF, RF_IF),     rd_rm),
17860
 cCL("urdep",   ee88120, 2, (RF, RF_IF),     rd_rm),
17861
 cCL("urdem",   ee88140, 2, (RF, RF_IF),     rd_rm),
17862
 cCL("urdez",   ee88160, 2, (RF, RF_IF),     rd_rm),
17863
 
17864
 cCL("nrms",    ef08100, 2, (RF, RF_IF),     rd_rm),
17865
 cCL("nrmsp",   ef08120, 2, (RF, RF_IF),     rd_rm),
17866
 cCL("nrmsm",   ef08140, 2, (RF, RF_IF),     rd_rm),
17867
 cCL("nrmsz",   ef08160, 2, (RF, RF_IF),     rd_rm),
17868
 cCL("nrmd",    ef08180, 2, (RF, RF_IF),     rd_rm),
17869
 cCL("nrmdp",   ef081a0, 2, (RF, RF_IF),     rd_rm),
17870
 cCL("nrmdm",   ef081c0, 2, (RF, RF_IF),     rd_rm),
17871
 cCL("nrmdz",   ef081e0, 2, (RF, RF_IF),     rd_rm),
17872
 cCL("nrme",    ef88100, 2, (RF, RF_IF),     rd_rm),
17873
 cCL("nrmep",   ef88120, 2, (RF, RF_IF),     rd_rm),
17874
 cCL("nrmem",   ef88140, 2, (RF, RF_IF),     rd_rm),
17875
 cCL("nrmez",   ef88160, 2, (RF, RF_IF),     rd_rm),
17876
 
17877
 cCL("adfs",    e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
17878
 cCL("adfsp",   e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
17879
 cCL("adfsm",   e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
17880
 cCL("adfsz",   e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
17881
 cCL("adfd",    e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
17882
 cCL("adfdp",   e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17883
 cCL("adfdm",   e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17884
 cCL("adfdz",   e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17885
 cCL("adfe",    e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
17886
 cCL("adfep",   e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
17887
 cCL("adfem",   e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
17888
 cCL("adfez",   e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
17889
 
17890
 cCL("sufs",    e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
17891
 cCL("sufsp",   e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
17892
 cCL("sufsm",   e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
17893
 cCL("sufsz",   e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
17894
 cCL("sufd",    e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
17895
 cCL("sufdp",   e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17896
 cCL("sufdm",   e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17897
 cCL("sufdz",   e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17898
 cCL("sufe",    e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
17899
 cCL("sufep",   e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
17900
 cCL("sufem",   e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
17901
 cCL("sufez",   e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
17902
 
17903
 cCL("rsfs",    e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
17904
 cCL("rsfsp",   e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
17905
 cCL("rsfsm",   e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
17906
 cCL("rsfsz",   e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
17907
 cCL("rsfd",    e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
17908
 cCL("rsfdp",   e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17909
 cCL("rsfdm",   e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17910
 cCL("rsfdz",   e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17911
 cCL("rsfe",    e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
17912
 cCL("rsfep",   e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
17913
 cCL("rsfem",   e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
17914
 cCL("rsfez",   e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
17915
 
17916
 cCL("mufs",    e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
17917
 cCL("mufsp",   e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
17918
 cCL("mufsm",   e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
17919
 cCL("mufsz",   e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
17920
 cCL("mufd",    e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
17921
 cCL("mufdp",   e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17922
 cCL("mufdm",   e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17923
 cCL("mufdz",   e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17924
 cCL("mufe",    e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
17925
 cCL("mufep",   e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
17926
 cCL("mufem",   e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
17927
 cCL("mufez",   e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
17928
 
17929
 cCL("dvfs",    e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
17930
 cCL("dvfsp",   e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
17931
 cCL("dvfsm",   e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
17932
 cCL("dvfsz",   e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
17933
 cCL("dvfd",    e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
17934
 cCL("dvfdp",   e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17935
 cCL("dvfdm",   e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17936
 cCL("dvfdz",   e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17937
 cCL("dvfe",    e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
17938
 cCL("dvfep",   e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
17939
 cCL("dvfem",   e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
17940
 cCL("dvfez",   e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
17941
 
17942
 cCL("rdfs",    e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
17943
 cCL("rdfsp",   e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
17944
 cCL("rdfsm",   e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
17945
 cCL("rdfsz",   e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
17946
 cCL("rdfd",    e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
17947
 cCL("rdfdp",   e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17948
 cCL("rdfdm",   e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17949
 cCL("rdfdz",   e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17950
 cCL("rdfe",    e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
17951
 cCL("rdfep",   e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
17952
 cCL("rdfem",   e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
17953
 cCL("rdfez",   e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
17954
 
17955
 cCL("pows",    e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
17956
 cCL("powsp",   e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
17957
 cCL("powsm",   e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
17958
 cCL("powsz",   e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
17959
 cCL("powd",    e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
17960
 cCL("powdp",   e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17961
 cCL("powdm",   e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17962
 cCL("powdz",   e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17963
 cCL("powe",    e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
17964
 cCL("powep",   e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
17965
 cCL("powem",   e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
17966
 cCL("powez",   e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
17967
 
17968
 cCL("rpws",    e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
17969
 cCL("rpwsp",   e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
17970
 cCL("rpwsm",   e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
17971
 cCL("rpwsz",   e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
17972
 cCL("rpwd",    e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
17973
 cCL("rpwdp",   e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17974
 cCL("rpwdm",   e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17975
 cCL("rpwdz",   e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17976
 cCL("rpwe",    e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
17977
 cCL("rpwep",   e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
17978
 cCL("rpwem",   e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
17979
 cCL("rpwez",   e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
17980
 
17981
 cCL("rmfs",    e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
17982
 cCL("rmfsp",   e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
17983
 cCL("rmfsm",   e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
17984
 cCL("rmfsz",   e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
17985
 cCL("rmfd",    e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
17986
 cCL("rmfdp",   e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17987
 cCL("rmfdm",   e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17988
 cCL("rmfdz",   e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17989
 cCL("rmfe",    e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
17990
 cCL("rmfep",   e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
17991
 cCL("rmfem",   e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
17992
 cCL("rmfez",   e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
17993
 
17994
 cCL("fmls",    e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
17995
 cCL("fmlsp",   e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
17996
 cCL("fmlsm",   e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
17997
 cCL("fmlsz",   e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
17998
 cCL("fmld",    e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
17999
 cCL("fmldp",   e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18000
 cCL("fmldm",   e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18001
 cCL("fmldz",   e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18002
 cCL("fmle",    e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
18003
 cCL("fmlep",   e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
18004
 cCL("fmlem",   e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
18005
 cCL("fmlez",   e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
18006
 
18007
 cCL("fdvs",    ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18008
 cCL("fdvsp",   ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18009
 cCL("fdvsm",   ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18010
 cCL("fdvsz",   ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18011
 cCL("fdvd",    ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18012
 cCL("fdvdp",   ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18013
 cCL("fdvdm",   ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18014
 cCL("fdvdz",   ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18015
 cCL("fdve",    ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18016
 cCL("fdvep",   ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18017
 cCL("fdvem",   ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18018
 cCL("fdvez",   ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18019
 
18020
 cCL("frds",    eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18021
 cCL("frdsp",   eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18022
 cCL("frdsm",   eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18023
 cCL("frdsz",   eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18024
 cCL("frdd",    eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18025
 cCL("frddp",   eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18026
 cCL("frddm",   eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18027
 cCL("frddz",   eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18028
 cCL("frde",    eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18029
 cCL("frdep",   eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18030
 cCL("frdem",   eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18031
 cCL("frdez",   eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18032
 
18033
 cCL("pols",    ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18034
 cCL("polsp",   ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18035
 cCL("polsm",   ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18036
 cCL("polsz",   ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18037
 cCL("pold",    ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18038
 cCL("poldp",   ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18039
 cCL("poldm",   ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18040
 cCL("poldz",   ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18041
 cCL("pole",    ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18042
 cCL("polep",   ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18043
 cCL("polem",   ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18044
 cCL("polez",   ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18045
 
18046
 cCE("cmf",     e90f110, 2, (RF, RF_IF),     fpa_cmp),
18047
 C3E("cmfe",    ed0f110, 2, (RF, RF_IF),     fpa_cmp),
18048
 cCE("cnf",     eb0f110, 2, (RF, RF_IF),     fpa_cmp),
18049
 C3E("cnfe",    ef0f110, 2, (RF, RF_IF),     fpa_cmp),
18050
 
18051
 cCL("flts",    e000110, 2, (RF, RR),        rn_rd),
18052
 cCL("fltsp",   e000130, 2, (RF, RR),        rn_rd),
18053
 cCL("fltsm",   e000150, 2, (RF, RR),        rn_rd),
18054
 cCL("fltsz",   e000170, 2, (RF, RR),        rn_rd),
18055
 cCL("fltd",    e000190, 2, (RF, RR),        rn_rd),
18056
 cCL("fltdp",   e0001b0, 2, (RF, RR),        rn_rd),
18057
 cCL("fltdm",   e0001d0, 2, (RF, RR),        rn_rd),
18058
 cCL("fltdz",   e0001f0, 2, (RF, RR),        rn_rd),
18059
 cCL("flte",    e080110, 2, (RF, RR),        rn_rd),
18060
 cCL("fltep",   e080130, 2, (RF, RR),        rn_rd),
18061
 cCL("fltem",   e080150, 2, (RF, RR),        rn_rd),
18062
 cCL("fltez",   e080170, 2, (RF, RR),        rn_rd),
18063
 
18064
  /* The implementation of the FIX instruction is broken on some
     assemblers, in that it accepts a precision specifier as well as a
     rounding specifier, despite the fact that this is meaningless.
     To be more compatible, we accept it as well, though of course it
     does not set any bits.  */
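  /* For example (a sketch of the accepted-but-redundant forms), both of

	 fixz  r0, f1
	 fixsz r0, f1

     assemble to the same encoding in this table; the precision letter is
     parsed for compatibility but contributes no bits.  */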
18069
 cCE("fix",     e100110, 2, (RR, RF),        rd_rm),
18070
 cCL("fixp",    e100130, 2, (RR, RF),        rd_rm),
18071
 cCL("fixm",    e100150, 2, (RR, RF),        rd_rm),
18072
 cCL("fixz",    e100170, 2, (RR, RF),        rd_rm),
18073
 cCL("fixsp",   e100130, 2, (RR, RF),        rd_rm),
18074
 cCL("fixsm",   e100150, 2, (RR, RF),        rd_rm),
18075
 cCL("fixsz",   e100170, 2, (RR, RF),        rd_rm),
18076
 cCL("fixdp",   e100130, 2, (RR, RF),        rd_rm),
18077
 cCL("fixdm",   e100150, 2, (RR, RF),        rd_rm),
18078
 cCL("fixdz",   e100170, 2, (RR, RF),        rd_rm),
18079
 cCL("fixep",   e100130, 2, (RR, RF),        rd_rm),
18080
 cCL("fixem",   e100150, 2, (RR, RF),        rd_rm),
18081
 cCL("fixez",   e100170, 2, (RR, RF),        rd_rm),
18082
 
18083
  /* Instructions that were new with the real FPA, call them V2.  */
18084
#undef  ARM_VARIANT
18085
#define ARM_VARIANT  & fpu_fpa_ext_v2
18086
 
18087
 cCE("lfm",     c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18088
 cCL("lfmfd",   c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18089
 cCL("lfmea",   d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18090
 cCE("sfm",     c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18091
 cCL("sfmfd",   d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18092
 cCL("sfmea",   c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18093
 
18094
#undef  ARM_VARIANT
18095
#define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
18096
 
18097
  /* Moves and type conversions.  */
18098
 cCE("fcpys",   eb00a40, 2, (RVS, RVS),       vfp_sp_monadic),
18099
 cCE("fmrs",    e100a10, 2, (RR, RVS),        vfp_reg_from_sp),
18100
 cCE("fmsr",    e000a10, 2, (RVS, RR),        vfp_sp_from_reg),
18101
 cCE("fmstat",  ef1fa10, 0, (),                noargs),
18102
 cCE("vmrs",    ef10a10, 2, (APSR_RR, RVC),   vmrs),
18103
 cCE("vmsr",    ee10a10, 2, (RVC, RR),        vmsr),
18104
 cCE("fsitos",  eb80ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18105
 cCE("fuitos",  eb80a40, 2, (RVS, RVS),       vfp_sp_monadic),
18106
 cCE("ftosis",  ebd0a40, 2, (RVS, RVS),       vfp_sp_monadic),
18107
 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18108
 cCE("ftouis",  ebc0a40, 2, (RVS, RVS),       vfp_sp_monadic),
18109
 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18110
 cCE("fmrx",    ef00a10, 2, (RR, RVC),        rd_rn),
18111
 cCE("fmxr",    ee00a10, 2, (RVC, RR),        rn_rd),
18112
 
18113
  /* Memory operations.  */
18114
 cCE("flds",    d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
18115
 cCE("fsts",    d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
18116
 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18117
 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18118
 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18119
 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18120
 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18121
 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18122
 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18123
 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18124
 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18125
 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18126
 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18127
 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18128
 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18129
 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18130
 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18131
 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18132
 
18133
  /* Monadic operations.  */
18134
 cCE("fabss",   eb00ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18135
 cCE("fnegs",   eb10a40, 2, (RVS, RVS),       vfp_sp_monadic),
18136
 cCE("fsqrts",  eb10ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18137
 
18138
  /* Dyadic operations.  */
18139
 cCE("fadds",   e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18140
 cCE("fsubs",   e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18141
 cCE("fmuls",   e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18142
 cCE("fdivs",   e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18143
 cCE("fmacs",   e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18144
 cCE("fmscs",   e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18145
 cCE("fnmuls",  e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18146
 cCE("fnmacs",  e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18147
 cCE("fnmscs",  e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18148
 
18149
  /* Comparisons.  */
18150
 cCE("fcmps",   eb40a40, 2, (RVS, RVS),       vfp_sp_monadic),
18151
 cCE("fcmpzs",  eb50a40, 1, (RVS),            vfp_sp_compare_z),
18152
 cCE("fcmpes",  eb40ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18153
 cCE("fcmpezs", eb50ac0, 1, (RVS),            vfp_sp_compare_z),
18154
 
18155
 /* Double precision load/store are still present on single precision
    implementations.  */
18157
 cCE("fldd",    d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
18158
 cCE("fstd",    d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
18159
 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18160
 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18161
 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18162
 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18163
 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18164
 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18165
 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18166
 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18167
 
18168
#undef  ARM_VARIANT
18169
#define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
18170
 
18171
  /* Moves and type conversions.  */
18172
 cCE("fcpyd",   eb00b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
18173
 cCE("fcvtds",  eb70ac0, 2, (RVD, RVS),       vfp_dp_sp_cvt),
18174
 cCE("fcvtsd",  eb70bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18175
 cCE("fmdhr",   e200b10, 2, (RVD, RR),        vfp_dp_rn_rd),
18176
 cCE("fmdlr",   e000b10, 2, (RVD, RR),        vfp_dp_rn_rd),
18177
 cCE("fmrdh",   e300b10, 2, (RR, RVD),        vfp_dp_rd_rn),
18178
 cCE("fmrdl",   e100b10, 2, (RR, RVD),        vfp_dp_rd_rn),
18179
 cCE("fsitod",  eb80bc0, 2, (RVD, RVS),       vfp_dp_sp_cvt),
18180
 cCE("fuitod",  eb80b40, 2, (RVD, RVS),       vfp_dp_sp_cvt),
18181
 cCE("ftosid",  ebd0b40, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18182
 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18183
 cCE("ftouid",  ebc0b40, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18184
 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18185
 
18186
  /* Monadic operations.  */
18187
 cCE("fabsd",   eb00bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
18188
 cCE("fnegd",   eb10b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
18189
 cCE("fsqrtd",  eb10bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
18190
 
18191
  /* Dyadic operations.  */
18192
 cCE("faddd",   e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18193
 cCE("fsubd",   e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18194
 cCE("fmuld",   e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18195
 cCE("fdivd",   e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18196
 cCE("fmacd",   e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18197
 cCE("fmscd",   e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18198
 cCE("fnmuld",  e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18199
 cCE("fnmacd",  e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18200
 cCE("fnmscd",  e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18201
 
18202
  /* Comparisons.  */
18203
 cCE("fcmpd",   eb40b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
18204
 cCE("fcmpzd",  eb50b40, 1, (RVD),            vfp_dp_rd),
18205
 cCE("fcmped",  eb40bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
18206
 cCE("fcmpezd", eb50bc0, 1, (RVD),            vfp_dp_rd),
18207
 
18208
#undef  ARM_VARIANT
18209
#define ARM_VARIANT  & fpu_vfp_ext_v2
18210
 
18211
 cCE("fmsrr",   c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
18212
 cCE("fmrrs",   c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
18213
 cCE("fmdrr",   c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
18214
 cCE("fmrrd",   c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
18215
 
18216
/* Instructions which may belong to either the Neon or VFP instruction sets.
   Individual encoder functions perform additional architecture checks.  */
18218
#undef  ARM_VARIANT
18219
#define ARM_VARIANT    & fpu_vfp_ext_v1xd
18220
#undef  THUMB_VARIANT
18221
#define THUMB_VARIANT  & fpu_vfp_ext_v1xd
18222
 
18223
  /* These mnemonics are unique to VFP.  */
18224
 NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
18225
 NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
18226
 nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18227
 nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18228
 nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18229
 nCE(vcmp,      _vcmp,    2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
18230
 nCE(vcmpe,     _vcmpe,   2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
18231
 NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
18232
 NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
18233
 NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
18234
 
18235
  /* Mnemonics shared by Neon and VFP.  */
18236
 nCEF(vmul,     _vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
18237
 nCEF(vmla,     _vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18238
 nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18239
 
18240
 nCEF(vadd,     _vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18241
 nCEF(vsub,     _vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18242
 
18243
 NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18244
 NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18245
 
18246
 NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18247
 NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18248
 NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18249
 NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18250
 NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18251
 NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18252
 NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18253
 NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18254
 
 nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
 nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
18257
 nCEF(vcvtb,    _vcvt,   2, (RVS, RVS), neon_cvtb),
18258
 nCEF(vcvtt,    _vcvt,   2, (RVS, RVS), neon_cvtt),
18259
 
18260
 
18261
  /* NOTE: All VMOV encoding is special-cased!  */
18262
 NCE(vmov,      0,       1, (VMOV), neon_mov),
18263
 NCE(vmovq,     0,       1, (VMOV), neon_mov),
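  /* A few of the forms the single "vmov" entry has to disambiguate
     (illustrative only; the full set is handled by the neon_mov encoder):

	 vmov     r0, s0		@ VFP single to core register
	 vmov     d0, r0, r1		@ two core registers to a D register
	 vmov.i32 q0, #0		@ Neon vector immediate  */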
18264
 
18265
#undef  THUMB_VARIANT
18266
#define THUMB_VARIANT  & fpu_neon_ext_v1
18267
#undef  ARM_VARIANT
18268
#define ARM_VARIANT    & fpu_neon_ext_v1
18269
 
18270
  /* Data processing with three registers of the same length.  */
18271
  /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
18272
 NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
18273
 NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
18274
 NUF(vhadd,     0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18275
 NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
18276
 NUF(vrhadd,    0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18277
 NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
18278
 NUF(vhsub,     0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18279
 NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
18280
  /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
18281
 NUF(vqadd,     0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18282
 NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
18283
 NUF(vqsub,     0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18284
 NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
18285
 NUF(vrshl,     0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18286
 NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
18287
 NUF(vqrshl,    0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18288
 NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
18289
  /* If not immediate, fall back to neon_dyadic_i64_su.
     shl_imm should accept I8 I16 I32 I64,
     qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
18292
 nUF(vshl,      _vshl,    3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
18293
 nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl_imm),
18294
 nUF(vqshl,     _vqshl,   3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
18295
 nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl_imm),
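  /* Illustrative pair for the VSHL/VQSHL entries above: the immediate form
     is encoded here, the register form falls back as the comment says:

	 vshl.i32 d0, d0, #3	@ shift by immediate
	 vshl.s32 d0, d1, d2	@ shift by register; signedness matters  */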
18296
  /* Logic ops, types optional & ignored.  */
18297
 nUF(vand,      _vand,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18298
 nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18299
 nUF(vbic,      _vbic,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18300
 nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18301
 nUF(vorr,      _vorr,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18302
 nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18303
 nUF(vorn,      _vorn,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18304
 nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18305
 nUF(veor,      _veor,    3, (RNDQ, oRNDQ, RNDQ),      neon_logic),
18306
 nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
18307
  /* Bitfield ops, untyped.  */
18308
 NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18309
 NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
18310
 NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18311
 NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
18312
 NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18313
 NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
18314
  /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32.  */
18315
 nUF(vabd,      _vabd,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18316
 nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
18317
 nUF(vmax,      _vmax,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18318
 nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
18319
 nUF(vmin,      _vmin,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18320
 nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
18321
  /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
     back to neon_dyadic_if_su.  */
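  /* Sketch of the two shapes this covers (illustrative only):

	 vcge.s32 d0, d1, #0	@ compare against zero, immediate form
	 vcge.s32 d0, d1, d2	@ register form, routed to neon_dyadic_if_su  */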
18323
 nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18324
 nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
18325
 nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18326
 nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
18327
 nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18328
 nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
18329
 nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18330
 nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
18331
  /* Comparison. Type I8 I16 I32 F32.  */
18332
 nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
18333
 nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
18334
  /* As above, D registers only.  */
18335
 nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
18336
 nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
18337
  /* Int and float variants, signedness unimportant.  */
18338
 nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
18339
 nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
18340
 nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
18341
  /* Add/sub take types I8 I16 I32 I64 F32.  */
18342
 nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
18343
 nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
18344
  /* vtst takes sizes 8, 16, 32.  */
18345
 NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
18346
 NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
18347
  /* VMUL takes I8 I16 I32 F32 P8.  */
18348
 nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
18349
  /* VQD{R}MULH takes S16 S32.  */
18350
 nUF(vqdmulh,   _vqdmulh,  3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18351
 nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
18352
 nUF(vqrdmulh,  _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18353
 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
18354
 NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18355
 NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
18356
 NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18357
 NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
18358
 NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18359
 NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
18360
 NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18361
 NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
18362
 NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
18363
 NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
18364
 NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
18365
 NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
18366
 
18367
  /* Two address, int/float. Types S8 S16 S32 F32.  */
18368
 NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
18369
 NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
18370
 
18371
  /* Data processing with two registers and a shift amount.  */
18372
  /* Right shifts, and variants with rounding.
     Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
18374
 NUF(vshr,      0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18375
 NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
18376
 NUF(vrshr,     0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18377
 NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
18378
 NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
18379
 NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
18380
 NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
18381
 NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
18382
  /* Shift and insert. Sizes accepted 8 16 32 64.  */
18383
 NUF(vsli,      1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
18384
 NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
18385
 NUF(vsri,      1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
18386
 NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
18387
  /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
18388
 NUF(vqshlu,    1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
18389
 NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
18390
  /* Right shift immediate, saturating & narrowing, with rounding variants.
     Types accepted S16 S32 S64 U16 U32 U64.  */
18392
 NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18393
 NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18394
  /* As above, unsigned. Types accepted S16 S32 S64.  */
18395
 NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18396
 NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18397
  /* Right shift narrowing. Types accepted I16 I32 I64.  */
18398
 NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18399
 NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18400
  /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
18401
 nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
18402
  /* CVT with optional immediate for fixed-point variant.  */
18403
 nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
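  /* Hedged example of the fixed-point variant selected by the optional
     immediate operand above:

	 vcvt.s32.f32 q0, q0, #16	@ float to fixed, 16 fraction bits
	 vcvt.f32.s32 q0, q0, #16	@ and back again

     Without the immediate the same mnemonic is the plain
     integer<->float conversion.  */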
18404
 
18405
 nUF(vmvn,      _vmvn,    2, (RNDQ, RNDQ_Ibig), neon_mvn),
18406
 nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
18407
 
18408
  /* Data processing, three registers of different lengths.  */
18409
  /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
18410
 NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
18411
 NUF(vabdl,     0800700, 3, (RNQ, RND, RND),  neon_dyadic_long),
18412
 NUF(vaddl,     0800000, 3, (RNQ, RND, RND),  neon_dyadic_long),
18413
 NUF(vsubl,     0800200, 3, (RNQ, RND, RND),  neon_dyadic_long),
18414
  /* If not scalar, fall back to neon_dyadic_long.
     Vector types as above, scalar types S16 S32 U16 U32.  */
18416
 nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18417
 nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18418
  /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
18419
 NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18420
 NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18421
  /* Dyadic, narrowing insns. Types I16 I32 I64.  */
18422
 NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18423
 NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18424
 NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18425
 NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18426
  /* Saturating doubling multiplies. Types S16 S32.  */
18427
 nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18428
 nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18429
 nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18430
  /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
     S16 S32 U16 U32.  */
18432
 nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
18433
 
18434
  /* Extract. Size 8.  */
18435
 NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
18436
 NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
18437
 
18438
  /* Two registers, miscellaneous.  */
18439
  /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
18440
 NUF(vrev64,    1b00000, 2, (RNDQ, RNDQ),     neon_rev),
18441
 NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
18442
 NUF(vrev32,    1b00080, 2, (RNDQ, RNDQ),     neon_rev),
18443
 NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
18444
 NUF(vrev16,    1b00100, 2, (RNDQ, RNDQ),     neon_rev),
18445
 NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
18446
  /* Vector replicate. Sizes 8 16 32.  */
18447
 nCE(vdup,      _vdup,    2, (RNDQ, RR_RNSC),  neon_dup),
18448
 nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
18449
  /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
18450
 NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
18451
  /* VMOVN. Types I16 I32 I64.  */
18452
 nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
18453
  /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
18454
 nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
18455
  /* VQMOVUN. Types S16 S32 S64.  */
18456
 nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
18457
  /* VZIP / VUZP. Sizes 8 16 32.  */
18458
 NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
18459
 NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
18460
 NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
18461
 NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
18462
  /* VQABS / VQNEG. Types S8 S16 S32.  */
18463
 NUF(vqabs,     1b00700, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
18464
 NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
18465
 NUF(vqneg,     1b00780, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
18466
 NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
18467
  /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
18468
 NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
18469
 NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
18470
 NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
18471
 NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
18472
  /* Reciprocal estimates. Types U32 F32.  */
18473
 NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
18474
 NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
18475
 NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
18476
 NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
18477
  /* VCLS. Types S8 S16 S32.  */
18478
 NUF(vcls,      1b00400, 2, (RNDQ, RNDQ),     neon_cls),
18479
 NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
18480
  /* VCLZ. Types I8 I16 I32.  */
18481
 NUF(vclz,      1b00480, 2, (RNDQ, RNDQ),     neon_clz),
18482
 NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
18483
  /* VCNT. Size 8.  */
18484
 NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
18485
 NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
18486
  /* Two address, untyped.  */
18487
 NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
18488
 NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
18489
  /* VTRN. Sizes 8 16 32.  */
18490
 nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
18491
 nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
18492
 
18493
  /* Table lookup. Size 8.  */
18494
 NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18495
 NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18496
 
18497
#undef  THUMB_VARIANT
18498
#define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
18499
#undef  ARM_VARIANT
18500
#define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
18501
 
18502
  /* Neon element/structure load/store.  */
18503
 nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18504
 nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18505
 nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18506
 nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18507
 nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18508
 nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18509
 nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18510
 nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18511
 
18512
#undef  THUMB_VARIANT
18513
#define THUMB_VARIANT &fpu_vfp_ext_v3xd
18514
#undef ARM_VARIANT
18515
#define ARM_VARIANT &fpu_vfp_ext_v3xd
18516
 cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
18517
 cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18518
 cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18519
 cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18520
 cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18521
 cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18522
 cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18523
 cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18524
 cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18525
 
18526
#undef THUMB_VARIANT
18527
#define THUMB_VARIANT  & fpu_vfp_ext_v3
18528
#undef  ARM_VARIANT
18529
#define ARM_VARIANT    & fpu_vfp_ext_v3
18530
 
18531
 cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
18532
 cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18533
 cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18534
 cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18535
 cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18536
 cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18537
 cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18538
 cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18539
 cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18540
 
18541
#undef ARM_VARIANT
18542
#define ARM_VARIANT &fpu_vfp_ext_fma
18543
#undef THUMB_VARIANT
18544
#define THUMB_VARIANT &fpu_vfp_ext_fma
18545
 /* Mnemonics shared by Neon and VFP.  These are included in the
18546
    VFP FMA variant; NEON and VFP FMA always include the NEON
18547
    FMA instructions.  */
18548
 nCEF(vfma,     _vfma,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18549
 nCEF(vfms,     _vfms,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18550
 /* ffmas/ffnmas/ffmad/ffnmad are dummy mnemonics to satisfy gas;
18551
    the v form should always be used.  */
18552
 cCE("ffmas",   ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18553
 cCE("ffnmas",  ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18554
 cCE("ffmad",   ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18555
 cCE("ffnmad",  ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18556
 nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18557
 nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18558
 
18559
#undef THUMB_VARIANT
18560
#undef  ARM_VARIANT
18561
#define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
18562
 
18563
 cCE("mia",     e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18564
 cCE("miaph",   e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18565
 cCE("miabb",   e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18566
 cCE("miabt",   e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18567
 cCE("miatb",   e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18568
 cCE("miatt",   e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18569
 cCE("mar",     c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
18570
 cCE("mra",     c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
18571
 
18572
#undef  ARM_VARIANT
18573
#define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
18574
 
18575
 cCE("tandcb",  e13f130, 1, (RR),                   iwmmxt_tandorc),
18576
 cCE("tandch",  e53f130, 1, (RR),                   iwmmxt_tandorc),
18577
 cCE("tandcw",  e93f130, 1, (RR),                   iwmmxt_tandorc),
18578
 cCE("tbcstb",  e400010, 2, (RIWR, RR),             rn_rd),
18579
 cCE("tbcsth",  e400050, 2, (RIWR, RR),             rn_rd),
18580
 cCE("tbcstw",  e400090, 2, (RIWR, RR),             rn_rd),
18581
 cCE("textrcb", e130170, 2, (RR, I7),               iwmmxt_textrc),
18582
 cCE("textrch", e530170, 2, (RR, I7),               iwmmxt_textrc),
18583
 cCE("textrcw", e930170, 2, (RR, I7),               iwmmxt_textrc),
18584
 cCE("textrmub",        e100070, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18585
 cCE("textrmuh",        e500070, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18586
 cCE("textrmuw",        e900070, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18587
 cCE("textrmsb",        e100078, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18588
 cCE("textrmsh",        e500078, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18589
 cCE("textrmsw",        e900078, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18590
 cCE("tinsrb",  e600010, 3, (RIWR, RR, I7),         iwmmxt_tinsr),
18591
 cCE("tinsrh",  e600050, 3, (RIWR, RR, I7),         iwmmxt_tinsr),
18592
 cCE("tinsrw",  e600090, 3, (RIWR, RR, I7),         iwmmxt_tinsr),
18593
 cCE("tmcr",    e000110, 2, (RIWC_RIWG, RR),        rn_rd),
18594
 cCE("tmcrr",   c400000, 3, (RIWR, RR, RR),         rm_rd_rn),
18595
 cCE("tmia",    e200010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18596
 cCE("tmiaph",  e280010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18597
 cCE("tmiabb",  e2c0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18598
 cCE("tmiabt",  e2d0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18599
 cCE("tmiatb",  e2e0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18600
 cCE("tmiatt",  e2f0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18601
 cCE("tmovmskb",        e100030, 2, (RR, RIWR),             rd_rn),
18602
 cCE("tmovmskh",        e500030, 2, (RR, RIWR),             rd_rn),
18603
 cCE("tmovmskw",        e900030, 2, (RR, RIWR),             rd_rn),
18604
 cCE("tmrc",    e100110, 2, (RR, RIWC_RIWG),        rd_rn),
18605
 cCE("tmrrc",   c500000, 3, (RR, RR, RIWR),         rd_rn_rm),
18606
 cCE("torcb",   e13f150, 1, (RR),                   iwmmxt_tandorc),
18607
 cCE("torch",   e53f150, 1, (RR),                   iwmmxt_tandorc),
18608
 cCE("torcw",   e93f150, 1, (RR),                   iwmmxt_tandorc),
18609
 cCE("waccb",   e0001c0, 2, (RIWR, RIWR),           rd_rn),
18610
 cCE("wacch",   e4001c0, 2, (RIWR, RIWR),           rd_rn),
18611
 cCE("waccw",   e8001c0, 2, (RIWR, RIWR),           rd_rn),
18612
 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18613
 cCE("waddb",   e000180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18614
 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18615
 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18616
 cCE("waddh",   e400180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18617
 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18618
 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18619
 cCE("waddw",   e800180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18620
 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18621
 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
18622
 cCE("walignr0",        e800020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18623
 cCE("walignr1",        e900020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18624
 cCE("walignr2",        ea00020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18625
 cCE("walignr3",        eb00020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18626
 cCE("wand",    e200000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18627
 cCE("wandn",   e300000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18628
 cCE("wavg2b",  e800000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18629
 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18630
 cCE("wavg2h",  ec00000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18631
 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18632
 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18633
 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18634
 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18635
 cCE("wcmpgtub",        e100060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18636
 cCE("wcmpgtuh",        e500060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18637
 cCE("wcmpgtuw",        e900060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18638
 cCE("wcmpgtsb",        e300060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18639
 cCE("wcmpgtsh",        e700060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18640
 cCE("wcmpgtsw",        eb00060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18641
 cCE("wldrb",   c100000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18642
 cCE("wldrh",   c500000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18643
 cCE("wldrw",   c100100, 2, (RIWR_RIWC, ADDR),      iwmmxt_wldstw),
18644
 cCE("wldrd",   c500100, 2, (RIWR, ADDR),           iwmmxt_wldstd),
18645
 cCE("wmacs",   e600100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18646
 cCE("wmacsz",  e700100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18647
 cCE("wmacu",   e400100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18648
 cCE("wmacuz",  e500100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18649
 cCE("wmadds",  ea00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18650
 cCE("wmaddu",  e800100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18651
 cCE("wmaxsb",  e200160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18652
 cCE("wmaxsh",  e600160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18653
 cCE("wmaxsw",  ea00160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18654
 cCE("wmaxub",  e000160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18655
 cCE("wmaxuh",  e400160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18656
 cCE("wmaxuw",  e800160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18657
 cCE("wminsb",  e300160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18658
 cCE("wminsh",  e700160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18659
 cCE("wminsw",  eb00160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18660
 cCE("wminub",  e100160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18661
 cCE("wminuh",  e500160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18662
 cCE("wminuw",  e900160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18663
 cCE("wmov",    e000000, 2, (RIWR, RIWR),           iwmmxt_wmov),
18664
 cCE("wmulsm",  e300100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18665
 cCE("wmulsl",  e200100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18666
 cCE("wmulum",  e100100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18667
 cCE("wmulul",  e000100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18668
 cCE("wor",     e000000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18669
 cCE("wpackhss",        e700080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18670
 cCE("wpackhus",        e500080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18671
 cCE("wpackwss",        eb00080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18672
 cCE("wpackwus",        e900080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18673
 cCE("wpackdss",        ef00080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18674
 cCE("wpackdus",        ed00080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18675
 cCE("wrorh",   e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18676
 cCE("wrorhg",  e700148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18677
 cCE("wrorw",   eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18678
 cCE("wrorwg",  eb00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18679
 cCE("wrord",   ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18680
 cCE("wrordg",  ef00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18681
 cCE("wsadb",   e000120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18682
 cCE("wsadbz",  e100120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18683
 cCE("wsadh",   e400120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18684
 cCE("wsadhz",  e500120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18685
 cCE("wshufh",  e0001e0, 3, (RIWR, RIWR, I255),     iwmmxt_wshufh),
18686
 cCE("wsllh",   e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18687
 cCE("wsllhg",  e500148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18688
 cCE("wsllw",   e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18689
 cCE("wsllwg",  e900148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18690
 cCE("wslld",   ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18691
 cCE("wslldg",  ed00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18692
 cCE("wsrah",   e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18693
 cCE("wsrahg",  e400148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18694
 cCE("wsraw",   e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18695
 cCE("wsrawg",  e800148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18696
 cCE("wsrad",   ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18697
 cCE("wsradg",  ec00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18698
 cCE("wsrlh",   e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18699
 cCE("wsrlhg",  e600148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18700
 cCE("wsrlw",   ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18701
 cCE("wsrlwg",  ea00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18702
 cCE("wsrld",   ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18703
 cCE("wsrldg",  ee00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18704
 cCE("wstrb",   c000000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18705
 cCE("wstrh",   c400000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18706
 cCE("wstrw",   c000100, 2, (RIWR_RIWC, ADDR),      iwmmxt_wldstw),
18707
 cCE("wstrd",   c400100, 2, (RIWR, ADDR),           iwmmxt_wldstd),
18708
 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18709
 cCE("wsubb",   e0001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18710
 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18711
 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18712
 cCE("wsubh",   e4001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18713
 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18714
 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18715
 cCE("wsubw",   e8001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18716
 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18717
 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),         rd_rn),
18718
 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),         rd_rn),
18719
 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),         rd_rn),
18720
 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),         rd_rn),
18721
 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),         rd_rn),
18722
 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),         rd_rn),
18723
 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18724
 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18725
 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18726
 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),         rd_rn),
18727
 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),         rd_rn),
18728
 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),         rd_rn),
18729
 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),         rd_rn),
18730
 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),         rd_rn),
18731
 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),         rd_rn),
18732
 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18733
 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18734
 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18735
 cCE("wxor",    e100000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18736
 cCE("wzero",   e300000, 1, (RIWR),                 iwmmxt_wzero),
18737
 
18738
#undef  ARM_VARIANT
18739
#define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
18740
 
18741
 cCE("torvscb",   e12f190, 1, (RR),                 iwmmxt_tandorc),
18742
 cCE("torvsch",   e52f190, 1, (RR),                 iwmmxt_tandorc),
18743
 cCE("torvscw",   e92f190, 1, (RR),                 iwmmxt_tandorc),
18744
 cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
18745
 cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
18746
 cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
18747
 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18748
 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18749
 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18750
 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18751
 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18752
 cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18753
 cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18754
 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18755
 cCE("wavg4",   e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18756
 cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18757
 cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18758
 cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18759
 cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18760
 cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18761
 cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
18762
 cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18763
 cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18764
 cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18765
 cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18766
 cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18767
 cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18768
 cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18769
 cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18770
 cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18771
 cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18772
 cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18773
 cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18774
 cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18775
 cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18776
 cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18777
 cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18778
 cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18779
 cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18780
 cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18781
 cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18782
 cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18783
 cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18784
 cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18785
 cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18786
 cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18787
 cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18788
 cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18789
 cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18790
 cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18791
 cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18792
 cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18793
 cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18794
 cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18795
 cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18796
 cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18797
 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18798
 
18799
#undef  ARM_VARIANT
18800
#define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
18801
 
18802
 cCE("cfldrs",  c100400, 2, (RMF, ADDRGLDC),          rd_cpaddr),
18803
 cCE("cfldrd",  c500400, 2, (RMD, ADDRGLDC),          rd_cpaddr),
18804
 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC),         rd_cpaddr),
18805
 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC),         rd_cpaddr),
18806
 cCE("cfstrs",  c000400, 2, (RMF, ADDRGLDC),          rd_cpaddr),
18807
 cCE("cfstrd",  c400400, 2, (RMD, ADDRGLDC),          rd_cpaddr),
18808
 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC),         rd_cpaddr),
18809
 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC),         rd_cpaddr),
18810
 cCE("cfmvsr",  e000450, 2, (RMF, RR),                rn_rd),
18811
 cCE("cfmvrs",  e100450, 2, (RR, RMF),                rd_rn),
18812
 cCE("cfmvdlr", e000410, 2, (RMD, RR),                rn_rd),
18813
 cCE("cfmvrdl", e100410, 2, (RR, RMD),                rd_rn),
18814
 cCE("cfmvdhr", e000430, 2, (RMD, RR),                rn_rd),
18815
 cCE("cfmvrdh", e100430, 2, (RR, RMD),                rd_rn),
18816
 cCE("cfmv64lr",        e000510, 2, (RMDX, RR),               rn_rd),
18817
 cCE("cfmvr64l",        e100510, 2, (RR, RMDX),               rd_rn),
18818
 cCE("cfmv64hr",        e000530, 2, (RMDX, RR),               rn_rd),
18819
 cCE("cfmvr64h",        e100530, 2, (RR, RMDX),               rd_rn),
18820
 cCE("cfmval32",        e200440, 2, (RMAX, RMFX),             rd_rn),
18821
 cCE("cfmv32al",        e100440, 2, (RMFX, RMAX),             rd_rn),
18822
 cCE("cfmvam32",        e200460, 2, (RMAX, RMFX),             rd_rn),
18823
 cCE("cfmv32am",        e100460, 2, (RMFX, RMAX),             rd_rn),
18824
 cCE("cfmvah32",        e200480, 2, (RMAX, RMFX),             rd_rn),
18825
 cCE("cfmv32ah",        e100480, 2, (RMFX, RMAX),             rd_rn),
18826
 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX),             rd_rn),
18827
 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX),             rd_rn),
18828
 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX),             rd_rn),
18829
 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX),             rd_rn),
18830
 cCE("cfmvsc32",        e2004e0, 2, (RMDS, RMDX),             mav_dspsc),
18831
 cCE("cfmv32sc",        e1004e0, 2, (RMDX, RMDS),             rd),
18832
 cCE("cfcpys",  e000400, 2, (RMF, RMF),               rd_rn),
18833
 cCE("cfcpyd",  e000420, 2, (RMD, RMD),               rd_rn),
18834
 cCE("cfcvtsd", e000460, 2, (RMD, RMF),               rd_rn),
18835
 cCE("cfcvtds", e000440, 2, (RMF, RMD),               rd_rn),
18836
 cCE("cfcvt32s",        e000480, 2, (RMF, RMFX),              rd_rn),
18837
 cCE("cfcvt32d",        e0004a0, 2, (RMD, RMFX),              rd_rn),
18838
 cCE("cfcvt64s",        e0004c0, 2, (RMF, RMDX),              rd_rn),
18839
 cCE("cfcvt64d",        e0004e0, 2, (RMD, RMDX),              rd_rn),
18840
 cCE("cfcvts32",        e100580, 2, (RMFX, RMF),              rd_rn),
18841
 cCE("cfcvtd32",        e1005a0, 2, (RMFX, RMD),              rd_rn),
18842
 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),            rd_rn),
18843
 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),            rd_rn),
18844
 cCE("cfrshl32",        e000550, 3, (RMFX, RMFX, RR),         mav_triple),
18845
 cCE("cfrshl64",        e000570, 3, (RMDX, RMDX, RR),         mav_triple),
18846
 cCE("cfsh32",  e000500, 3, (RMFX, RMFX, I63s),       mav_shift),
18847
 cCE("cfsh64",  e200500, 3, (RMDX, RMDX, I63s),       mav_shift),
18848
 cCE("cfcmps",  e100490, 3, (RR, RMF, RMF),           rd_rn_rm),
18849
 cCE("cfcmpd",  e1004b0, 3, (RR, RMD, RMD),           rd_rn_rm),
18850
 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX),         rd_rn_rm),
18851
 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX),         rd_rn_rm),
18852
 cCE("cfabss",  e300400, 2, (RMF, RMF),               rd_rn),
18853
 cCE("cfabsd",  e300420, 2, (RMD, RMD),               rd_rn),
18854
 cCE("cfnegs",  e300440, 2, (RMF, RMF),               rd_rn),
18855
 cCE("cfnegd",  e300460, 2, (RMD, RMD),               rd_rn),
18856
 cCE("cfadds",  e300480, 3, (RMF, RMF, RMF),          rd_rn_rm),
18857
 cCE("cfaddd",  e3004a0, 3, (RMD, RMD, RMD),          rd_rn_rm),
18858
 cCE("cfsubs",  e3004c0, 3, (RMF, RMF, RMF),          rd_rn_rm),
18859
 cCE("cfsubd",  e3004e0, 3, (RMD, RMD, RMD),          rd_rn_rm),
18860
 cCE("cfmuls",  e100400, 3, (RMF, RMF, RMF),          rd_rn_rm),
18861
 cCE("cfmuld",  e100420, 3, (RMD, RMD, RMD),          rd_rn_rm),
18862
 cCE("cfabs32", e300500, 2, (RMFX, RMFX),             rd_rn),
18863
 cCE("cfabs64", e300520, 2, (RMDX, RMDX),             rd_rn),
18864
 cCE("cfneg32", e300540, 2, (RMFX, RMFX),             rd_rn),
18865
 cCE("cfneg64", e300560, 2, (RMDX, RMDX),             rd_rn),
18866
 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18867
 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX),       rd_rn_rm),
18868
 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18869
 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX),       rd_rn_rm),
18870
 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18871
 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX),       rd_rn_rm),
18872
 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18873
 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18874
 cCE("cfmadd32",        e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18875
 cCE("cfmsub32",        e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18876
 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18877
 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18878
};
18879
#undef ARM_VARIANT
18880
#undef THUMB_VARIANT
18881
#undef TCE
18882
#undef TCM
18883
#undef TUE
18884
#undef TUF
18885
#undef TCC
18886
#undef cCE
18887
#undef cCL
18888
#undef C3E
18889
#undef CE
18890
#undef CM
18891
#undef UE
18892
#undef UF
18893
#undef UT
18894
#undef NUF
18895
#undef nUF
18896
#undef NCE
18897
#undef nCE
18898
#undef OPS0
18899
#undef OPS1
18900
#undef OPS2
18901
#undef OPS3
18902
#undef OPS4
18903
#undef OPS5
18904
#undef OPS6
18905
#undef do_0
18906
 
18907
/* MD interface: bits in the object file.  */
18908
 
18909
/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
18910
   for use in the object file, and store them in the array pointed to by buf.
18911
   This knows about the endian-ness of the target machine and does
18912
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte)
18913
   2 (short) and 4 (long).  Floating point numbers are put out as a series of
18914
   LITTLENUMS (shorts, here at least).  */
18915
 
18916
void
18917
md_number_to_chars (char * buf, valueT val, int n)
18918
{
18919
  if (target_big_endian)
18920
    number_to_chars_bigendian (buf, val, n);
18921
  else
18922
    number_to_chars_littleendian (buf, val, n);
18923
}
18924
 
18925
static valueT
18926
md_chars_to_number (char * buf, int n)
18927
{
18928
  valueT result = 0;
18929
  unsigned char * where = (unsigned char *) buf;
18930
 
18931
  if (target_big_endian)
18932
    {
18933
      while (n--)
18934
        {
18935
          result <<= 8;
18936
          result |= (*where++ & 255);
18937
        }
18938
    }
18939
  else
18940
    {
18941
      while (n--)
18942
        {
18943
          result <<= 8;
18944
          result |= (where[n] & 255);
18945
        }
18946
    }
18947
 
18948
  return result;
18949
}
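 
/* Illustrative sketch (not part of the assembler): the two routines above
   are inverses of each other for a given target endianness.  Assuming a
   little-endian target, a 32-bit value is laid out least significant byte
   first and read back the same way.  */
#if 0
{
  char buf[4];
  valueT val;

  md_number_to_chars (buf, 0x12345678, 4); /* buf holds 78 56 34 12 (LE).  */
  val = md_chars_to_number (buf, 4);       /* val is 0x12345678 again.  */
}
#endif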
18950
 
18951
/* MD interface: Sections.  */
18952
 
18953
/* Estimate the size of a frag before relaxing.  Assume everything fits in
18954
   2 bytes.  */
18955
 
18956
int
18957
md_estimate_size_before_relax (fragS * fragp,
18958
                               segT    segtype ATTRIBUTE_UNUSED)
18959
{
18960
  fragp->fr_var = 2;
18961
  return 2;
18962
}
18963
 
18964
/* Convert a machine dependent frag.  */
18965
 
18966
void
18967
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
18968
{
18969
  unsigned long insn;
18970
  unsigned long old_op;
18971
  char *buf;
18972
  expressionS exp;
18973
  fixS *fixp;
18974
  int reloc_type;
18975
  int pc_rel;
18976
  int opcode;
18977
 
18978
  buf = fragp->fr_literal + fragp->fr_fix;
18979
 
18980
  old_op = bfd_get_16(abfd, buf);
18981
  if (fragp->fr_symbol)
18982
    {
18983
      exp.X_op = O_symbol;
18984
      exp.X_add_symbol = fragp->fr_symbol;
18985
    }
18986
  else
18987
    {
18988
      exp.X_op = O_constant;
18989
    }
18990
  exp.X_add_number = fragp->fr_offset;
18991
  opcode = fragp->fr_subtype;
18992
  switch (opcode)
18993
    {
18994
    case T_MNEM_ldr_pc:
18995
    case T_MNEM_ldr_pc2:
18996
    case T_MNEM_ldr_sp:
18997
    case T_MNEM_str_sp:
18998
    case T_MNEM_ldr:
18999
    case T_MNEM_ldrb:
19000
    case T_MNEM_ldrh:
19001
    case T_MNEM_str:
19002
    case T_MNEM_strb:
19003
    case T_MNEM_strh:
19004
      if (fragp->fr_var == 4)
19005
        {
19006
          insn = THUMB_OP32 (opcode);
19007
          if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
19008
            {
19009
              insn |= (old_op & 0x700) << 4;
19010
            }
19011
          else
19012
            {
19013
              insn |= (old_op & 7) << 12;
19014
              insn |= (old_op & 0x38) << 13;
19015
            }
19016
          insn |= 0x00000c00;
19017
          put_thumb32_insn (buf, insn);
19018
          reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
19019
        }
19020
      else
19021
        {
19022
          reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
19023
        }
19024
      pc_rel = (opcode == T_MNEM_ldr_pc2);
19025
      break;
19026
    case T_MNEM_adr:
19027
      if (fragp->fr_var == 4)
19028
        {
19029
          insn = THUMB_OP32 (opcode);
19030
          insn |= (old_op & 0xf0) << 4;
19031
          put_thumb32_insn (buf, insn);
19032
          reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
19033
        }
19034
      else
19035
        {
19036
          reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19037
          exp.X_add_number -= 4;
19038
        }
19039
      pc_rel = 1;
19040
      break;
19041
    case T_MNEM_mov:
19042
    case T_MNEM_movs:
19043
    case T_MNEM_cmp:
19044
    case T_MNEM_cmn:
19045
      if (fragp->fr_var == 4)
19046
        {
19047
          int r0off = (opcode == T_MNEM_mov
19048
                       || opcode == T_MNEM_movs) ? 0 : 8;
19049
          insn = THUMB_OP32 (opcode);
19050
          insn = (insn & 0xe1ffffff) | 0x10000000;
19051
          insn |= (old_op & 0x700) << r0off;
19052
          put_thumb32_insn (buf, insn);
19053
          reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19054
        }
19055
      else
19056
        {
19057
          reloc_type = BFD_RELOC_ARM_THUMB_IMM;
19058
        }
19059
      pc_rel = 0;
19060
      break;
19061
    case T_MNEM_b:
19062
      if (fragp->fr_var == 4)
19063
        {
19064
          insn = THUMB_OP32(opcode);
19065
          put_thumb32_insn (buf, insn);
19066
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
19067
        }
19068
      else
19069
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
19070
      pc_rel = 1;
19071
      break;
19072
    case T_MNEM_bcond:
19073
      if (fragp->fr_var == 4)
19074
        {
19075
          insn = THUMB_OP32(opcode);
19076
          insn |= (old_op & 0xf00) << 14;
19077
          put_thumb32_insn (buf, insn);
19078
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
19079
        }
19080
      else
19081
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
19082
      pc_rel = 1;
19083
      break;
19084
    case T_MNEM_add_sp:
19085
    case T_MNEM_add_pc:
19086
    case T_MNEM_inc_sp:
19087
    case T_MNEM_dec_sp:
19088
      if (fragp->fr_var == 4)
19089
        {
19090
          /* ??? Choose between add and addw.  */
19091
          insn = THUMB_OP32 (opcode);
19092
          insn |= (old_op & 0xf0) << 4;
19093
          put_thumb32_insn (buf, insn);
19094
          if (opcode == T_MNEM_add_pc)
19095
            reloc_type = BFD_RELOC_ARM_T32_IMM12;
19096
          else
19097
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19098
        }
19099
      else
19100
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19101
      pc_rel = 0;
19102
      break;
19103
 
19104
    case T_MNEM_addi:
19105
    case T_MNEM_addis:
19106
    case T_MNEM_subi:
19107
    case T_MNEM_subis:
19108
      if (fragp->fr_var == 4)
19109
        {
19110
          insn = THUMB_OP32 (opcode);
19111
          insn |= (old_op & 0xf0) << 4;
19112
          insn |= (old_op & 0xf) << 16;
19113
          put_thumb32_insn (buf, insn);
19114
          if (insn & (1 << 20))
19115
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19116
          else
19117
            reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19118
        }
19119
      else
19120
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19121
      pc_rel = 0;
19122
      break;
19123
    default:
19124
      abort ();
19125
    }
19126
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
19127
                      (enum bfd_reloc_code_real) reloc_type);
19128
  fixp->fx_file = fragp->fr_file;
19129
  fixp->fx_line = fragp->fr_line;
19130
  fragp->fr_fix += fragp->fr_var;
19131
}
19132
 
19133
/* Return the size of a relaxable immediate operand instruction.
19134
   SHIFT and SIZE specify the form of the allowable immediate.  */
19135
static int
19136
relax_immediate (fragS *fragp, int size, int shift)
19137
{
19138
  offsetT offset;
19139
  offsetT mask;
19140
  offsetT low;
19141
 
19142
  /* ??? Should be able to do better than this.  */
19143
  if (fragp->fr_symbol)
19144
    return 4;
19145
 
19146
  low = (1 << shift) - 1;
19147
  mask = (1 << (shift + size)) - (1 << shift);
19148
  offset = fragp->fr_offset;
19149
  /* Force misaligned offsets to 32-bit variant.  */
19150
  if (offset & low)
19151
    return 4;
19152
  if (offset & ~mask)
19153
    return 4;
19154
  return 2;
19155
}
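 
/* Worked example (illustrative only): a narrow Thumb "ldr Rt, [Rn, #imm]"
   has a 5-bit immediate scaled by 4, i.e. SIZE == 5 and SHIFT == 2.  Then
   low == 3 and mask == 0x7c, so word-aligned offsets from 0 to 124 keep
   the 2-byte encoding, while a misaligned offset such as 6, or anything
   larger than 124, forces the 4-byte Thumb-2 encoding.  */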
19156
 
19157
/* Get the address of a symbol during relaxation.  */
19158
static addressT
19159
relaxed_symbol_addr (fragS *fragp, long stretch)
19160
{
19161
  fragS *sym_frag;
19162
  addressT addr;
19163
  symbolS *sym;
19164
 
19165
  sym = fragp->fr_symbol;
19166
  sym_frag = symbol_get_frag (sym);
19167
  know (S_GET_SEGMENT (sym) != absolute_section
19168
        || sym_frag == &zero_address_frag);
19169
  addr = S_GET_VALUE (sym) + fragp->fr_offset;
19170
 
19171
  /* If frag has yet to be reached on this pass, assume it will
19172
     move by STRETCH just as we did.  If this is not so, it will
19173
     be because some frag in between grows, and that will force
19174
     another pass.  */
19175
 
19176
  if (stretch != 0
19177
      && sym_frag->relax_marker != fragp->relax_marker)
19178
    {
19179
      fragS *f;
19180
 
19181
       /* Adjust stretch for any alignment frag.  Note that if we have
19182
         been expanding the earlier code, the symbol may be
19183
         defined in what appears to be an earlier frag.  FIXME:
19184
         This doesn't handle the fr_subtype field, which specifies
19185
         a maximum number of bytes to skip when doing an
19186
         alignment.  */
19187
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
19188
        {
19189
          if (f->fr_type == rs_align || f->fr_type == rs_align_code)
19190
            {
19191
              if (stretch < 0)
19192
                stretch = - ((- stretch)
19193
                             & ~ ((1 << (int) f->fr_offset) - 1));
19194
              else
19195
                stretch &= ~ ((1 << (int) f->fr_offset) - 1);
19196
              if (stretch == 0)
19197
                break;
19198
            }
19199
        }
19200
      if (f != NULL)
19201
        addr += stretch;
19202
    }
19203
 
19204
  return addr;
19205
}
19206
 
19207
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
19208
   load.  */
19209
static int
19210
relax_adr (fragS *fragp, asection *sec, long stretch)
19211
{
19212
  addressT addr;
19213
  offsetT val;
19214
 
19215
  /* Assume worst case for symbols not known to be in the same section.  */
19216
  if (fragp->fr_symbol == NULL
19217
      || !S_IS_DEFINED (fragp->fr_symbol)
19218
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
19219
      || S_IS_WEAK (fragp->fr_symbol))
19220
    return 4;
19221
 
19222
  val = relaxed_symbol_addr (fragp, stretch);
19223
  addr = fragp->fr_address + fragp->fr_fix;
19224
  addr = (addr + 4) & ~3;
19225
  /* Force misaligned targets to 32-bit variant.  */
19226
  if (val & 3)
19227
    return 4;
19228
  val -= addr;
19229
  if (val < 0 || val > 1020)
19230
    return 4;
19231
  return 2;
19232
}
19233
 
19234
/* Return the size of a relaxable add/sub immediate instruction.  */
19235
static int
19236
relax_addsub (fragS *fragp, asection *sec)
19237
{
19238
  char *buf;
19239
  int op;
19240
 
19241
  buf = fragp->fr_literal + fragp->fr_fix;
19242
  op = bfd_get_16(sec->owner, buf);
19243
  if ((op & 0xf) == ((op >> 4) & 0xf))
19244
    return relax_immediate (fragp, 8, 0);
19245
  else
19246
    return relax_immediate (fragp, 3, 0);
19247
}
19248
 
19249
 
19250
/* Return the size of a relaxable branch instruction.  BITS is the
19251
   size of the offset field in the narrow instruction.  */
19252
 
19253
static int
19254
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
19255
{
19256
  addressT addr;
19257
  offsetT val;
19258
  offsetT limit;
19259
 
19260
  /* Assume worst case for symbols not known to be in the same section.  */
19261
  if (!S_IS_DEFINED (fragp->fr_symbol)
19262
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
19263
      || S_IS_WEAK (fragp->fr_symbol))
19264
    return 4;
19265
 
19266
#ifdef OBJ_ELF
19267
  if (S_IS_DEFINED (fragp->fr_symbol)
19268
      && ARM_IS_FUNC (fragp->fr_symbol))
19269
      return 4;
19270
 
19271
  /* PR 12532.  Global symbols with default visibility might
19272
     be preempted, so do not relax relocations to them.  */
19273
  if ((ELF_ST_VISIBILITY (S_GET_OTHER (fragp->fr_symbol)) == STV_DEFAULT)
19274
      && (! S_IS_LOCAL (fragp->fr_symbol)))
19275
    return 4;
19276
#endif
19277
 
19278
  val = relaxed_symbol_addr (fragp, stretch);
19279
  addr = fragp->fr_address + fragp->fr_fix + 4;
19280
  val -= addr;
19281
 
19282
   /* The encoded offset is a signed value scaled by 2, so a BITS-bit field
      reaches roughly +/- (1 << BITS) bytes.  */
19283
  limit = 1 << bits;
19284
  if (val >= limit || val < -limit)
19285
    return 4;
19286
  return 2;
19287
}
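 
/* Worked example (illustrative only): for an unconditional "b" the narrow
   encoding has an 11-bit offset field (BITS == 11), so the 2-byte form is
   kept only while the target stays within roughly +/- 2 KiB of the branch;
   a conditional branch (BITS == 8) must stay within roughly +/- 256 bytes.
   Anything further away, or any target that might be preempted, relaxes to
   the 4-byte Thumb-2 branch.  */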
19288
 
19289
 
19290
/* Relax a machine dependent frag.  This returns the amount by which
19291
   the current size of the frag should change.  */
19292
 
19293
int
19294
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
19295
{
19296
  int oldsize;
19297
  int newsize;
19298
 
19299
  oldsize = fragp->fr_var;
19300
  switch (fragp->fr_subtype)
19301
    {
19302
    case T_MNEM_ldr_pc2:
19303
      newsize = relax_adr (fragp, sec, stretch);
19304
      break;
19305
    case T_MNEM_ldr_pc:
19306
    case T_MNEM_ldr_sp:
19307
    case T_MNEM_str_sp:
19308
      newsize = relax_immediate (fragp, 8, 2);
19309
      break;
19310
    case T_MNEM_ldr:
19311
    case T_MNEM_str:
19312
      newsize = relax_immediate (fragp, 5, 2);
19313
      break;
19314
    case T_MNEM_ldrh:
19315
    case T_MNEM_strh:
19316
      newsize = relax_immediate (fragp, 5, 1);
19317
      break;
19318
    case T_MNEM_ldrb:
19319
    case T_MNEM_strb:
19320
      newsize = relax_immediate (fragp, 5, 0);
19321
      break;
19322
    case T_MNEM_adr:
19323
      newsize = relax_adr (fragp, sec, stretch);
19324
      break;
19325
    case T_MNEM_mov:
19326
    case T_MNEM_movs:
19327
    case T_MNEM_cmp:
19328
    case T_MNEM_cmn:
19329
      newsize = relax_immediate (fragp, 8, 0);
19330
      break;
19331
    case T_MNEM_b:
19332
      newsize = relax_branch (fragp, sec, 11, stretch);
19333
      break;
19334
    case T_MNEM_bcond:
19335
      newsize = relax_branch (fragp, sec, 8, stretch);
19336
      break;
19337
    case T_MNEM_add_sp:
19338
    case T_MNEM_add_pc:
19339
      newsize = relax_immediate (fragp, 8, 2);
19340
      break;
19341
    case T_MNEM_inc_sp:
19342
    case T_MNEM_dec_sp:
19343
      newsize = relax_immediate (fragp, 7, 2);
19344
      break;
19345
    case T_MNEM_addi:
19346
    case T_MNEM_addis:
19347
    case T_MNEM_subi:
19348
    case T_MNEM_subis:
19349
      newsize = relax_addsub (fragp, sec);
19350
      break;
19351
    default:
19352
      abort ();
19353
    }
19354
 
19355
  fragp->fr_var = newsize;
19356
  /* Freeze wide instructions that are at or before the same location as
19357
     in the previous pass.  This avoids infinite loops.
19358
     Don't freeze them unconditionally because targets may be artificially
19359
     misaligned by the expansion of preceding frags.  */
19360
  if (stretch <= 0 && newsize > 2)
19361
    {
19362
      md_convert_frag (sec->owner, sec, fragp);
19363
      frag_wane (fragp);
19364
    }
19365
 
19366
  return newsize - oldsize;
19367
}
19368
 
19369
/* Round up a section size to the appropriate boundary.  */
19370
 
19371
valueT
19372
md_section_align (segT   segment ATTRIBUTE_UNUSED,
19373
                  valueT size)
19374
{
19375
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
19376
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
19377
    {
19378
      /* For a.out, force the section size to be aligned.  If we don't do
19379
         this, BFD will align it for us, but it will not write out the
19380
         final bytes of the section.  This may be a bug in BFD, but it is
19381
         easier to fix it here since that is how the other a.out targets
19382
         work.  */
19383
      int align;
19384
 
19385
      align = bfd_get_section_alignment (stdoutput, segment);
19386
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
19387
    }
19388
#endif
19389
 
19390
  return size;
19391
}
19392
 
19393
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
19394
   of an rs_align_code fragment.  */
19395
 
19396
void
19397
arm_handle_align (fragS * fragP)
19398
{
19399
  static char const arm_noop[2][2][4] =
19400
    {
19401
      {  /* ARMv1 */
19402
        {0x00, 0x00, 0xa0, 0xe1},  /* LE */
19403
        {0xe1, 0xa0, 0x00, 0x00},  /* BE */
19404
      },
19405
      {  /* ARMv6k */
19406
        {0x00, 0xf0, 0x20, 0xe3},  /* LE */
19407
        {0xe3, 0x20, 0xf0, 0x00},  /* BE */
19408
      },
19409
    };
19410
  static char const thumb_noop[2][2][2] =
19411
    {
19412
      {  /* Thumb-1 */
19413
        {0xc0, 0x46},  /* LE */
19414
        {0x46, 0xc0},  /* BE */
19415
      },
19416
      {  /* Thumb-2 */
19417
        {0x00, 0xbf},  /* LE */
19418
        {0xbf, 0x00}   /* BE */
19419
      }
19420
    };
19421
  static char const wide_thumb_noop[2][4] =
19422
    {  /* Wide Thumb-2 */
19423
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
19424
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
19425
    };
19426
 
19427
  unsigned bytes, fix, noop_size;
19428
  char * p;
19429
  const char * noop;
19430
  const char *narrow_noop = NULL;
19431
#ifdef OBJ_ELF
19432
  enum mstate state;
19433
#endif
19434
 
19435
  if (fragP->fr_type != rs_align_code)
19436
    return;
19437
 
19438
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
19439
  p = fragP->fr_literal + fragP->fr_fix;
19440
  fix = 0;
19441
 
19442
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
19443
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
19444
 
19445
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
19446
 
19447
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
19448
    {
19449
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
19450
        {
19451
          narrow_noop = thumb_noop[1][target_big_endian];
19452
          noop = wide_thumb_noop[target_big_endian];
19453
        }
19454
      else
19455
        noop = thumb_noop[0][target_big_endian];
19456
      noop_size = 2;
19457
#ifdef OBJ_ELF
19458
      state = MAP_THUMB;
19459
#endif
19460
    }
19461
  else
19462
    {
19463
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
19464
                     [target_big_endian];
19465
      noop_size = 4;
19466
#ifdef OBJ_ELF
19467
      state = MAP_ARM;
19468
#endif
19469
    }
19470
 
19471
  fragP->fr_var = noop_size;
19472
 
19473
  if (bytes & (noop_size - 1))
19474
    {
19475
      fix = bytes & (noop_size - 1);
19476
#ifdef OBJ_ELF
19477
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
19478
#endif
19479
      memset (p, 0, fix);
19480
      p += fix;
19481
      bytes -= fix;
19482
    }
19483
 
19484
  if (narrow_noop)
19485
    {
19486
      if (bytes & noop_size)
19487
        {
19488
          /* Insert a narrow noop.  */
19489
          memcpy (p, narrow_noop, noop_size);
19490
          p += noop_size;
19491
          bytes -= noop_size;
19492
          fix += noop_size;
19493
        }
19494
 
19495
      /* Use wide noops for the remainder */
19496
      noop_size = 4;
19497
    }
19498
 
19499
  while (bytes >= noop_size)
19500
    {
19501
      memcpy (p, noop, noop_size);
19502
      p += noop_size;
19503
      bytes -= noop_size;
19504
      fix += noop_size;
19505
    }
19506
 
19507
  fragP->fr_fix += fix;
19508
}
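 
/* Worked example (illustrative only): on a Thumb-2 capable CPU a 10-byte
   alignment gap is filled as follows: no zero byte is needed (10 is even),
   one narrow NOP (2 bytes) covers the odd halfword, and the remaining
   8 bytes are covered by two wide NOP.W instructions.  An ARM-state gap is
   simply filled with 4-byte NOPs after any initial zero padding.  */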
19509
 
19510
/* Called from md_do_align.  Used to create an alignment
19511
   frag in a code section.  */
19512
 
19513
void
19514
arm_frag_align_code (int n, int max)
19515
{
19516
  char * p;
19517
 
19518
  /* We assume that there will never be a requirement
19519
     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
19520
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
19521
    {
19522
      char err_msg[128];
19523
 
19524
      sprintf (err_msg,
19525
        _("alignments greater than %d bytes not supported in .text sections."),
19526
        MAX_MEM_FOR_RS_ALIGN_CODE + 1);
19527
      as_fatal ("%s", err_msg);
19528
    }
19529
 
19530
  p = frag_var (rs_align_code,
19531
                MAX_MEM_FOR_RS_ALIGN_CODE,
19532
                1,
19533
                (relax_substateT) max,
19534
                (symbolS *) NULL,
19535
                (offsetT) n,
19536
                (char *) NULL);
19537
  *p = 0;
19538
}
19539
 
19540
/* Perform target specific initialisation of a frag.
19541
   Note - despite the name this initialisation is not done when the frag
19542
   is created, but only when its type is assigned.  A frag can be created
19543
   and used a long time before its type is set, so beware of assuming that
19544
   this initialisation is performed first.  */
19545
 
19546
#ifndef OBJ_ELF
19547
void
19548
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
19549
{
19550
  /* Record whether this frag is in an ARM or a THUMB area.  */
19551
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19552
}
19553
 
19554
#else /* OBJ_ELF is defined.  */
19555
void
19556
arm_init_frag (fragS * fragP, int max_chars)
19557
{
19558
  /* If the current ARM vs THUMB mode has not already
19559
     been recorded into this frag then do so now.  */
19560
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
19561
    {
19562
      fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19563
 
19564
      /* Record a mapping symbol for alignment frags.  We will delete this
19565
         later if the alignment ends up empty.  */
19566
      switch (fragP->fr_type)
19567
        {
19568
          case rs_align:
19569
          case rs_align_test:
19570
          case rs_fill:
19571
            mapping_state_2 (MAP_DATA, max_chars);
19572
            break;
19573
          case rs_align_code:
19574
            mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
19575
            break;
19576
          default:
19577
            break;
19578
        }
19579
    }
19580
}
19581
 
19582
/* When we change sections we need to issue a new mapping symbol.  */
19583
 
19584
void
19585
arm_elf_change_section (void)
19586
{
19587
  /* Link an unlinked unwind index table section to the .text section.  */
19588
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
19589
      && elf_linked_to_section (now_seg) == NULL)
19590
    elf_linked_to_section (now_seg) = text_section;
19591
}
19592
 
19593
int
19594
arm_elf_section_type (const char * str, size_t len)
19595
{
19596
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
19597
    return SHT_ARM_EXIDX;
19598
 
19599
  return -1;
19600
}
19601
 
19602
/* Code to deal with unwinding tables.  */
19603
 
19604
static void add_unwind_adjustsp (offsetT);
19605
 
19606
/* Generate any deferred unwind frame offset.  */
19607
 
19608
static void
19609
flush_pending_unwind (void)
19610
{
19611
  offsetT offset;
19612
 
19613
  offset = unwind.pending_offset;
19614
  unwind.pending_offset = 0;
19615
  if (offset != 0)
19616
    add_unwind_adjustsp (offset);
19617
}
19618
 
19619
/* Add an opcode to this list for this function.  Two-byte opcodes should
19620
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
19621
   order.  */
19622
 
19623
static void
19624
add_unwind_opcode (valueT op, int length)
19625
{
19626
  /* Add any deferred stack adjustment.  */
19627
  if (unwind.pending_offset)
19628
    flush_pending_unwind ();
19629
 
19630
  unwind.sp_restored = 0;
19631
 
19632
  if (unwind.opcode_count + length > unwind.opcode_alloc)
19633
    {
19634
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
19635
      if (unwind.opcodes)
19636
        unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
19637
                                                     unwind.opcode_alloc);
19638
      else
19639
        unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
19640
    }
19641
  while (length > 0)
19642
    {
19643
      length--;
19644
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
19645
      op >>= 8;
19646
      unwind.opcode_count++;
19647
    }
19648
}
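 
/* Illustrative sketch (not part of the assembler): a two-byte unwind opcode
   such as 0xb1 0x01 ("pop {r0}") is passed as a single value with the first
   byte in the high bits.  Because the list is built in reverse, the low
   byte is stored first, and the bytes come back out in the right order when
   the list is later packed most-recently-added first.  */
#if 0
  add_unwind_opcode ((0xb1 << 8) | 0x01, 2); /* unwind.opcodes gains 0x01 then 0xb1.  */
#endif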
19649
 
19650
/* Add unwind opcodes to adjust the stack pointer.  */
19651
 
19652
static void
19653
add_unwind_adjustsp (offsetT offset)
19654
{
19655
  valueT op;
19656
 
19657
  if (offset > 0x200)
19658
    {
19659
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
19660
      char bytes[5];
19661
      int n;
19662
      valueT o;
19663
 
19664
      /* Long form: 0xb2, uleb128.  */
19665
      /* This might not fit in a word so add the individual bytes,
19666
         remembering the list is built in reverse order.  */
19667
      o = (valueT) ((offset - 0x204) >> 2);
19668
      if (o == 0)
19669
        add_unwind_opcode (0, 1);
19670
 
19671
      /* Calculate the uleb128 encoding of the offset.  */
19672
      n = 0;
19673
      while (o)
19674
        {
19675
          bytes[n] = o & 0x7f;
19676
          o >>= 7;
19677
          if (o)
19678
            bytes[n] |= 0x80;
19679
          n++;
19680
        }
19681
      /* Add the insn.  */
19682
      for (; n; n--)
19683
        add_unwind_opcode (bytes[n - 1], 1);
19684
      add_unwind_opcode (0xb2, 1);
19685
    }
19686
  else if (offset > 0x100)
19687
    {
19688
      /* Two short opcodes.  */
19689
      add_unwind_opcode (0x3f, 1);
19690
      op = (offset - 0x104) >> 2;
19691
      add_unwind_opcode (op, 1);
19692
    }
19693
  else if (offset > 0)
19694
    {
19695
      /* Short opcode.  */
19696
      op = (offset - 4) >> 2;
19697
      add_unwind_opcode (op, 1);
19698
    }
19699
  else if (offset < 0)
19700
    {
19701
      offset = -offset;
19702
      while (offset > 0x100)
19703
        {
19704
          add_unwind_opcode (0x7f, 1);
19705
          offset -= 0x100;
19706
        }
19707
      op = ((offset - 4) >> 2) | 0x40;
19708
      add_unwind_opcode (op, 1);
19709
    }
19710
}
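 
/* Worked example (illustrative only) of the three encodings produced above.
   An adjustment of 16 fits the short form: (16 - 4) >> 2 == 3, emitted as
   opcode 0x03.  An adjustment of 0x200 takes two short opcodes: 0x3f (+256)
   followed by (0x200 - 0x104) >> 2 == 0x3f (+256 more).  An adjustment of
   0x300 uses the long form: 0xb2 followed by the uleb128 encoding of
   (0x300 - 0x204) >> 2 == 0x3f, i.e. the emitted byte sequence 0xb2 0x3f.  */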
19711
 
19712
/* Finish the list of unwind opcodes for this function.  */
19713
static void
19714
finish_unwind_opcodes (void)
19715
{
19716
  valueT op;
19717
 
19718
  if (unwind.fp_used)
19719
    {
19720
      /* Adjust sp as necessary.  */
19721
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
19722
      flush_pending_unwind ();
19723
 
19724
      /* After restoring sp from the frame pointer.  */
19725
      op = 0x90 | unwind.fp_reg;
19726
      add_unwind_opcode (op, 1);
19727
    }
19728
  else
19729
    flush_pending_unwind ();
19730
}
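 
/* Worked example (illustrative only): if the frame pointer is r7, as is
   typical for Thumb code, the opcode emitted above is 0x90 | 7 == 0x97,
   which the unwinder interprets as "set vsp to the value of r7".  */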
19731
 
19732
 
19733
/* Start an exception table entry.  If idx is nonzero this is an index table
19734
   entry.  */
19735
 
19736
static void
19737
start_unwind_section (const segT text_seg, int idx)
19738
{
19739
  const char * text_name;
19740
  const char * prefix;
19741
  const char * prefix_once;
19742
  const char * group_name;
19743
  size_t prefix_len;
19744
  size_t text_len;
19745
  char * sec_name;
19746
  size_t sec_name_len;
19747
  int type;
19748
  int flags;
19749
  int linkonce;
19750
 
19751
  if (idx)
19752
    {
19753
      prefix = ELF_STRING_ARM_unwind;
19754
      prefix_once = ELF_STRING_ARM_unwind_once;
19755
      type = SHT_ARM_EXIDX;
19756
    }
19757
  else
19758
    {
19759
      prefix = ELF_STRING_ARM_unwind_info;
19760
      prefix_once = ELF_STRING_ARM_unwind_info_once;
19761
      type = SHT_PROGBITS;
19762
    }
19763
 
19764
  text_name = segment_name (text_seg);
19765
  if (streq (text_name, ".text"))
19766
    text_name = "";
19767
 
19768
  if (strncmp (text_name, ".gnu.linkonce.t.",
19769
               strlen (".gnu.linkonce.t.")) == 0)
19770
    {
19771
      prefix = prefix_once;
19772
      text_name += strlen (".gnu.linkonce.t.");
19773
    }
19774
 
19775
  prefix_len = strlen (prefix);
19776
  text_len = strlen (text_name);
19777
  sec_name_len = prefix_len + text_len;
19778
  sec_name = (char *) xmalloc (sec_name_len + 1);
19779
  memcpy (sec_name, prefix, prefix_len);
19780
  memcpy (sec_name + prefix_len, text_name, text_len);
19781
  sec_name[prefix_len + text_len] = '\0';
19782
 
19783
  flags = SHF_ALLOC;
19784
  linkonce = 0;
19785
  group_name = 0;
19786
 
19787
  /* Handle COMDAT group.  */
19788
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
19789
    {
19790
      group_name = elf_group_name (text_seg);
19791
      if (group_name == NULL)
19792
        {
19793
          as_bad (_("Group section `%s' has no group signature"),
19794
                  segment_name (text_seg));
19795
          ignore_rest_of_line ();
19796
          return;
19797
        }
19798
      flags |= SHF_GROUP;
19799
      linkonce = 1;
19800
    }
19801
 
19802
  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
19803
 
19804
  /* Set the section link for index tables.  */
19805
  if (idx)
19806
    elf_linked_to_section (now_seg) = text_seg;
19807
}
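 
/* Illustrative example (assuming the usual ELF_STRING_ARM_* values,
   ".ARM.exidx" and ".ARM.extab"): for code in the plain ".text" section the
   index table lands in ".ARM.exidx" and the table data in ".ARM.extab";
   for code in ".text.foo" they become ".ARM.exidx.text.foo" and
   ".ARM.extab.text.foo".  Old-style ".gnu.linkonce.t.*" sections get the
   corresponding "once" prefixes, while a text section in a COMDAT group has
   its unwind sections placed in the same group.  */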
19808
 
19809
 
19810
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
19811
   personality routine data.  Returns zero, or the index table value for
19812
   an inline entry.  */
19813
 
19814
static valueT
19815
create_unwind_entry (int have_data)
19816
{
19817
  int size;
19818
  addressT where;
19819
  char *ptr;
19820
  /* The current word of data.  */
19821
  valueT data;
19822
  /* The number of bytes left in this word.  */
19823
  int n;
19824
 
19825
  finish_unwind_opcodes ();
19826
 
19827
  /* Remember the current text section.  */
19828
  unwind.saved_seg = now_seg;
19829
  unwind.saved_subseg = now_subseg;
19830
 
19831
  start_unwind_section (now_seg, 0);
19832
 
19833
  if (unwind.personality_routine == NULL)
19834
    {
19835
      if (unwind.personality_index == -2)
19836
        {
19837
          if (have_data)
19838
            as_bad (_("handlerdata in cantunwind frame"));
19839
          return 1; /* EXIDX_CANTUNWIND.  */
19840
        }
19841
 
19842
      /* Use a default personality routine if none is specified.  */
19843
      if (unwind.personality_index == -1)
19844
        {
19845
          if (unwind.opcode_count > 3)
19846
            unwind.personality_index = 1;
19847
          else
19848
            unwind.personality_index = 0;
19849
        }
19850
 
19851
      /* Space for the personality routine entry.  */
19852
      if (unwind.personality_index == 0)
19853
        {
19854
          if (unwind.opcode_count > 3)
19855
            as_bad (_("too many unwind opcodes for personality routine 0"));
19856
 
19857
          if (!have_data)
19858
            {
19859
              /* All the data is inline in the index table.  */
19860
              data = 0x80;
19861
              n = 3;
19862
              while (unwind.opcode_count > 0)
19863
                {
19864
                  unwind.opcode_count--;
19865
                  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
19866
                  n--;
19867
                }
19868
 
19869
              /* Pad with "finish" opcodes.  */
19870
              while (n--)
19871
                data = (data << 8) | 0xb0;
19872
 
19873
              return data;
19874
            }
19875
          size = 0;
19876
        }
19877
      else
19878
        /* We get two opcodes "free" in the first word.  */
19879
        size = unwind.opcode_count - 2;
19880
    }
19881
  else
19882
    /* An extra byte is required for the opcode count.  */
19883
    size = unwind.opcode_count + 1;
19884
 
19885
  size = (size + 3) >> 2;
19886
  if (size > 0xff)
19887
    as_bad (_("too many unwind opcodes"));
19888
 
19889
  frag_align (2, 0, 0);
19890
  record_alignment (now_seg, 2);
19891
  unwind.table_entry = expr_build_dot ();
19892
 
19893
  /* Allocate the table entry.  */
19894
  ptr = frag_more ((size << 2) + 4);
19895
  where = frag_now_fix () - ((size << 2) + 4);
19896
 
19897
  switch (unwind.personality_index)
19898
    {
19899
    case -1:
19900
      /* ??? Should this be a PLT generating relocation?  */
19901
      /* Custom personality routine.  */
19902
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
19903
               BFD_RELOC_ARM_PREL31);
19904
 
19905
      where += 4;
19906
      ptr += 4;
19907
 
19908
      /* Set the first byte to the number of additional words.  */
19909
      data = size - 1;
19910
      n = 3;
19911
      break;
19912
 
19913
    /* ABI defined personality routines.  */
19914
    case 0:
19915
      /* Three opcode bytes are packed into the first word.  */
19916
      data = 0x80;
19917
      n = 3;
19918
      break;
19919
 
19920
    case 1:
19921
    case 2:
19922
      /* The size and first two opcode bytes go in the first word.  */
19923
      data = ((0x80 + unwind.personality_index) << 8) | size;
19924
      n = 2;
19925
      break;
19926
 
19927
    default:
19928
      /* Should never happen.  */
19929
      abort ();
19930
    }
19931
 
19932
  /* Pack the opcodes into words (MSB first), reversing the list at the same
19933
     time.  */
19934
  while (unwind.opcode_count > 0)
19935
    {
19936
      if (n == 0)
19937
        {
19938
          md_number_to_chars (ptr, data, 4);
19939
          ptr += 4;
19940
          n = 4;
19941
          data = 0;
19942
        }
19943
      unwind.opcode_count--;
19944
      n--;
19945
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
19946
    }
19947
 
19948
  /* Finish off the last word.  */
19949
  if (n < 4)
19950
    {
19951
      /* Pad with "finish" opcodes.  */
19952
      while (n--)
19953
        data = (data << 8) | 0xb0;
19954
 
19955
      md_number_to_chars (ptr, data, 4);
19956
    }
19957
 
19958
  if (!have_data)
19959
    {
19960
      /* Add an empty descriptor if there is no user-specified data.   */
19961
      ptr = frag_more (4);
19962
      md_number_to_chars (ptr, 0, 4);
19963
    }
19964
 
19965
  return 0;
19966
}
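 
/* Worked example (illustrative, not from the original sources): with the
   default personality routine 0 and no handler data, an entry whose stored
   opcode array is { 0x9b, 0xa8 } (opcode_count == 2) is returned inline as

       data = 0x80                      (compact model, personality index 0)
       data = (data << 8) | 0xa8        (opcodes[1])
       data = (data << 8) | 0x9b        (opcodes[0])
       data = (data << 8) | 0xb0        (pad with a "finish" opcode)
            = 0x80a89bb0

   so the whole frame description fits in the second word of the index table
   entry and no .ARM.extab data needs to be emitted.  */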
19967
 
19968
 
19969
/* Initialize the DWARF-2 unwind information for this procedure.  */
19970
 
19971
void
19972
tc_arm_frame_initial_instructions (void)
19973
{
19974
  cfi_add_CFA_def_cfa (REG_SP, 0);
19975
}
19976
#endif /* OBJ_ELF */
19977
 
19978
/* Convert REGNAME to a DWARF-2 register number.  */
19979
 
19980
int
19981
tc_arm_regname_to_dw2regnum (char *regname)
19982
{
19983
  int reg = arm_reg_parse (&regname, REG_TYPE_RN);
19984
 
19985
  if (reg == FAIL)
19986
    return -1;
19987
 
19988
  return reg;
19989
}
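 
/* Usage note (illustrative): assuming the usual tc_regname_to_dw2regnum hook
   wiring in tc-arm.h, a directive such as `.cfi_offset r7, -8' ends up here
   with "r7", which arm_reg_parse resolves to core register 7; for the ARM
   core registers the DWARF-2 numbers equal the register numbers, so the
   parsed value is returned unchanged.  */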
19990
 
19991
#ifdef TE_PE
19992
void
19993
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
19994
{
19995
  expressionS exp;
19996
 
19997
  exp.X_op = O_secrel;
19998
  exp.X_add_symbol = symbol;
19999
  exp.X_add_number = 0;
20000
  emit_expr (&exp, size);
20001
}
20002
#endif
20003
 
20004
/* MD interface: Symbol and relocation handling.  */
20005
 
20006
/* Return the address within the segment that a PC-relative fixup is
20007
   relative to.  For ARM, PC-relative fixups applied to instructions
20008
   are generally relative to the location of the fixup plus 8 bytes.
20009
   Thumb branches are offset by 4, and Thumb loads relative to PC
20010
   require special handling.  */
20011
 
20012
long
20013
md_pcrel_from_section (fixS * fixP, segT seg)
20014
{
20015
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
20016
 
20017
  /* If this is pc-relative and we are going to emit a relocation
20018
     then we just want to put out any pipeline compensation that the linker
20019
     will need.  Otherwise we want to use the calculated base.
20020
     For WinCE we skip the bias for externals as well, since this
20021
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
20022
  if (fixP->fx_pcrel
20023
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
20024
          || (arm_force_relocation (fixP)
20025
#ifdef TE_WINCE
20026
              && !S_IS_EXTERNAL (fixP->fx_addsy)
20027
#endif
20028
              )))
20029
    base = 0;
20030
 
20031
 
20032
  switch (fixP->fx_r_type)
20033
    {
20034
      /* PC relative addressing on the Thumb is slightly odd as the
20035
         bottom two bits of the PC are forced to zero for the
20036
         calculation.  This happens *after* application of the
20037
         pipeline offset.  However, Thumb adrl already adjusts for
20038
         this, so we need not do it again.  */
20039
    case BFD_RELOC_ARM_THUMB_ADD:
20040
      return base & ~3;
20041
 
20042
    case BFD_RELOC_ARM_THUMB_OFFSET:
20043
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
20044
    case BFD_RELOC_ARM_T32_ADD_PC12:
20045
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20046
      return (base + 4) & ~3;
20047
 
20048
      /* Thumb branches are simply offset by +4.  */
20049
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
20050
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
20051
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
20052
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
20053
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
20054
      return base + 4;
20055
 
20056
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
20057
      if (fixP->fx_addsy
20058
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20059
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20060
          && ARM_IS_FUNC (fixP->fx_addsy)
20061
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20062
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20063
       return base + 4;
20064
 
20065
      /* BLX is like branches above, but forces the low two bits of PC to
20066
         zero.  */
20067
    case BFD_RELOC_THUMB_PCREL_BLX:
20068
      if (fixP->fx_addsy
20069
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20070
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20071
          && THUMB_IS_FUNC (fixP->fx_addsy)
20072
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20073
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20074
      return (base + 4) & ~3;
20075
 
20076
      /* ARM mode branches are offset by +8.  However, the Windows CE
20077
         loader expects the relocation not to take this into account.  */
20078
    case BFD_RELOC_ARM_PCREL_BLX:
20079
      if (fixP->fx_addsy
20080
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20081
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20082
          && ARM_IS_FUNC (fixP->fx_addsy)
20083
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20084
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20085
      return base + 8;
20086
 
20087
    case BFD_RELOC_ARM_PCREL_CALL:
20088
      if (fixP->fx_addsy
20089
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20090
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20091
          && THUMB_IS_FUNC (fixP->fx_addsy)
20092
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20093
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20094
      return base + 8;
20095
 
20096
    case BFD_RELOC_ARM_PCREL_BRANCH:
20097
    case BFD_RELOC_ARM_PCREL_JUMP:
20098
    case BFD_RELOC_ARM_PLT32:
20099
#ifdef TE_WINCE
20100
      /* When handling fixups immediately, because we have already
20101
         discovered the value of a symbol, or the address of the frag involved,
20102
         we must account for the +8 offset, as the OS loader will never see the reloc;
20103
         see fixup_segment() in write.c.
20104
         The S_IS_EXTERNAL test handles the case of global symbols.
20105
         Those need the calculated base, not just the pipeline compensation the linker will need.  */
20106
      if (fixP->fx_pcrel
20107
          && fixP->fx_addsy != NULL
20108
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20109
          && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
20110
        return base + 8;
20111
      return base;
20112
#else
20113
      return base + 8;
20114
#endif
20115
 
20116
 
20117
      /* ARM mode loads relative to PC are also offset by +8.  Unlike
20118
         branches, the Windows CE loader *does* expect the relocation
20119
         to take this into account.  */
20120
    case BFD_RELOC_ARM_OFFSET_IMM:
20121
    case BFD_RELOC_ARM_OFFSET_IMM8:
20122
    case BFD_RELOC_ARM_HWLITERAL:
20123
    case BFD_RELOC_ARM_LITERAL:
20124
    case BFD_RELOC_ARM_CP_OFF_IMM:
20125
      return base + 8;
20126
 
20127
 
20128
      /* Other PC-relative relocations are un-offset.  */
20129
    default:
20130
      return base;
20131
    }
20132
}
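 
/* Worked example (illustrative): for an ARM-mode `b target' assembled at
   address 0x1000 with `target' at 0x1010 in the same section, the fixup
   value handed to md_apply_fix is the target minus the value returned here,
   i.e. 0x1010 - (0x1000 + 8) = 0x8, and the 24-bit branch field receives
   0x8 >> 2 = 2.  Thumb encodings use a +4 bias instead, and the PC-relative
   load forms additionally clear the low bits of the base, as handled in the
   switch above.  */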
20133
 
20134
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
20135
   Otherwise we have no need to default values of symbols.  */
20136
 
20137
symbolS *
20138
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
20139
{
20140
#ifdef OBJ_ELF
20141
  if (name[0] == '_' && name[1] == 'G'
20142
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
20143
    {
20144
      if (!GOT_symbol)
20145
        {
20146
          if (symbol_find (name))
20147
            as_bad (_("GOT already in the symbol table"));
20148
 
20149
          GOT_symbol = symbol_new (name, undefined_section,
20150
                                   (valueT) 0, & zero_address_frag);
20151
        }
20152
 
20153
      return GOT_symbol;
20154
    }
20155
#endif
20156
 
20157
  return NULL;
20158
}
20159
 
20160
/* Subroutine of md_apply_fix.   Check to see if an immediate can be
20161
   computed as two separate immediate values, added together.  We
20162
   already know that this value cannot be computed by just one ARM
20163
   instruction.  */
20164
 
20165
static unsigned int
20166
validate_immediate_twopart (unsigned int   val,
20167
                            unsigned int * highpart)
20168
{
20169
  unsigned int a;
20170
  unsigned int i;
20171
 
20172
  for (i = 0; i < 32; i += 2)
20173
    if (((a = rotate_left (val, i)) & 0xff) != 0)
20174
      {
20175
        if (a & 0xff00)
20176
          {
20177
            if (a & ~ 0xffff)
20178
              continue;
20179
            * highpart = (a  >> 8) | ((i + 24) << 7);
20180
          }
20181
        else if (a & 0xff0000)
20182
          {
20183
            if (a & 0xff000000)
20184
              continue;
20185
            * highpart = (a >> 16) | ((i + 16) << 7);
20186
          }
20187
        else
20188
          {
20189
            gas_assert (a & 0xff000000);
20190
            * highpart = (a >> 24) | ((i + 8) << 7);
20191
          }
20192
 
20193
        return (a & 0xff) | (i << 7);
20194
      }
20195
 
20196
  return FAIL;
20197
}
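 
/* Worked example (illustrative): val = 0x1234 cannot be encoded as a single
   rotated 8-bit ARM immediate, but at rotation i = 0 the low byte is 0x34
   and the remaining bits fit within 0xff00, so the function returns 0x34
   (rotation field 0) and sets *highpart to 0x12 with a rotation field of 12,
   i.e. 0x12 rotated right by 24 == 0x1200.  An ADRL-style sequence can then
   build the value with two ADD (or SUB) instructions using #0x34 and
   #0x1200.  */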
20198
 
20199
static int
20200
validate_offset_imm (unsigned int val, int hwse)
20201
{
20202
  if ((hwse && val > 255) || val > 4095)
20203
    return FAIL;
20204
  return val;
20205
}
20206
 
20207
/* Subroutine of md_apply_fix.   Do those data_ops which can take a
20208
   negative immediate constant by altering the instruction.  A bit of
20209
   a hack really.
20210
        MOV <-> MVN
20211
        AND <-> BIC
20212
        ADC <-> SBC
20213
        by inverting the second operand, and
20214
        ADD <-> SUB
20215
        CMP <-> CMN
20216
        by negating the second operand.  */
20217
 
20218
static int
20219
negate_data_op (unsigned long * instruction,
20220
                unsigned long   value)
20221
{
20222
  int op, new_inst;
20223
  unsigned long negated, inverted;
20224
 
20225
  negated = encode_arm_immediate (-value);
20226
  inverted = encode_arm_immediate (~value);
20227
 
20228
  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
20229
  switch (op)
20230
    {
20231
      /* First negates.  */
20232
    case OPCODE_SUB:             /* ADD <-> SUB  */
20233
      new_inst = OPCODE_ADD;
20234
      value = negated;
20235
      break;
20236
 
20237
    case OPCODE_ADD:
20238
      new_inst = OPCODE_SUB;
20239
      value = negated;
20240
      break;
20241
 
20242
    case OPCODE_CMP:             /* CMP <-> CMN  */
20243
      new_inst = OPCODE_CMN;
20244
      value = negated;
20245
      break;
20246
 
20247
    case OPCODE_CMN:
20248
      new_inst = OPCODE_CMP;
20249
      value = negated;
20250
      break;
20251
 
20252
      /* Now Inverted ops.  */
20253
    case OPCODE_MOV:             /* MOV <-> MVN  */
20254
      new_inst = OPCODE_MVN;
20255
      value = inverted;
20256
      break;
20257
 
20258
    case OPCODE_MVN:
20259
      new_inst = OPCODE_MOV;
20260
      value = inverted;
20261
      break;
20262
 
20263
    case OPCODE_AND:             /* AND <-> BIC  */
20264
      new_inst = OPCODE_BIC;
20265
      value = inverted;
20266
      break;
20267
 
20268
    case OPCODE_BIC:
20269
      new_inst = OPCODE_AND;
20270
      value = inverted;
20271
      break;
20272
 
20273
    case OPCODE_ADC:              /* ADC <-> SBC  */
20274
      new_inst = OPCODE_SBC;
20275
      value = inverted;
20276
      break;
20277
 
20278
    case OPCODE_SBC:
20279
      new_inst = OPCODE_ADC;
20280
      value = inverted;
20281
      break;
20282
 
20283
      /* We cannot do anything.  */
20284
    default:
20285
      return FAIL;
20286
    }
20287
 
20288
  if (value == (unsigned) FAIL)
20289
    return FAIL;
20290
 
20291
  *instruction &= OPCODE_MASK;
20292
  *instruction |= new_inst << DATA_OP_SHIFT;
20293
  return value;
20294
}
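 
/* Illustrative example: a fixup that resolves `add r0, r1, #imm' to a
   negative value such as -4 cannot be encoded directly, but negate_data_op
   rewrites the opcode field to SUB and returns encode_arm_immediate (4), so
   the instruction is emitted as `sub r0, r1, #4'.  MOV/MVN, AND/BIC and
   ADC/SBC are handled the same way, using the bitwise complement of the
   value rather than its negation.  */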
20295
 
20296
/* Like negate_data_op, but for Thumb-2.   */
20297
 
20298
static unsigned int
20299
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
20300
{
20301
  int op, new_inst;
20302
  int rd;
20303
  unsigned int negated, inverted;
20304
 
20305
  negated = encode_thumb32_immediate (-value);
20306
  inverted = encode_thumb32_immediate (~value);
20307
 
20308
  rd = (*instruction >> 8) & 0xf;
20309
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
20310
  switch (op)
20311
    {
20312
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
20313
    case T2_OPCODE_SUB:
20314
      new_inst = T2_OPCODE_ADD;
20315
      value = negated;
20316
      break;
20317
 
20318
    case T2_OPCODE_ADD:
20319
      new_inst = T2_OPCODE_SUB;
20320
      value = negated;
20321
      break;
20322
 
20323
      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
20324
    case T2_OPCODE_ORR:
20325
      new_inst = T2_OPCODE_ORN;
20326
      value = inverted;
20327
      break;
20328
 
20329
    case T2_OPCODE_ORN:
20330
      new_inst = T2_OPCODE_ORR;
20331
      value = inverted;
20332
      break;
20333
 
20334
      /* AND <-> BIC.  TST has no inverted equivalent.  */
20335
    case T2_OPCODE_AND:
20336
      new_inst = T2_OPCODE_BIC;
20337
      if (rd == 15)
20338
        value = FAIL;
20339
      else
20340
        value = inverted;
20341
      break;
20342
 
20343
    case T2_OPCODE_BIC:
20344
      new_inst = T2_OPCODE_AND;
20345
      value = inverted;
20346
      break;
20347
 
20348
      /* ADC <-> SBC  */
20349
    case T2_OPCODE_ADC:
20350
      new_inst = T2_OPCODE_SBC;
20351
      value = inverted;
20352
      break;
20353
 
20354
    case T2_OPCODE_SBC:
20355
      new_inst = T2_OPCODE_ADC;
20356
      value = inverted;
20357
      break;
20358
 
20359
      /* We cannot do anything.  */
20360
    default:
20361
      return FAIL;
20362
    }
20363
 
20364
  if (value == (unsigned int)FAIL)
20365
    return FAIL;
20366
 
20367
  *instruction &= T2_OPCODE_MASK;
20368
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
20369
  return value;
20370
}
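 
/* Illustrative example: 0xffffff00 is not a valid Thumb-2 modified
   immediate, but its complement 0xff is, so a MOV.W of that constant (which
   shares the ORR opcode space) is flipped above to the MVN/ORN form with
   #0xff.  The rd == 15 check refuses to convert TST (the AND encoding with
   Rd == 15), which has no BIC-style counterpart.  */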
20371
 
20372
/* Read a 32-bit thumb instruction from buf.  */
20373
static unsigned long
20374
get_thumb32_insn (char * buf)
20375
{
20376
  unsigned long insn;
20377
  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
20378
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20379
 
20380
  return insn;
20381
}
20382
 
20383
 
20384
/* We usually want to set the low bit on the address of thumb function
20385
   symbols.  In particular .word foo - . should have the low bit set.
20386
   Generic code tries to fold the difference of two symbols to
20387
   a constant.  Prevent this and force a relocation when the first symbol
20388
   is a thumb function.  */
20389
 
20390
bfd_boolean
20391
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
20392
{
20393
  if (op == O_subtract
20394
      && l->X_op == O_symbol
20395
      && r->X_op == O_symbol
20396
      && THUMB_IS_FUNC (l->X_add_symbol))
20397
    {
20398
      l->X_op = O_subtract;
20399
      l->X_op_symbol = r->X_add_symbol;
20400
      l->X_add_number -= r->X_add_number;
20401
      return TRUE;
20402
    }
20403
 
20404
  /* Process as normal.  */
20405
  return FALSE;
20406
}
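 
/* Illustrative example:

       .thumb_func
   foo:
       ...
       .word   foo - .

   Generic expression folding would reduce `foo - .' to a plain constant and
   the Thumb bit would be lost; keeping it as a symbol-difference expression
   here forces a relocation, so the final value has bit 0 set for the Thumb
   function.  */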
20407
 
20408
/* Encode Thumb2 unconditional branches and calls. The encoding
20409
   of the immediate field is identical for the two.  */
20410
 
20411
static void
20412
encode_thumb2_b_bl_offset (char * buf, offsetT value)
20413
{
20414
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
20415
  offsetT newval;
20416
  offsetT newval2;
20417
  addressT S, I1, I2, lo, hi;
20418
 
20419
  S = (value >> 24) & 0x01;
20420
  I1 = (value >> 23) & 0x01;
20421
  I2 = (value >> 22) & 0x01;
20422
  hi = (value >> 12) & 0x3ff;
20423
  lo = (value >> 1) & 0x7ff;
20424
  newval   = md_chars_to_number (buf, THUMB_SIZE);
20425
  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20426
  newval  |= (S << 10) | hi;
20427
  newval2 &=  ~T2I1I2MASK;
20428
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
20429
  md_number_to_chars (buf, newval, THUMB_SIZE);
20430
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20431
}
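 
/* Note (illustrative): the XOR with T2I1I2MASK implements the architectural
   relation J1 = !(I1 ^ S), J2 = !(I2 ^ S) used by the T3/T4 branch
   encodings, so for a short forward branch with S == 0 and I1 == I2 == 0 the
   stored J1/J2 bits are both 1.  The low bit of `value' is discarded because
   Thumb branch targets are halfword aligned.  */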
20432
 
20433
void
20434
md_apply_fix (fixS *    fixP,
20435
               valueT * valP,
20436
               segT     seg)
20437
{
20438
  offsetT        value = * valP;
20439
  offsetT        newval;
20440
  unsigned int   newimm;
20441
  unsigned long  temp;
20442
  int            sign;
20443
  char *         buf = fixP->fx_where + fixP->fx_frag->fr_literal;
20444
 
20445
  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
20446
 
20447
  /* Note whether this will delete the relocation.  */
20448
 
20449
  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
20450
    fixP->fx_done = 1;
20451
 
20452
  /* On a 64-bit host, silently truncate 'value' to 32 bits for
20453
     consistency with the behaviour on 32-bit hosts.  Remember value
20454
     for emit_reloc.  */
20455
  value &= 0xffffffff;
20456
  value ^= 0x80000000;
20457
  value -= 0x80000000;
20458
 
20459
  *valP = value;
20460
  fixP->fx_addnumber = value;
20461
 
20462
  /* Same treatment for fixP->fx_offset.  */
20463
  fixP->fx_offset &= 0xffffffff;
20464
  fixP->fx_offset ^= 0x80000000;
20465
  fixP->fx_offset -= 0x80000000;
20466
 
20467
  switch (fixP->fx_r_type)
20468
    {
20469
    case BFD_RELOC_NONE:
20470
      /* This will need to go in the object file.  */
20471
      fixP->fx_done = 0;
20472
      break;
20473
 
20474
    case BFD_RELOC_ARM_IMMEDIATE:
20475
      /* We claim that this fixup has been processed here,
20476
         even if in fact we generate an error because we do
20477
         not have a reloc for it, so tc_gen_reloc will reject it.  */
20478
      fixP->fx_done = 1;
20479
 
20480
      if (fixP->fx_addsy)
20481
        {
20482
          const char *msg = 0;
20483
 
20484
          if (! S_IS_DEFINED (fixP->fx_addsy))
20485
            msg = _("undefined symbol %s used as an immediate value");
20486
          else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20487
            msg = _("symbol %s is in a different section");
20488
          else if (S_IS_WEAK (fixP->fx_addsy))
20489
            msg = _("symbol %s is weak and may be overridden later");
20490
 
20491
          if (msg)
20492
            {
20493
              as_bad_where (fixP->fx_file, fixP->fx_line,
20494
                            msg, S_GET_NAME (fixP->fx_addsy));
20495
              break;
20496
            }
20497
        }
20498
 
20499
      newimm = encode_arm_immediate (value);
20500
      temp = md_chars_to_number (buf, INSN_SIZE);
20501
 
20502
      /* If the instruction will fail, see if we can fix things up by
20503
         changing the opcode.  */
20504
      if (newimm == (unsigned int) FAIL
20505
          && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
20506
        {
20507
          as_bad_where (fixP->fx_file, fixP->fx_line,
20508
                        _("invalid constant (%lx) after fixup"),
20509
                        (unsigned long) value);
20510
          break;
20511
        }
20512
 
20513
      newimm |= (temp & 0xfffff000);
20514
      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20515
      break;
20516
 
20517
    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
20518
      {
20519
        unsigned int highpart = 0;
20520
        unsigned int newinsn  = 0xe1a00000; /* nop.  */
20521
 
20522
        if (fixP->fx_addsy)
20523
          {
20524
            const char *msg = 0;
20525
 
20526
            if (! S_IS_DEFINED (fixP->fx_addsy))
20527
              msg = _("undefined symbol %s used as an immediate value");
20528
            else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20529
              msg = _("symbol %s is in a different section");
20530
            else if (S_IS_WEAK (fixP->fx_addsy))
20531
              msg = _("symbol %s is weak and may be overridden later");
20532
 
20533
            if (msg)
20534
              {
20535
                as_bad_where (fixP->fx_file, fixP->fx_line,
20536
                              msg, S_GET_NAME (fixP->fx_addsy));
20537
                break;
20538
              }
20539
          }
20540
 
20541
        newimm = encode_arm_immediate (value);
20542
        temp = md_chars_to_number (buf, INSN_SIZE);
20543
 
20544
        /* If the instruction will fail, see if we can fix things up by
20545
           changing the opcode.  */
20546
        if (newimm == (unsigned int) FAIL
20547
            && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
20548
          {
20549
            /* No ?  OK - try using two ADD instructions to generate
20550
               the value.  */
20551
            newimm = validate_immediate_twopart (value, & highpart);
20552
 
20553
            /* Yes - then make sure that the second instruction is
20554
               also an add.  */
20555
            if (newimm != (unsigned int) FAIL)
20556
              newinsn = temp;
20557
            /* Still No ?  Try using a negated value.  */
20558
            else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
20559
              temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
20560
            /* Otherwise - give up.  */
20561
            else
20562
              {
20563
                as_bad_where (fixP->fx_file, fixP->fx_line,
20564
                              _("unable to compute ADRL instructions for PC offset of 0x%lx"),
20565
                              (long) value);
20566
                break;
20567
              }
20568
 
20569
            /* Replace the first operand in the 2nd instruction (which
20570
               is the PC) with the destination register.  We have
20571
               already added in the PC in the first instruction and we
20572
               do not want to do it again.  */
20573
            newinsn &= ~ 0xf0000;
20574
            newinsn |= ((newinsn & 0x0f000) << 4);
20575
          }
20576
 
20577
        newimm |= (temp & 0xfffff000);
20578
        md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20579
 
20580
        highpart |= (newinsn & 0xfffff000);
20581
        md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
20582
      }
20583
      break;
20584
 
20585
    case BFD_RELOC_ARM_OFFSET_IMM:
20586
      if (!fixP->fx_done && seg->use_rela_p)
20587
        value = 0;
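      /* Fall through.  */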
20588
 
20589
    case BFD_RELOC_ARM_LITERAL:
20590
      sign = value > 0;
20591
 
20592
      if (value < 0)
20593
        value = - value;
20594
 
20595
      if (validate_offset_imm (value, 0) == FAIL)
20596
        {
20597
          if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
20598
            as_bad_where (fixP->fx_file, fixP->fx_line,
20599
                          _("invalid literal constant: pool needs to be closer"));
20600
          else
20601
            as_bad_where (fixP->fx_file, fixP->fx_line,
20602
                          _("bad immediate value for offset (%ld)"),
20603
                          (long) value);
20604
          break;
20605
        }
20606
 
20607
      newval = md_chars_to_number (buf, INSN_SIZE);
20608
      if (value == 0)
20609
        newval &= 0xfffff000;
20610
      else
20611
        {
20612
          newval &= 0xff7ff000;
20613
          newval |= value | (sign ? INDEX_UP : 0);
20614
        }
20615
      md_number_to_chars (buf, newval, INSN_SIZE);
20616
      break;
20617
 
20618
    case BFD_RELOC_ARM_OFFSET_IMM8:
20619
    case BFD_RELOC_ARM_HWLITERAL:
20620
      sign = value > 0;
20621
 
20622
      if (value < 0)
20623
        value = - value;
20624
 
20625
      if (validate_offset_imm (value, 1) == FAIL)
20626
        {
20627
          if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
20628
            as_bad_where (fixP->fx_file, fixP->fx_line,
20629
                          _("invalid literal constant: pool needs to be closer"));
20630
          else
20631
            as_bad (_("bad immediate value for 8-bit offset (%ld)"),
20632
                    (long) value);
20633
          break;
20634
        }
20635
 
20636
      newval = md_chars_to_number (buf, INSN_SIZE);
20637
      if (value == 0)
20638
        newval &= 0xfffff0f0;
20639
      else
20640
        {
20641
          newval &= 0xff7ff0f0;
20642
          newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
20643
        }
20644
      md_number_to_chars (buf, newval, INSN_SIZE);
20645
      break;
20646
 
20647
    case BFD_RELOC_ARM_T32_OFFSET_U8:
20648
      if (value < 0 || value > 1020 || value % 4 != 0)
20649
        as_bad_where (fixP->fx_file, fixP->fx_line,
20650
                      _("bad immediate value for offset (%ld)"), (long) value);
20651
      value /= 4;
20652
 
20653
      newval = md_chars_to_number (buf+2, THUMB_SIZE);
20654
      newval |= value;
20655
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
20656
      break;
20657
 
20658
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
20659
      /* This is a complicated relocation used for all varieties of Thumb32
20660
         load/store instruction with immediate offset:
20661
 
20662
         1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
20663
                                                   *4, optional writeback(W)
20664
                                                   (doubleword load/store)
20665
 
20666
         1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
20667
         1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
20668
         1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
20669
         1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
20670
         1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
20671
 
20672
         Uppercase letters indicate bits that are already encoded at
20673
         this point.  Lowercase letters are our problem.  For the
20674
         second block of instructions, the secondary opcode nybble
20675
         (bits 8..11) is present, and bit 23 is zero, even if this is
20676
         a PC-relative operation.  */
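      /* Illustrative example: for the doubleword form (the first pattern
         above) with a resolved offset of +16, the code below sets U
         (bit 23), divides the offset by 4 and stores 4 in the low eight
         bits; an offset of -18 would be rejected because it is not a
         multiple of 4.  */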
20677
      newval = md_chars_to_number (buf, THUMB_SIZE);
20678
      newval <<= 16;
20679
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
20680
 
20681
      if ((newval & 0xf0000000) == 0xe0000000)
20682
        {
20683
          /* Doubleword load/store: 8-bit offset, scaled by 4.  */
20684
          if (value >= 0)
20685
            newval |= (1 << 23);
20686
          else
20687
            value = -value;
20688
          if (value % 4 != 0)
20689
            {
20690
              as_bad_where (fixP->fx_file, fixP->fx_line,
20691
                            _("offset not a multiple of 4"));
20692
              break;
20693
            }
20694
          value /= 4;
20695
          if (value > 0xff)
20696
            {
20697
              as_bad_where (fixP->fx_file, fixP->fx_line,
20698
                            _("offset out of range"));
20699
              break;
20700
            }
20701
          newval &= ~0xff;
20702
        }
20703
      else if ((newval & 0x000f0000) == 0x000f0000)
20704
        {
20705
          /* PC-relative, 12-bit offset.  */
20706
          if (value >= 0)
20707
            newval |= (1 << 23);
20708
          else
20709
            value = -value;
20710
          if (value > 0xfff)
20711
            {
20712
              as_bad_where (fixP->fx_file, fixP->fx_line,
20713
                            _("offset out of range"));
20714
              break;
20715
            }
20716
          newval &= ~0xfff;
20717
        }
20718
      else if ((newval & 0x00000100) == 0x00000100)
20719
        {
20720
          /* Writeback: 8-bit, +/- offset.  */
20721
          if (value >= 0)
20722
            newval |= (1 << 9);
20723
          else
20724
            value = -value;
20725
          if (value > 0xff)
20726
            {
20727
              as_bad_where (fixP->fx_file, fixP->fx_line,
20728
                            _("offset out of range"));
20729
              break;
20730
            }
20731
          newval &= ~0xff;
20732
        }
20733
      else if ((newval & 0x00000f00) == 0x00000e00)
20734
        {
20735
          /* T-instruction: positive 8-bit offset.  */
20736
          if (value < 0 || value > 0xff)
20737
            {
20738
              as_bad_where (fixP->fx_file, fixP->fx_line,
20739
                            _("offset out of range"));
20740
              break;
20741
            }
20742
          newval &= ~0xff;
20743
          newval |= value;
20744
        }
20745
      else
20746
        {
20747
          /* Positive 12-bit or negative 8-bit offset.  */
20748
          int limit;
20749
          if (value >= 0)
20750
            {
20751
              newval |= (1 << 23);
20752
              limit = 0xfff;
20753
            }
20754
          else
20755
            {
20756
              value = -value;
20757
              limit = 0xff;
20758
            }
20759
          if (value > limit)
20760
            {
20761
              as_bad_where (fixP->fx_file, fixP->fx_line,
20762
                            _("offset out of range"));
20763
              break;
20764
            }
20765
          newval &= ~limit;
20766
        }
20767
 
20768
      newval |= value;
20769
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
20770
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
20771
      break;
20772
 
20773
    case BFD_RELOC_ARM_SHIFT_IMM:
20774
      newval = md_chars_to_number (buf, INSN_SIZE);
20775
      if (((unsigned long) value) > 32
20776
          || (value == 32
20777
              && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
20778
        {
20779
          as_bad_where (fixP->fx_file, fixP->fx_line,
20780
                        _("shift expression is too large"));
20781
          break;
20782
        }
20783
 
20784
      if (value == 0)
20785
        /* Shifts of zero must be done as lsl.  */
20786
        newval &= ~0x60;
20787
      else if (value == 32)
20788
        value = 0;
20789
      newval &= 0xfffff07f;
20790
      newval |= (value & 0x1f) << 7;
20791
      md_number_to_chars (buf, newval, INSN_SIZE);
20792
      break;
20793
 
20794
    case BFD_RELOC_ARM_T32_IMMEDIATE:
20795
    case BFD_RELOC_ARM_T32_ADD_IMM:
20796
    case BFD_RELOC_ARM_T32_IMM12:
20797
    case BFD_RELOC_ARM_T32_ADD_PC12:
20798
      /* We claim that this fixup has been processed here,
20799
         even if in fact we generate an error because we do
20800
         not have a reloc for it, so tc_gen_reloc will reject it.  */
20801
      fixP->fx_done = 1;
20802
 
20803
      if (fixP->fx_addsy
20804
          && ! S_IS_DEFINED (fixP->fx_addsy))
20805
        {
20806
          as_bad_where (fixP->fx_file, fixP->fx_line,
20807
                        _("undefined symbol %s used as an immediate value"),
20808
                        S_GET_NAME (fixP->fx_addsy));
20809
          break;
20810
        }
20811
 
20812
      newval = md_chars_to_number (buf, THUMB_SIZE);
20813
      newval <<= 16;
20814
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);
20815
 
20816
      newimm = FAIL;
20817
      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
20818
          || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20819
        {
20820
          newimm = encode_thumb32_immediate (value);
20821
          if (newimm == (unsigned int) FAIL)
20822
            newimm = thumb32_negate_data_op (&newval, value);
20823
        }
20824
      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
20825
          && newimm == (unsigned int) FAIL)
20826
        {
20827
          /* Turn add/sub into addw/subw.  */
20828
          if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20829
            newval = (newval & 0xfeffffff) | 0x02000000;
20830
          /* No flat 12-bit imm encoding for addsw/subsw.  */
20831
          if ((newval & 0x00100000) == 0)
20832
            {
20833
              /* 12 bit immediate for addw/subw.  */
20834
              if (value < 0)
20835
                {
20836
                  value = -value;
20837
                  newval ^= 0x00a00000;
20838
                }
20839
              if (value > 0xfff)
20840
                newimm = (unsigned int) FAIL;
20841
              else
20842
                newimm = value;
20843
            }
20844
        }
20845
 
20846
      if (newimm == (unsigned int)FAIL)
20847
        {
20848
          as_bad_where (fixP->fx_file, fixP->fx_line,
20849
                        _("invalid constant (%lx) after fixup"),
20850
                        (unsigned long) value);
20851
          break;
20852
        }
20853
 
20854
      newval |= (newimm & 0x800) << 15;
20855
      newval |= (newimm & 0x700) << 4;
20856
      newval |= (newimm & 0x0ff);
20857
 
20858
      md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
20859
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
20860
      break;
20861
 
20862
    case BFD_RELOC_ARM_SMC:
20863
      if (((unsigned long) value) > 0xffff)
20864
        as_bad_where (fixP->fx_file, fixP->fx_line,
20865
                      _("invalid smc expression"));
20866
      newval = md_chars_to_number (buf, INSN_SIZE);
20867
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20868
      md_number_to_chars (buf, newval, INSN_SIZE);
20869
      break;
20870
 
20871
    case BFD_RELOC_ARM_HVC:
20872
      if (((unsigned long) value) > 0xffff)
20873
        as_bad_where (fixP->fx_file, fixP->fx_line,
20874
                      _("invalid hvc expression"));
20875
      newval = md_chars_to_number (buf, INSN_SIZE);
20876
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20877
      md_number_to_chars (buf, newval, INSN_SIZE);
20878
      break;
20879
 
20880
    case BFD_RELOC_ARM_SWI:
20881
      if (fixP->tc_fix_data != 0)
20882
        {
20883
          if (((unsigned long) value) > 0xff)
20884
            as_bad_where (fixP->fx_file, fixP->fx_line,
20885
                          _("invalid swi expression"));
20886
          newval = md_chars_to_number (buf, THUMB_SIZE);
20887
          newval |= value;
20888
          md_number_to_chars (buf, newval, THUMB_SIZE);
20889
        }
20890
      else
20891
        {
20892
          if (((unsigned long) value) > 0x00ffffff)
20893
            as_bad_where (fixP->fx_file, fixP->fx_line,
20894
                          _("invalid swi expression"));
20895
          newval = md_chars_to_number (buf, INSN_SIZE);
20896
          newval |= value;
20897
          md_number_to_chars (buf, newval, INSN_SIZE);
20898
        }
20899
      break;
20900
 
20901
    case BFD_RELOC_ARM_MULTI:
20902
      if (((unsigned long) value) > 0xffff)
20903
        as_bad_where (fixP->fx_file, fixP->fx_line,
20904
                      _("invalid expression in load/store multiple"));
20905
      newval = value | md_chars_to_number (buf, INSN_SIZE);
20906
      md_number_to_chars (buf, newval, INSN_SIZE);
20907
      break;
20908
 
20909
#ifdef OBJ_ELF
20910
    case BFD_RELOC_ARM_PCREL_CALL:
20911
 
20912
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20913
          && fixP->fx_addsy
20914
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20915
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20916
          && THUMB_IS_FUNC (fixP->fx_addsy))
20917
        /* Flip the bl to blx.  This is a simple bit
20918
           flip here because we generate PCREL_CALL for
20919
           unconditional bls.  */
20920
        {
20921
          newval = md_chars_to_number (buf, INSN_SIZE);
20922
          newval = newval | 0x10000000;
20923
          md_number_to_chars (buf, newval, INSN_SIZE);
20924
          temp = 1;
20925
          fixP->fx_done = 1;
20926
        }
20927
      else
20928
        temp = 3;
20929
      goto arm_branch_common;
20930
 
20931
    case BFD_RELOC_ARM_PCREL_JUMP:
20932
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20933
          && fixP->fx_addsy
20934
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20935
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20936
          && THUMB_IS_FUNC (fixP->fx_addsy))
20937
        {
20938
          /* This would map to a bl<cond>, b<cond>,
20939
             b<always> to a Thumb function. We
20940
             need to force a relocation for this particular
20941
             case.  */
20942
          newval = md_chars_to_number (buf, INSN_SIZE);
20943
          fixP->fx_done = 0;
20944
        }
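      /* Fall through.  */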
20945
 
20946
    case BFD_RELOC_ARM_PLT32:
20947
#endif
20948
    case BFD_RELOC_ARM_PCREL_BRANCH:
20949
      temp = 3;
20950
      goto arm_branch_common;
20951
 
20952
    case BFD_RELOC_ARM_PCREL_BLX:
20953
 
20954
      temp = 1;
20955
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20956
          && fixP->fx_addsy
20957
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20958
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20959
          && ARM_IS_FUNC (fixP->fx_addsy))
20960
        {
20961
          /* Flip the blx to a bl and warn.  */
20962
          const char *name = S_GET_NAME (fixP->fx_addsy);
20963
          newval = 0xeb000000;
20964
          as_warn_where (fixP->fx_file, fixP->fx_line,
20965
                         _("blx to '%s' an ARM ISA state function changed to bl"),
20966
                          name);
20967
          md_number_to_chars (buf, newval, INSN_SIZE);
20968
          temp = 3;
20969
          fixP->fx_done = 1;
20970
        }
20971
 
20972
#ifdef OBJ_ELF
20973
       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
20974
         fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
20975
#endif
20976
 
20977
    arm_branch_common:
20978
      /* We are going to store value (shifted right by two) in the
20979
         instruction, in a 24 bit, signed field.  Bits 26 through 32 must be
20980
         either all clear or all set, and bit 0 must be clear.  For B/BL bit 1
20981
         must also be clear.  */
20982
      if (value & temp)
20983
        as_bad_where (fixP->fx_file, fixP->fx_line,
20984
                      _("misaligned branch destination"));
20985
      if ((value & (offsetT)0xfe000000) != (offsetT)0
20986
          && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
20987 160 khays
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
20988 16 khays
 
20989
      if (fixP->fx_done || !seg->use_rela_p)
20990
        {
20991
          newval = md_chars_to_number (buf, INSN_SIZE);
20992
          newval |= (value >> 2) & 0x00ffffff;
20993
          /* Set the H bit on BLX instructions.  */
20994
          if (temp == 1)
20995
            {
20996
              if (value & 2)
20997
                newval |= 0x01000000;
20998
              else
20999
                newval &= ~0x01000000;
21000
            }
21001
          md_number_to_chars (buf, newval, INSN_SIZE);
21002
        }
21003
      break;
21004
 
21005
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
21006
      /* CBZ can only branch forward.  */
21007
 
21008
      /* Attempts to use CBZ to branch to the next instruction
21009
         (which, strictly speaking, are prohibited) will be turned into
21010
         no-ops.
21011
 
21012
         FIXME: It may be better to remove the instruction completely and
21013
         perform relaxation.  */
21014
      if (value == -2)
21015
        {
21016
          newval = md_chars_to_number (buf, THUMB_SIZE);
21017
          newval = 0xbf00; /* NOP encoding T1 */
21018
          md_number_to_chars (buf, newval, THUMB_SIZE);
21019
        }
21020
      else
21021
        {
21022
          if (value & ~0x7e)
21023 160 khays
            as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21024 16 khays
 
21025
          if (fixP->fx_done || !seg->use_rela_p)
21026
            {
21027
              newval = md_chars_to_number (buf, THUMB_SIZE);
21028
              newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
21029
              md_number_to_chars (buf, newval, THUMB_SIZE);
21030
            }
21031
        }
21032
      break;
21033
 
21034
    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
21035
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
21036 160 khays
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21037 16 khays
 
21038
      if (fixP->fx_done || !seg->use_rela_p)
21039
        {
21040
          newval = md_chars_to_number (buf, THUMB_SIZE);
21041
          newval |= (value & 0x1ff) >> 1;
21042
          md_number_to_chars (buf, newval, THUMB_SIZE);
21043
        }
21044
      break;
21045
 
21046
    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
21047
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
21048 160 khays
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21049 16 khays
 
21050
      if (fixP->fx_done || !seg->use_rela_p)
21051
        {
21052
          newval = md_chars_to_number (buf, THUMB_SIZE);
21053
          newval |= (value & 0xfff) >> 1;
21054
          md_number_to_chars (buf, newval, THUMB_SIZE);
21055
        }
21056
      break;
21057
 
21058
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
21059
      if (fixP->fx_addsy
21060
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21061
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21062
          && ARM_IS_FUNC (fixP->fx_addsy)
21063
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21064
        {
21065
          /* Force a relocation for a branch 20 bits wide.  */
21066
          fixP->fx_done = 0;
21067
        }
21068 160 khays
      if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
21069 16 khays
        as_bad_where (fixP->fx_file, fixP->fx_line,
21070
                      _("conditional branch out of range"));
21071
 
21072
      if (fixP->fx_done || !seg->use_rela_p)
21073
        {
21074
          offsetT newval2;
21075
          addressT S, J1, J2, lo, hi;
21076
 
21077
          S  = (value & 0x00100000) >> 20;
21078
          J2 = (value & 0x00080000) >> 19;
21079
          J1 = (value & 0x00040000) >> 18;
21080
          hi = (value & 0x0003f000) >> 12;
21081
          lo = (value & 0x00000ffe) >> 1;
21082
 
21083
          newval   = md_chars_to_number (buf, THUMB_SIZE);
21084
          newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21085
          newval  |= (S << 10) | hi;
21086
          newval2 |= (J1 << 13) | (J2 << 11) | lo;
21087
          md_number_to_chars (buf, newval, THUMB_SIZE);
21088
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21089
        }
21090
      break;
21091
 
21092
    case BFD_RELOC_THUMB_PCREL_BLX:
21093
      /* If there is a blx from a thumb state function to
21094
         another thumb function, flip this to a bl and warn
21095
         about it.  */
21096
 
21097
      if (fixP->fx_addsy
21098
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21099
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21100
          && THUMB_IS_FUNC (fixP->fx_addsy))
21101
        {
21102
          const char *name = S_GET_NAME (fixP->fx_addsy);
21103
          as_warn_where (fixP->fx_file, fixP->fx_line,
21104
                         _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
21105
                         name);
21106
          newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21107
          newval = newval | 0x1000;
21108
          md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21109
          fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21110
          fixP->fx_done = 1;
21111
        }
21112
 
21113
 
21114
      goto thumb_bl_common;
21115
 
21116
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
21117
      /* A bl from Thumb state ISA to an internal ARM state function
21118
         is converted to a blx.  */
21119
      if (fixP->fx_addsy
21120
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21121
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21122
          && ARM_IS_FUNC (fixP->fx_addsy)
21123
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21124
        {
21125
          newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21126
          newval = newval & ~0x1000;
21127
          md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21128
          fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
21129
          fixP->fx_done = 1;
21130
        }
21131
 
21132
    thumb_bl_common:
21133
 
21134
#ifdef OBJ_ELF
21135
       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4 &&
21136
           fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21137
         fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21138
#endif
21139
 
21140
      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21141
        /* For a BLX instruction, make sure that the relocation is rounded up
21142
           to a word boundary.  This follows the semantics of the instruction
21143
           which specifies that bit 1 of the target address will come from bit
21144
           1 of the base address.  */
21145
        value = (value + 1) & ~ 1;
21146
 
21147
       if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
21148 160 khays
         {
21149
           if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
21150
             as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21151
           else if ((value & ~0x1ffffff)
21152
                    && ((value & ~0x1ffffff) != ~0x1ffffff))
21153
             as_bad_where (fixP->fx_file, fixP->fx_line,
21154
                           _("Thumb2 branch out of range"));
21155
         }
21156 16 khays
 
21157
      if (fixP->fx_done || !seg->use_rela_p)
21158
        encode_thumb2_b_bl_offset (buf, value);
21159
 
21160
      break;
21161
 
21162
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
21163 160 khays
      if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
21164
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21165 16 khays
 
21166
      if (fixP->fx_done || !seg->use_rela_p)
21167
          encode_thumb2_b_bl_offset (buf, value);
21168
 
21169
      break;
21170
 
21171
    case BFD_RELOC_8:
21172
      if (fixP->fx_done || !seg->use_rela_p)
21173
        md_number_to_chars (buf, value, 1);
21174
      break;
21175
 
21176
    case BFD_RELOC_16:
21177
      if (fixP->fx_done || !seg->use_rela_p)
21178
        md_number_to_chars (buf, value, 2);
21179
      break;
21180
 
21181
#ifdef OBJ_ELF
21182
    case BFD_RELOC_ARM_TLS_CALL:
21183
    case BFD_RELOC_ARM_THM_TLS_CALL:
21184
    case BFD_RELOC_ARM_TLS_DESCSEQ:
21185
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
21186
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
21187
      break;
21188
 
21189
    case BFD_RELOC_ARM_TLS_GOTDESC:
21190
    case BFD_RELOC_ARM_TLS_GD32:
21191
    case BFD_RELOC_ARM_TLS_LE32:
21192
    case BFD_RELOC_ARM_TLS_IE32:
21193
    case BFD_RELOC_ARM_TLS_LDM32:
21194
    case BFD_RELOC_ARM_TLS_LDO32:
21195
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
21196
      /* fall through */
21197
 
21198
    case BFD_RELOC_ARM_GOT32:
21199
    case BFD_RELOC_ARM_GOTOFF:
21200
      if (fixP->fx_done || !seg->use_rela_p)
21201
        md_number_to_chars (buf, 0, 4);
21202
      break;
21203
 
21204
    case BFD_RELOC_ARM_GOT_PREL:
21205
      if (fixP->fx_done || !seg->use_rela_p)
21206
        md_number_to_chars (buf, value, 4);
21207
      break;
21208
 
21209
    case BFD_RELOC_ARM_TARGET2:
21210
      /* TARGET2 is not partial-inplace, so we need to write the
21211
         addend here for REL targets, because it won't be written out
21212
         during reloc processing later.  */
21213
      if (fixP->fx_done || !seg->use_rela_p)
21214
        md_number_to_chars (buf, fixP->fx_offset, 4);
21215
      break;
21216
#endif
21217
 
21218
    case BFD_RELOC_RVA:
21219
    case BFD_RELOC_32:
21220
    case BFD_RELOC_ARM_TARGET1:
21221
    case BFD_RELOC_ARM_ROSEGREL32:
21222
    case BFD_RELOC_ARM_SBREL32:
21223
    case BFD_RELOC_32_PCREL:
21224
#ifdef TE_PE
21225
    case BFD_RELOC_32_SECREL:
21226
#endif
21227
      if (fixP->fx_done || !seg->use_rela_p)
21228
#ifdef TE_WINCE
21229
        /* For WinCE we only do this for pcrel fixups.  */
21230
        if (fixP->fx_done || fixP->fx_pcrel)
21231
#endif
21232
          md_number_to_chars (buf, value, 4);
21233
      break;
21234
 
21235
#ifdef OBJ_ELF
21236
    case BFD_RELOC_ARM_PREL31:
21237
      if (fixP->fx_done || !seg->use_rela_p)
21238
        {
21239
          newval = md_chars_to_number (buf, 4) & 0x80000000;
21240
          if ((value ^ (value >> 1)) & 0x40000000)
21241
            {
21242
              as_bad_where (fixP->fx_file, fixP->fx_line,
21243
                            _("rel31 relocation overflow"));
21244
            }
21245
          newval |= value & 0x7fffffff;
21246
          md_number_to_chars (buf, newval, 4);
21247
        }
21248
      break;
21249
#endif
21250
 
21251
    case BFD_RELOC_ARM_CP_OFF_IMM:
21252
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
21253
      if (value < -1023 || value > 1023 || (value & 3))
21254
        as_bad_where (fixP->fx_file, fixP->fx_line,
21255
                      _("co-processor offset out of range"));
21256
    cp_off_common:
21257
      sign = value > 0;
21258
      if (value < 0)
21259
        value = -value;
21260
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21261
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21262
        newval = md_chars_to_number (buf, INSN_SIZE);
21263
      else
21264
        newval = get_thumb32_insn (buf);
21265
      if (value == 0)
21266
        newval &= 0xffffff00;
21267
      else
21268
        {
21269
          newval &= 0xff7fff00;
21270
          newval |= (value >> 2) | (sign ? INDEX_UP : 0);
21271
        }
21272
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21273
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21274
        md_number_to_chars (buf, newval, INSN_SIZE);
21275
      else
21276
        put_thumb32_insn (buf, newval);
21277
      break;
21278
 
21279
    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
21280
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
21281
      if (value < -255 || value > 255)
21282
        as_bad_where (fixP->fx_file, fixP->fx_line,
21283
                      _("co-processor offset out of range"));
21284
      value *= 4;
21285
      goto cp_off_common;
21286
 
21287
    case BFD_RELOC_ARM_THUMB_OFFSET:
21288
      newval = md_chars_to_number (buf, THUMB_SIZE);
21289
      /* Exactly what ranges are allowed, and where the offset is inserted,
21290
         depend on the type of instruction; we can establish this from the
21291
         top 4 bits.  */
21292
      switch (newval >> 12)
21293
        {
21294
        case 4: /* PC load.  */
21295
          /* Thumb PC loads are somewhat odd, bit 1 of the PC is
21296
             forced to zero for these loads; md_pcrel_from has already
21297
             compensated for this.  */
21298
          if (value & 3)
21299
            as_bad_where (fixP->fx_file, fixP->fx_line,
21300
                          _("invalid offset, target not word aligned (0x%08lX)"),
21301
                          (((unsigned long) fixP->fx_frag->fr_address
21302
                            + (unsigned long) fixP->fx_where) & ~3)
21303
                          + (unsigned long) value);
21304
 
21305
          if (value & ~0x3fc)
21306
            as_bad_where (fixP->fx_file, fixP->fx_line,
21307
                          _("invalid offset, value too big (0x%08lX)"),
21308
                          (long) value);
21309
 
21310
          newval |= value >> 2;
21311
          break;
21312
 
21313
        case 9: /* SP load/store.  */
21314
          if (value & ~0x3fc)
21315
            as_bad_where (fixP->fx_file, fixP->fx_line,
21316
                          _("invalid offset, value too big (0x%08lX)"),
21317
                          (long) value);
21318
          newval |= value >> 2;
21319
          break;
21320
 
21321
        case 6: /* Word load/store.  */
21322
          if (value & ~0x7c)
21323
            as_bad_where (fixP->fx_file, fixP->fx_line,
21324
                          _("invalid offset, value too big (0x%08lX)"),
21325
                          (long) value);
21326
          newval |= value << 4; /* 6 - 2.  */
21327
          break;
21328
 
21329
        case 7: /* Byte load/store.  */
21330
          if (value & ~0x1f)
21331
            as_bad_where (fixP->fx_file, fixP->fx_line,
21332
                          _("invalid offset, value too big (0x%08lX)"),
21333
                          (long) value);
21334
          newval |= value << 6;
21335
          break;
21336
 
21337
        case 8: /* Halfword load/store.  */
21338
          if (value & ~0x3e)
21339
            as_bad_where (fixP->fx_file, fixP->fx_line,
21340
                          _("invalid offset, value too big (0x%08lX)"),
21341
                          (long) value);
21342
          newval |= value << 5; /* 6 - 1.  */
21343
          break;
21344
 
21345
        default:
21346
          as_bad_where (fixP->fx_file, fixP->fx_line,
21347
                        "Unable to process relocation for thumb opcode: %lx",
21348
                        (unsigned long) newval);
21349
          break;
21350
        }
21351
      md_number_to_chars (buf, newval, THUMB_SIZE);
21352
      break;
21353
 
21354
    case BFD_RELOC_ARM_THUMB_ADD:
21355
      /* This is a complicated relocation, since we use it for all of
21356
         the following immediate relocations:
21357
 
21358
            3bit ADD/SUB
21359
            8bit ADD/SUB
21360
            9bit ADD/SUB SP word-aligned
21361
           10bit ADD PC/SP word-aligned
21362
 
21363
         The type of instruction being processed is encoded in the
21364
         instruction field:
21365
 
21366
           0x8000  SUB
21367
           0x00F0  Rd
21368
           0x000F  Rs
21369
      */
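      /* Illustrative example: for `add r2, r2, #12' (rd == rs == 2, SUB bit
         clear) the code below takes the rs == rd branch, selects
         T_OPCODE_ADD_I8 and ORs in (2 << 8) | 12; a resolved value above
         0xff would instead be reported as out of range.  */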
21370
      newval = md_chars_to_number (buf, THUMB_SIZE);
21371
      {
21372
        int rd = (newval >> 4) & 0xf;
21373
        int rs = newval & 0xf;
21374
        int subtract = !!(newval & 0x8000);
21375
 
21376
        /* Check for HI regs, only very restricted cases allowed:
21377
           Adjusting SP, and using PC or SP to get an address.  */
21378
        if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
21379
            || (rs > 7 && rs != REG_SP && rs != REG_PC))
21380
          as_bad_where (fixP->fx_file, fixP->fx_line,
21381
                        _("invalid Hi register with immediate"));
21382
 
21383
        /* If value is negative, choose the opposite instruction.  */
21384
        if (value < 0)
21385
          {
21386
            value = -value;
21387
            subtract = !subtract;
21388
            if (value < 0)
21389
              as_bad_where (fixP->fx_file, fixP->fx_line,
21390
                            _("immediate value out of range"));
21391
          }
21392
 
21393
        if (rd == REG_SP)
21394
          {
21395
            if (value & ~0x1fc)
21396
              as_bad_where (fixP->fx_file, fixP->fx_line,
21397
                            _("invalid immediate for stack address calculation"));
21398
            newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
21399
            newval |= value >> 2;
21400
          }
21401
        else if (rs == REG_PC || rs == REG_SP)
21402
          {
21403
            if (subtract || value & ~0x3fc)
21404
              as_bad_where (fixP->fx_file, fixP->fx_line,
21405
                            _("invalid immediate for address calculation (value = 0x%08lX)"),
21406
                            (unsigned long) value);
21407
            newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
21408
            newval |= rd << 8;
21409
            newval |= value >> 2;
21410
          }
21411
        else if (rs == rd)
21412
          {
21413
            if (value & ~0xff)
21414
              as_bad_where (fixP->fx_file, fixP->fx_line,
21415
                            _("immediate value out of range"));
21416
            newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
21417
            newval |= (rd << 8) | value;
21418
          }
21419
        else
21420
          {
21421
            if (value & ~0x7)
21422
              as_bad_where (fixP->fx_file, fixP->fx_line,
21423
                            _("immediate value out of range"));
21424
            newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
21425
            newval |= rd | (rs << 3) | (value << 6);
21426
          }
21427
      }
21428
      md_number_to_chars (buf, newval, THUMB_SIZE);
21429
      break;
21430
 
21431
    case BFD_RELOC_ARM_THUMB_IMM:
21432
      newval = md_chars_to_number (buf, THUMB_SIZE);
21433
      if (value < 0 || value > 255)
21434
        as_bad_where (fixP->fx_file, fixP->fx_line,
21435
                      _("invalid immediate: %ld is out of range"),
21436
                      (long) value);
21437
      newval |= value;
21438
      md_number_to_chars (buf, newval, THUMB_SIZE);
21439
      break;
21440
 
21441
    case BFD_RELOC_ARM_THUMB_SHIFT:
21442
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
21443
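      /* Illustrative note (not from the original source): the shift amount
         is placed in bits 6..10; ASR/LSR #32 is stored as 0 and a zero
         shift is re-encoded as LSL, so e.g. "lsrs r0, r1, #32" ends up
         with an all-zero immediate field.  */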
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
21444
      temp = newval & 0xf800;
21445
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
21446
        as_bad_where (fixP->fx_file, fixP->fx_line,
21447
                      _("invalid shift value: %ld"), (long) value);
21448
      /* Shifts of zero must be encoded as LSL.  */
21449
      if (value == 0)
21450
        newval = (newval & 0x003f) | T_OPCODE_LSL_I;
21451
      /* Shifts of 32 are encoded as zero.  */
21452
      else if (value == 32)
21453
        value = 0;
21454
      newval |= value << 6;
21455
      md_number_to_chars (buf, newval, THUMB_SIZE);
21456
      break;
21457
 
21458
    case BFD_RELOC_VTABLE_INHERIT:
21459
    case BFD_RELOC_VTABLE_ENTRY:
21460
      fixP->fx_done = 0;
21461
      return;
21462
 
21463
    case BFD_RELOC_ARM_MOVW:
21464
    case BFD_RELOC_ARM_MOVT:
21465
    case BFD_RELOC_ARM_THUMB_MOVW:
21466
    case BFD_RELOC_ARM_THUMB_MOVT:
21467
      if (fixP->fx_done || !seg->use_rela_p)
21468
        {
21469
          /* REL format relocations are limited to a 16-bit addend.  */
21470
          if (!fixP->fx_done)
21471
            {
21472
              if (value < -0x8000 || value > 0x7fff)
21473
                  as_bad_where (fixP->fx_file, fixP->fx_line,
21474
                                _("offset out of range"));
21475
            }
21476
          else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
21477
                   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21478
            {
21479
              value >>= 16;
21480
            }
21481
 
21482
          if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
21483
              || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21484
            {
21485
              newval = get_thumb32_insn (buf);
21486
              newval &= 0xfbf08f00;
21487
              newval |= (value & 0xf000) << 4;
21488
              newval |= (value & 0x0800) << 15;
21489
              newval |= (value & 0x0700) << 4;
21490
              newval |= (value & 0x00ff);
21491
              put_thumb32_insn (buf, newval);
21492
            }
21493
          else
21494
            {
21495
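              /* Illustrative note (not from the original source): for the
                 ARM encoding, value[11:0] is placed in bits 0..11 (imm12)
                 and value[15:12] in bits 16..19 (imm4).  */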
              newval = md_chars_to_number (buf, 4);
21496
              newval &= 0xfff0f000;
21497
              newval |= value & 0x0fff;
21498
              newval |= (value & 0xf000) << 4;
21499
              md_number_to_chars (buf, newval, 4);
21500
            }
21501
        }
21502
      return;
21503
 
21504
   case BFD_RELOC_ARM_ALU_PC_G0_NC:
21505
   case BFD_RELOC_ARM_ALU_PC_G0:
21506
   case BFD_RELOC_ARM_ALU_PC_G1_NC:
21507
   case BFD_RELOC_ARM_ALU_PC_G1:
21508
   case BFD_RELOC_ARM_ALU_PC_G2:
21509
   case BFD_RELOC_ARM_ALU_SB_G0_NC:
21510
   case BFD_RELOC_ARM_ALU_SB_G0:
21511
   case BFD_RELOC_ARM_ALU_SB_G1_NC:
21512
   case BFD_RELOC_ARM_ALU_SB_G1:
21513
   case BFD_RELOC_ARM_ALU_SB_G2:
21514
     gas_assert (!fixP->fx_done);
21515
     if (!seg->use_rela_p)
21516
       {
21517
         bfd_vma insn;
21518
         bfd_vma encoded_addend;
21519
         bfd_vma addend_abs = abs (value);
21520
 
21521
         /* Check that the absolute value of the addend can be
21522
            expressed as an 8-bit constant plus a rotation.  */
21523
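         /* Illustrative example (not from the original source): an ARM
            data-processing immediate is an 8-bit constant rotated right by
            an even amount, so 0x3F0 (0x3F rotated right by 28) is
            representable while 0x101 is not.  */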
         encoded_addend = encode_arm_immediate (addend_abs);
21524
         if (encoded_addend == (unsigned int) FAIL)
21525
           as_bad_where (fixP->fx_file, fixP->fx_line,
21526
                         _("the offset 0x%08lX is not representable"),
21527
                         (unsigned long) addend_abs);
21528
 
21529
         /* Extract the instruction.  */
21530
         insn = md_chars_to_number (buf, INSN_SIZE);
21531
 
21532
         /* If the addend is positive, use an ADD instruction.
21533
            Otherwise use a SUB.  Take care not to destroy the S bit.  */
21534
         insn &= 0xff1fffff;
21535
         if (value < 0)
21536
           insn |= 1 << 22;
21537
         else
21538
           insn |= 1 << 23;
21539
 
21540
         /* Place the encoded addend into the first 12 bits of the
21541
            instruction.  */
21542
         insn &= 0xfffff000;
21543
         insn |= encoded_addend;
21544
 
21545
         /* Update the instruction.  */
21546
         md_number_to_chars (buf, insn, INSN_SIZE);
21547
       }
21548
     break;
21549
 
21550
    case BFD_RELOC_ARM_LDR_PC_G0:
21551
    case BFD_RELOC_ARM_LDR_PC_G1:
21552
    case BFD_RELOC_ARM_LDR_PC_G2:
21553
    case BFD_RELOC_ARM_LDR_SB_G0:
21554
    case BFD_RELOC_ARM_LDR_SB_G1:
21555
    case BFD_RELOC_ARM_LDR_SB_G2:
21556
      gas_assert (!fixP->fx_done);
21557
      if (!seg->use_rela_p)
21558
        {
21559
          bfd_vma insn;
21560
          bfd_vma addend_abs = abs (value);
21561
 
21562
          /* Check that the absolute value of the addend can be
21563
             encoded in 12 bits.  */
21564
          if (addend_abs >= 0x1000)
21565
            as_bad_where (fixP->fx_file, fixP->fx_line,
21566
                          _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
21567
                          (unsigned long) addend_abs);
21568
 
21569
          /* Extract the instruction.  */
21570
          insn = md_chars_to_number (buf, INSN_SIZE);
21571
 
21572
          /* If the addend is negative, clear bit 23 of the instruction.
21573
             Otherwise set it.  */
21574
          if (value < 0)
21575
            insn &= ~(1 << 23);
21576
          else
21577
            insn |= 1 << 23;
21578
 
21579
          /* Place the absolute value of the addend into the first 12 bits
21580
             of the instruction.  */
21581
          insn &= 0xfffff000;
21582
          insn |= addend_abs;
21583
 
21584
          /* Update the instruction.  */
21585
          md_number_to_chars (buf, insn, INSN_SIZE);
21586
        }
21587
      break;
21588
 
21589
    case BFD_RELOC_ARM_LDRS_PC_G0:
21590
    case BFD_RELOC_ARM_LDRS_PC_G1:
21591
    case BFD_RELOC_ARM_LDRS_PC_G2:
21592
    case BFD_RELOC_ARM_LDRS_SB_G0:
21593
    case BFD_RELOC_ARM_LDRS_SB_G1:
21594
    case BFD_RELOC_ARM_LDRS_SB_G2:
21595
      gas_assert (!fixP->fx_done);
21596
      if (!seg->use_rela_p)
21597
        {
21598
          bfd_vma insn;
21599
          bfd_vma addend_abs = abs (value);
21600
 
21601
          /* Check that the absolute value of the addend can be
21602
             encoded in 8 bits.  */
21603
          if (addend_abs >= 0x100)
21604
            as_bad_where (fixP->fx_file, fixP->fx_line,
21605
                          _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
21606
                          (unsigned long) addend_abs);
21607
 
21608
          /* Extract the instruction.  */
21609
          insn = md_chars_to_number (buf, INSN_SIZE);
21610
 
21611
          /* If the addend is negative, clear bit 23 of the instruction.
21612
             Otherwise set it.  */
21613
          if (value < 0)
21614
            insn &= ~(1 << 23);
21615
          else
21616
            insn |= 1 << 23;
21617
 
21618
          /* Place the first four bits of the absolute value of the addend
21619
             into the first 4 bits of the instruction, and the remaining
21620
             four into bits 8 .. 11.  */
21621
          insn &= 0xfffff0f0;
21622
          insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
21623
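          /* Illustrative example (not from the original source): an addend
             of 0x5A is split so that 0xA lands in bits 0..3 and 0x5 in
             bits 8..11, giving insn |= 0x50A.  */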
 
21624
          /* Update the instruction.  */
21625
          md_number_to_chars (buf, insn, INSN_SIZE);
21626
        }
21627
      break;
21628
 
21629
    case BFD_RELOC_ARM_LDC_PC_G0:
21630
    case BFD_RELOC_ARM_LDC_PC_G1:
21631
    case BFD_RELOC_ARM_LDC_PC_G2:
21632
    case BFD_RELOC_ARM_LDC_SB_G0:
21633
    case BFD_RELOC_ARM_LDC_SB_G1:
21634
    case BFD_RELOC_ARM_LDC_SB_G2:
21635
      gas_assert (!fixP->fx_done);
21636
      if (!seg->use_rela_p)
21637
        {
21638
          bfd_vma insn;
21639
          bfd_vma addend_abs = abs (value);
21640
 
21641
          /* Check that the absolute value of the addend is a multiple of
21642
             four and, when divided by four, fits in 8 bits.  */
21643
          if (addend_abs & 0x3)
21644
            as_bad_where (fixP->fx_file, fixP->fx_line,
21645
                          _("bad offset 0x%08lX (must be word-aligned)"),
21646
                          (unsigned long) addend_abs);
21647
 
21648
          if ((addend_abs >> 2) > 0xff)
21649
            as_bad_where (fixP->fx_file, fixP->fx_line,
21650
                          _("bad offset 0x%08lX (must be an 8-bit number of words)"),
21651
                          (unsigned long) addend_abs);
21652
 
21653
          /* Extract the instruction.  */
21654
          insn = md_chars_to_number (buf, INSN_SIZE);
21655
 
21656
          /* If the addend is negative, clear bit 23 of the instruction.
21657
             Otherwise set it.  */
21658
          if (value < 0)
21659
            insn &= ~(1 << 23);
21660
          else
21661
            insn |= 1 << 23;
21662
 
21663
          /* Place the addend (divided by four) into the first eight
21664
             bits of the instruction.  */
21665
          insn &= 0xfffffff0;
21666
          insn |= addend_abs >> 2;
21667
 
21668
          /* Update the instruction.  */
21669
          md_number_to_chars (buf, insn, INSN_SIZE);
21670
        }
21671
      break;
21672
 
21673
    case BFD_RELOC_ARM_V4BX:
21674
      /* This will need to go in the object file.  */
21675
      fixP->fx_done = 0;
21676
      break;
21677
 
21678
    case BFD_RELOC_UNUSED:
21679
    default:
21680
      as_bad_where (fixP->fx_file, fixP->fx_line,
21681
                    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
21682
    }
21683
}
21684
 
21685
/* Translate internal representation of relocation info to BFD target
21686
   format.  */
21687
 
21688
arelent *
21689
tc_gen_reloc (asection *section, fixS *fixp)
21690
{
21691
  arelent * reloc;
21692
  bfd_reloc_code_real_type code;
21693
 
21694
  reloc = (arelent *) xmalloc (sizeof (arelent));
21695
 
21696
  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
21697
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
21698
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
21699
 
21700
  if (fixp->fx_pcrel)
21701
    {
21702
      if (section->use_rela_p)
21703
        fixp->fx_offset -= md_pcrel_from_section (fixp, section);
21704
      else
21705
        fixp->fx_offset = reloc->address;
21706
    }
21707
  reloc->addend = fixp->fx_offset;
21708
 
21709
  switch (fixp->fx_r_type)
21710
    {
21711
    case BFD_RELOC_8:
21712
      if (fixp->fx_pcrel)
21713
        {
21714
          code = BFD_RELOC_8_PCREL;
21715
          break;
21716
        }
21717
 
21718
    case BFD_RELOC_16:
21719
      if (fixp->fx_pcrel)
21720
        {
21721
          code = BFD_RELOC_16_PCREL;
21722
          break;
21723
        }
21724
 
21725
    case BFD_RELOC_32:
21726
      if (fixp->fx_pcrel)
21727
        {
21728
          code = BFD_RELOC_32_PCREL;
21729
          break;
21730
        }
21731
 
21732
    case BFD_RELOC_ARM_MOVW:
21733
      if (fixp->fx_pcrel)
21734
        {
21735
          code = BFD_RELOC_ARM_MOVW_PCREL;
21736
          break;
21737
        }
21738
 
21739
    case BFD_RELOC_ARM_MOVT:
21740
      if (fixp->fx_pcrel)
21741
        {
21742
          code = BFD_RELOC_ARM_MOVT_PCREL;
21743
          break;
21744
        }
21745
 
21746
    case BFD_RELOC_ARM_THUMB_MOVW:
21747
      if (fixp->fx_pcrel)
21748
        {
21749
          code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
21750
          break;
21751
        }
21752
 
21753
    case BFD_RELOC_ARM_THUMB_MOVT:
21754
      if (fixp->fx_pcrel)
21755
        {
21756
          code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
21757
          break;
21758
        }
21759
 
21760
    case BFD_RELOC_NONE:
21761
    case BFD_RELOC_ARM_PCREL_BRANCH:
21762
    case BFD_RELOC_ARM_PCREL_BLX:
21763
    case BFD_RELOC_RVA:
21764
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
21765
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
21766
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
21767
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
21768
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
21769
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
21770
    case BFD_RELOC_VTABLE_ENTRY:
21771
    case BFD_RELOC_VTABLE_INHERIT:
21772
#ifdef TE_PE
21773
    case BFD_RELOC_32_SECREL:
21774
#endif
21775
      code = fixp->fx_r_type;
21776
      break;
21777
 
21778
    case BFD_RELOC_THUMB_PCREL_BLX:
21779
#ifdef OBJ_ELF
21780
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21781
        code = BFD_RELOC_THUMB_PCREL_BRANCH23;
21782
      else
21783
#endif
21784
        code = BFD_RELOC_THUMB_PCREL_BLX;
21785
      break;
21786
 
21787
    case BFD_RELOC_ARM_LITERAL:
21788
    case BFD_RELOC_ARM_HWLITERAL:
21789
      /* If this is called then a literal has
21790
         been referenced across a section boundary.  */
21791
      as_bad_where (fixp->fx_file, fixp->fx_line,
21792
                    _("literal referenced across section boundary"));
21793
      return NULL;
21794
 
21795
#ifdef OBJ_ELF
21796
    case BFD_RELOC_ARM_TLS_CALL:
21797
    case BFD_RELOC_ARM_THM_TLS_CALL:
21798
    case BFD_RELOC_ARM_TLS_DESCSEQ:
21799
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
21800
    case BFD_RELOC_ARM_GOT32:
21801
    case BFD_RELOC_ARM_GOTOFF:
21802
    case BFD_RELOC_ARM_GOT_PREL:
21803
    case BFD_RELOC_ARM_PLT32:
21804
    case BFD_RELOC_ARM_TARGET1:
21805
    case BFD_RELOC_ARM_ROSEGREL32:
21806
    case BFD_RELOC_ARM_SBREL32:
21807
    case BFD_RELOC_ARM_PREL31:
21808
    case BFD_RELOC_ARM_TARGET2:
21809
    case BFD_RELOC_ARM_TLS_LE32:
21810
    case BFD_RELOC_ARM_TLS_LDO32:
21811
    case BFD_RELOC_ARM_PCREL_CALL:
21812
    case BFD_RELOC_ARM_PCREL_JUMP:
21813
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
21814
    case BFD_RELOC_ARM_ALU_PC_G0:
21815
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
21816
    case BFD_RELOC_ARM_ALU_PC_G1:
21817
    case BFD_RELOC_ARM_ALU_PC_G2:
21818
    case BFD_RELOC_ARM_LDR_PC_G0:
21819
    case BFD_RELOC_ARM_LDR_PC_G1:
21820
    case BFD_RELOC_ARM_LDR_PC_G2:
21821
    case BFD_RELOC_ARM_LDRS_PC_G0:
21822
    case BFD_RELOC_ARM_LDRS_PC_G1:
21823
    case BFD_RELOC_ARM_LDRS_PC_G2:
21824
    case BFD_RELOC_ARM_LDC_PC_G0:
21825
    case BFD_RELOC_ARM_LDC_PC_G1:
21826
    case BFD_RELOC_ARM_LDC_PC_G2:
21827
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
21828
    case BFD_RELOC_ARM_ALU_SB_G0:
21829
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
21830
    case BFD_RELOC_ARM_ALU_SB_G1:
21831
    case BFD_RELOC_ARM_ALU_SB_G2:
21832
    case BFD_RELOC_ARM_LDR_SB_G0:
21833
    case BFD_RELOC_ARM_LDR_SB_G1:
21834
    case BFD_RELOC_ARM_LDR_SB_G2:
21835
    case BFD_RELOC_ARM_LDRS_SB_G0:
21836
    case BFD_RELOC_ARM_LDRS_SB_G1:
21837
    case BFD_RELOC_ARM_LDRS_SB_G2:
21838
    case BFD_RELOC_ARM_LDC_SB_G0:
21839
    case BFD_RELOC_ARM_LDC_SB_G1:
21840
    case BFD_RELOC_ARM_LDC_SB_G2:
21841
    case BFD_RELOC_ARM_V4BX:
21842
      code = fixp->fx_r_type;
21843
      break;
21844
 
21845
    case BFD_RELOC_ARM_TLS_GOTDESC:
21846
    case BFD_RELOC_ARM_TLS_GD32:
21847
    case BFD_RELOC_ARM_TLS_IE32:
21848
    case BFD_RELOC_ARM_TLS_LDM32:
21849
      /* BFD will include the symbol's address in the addend.
21850
         But we don't want that, so subtract it out again here.  */
21851
      if (!S_IS_COMMON (fixp->fx_addsy))
21852
        reloc->addend -= (*reloc->sym_ptr_ptr)->value;
21853
      code = fixp->fx_r_type;
21854
      break;
21855
#endif
21856
 
21857
    case BFD_RELOC_ARM_IMMEDIATE:
21858
      as_bad_where (fixp->fx_file, fixp->fx_line,
21859
                    _("internal relocation (type: IMMEDIATE) not fixed up"));
21860
      return NULL;
21861
 
21862
    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21863
      as_bad_where (fixp->fx_file, fixp->fx_line,
21864
                    _("ADRL used for a symbol not defined in the same file"));
21865
      return NULL;
21866
 
21867
    case BFD_RELOC_ARM_OFFSET_IMM:
21868
      if (section->use_rela_p)
21869
        {
21870
          code = fixp->fx_r_type;
21871
          break;
21872
        }
21873
 
21874
      if (fixp->fx_addsy != NULL
21875
          && !S_IS_DEFINED (fixp->fx_addsy)
21876
          && S_IS_LOCAL (fixp->fx_addsy))
21877
        {
21878
          as_bad_where (fixp->fx_file, fixp->fx_line,
21879
                        _("undefined local label `%s'"),
21880
                        S_GET_NAME (fixp->fx_addsy));
21881
          return NULL;
21882
        }
21883
 
21884
      as_bad_where (fixp->fx_file, fixp->fx_line,
21885
                    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
21886
      return NULL;
21887
 
21888
    default:
21889
      {
21890
        char * type;
21891
 
21892
        switch (fixp->fx_r_type)
21893
          {
21894
          case BFD_RELOC_NONE:             type = "NONE";         break;
21895
          case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
21896
          case BFD_RELOC_ARM_SHIFT_IMM:    type = "SHIFT_IMM";    break;
21897
          case BFD_RELOC_ARM_SMC:          type = "SMC";          break;
21898
          case BFD_RELOC_ARM_SWI:          type = "SWI";          break;
21899
          case BFD_RELOC_ARM_MULTI:        type = "MULTI";        break;
21900
          case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";   break;
21901
          case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
21902
          case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
21903
          case BFD_RELOC_ARM_THUMB_ADD:    type = "THUMB_ADD";    break;
21904
          case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
21905
          case BFD_RELOC_ARM_THUMB_IMM:    type = "THUMB_IMM";    break;
21906
          case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
21907
          default:                         type = _("<unknown>"); break;
21908
          }
21909
        as_bad_where (fixp->fx_file, fixp->fx_line,
21910
                      _("cannot represent %s relocation in this object file format"),
21911
                      type);
21912
        return NULL;
21913
      }
21914
    }
21915
 
21916
#ifdef OBJ_ELF
21917
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
21918
      && GOT_symbol
21919
      && fixp->fx_addsy == GOT_symbol)
21920
    {
21921
      code = BFD_RELOC_ARM_GOTPC;
21922
      reloc->addend = fixp->fx_offset = reloc->address;
21923
    }
21924
#endif
21925
 
21926
  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
21927
 
21928
  if (reloc->howto == NULL)
21929
    {
21930
      as_bad_where (fixp->fx_file, fixp->fx_line,
21931
                    _("cannot represent %s relocation in this object file format"),
21932
                    bfd_get_reloc_code_name (code));
21933
      return NULL;
21934
    }
21935
 
21936
  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
21937
     vtable entry to be used in the relocation's section offset.  */
21938
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
21939
    reloc->address = fixp->fx_offset;
21940
 
21941
  return reloc;
21942
}
21943
 
21944
/* This fix_new is called by cons via TC_CONS_FIX_NEW.  */
21945
 
21946
void
21947
cons_fix_new_arm (fragS *       frag,
21948
                  int           where,
21949
                  int           size,
21950
                  expressionS * exp)
21951
{
21952
  bfd_reloc_code_real_type type;
21953
  int pcrel = 0;
21954
 
21955
  /* Pick a reloc.
21956
     FIXME: @@ Should look at CPU word size.  */
21957
  switch (size)
21958
    {
21959
    case 1:
21960
      type = BFD_RELOC_8;
21961
      break;
21962
    case 2:
21963
      type = BFD_RELOC_16;
21964
      break;
21965
    case 4:
21966
    default:
21967
      type = BFD_RELOC_32;
21968
      break;
21969
    case 8:
21970
      type = BFD_RELOC_64;
21971
      break;
21972
    }
21973
 
21974
#ifdef TE_PE
21975
  if (exp->X_op == O_secrel)
21976
  {
21977
    exp->X_op = O_symbol;
21978
    type = BFD_RELOC_32_SECREL;
21979
  }
21980
#endif
21981
 
21982
  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
21983
}
21984
 
21985
#if defined (OBJ_COFF)
21986
void
21987
arm_validate_fix (fixS * fixP)
21988
{
21989
  /* If the destination of the branch is a defined symbol which does not have
21990
     the THUMB_FUNC attribute, then we must be calling a function which has
21991
     the (interfacearm) attribute.  We look for the Thumb entry point to that
21992
     function and change the branch to refer to that function instead.  */
21993
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
21994
      && fixP->fx_addsy != NULL
21995
      && S_IS_DEFINED (fixP->fx_addsy)
21996
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
21997
    {
21998
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
21999
    }
22000
}
22001
#endif
22002
 
22003
 
22004
int
22005
arm_force_relocation (struct fix * fixp)
22006
{
22007
#if defined (OBJ_COFF) && defined (TE_PE)
22008
  if (fixp->fx_r_type == BFD_RELOC_RVA)
22009
    return 1;
22010
#endif
22011
 
22012
  /* If we have a call or a branch to a function in ARM ISA mode from
22013
     a Thumb function, or vice versa, force the relocation.  These
22014
     relocations are cleared for some cores that have BLX, where simple
22015
     transformations are possible.  */
22016
 
22017
#ifdef OBJ_ELF
22018
  switch (fixp->fx_r_type)
22019
    {
22020
    case BFD_RELOC_ARM_PCREL_JUMP:
22021
    case BFD_RELOC_ARM_PCREL_CALL:
22022
    case BFD_RELOC_THUMB_PCREL_BLX:
22023
      if (THUMB_IS_FUNC (fixp->fx_addsy))
22024
        return 1;
22025
      break;
22026
 
22027
    case BFD_RELOC_ARM_PCREL_BLX:
22028
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
22029
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
22030
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
22031
      if (ARM_IS_FUNC (fixp->fx_addsy))
22032
        return 1;
22033
      break;
22034
 
22035
    default:
22036
      break;
22037
    }
22038
#endif
22039
 
22040
  /* Resolve these relocations even if the symbol is extern or weak.
22041
     Technically this is probably wrong due to symbol preemption.
22042
     In practice these relocations do not have enough range to be useful
22043
     at dynamic link time, and some code (e.g. in the Linux kernel)
22044
     expects these references to be resolved.  */
22045
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
22046
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
22047
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
22048
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
22049
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22050
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
22051
      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
22052
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
22053
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22054
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
22055
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
22056
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
22057
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
22058
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
22059
    return 0;
22060
 
22061
  /* Always leave these relocations for the linker.  */
22062
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22063
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22064
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22065
    return 1;
22066
 
22067
  /* Always generate relocations against function symbols.  */
22068
  if (fixp->fx_r_type == BFD_RELOC_32
22069
      && fixp->fx_addsy
22070
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
22071
    return 1;
22072
 
22073
  return generic_force_reloc (fixp);
22074
}
22075
 
22076
#if defined (OBJ_ELF) || defined (OBJ_COFF)
22077
/* Relocations against function names must be left unadjusted,
22078
   so that the linker can use this information to generate interworking
22079
   stubs.  The MIPS version of this function
22080
   also prevents relocations that are mips-16 specific, but I do not
22081
   know why it does this.
22082
 
22083
   FIXME:
22084
   There is one other problem that ought to be addressed here, but
22085
   which currently is not:  Taking the address of a label (rather
22086
   than a function) and then later jumping to that address.  Such
22087
   addresses also ought to have their bottom bit set (assuming that
22088
   they reside in Thumb code), but at the moment they will not.  */
22089
 
22090
bfd_boolean
22091
arm_fix_adjustable (fixS * fixP)
22092
{
22093
  if (fixP->fx_addsy == NULL)
22094
    return 1;
22095
 
22096
  /* Preserve relocations against symbols with function type.  */
22097
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
22098
    return FALSE;
22099
 
22100
  if (THUMB_IS_FUNC (fixP->fx_addsy)
22101
      && fixP->fx_subsy == NULL)
22102
    return FALSE;
22103
 
22104
  /* We need the symbol name for the VTABLE entries.  */
22105
  if (   fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
22106
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
22107
    return FALSE;
22108
 
22109
  /* Don't allow symbols to be discarded on GOT related relocs.  */
22110
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
22111
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
22112
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
22113
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
22114
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
22115
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
22116
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
22117
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
22118
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
22119
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
22120
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
22121
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
22122
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
22123
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
22124
    return FALSE;
22125
 
22126
  /* Similarly for group relocations.  */
22127
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22128
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22129
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22130
    return FALSE;
22131
 
22132
  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
22133
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
22134
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22135
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
22136
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
22137
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22138
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
22139
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
22140
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
22141
    return FALSE;
22142
 
22143
  return TRUE;
22144
}
22145
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
22146
 
22147
#ifdef OBJ_ELF
22148
 
22149
const char *
22150
elf32_arm_target_format (void)
22151
{
22152
#ifdef TE_SYMBIAN
22153
  return (target_big_endian
22154
          ? "elf32-bigarm-symbian"
22155
          : "elf32-littlearm-symbian");
22156
#elif defined (TE_VXWORKS)
22157
  return (target_big_endian
22158
          ? "elf32-bigarm-vxworks"
22159
          : "elf32-littlearm-vxworks");
22160
#else
22161
  if (target_big_endian)
22162
    return "elf32-bigarm";
22163
  else
22164
    return "elf32-littlearm";
22165
#endif
22166
}
22167
 
22168
void
22169
armelf_frob_symbol (symbolS * symp,
22170
                    int *     puntp)
22171
{
22172
  elf_frob_symbol (symp, puntp);
22173
}
22174
#endif
22175
 
22176
/* MD interface: Finalization.  */
22177
 
22178
void
22179
arm_cleanup (void)
22180
{
22181
  literal_pool * pool;
22182
 
22183
  /* Ensure that all the IT blocks are properly closed.  */
22184
  check_it_blocks_finished ();
22185
 
22186
  for (pool = list_of_pools; pool; pool = pool->next)
22187
    {
22188
      /* Put it at the end of the relevant section.  */
22189
      subseg_set (pool->section, pool->sub_section);
22190
#ifdef OBJ_ELF
22191
      arm_elf_change_section ();
22192
#endif
22193
      s_ltorg (0);
22194
    }
22195
}
22196
 
22197
#ifdef OBJ_ELF
22198
/* Remove any excess mapping symbols generated for alignment frags in
22199
   SEC.  We may have created a mapping symbol before a zero byte
22200
   alignment; remove it if there's a mapping symbol after the
22201
   alignment.  */
22202
static void
22203
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
22204
                       void *dummy ATTRIBUTE_UNUSED)
22205
{
22206
  segment_info_type *seginfo = seg_info (sec);
22207
  fragS *fragp;
22208
 
22209
  if (seginfo == NULL || seginfo->frchainP == NULL)
22210
    return;
22211
 
22212
  for (fragp = seginfo->frchainP->frch_root;
22213
       fragp != NULL;
22214
       fragp = fragp->fr_next)
22215
    {
22216
      symbolS *sym = fragp->tc_frag_data.last_map;
22217
      fragS *next = fragp->fr_next;
22218
 
22219
      /* Variable-sized frags have been converted to fixed size by
22220
         this point.  But if this was variable-sized to start with,
22221
         there will be a fixed-size frag after it.  So don't handle
22222
         next == NULL.  */
22223
      if (sym == NULL || next == NULL)
22224
        continue;
22225
 
22226
      if (S_GET_VALUE (sym) < next->fr_address)
22227
        /* Not at the end of this frag.  */
22228
        continue;
22229
      know (S_GET_VALUE (sym) == next->fr_address);
22230
 
22231
      do
22232
        {
22233
          if (next->tc_frag_data.first_map != NULL)
22234
            {
22235
              /* Next frag starts with a mapping symbol.  Discard this
22236
                 one.  */
22237
              symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22238
              break;
22239
            }
22240
 
22241
          if (next->fr_next == NULL)
22242
            {
22243
              /* This mapping symbol is at the end of the section.  Discard
22244
                 it.  */
22245
              know (next->fr_fix == 0 && next->fr_var == 0);
22246
              symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22247
              break;
22248
            }
22249
 
22250
          /* As long as we have empty frags without any mapping symbols,
22251
             keep looking.  */
22252
          /* If the next frag is non-empty and does not start with a
22253
             mapping symbol, then this mapping symbol is required.  */
22254
          if (next->fr_address != next->fr_next->fr_address)
22255
            break;
22256
 
22257
          next = next->fr_next;
22258
        }
22259
      while (next != NULL);
22260
    }
22261
}
22262
#endif
22263
 
22264
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
22265
   ARM ones.  */
22266
 
22267
void
22268
arm_adjust_symtab (void)
22269
{
22270
#ifdef OBJ_COFF
22271
  symbolS * sym;
22272
 
22273
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22274
    {
22275
      if (ARM_IS_THUMB (sym))
22276
        {
22277
          if (THUMB_IS_FUNC (sym))
22278
            {
22279
              /* Mark the symbol as a Thumb function.  */
22280
              if (   S_GET_STORAGE_CLASS (sym) == C_STAT
22281
                  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
22282
                S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
22283
 
22284
              else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
22285
                S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
22286
              else
22287
                as_bad (_("%s: unexpected function type: %d"),
22288
                        S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
22289
            }
22290
          else switch (S_GET_STORAGE_CLASS (sym))
22291
            {
22292
            case C_EXT:
22293
              S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
22294
              break;
22295
            case C_STAT:
22296
              S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
22297
              break;
22298
            case C_LABEL:
22299
              S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
22300
              break;
22301
            default:
22302
              /* Do nothing.  */
22303
              break;
22304
            }
22305
        }
22306
 
22307
      if (ARM_IS_INTERWORK (sym))
22308
        coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
22309
    }
22310
#endif
22311
#ifdef OBJ_ELF
22312
  symbolS * sym;
22313
  char      bind;
22314
 
22315
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22316
    {
22317
      if (ARM_IS_THUMB (sym))
22318
        {
22319
          elf_symbol_type * elf_sym;
22320
 
22321
          elf_sym = elf_symbol (symbol_get_bfdsym (sym));
22322
          bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
22323
 
22324
          if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
22325
                BFD_ARM_SPECIAL_SYM_TYPE_ANY))
22326
            {
22327
              /* If it's a .thumb_func, declare it as so,
22328
                 otherwise tag label as .code 16.  */
22329
              if (THUMB_IS_FUNC (sym))
22330
                elf_sym->internal_elf_sym.st_target_internal
22331
                  = ST_BRANCH_TO_THUMB;
22332
              else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
22333
                elf_sym->internal_elf_sym.st_info =
22334
                  ELF_ST_INFO (bind, STT_ARM_16BIT);
22335
            }
22336
        }
22337
    }
22338
 
22339
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
22340
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
22341
  /* Now do generic ELF adjustments.  */
22342
  elf_adjust_symtab ();
22343
#endif
22344
}
22345
 
22346
/* MD interface: Initialization.  */
22347
 
22348
static void
22349
set_constant_flonums (void)
22350
{
22351
  int i;
22352
 
22353
  for (i = 0; i < NUM_FLOAT_VALS; i++)
22354
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
22355
      abort ();
22356
}
22357
 
22358
/* Auto-select Thumb mode if it's the only available instruction set for the
22359
   given architecture.  */
22360
 
22361
static void
22362
autoselect_thumb_from_cpu_variant (void)
22363
{
22364
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
22365
    opcode_select (16);
22366
}
22367
 
22368
void
22369
md_begin (void)
22370
{
22371
  unsigned mach;
22372
  unsigned int i;
22373
 
22374
  if (   (arm_ops_hsh = hash_new ()) == NULL
22375
      || (arm_cond_hsh = hash_new ()) == NULL
22376
      || (arm_shift_hsh = hash_new ()) == NULL
22377
      || (arm_psr_hsh = hash_new ()) == NULL
22378
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
22379
      || (arm_reg_hsh = hash_new ()) == NULL
22380
      || (arm_reloc_hsh = hash_new ()) == NULL
22381
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
22382
    as_fatal (_("virtual memory exhausted"));
22383
 
22384
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
22385
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
22386
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
22387
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
22388
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
22389
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
22390
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
22391
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
22392
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
22393
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
22394
                 (void *) (v7m_psrs + i));
22395
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
22396
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
22397
  for (i = 0;
22398
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
22399
       i++)
22400
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
22401
                 (void *) (barrier_opt_names + i));
22402
#ifdef OBJ_ELF
22403
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
22404
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
22405
#endif
22406
 
22407
  set_constant_flonums ();
22408
 
22409
  /* Set the cpu variant based on the command-line options.  We prefer
22410
     -mcpu= over -march= if both are set (as for GCC); and we prefer
22411
     -mfpu= over any other way of setting the floating point unit.
22412
     Use of legacy options with new options is faulted.  */
22413
  if (legacy_cpu)
22414
    {
22415
      if (mcpu_cpu_opt || march_cpu_opt)
22416
        as_bad (_("use of old and new-style options to set CPU type"));
22417
 
22418
      mcpu_cpu_opt = legacy_cpu;
22419
    }
22420
  else if (!mcpu_cpu_opt)
22421
    mcpu_cpu_opt = march_cpu_opt;
22422
 
22423
  if (legacy_fpu)
22424
    {
22425
      if (mfpu_opt)
22426
        as_bad (_("use of old and new-style options to set FPU type"));
22427
 
22428
      mfpu_opt = legacy_fpu;
22429
    }
22430
  else if (!mfpu_opt)
22431
    {
22432
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
22433
        || defined (TE_NetBSD) || defined (TE_VXWORKS))
22434
      /* Some environments specify a default FPU.  If they don't, infer it
22435
         from the processor.  */
22436
      if (mcpu_fpu_opt)
22437
        mfpu_opt = mcpu_fpu_opt;
22438
      else
22439
        mfpu_opt = march_fpu_opt;
22440
#else
22441
      mfpu_opt = &fpu_default;
22442
#endif
22443
    }
22444
 
22445
  if (!mfpu_opt)
22446
    {
22447
      if (mcpu_cpu_opt != NULL)
22448
        mfpu_opt = &fpu_default;
22449
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
22450
        mfpu_opt = &fpu_arch_vfp_v2;
22451
      else
22452
        mfpu_opt = &fpu_arch_fpa;
22453
    }
22454
 
22455
#ifdef CPU_DEFAULT
22456
  if (!mcpu_cpu_opt)
22457
    {
22458
      mcpu_cpu_opt = &cpu_default;
22459
      selected_cpu = cpu_default;
22460
    }
22461
#else
22462
  if (mcpu_cpu_opt)
22463
    selected_cpu = *mcpu_cpu_opt;
22464
  else
22465
    mcpu_cpu_opt = &arm_arch_any;
22466
#endif
22467
 
22468
  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22469
 
22470
  autoselect_thumb_from_cpu_variant ();
22471
 
22472
  arm_arch_used = thumb_arch_used = arm_arch_none;
22473
 
22474
#if defined OBJ_COFF || defined OBJ_ELF
22475
  {
22476
    unsigned int flags = 0;
22477
 
22478
#if defined OBJ_ELF
22479
    flags = meabi_flags;
22480
 
22481
    switch (meabi_flags)
22482
      {
22483
      case EF_ARM_EABI_UNKNOWN:
22484
#endif
22485
        /* Set the flags in the private structure.  */
22486
        if (uses_apcs_26)      flags |= F_APCS26;
22487
        if (support_interwork) flags |= F_INTERWORK;
22488
        if (uses_apcs_float)   flags |= F_APCS_FLOAT;
22489
        if (pic_code)          flags |= F_PIC;
22490
        if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
22491
          flags |= F_SOFT_FLOAT;
22492
 
22493
        switch (mfloat_abi_opt)
22494
          {
22495
          case ARM_FLOAT_ABI_SOFT:
22496
          case ARM_FLOAT_ABI_SOFTFP:
22497
            flags |= F_SOFT_FLOAT;
22498
            break;
22499
 
22500
          case ARM_FLOAT_ABI_HARD:
22501
            if (flags & F_SOFT_FLOAT)
22502
              as_bad (_("hard-float conflicts with specified fpu"));
22503
            break;
22504
          }
22505
 
22506
        /* Using pure-endian doubles (even if soft-float).      */
22507
        if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
22508
          flags |= F_VFP_FLOAT;
22509
 
22510
#if defined OBJ_ELF
22511
        if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
22512
            flags |= EF_ARM_MAVERICK_FLOAT;
22513
        break;
22514
 
22515
      case EF_ARM_EABI_VER4:
22516
      case EF_ARM_EABI_VER5:
22517
        /* No additional flags to set.  */
22518
        break;
22519
 
22520
      default:
22521
        abort ();
22522
      }
22523
#endif
22524
    bfd_set_private_flags (stdoutput, flags);
22525
 
22526
    /* We have run out of flags in the COFF header to encode the
22527
       status of ATPCS support, so instead we create a dummy,
22528
       empty, debug section called .arm.atpcs.  */
22529
    if (atpcs)
22530
      {
22531
        asection * sec;
22532
 
22533
        sec = bfd_make_section (stdoutput, ".arm.atpcs");
22534
 
22535
        if (sec != NULL)
22536
          {
22537
            bfd_set_section_flags
22538
              (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
22539
            bfd_set_section_size (stdoutput, sec, 0);
22540
            bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
22541
          }
22542
      }
22543
  }
22544
#endif
22545
 
22546
  /* Record the CPU type as well.  */
22547
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
22548
    mach = bfd_mach_arm_iWMMXt2;
22549
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
22550
    mach = bfd_mach_arm_iWMMXt;
22551
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
22552
    mach = bfd_mach_arm_XScale;
22553
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
22554
    mach = bfd_mach_arm_ep9312;
22555
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
22556
    mach = bfd_mach_arm_5TE;
22557
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
22558
    {
22559
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22560
        mach = bfd_mach_arm_5T;
22561
      else
22562
        mach = bfd_mach_arm_5;
22563
    }
22564
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
22565
    {
22566
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22567
        mach = bfd_mach_arm_4T;
22568
      else
22569
        mach = bfd_mach_arm_4;
22570
    }
22571
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
22572
    mach = bfd_mach_arm_3M;
22573
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
22574
    mach = bfd_mach_arm_3;
22575
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
22576
    mach = bfd_mach_arm_2a;
22577
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
22578
    mach = bfd_mach_arm_2;
22579
  else
22580
    mach = bfd_mach_arm_unknown;
22581
 
22582
  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
22583
}
22584
 
22585
/* Command line processing.  */
22586
 
22587
/* md_parse_option
22588
      Invocation line includes a switch not recognized by the base assembler.
22589
      See if it's a processor-specific option.
22590
 
22591
      This routine is somewhat complicated by the need for backwards
22592
      compatibility (since older releases of gcc can't be changed).
22593
      The new options try to make the interface as compatible as
22594
      possible with GCC.
22595
 
22596
      New options (supported) are:
22597
 
22598
              -mcpu=<cpu name>           Assemble for selected processor
22599
              -march=<architecture name> Assemble for selected architecture
22600
              -mfpu=<fpu architecture>   Assemble for selected FPU.
22601
              -EB/-mbig-endian           Big-endian
22602
              -EL/-mlittle-endian        Little-endian
22603
              -k                         Generate PIC code
22604
              -mthumb                    Start in Thumb mode
22605
              -mthumb-interwork          Code supports ARM/Thumb interworking
22606
 
22607
              -m[no-]warn-deprecated     Warn about deprecated features
22608
 
22609
      For now we will also provide support for:
22610
 
22611
              -mapcs-32                  32-bit Program counter
22612
              -mapcs-26                  26-bit Program counter
22613
              -mapcs-float               Floats passed in FP registers
22614
              -mapcs-reentrant           Reentrant code
22615
              -matpcs
22616
      (sometime these will probably be replaced with -mapcs=<list of options>
22617
      and -matpcs=<list of options>)
22618
 
22619
      The remaining options are only supported for backwards compatibility.
22620
      Cpu variants, the arm part is optional:
22621
              -m[arm]1                Currently not supported.
22622
              -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
22623
              -m[arm]3                Arm 3 processor
22624
              -m[arm]6[xx],           Arm 6 processors
22625
              -m[arm]7[xx][t][[d]m]   Arm 7 processors
22626
              -m[arm]8[10]            Arm 8 processors
22627
              -m[arm]9[20][tdmi]      Arm 9 processors
22628
              -mstrongarm[110[0]]     StrongARM processors
22629
              -mxscale                XScale processors
22630
              -m[arm]v[2345[t[e]]]    Arm architectures
22631
              -mall                   All (except the ARM1)
22632
      FP variants:
22633
              -mfpa10, -mfpa11        FPA10 and 11 co-processor instructions
22634
              -mfpe-old               (No float load/store multiples)
22635
              -mvfpxd                 VFP Single precision
22636
              -mvfp                   All VFP
22637
              -mno-fpu                Disable all floating point instructions
22638
 
22639
      The following CPU names are recognized:
22640
              arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
22641
              arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
22642
              arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
22643
              arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
22644
              arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
22645
              arm10t, arm10e, arm1020t, arm1020e, arm10200e,
22646
              strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
22647
 
22648
      */
22649
 
22650
const char * md_shortopts = "m:k";
22651
 
22652
#ifdef ARM_BI_ENDIAN
22653
#define OPTION_EB (OPTION_MD_BASE + 0)
22654
#define OPTION_EL (OPTION_MD_BASE + 1)
22655
#else
22656
#if TARGET_BYTES_BIG_ENDIAN
22657
#define OPTION_EB (OPTION_MD_BASE + 0)
22658
#else
22659
#define OPTION_EL (OPTION_MD_BASE + 1)
22660
#endif
22661
#endif
22662
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
22663
 
22664
struct option md_longopts[] =
22665
{
22666
#ifdef OPTION_EB
22667
  {"EB", no_argument, NULL, OPTION_EB},
22668
#endif
22669
#ifdef OPTION_EL
22670
  {"EL", no_argument, NULL, OPTION_EL},
22671
#endif
22672
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
22673
  {NULL, no_argument, NULL, 0}
22674
};
22675
 
22676
size_t md_longopts_size = sizeof (md_longopts);
22677
 
22678
struct arm_option_table
22679
{
22680
  char *option;         /* Option name to match.  */
22681
  char *help;           /* Help information.  */
22682
  int  *var;            /* Variable to change.  */
22683
  int   value;          /* What to change it to.  */
22684
  char *deprecated;     /* If non-null, print this message.  */
22685
};
22686
 
22687
struct arm_option_table arm_opts[] =
22688
{
22689
  {"k",      N_("generate PIC code"),      &pic_code,    1, NULL},
22690
  {"mthumb", N_("assemble Thumb code"),    &thumb_mode,  1, NULL},
22691
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
22692
   &support_interwork, 1, NULL},
22693
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
22694
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
22695
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
22696
   1, NULL},
22697
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
22698
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
22699
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
22700
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
22701
   NULL},
22702
 
22703
  /* These are recognized by the assembler, but have no effect on code.  */
22704
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
22705
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
22706
 
22707
  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
22708
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
22709
   &warn_on_deprecated, 0, NULL},
22710
  {NULL, NULL, NULL, 0, NULL}
22711
};
22712
 
22713
struct arm_legacy_option_table
22714
{
22715
  char *option;                         /* Option name to match.  */
22716
  const arm_feature_set **var;          /* Variable to change.  */
22717
  const arm_feature_set value;          /* What to change it to.  */
22718
  char *deprecated;                     /* If non-null, print this message.  */
22719
};
22720
 
22721
const struct arm_legacy_option_table arm_legacy_opts[] =
22722
{
22723
  /* DON'T add any new processors to this list -- we want the whole list
22724
     to go away...  Add them to the processors table instead.  */
22725
  {"marm1",      &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
22726
  {"m1",         &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
22727
  {"marm2",      &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
22728
  {"m2",         &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
22729
  {"marm250",    &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
22730
  {"m250",       &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
22731
  {"marm3",      &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
22732
  {"m3",         &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
22733
  {"marm6",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
22734
  {"m6",         &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
22735
  {"marm600",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
22736
  {"m600",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
22737
  {"marm610",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
22738
  {"m610",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
22739
  {"marm620",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
22740
  {"m620",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
22741
  {"marm7",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
22742
  {"m7",         &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
22743
  {"marm70",     &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
22744
  {"m70",        &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
22745
  {"marm700",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
22746
  {"m700",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
22747
  {"marm700i",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
22748
  {"m700i",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
22749
  {"marm710",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
22750
  {"m710",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
22751
  {"marm710c",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
22752
  {"m710c",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
22753
  {"marm720",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
22754
  {"m720",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
22755
  {"marm7d",     &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
22756
  {"m7d",        &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
22757
  {"marm7di",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
22758
  {"m7di",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
22759
  {"marm7m",     &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
22760
  {"m7m",        &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
22761
  {"marm7dm",    &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
22762
  {"m7dm",       &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
22763
  {"marm7dmi",   &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
22764
  {"m7dmi",      &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
22765
  {"marm7100",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
22766
  {"m7100",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
22767
  {"marm7500",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
22768
  {"m7500",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
22769
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
22770
  {"m7500fe",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
22771
  {"marm7t",     &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22772
  {"m7t",        &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22773
  {"marm7tdmi",  &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22774
  {"m7tdmi",     &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22775
  {"marm710t",   &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
22776
  {"m710t",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
22777
  {"marm720t",   &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
22778
  {"m720t",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
22779
  {"marm740t",   &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
22780
  {"m740t",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
22781
  {"marm8",      &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
22782
  {"m8",         &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
22783
  {"marm810",    &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
22784
  {"m810",       &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
22785
  {"marm9",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
22786
  {"m9",         &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
22787
  {"marm9tdmi",  &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
22788
  {"m9tdmi",     &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
22789
  {"marm920",    &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
22790
  {"m920",       &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
22791
  {"marm940",    &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
22792
  {"m940",       &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
22793
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
22794
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
22795
   N_("use -mcpu=strongarm110")},
22796
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
22797
   N_("use -mcpu=strongarm1100")},
22798
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
22799
   N_("use -mcpu=strongarm1110")},
22800
  {"mxscale",    &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
22801
  {"miwmmxt",    &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
22802
  {"mall",       &legacy_cpu, ARM_ANY,         N_("use -mcpu=all")},
22803
 
22804
  /* Architecture variants -- don't add any more to this list either.  */
22805
  {"mv2",        &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
22806
  {"marmv2",     &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
22807
  {"mv2a",       &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
22808
  {"marmv2a",    &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
22809
  {"mv3",        &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
22810
  {"marmv3",     &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
22811
  {"mv3m",       &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
22812
  {"marmv3m",    &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
22813
  {"mv4",        &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
22814
  {"marmv4",     &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
22815
  {"mv4t",       &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
22816
  {"marmv4t",    &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
22817
  {"mv5",        &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
22818
  {"marmv5",     &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
22819
  {"mv5t",       &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
22820
  {"marmv5t",    &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
22821
  {"mv5e",       &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
22822
  {"marmv5e",    &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
22823
 
22824
  /* Floating point variants -- don't add any more to this list either.  */
22825
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
22826
  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
22827
  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
22828
  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
22829
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},
22830
 
22831
  {NULL, NULL, ARM_ARCH_NONE, NULL}
22832
};
22833
 
22834
struct arm_cpu_option_table
22835
{
22836
  char *name;
22837
  const arm_feature_set value;
22838
  /* For some CPUs we assume an FPU unless the user explicitly sets
22839
     -mfpu=...  */
22840
  const arm_feature_set default_fpu;
22841
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
22842
     case.  */
22843
  const char *canonical_name;
22844
};
22845
 
22846
/* This list should, at a minimum, contain all the cpu names
22847
   recognized by GCC.  */
22848
static const struct arm_cpu_option_table arm_cpus[] =
22849
{
22850
  {"all",               ARM_ANY,         FPU_ARCH_FPA,    NULL},
22851
  {"arm1",              ARM_ARCH_V1,     FPU_ARCH_FPA,    NULL},
22852
  {"arm2",              ARM_ARCH_V2,     FPU_ARCH_FPA,    NULL},
22853
  {"arm250",            ARM_ARCH_V2S,    FPU_ARCH_FPA,    NULL},
22854
  {"arm3",              ARM_ARCH_V2S,    FPU_ARCH_FPA,    NULL},
22855
  {"arm6",              ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22856
  {"arm60",             ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22857
  {"arm600",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22858
  {"arm610",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22859
  {"arm620",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22860
  {"arm7",              ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22861
  {"arm7m",             ARM_ARCH_V3M,    FPU_ARCH_FPA,    NULL},
22862
  {"arm7d",             ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22863
  {"arm7dm",            ARM_ARCH_V3M,    FPU_ARCH_FPA,    NULL},
22864
  {"arm7di",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22865
  {"arm7dmi",           ARM_ARCH_V3M,    FPU_ARCH_FPA,    NULL},
22866
  {"arm70",             ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22867
  {"arm700",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22868
  {"arm700i",           ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22869
  {"arm710",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22870
  {"arm710t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22871
  {"arm720",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22872
  {"arm720t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22873
  {"arm740t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22874
  {"arm710c",           ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22875
  {"arm7100",           ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22876
  {"arm7500",           ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22877
  {"arm7500fe",         ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22878
  {"arm7t",             ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22879
  {"arm7tdmi",          ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22880
  {"arm7tdmi-s",        ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22881
  {"arm8",              ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22882
  {"arm810",            ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22883
  {"strongarm",         ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22884
  {"strongarm1",        ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22885
  {"strongarm110",      ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22886
  {"strongarm1100",     ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22887
  {"strongarm1110",     ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22888
  {"arm9",              ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22889
  {"arm920",            ARM_ARCH_V4T,    FPU_ARCH_FPA,    "ARM920T"},
22890
  {"arm920t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22891
  {"arm922t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22892
  {"arm940t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22893
  {"arm9tdmi",          ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22894
  {"fa526",             ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22895
  {"fa626",             ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22896
  /* For V5 or later processors we default to using VFP; but the user
22897
     should really set the FPU type explicitly.  */
22898
  {"arm9e-r0",          ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
22899
  {"arm9e",             ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22900
  {"arm926ej",          ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, "ARM926EJ-S"},
22901
  {"arm926ejs",         ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, "ARM926EJ-S"},
22902
  {"arm926ej-s",        ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, NULL},
22903
  {"arm946e-r0",        ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
22904
  {"arm946e",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, "ARM946E-S"},
22905
  {"arm946e-s",         ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22906
  {"arm966e-r0",        ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
22907
  {"arm966e",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, "ARM966E-S"},
22908
  {"arm966e-s",         ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22909
  {"arm968e-s",         ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22910
  {"arm10t",            ARM_ARCH_V5T,    FPU_ARCH_VFP_V1, NULL},
22911
  {"arm10tdmi",         ARM_ARCH_V5T,    FPU_ARCH_VFP_V1, NULL},
22912
  {"arm10e",            ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22913
  {"arm1020",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, "ARM1020E"},
22914
  {"arm1020t",          ARM_ARCH_V5T,    FPU_ARCH_VFP_V1, NULL},
22915
  {"arm1020e",          ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22916
  {"arm1022e",          ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22917
  {"arm1026ejs",        ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
22918
  {"arm1026ej-s",       ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, NULL},
22919
  {"fa606te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22920
  {"fa616te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22921
  {"fa626te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22922
  {"fmp626",            ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22923
  {"fa726te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22924
  {"arm1136js",         ARM_ARCH_V6,     FPU_NONE,        "ARM1136J-S"},
22925
  {"arm1136j-s",        ARM_ARCH_V6,     FPU_NONE,        NULL},
22926
  {"arm1136jfs",        ARM_ARCH_V6,     FPU_ARCH_VFP_V2, "ARM1136JF-S"},
22927
  {"arm1136jf-s",       ARM_ARCH_V6,     FPU_ARCH_VFP_V2, NULL},
22928
  {"mpcore",            ARM_ARCH_V6K,    FPU_ARCH_VFP_V2, "MPCore"},
22929
  {"mpcorenovfp",       ARM_ARCH_V6K,    FPU_NONE,        "MPCore"},
22930
  {"arm1156t2-s",       ARM_ARCH_V6T2,   FPU_NONE,        NULL},
22931
  {"arm1156t2f-s",      ARM_ARCH_V6T2,   FPU_ARCH_VFP_V2, NULL},
22932
  {"arm1176jz-s",       ARM_ARCH_V6ZK,   FPU_NONE,        NULL},
22933
  {"arm1176jzf-s",      ARM_ARCH_V6ZK,   FPU_ARCH_VFP_V2, NULL},
22934
  {"cortex-a5",         ARM_ARCH_V7A_MP_SEC,
22935
                                         FPU_NONE,        "Cortex-A5"},
22936
  {"cortex-a8",         ARM_ARCH_V7A_SEC,
22937
                                         ARM_FEATURE (0, FPU_VFP_V3
22938
                                                        | FPU_NEON_EXT_V1),
22939
                                                          "Cortex-A8"},
22940
  {"cortex-a9",         ARM_ARCH_V7A_MP_SEC,
22941
                                         ARM_FEATURE (0, FPU_VFP_V3
22942
                                                        | FPU_NEON_EXT_V1),
22943
                                                          "Cortex-A9"},
22944
  {"cortex-a15",        ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
22945
                                         FPU_ARCH_NEON_VFP_V4,
22946
                                                          "Cortex-A15"},
22947
  {"cortex-r4",         ARM_ARCH_V7R,    FPU_NONE,        "Cortex-R4"},
22948
  {"cortex-r4f",        ARM_ARCH_V7R,    FPU_ARCH_VFP_V3D16,
22949
                                                          "Cortex-R4F"},
22950
  {"cortex-r5",         ARM_ARCH_V7R_IDIV,
22951
                                         FPU_NONE,        "Cortex-R5"},
22952
  {"cortex-m4",         ARM_ARCH_V7EM,   FPU_NONE,        "Cortex-M4"},
22953
  {"cortex-m3",         ARM_ARCH_V7M,    FPU_NONE,        "Cortex-M3"},
22954
  {"cortex-m1",         ARM_ARCH_V6SM,   FPU_NONE,        "Cortex-M1"},
22955
  {"cortex-m0",         ARM_ARCH_V6SM,   FPU_NONE,        "Cortex-M0"},
22956
  /* ??? XSCALE is really an architecture.  */
22957
  {"xscale",            ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
22958
  /* ??? iwmmxt is not a processor.  */
22959
  {"iwmmxt",            ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
22960
  {"iwmmxt2",           ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
22961
  {"i80200",            ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
22962
  /* Maverick */
22963
  {"ep9312",    ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
22964
  {NULL,                ARM_ARCH_NONE,   ARM_ARCH_NONE, NULL}
22965
};
22966
 
22967
struct arm_arch_option_table
22968
{
22969
  char *name;
22970
  const arm_feature_set value;
22971
  const arm_feature_set default_fpu;
22972
};
22973
 
22974
/* This list should, at a minimum, contain all the architecture names
22975
   recognized by GCC.  */
22976
static const struct arm_arch_option_table arm_archs[] =
22977
{
22978
  {"all",               ARM_ANY,         FPU_ARCH_FPA},
22979
  {"armv1",             ARM_ARCH_V1,     FPU_ARCH_FPA},
22980
  {"armv2",             ARM_ARCH_V2,     FPU_ARCH_FPA},
22981
  {"armv2a",            ARM_ARCH_V2S,    FPU_ARCH_FPA},
22982
  {"armv2s",            ARM_ARCH_V2S,    FPU_ARCH_FPA},
22983
  {"armv3",             ARM_ARCH_V3,     FPU_ARCH_FPA},
22984
  {"armv3m",            ARM_ARCH_V3M,    FPU_ARCH_FPA},
22985
  {"armv4",             ARM_ARCH_V4,     FPU_ARCH_FPA},
22986
  {"armv4xm",           ARM_ARCH_V4xM,   FPU_ARCH_FPA},
22987
  {"armv4t",            ARM_ARCH_V4T,    FPU_ARCH_FPA},
22988
  {"armv4txm",          ARM_ARCH_V4TxM,  FPU_ARCH_FPA},
22989
  {"armv5",             ARM_ARCH_V5,     FPU_ARCH_VFP},
22990
  {"armv5t",            ARM_ARCH_V5T,    FPU_ARCH_VFP},
22991
  {"armv5txm",          ARM_ARCH_V5TxM,  FPU_ARCH_VFP},
22992
  {"armv5te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP},
22993
  {"armv5texp",         ARM_ARCH_V5TExP, FPU_ARCH_VFP},
22994
  {"armv5tej",          ARM_ARCH_V5TEJ,  FPU_ARCH_VFP},
22995
  {"armv6",             ARM_ARCH_V6,     FPU_ARCH_VFP},
22996
  {"armv6j",            ARM_ARCH_V6,     FPU_ARCH_VFP},
22997
  {"armv6k",            ARM_ARCH_V6K,    FPU_ARCH_VFP},
22998
  {"armv6z",            ARM_ARCH_V6Z,    FPU_ARCH_VFP},
22999
  {"armv6zk",           ARM_ARCH_V6ZK,   FPU_ARCH_VFP},
23000
  {"armv6t2",           ARM_ARCH_V6T2,   FPU_ARCH_VFP},
23001
  {"armv6kt2",          ARM_ARCH_V6KT2,  FPU_ARCH_VFP},
23002
  {"armv6zt2",          ARM_ARCH_V6ZT2,  FPU_ARCH_VFP},
23003
  {"armv6zkt2",         ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
23004
  {"armv6-m",           ARM_ARCH_V6M,    FPU_ARCH_VFP},
23005
  {"armv6s-m",          ARM_ARCH_V6SM,   FPU_ARCH_VFP},
23006
  {"armv7",             ARM_ARCH_V7,     FPU_ARCH_VFP},
23007
  /* The official spelling of the ARMv7 profile variants is the dashed form.
23008
     Accept the non-dashed form for compatibility with old toolchains.  */
23009
  {"armv7a",            ARM_ARCH_V7A,    FPU_ARCH_VFP},
23010
  {"armv7r",            ARM_ARCH_V7R,    FPU_ARCH_VFP},
23011
  {"armv7m",            ARM_ARCH_V7M,    FPU_ARCH_VFP},
23012
  {"armv7-a",           ARM_ARCH_V7A,    FPU_ARCH_VFP},
23013
  {"armv7-r",           ARM_ARCH_V7R,    FPU_ARCH_VFP},
23014
  {"armv7-m",           ARM_ARCH_V7M,    FPU_ARCH_VFP},
23015
  {"armv7e-m",          ARM_ARCH_V7EM,   FPU_ARCH_VFP},
23016
  {"xscale",            ARM_ARCH_XSCALE, FPU_ARCH_VFP},
23017
  {"iwmmxt",            ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
23018
  {"iwmmxt2",           ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
23019
  {NULL,                ARM_ARCH_NONE,   ARM_ARCH_NONE}
23020
};
23021
 
23022
/* ISA extensions in the co-processor and main instruction set space.  */
23023
struct arm_option_extension_value_table
23024
{
23025
  char *name;
23026
  const arm_feature_set value;
23027
  const arm_feature_set allowed_archs;
23028
};
23029
 
23030
/* The following table must be in alphabetical order with a NULL last entry.
23031
   */
23032
static const struct arm_option_extension_value_table arm_extensions[] =
23033
{
23034
  {"idiv",      ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
23035
                                   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)},
23036
  {"iwmmxt",    ARM_FEATURE (0, ARM_CEXT_IWMMXT),        ARM_ANY},
23037
  {"iwmmxt2",   ARM_FEATURE (0, ARM_CEXT_IWMMXT2),       ARM_ANY},
23038
  {"maverick",  ARM_FEATURE (0, ARM_CEXT_MAVERICK),      ARM_ANY},
23039
  {"mp",        ARM_FEATURE (ARM_EXT_MP, 0),
23040
                     ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)},
23041
  {"os",        ARM_FEATURE (ARM_EXT_OS, 0),
23042
                                   ARM_FEATURE (ARM_EXT_V6M, 0)},
23043
  {"sec",       ARM_FEATURE (ARM_EXT_SEC, 0),
23044
                     ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)},
23045
  {"virt",      ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV | ARM_EXT_DIV, 0),
23046
                                   ARM_FEATURE (ARM_EXT_V7A, 0)},
23047
  {"xscale",    ARM_FEATURE (0, ARM_CEXT_XSCALE),        ARM_ANY},
23048
  {NULL,        ARM_ARCH_NONE,                    ARM_ARCH_NONE}
23049
};
23050
 
23051
/* ISA floating-point and Advanced SIMD extensions.  */
23052
struct arm_option_fpu_value_table
23053
{
23054
  char *name;
23055
  const arm_feature_set value;
23056
};
23057
 
23058
/* This list should, at a minimum, contain all the fpu names
23059
   recognized by GCC.  */
23060
static const struct arm_option_fpu_value_table arm_fpus[] =
23061
{
23062
  {"softfpa",           FPU_NONE},
23063
  {"fpe",               FPU_ARCH_FPE},
23064
  {"fpe2",              FPU_ARCH_FPE},
23065
  {"fpe3",              FPU_ARCH_FPA},  /* Third release supports LFM/SFM.  */
23066
  {"fpa",               FPU_ARCH_FPA},
23067
  {"fpa10",             FPU_ARCH_FPA},
23068
  {"fpa11",             FPU_ARCH_FPA},
23069
  {"arm7500fe",         FPU_ARCH_FPA},
23070
  {"softvfp",           FPU_ARCH_VFP},
23071
  {"softvfp+vfp",       FPU_ARCH_VFP_V2},
23072
  {"vfp",               FPU_ARCH_VFP_V2},
23073
  {"vfp9",              FPU_ARCH_VFP_V2},
23074
  {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatbility.  */
23075
  {"vfp10",             FPU_ARCH_VFP_V2},
23076
  {"vfp10-r0",          FPU_ARCH_VFP_V1},
23077
  {"vfpxd",             FPU_ARCH_VFP_V1xD},
23078
  {"vfpv2",             FPU_ARCH_VFP_V2},
23079
  {"vfpv3",             FPU_ARCH_VFP_V3},
23080
  {"vfpv3-fp16",        FPU_ARCH_VFP_V3_FP16},
23081
  {"vfpv3-d16",         FPU_ARCH_VFP_V3D16},
23082
  {"vfpv3-d16-fp16",    FPU_ARCH_VFP_V3D16_FP16},
23083
  {"vfpv3xd",           FPU_ARCH_VFP_V3xD},
23084
  {"vfpv3xd-fp16",      FPU_ARCH_VFP_V3xD_FP16},
23085
  {"arm1020t",          FPU_ARCH_VFP_V1},
23086
  {"arm1020e",          FPU_ARCH_VFP_V2},
23087
  {"arm1136jfs",        FPU_ARCH_VFP_V2},
23088
  {"arm1136jf-s",       FPU_ARCH_VFP_V2},
23089
  {"maverick",          FPU_ARCH_MAVERICK},
23090
  {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
23091
  {"neon-fp16",         FPU_ARCH_NEON_FP16},
23092
  {"vfpv4",             FPU_ARCH_VFP_V4},
23093
  {"vfpv4-d16",         FPU_ARCH_VFP_V4D16},
23094
  {"fpv4-sp-d16",       FPU_ARCH_VFP_V4_SP_D16},
23095
  {"neon-vfpv4",        FPU_ARCH_NEON_VFP_V4},
23096
  {NULL,                ARM_ARCH_NONE}
23097
};
23098
 
23099
struct arm_option_value_table
23100
{
23101
  char *name;
23102
  long value;
23103
};
23104
 
23105
static const struct arm_option_value_table arm_float_abis[] =
23106
{
23107
  {"hard",      ARM_FLOAT_ABI_HARD},
23108
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
23109
  {"soft",      ARM_FLOAT_ABI_SOFT},
23110
  {NULL,        0}
23111
};
23112
 
23113
#ifdef OBJ_ELF
23114
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
23115
static const struct arm_option_value_table arm_eabis[] =
23116
{
23117
  {"gnu",       EF_ARM_EABI_UNKNOWN},
23118
  {"4",         EF_ARM_EABI_VER4},
23119
  {"5",         EF_ARM_EABI_VER5},
23120
  {NULL,        0}
23121
};
23122
#endif
23123
 
23124
struct arm_long_option_table
23125
{
23126
  char * option;                /* Substring to match.  */
23127
  char * help;                  /* Help information.  */
23128
  int (* func) (char * subopt); /* Function to decode sub-option.  */
23129
  char * deprecated;            /* If non-null, print this message.  */
23130
};
23131
 
23132
static bfd_boolean
23133
arm_parse_extension (char * str, const arm_feature_set **opt_p)
23134
{
23135
  arm_feature_set *ext_set = (arm_feature_set *)
23136
      xmalloc (sizeof (arm_feature_set));
23137
 
23138
  /* We insist on extensions being specified in alphabetical order, and with
23139
     extensions being added before being removed.  We achieve this by having
23140
     the global ARM_EXTENSIONS table in alphabetical order, and using the
23141
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
23142
     or removing it (0) and only allowing it to change in the order
23143
     -1 -> 1 -> 0.  */
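  /* For example (illustrative, assuming an ARMv7-A base): the suffix
     "+mp+sec+noiwmmxt" is accepted -- additions in alphabetical order,
     removals last -- whereas "+sec+mp" is rejected for breaking the
     alphabetical order and "+noiwmmxt+mp" is rejected because an
     addition follows a removal.  */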
23144
  const struct arm_option_extension_value_table * opt = NULL;
23145
  int adding_value = -1;
23146
 
23147
  /* Copy the feature set, so that we can modify it.  */
23148
  *ext_set = **opt_p;
23149
  *opt_p = ext_set;
23150
 
23151
  while (str != NULL && *str != 0)
23152
    {
23153
      char * ext;
23154
      size_t optlen;
23155
 
23156
      if (*str != '+')
23157
        {
23158
          as_bad (_("invalid architectural extension"));
23159
          return FALSE;
23160
        }
23161
 
23162
      str++;
23163
      ext = strchr (str, '+');
23164
 
23165
      if (ext != NULL)
23166
        optlen = ext - str;
23167
      else
23168
        optlen = strlen (str);
23169
 
23170
      if (optlen >= 2
23171
          && strncmp (str, "no", 2) == 0)
23172
        {
23173
          if (adding_value != 0)
23174
            {
23175
              adding_value = 0;
23176
              opt = arm_extensions;
23177
            }
23178
 
23179
          optlen -= 2;
23180
          str += 2;
23181
        }
23182
      else if (optlen > 0)
23183
        {
23184
          if (adding_value == -1)
23185
            {
23186
              adding_value = 1;
23187
              opt = arm_extensions;
23188
            }
23189
          else if (adding_value != 1)
23190
            {
23191
              as_bad (_("must specify extensions to add before specifying "
23192
                        "those to remove"));
23193
              return FALSE;
23194
            }
23195
        }
23196
 
23197
      if (optlen == 0)
23198
        {
23199
          as_bad (_("missing architectural extension"));
23200
          return FALSE;
23201
        }
23202
 
23203
      gas_assert (adding_value != -1);
23204
      gas_assert (opt != NULL);
23205
 
23206
      /* Scan over the options table trying to find an exact match. */
23207
      for (; opt->name != NULL; opt++)
23208
        if (strncmp (opt->name, str, optlen) == 0
23209
            && strlen (opt->name) == optlen)
23210
          {
23211
            /* Check we can apply the extension to this architecture.  */
23212
            if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
23213
              {
23214
                as_bad (_("extension does not apply to the base architecture"));
23215
                return FALSE;
23216
              }
23217
 
23218
            /* Add or remove the extension.  */
23219
            if (adding_value)
23220
              ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
23221
            else
23222
              ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
23223
 
23224
            break;
23225
          }
23226
 
23227
      if (opt->name == NULL)
23228
        {
23229
          /* Did we fail to find an extension because it wasn't specified in
23230
             alphabetical order, or because it does not exist?  */
23231
 
23232
          for (opt = arm_extensions; opt->name != NULL; opt++)
23233
            if (strncmp (opt->name, str, optlen) == 0)
23234
              break;
23235
 
23236
          if (opt->name == NULL)
23237
            as_bad (_("unknown architectural extension `%s'"), str);
23238
          else
23239
            as_bad (_("architectural extensions must be specified in "
23240
                      "alphabetical order"));
23241
 
23242
          return FALSE;
23243
        }
23244
      else
23245
        {
23246
          /* We should skip the extension we've just matched the next time
23247
             round.  */
23248
          opt++;
23249
        }
23250
 
23251
      str = ext;
23252
    }
23253
 
23254
  return TRUE;
23255
}
23256
 
23257
static bfd_boolean
23258
arm_parse_cpu (char * str)
23259
{
23260
  const struct arm_cpu_option_table * opt;
23261
  char * ext = strchr (str, '+');
23262
  int optlen;
23263
 
23264
  if (ext != NULL)
23265
    optlen = ext - str;
23266
  else
23267
    optlen = strlen (str);
23268
 
23269
  if (optlen == 0)
23270
    {
23271
      as_bad (_("missing cpu name `%s'"), str);
23272
      return FALSE;
23273
    }
23274
 
23275
  for (opt = arm_cpus; opt->name != NULL; opt++)
23276
    if (strncmp (opt->name, str, optlen) == 0)
23277
      {
23278
        mcpu_cpu_opt = &opt->value;
23279
        mcpu_fpu_opt = &opt->default_fpu;
23280
        if (opt->canonical_name)
23281
          strcpy (selected_cpu_name, opt->canonical_name);
23282
        else
23283
          {
23284
            int i;
23285
 
23286
            for (i = 0; i < optlen; i++)
23287
              selected_cpu_name[i] = TOUPPER (opt->name[i]);
23288
            selected_cpu_name[i] = 0;
23289
          }
23290
 
23291
        if (ext != NULL)
23292
          return arm_parse_extension (ext, &mcpu_cpu_opt);
23293
 
23294
        return TRUE;
23295
      }
23296
 
23297
  as_bad (_("unknown cpu `%s'"), str);
23298
  return FALSE;
23299
}
23300
 
23301
static bfd_boolean
23302
arm_parse_arch (char * str)
23303
{
23304
  const struct arm_arch_option_table *opt;
23305
  char *ext = strchr (str, '+');
23306
  int optlen;
23307
 
23308
  if (ext != NULL)
23309
    optlen = ext - str;
23310
  else
23311
    optlen = strlen (str);
23312
 
23313
  if (optlen == 0)
23314
    {
23315
      as_bad (_("missing architecture name `%s'"), str);
23316
      return FALSE;
23317
    }
23318
 
23319
  for (opt = arm_archs; opt->name != NULL; opt++)
23320
    if (strncmp (opt->name, str, optlen) == 0)
23321
      {
23322
        march_cpu_opt = &opt->value;
23323
        march_fpu_opt = &opt->default_fpu;
23324
        strcpy (selected_cpu_name, opt->name);
23325
 
23326
        if (ext != NULL)
23327
          return arm_parse_extension (ext, &march_cpu_opt);
23328
 
23329
        return TRUE;
23330
      }
23331
 
23332
  as_bad (_("unknown architecture `%s'\n"), str);
23333
  return FALSE;
23334
}
23335
 
23336
static bfd_boolean
23337
arm_parse_fpu (char * str)
23338
{
23339
  const struct arm_option_fpu_value_table * opt;
23340
 
23341
  for (opt = arm_fpus; opt->name != NULL; opt++)
23342
    if (streq (opt->name, str))
23343
      {
23344
        mfpu_opt = &opt->value;
23345
        return TRUE;
23346
      }
23347
 
23348
  as_bad (_("unknown floating point format `%s'\n"), str);
23349
  return FALSE;
23350
}
23351
 
23352
static bfd_boolean
23353
arm_parse_float_abi (char * str)
23354
{
23355
  const struct arm_option_value_table * opt;
23356
 
23357
  for (opt = arm_float_abis; opt->name != NULL; opt++)
23358
    if (streq (opt->name, str))
23359
      {
23360
        mfloat_abi_opt = opt->value;
23361
        return TRUE;
23362
      }
23363
 
23364
  as_bad (_("unknown floating point abi `%s'\n"), str);
23365
  return FALSE;
23366
}
23367
 
23368
#ifdef OBJ_ELF
23369
static bfd_boolean
23370
arm_parse_eabi (char * str)
23371
{
23372
  const struct arm_option_value_table *opt;
23373
 
23374
  for (opt = arm_eabis; opt->name != NULL; opt++)
23375
    if (streq (opt->name, str))
23376
      {
23377
        meabi_flags = opt->value;
23378
        return TRUE;
23379
      }
23380
  as_bad (_("unknown EABI `%s'\n"), str);
23381
  return FALSE;
23382
}
23383
#endif
23384
 
23385
static bfd_boolean
23386
arm_parse_it_mode (char * str)
23387
{
23388
  bfd_boolean ret = TRUE;
23389
 
23390
  if (streq ("arm", str))
23391
    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
23392
  else if (streq ("thumb", str))
23393
    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
23394
  else if (streq ("always", str))
23395
    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
23396
  else if (streq ("never", str))
23397
    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
23398
  else
23399
    {
23400
      as_bad (_("unknown implicit IT mode `%s', should be "\
23401
                "arm, thumb, always, or never."), str);
23402
      ret = FALSE;
23403
    }
23404
 
23405
  return ret;
23406
}
23407
 
23408
struct arm_long_option_table arm_long_opts[] =
23409
{
23410
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
23411
   arm_parse_cpu, NULL},
23412
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
23413
   arm_parse_arch, NULL},
23414
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
23415
   arm_parse_fpu, NULL},
23416
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
23417
   arm_parse_float_abi, NULL},
23418
#ifdef OBJ_ELF
23419
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
23420
   arm_parse_eabi, NULL},
23421
#endif
23422
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
23423
   arm_parse_it_mode, NULL},
23424
  {NULL, NULL, 0, NULL}
23425
};
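/* For illustration, a typical command line using the long options above
   might be (hypothetical input file):

       as -mcpu=arm926ej-s -mfpu=vfpv2 -mfloat-abi=softfp -o foo.o foo.s

   md_parse_option below dispatches each option to its parser
   (arm_parse_cpu, arm_parse_fpu, arm_parse_float_abi).  */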
23426
 
23427
int
23428
md_parse_option (int c, char * arg)
23429
{
23430
  struct arm_option_table *opt;
23431
  const struct arm_legacy_option_table *fopt;
23432
  struct arm_long_option_table *lopt;
23433
 
23434
  switch (c)
23435
    {
23436
#ifdef OPTION_EB
23437
    case OPTION_EB:
23438
      target_big_endian = 1;
23439
      break;
23440
#endif
23441
 
23442
#ifdef OPTION_EL
23443
    case OPTION_EL:
23444
      target_big_endian = 0;
23445
      break;
23446
#endif
23447
 
23448
    case OPTION_FIX_V4BX:
23449
      fix_v4bx = TRUE;
23450
      break;
23451
 
23452
    case 'a':
23453
      /* Listing option.  Just ignore these, we don't support additional
23454
         ones.  */
23455
      return 0;
23456
 
23457
    default:
23458
      for (opt = arm_opts; opt->option != NULL; opt++)
23459
        {
23460
          if (c == opt->option[0]
23461
              && ((arg == NULL && opt->option[1] == 0)
23462
                  || streq (arg, opt->option + 1)))
23463
            {
23464
              /* If the option is deprecated, tell the user.  */
23465
              if (warn_on_deprecated && opt->deprecated != NULL)
23466
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23467
                           arg ? arg : "", _(opt->deprecated));
23468
 
23469
              if (opt->var != NULL)
23470
                *opt->var = opt->value;
23471
 
23472
              return 1;
23473
            }
23474
        }
23475
 
23476
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
23477
        {
23478
          if (c == fopt->option[0]
23479
              && ((arg == NULL && fopt->option[1] == 0)
23480
                  || streq (arg, fopt->option + 1)))
23481
            {
23482
              /* If the option is deprecated, tell the user.  */
23483
              if (warn_on_deprecated && fopt->deprecated != NULL)
23484
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23485
                           arg ? arg : "", _(fopt->deprecated));
23486
 
23487
              if (fopt->var != NULL)
23488
                *fopt->var = &fopt->value;
23489
 
23490
              return 1;
23491
            }
23492
        }
23493
 
23494
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23495
        {
23496
          /* These options are expected to have an argument.  */
23497
          if (c == lopt->option[0]
23498
              && arg != NULL
23499
              && strncmp (arg, lopt->option + 1,
23500
                          strlen (lopt->option + 1)) == 0)
23501
            {
23502
              /* If the option is deprecated, tell the user.  */
23503
              if (warn_on_deprecated && lopt->deprecated != NULL)
23504
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
23505
                           _(lopt->deprecated));
23506
 
23507
              /* Call the sub-option parser.  */
23508
              return lopt->func (arg + strlen (lopt->option) - 1);
23509
            }
23510
        }
23511
 
23512
      return 0;
23513
    }
23514
 
23515
  return 1;
23516
}
23517
 
23518
void
23519
md_show_usage (FILE * fp)
23520
{
23521
  struct arm_option_table *opt;
23522
  struct arm_long_option_table *lopt;
23523
 
23524
  fprintf (fp, _(" ARM-specific assembler options:\n"));
23525
 
23526
  for (opt = arm_opts; opt->option != NULL; opt++)
23527
    if (opt->help != NULL)
23528
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
23529
 
23530
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23531
    if (lopt->help != NULL)
23532
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
23533
 
23534
#ifdef OPTION_EB
23535
  fprintf (fp, _("\
23536
  -EB                     assemble code for a big-endian cpu\n"));
23537
#endif
23538
 
23539
#ifdef OPTION_EL
23540
  fprintf (fp, _("\
23541
  -EL                     assemble code for a little-endian cpu\n"));
23542
#endif
23543
 
23544
  fprintf (fp, _("\
23545
  --fix-v4bx              Allow BX in ARMv4 code\n"));
23546
}
23547
 
23548
 
23549
#ifdef OBJ_ELF
23550
typedef struct
23551
{
23552
  int val;
23553
  arm_feature_set flags;
23554
} cpu_arch_ver_table;
23555
 
23556
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
23557
   least features first.  */
23558
static const cpu_arch_ver_table cpu_arch_ver[] =
23559
{
23560
    {1, ARM_ARCH_V4},
23561
    {2, ARM_ARCH_V4T},
23562
    {3, ARM_ARCH_V5},
23563
    {3, ARM_ARCH_V5T},
23564
    {4, ARM_ARCH_V5TE},
23565
    {5, ARM_ARCH_V5TEJ},
23566
    {6, ARM_ARCH_V6},
23567
    {9, ARM_ARCH_V6K},
23568
    {7, ARM_ARCH_V6Z},
23569
    {11, ARM_ARCH_V6M},
23570
    {12, ARM_ARCH_V6SM},
23571
    {8, ARM_ARCH_V6T2},
23572
    {10, ARM_ARCH_V7A},
23573
    {10, ARM_ARCH_V7R},
23574
    {10, ARM_ARCH_V7M},
23575
    {0, ARM_ARCH_NONE}
23576
};
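/* Illustrative reading of the table above: the loop in
   aeabi_set_public_attributes records the value of the last entry that
   contributes a feature of the code, so plain ARMv5TE code ends up with
   Tag_CPU_arch value 4 and ARMv6K code with value 9.  */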
23577
 
23578
/* Set an attribute if it has not already been set by the user.  */
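/* For example, if the input already contains an explicit
   ".eabi_attribute Tag_CPU_arch, <n>" directive, the value computed
   below does not override it.  */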
23579
static void
23580
aeabi_set_attribute_int (int tag, int value)
23581
{
23582
  if (tag < 1
23583
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23584
      || !attributes_set_explicitly[tag])
23585
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
23586
}
23587
 
23588
static void
23589
aeabi_set_attribute_string (int tag, const char *value)
23590
{
23591
  if (tag < 1
23592
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23593
      || !attributes_set_explicitly[tag])
23594
    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
23595
}
23596
 
23597
/* Set the public EABI object attributes.  */
23598
static void
23599
aeabi_set_public_attributes (void)
23600
{
23601
  int arch;
23602
  int virt_sec = 0;
23603
  arm_feature_set flags;
23604
  arm_feature_set tmp;
23605
  const cpu_arch_ver_table *p;
23606
 
23607
  /* Choose the architecture based on the capabilities of the requested cpu
23608
     (if any) and/or the instructions actually used.  */
23609
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
23610
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
23611
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
23612
  /* Allow the user to override the reported architecture.  */
23613
  if (object_arch)
23614
    {
23615
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
23616
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
23617
    }
23618
 
23619
  /* We need to make sure that the attributes do not identify us as v6S-M
23620
     when the only v6S-M feature in use is the Operating System Extensions.  */
23621
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
23622
      if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
23623
        ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
23624
 
23625
  tmp = flags;
23626
  arch = 0;
23627
  for (p = cpu_arch_ver; p->val; p++)
23628
    {
23629
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
23630
        {
23631
          arch = p->val;
23632
          ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
23633
        }
23634
    }
23635
 
23636
  /* The table lookup above finds the last architecture to contribute
23637
     a new feature.  Unfortunately, Tag13 is a subset of the union of
23638
     v6T2 and v7-M, so it is never seen as contributing a new feature.
23639
     We can not search for the last entry which is entirely used,
23640
     because if no CPU is specified we build up only those flags
23641
     actually used.  Perhaps we should separate out the specified
23642
     and implicit cases.  Avoid taking this path for -march=all by
23643
     checking for contradictory v7-A / v7-M features.  */
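  /* For instance, plain ARMv7E-M code reaches this point with ARCH == 10
     and is corrected to 13 below.  */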
23644
  if (arch == 10
23645
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
23646
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
23647
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
23648
    arch = 13;
23649
 
23650
  /* Tag_CPU_name.  */
23651
  if (selected_cpu_name[0])
23652
    {
23653
      char *q;
23654
 
23655
      q = selected_cpu_name;
23656
      if (strncmp (q, "armv", 4) == 0)
23657
        {
23658
          int i;
23659
 
23660
          q += 4;
23661
          for (i = 0; q[i]; i++)
23662
            q[i] = TOUPPER (q[i]);
23663
        }
23664
      aeabi_set_attribute_string (Tag_CPU_name, q);
23665
    }
23666
 
23667
  /* Tag_CPU_arch.  */
23668
  aeabi_set_attribute_int (Tag_CPU_arch, arch);
23669
 
23670
  /* Tag_CPU_arch_profile.  */
23671
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
23672
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
23673
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
23674
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
23675
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
23676
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');
23677
 
23678
  /* Tag_ARM_ISA_use.  */
23679
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
23680
      || arch == 0)
23681
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
23682
 
23683
  /* Tag_THUMB_ISA_use.  */
23684
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
23685
      || arch == 0)
23686
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
23687
        ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
23688
 
23689
  /* Tag_VFP_arch.  */
23690
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
23691
    aeabi_set_attribute_int (Tag_VFP_arch,
23692
                             ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
23693
                             ? 5 : 6);
23694
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
23695
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
23696
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
23697
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
23698
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
23699
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
23700
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
23701
           || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
23702
    aeabi_set_attribute_int (Tag_VFP_arch, 1);
23703
 
23704
  /* Tag_ABI_HardFP_use.  */
23705
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
23706
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
23707
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
23708
 
23709
  /* Tag_WMMX_arch.  */
23710
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
23711
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
23712
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
23713
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);
23714
 
23715
  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
23716
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
23717
    aeabi_set_attribute_int
23718
      (Tag_Advanced_SIMD_arch, (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma)
23719
                                ? 2 : 1));
23720
 
23721
  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
23722
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16))
23723
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
23724
 
23725
  /* Tag_DIV_use.  */
23726
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv))
23727
    aeabi_set_attribute_int (Tag_DIV_use, 2);
23728
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_div))
23729
    aeabi_set_attribute_int (Tag_DIV_use, 0);
23730
  else
23731
    aeabi_set_attribute_int (Tag_DIV_use, 1);
23732
 
23733
  /* Tag_MPextension_use.  */
23734
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
23735
    aeabi_set_attribute_int (Tag_MPextension_use, 1);
23736
 
23737
  /* Tag_Virtualization_use.  */
23738
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
23739
    virt_sec |= 1;
23740
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
23741
    virt_sec |= 2;
23742
  if (virt_sec != 0)
23743
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
23744
}
23745
 
23746
/* Add the default contents for the .ARM.attributes section.  */
23747
void
23748
arm_md_end (void)
23749
{
23750
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
23751
    return;
23752
 
23753
  aeabi_set_public_attributes ();
23754
}
23755
#endif /* OBJ_ELF */
23756
 
23757
 
23758
/* Parse a .cpu directive.  */
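/* For example, a source file may contain ".cpu arm926ej-s" to select
   that processor's feature set for the following code (illustrative;
   any name from the arm_cpus table except "all" is accepted).  */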
23759
 
23760
static void
23761
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
23762
{
23763
  const struct arm_cpu_option_table *opt;
23764
  char *name;
23765
  char saved_char;
23766
 
23767
  name = input_line_pointer;
23768
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23769
    input_line_pointer++;
23770
  saved_char = *input_line_pointer;
23771
  *input_line_pointer = 0;
23772
 
23773
  /* Skip the first "all" entry.  */
23774
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
23775
    if (streq (opt->name, name))
23776
      {
23777
        mcpu_cpu_opt = &opt->value;
23778
        selected_cpu = opt->value;
23779
        if (opt->canonical_name)
23780
          strcpy (selected_cpu_name, opt->canonical_name);
23781
        else
23782
          {
23783
            int i;
23784
            for (i = 0; opt->name[i]; i++)
23785
              selected_cpu_name[i] = TOUPPER (opt->name[i]);
23786
            selected_cpu_name[i] = 0;
23787
          }
23788
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23789
        *input_line_pointer = saved_char;
23790
        demand_empty_rest_of_line ();
23791
        return;
23792
      }
23793
  as_bad (_("unknown cpu `%s'"), name);
23794
  *input_line_pointer = saved_char;
23795
  ignore_rest_of_line ();
23796
}
23797
 
23798
 
23799
/* Parse a .arch directive.  */
23800
 
23801
static void
23802
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
23803
{
23804
  const struct arm_arch_option_table *opt;
23805
  char saved_char;
23806
  char *name;
23807
 
23808
  name = input_line_pointer;
23809
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23810
    input_line_pointer++;
23811
  saved_char = *input_line_pointer;
23812
  *input_line_pointer = 0;
23813
 
23814
  /* Skip the first "all" entry.  */
23815
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
23816
    if (streq (opt->name, name))
23817
      {
23818
        mcpu_cpu_opt = &opt->value;
23819
        selected_cpu = opt->value;
23820
        strcpy (selected_cpu_name, opt->name);
23821
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23822
        *input_line_pointer = saved_char;
23823
        demand_empty_rest_of_line ();
23824
        return;
23825
      }
23826
 
23827
  as_bad (_("unknown architecture `%s'\n"), name);
23828
  *input_line_pointer = saved_char;
23829
  ignore_rest_of_line ();
23830
}
23831
 
23832
 
23833
/* Parse a .object_arch directive.  */
23834
 
23835
static void
23836
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
23837
{
23838
  const struct arm_arch_option_table *opt;
23839
  char saved_char;
23840
  char *name;
23841
 
23842
  name = input_line_pointer;
23843
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23844
    input_line_pointer++;
23845
  saved_char = *input_line_pointer;
23846
  *input_line_pointer = 0;
23847
 
23848
  /* Skip the first "all" entry.  */
23849
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
23850
    if (streq (opt->name, name))
23851
      {
23852
        object_arch = &opt->value;
23853
        *input_line_pointer = saved_char;
23854
        demand_empty_rest_of_line ();
23855
        return;
23856
      }
23857
 
23858
  as_bad (_("unknown architecture `%s'\n"), name);
23859
  *input_line_pointer = saved_char;
23860
  ignore_rest_of_line ();
23861
}
23862
 
23863
/* Parse a .arch_extension directive.  */
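/* For example, ".arch_extension sec" adds the Security Extensions to the
   currently selected CPU and ".arch_extension nosec" removes them again
   (illustrative; the extension must be permitted for the current base
   architecture).  */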
23864
 
23865
static void
23866
s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
23867
{
23868
  const struct arm_option_extension_value_table *opt;
23869
  char saved_char;
23870
  char *name;
23871
  int adding_value = 1;
23872
 
23873
  name = input_line_pointer;
23874
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23875
    input_line_pointer++;
23876
  saved_char = *input_line_pointer;
23877
  *input_line_pointer = 0;
23878
 
23879
  if (strlen (name) >= 2
23880
      && strncmp (name, "no", 2) == 0)
23881
    {
23882
      adding_value = 0;
23883
      name += 2;
23884
    }
23885
 
23886
  for (opt = arm_extensions; opt->name != NULL; opt++)
23887
    if (streq (opt->name, name))
23888
      {
23889
        if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
23890
          {
23891
            as_bad (_("architectural extension `%s' is not allowed for the "
23892
                      "current base architecture"), name);
23893
            break;
23894
          }
23895
 
23896
        if (adding_value)
23897
          ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
23898
        else
23899
          ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
23900
 
23901
        mcpu_cpu_opt = &selected_cpu;
23902
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23903
        *input_line_pointer = saved_char;
23904
        demand_empty_rest_of_line ();
23905
        return;
23906
      }
23907
 
23908
  if (opt->name == NULL)
23909
    as_bad (_("unknown architecture `%s'\n"), name);
23910
 
23911
  *input_line_pointer = saved_char;
23912
  ignore_rest_of_line ();
23913
}
23914
 
23915
/* Parse a .fpu directive.  */
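/* For example, ".fpu vfpv3-d16" or ".fpu neon" selects the corresponding
   entry from the arm_fpus table for subsequent code (illustrative).  */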
23916
 
23917
static void
23918
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
23919
{
23920
  const struct arm_option_fpu_value_table *opt;
23921
  char saved_char;
23922
  char *name;
23923
 
23924
  name = input_line_pointer;
23925
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23926
    input_line_pointer++;
23927
  saved_char = *input_line_pointer;
23928
  *input_line_pointer = 0;
23929
 
23930
  for (opt = arm_fpus; opt->name != NULL; opt++)
23931
    if (streq (opt->name, name))
23932
      {
23933
        mfpu_opt = &opt->value;
23934
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23935
        *input_line_pointer = saved_char;
23936
        demand_empty_rest_of_line ();
23937
        return;
23938
      }
23939
 
23940
  as_bad (_("unknown floating point format `%s'\n"), name);
23941
  *input_line_pointer = saved_char;
23942
  ignore_rest_of_line ();
23943
}
23944
 
23945
/* Copy symbol information.  */
23946
 
23947
void
23948
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
23949
{
23950
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
23951
}
23952
 
23953
#ifdef OBJ_ELF
23954
/* Given a symbolic attribute NAME, return the proper integer value.
23955
   Returns -1 if the attribute is not known.  */
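/* For example, a directive such as ".eabi_attribute Tag_ABI_enum_size, 2"
   relies on this routine to translate the symbolic tag name into its
   numeric value (illustrative).  */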
23956
 
23957
int
23958
arm_convert_symbolic_attribute (const char *name)
23959
{
23960
  static const struct
23961
  {
23962
    const char * name;
23963
    const int    tag;
23964
  }
23965
  attribute_table[] =
23966
    {
23967
      /* When you modify this table you should
23968
         also modify the list in doc/c-arm.texi.  */
23969
#define T(tag) {#tag, tag}
23970
      T (Tag_CPU_raw_name),
23971
      T (Tag_CPU_name),
23972
      T (Tag_CPU_arch),
23973
      T (Tag_CPU_arch_profile),
23974
      T (Tag_ARM_ISA_use),
23975
      T (Tag_THUMB_ISA_use),
23976
      T (Tag_FP_arch),
23977
      T (Tag_VFP_arch),
23978
      T (Tag_WMMX_arch),
23979
      T (Tag_Advanced_SIMD_arch),
23980
      T (Tag_PCS_config),
23981
      T (Tag_ABI_PCS_R9_use),
23982
      T (Tag_ABI_PCS_RW_data),
23983
      T (Tag_ABI_PCS_RO_data),
23984
      T (Tag_ABI_PCS_GOT_use),
23985
      T (Tag_ABI_PCS_wchar_t),
23986
      T (Tag_ABI_FP_rounding),
23987
      T (Tag_ABI_FP_denormal),
23988
      T (Tag_ABI_FP_exceptions),
23989
      T (Tag_ABI_FP_user_exceptions),
23990
      T (Tag_ABI_FP_number_model),
23991
      T (Tag_ABI_align_needed),
23992
      T (Tag_ABI_align8_needed),
23993
      T (Tag_ABI_align_preserved),
23994
      T (Tag_ABI_align8_preserved),
23995
      T (Tag_ABI_enum_size),
23996
      T (Tag_ABI_HardFP_use),
23997
      T (Tag_ABI_VFP_args),
23998
      T (Tag_ABI_WMMX_args),
23999
      T (Tag_ABI_optimization_goals),
24000
      T (Tag_ABI_FP_optimization_goals),
24001
      T (Tag_compatibility),
24002
      T (Tag_CPU_unaligned_access),
24003
      T (Tag_FP_HP_extension),
24004
      T (Tag_VFP_HP_extension),
24005
      T (Tag_ABI_FP_16bit_format),
24006
      T (Tag_MPextension_use),
24007
      T (Tag_DIV_use),
24008
      T (Tag_nodefaults),
24009
      T (Tag_also_compatible_with),
24010
      T (Tag_conformance),
24011
      T (Tag_T2EE_use),
24012
      T (Tag_Virtualization_use),
24013
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
24014
#undef T
24015
    };
24016
  unsigned int i;
24017
 
24018
  if (name == NULL)
24019
    return -1;
24020
 
24021
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
24022
    if (streq (name, attribute_table[i].name))
24023
      return attribute_table[i].tag;
24024
 
24025
  return -1;
24026
}
24027
 
24028
 
24029
/* Apply the symbol value to a relocation only when it refers to a
24030
   local symbol and the selected architecture provides the BLX
24031
   feature needed for simple ARM/Thumb mode switches.  */
24032
int
24033
arm_apply_sym_value (struct fix * fixP)
24034
{
24035
  if (fixP->fx_addsy
24036
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24037
      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
24038
    {
24039
      switch (fixP->fx_r_type)
24040
        {
24041
        case BFD_RELOC_ARM_PCREL_BLX:
24042
        case BFD_RELOC_THUMB_PCREL_BRANCH23:
24043
          if (ARM_IS_FUNC (fixP->fx_addsy))
24044
            return 1;
24045
          break;
24046
 
24047
        case BFD_RELOC_ARM_PCREL_CALL:
24048
        case BFD_RELOC_THUMB_PCREL_BLX:
24049
          if (THUMB_IS_FUNC (fixP->fx_addsy))
24050
              return 1;
24051
          break;
24052
 
24053
        default:
24054
          break;
24055
        }
24056
 
24057
    }
24058
  return 0;
24059
}
24060
#endif /* OBJ_ELF */
