OpenCores
URL https://opencores.org/ocsvn/open8_urisc/open8_urisc/trunk

Subversion Repositories open8_urisc

open8_urisc/trunk/gnu/binutils/gas/config/tc-arm.c - Blame information for rev 163


Line No. Rev Author Line
1 16 khays
/* tc-arm.c -- Assemble for the ARM
2
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3
   2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4
   Free Software Foundation, Inc.
5
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6
        Modified by David Taylor (dtaylor@armltd.co.uk)
7
        Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8
        Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9
        Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
 
11
   This file is part of GAS, the GNU Assembler.
12
 
13
   GAS is free software; you can redistribute it and/or modify
14
   it under the terms of the GNU General Public License as published by
15
   the Free Software Foundation; either version 3, or (at your option)
16
   any later version.
17
 
18
   GAS is distributed in the hope that it will be useful,
19
   but WITHOUT ANY WARRANTY; without even the implied warranty of
20
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21
   GNU General Public License for more details.
22
 
23
   You should have received a copy of the GNU General Public License
24
   along with GAS; see the file COPYING.  If not, write to the Free
25
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26
   02110-1301, USA.  */
27
 
28
#include "as.h"
29
#include <limits.h>
30
#include <stdarg.h>
31
#define  NO_RELOC 0
32
#include "safe-ctype.h"
33
#include "subsegs.h"
34
#include "obstack.h"
35 163 khays
#include "libiberty.h"
36 16 khays
#include "opcode/arm.h"
37
 
38
#ifdef OBJ_ELF
39
#include "elf/arm.h"
40
#include "dw2gencfi.h"
41
#endif
42
 
43
#include "dwarf2dbg.h"
44
 
45
#ifdef OBJ_ELF
46
/* Must be at least the size of the largest unwind opcode (currently two).  */
47
#define ARM_OPCODE_CHUNK_SIZE 8
48
 
49
/* This structure holds the unwinding state.  */
50
 
51
static struct
52
{
53
  symbolS *       proc_start;
54
  symbolS *       table_entry;
55
  symbolS *       personality_routine;
56
  int             personality_index;
57
  /* The segment containing the function.  */
58
  segT            saved_seg;
59
  subsegT         saved_subseg;
60
  /* Opcodes generated from this function.  */
61
  unsigned char * opcodes;
62
  int             opcode_count;
63
  int             opcode_alloc;
64
  /* The number of bytes pushed to the stack.  */
65
  offsetT         frame_size;
66
  /* We don't add stack adjustment opcodes immediately so that we can merge
67
     multiple adjustments.  We can also omit the final adjustment
68
     when using a frame pointer.  */
69
  offsetT         pending_offset;
70
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
71
     hold the reg+offset to use when restoring sp from a frame pointer.  */
72
  offsetT         fp_offset;
73
  int             fp_reg;
74
  /* Nonzero if an unwind_setfp directive has been seen.  */
75
  unsigned        fp_used:1;
76
  /* Nonzero if the last opcode restores sp from fp_reg.  */
77
  unsigned        sp_restored:1;
78
} unwind;
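/* Illustrative sketch (an assumption, not part of the original source):
   the standard ARM EABI unwinding directives drive this state roughly as
   follows for a typical function:

       .fnstart                 @ proc_start recorded, state reset
       push   {r4, fp, lr}
       .save  {r4, fp, lr}      @ opcode queued, frame_size += 12
       .setfp fp, sp, #4        @ fp_used = 1, fp_reg/fp_offset recorded
       ...
       .fnend                   @ pending opcodes flushed, unwind table
                                @ entry (table_entry) emitted  */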
79
 
80
#endif /* OBJ_ELF */
81
 
82
/* Results from operand parsing worker functions.  */
83
 
84
typedef enum
85
{
86
  PARSE_OPERAND_SUCCESS,
87
  PARSE_OPERAND_FAIL,
88
  PARSE_OPERAND_FAIL_NO_BACKTRACK
89
} parse_operand_result;
90
 
91
enum arm_float_abi
92
{
93
  ARM_FLOAT_ABI_HARD,
94
  ARM_FLOAT_ABI_SOFTFP,
95
  ARM_FLOAT_ABI_SOFT
96
};
97
 
98
/* Types of processor to assemble for.  */
99
#ifndef CPU_DEFAULT
100
/* The code that was here used to select a default CPU depending on compiler
101
   pre-defines which were only present when doing native builds, thus
102
   changing gas' default behaviour depending upon the build host.
103
 
104
   If you have a target that requires a default CPU option then you
105
   should define CPU_DEFAULT here.  */
106
#endif
107
 
108
#ifndef FPU_DEFAULT
109
# ifdef TE_LINUX
110
#  define FPU_DEFAULT FPU_ARCH_FPA
111
# elif defined (TE_NetBSD)
112
#  ifdef OBJ_ELF
113
#   define FPU_DEFAULT FPU_ARCH_VFP     /* Soft-float, but VFP order.  */
114
#  else
115
    /* Legacy a.out format.  */
116
#   define FPU_DEFAULT FPU_ARCH_FPA     /* Soft-float, but FPA order.  */
117
#  endif
118
# elif defined (TE_VXWORKS)
119
#  define FPU_DEFAULT FPU_ARCH_VFP      /* Soft-float, VFP order.  */
120
# else
121
   /* For backwards compatibility, default to FPA.  */
122
#  define FPU_DEFAULT FPU_ARCH_FPA
123
# endif
124
#endif /* ifndef FPU_DEFAULT */
125
 
126
#define streq(a, b)           (strcmp (a, b) == 0)
127
 
128
static arm_feature_set cpu_variant;
129
static arm_feature_set arm_arch_used;
130
static arm_feature_set thumb_arch_used;
131
 
132
/* Flags stored in private area of BFD structure.  */
133
static int uses_apcs_26      = FALSE;
134
static int atpcs             = FALSE;
135
static int support_interwork = FALSE;
136
static int uses_apcs_float   = FALSE;
137
static int pic_code          = FALSE;
138
static int fix_v4bx          = FALSE;
139
/* Warn on using deprecated features.  */
140
static int warn_on_deprecated = TRUE;
141
 
142
 
143
/* Variables that we set while parsing command-line options.  Once all
144
   options have been read we re-process these values to set the real
145
   assembly flags.  */
146
static const arm_feature_set *legacy_cpu = NULL;
147
static const arm_feature_set *legacy_fpu = NULL;
148
 
149
static const arm_feature_set *mcpu_cpu_opt = NULL;
150
static const arm_feature_set *mcpu_fpu_opt = NULL;
151
static const arm_feature_set *march_cpu_opt = NULL;
152
static const arm_feature_set *march_fpu_opt = NULL;
153
static const arm_feature_set *mfpu_opt = NULL;
154
static const arm_feature_set *object_arch = NULL;
155
 
156
/* Constants for known architecture features.  */
157
static const arm_feature_set fpu_default = FPU_DEFAULT;
158
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
 
167
#ifdef CPU_DEFAULT
168
static const arm_feature_set cpu_default = CPU_DEFAULT;
169
#endif
170
 
171
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
173
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179
static const arm_feature_set arm_ext_v4t_5 =
180
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188
static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190
static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191
static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192
static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197
static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198
static const arm_feature_set arm_ext_m =
199
  ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
200
static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
201
static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
202
static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
203
static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
204
static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
205
 
206
static const arm_feature_set arm_arch_any = ARM_ANY;
207
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
208
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
209
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
210
static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
211
 
212
static const arm_feature_set arm_cext_iwmmxt2 =
213
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
214
static const arm_feature_set arm_cext_iwmmxt =
215
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
216
static const arm_feature_set arm_cext_xscale =
217
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
218
static const arm_feature_set arm_cext_maverick =
219
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
220
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
221
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
222
static const arm_feature_set fpu_vfp_ext_v1xd =
223
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
224
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
225
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
226
static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
227
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
228
static const arm_feature_set fpu_vfp_ext_d32 =
229
  ARM_FEATURE (0, FPU_VFP_EXT_D32);
230
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
231
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
232
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
233
static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
234
static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
235
static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
236
 
237
static int mfloat_abi_opt = -1;
238
/* Record user cpu selection for object attributes.  */
239
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
240
/* Must be long enough to hold any of the names in arm_cpus.  */
241
static char selected_cpu_name[16];
242
 
243
/* Return TRUE if no CPU was selected on the command line.  */
244
static bfd_boolean
245
no_cpu_selected (void)
246
{
247
  return selected_cpu.core == arm_arch_none.core
248
    && selected_cpu.coproc == arm_arch_none.coproc;
249
}
250
 
251
#ifdef OBJ_ELF
252
# ifdef EABI_DEFAULT
253
static int meabi_flags = EABI_DEFAULT;
254
# else
255
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
256
# endif
257
 
258
static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
259
 
260
bfd_boolean
261
arm_is_eabi (void)
262
{
263
  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
264
}
265
#endif
266
 
267
#ifdef OBJ_ELF
268
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
269
symbolS * GOT_symbol;
270
#endif
271
 
272
/* 0: assemble for ARM,
273
   1: assemble for Thumb,
274
   2: assemble for Thumb even though target CPU does not support thumb
275
      instructions.  */
276
static int thumb_mode = 0;
277
/* A value distinct from the possible values for thumb_mode that we
278
   can use to record whether thumb_mode has been copied into the
279
   tc_frag_data field of a frag.  */
280
#define MODE_RECORDED (1 << 4)
281
 
282
/* Specifies the intrinsic IT insn behavior mode.  */
283
enum implicit_it_mode
284
{
285
  IMPLICIT_IT_MODE_NEVER  = 0x00,
286
  IMPLICIT_IT_MODE_ARM    = 0x01,
287
  IMPLICIT_IT_MODE_THUMB  = 0x02,
288
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
289
};
290
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
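/* Illustrative note (an assumption, not from the original source): these
   values back the -mimplicit-it=never|arm|thumb|always command-line
   option, i.e. they control whether the assembler will quietly provide
   the IT instruction implied by a bare conditional instruction in ARM
   and/or Thumb code rather than warn or reject it.  */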
291
 
292
/* If unified_syntax is true, we are processing the new unified
293
   ARM/Thumb syntax.  Important differences from the old ARM mode:
294
 
295
     - Immediate operands do not require a # prefix.
296
     - Conditional affixes always appear at the end of the
297
       instruction.  (For backward compatibility, those instructions
298
       that formerly had them in the middle continue to accept them
299
       there.)
300
     - The IT instruction may appear, and if it does is validated
301
       against subsequent conditional affixes.  It does not generate
302
       machine code.
303
 
304
   Important differences from the old Thumb mode:
305
 
306
     - Immediate operands do not require a # prefix.
307
     - Most of the V6T2 instructions are only available in unified mode.
308
     - The .N and .W suffixes are recognized and honored (it is an error
309
       if they cannot be honored).
310
     - All instructions set the flags if and only if they have an 's' affix.
311
     - Conditional affixes may be used.  They are validated against
312
       preceding IT instructions.  Unlike ARM mode, you cannot use a
313
       conditional affix except in the scope of an IT instruction.  */
314
 
315
static bfd_boolean unified_syntax = FALSE;
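/* Illustrative example (an assumption, not part of the original source):
   under the old divided syntax a conditional byte load could be written
   with the condition in the middle, "ldreqb r0, [r1]", whereas unified
   syntax expects the affix at the end, "ldrbeq r0, [r1]"; in unified
   Thumb the instruction must also be covered by an IT instruction:

       .syntax unified
       it     eq
       ldrbeq r0, [r1]  */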
316
 
317
enum neon_el_type
318
{
319
  NT_invtype,
320
  NT_untyped,
321
  NT_integer,
322
  NT_float,
323
  NT_poly,
324
  NT_signed,
325
  NT_unsigned
326
};
327
 
328
struct neon_type_el
329
{
330
  enum neon_el_type type;
331
  unsigned size;
332
};
333
 
334
#define NEON_MAX_TYPE_ELS 4
335
 
336
struct neon_type
337
{
338
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
339
  unsigned elems;
340
};
341
 
342
enum it_instruction_type
343
{
344
   OUTSIDE_IT_INSN,
345
   INSIDE_IT_INSN,
346
   INSIDE_IT_LAST_INSN,
347
   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
348
                              if inside, should be the last one.  */
349
   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
350
                              i.e. BKPT and NOP.  */
351
   IT_INSN                 /* The IT insn has been parsed.  */
352
};
353
 
354
struct arm_it
355
{
356
  const char *  error;
357
  unsigned long instruction;
358
  int           size;
359
  int           size_req;
360
  int           cond;
361
  /* "uncond_value" is set to the value in place of the conditional field in
362
     unconditional versions of the instruction, or -1 if nothing is
363
     appropriate.  */
364
  int           uncond_value;
365
  struct neon_type vectype;
366
  /* This does not indicate an actual NEON instruction, only that
367
     the mnemonic accepts neon-style type suffixes.  */
368
  int           is_neon;
369
  /* Set to the opcode if the instruction needs relaxation.
370
     Zero if the instruction is not relaxed.  */
371
  unsigned long relax;
372
  struct
373
  {
374
    bfd_reloc_code_real_type type;
375
    expressionS              exp;
376
    int                      pc_rel;
377
  } reloc;
378
 
379
  enum it_instruction_type it_insn_type;
380
 
381
  struct
382
  {
383
    unsigned reg;
384
    signed int imm;
385
    struct neon_type_el vectype;
386
    unsigned present    : 1;  /* Operand present.  */
387
    unsigned isreg      : 1;  /* Operand was a register.  */
388
    unsigned immisreg   : 1;  /* .imm field is a second register.  */
389
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
390
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
391
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
392
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
393
       instructions. This allows us to disambiguate ARM <-> vector insns.  */
394
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
395
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
396
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
397
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
398
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
399
    unsigned writeback  : 1;  /* Operand has trailing !  */
400
    unsigned preind     : 1;  /* Preindexed address.  */
401
    unsigned postind    : 1;  /* Postindexed address.  */
402
    unsigned negative   : 1;  /* Index register was negated.  */
403
    unsigned shifted    : 1;  /* Shift applied to operation.  */
404
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
405
  } operands[6];
406
};
407
 
408
static struct arm_it inst;
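/* Illustrative sketch (an assumption, not from the original source): after
   parsing "ldr r0, [r1, #4]!" one would expect roughly

       inst.operands[0]: reg = 0, isreg = 1, present = 1
       inst.operands[1]: reg = 1, isreg = 1, preind = 1, writeback = 1,
                         with the constant 4 held in inst.reloc.exp

   whereas the post-indexed form "ldr r0, [r1], #4" marks postind
   instead of preind.  */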
409
 
410
#define NUM_FLOAT_VALS 8
411
 
412
const char * fp_const[] =
413
{
414
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
415
};
416
 
417
/* Number of littlenums required to hold an extended precision number.  */
418
#define MAX_LITTLENUMS 6
419
 
420
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
421
 
422
#define FAIL    (-1)
423
#define SUCCESS (0)
424
 
425
#define SUFF_S 1
426
#define SUFF_D 2
427
#define SUFF_E 3
428
#define SUFF_P 4
429
 
430
#define CP_T_X   0x00008000
431
#define CP_T_Y   0x00400000
432
 
433
#define CONDS_BIT        0x00100000
434
#define LOAD_BIT         0x00100000
435
 
436
#define DOUBLE_LOAD_FLAG 0x00000001
437
 
438
struct asm_cond
439
{
440
  const char *   template_name;
441
  unsigned long  value;
442
};
443
 
444
#define COND_ALWAYS 0xE
445
 
446
struct asm_psr
447
{
448
  const char *   template_name;
449
  unsigned long  field;
450
};
451
 
452
struct asm_barrier_opt
453
{
454
  const char *   template_name;
455
  unsigned long  value;
456
};
457
 
458
/* The bit that distinguishes CPSR and SPSR.  */
459
#define SPSR_BIT   (1 << 22)
460
 
461
/* The individual PSR flag bits.  */
462
#define PSR_c   (1 << 16)
463
#define PSR_x   (1 << 17)
464
#define PSR_s   (1 << 18)
465
#define PSR_f   (1 << 19)
466
 
467
struct reloc_entry
468
{
469
  char *                    name;
470
  bfd_reloc_code_real_type  reloc;
471
};
472
 
473
enum vfp_reg_pos
474
{
475
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
476
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
477
};
478
 
479
enum vfp_ldstm_type
480
{
481
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
482
};
483
 
484
/* Bits for DEFINED field in neon_typed_alias.  */
485
#define NTA_HASTYPE  1
486
#define NTA_HASINDEX 2
487
 
488
struct neon_typed_alias
489
{
490
  unsigned char        defined;
491
  unsigned char        index;
492
  struct neon_type_el  eltype;
493
};
494
 
495
/* ARM register categories.  This includes coprocessor numbers and various
496
   architecture extensions' registers.  */
497
enum arm_reg_type
498
{
499
  REG_TYPE_RN,
500
  REG_TYPE_CP,
501
  REG_TYPE_CN,
502
  REG_TYPE_FN,
503
  REG_TYPE_VFS,
504
  REG_TYPE_VFD,
505
  REG_TYPE_NQ,
506
  REG_TYPE_VFSD,
507
  REG_TYPE_NDQ,
508
  REG_TYPE_NSDQ,
509
  REG_TYPE_VFC,
510
  REG_TYPE_MVF,
511
  REG_TYPE_MVD,
512
  REG_TYPE_MVFX,
513
  REG_TYPE_MVDX,
514
  REG_TYPE_MVAX,
515
  REG_TYPE_DSPSC,
516
  REG_TYPE_MMXWR,
517
  REG_TYPE_MMXWC,
518
  REG_TYPE_MMXWCG,
519
  REG_TYPE_XSCALE,
520
  REG_TYPE_RNB
521
};
522
 
523
/* Structure for a hash table entry for a register.
524
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
525
   information which states whether a vector type or index is specified (for a
526
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
527
struct reg_entry
528
{
529
  const char *               name;
530
  unsigned int               number;
531
  unsigned char              type;
532
  unsigned char              builtin;
533
  struct neon_typed_alias *  neon;
534
};
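/* Illustrative example (an assumption, not part of the original source):
   a register alias created with

       neonreg .dn d5.s16[2]

   would give a reg_entry for D5 whose NEON field has
   defined = NTA_HASTYPE | NTA_HASINDEX, eltype = { NT_signed, 16 } and
   index = 2, so later uses of "neonreg" carry that type and lane.  */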
535
 
536
/* Diagnostics used when we don't get a register of the expected type.  */
537
const char * const reg_expected_msgs[] =
538
{
539
  N_("ARM register expected"),
540
  N_("bad or missing co-processor number"),
541
  N_("co-processor register expected"),
542
  N_("FPA register expected"),
543
  N_("VFP single precision register expected"),
544
  N_("VFP/Neon double precision register expected"),
545
  N_("Neon quad precision register expected"),
546
  N_("VFP single or double precision register expected"),
547
  N_("Neon double or quad precision register expected"),
548
  N_("VFP single, double or Neon quad precision register expected"),
549
  N_("VFP system register expected"),
550
  N_("Maverick MVF register expected"),
551
  N_("Maverick MVD register expected"),
552
  N_("Maverick MVFX register expected"),
553
  N_("Maverick MVDX register expected"),
554
  N_("Maverick MVAX register expected"),
555
  N_("Maverick DSPSC register expected"),
556
  N_("iWMMXt data register expected"),
557
  N_("iWMMXt control register expected"),
558
  N_("iWMMXt scalar register expected"),
559
  N_("XScale accumulator register expected"),
560
};
561
 
562
/* Some well known registers that we refer to directly elsewhere.  */
563
#define REG_SP  13
564
#define REG_LR  14
565
#define REG_PC  15
566
 
567
/* ARM instructions take 4 bytes in the object file, Thumb instructions
568
   take 2:  */
569
#define INSN_SIZE       4
570
 
571
struct asm_opcode
572
{
573
  /* Basic string to match.  */
574
  const char * template_name;
575
 
576
  /* Parameters to instruction.  */
577
  unsigned int operands[8];
578
 
579
  /* Conditional tag - see opcode_lookup.  */
580
  unsigned int tag : 4;
581
 
582
  /* Basic instruction code.  */
583
  unsigned int avalue : 28;
584
 
585
  /* Thumb-format instruction code.  */
586
  unsigned int tvalue;
587
 
588
  /* Which architecture variant provides this instruction.  */
589
  const arm_feature_set * avariant;
590
  const arm_feature_set * tvariant;
591
 
592
  /* Function to call to encode instruction in ARM format.  */
593
  void (* aencode) (void);
594
 
595
  /* Function to call to encode instruction in Thumb format.  */
596
  void (* tencode) (void);
597
};
598
 
599
/* Defines for various bits that we will want to toggle.  */
600
#define INST_IMMEDIATE  0x02000000
601
#define OFFSET_REG      0x02000000
602
#define HWOFFSET_IMM    0x00400000
603
#define SHIFT_BY_REG    0x00000010
604
#define PRE_INDEX       0x01000000
605
#define INDEX_UP        0x00800000
606
#define WRITE_BACK      0x00200000
607
#define LDM_TYPE_2_OR_3 0x00400000
608
#define CPSI_MMOD       0x00020000
609
 
610
#define LITERAL_MASK    0xf000f000
611
#define OPCODE_MASK     0xfe1fffff
612
#define V4_STR_BIT      0x00000020
613
 
614
#define T2_SUBS_PC_LR   0xf3de8f00
615
 
616
#define DATA_OP_SHIFT   21
617
 
618
#define T2_OPCODE_MASK  0xfe1fffff
619
#define T2_DATA_OP_SHIFT 21
620
 
621
/* Codes to distinguish the arithmetic instructions.  */
622
#define OPCODE_AND      0
623
#define OPCODE_EOR      1
624
#define OPCODE_SUB      2
625
#define OPCODE_RSB      3
626
#define OPCODE_ADD      4
627
#define OPCODE_ADC      5
628
#define OPCODE_SBC      6
629
#define OPCODE_RSC      7
630
#define OPCODE_TST      8
631
#define OPCODE_TEQ      9
632
#define OPCODE_CMP      10
633
#define OPCODE_CMN      11
634
#define OPCODE_ORR      12
635
#define OPCODE_MOV      13
636
#define OPCODE_BIC      14
637
#define OPCODE_MVN      15
638
 
639
#define T2_OPCODE_AND   0
640
#define T2_OPCODE_BIC   1
641
#define T2_OPCODE_ORR   2
642
#define T2_OPCODE_ORN   3
643
#define T2_OPCODE_EOR   4
644
#define T2_OPCODE_ADD   8
645
#define T2_OPCODE_ADC   10
646
#define T2_OPCODE_SBC   11
647
#define T2_OPCODE_SUB   13
648
#define T2_OPCODE_RSB   14
649
 
650
#define T_OPCODE_MUL 0x4340
651
#define T_OPCODE_TST 0x4200
652
#define T_OPCODE_CMN 0x42c0
653
#define T_OPCODE_NEG 0x4240
654
#define T_OPCODE_MVN 0x43c0
655
 
656
#define T_OPCODE_ADD_R3 0x1800
657
#define T_OPCODE_SUB_R3 0x1a00
658
#define T_OPCODE_ADD_HI 0x4400
659
#define T_OPCODE_ADD_ST 0xb000
660
#define T_OPCODE_SUB_ST 0xb080
661
#define T_OPCODE_ADD_SP 0xa800
662
#define T_OPCODE_ADD_PC 0xa000
663
#define T_OPCODE_ADD_I8 0x3000
664
#define T_OPCODE_SUB_I8 0x3800
665
#define T_OPCODE_ADD_I3 0x1c00
666
#define T_OPCODE_SUB_I3 0x1e00
667
 
668
#define T_OPCODE_ASR_R  0x4100
669
#define T_OPCODE_LSL_R  0x4080
670
#define T_OPCODE_LSR_R  0x40c0
671
#define T_OPCODE_ROR_R  0x41c0
672
#define T_OPCODE_ASR_I  0x1000
673
#define T_OPCODE_LSL_I  0x0000
674
#define T_OPCODE_LSR_I  0x0800
675
 
676
#define T_OPCODE_MOV_I8 0x2000
677
#define T_OPCODE_CMP_I8 0x2800
678
#define T_OPCODE_CMP_LR 0x4280
679
#define T_OPCODE_MOV_HR 0x4600
680
#define T_OPCODE_CMP_HR 0x4500
681
 
682
#define T_OPCODE_LDR_PC 0x4800
683
#define T_OPCODE_LDR_SP 0x9800
684
#define T_OPCODE_STR_SP 0x9000
685
#define T_OPCODE_LDR_IW 0x6800
686
#define T_OPCODE_STR_IW 0x6000
687
#define T_OPCODE_LDR_IH 0x8800
688
#define T_OPCODE_STR_IH 0x8000
689
#define T_OPCODE_LDR_IB 0x7800
690
#define T_OPCODE_STR_IB 0x7000
691
#define T_OPCODE_LDR_RW 0x5800
692
#define T_OPCODE_STR_RW 0x5000
693
#define T_OPCODE_LDR_RH 0x5a00
694
#define T_OPCODE_STR_RH 0x5200
695
#define T_OPCODE_LDR_RB 0x5c00
696
#define T_OPCODE_STR_RB 0x5400
697
 
698
#define T_OPCODE_PUSH   0xb400
699
#define T_OPCODE_POP    0xbc00
700
 
701
#define T_OPCODE_BRANCH 0xe000
702
 
703
#define THUMB_SIZE      2       /* Size of thumb instruction.  */
704
#define THUMB_PP_PC_LR 0x0100
705
#define THUMB_LOAD_BIT 0x0800
706
#define THUMB2_LOAD_BIT 0x00100000
707
 
708
#define BAD_ARGS        _("bad arguments to instruction")
709
#define BAD_SP          _("r13 not allowed here")
710
#define BAD_PC          _("r15 not allowed here")
711
#define BAD_COND        _("instruction cannot be conditional")
712
#define BAD_OVERLAP     _("registers may not be the same")
713
#define BAD_HIREG       _("lo register required")
714
#define BAD_THUMB32     _("instruction not supported in Thumb16 mode")
715
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode");
716
#define BAD_BRANCH      _("branch must be last instruction in IT block")
717
#define BAD_NOT_IT      _("instruction not allowed in IT block")
718
#define BAD_FPU         _("selected FPU does not support instruction")
719
#define BAD_OUT_IT      _("thumb conditional instruction should be in IT block")
720
#define BAD_IT_COND     _("incorrect condition in IT block")
721
#define BAD_IT_IT       _("IT falling in the range of a previous IT block")
722
#define MISSING_FNSTART _("missing .fnstart before unwinding directive")
723
#define BAD_PC_ADDRESSING \
724
        _("cannot use register index with PC-relative addressing")
725
#define BAD_PC_WRITEBACK \
726
        _("cannot use writeback with PC-relative addressing")
727 160 khays
#define BAD_RANGE     _("branch out of range")
728 16 khays
 
729
static struct hash_control * arm_ops_hsh;
730
static struct hash_control * arm_cond_hsh;
731
static struct hash_control * arm_shift_hsh;
732
static struct hash_control * arm_psr_hsh;
733
static struct hash_control * arm_v7m_psr_hsh;
734
static struct hash_control * arm_reg_hsh;
735
static struct hash_control * arm_reloc_hsh;
736
static struct hash_control * arm_barrier_opt_hsh;
737
 
738
/* Stuff needed to resolve the label ambiguity
739
   As:
740
     ...
741
     label:   <insn>
742
   may differ from:
743
     ...
744
     label:
745
              <insn>  */
746
 
747
symbolS *  last_label_seen;
748
static int label_is_thumb_function_name = FALSE;
749
 
750
/* Literal pool structure.  Held on a per-section
751
   and per-sub-section basis.  */
752
 
753
#define MAX_LITERAL_POOL_SIZE 1024
754
typedef struct literal_pool
755
{
756
  expressionS            literals [MAX_LITERAL_POOL_SIZE];
757
  unsigned int           next_free_entry;
758
  unsigned int           id;
759
  symbolS *              symbol;
760
  segT                   section;
761
  subsegT                sub_section;
762 160 khays
#ifdef OBJ_ELF
763
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
764
#endif
765 16 khays
  struct literal_pool *  next;
766
} literal_pool;
767
 
768
/* Pointer to a linked list of literal pools.  */
769
literal_pool * list_of_pools = NULL;
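/* Illustrative note (an assumption, not from the original source): a
   pseudo-instruction such as

       ldr r0, =0x12345678

   whose constant cannot be encoded directly places the value in the
   literal pool of the current section/sub-section; the pool is dumped at
   an ".ltorg" directive (or at the end of the section) and the LDR is
   turned into a PC-relative load of the pooled word.  */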
770
 
771
#ifdef OBJ_ELF
772
#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
773
#else
774
static struct current_it now_it;
775
#endif
776
 
777
static inline int
778
now_it_compatible (int cond)
779
{
780
  return (cond & ~1) == (now_it.cc & ~1);
781
}
782
 
783
static inline int
784
conditional_insn (void)
785
{
786
  return inst.cond != COND_ALWAYS;
787
}
788
 
789
static int in_it_block (void);
790
 
791
static int handle_it_state (void);
792
 
793
static void force_automatic_it_block_close (void);
794
 
795
static void it_fsm_post_encode (void);
796
 
797
#define set_it_insn_type(type)                  \
798
  do                                            \
799
    {                                           \
800
      inst.it_insn_type = type;                 \
801
      if (handle_it_state () == FAIL)           \
802
        return;                                 \
803
    }                                           \
804
  while (0)
805
 
806
#define set_it_insn_type_nonvoid(type, failret) \
807
  do                                            \
808
    {                                           \
809
      inst.it_insn_type = type;                 \
810
      if (handle_it_state () == FAIL)           \
811
        return failret;                         \
812
    }                                           \
813
  while(0)
814
 
815
#define set_it_insn_type_last()                         \
816
  do                                                    \
817
    {                                                   \
818
      if (inst.cond == COND_ALWAYS)                     \
819
        set_it_insn_type (IF_INSIDE_IT_LAST_INSN);      \
820
      else                                              \
821
        set_it_insn_type (INSIDE_IT_LAST_INSN);         \
822
    }                                                   \
823
  while (0)
824
 
825
/* Pure syntax.  */
826
 
827
/* This array holds the chars that always start a comment.  If the
828
   pre-processor is disabled, these aren't very useful.  */
829
const char comment_chars[] = "@";
830
 
831
/* This array holds the chars that only start a comment at the beginning of
832
   a line.  If the line seems to have the form '# 123 filename'
833
   .line and .file directives will appear in the pre-processed output.  */
834
/* Note that input_file.c hand checks for '#' at the beginning of the
835
   first line of the input file.  This is because the compiler outputs
836
   #NO_APP at the beginning of its output.  */
837
/* Also note that comments like this one will always work.  */
838
const char line_comment_chars[] = "#";
839
 
840
const char line_separator_chars[] = ";";
841
 
842
/* Chars that can be used to separate mant
843
   from exp in floating point numbers.  */
844
const char EXP_CHARS[] = "eE";
845
 
846
/* Chars that mean this number is a floating point constant.  */
847
/* As in 0f12.456  */
848
/* or    0d1.2345e12  */
849
 
850
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
851
 
852
/* Prefix characters that indicate the start of an immediate
853
   value.  */
854
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
855
 
856
/* Separator character handling.  */
857
 
858
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
859
 
860
static inline int
861
skip_past_char (char ** str, char c)
862
{
863
  if (**str == c)
864
    {
865
      (*str)++;
866
      return SUCCESS;
867
    }
868
  else
869
    return FAIL;
870
}
871
 
872
#define skip_past_comma(str) skip_past_char (str, ',')
873
 
874
/* Arithmetic expressions (possibly involving symbols).  */
875
 
876
/* Return TRUE if anything in the expression is a bignum.  */
877
 
878
static int
879
walk_no_bignums (symbolS * sp)
880
{
881
  if (symbol_get_value_expression (sp)->X_op == O_big)
882
    return 1;
883
 
884
  if (symbol_get_value_expression (sp)->X_add_symbol)
885
    {
886
      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
887
              || (symbol_get_value_expression (sp)->X_op_symbol
888
                  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
889
    }
890
 
891
  return 0;
892
}
893
 
894
static int in_my_get_expression = 0;
895
 
896
/* Third argument to my_get_expression.  */
897
#define GE_NO_PREFIX 0
898
#define GE_IMM_PREFIX 1
899
#define GE_OPT_PREFIX 2
900
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
901
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
902
#define GE_OPT_PREFIX_BIG 3
903
 
904
static int
905
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
906
{
907
  char * save_in;
908
  segT   seg;
909
 
910
  /* In unified syntax, all prefixes are optional.  */
911
  if (unified_syntax)
912
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
913
                  : GE_OPT_PREFIX;
914
 
915
  switch (prefix_mode)
916
    {
917
    case GE_NO_PREFIX: break;
918
    case GE_IMM_PREFIX:
919
      if (!is_immediate_prefix (**str))
920
        {
921
          inst.error = _("immediate expression requires a # prefix");
922
          return FAIL;
923
        }
924
      (*str)++;
925
      break;
926
    case GE_OPT_PREFIX:
927
    case GE_OPT_PREFIX_BIG:
928
      if (is_immediate_prefix (**str))
929
        (*str)++;
930
      break;
931
    default: abort ();
932
    }
933
 
934
  memset (ep, 0, sizeof (expressionS));
935
 
936
  save_in = input_line_pointer;
937
  input_line_pointer = *str;
938
  in_my_get_expression = 1;
939
  seg = expression (ep);
940
  in_my_get_expression = 0;
941
 
942
  if (ep->X_op == O_illegal || ep->X_op == O_absent)
943
    {
944
      /* We found a bad or missing expression in md_operand().  */
945
      *str = input_line_pointer;
946
      input_line_pointer = save_in;
947
      if (inst.error == NULL)
948
        inst.error = (ep->X_op == O_absent
949
                      ? _("missing expression") :_("bad expression"));
950
      return 1;
951
    }
952
 
953
#ifdef OBJ_AOUT
954
  if (seg != absolute_section
955
      && seg != text_section
956
      && seg != data_section
957
      && seg != bss_section
958
      && seg != undefined_section)
959
    {
960
      inst.error = _("bad segment");
961
      *str = input_line_pointer;
962
      input_line_pointer = save_in;
963
      return 1;
964
    }
965
#else
966
  (void) seg;
967
#endif
968
 
969
  /* Get rid of any bignums now, so that we don't generate an error for which
970
     we can't establish a line number later on.  Big numbers are never valid
971
     in instructions, which is where this routine is always called.  */
972
  if (prefix_mode != GE_OPT_PREFIX_BIG
973
      && (ep->X_op == O_big
974
          || (ep->X_add_symbol
975
              && (walk_no_bignums (ep->X_add_symbol)
976
                  || (ep->X_op_symbol
977
                      && walk_no_bignums (ep->X_op_symbol))))))
978
    {
979
      inst.error = _("invalid constant");
980
      *str = input_line_pointer;
981
      input_line_pointer = save_in;
982
      return 1;
983
    }
984
 
985
  *str = input_line_pointer;
986
  input_line_pointer = save_in;
987
  return 0;
988
}
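/* Illustrative example (an assumption, not part of the original source):
   with GE_IMM_PREFIX the operand must be written "#4" (or "$4", see
   is_immediate_prefix); with GE_OPT_PREFIX, and always under unified
   syntax, a bare "4" is accepted as well.  */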
989
 
990
/* Turn a string in input_line_pointer into a floating point constant
991
   of type TYPE, and store the appropriate bytes in *LITP.  The number
992
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
993
   returned, or NULL on OK.
994
 
995
   Note that fp constants aren't represented in the normal way on the ARM.
996
   In big endian mode, things are as expected.  However, in little endian
997
   mode fp constants are big-endian word-wise, and little-endian byte-wise
998
   within the words.  For example, (double) 1.1 in big endian mode is
999
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1000
   the byte sequence 99 99 f1 3f 9a 99 99 99.
1001
 
1002
   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
1003
 
1004
char *
1005
md_atof (int type, char * litP, int * sizeP)
1006
{
1007
  int prec;
1008
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
1009
  char *t;
1010
  int i;
1011
 
1012
  switch (type)
1013
    {
1014
    case 'f':
1015
    case 'F':
1016
    case 's':
1017
    case 'S':
1018
      prec = 2;
1019
      break;
1020
 
1021
    case 'd':
1022
    case 'D':
1023
    case 'r':
1024
    case 'R':
1025
      prec = 4;
1026
      break;
1027
 
1028
    case 'x':
1029
    case 'X':
1030
      prec = 5;
1031
      break;
1032
 
1033
    case 'p':
1034
    case 'P':
1035
      prec = 5;
1036
      break;
1037
 
1038
    default:
1039
      *sizeP = 0;
1040
      return _("Unrecognized or unsupported floating point constant");
1041
    }
1042
 
1043
  t = atof_ieee (input_line_pointer, type, words);
1044
  if (t)
1045
    input_line_pointer = t;
1046
  *sizeP = prec * sizeof (LITTLENUM_TYPE);
1047
 
1048
  if (target_big_endian)
1049
    {
1050
      for (i = 0; i < prec; i++)
1051
        {
1052
          md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1053
          litP += sizeof (LITTLENUM_TYPE);
1054
        }
1055
    }
1056
  else
1057
    {
1058
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1059
        for (i = prec - 1; i >= 0; i--)
1060
          {
1061
            md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1062
            litP += sizeof (LITTLENUM_TYPE);
1063
          }
1064
      else
1065
        /* For a 4 byte float the order of elements in `words' is 1 0.
1066
           For an 8 byte float the order is 1 0 3 2.  */
1067
        for (i = 0; i < prec; i += 2)
1068
          {
1069
            md_number_to_chars (litP, (valueT) words[i + 1],
1070
                                sizeof (LITTLENUM_TYPE));
1071
            md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1072
                                (valueT) words[i], sizeof (LITTLENUM_TYPE));
1073
            litP += 2 * sizeof (LITTLENUM_TYPE);
1074
          }
1075
    }
1076
 
1077
  return NULL;
1078
}
1079
 
1080
/* We handle all bad expressions here, so that we can report the faulty
1081
   instruction in the error message.  */
1082
void
1083
md_operand (expressionS * exp)
1084
{
1085
  if (in_my_get_expression)
1086
    exp->X_op = O_illegal;
1087
}
1088
 
1089
/* Immediate values.  */
1090
 
1091
/* Generic immediate-value read function for use in directives.
1092
   Accepts anything that 'expression' can fold to a constant.
1093
   *val receives the number.  */
1094
#ifdef OBJ_ELF
1095
static int
1096
immediate_for_directive (int *val)
1097
{
1098
  expressionS exp;
1099
  exp.X_op = O_illegal;
1100
 
1101
  if (is_immediate_prefix (*input_line_pointer))
1102
    {
1103
      input_line_pointer++;
1104
      expression (&exp);
1105
    }
1106
 
1107
  if (exp.X_op != O_constant)
1108
    {
1109
      as_bad (_("expected #constant"));
1110
      ignore_rest_of_line ();
1111
      return FAIL;
1112
    }
1113
  *val = exp.X_add_number;
1114
  return SUCCESS;
1115
}
1116
#endif
1117
 
1118
/* Register parsing.  */
1119
 
1120
/* Generic register parser.  CCP points to what should be the
1121
   beginning of a register name.  If it is indeed a valid register
1122
   name, advance CCP over it and return the reg_entry structure;
1123
   otherwise return NULL.  Does not issue diagnostics.  */
1124
 
1125
static struct reg_entry *
1126
arm_reg_parse_multi (char **ccp)
1127
{
1128
  char *start = *ccp;
1129
  char *p;
1130
  struct reg_entry *reg;
1131
 
1132
#ifdef REGISTER_PREFIX
1133
  if (*start != REGISTER_PREFIX)
1134
    return NULL;
1135
  start++;
1136
#endif
1137
#ifdef OPTIONAL_REGISTER_PREFIX
1138
  if (*start == OPTIONAL_REGISTER_PREFIX)
1139
    start++;
1140
#endif
1141
 
1142
  p = start;
1143
  if (!ISALPHA (*p) || !is_name_beginner (*p))
1144
    return NULL;
1145
 
1146
  do
1147
    p++;
1148
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1149
 
1150
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1151
 
1152
  if (!reg)
1153
    return NULL;
1154
 
1155
  *ccp = p;
1156
  return reg;
1157
}
1158
 
1159
static int
1160
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1161
                    enum arm_reg_type type)
1162
{
1163
  /* Alternative syntaxes are accepted for a few register classes.  */
1164
  switch (type)
1165
    {
1166
    case REG_TYPE_MVF:
1167
    case REG_TYPE_MVD:
1168
    case REG_TYPE_MVFX:
1169
    case REG_TYPE_MVDX:
1170
      /* Generic coprocessor register names are allowed for these.  */
1171
      if (reg && reg->type == REG_TYPE_CN)
1172
        return reg->number;
1173
      break;
1174
 
1175
    case REG_TYPE_CP:
1176
      /* For backward compatibility, a bare number is valid here.  */
1177
      {
1178
        unsigned long processor = strtoul (start, ccp, 10);
1179
        if (*ccp != start && processor <= 15)
1180
          return processor;
1181
      }
1182
 
1183
    case REG_TYPE_MMXWC:
1184
      /* WC includes WCG.  ??? I'm not sure this is true for all
1185
         instructions that take WC registers.  */
1186
      if (reg && reg->type == REG_TYPE_MMXWCG)
1187
        return reg->number;
1188
      break;
1189
 
1190
    default:
1191
      break;
1192
    }
1193
 
1194
  return FAIL;
1195
}
1196
 
1197
/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1198
   return value is the register number or FAIL.  */
1199
 
1200
static int
1201
arm_reg_parse (char **ccp, enum arm_reg_type type)
1202
{
1203
  char *start = *ccp;
1204
  struct reg_entry *reg = arm_reg_parse_multi (ccp);
1205
  int ret;
1206
 
1207
  /* Do not allow a scalar (reg+index) to parse as a register.  */
1208
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1209
    return FAIL;
1210
 
1211
  if (reg && reg->type == type)
1212
    return reg->number;
1213
 
1214
  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1215
    return ret;
1216
 
1217
  *ccp = start;
1218
  return FAIL;
1219
}
1220
 
1221
/* Parse a Neon type specifier. *STR should point at the leading '.'
1222
   character. Does no verification at this stage that the type fits the opcode
1223
   properly. E.g.,
1224
 
1225
     .i32.i32.s16
1226
     .s32.f32
1227
     .u16
1228
 
1229
   Can all be legally parsed by this function.
1230
 
1231
   Fills in neon_type struct pointer with parsed information, and updates STR
1232
   to point after the parsed type specifier. Returns SUCCESS if this was a legal
1233
   type, FAIL if not.  */
1234
 
1235
static int
1236
parse_neon_type (struct neon_type *type, char **str)
1237
{
1238
  char *ptr = *str;
1239
 
1240
  if (type)
1241
    type->elems = 0;
1242
 
1243
  while (type->elems < NEON_MAX_TYPE_ELS)
1244
    {
1245
      enum neon_el_type thistype = NT_untyped;
1246
      unsigned thissize = -1u;
1247
 
1248
      if (*ptr != '.')
1249
        break;
1250
 
1251
      ptr++;
1252
 
1253
      /* Just a size without an explicit type.  */
1254
      if (ISDIGIT (*ptr))
1255
        goto parsesize;
1256
 
1257
      switch (TOLOWER (*ptr))
1258
        {
1259
        case 'i': thistype = NT_integer; break;
1260
        case 'f': thistype = NT_float; break;
1261
        case 'p': thistype = NT_poly; break;
1262
        case 's': thistype = NT_signed; break;
1263
        case 'u': thistype = NT_unsigned; break;
1264
        case 'd':
1265
          thistype = NT_float;
1266
          thissize = 64;
1267
          ptr++;
1268
          goto done;
1269
        default:
1270
          as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1271
          return FAIL;
1272
        }
1273
 
1274
      ptr++;
1275
 
1276
      /* .f is an abbreviation for .f32.  */
1277
      if (thistype == NT_float && !ISDIGIT (*ptr))
1278
        thissize = 32;
1279
      else
1280
        {
1281
        parsesize:
1282
          thissize = strtoul (ptr, &ptr, 10);
1283
 
1284
          if (thissize != 8 && thissize != 16 && thissize != 32
1285
              && thissize != 64)
1286
            {
1287
              as_bad (_("bad size %d in type specifier"), thissize);
1288
              return FAIL;
1289
            }
1290
        }
1291
 
1292
      done:
1293
      if (type)
1294
        {
1295
          type->el[type->elems].type = thistype;
1296
          type->el[type->elems].size = thissize;
1297
          type->elems++;
1298
        }
1299
    }
1300
 
1301
  /* Empty/missing type is not a successful parse.  */
1302
  if (type->elems == 0)
1303
    return FAIL;
1304
 
1305
  *str = ptr;
1306
 
1307
  return SUCCESS;
1308
}
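/* Illustrative example (an assumption, not from the original source):
   parsing ".s32.f32" above yields type->elems == 2 with
   el[0] = { NT_signed, 32 } and el[1] = { NT_float, 32 }, while a lone
   ".f" is treated as ".f32".  */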
1309
 
1310
/* Errors may be set multiple times during parsing or bit encoding
1311
   (particularly in the Neon bits), but usually the earliest error which is set
1312
   will be the most meaningful. Avoid overwriting it with later (cascading)
1313
   errors by calling this function.  */
1314
 
1315
static void
1316
first_error (const char *err)
1317
{
1318
  if (!inst.error)
1319
    inst.error = err;
1320
}
1321
 
1322
/* Parse a single type, e.g. ".s32", leading period included.  */
1323
static int
1324
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1325
{
1326
  char *str = *ccp;
1327
  struct neon_type optype;
1328
 
1329
  if (*str == '.')
1330
    {
1331
      if (parse_neon_type (&optype, &str) == SUCCESS)
1332
        {
1333
          if (optype.elems == 1)
1334
            *vectype = optype.el[0];
1335
          else
1336
            {
1337
              first_error (_("only one type should be specified for operand"));
1338
              return FAIL;
1339
            }
1340
        }
1341
      else
1342
        {
1343
          first_error (_("vector type expected"));
1344
          return FAIL;
1345
        }
1346
    }
1347
  else
1348
    return FAIL;
1349
 
1350
  *ccp = str;
1351
 
1352
  return SUCCESS;
1353
}
1354
 
1355
/* Special meanings for indices (which have a range of 0-7), which will fit into
1356
   a 4-bit integer.  */
1357
 
1358
#define NEON_ALL_LANES          15
1359
#define NEON_INTERLEAVE_LANES   14
1360
 
1361
/* Parse either a register or a scalar, with an optional type. Return the
1362
   register number, and optionally fill in the actual type of the register
1363
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1364
   type/index information in *TYPEINFO.  */
1365
 
1366
static int
1367
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1368
                           enum arm_reg_type *rtype,
1369
                           struct neon_typed_alias *typeinfo)
1370
{
1371
  char *str = *ccp;
1372
  struct reg_entry *reg = arm_reg_parse_multi (&str);
1373
  struct neon_typed_alias atype;
1374
  struct neon_type_el parsetype;
1375
 
1376
  atype.defined = 0;
1377
  atype.index = -1;
1378
  atype.eltype.type = NT_invtype;
1379
  atype.eltype.size = -1;
1380
 
1381
  /* Try alternate syntax for some types of register. Note these are mutually
1382
     exclusive with the Neon syntax extensions.  */
1383
  if (reg == NULL)
1384
    {
1385
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1386
      if (altreg != FAIL)
1387
        *ccp = str;
1388
      if (typeinfo)
1389
        *typeinfo = atype;
1390
      return altreg;
1391
    }
1392
 
1393
  /* Undo polymorphism when a set of register types may be accepted.  */
1394
  if ((type == REG_TYPE_NDQ
1395
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1396
      || (type == REG_TYPE_VFSD
1397
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1398
      || (type == REG_TYPE_NSDQ
1399
          && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1400
              || reg->type == REG_TYPE_NQ))
1401
      || (type == REG_TYPE_MMXWC
1402
          && (reg->type == REG_TYPE_MMXWCG)))
1403
    type = (enum arm_reg_type) reg->type;
1404
 
1405
  if (type != reg->type)
1406
    return FAIL;
1407
 
1408
  if (reg->neon)
1409
    atype = *reg->neon;
1410
 
1411
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1412
    {
1413
      if ((atype.defined & NTA_HASTYPE) != 0)
1414
        {
1415
          first_error (_("can't redefine type for operand"));
1416
          return FAIL;
1417
        }
1418
      atype.defined |= NTA_HASTYPE;
1419
      atype.eltype = parsetype;
1420
    }
1421
 
1422
  if (skip_past_char (&str, '[') == SUCCESS)
1423
    {
1424
      if (type != REG_TYPE_VFD)
1425
        {
1426
          first_error (_("only D registers may be indexed"));
1427
          return FAIL;
1428
        }
1429
 
1430
      if ((atype.defined & NTA_HASINDEX) != 0)
1431
        {
1432
          first_error (_("can't change index for operand"));
1433
          return FAIL;
1434
        }
1435
 
1436
      atype.defined |= NTA_HASINDEX;
1437
 
1438
      if (skip_past_char (&str, ']') == SUCCESS)
1439
        atype.index = NEON_ALL_LANES;
1440
      else
1441
        {
1442
          expressionS exp;
1443
 
1444
          my_get_expression (&exp, &str, GE_NO_PREFIX);
1445
 
1446
          if (exp.X_op != O_constant)
1447
            {
1448
              first_error (_("constant expression required"));
1449
              return FAIL;
1450
            }
1451
 
1452
          if (skip_past_char (&str, ']') == FAIL)
1453
            return FAIL;
1454
 
1455
          atype.index = exp.X_add_number;
1456
        }
1457
    }
1458
 
1459
  if (typeinfo)
1460
    *typeinfo = atype;
1461
 
1462
  if (rtype)
1463
    *rtype = type;
1464
 
1465
  *ccp = str;
1466
 
1467
  return reg->number;
1468
}
1469
 
1470
/* Like arm_reg_parse, but allow the following extra features:
1471
    - If RTYPE is non-zero, return the (possibly restricted) type of the
1472
      register (e.g. Neon double or quad reg when either has been requested).
1473
    - If this is a Neon vector type with additional type information, fill
1474
      in the struct pointed to by VECTYPE (if non-NULL).
1475
   This function will fault on encountering a scalar.  */
1476
 
1477
static int
1478
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1479
                     enum arm_reg_type *rtype, struct neon_type_el *vectype)
1480
{
1481
  struct neon_typed_alias atype;
1482
  char *str = *ccp;
1483
  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1484
 
1485
  if (reg == FAIL)
1486
    return FAIL;
1487
 
1488
  /* Do not allow regname(... to parse as a register.  */
1489
  if (*str == '(')
1490
    return FAIL;
1491
 
1492
  /* Do not allow a scalar (reg+index) to parse as a register.  */
1493
  if ((atype.defined & NTA_HASINDEX) != 0)
1494
    {
1495
      first_error (_("register operand expected, but got scalar"));
1496
      return FAIL;
1497
    }
1498
 
1499
  if (vectype)
1500
    *vectype = atype.eltype;
1501
 
1502
  *ccp = str;
1503
 
1504
  return reg;
1505
}
1506
 
1507
#define NEON_SCALAR_REG(X)      ((X) >> 4)
1508
#define NEON_SCALAR_INDEX(X)    ((X) & 15)
1509
 
1510
/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1511
   have enough information to be able to do a good job bounds-checking. So, we
1512
   just do easy checks here, and do further checks later.  */
1513
 
1514
static int
1515
parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1516
{
1517
  int reg;
1518
  char *str = *ccp;
1519
  struct neon_typed_alias atype;
1520
 
1521
  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1522
 
1523
  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1524
    return FAIL;
1525
 
1526
  if (atype.index == NEON_ALL_LANES)
1527
    {
1528
      first_error (_("scalar must have an index"));
1529
      return FAIL;
1530
    }
1531
  else if (atype.index >= 64 / elsize)
1532
    {
1533
      first_error (_("scalar index out of range"));
1534
      return FAIL;
1535
    }
1536
 
1537
  if (type)
1538
    *type = atype.eltype;
1539
 
1540
  *ccp = str;
1541
 
1542
  return reg * 16 + atype.index;
1543
}
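/* Illustrative example (an assumption, not from the original source): for
   "d3[2]" parse_scalar returns 3 * 16 + 2 == 50, so NEON_SCALAR_REG (50)
   recovers register 3 and NEON_SCALAR_INDEX (50) recovers lane 2.  */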
1544
 
1545
/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
1546
 
1547
static long
1548
parse_reg_list (char ** strp)
1549
{
1550
  char * str = * strp;
1551
  long   range = 0;
1552
  int    another_range;
1553
 
1554
  /* We come back here if we get ranges concatenated by '+' or '|'.  */
1555
  do
1556
    {
1557
      another_range = 0;
1558
 
1559
      if (*str == '{')
1560
        {
1561
          int in_range = 0;
1562
          int cur_reg = -1;
1563
 
1564
          str++;
1565
          do
1566
            {
1567
              int reg;
1568
 
1569
              if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1570
                {
1571
                  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1572
                  return FAIL;
1573
                }
1574
 
1575
              if (in_range)
1576
                {
1577
                  int i;
1578
 
1579
                  if (reg <= cur_reg)
1580
                    {
1581
                      first_error (_("bad range in register list"));
1582
                      return FAIL;
1583
                    }
1584
 
1585
                  for (i = cur_reg + 1; i < reg; i++)
1586
                    {
1587
                      if (range & (1 << i))
1588
                        as_tsktsk
1589
                          (_("Warning: duplicated register (r%d) in register list"),
1590
                           i);
1591
                      else
1592
                        range |= 1 << i;
1593
                    }
1594
                  in_range = 0;
1595
                }
1596
 
1597
              if (range & (1 << reg))
1598
                as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1599
                           reg);
1600
              else if (reg <= cur_reg)
1601
                as_tsktsk (_("Warning: register range not in ascending order"));
1602
 
1603
              range |= 1 << reg;
1604
              cur_reg = reg;
1605
            }
1606
          while (skip_past_comma (&str) != FAIL
1607
                 || (in_range = 1, *str++ == '-'));
1608
          str--;
1609
 
1610
          if (*str++ != '}')
1611
            {
1612
              first_error (_("missing `}'"));
1613
              return FAIL;
1614
            }
1615
        }
1616
      else
1617
        {
1618
          expressionS exp;
1619
 
1620
          if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1621
            return FAIL;
1622
 
1623
          if (exp.X_op == O_constant)
1624
            {
1625
              if (exp.X_add_number
1626
                  != (exp.X_add_number & 0x0000ffff))
1627
                {
1628
                  inst.error = _("invalid register mask");
1629
                  return FAIL;
1630
                }
1631
 
1632
              if ((range & exp.X_add_number) != 0)
1633
                {
1634
                  int regno = range & exp.X_add_number;
1635
 
1636
                  regno &= -regno;
1637
                  regno = (1 << regno) - 1;
1638
                  as_tsktsk
1639
                    (_("Warning: duplicated register (r%d) in register list"),
1640
                     regno);
1641
                }
1642
 
1643
              range |= exp.X_add_number;
1644
            }
1645
          else
1646
            {
1647
              if (inst.reloc.type != 0)
1648
                {
1649
                  inst.error = _("expression too complex");
1650
                  return FAIL;
1651
                }
1652
 
1653
              memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1654
              inst.reloc.type = BFD_RELOC_ARM_MULTI;
1655
              inst.reloc.pc_rel = 0;
1656
            }
1657
        }
1658
 
1659
      if (*str == '|' || *str == '+')
1660
        {
1661
          str++;
1662
          another_range = 1;
1663
        }
1664
    }
1665
  while (another_range);
1666
 
1667
  *strp = str;
1668
  return range;
1669
}
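/* [Editorial note -- illustrative example, not part of the original source.]
   For instance, parsing the operand "{r0, r2-r4}" with the function above
   accepts registers r0, r2, r3 and r4 and returns the mask 0x001d
   (bits 0, 2, 3 and 4 set), which is the form core register lists take
   in LDM/STM-style encodings.  */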
1670
 
1671
/* Types of registers in a list.  */
1672
 
1673
enum reg_list_els
1674
{
1675
  REGLIST_VFP_S,
1676
  REGLIST_VFP_D,
1677
  REGLIST_NEON_D
1678
};
1679
 
1680
/* Parse a VFP register list.  If the string is invalid return FAIL.
1681
   Otherwise return the number of registers, and set PBASE to the first
1682
   register.  Parses registers of type ETYPE.
1683
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1684
     - Q registers can be used to specify pairs of D registers
1685
     - { } can be omitted from around a singleton register list
1686
         FIXME: This is not implemented, as it would require backtracking in
1687
         some cases, e.g.:
1688
           vtbl.8 d3,d4,d5
1689
         This could be done (the meaning isn't really ambiguous), but doesn't
1690
         fit in well with the current parsing framework.
1691
     - 32 D registers may be used (also true for VFPv3).
1692
   FIXME: Types are ignored in these register lists, which is probably a
1693
   bug.  */
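/* [Editorial note -- illustrative examples, not part of the original source.]
   Typical operands accepted below include "{s0-s7}" (eight S registers,
   REGLIST_VFP_S), "{d0, d1, d2}" (three consecutive D registers), and,
   for REGLIST_NEON_D, "{q0-q1}", which is treated as the four D
   registers d0-d3.  */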
1694
 
1695
static int
1696
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1697
{
1698
  char *str = *ccp;
1699
  int base_reg;
1700
  int new_base;
1701
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
1702
  int max_regs = 0;
1703
  int count = 0;
1704
  int warned = 0;
1705
  unsigned long mask = 0;
1706
  int i;
1707
 
1708
  if (*str != '{')
1709
    {
1710
      inst.error = _("expecting {");
1711
      return FAIL;
1712
    }
1713
 
1714
  str++;
1715
 
1716
  switch (etype)
1717
    {
1718
    case REGLIST_VFP_S:
1719
      regtype = REG_TYPE_VFS;
1720
      max_regs = 32;
1721
      break;
1722
 
1723
    case REGLIST_VFP_D:
1724
      regtype = REG_TYPE_VFD;
1725
      break;
1726
 
1727
    case REGLIST_NEON_D:
1728
      regtype = REG_TYPE_NDQ;
1729
      break;
1730
    }
1731
 
1732
  if (etype != REGLIST_VFP_S)
1733
    {
1734
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
1735
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1736
        {
1737
          max_regs = 32;
1738
          if (thumb_mode)
1739
            ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1740
                                    fpu_vfp_ext_d32);
1741
          else
1742
            ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1743
                                    fpu_vfp_ext_d32);
1744
        }
1745
      else
1746
        max_regs = 16;
1747
    }
1748
 
1749
  base_reg = max_regs;
1750
 
1751
  do
1752
    {
1753
      int setmask = 1, addregs = 1;
1754
 
1755
      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1756
 
1757
      if (new_base == FAIL)
1758
        {
1759
          first_error (_(reg_expected_msgs[regtype]));
1760
          return FAIL;
1761
        }
1762
 
1763
      if (new_base >= max_regs)
1764
        {
1765
          first_error (_("register out of range in list"));
1766
          return FAIL;
1767
        }
1768
 
1769
      /* Note: a value of 2 * n is returned for the register Q<n>.  */
1770
      if (regtype == REG_TYPE_NQ)
1771
        {
1772
          setmask = 3;
1773
          addregs = 2;
1774
        }
1775
 
1776
      if (new_base < base_reg)
1777
        base_reg = new_base;
1778
 
1779
      if (mask & (setmask << new_base))
1780
        {
1781
          first_error (_("invalid register list"));
1782
          return FAIL;
1783
        }
1784
 
1785
      if ((mask >> new_base) != 0 && ! warned)
1786
        {
1787
          as_tsktsk (_("register list not in ascending order"));
1788
          warned = 1;
1789
        }
1790
 
1791
      mask |= setmask << new_base;
1792
      count += addregs;
1793
 
1794
      if (*str == '-') /* We have the start of a range expression.  */
1795
        {
1796
          int high_range;
1797
 
1798
          str++;
1799
 
1800
          if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1801
              == FAIL)
1802
            {
1803
              inst.error = gettext (reg_expected_msgs[regtype]);
1804
              return FAIL;
1805
            }
1806
 
1807
          if (high_range >= max_regs)
1808
            {
1809
              first_error (_("register out of range in list"));
1810
              return FAIL;
1811
            }
1812
 
1813
          if (regtype == REG_TYPE_NQ)
1814
            high_range = high_range + 1;
1815
 
1816
          if (high_range <= new_base)
1817
            {
1818
              inst.error = _("register range not in ascending order");
1819
              return FAIL;
1820
            }
1821
 
1822
          for (new_base += addregs; new_base <= high_range; new_base += addregs)
1823
            {
1824
              if (mask & (setmask << new_base))
1825
                {
1826
                  inst.error = _("invalid register list");
1827
                  return FAIL;
1828
                }
1829
 
1830
              mask |= setmask << new_base;
1831
              count += addregs;
1832
            }
1833
        }
1834
    }
1835
  while (skip_past_comma (&str) != FAIL);
1836
 
1837
  str++;
1838
 
1839
  /* Sanity check -- should have raised a parse error above.  */
1840
  if (count == 0 || count > max_regs)
1841
    abort ();
1842
 
1843
  *pbase = base_reg;
1844
 
1845
  /* Final test -- the registers must be consecutive.  */
1846
  mask >>= base_reg;
1847
  for (i = 0; i < count; i++)
1848
    {
1849
      if ((mask & (1u << i)) == 0)
1850
        {
1851
          inst.error = _("non-contiguous register range");
1852
          return FAIL;
1853
        }
1854
    }
1855
 
1856
  *ccp = str;
1857
 
1858
  return count;
1859
}
1860
 
1861
/* True if two alias types are the same.  */
1862
 
1863
static bfd_boolean
1864
neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1865
{
1866
  if (!a && !b)
1867
    return TRUE;
1868
 
1869
  if (!a || !b)
1870
    return FALSE;
1871
 
1872
  if (a->defined != b->defined)
1873
    return FALSE;
1874
 
1875
  if ((a->defined & NTA_HASTYPE) != 0
1876
      && (a->eltype.type != b->eltype.type
1877
          || a->eltype.size != b->eltype.size))
1878
    return FALSE;
1879
 
1880
  if ((a->defined & NTA_HASINDEX) != 0
1881
      && (a->index != b->index))
1882
    return FALSE;
1883
 
1884
  return TRUE;
1885
}
1886
 
1887
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1888
   The base register is put in *PBASE.
1889
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1890
   the return value.
1891
   The register stride (minus one) is put in bit 4 of the return value.
1892
   Bits [6:5] encode the list length (minus one).
1893
   The type of the list elements is put in *ELTYPE, if non-NULL.  */
1894
 
1895
#define NEON_LANE(X)            ((X) & 0xf)
1896
#define NEON_REG_STRIDE(X)      ((((X) >> 4) & 1) + 1)
1897
#define NEON_REGLIST_LENGTH(X)  ((((X) >> 5) & 3) + 1)
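/* [Editorial note -- illustrative example, not part of the original source.]
   For the operand "{d0[1]}" (as used by, e.g., "vld1.32 {d0[1]}, [r0]")
   the parser below sets *PBASE to 0 and returns lane 1, register stride 1
   and list length 1, i.e. 1 | ((1 - 1) << 4) | ((1 - 1) << 5) == 1.  */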
1898
 
1899
static int
1900
parse_neon_el_struct_list (char **str, unsigned *pbase,
1901
                           struct neon_type_el *eltype)
1902
{
1903
  char *ptr = *str;
1904
  int base_reg = -1;
1905
  int reg_incr = -1;
1906
  int count = 0;
1907
  int lane = -1;
1908
  int leading_brace = 0;
1909
  enum arm_reg_type rtype = REG_TYPE_NDQ;
1910
  const char *const incr_error = _("register stride must be 1 or 2");
1911
  const char *const type_error = _("mismatched element/structure types in list");
1912
  struct neon_typed_alias firsttype;
1913
 
1914
  if (skip_past_char (&ptr, '{') == SUCCESS)
1915
    leading_brace = 1;
1916
 
1917
  do
1918
    {
1919
      struct neon_typed_alias atype;
1920
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1921
 
1922
      if (getreg == FAIL)
1923
        {
1924
          first_error (_(reg_expected_msgs[rtype]));
1925
          return FAIL;
1926
        }
1927
 
1928
      if (base_reg == -1)
1929
        {
1930
          base_reg = getreg;
1931
          if (rtype == REG_TYPE_NQ)
1932
            {
1933
              reg_incr = 1;
1934
            }
1935
          firsttype = atype;
1936
        }
1937
      else if (reg_incr == -1)
1938
        {
1939
          reg_incr = getreg - base_reg;
1940
          if (reg_incr < 1 || reg_incr > 2)
1941
            {
1942
              first_error (_(incr_error));
1943
              return FAIL;
1944
            }
1945
        }
1946
      else if (getreg != base_reg + reg_incr * count)
1947
        {
1948
          first_error (_(incr_error));
1949
          return FAIL;
1950
        }
1951
 
1952
      if (! neon_alias_types_same (&atype, &firsttype))
1953
        {
1954
          first_error (_(type_error));
1955
          return FAIL;
1956
        }
1957
 
1958
      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1959
         modes.  */
1960
      if (ptr[0] == '-')
1961
        {
1962
          struct neon_typed_alias htype;
1963
          int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1964
          if (lane == -1)
1965
            lane = NEON_INTERLEAVE_LANES;
1966
          else if (lane != NEON_INTERLEAVE_LANES)
1967
            {
1968
              first_error (_(type_error));
1969
              return FAIL;
1970
            }
1971
          if (reg_incr == -1)
1972
            reg_incr = 1;
1973
          else if (reg_incr != 1)
1974
            {
1975
              first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1976
              return FAIL;
1977
            }
1978
          ptr++;
1979
          hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1980
          if (hireg == FAIL)
1981
            {
1982
              first_error (_(reg_expected_msgs[rtype]));
1983
              return FAIL;
1984
            }
1985
          if (! neon_alias_types_same (&htype, &firsttype))
1986
            {
1987
              first_error (_(type_error));
1988
              return FAIL;
1989
            }
1990
          count += hireg + dregs - getreg;
1991
          continue;
1992
        }
1993
 
1994
      /* If we're using Q registers, we can't use [] or [n] syntax.  */
1995
      if (rtype == REG_TYPE_NQ)
1996
        {
1997
          count += 2;
1998
          continue;
1999
        }
2000
 
2001
      if ((atype.defined & NTA_HASINDEX) != 0)
2002
        {
2003
          if (lane == -1)
2004
            lane = atype.index;
2005
          else if (lane != atype.index)
2006
            {
2007
              first_error (_(type_error));
2008
              return FAIL;
2009
            }
2010
        }
2011
      else if (lane == -1)
2012
        lane = NEON_INTERLEAVE_LANES;
2013
      else if (lane != NEON_INTERLEAVE_LANES)
2014
        {
2015
          first_error (_(type_error));
2016
          return FAIL;
2017
        }
2018
      count++;
2019
    }
2020
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2021
 
2022
  /* No lane set by [x]. We must be interleaving structures.  */
2023
  if (lane == -1)
2024
    lane = NEON_INTERLEAVE_LANES;
2025
 
2026
  /* Sanity check.  */
2027
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2028
      || (count > 1 && reg_incr == -1))
2029
    {
2030
      first_error (_("error parsing element/structure list"));
2031
      return FAIL;
2032
    }
2033
 
2034
  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2035
    {
2036
      first_error (_("expected }"));
2037
      return FAIL;
2038
    }
2039
 
2040
  if (reg_incr == -1)
2041
    reg_incr = 1;
2042
 
2043
  if (eltype)
2044
    *eltype = firsttype.eltype;
2045
 
2046
  *pbase = base_reg;
2047
  *str = ptr;
2048
 
2049
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2050
}
2051
 
2052
/* Parse an explicit relocation suffix on an expression.  This is
2053
   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
2054
   arm_reloc_hsh contains no entries, so this function can only
2055
   succeed if there is no () after the word.  Returns -1 on error,
2056
   BFD_RELOC_UNUSED if there wasn't any suffix.  */
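/* [Editorial note -- illustrative example, not part of the original source.]
   A data directive such as ".word sym(got)" reaches this parser with *STR
   pointing at "(got)"; the word between the parentheses is looked up in
   arm_reloc_hsh and the corresponding BFD relocation code is returned.  */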
2057 163 khays
 
2058 16 khays
static int
2059
parse_reloc (char **str)
2060
{
2061
  struct reloc_entry *r;
2062
  char *p, *q;
2063
 
2064
  if (**str != '(')
2065
    return BFD_RELOC_UNUSED;
2066
 
2067
  p = *str + 1;
2068
  q = p;
2069
 
2070
  while (*q && *q != ')' && *q != ',')
2071
    q++;
2072
  if (*q != ')')
2073
    return -1;
2074
 
2075
  if ((r = (struct reloc_entry *)
2076
       hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2077
    return -1;
2078
 
2079
  *str = q + 1;
2080
  return r->reloc;
2081
}
2082
 
2083
/* Directives: register aliases.  */
2084
 
2085
static struct reg_entry *
2086
insert_reg_alias (char *str, unsigned number, int type)
2087
{
2088
  struct reg_entry *new_reg;
2089
  const char *name;
2090
 
2091
  if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2092
    {
2093
      if (new_reg->builtin)
2094
        as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2095
 
2096
      /* Only warn about a redefinition if it's not defined as the
2097
         same register.  */
2098
      else if (new_reg->number != number || new_reg->type != type)
2099
        as_warn (_("ignoring redefinition of register alias '%s'"), str);
2100
 
2101
      return NULL;
2102
    }
2103
 
2104
  name = xstrdup (str);
2105
  new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2106
 
2107
  new_reg->name = name;
2108
  new_reg->number = number;
2109
  new_reg->type = type;
2110
  new_reg->builtin = FALSE;
2111
  new_reg->neon = NULL;
2112
 
2113
  if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2114
    abort ();
2115
 
2116
  return new_reg;
2117
}
2118
 
2119
static void
2120
insert_neon_reg_alias (char *str, int number, int type,
2121
                       struct neon_typed_alias *atype)
2122
{
2123
  struct reg_entry *reg = insert_reg_alias (str, number, type);
2124
 
2125
  if (!reg)
2126
    {
2127
      first_error (_("attempt to redefine typed alias"));
2128
      return;
2129
    }
2130
 
2131
  if (atype)
2132
    {
2133
      reg->neon = (struct neon_typed_alias *)
2134
          xmalloc (sizeof (struct neon_typed_alias));
2135
      *reg->neon = *atype;
2136
    }
2137
}
2138
 
2139
/* Look for the .req directive.  This is of the form:
2140
 
2141
        new_register_name .req existing_register_name
2142
 
2143
   If we find one, or if it looks sufficiently like one that we want to
2144
   handle any error here, return TRUE.  Otherwise return FALSE.  */
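/* [Editorial note -- illustrative example, not part of the original source.]
   Given "Acc .req r4", the code below creates the alias "Acc" as written,
   plus the all-uppercase form "ACC" and the all-lowercase form "acc", so
   later operands may use any of the three spellings.  */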
2145
 
2146
static bfd_boolean
2147
create_register_alias (char * newname, char *p)
2148
{
2149
  struct reg_entry *old;
2150
  char *oldname, *nbuf;
2151
  size_t nlen;
2152
 
2153
  /* The input scrubber ensures that whitespace after the mnemonic is
2154
     collapsed to single spaces.  */
2155
  oldname = p;
2156
  if (strncmp (oldname, " .req ", 6) != 0)
2157
    return FALSE;
2158
 
2159
  oldname += 6;
2160
  if (*oldname == '\0')
2161
    return FALSE;
2162
 
2163
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2164
  if (!old)
2165
    {
2166
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2167
      return TRUE;
2168
    }
2169
 
2170
  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2171
     the desired alias name, and p points to its end.  If not, then
2172
     the desired alias name is in the global original_case_string.  */
2173
#ifdef TC_CASE_SENSITIVE
2174
  nlen = p - newname;
2175
#else
2176
  newname = original_case_string;
2177
  nlen = strlen (newname);
2178
#endif
2179
 
2180
  nbuf = (char *) alloca (nlen + 1);
2181
  memcpy (nbuf, newname, nlen);
2182
  nbuf[nlen] = '\0';
2183
 
2184
  /* Create aliases under the new name as stated; an all-lowercase
2185
     version of the new name; and an all-uppercase version of the new
2186
     name.  */
2187
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2188
    {
2189
      for (p = nbuf; *p; p++)
2190
        *p = TOUPPER (*p);
2191
 
2192
      if (strncmp (nbuf, newname, nlen))
2193
        {
2194
          /* If this attempt to create an additional alias fails, do not bother
2195
             trying to create the all-lower case alias.  We will fail and issue
2196
             a second, duplicate error message.  This situation arises when the
2197
             programmer does something like:
2198
               foo .req r0
2199
               Foo .req r1
2200
             The second .req creates the "Foo" alias but then fails to create
2201
             the artificial FOO alias because it has already been created by the
2202
             first .req.  */
2203
          if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2204
            return TRUE;
2205
        }
2206
 
2207
      for (p = nbuf; *p; p++)
2208
        *p = TOLOWER (*p);
2209
 
2210
      if (strncmp (nbuf, newname, nlen))
2211
        insert_reg_alias (nbuf, old->number, old->type);
2212
    }
2213
 
2214
  return TRUE;
2215
}
2216
 
2217
/* Create a Neon typed/indexed register alias using directives, e.g.:
2218
     X .dn d5.s32[1]
2219
     Y .qn 6.s16
2220
     Z .dn d7
2221
     T .dn Z[0]
2222
   These typed registers can be used instead of the types specified after the
2223
   Neon mnemonic, so long as all operands given have types. Types can also be
2224
   specified directly, e.g.:
2225
     vadd d0.s32, d1.s32, d2.s32  */
2226
 
2227
static bfd_boolean
2228
create_neon_reg_alias (char *newname, char *p)
2229
{
2230
  enum arm_reg_type basetype;
2231
  struct reg_entry *basereg;
2232
  struct reg_entry mybasereg;
2233
  struct neon_type ntype;
2234
  struct neon_typed_alias typeinfo;
2235
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
2236
  int namelen;
2237
 
2238
  typeinfo.defined = 0;
2239
  typeinfo.eltype.type = NT_invtype;
2240
  typeinfo.eltype.size = -1;
2241
  typeinfo.index = -1;
2242
 
2243
  nameend = p;
2244
 
2245
  if (strncmp (p, " .dn ", 5) == 0)
2246
    basetype = REG_TYPE_VFD;
2247
  else if (strncmp (p, " .qn ", 5) == 0)
2248
    basetype = REG_TYPE_NQ;
2249
  else
2250
    return FALSE;
2251
 
2252
  p += 5;
2253
 
2254
  if (*p == '\0')
2255
    return FALSE;
2256
 
2257
  basereg = arm_reg_parse_multi (&p);
2258
 
2259
  if (basereg && basereg->type != basetype)
2260
    {
2261
      as_bad (_("bad type for register"));
2262
      return FALSE;
2263
    }
2264
 
2265
  if (basereg == NULL)
2266
    {
2267
      expressionS exp;
2268
      /* Try parsing as an integer.  */
2269
      my_get_expression (&exp, &p, GE_NO_PREFIX);
2270
      if (exp.X_op != O_constant)
2271
        {
2272
          as_bad (_("expression must be constant"));
2273
          return FALSE;
2274
        }
2275
      basereg = &mybasereg;
2276
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2277
                                                  : exp.X_add_number;
2278
      basereg->neon = 0;
2279
    }
2280
 
2281
  if (basereg->neon)
2282
    typeinfo = *basereg->neon;
2283
 
2284
  if (parse_neon_type (&ntype, &p) == SUCCESS)
2285
    {
2286
      /* We got a type.  */
2287
      if (typeinfo.defined & NTA_HASTYPE)
2288
        {
2289
          as_bad (_("can't redefine the type of a register alias"));
2290
          return FALSE;
2291
        }
2292
 
2293
      typeinfo.defined |= NTA_HASTYPE;
2294
      if (ntype.elems != 1)
2295
        {
2296
          as_bad (_("you must specify a single type only"));
2297
          return FALSE;
2298
        }
2299
      typeinfo.eltype = ntype.el[0];
2300
    }
2301
 
2302
  if (skip_past_char (&p, '[') == SUCCESS)
2303
    {
2304
      expressionS exp;
2305
      /* We got a scalar index.  */
2306
 
2307
      if (typeinfo.defined & NTA_HASINDEX)
2308
        {
2309
          as_bad (_("can't redefine the index of a scalar alias"));
2310
          return FALSE;
2311
        }
2312
 
2313
      my_get_expression (&exp, &p, GE_NO_PREFIX);
2314
 
2315
      if (exp.X_op != O_constant)
2316
        {
2317
          as_bad (_("scalar index must be constant"));
2318
          return FALSE;
2319
        }
2320
 
2321
      typeinfo.defined |= NTA_HASINDEX;
2322
      typeinfo.index = exp.X_add_number;
2323
 
2324
      if (skip_past_char (&p, ']') == FAIL)
2325
        {
2326
          as_bad (_("expecting ]"));
2327
          return FALSE;
2328
        }
2329
    }
2330
 
2331
  /* If TC_CASE_SENSITIVE is defined, then newname already points to
2332
     the desired alias name, and p points to its end.  If not, then
2333
     the desired alias name is in the global original_case_string.  */
2334
#ifdef TC_CASE_SENSITIVE
2335
  namelen = nameend - newname;
2336
#else
2337
  newname = original_case_string;
2338
  namelen = strlen (newname);
2339
#endif
2340
 
2341
  namebuf = (char *) alloca (namelen + 1);
2342
  strncpy (namebuf, newname, namelen);
2343
  namebuf[namelen] = '\0';
2344
 
2345
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
2346
                         typeinfo.defined != 0 ? &typeinfo : NULL);
2347
 
2348
  /* Insert name in all uppercase.  */
2349
  for (p = namebuf; *p; p++)
2350
    *p = TOUPPER (*p);
2351
 
2352
  if (strncmp (namebuf, newname, namelen))
2353
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2354
                           typeinfo.defined != 0 ? &typeinfo : NULL);
2355
 
2356
  /* Insert name in all lowercase.  */
2357
  for (p = namebuf; *p; p++)
2358
    *p = TOLOWER (*p);
2359
 
2360
  if (strncmp (namebuf, newname, namelen))
2361
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
2362
                           typeinfo.defined != 0 ? &typeinfo : NULL);
2363
 
2364
  return TRUE;
2365
}
2366
 
2367
/* Should never be called, as .req goes between the alias and the
2368
   register name, not at the beginning of the line.  */
2369
 
2370
static void
2371
s_req (int a ATTRIBUTE_UNUSED)
2372
{
2373
  as_bad (_("invalid syntax for .req directive"));
2374
}
2375
 
2376
static void
2377
s_dn (int a ATTRIBUTE_UNUSED)
2378
{
2379
  as_bad (_("invalid syntax for .dn directive"));
2380
}
2381
 
2382
static void
2383
s_qn (int a ATTRIBUTE_UNUSED)
2384
{
2385
  as_bad (_("invalid syntax for .qn directive"));
2386
}
2387
 
2388
/* The .unreq directive deletes an alias which was previously defined
2389
   by .req.  For example:
2390
 
2391
       my_alias .req r11
2392
       .unreq my_alias    */
2393
 
2394
static void
2395
s_unreq (int a ATTRIBUTE_UNUSED)
2396
{
2397
  char * name;
2398
  char saved_char;
2399
 
2400
  name = input_line_pointer;
2401
 
2402
  while (*input_line_pointer != 0
2403
         && *input_line_pointer != ' '
2404
         && *input_line_pointer != '\n')
2405
    ++input_line_pointer;
2406
 
2407
  saved_char = *input_line_pointer;
2408
  *input_line_pointer = 0;
2409
 
2410
  if (!*name)
2411
    as_bad (_("invalid syntax for .unreq directive"));
2412
  else
2413
    {
2414
      struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2415
                                                              name);
2416
 
2417
      if (!reg)
2418
        as_bad (_("unknown register alias '%s'"), name);
2419
      else if (reg->builtin)
2420
        as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2421
                 name);
2422
      else
2423
        {
2424
          char * p;
2425
          char * nbuf;
2426
 
2427
          hash_delete (arm_reg_hsh, name, FALSE);
2428
          free ((char *) reg->name);
2429
          if (reg->neon)
2430
            free (reg->neon);
2431
          free (reg);
2432
 
2433
          /* Also locate the all upper case and all lower case versions.
2434
             Do not complain if we cannot find one or the other as it
2435
             was probably deleted above.  */
2436
 
2437
          nbuf = strdup (name);
2438
          for (p = nbuf; *p; p++)
2439
            *p = TOUPPER (*p);
2440
          reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2441
          if (reg)
2442
            {
2443
              hash_delete (arm_reg_hsh, nbuf, FALSE);
2444
              free ((char *) reg->name);
2445
              if (reg->neon)
2446
                free (reg->neon);
2447
              free (reg);
2448
            }
2449
 
2450
          for (p = nbuf; *p; p++)
2451
            *p = TOLOWER (*p);
2452
          reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2453
          if (reg)
2454
            {
2455
              hash_delete (arm_reg_hsh, nbuf, FALSE);
2456
              free ((char *) reg->name);
2457
              if (reg->neon)
2458
                free (reg->neon);
2459
              free (reg);
2460
            }
2461
 
2462
          free (nbuf);
2463
        }
2464
    }
2465
 
2466
  *input_line_pointer = saved_char;
2467
  demand_empty_rest_of_line ();
2468
}
2469
 
2470
/* Directives: Instruction set selection.  */
2471
 
2472
#ifdef OBJ_ELF
2473
/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2474
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2475
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2476
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
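/* [Editorial note -- illustrative example, not part of the original source.]
   For a section that starts with ARM code, then a literal pool, then Thumb
   code, the assembler emits "$a" at offset 0, "$d" at the start of the pool
   data, and "$t" at the first Thumb instruction, so that tools can tell
   code from data when disassembling.  */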
2477
 
2478
/* Create a new mapping symbol for the transition to STATE.  */
2479
 
2480
static void
2481
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2482
{
2483
  symbolS * symbolP;
2484
  const char * symname;
2485
  int type;
2486
 
2487
  switch (state)
2488
    {
2489
    case MAP_DATA:
2490
      symname = "$d";
2491
      type = BSF_NO_FLAGS;
2492
      break;
2493
    case MAP_ARM:
2494
      symname = "$a";
2495
      type = BSF_NO_FLAGS;
2496
      break;
2497
    case MAP_THUMB:
2498
      symname = "$t";
2499
      type = BSF_NO_FLAGS;
2500
      break;
2501
    default:
2502
      abort ();
2503
    }
2504
 
2505
  symbolP = symbol_new (symname, now_seg, value, frag);
2506
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2507
 
2508
  switch (state)
2509
    {
2510
    case MAP_ARM:
2511
      THUMB_SET_FUNC (symbolP, 0);
2512
      ARM_SET_THUMB (symbolP, 0);
2513
      ARM_SET_INTERWORK (symbolP, support_interwork);
2514
      break;
2515
 
2516
    case MAP_THUMB:
2517
      THUMB_SET_FUNC (symbolP, 1);
2518
      ARM_SET_THUMB (symbolP, 1);
2519
      ARM_SET_INTERWORK (symbolP, support_interwork);
2520
      break;
2521
 
2522
    case MAP_DATA:
2523
    default:
2524
      break;
2525
    }
2526
 
2527
  /* Save the mapping symbols for future reference.  Also check that
2528
     we do not place two mapping symbols at the same offset within a
2529
     frag.  We'll handle overlap between frags in
2530
     check_mapping_symbols.
2531
 
2532
     If .fill or another data-filling directive generates zero-sized data,
2533
     the mapping symbol for the following code will have the same value
2534
     as the one generated for the data filling directive.  In this case,
2535
     we replace the old symbol with the new one at the same address.  */
2536
  if (value == 0)
2537
    {
2538
      if (frag->tc_frag_data.first_map != NULL)
2539
        {
2540
          know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2541
          symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2542
        }
2543
      frag->tc_frag_data.first_map = symbolP;
2544
    }
2545
  if (frag->tc_frag_data.last_map != NULL)
2546
    {
2547
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2548
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2549
        symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2550
    }
2551
  frag->tc_frag_data.last_map = symbolP;
2552
}
2553
 
2554
/* We must sometimes convert a region marked as code to data during
2555
   code alignment, if an odd number of bytes have to be padded.  The
2556
   code mapping symbol is pushed to an aligned address.  */
2557
 
2558
static void
2559
insert_data_mapping_symbol (enum mstate state,
2560
                            valueT value, fragS *frag, offsetT bytes)
2561
{
2562
  /* If there was already a mapping symbol, remove it.  */
2563
  if (frag->tc_frag_data.last_map != NULL
2564
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2565
    {
2566
      symbolS *symp = frag->tc_frag_data.last_map;
2567
 
2568
      if (value == 0)
2569
        {
2570
          know (frag->tc_frag_data.first_map == symp);
2571
          frag->tc_frag_data.first_map = NULL;
2572
        }
2573
      frag->tc_frag_data.last_map = NULL;
2574
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2575
    }
2576
 
2577
  make_mapping_symbol (MAP_DATA, value, frag);
2578
  make_mapping_symbol (state, value + bytes, frag);
2579
}
2580
 
2581
static void mapping_state_2 (enum mstate state, int max_chars);
2582
 
2583
/* Set the mapping state to STATE.  Only call this when about to
2584
   emit some STATE bytes to the file.  */
2585
 
2586
void
2587
mapping_state (enum mstate state)
2588
{
2589
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2590
 
2591
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
2592
 
2593
  if (mapstate == state)
2594
    /* The mapping symbol has already been emitted.
2595
       There is nothing else to do.  */
2596
    return;
2597 160 khays
 
2598
  if (state == MAP_ARM || state == MAP_THUMB)
2599
    /*  PR gas/12931
2600
        All ARM instructions require 4-byte alignment.
2601
        (Almost) all Thumb instructions require 2-byte alignment.
2602
 
2603
        When emitting instructions into any section, mark the section
2604
        appropriately.
2605
 
2606
        Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2607
        but themselves require 2-byte alignment; this applies to some
2608
        PC-relative forms.  However, these cases will involve implicit
2609
        literal pool generation or an explicit .align >=2, both of
2610
        which will cause the section to be marked with sufficient
2611
        alignment.  Thus, we don't handle those cases here.  */
2612
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2613
 
2614
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2615 16 khays
    /* This case is handled later, by the else-if branch on a subsequent call.  */
2616
    return;
2617
  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2618
          || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2619
    {
2620
      /* Only add the symbol if the offset is > 0:
2621
         if we're at the first frag, check it's size > 0;
2622
         if we're not at the first frag, then for sure
2623
            the offset is > 0.  */
2624
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2625
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2626
 
2627
      if (add_symbol)
2628
        make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2629
    }
2630
 
2631
  mapping_state_2 (state, 0);
2632
#undef TRANSITION
2633
}
2634
 
2635
/* Same as mapping_state, but MAX_CHARS bytes have already been
2636
   allocated.  Put the mapping symbol that far back.  */
2637
 
2638
static void
2639
mapping_state_2 (enum mstate state, int max_chars)
2640
{
2641
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2642
 
2643
  if (!SEG_NORMAL (now_seg))
2644
    return;
2645
 
2646
  if (mapstate == state)
2647
    /* The mapping symbol has already been emitted.
2648
       There is nothing else to do.  */
2649
    return;
2650
 
2651
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2652
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2653
}
2654
#else
2655
#define mapping_state(x) ((void)0)
2656
#define mapping_state_2(x, y) ((void)0)
2657
#endif
2658
 
2659
/* Find the real, Thumb-encoded start of a Thumb function.  */
2660
 
2661
#ifdef OBJ_COFF
2662
static symbolS *
2663
find_real_start (symbolS * symbolP)
2664
{
2665
  char *       real_start;
2666
  const char * name = S_GET_NAME (symbolP);
2667
  symbolS *    new_target;
2668
 
2669
  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
2670
#define STUB_NAME ".real_start_of"
2671
 
2672
  if (name == NULL)
2673
    abort ();
2674
 
2675
  /* The compiler may generate BL instructions to local labels because
2676
     it needs to perform a branch to a far away location. These labels
2677
     do not have a corresponding ".real_start_of" label.  We check
2678
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2679
     the ".real_start_of" convention for nonlocal branches.  */
2680
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
2681
    return symbolP;
2682
 
2683
  real_start = ACONCAT ((STUB_NAME, name, NULL));
2684
  new_target = symbol_find (real_start);
2685
 
2686
  if (new_target == NULL)
2687
    {
2688
      as_warn (_("Failed to find real start of function: %s\n"), name);
2689
      new_target = symbolP;
2690
    }
2691
 
2692
  return new_target;
2693
}
2694
#endif
2695
 
2696
static void
2697
opcode_select (int width)
2698
{
2699
  switch (width)
2700
    {
2701
    case 16:
2702
      if (! thumb_mode)
2703
        {
2704
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2705
            as_bad (_("selected processor does not support THUMB opcodes"));
2706
 
2707
          thumb_mode = 1;
2708
          /* No need to force the alignment, since we will have been
2709
             coming from ARM mode, which is word-aligned.  */
2710
          record_alignment (now_seg, 1);
2711
        }
2712
      break;
2713
 
2714
    case 32:
2715
      if (thumb_mode)
2716
        {
2717
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2718
            as_bad (_("selected processor does not support ARM opcodes"));
2719
 
2720
          thumb_mode = 0;
2721
 
2722
          if (!need_pass_2)
2723
            frag_align (2, 0, 0);
2724
 
2725
          record_alignment (now_seg, 1);
2726
        }
2727
      break;
2728
 
2729
    default:
2730
      as_bad (_("invalid instruction size selected (%d)"), width);
2731
    }
2732
}
2733
 
2734
static void
2735
s_arm (int ignore ATTRIBUTE_UNUSED)
2736
{
2737
  opcode_select (32);
2738
  demand_empty_rest_of_line ();
2739
}
2740
 
2741
static void
2742
s_thumb (int ignore ATTRIBUTE_UNUSED)
2743
{
2744
  opcode_select (16);
2745
  demand_empty_rest_of_line ();
2746
}
2747
 
2748
static void
2749
s_code (int unused ATTRIBUTE_UNUSED)
2750
{
2751
  int temp;
2752
 
2753
  temp = get_absolute_expression ();
2754
  switch (temp)
2755
    {
2756
    case 16:
2757
    case 32:
2758
      opcode_select (temp);
2759
      break;
2760
 
2761
    default:
2762
      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2763
    }
2764
}
2765
 
2766
static void
2767
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2768
{
2769
  /* If we are not already in thumb mode go into it, EVEN if
2770
     the target processor does not support thumb instructions.
2771
     This is used by gcc/config/arm/lib1funcs.asm for example
2772
     to compile interworking support functions even if the
2773
     target processor should not support interworking.  */
2774
  if (! thumb_mode)
2775
    {
2776
      thumb_mode = 2;
2777
      record_alignment (now_seg, 1);
2778
    }
2779
 
2780
  demand_empty_rest_of_line ();
2781
}
2782
 
2783
static void
2784
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2785
{
2786
  s_thumb (0);
2787
 
2788
  /* The following label is the name/address of the start of a Thumb function.
2789
     We need to know this for the interworking support.  */
2790
  label_is_thumb_function_name = TRUE;
2791
}
2792
 
2793
/* Perform a .set directive, but also mark the alias as
2794
   being a thumb function.  */
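/* [Editorial note -- illustrative example, not part of the original source.]
   Typical usage: ".thumb_set alias, target" behaves like
   ".set alias, target" but additionally marks ALIAS as a Thumb function
   for interworking purposes.  */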
2795
 
2796
static void
2797
s_thumb_set (int equiv)
2798
{
2799
  /* XXX the following is a duplicate of the code for s_set() in read.c
2800
     We cannot just call that code as we need to get at the symbol that
2801
     is created.  */
2802
  char *    name;
2803
  char      delim;
2804
  char *    end_name;
2805
  symbolS * symbolP;
2806
 
2807
  /* Especial apologies for the random logic:
2808
     This just grew, and could be parsed much more simply!
2809
     Dean - in haste.  */
2810
  name      = input_line_pointer;
2811
  delim     = get_symbol_end ();
2812
  end_name  = input_line_pointer;
2813
  *end_name = delim;
2814
 
2815
  if (*input_line_pointer != ',')
2816
    {
2817
      *end_name = 0;
2818
      as_bad (_("expected comma after name \"%s\""), name);
2819
      *end_name = delim;
2820
      ignore_rest_of_line ();
2821
      return;
2822
    }
2823
 
2824
  input_line_pointer++;
2825
  *end_name = 0;
2826
 
2827
  if (name[0] == '.' && name[1] == '\0')
2828
    {
2829
      /* XXX - this should not happen to .thumb_set.  */
2830
      abort ();
2831
    }
2832
 
2833
  if ((symbolP = symbol_find (name)) == NULL
2834
      && (symbolP = md_undefined_symbol (name)) == NULL)
2835
    {
2836
#ifndef NO_LISTING
2837
      /* When doing symbol listings, play games with dummy fragments living
2838
         outside the normal fragment chain to record the file and line info
2839
         for this symbol.  */
2840
      if (listing & LISTING_SYMBOLS)
2841
        {
2842
          extern struct list_info_struct * listing_tail;
2843
          fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2844
 
2845
          memset (dummy_frag, 0, sizeof (fragS));
2846
          dummy_frag->fr_type = rs_fill;
2847
          dummy_frag->line = listing_tail;
2848
          symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2849
          dummy_frag->fr_symbol = symbolP;
2850
        }
2851
      else
2852
#endif
2853
        symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2854
 
2855
#ifdef OBJ_COFF
2856
      /* "set" symbols are local unless otherwise specified.  */
2857
      SF_SET_LOCAL (symbolP);
2858
#endif /* OBJ_COFF  */
2859
    }                           /* Make a new symbol.  */
2860
 
2861
  symbol_table_insert (symbolP);
2862
 
2863
  * end_name = delim;
2864
 
2865
  if (equiv
2866
      && S_IS_DEFINED (symbolP)
2867
      && S_GET_SEGMENT (symbolP) != reg_section)
2868
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2869
 
2870
  pseudo_set (symbolP);
2871
 
2872
  demand_empty_rest_of_line ();
2873
 
2874
  /* XXX Now we come to the Thumb specific bit of code.  */
2875
 
2876
  THUMB_SET_FUNC (symbolP, 1);
2877
  ARM_SET_THUMB (symbolP, 1);
2878
#if defined OBJ_ELF || defined OBJ_COFF
2879
  ARM_SET_INTERWORK (symbolP, support_interwork);
2880
#endif
2881
}
2882
 
2883
/* Directives: Mode selection.  */
2884
 
2885
/* .syntax [unified|divided] - choose the new unified syntax
2886
   (same for Arm and Thumb encoding, modulo slight differences in what
2887
   can be represented) or the old divergent syntax for each mode.  */
2888
static void
2889
s_syntax (int unused ATTRIBUTE_UNUSED)
2890
{
2891
  char *name, delim;
2892
 
2893
  name = input_line_pointer;
2894
  delim = get_symbol_end ();
2895
 
2896
  if (!strcasecmp (name, "unified"))
2897
    unified_syntax = TRUE;
2898
  else if (!strcasecmp (name, "divided"))
2899
    unified_syntax = FALSE;
2900
  else
2901
    {
2902
      as_bad (_("unrecognized syntax mode \"%s\""), name);
2903
      return;
2904
    }
2905
  *input_line_pointer = delim;
2906
  demand_empty_rest_of_line ();
2907
}
2908
 
2909
/* Directives: sectioning and alignment.  */
2910
 
2911
/* Same as s_align_ptwo but align 0 => align 2.  */
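/* [Editorial note -- illustrative example, not part of the original source.]
   With this directive, ".align" (or ".align 0") aligns to a 4-byte
   boundary (2^2), while ".align 3" aligns to an 8-byte boundary; the
   argument is a power of two and is capped at 15.  */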
2912
 
2913
static void
2914
s_align (int unused ATTRIBUTE_UNUSED)
2915
{
2916
  int temp;
2917
  bfd_boolean fill_p;
2918
  long temp_fill;
2919
  long max_alignment = 15;
2920
 
2921
  temp = get_absolute_expression ();
2922
  if (temp > max_alignment)
2923
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2924
  else if (temp < 0)
2925
    {
2926
      as_bad (_("alignment negative. 0 assumed."));
2927
      temp = 0;
2928
    }
2929
 
2930
  if (*input_line_pointer == ',')
2931
    {
2932
      input_line_pointer++;
2933
      temp_fill = get_absolute_expression ();
2934
      fill_p = TRUE;
2935
    }
2936
  else
2937
    {
2938
      fill_p = FALSE;
2939
      temp_fill = 0;
2940
    }
2941
 
2942
  if (!temp)
2943
    temp = 2;
2944
 
2945
  /* Only make a frag if we HAVE to.  */
2946
  if (temp && !need_pass_2)
2947
    {
2948
      if (!fill_p && subseg_text_p (now_seg))
2949
        frag_align_code (temp, 0);
2950
      else
2951
        frag_align (temp, (int) temp_fill, 0);
2952
    }
2953
  demand_empty_rest_of_line ();
2954
 
2955
  record_alignment (now_seg, temp);
2956
}
2957
 
2958
static void
2959
s_bss (int ignore ATTRIBUTE_UNUSED)
2960
{
2961
  /* We don't support putting frags in the BSS segment, we fake it by
2962
     marking in_bss, then looking at s_skip for clues.  */
2963
  subseg_set (bss_section, 0);
2964
  demand_empty_rest_of_line ();
2965
 
2966
#ifdef md_elf_section_change_hook
2967
  md_elf_section_change_hook ();
2968
#endif
2969
}
2970
 
2971
static void
2972
s_even (int ignore ATTRIBUTE_UNUSED)
2973
{
2974
  /* Never make frag if expect extra pass.  */
2975
  if (!need_pass_2)
2976
    frag_align (1, 0, 0);
2977
 
2978
  record_alignment (now_seg, 1);
2979
 
2980
  demand_empty_rest_of_line ();
2981
}
2982
 
2983
/* Directives: Literal pools.  */
2984
 
2985
static literal_pool *
2986
find_literal_pool (void)
2987
{
2988
  literal_pool * pool;
2989
 
2990
  for (pool = list_of_pools; pool != NULL; pool = pool->next)
2991
    {
2992
      if (pool->section == now_seg
2993
          && pool->sub_section == now_subseg)
2994
        break;
2995
    }
2996
 
2997
  return pool;
2998
}
2999
 
3000
static literal_pool *
3001
find_or_make_literal_pool (void)
3002
{
3003
  /* Next literal pool ID number.  */
3004
  static unsigned int latest_pool_num = 1;
3005
  literal_pool *      pool;
3006
 
3007
  pool = find_literal_pool ();
3008
 
3009
  if (pool == NULL)
3010
    {
3011
      /* Create a new pool.  */
3012
      pool = (literal_pool *) xmalloc (sizeof (* pool));
3013
      if (! pool)
3014
        return NULL;
3015
 
3016
      pool->next_free_entry = 0;
3017
      pool->section         = now_seg;
3018
      pool->sub_section     = now_subseg;
3019
      pool->next            = list_of_pools;
3020
      pool->symbol          = NULL;
3021
 
3022
      /* Add it to the list.  */
3023
      list_of_pools = pool;
3024
    }
3025
 
3026
  /* New pools, and emptied pools, will have a NULL symbol.  */
3027
  if (pool->symbol == NULL)
3028
    {
3029
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3030
                                    (valueT) 0, &zero_address_frag);
3031
      pool->id = latest_pool_num ++;
3032
    }
3033
 
3034
  /* Done.  */
3035
  return pool;
3036
}
3037
 
3038
/* Add the literal in the global 'inst'
3039
   structure to the relevant literal pool.  */
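/* [Editorial note -- illustrative example, not part of the original source.]
   This is what makes "ldr r0, =0x12345678" work: the constant is added to
   the literal pool for the current section/subsection and the instruction's
   relocation is redirected to the pool symbol, so the value is later
   emitted as a .word by .ltorg/.pool or when pools are flushed at the end
   of assembly.  */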
3040
 
3041
static int
3042
add_to_lit_pool (void)
3043
{
3044
  literal_pool * pool;
3045
  unsigned int entry;
3046
 
3047
  pool = find_or_make_literal_pool ();
3048
 
3049
  /* Check if this literal value is already in the pool.  */
3050
  for (entry = 0; entry < pool->next_free_entry; entry ++)
3051
    {
3052
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3053
          && (inst.reloc.exp.X_op == O_constant)
3054
          && (pool->literals[entry].X_add_number
3055
              == inst.reloc.exp.X_add_number)
3056
          && (pool->literals[entry].X_unsigned
3057
              == inst.reloc.exp.X_unsigned))
3058
        break;
3059
 
3060
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3061
          && (inst.reloc.exp.X_op == O_symbol)
3062
          && (pool->literals[entry].X_add_number
3063
              == inst.reloc.exp.X_add_number)
3064
          && (pool->literals[entry].X_add_symbol
3065
              == inst.reloc.exp.X_add_symbol)
3066
          && (pool->literals[entry].X_op_symbol
3067
              == inst.reloc.exp.X_op_symbol))
3068
        break;
3069
    }
3070
 
3071
  /* Do we need to create a new entry?  */
3072
  if (entry == pool->next_free_entry)
3073
    {
3074
      if (entry >= MAX_LITERAL_POOL_SIZE)
3075
        {
3076
          inst.error = _("literal pool overflow");
3077
          return FAIL;
3078
        }
3079
 
3080
      pool->literals[entry] = inst.reloc.exp;
3081 160 khays
#ifdef OBJ_ELF
3082
      /* PR ld/12974: Record the location of the first source line to reference
3083
         this entry in the literal pool.  If it turns out during linking that the
3084
         symbol does not exist we will be able to give an accurate line number for
3085
         the (first use of the) missing reference.  */
3086
      if (debug_type == DEBUG_DWARF2)
3087
        dwarf2_where (pool->locs + entry);
3088
#endif
3089 16 khays
      pool->next_free_entry += 1;
3090
    }
3091
 
3092
  inst.reloc.exp.X_op         = O_symbol;
3093
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
3094
  inst.reloc.exp.X_add_symbol = pool->symbol;
3095
 
3096
  return SUCCESS;
3097
}
3098
 
3099
/* Can't use symbol_new here, so have to create a symbol and then at
3100
   a later date assign it a value.  That's what these functions do.  */
3101
 
3102
static void
3103
symbol_locate (symbolS *    symbolP,
3104
               const char * name,       /* It is copied, the caller can modify.  */
3105
               segT         segment,    /* Segment identifier (SEG_<something>).  */
3106
               valueT       valu,       /* Symbol value.  */
3107
               fragS *      frag)       /* Associated fragment.  */
3108
{
3109
  unsigned int name_length;
3110
  char * preserved_copy_of_name;
3111
 
3112
  name_length = strlen (name) + 1;   /* +1 for \0.  */
3113
  obstack_grow (&notes, name, name_length);
3114
  preserved_copy_of_name = (char *) obstack_finish (&notes);
3115
 
3116
#ifdef tc_canonicalize_symbol_name
3117
  preserved_copy_of_name =
3118
    tc_canonicalize_symbol_name (preserved_copy_of_name);
3119
#endif
3120
 
3121
  S_SET_NAME (symbolP, preserved_copy_of_name);
3122
 
3123
  S_SET_SEGMENT (symbolP, segment);
3124
  S_SET_VALUE (symbolP, valu);
3125
  symbol_clear_list_pointers (symbolP);
3126
 
3127
  symbol_set_frag (symbolP, frag);
3128
 
3129
  /* Link to end of symbol chain.  */
3130
  {
3131
    extern int symbol_table_frozen;
3132
 
3133
    if (symbol_table_frozen)
3134
      abort ();
3135
  }
3136
 
3137
  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3138
 
3139
  obj_symbol_new_hook (symbolP);
3140
 
3141
#ifdef tc_symbol_new_hook
3142
  tc_symbol_new_hook (symbolP);
3143
#endif
3144
 
3145
#ifdef DEBUG_SYMS
3146
  verify_symbol_chain (symbol_rootP, symbol_lastP);
3147
#endif /* DEBUG_SYMS  */
3148
}
3149
 
3150
 
3151
static void
3152
s_ltorg (int ignored ATTRIBUTE_UNUSED)
3153
{
3154
  unsigned int entry;
3155
  literal_pool * pool;
3156
  char sym_name[20];
3157
 
3158
  pool = find_literal_pool ();
3159
  if (pool == NULL
3160
      || pool->symbol == NULL
3161
      || pool->next_free_entry == 0)
3162
    return;
3163
 
3164
  mapping_state (MAP_DATA);
3165
 
3166
  /* Align the pool, since we will be making word accesses to it.
3167
     Only make a frag if we have to.  */
3168
  if (!need_pass_2)
3169
    frag_align (2, 0, 0);
3170
 
3171
  record_alignment (now_seg, 2);
3172
 
3173
  sprintf (sym_name, "$$lit_\002%x", pool->id);
3174
 
3175
  symbol_locate (pool->symbol, sym_name, now_seg,
3176
                 (valueT) frag_now_fix (), frag_now);
3177
  symbol_table_insert (pool->symbol);
3178
 
3179
  ARM_SET_THUMB (pool->symbol, thumb_mode);
3180
 
3181
#if defined OBJ_COFF || defined OBJ_ELF
3182
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
3183
#endif
3184
 
3185
  for (entry = 0; entry < pool->next_free_entry; entry ++)
3186 160 khays
    {
3187
#ifdef OBJ_ELF
3188
      if (debug_type == DEBUG_DWARF2)
3189
        dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3190
#endif
3191
      /* First output the expression in the instruction to the pool.  */
3192
      emit_expr (&(pool->literals[entry]), 4); /* .word  */
3193
    }
3194 16 khays
 
3195
  /* Mark the pool as empty.  */
3196
  pool->next_free_entry = 0;
3197
  pool->symbol = NULL;
3198
}
3199
 
3200
#ifdef OBJ_ELF
3201
/* Forward declarations for functions below, in the MD interface
3202
   section.  */
3203
static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3204
static valueT create_unwind_entry (int);
3205
static void start_unwind_section (const segT, int);
3206
static void add_unwind_opcode (valueT, int);
3207
static void flush_pending_unwind (void);
3208
 
3209
/* Directives: Data.  */
3210
 
3211
static void
3212
s_arm_elf_cons (int nbytes)
3213
{
3214
  expressionS exp;
3215
 
3216
#ifdef md_flush_pending_output
3217
  md_flush_pending_output ();
3218
#endif
3219
 
3220
  if (is_it_end_of_statement ())
3221
    {
3222
      demand_empty_rest_of_line ();
3223
      return;
3224
    }
3225
 
3226
#ifdef md_cons_align
3227
  md_cons_align (nbytes);
3228
#endif
3229
 
3230
  mapping_state (MAP_DATA);
3231
  do
3232
    {
3233
      int reloc;
3234
      char *base = input_line_pointer;
3235
 
3236
      expression (& exp);
3237
 
3238
      if (exp.X_op != O_symbol)
3239
        emit_expr (&exp, (unsigned int) nbytes);
3240
      else
3241
        {
3242
          char *before_reloc = input_line_pointer;
3243
          reloc = parse_reloc (&input_line_pointer);
3244
          if (reloc == -1)
3245
            {
3246
              as_bad (_("unrecognized relocation suffix"));
3247
              ignore_rest_of_line ();
3248
              return;
3249
            }
3250
          else if (reloc == BFD_RELOC_UNUSED)
3251
            emit_expr (&exp, (unsigned int) nbytes);
3252
          else
3253
            {
3254
              reloc_howto_type *howto = (reloc_howto_type *)
3255
                  bfd_reloc_type_lookup (stdoutput,
3256
                                         (bfd_reloc_code_real_type) reloc);
3257
              int size = bfd_get_reloc_size (howto);
3258
 
3259
              if (reloc == BFD_RELOC_ARM_PLT32)
3260
                {
3261
                  as_bad (_("(plt) is only valid on branch targets"));
3262
                  reloc = BFD_RELOC_UNUSED;
3263
                  size = 0;
3264
                }
3265
 
3266
              if (size > nbytes)
3267
                as_bad (_("%s relocations do not fit in %d bytes"),
3268
                        howto->name, nbytes);
3269
              else
3270
                {
3271
                  /* We've parsed an expression stopping at O_symbol.
3272
                     But there may be more expression left now that we
3273
                     have parsed the relocation marker.  Parse it again.
3274
                     XXX Surely there is a cleaner way to do this.  */
3275
                  char *p = input_line_pointer;
3276
                  int offset;
3277
                  char *save_buf = (char *) alloca (input_line_pointer - base);
3278
                  memcpy (save_buf, base, input_line_pointer - base);
3279
                  memmove (base + (input_line_pointer - before_reloc),
3280
                           base, before_reloc - base);
3281
 
3282
                  input_line_pointer = base + (input_line_pointer-before_reloc);
3283
                  expression (&exp);
3284
                  memcpy (base, save_buf, p - base);
3285
 
3286
                  offset = nbytes - size;
3287
                  p = frag_more ((int) nbytes);
3288
                  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3289
                               size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3290
                }
3291
            }
3292
        }
3293
    }
3294
  while (*input_line_pointer++ == ',');
3295
 
3296
  /* Put terminator back into stream.  */
3297
  input_line_pointer --;
3298
  demand_empty_rest_of_line ();
3299
}
3300
 
3301
/* Emit an expression containing a 32-bit thumb instruction.
3302
   Implementation based on put_thumb32_insn.  */
3303
 
3304
static void
3305
emit_thumb32_expr (expressionS * exp)
3306
{
3307
  expressionS exp_high = *exp;
3308
 
3309
  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3310
  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3311
  exp->X_add_number &= 0xffff;
3312
  emit_expr (exp, (unsigned int) THUMB_SIZE);
3313
}
3314
 
3315
/*  Guess the instruction size based on the opcode.  */
3316
 
3317
static int
3318
thumb_insn_size (int opcode)
3319
{
3320
  if ((unsigned int) opcode < 0xe800u)
3321
    return 2;
3322
  else if ((unsigned int) opcode >= 0xe8000000u)
3323
    return 4;
3324
  else
3325
    return 0;
3326
}
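/* [Editorial note -- illustrative example, not part of the original source.]
   Under this heuristic, ".inst 0xbf00" (a Thumb NOP) is below 0xe800 and is
   emitted as a 2-byte instruction, while ".inst 0xf3af8000" (NOP.W) is at
   least 0xe8000000 and is emitted as a 4-byte instruction; values in
   between are rejected unless an explicit .n/.w width suffix is given.  */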
3327
 
3328
static bfd_boolean
3329
emit_insn (expressionS *exp, int nbytes)
3330
{
3331
  int size = 0;
3332
 
3333
  if (exp->X_op == O_constant)
3334
    {
3335
      size = nbytes;
3336
 
3337
      if (size == 0)
3338
        size = thumb_insn_size (exp->X_add_number);
3339
 
3340
      if (size != 0)
3341
        {
3342
          if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3343
            {
3344
              as_bad (_(".inst.n operand too big. "\
3345
                        "Use .inst.w instead"));
3346
              size = 0;
3347
            }
3348
          else
3349
            {
3350
              if (now_it.state == AUTOMATIC_IT_BLOCK)
3351
                set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3352
              else
3353
                set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3354
 
3355
              if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3356
                emit_thumb32_expr (exp);
3357
              else
3358
                emit_expr (exp, (unsigned int) size);
3359
 
3360
              it_fsm_post_encode ();
3361
            }
3362
        }
3363
      else
3364
        as_bad (_("cannot determine Thumb instruction size. "   \
3365
                  "Use .inst.n/.inst.w instead"));
3366
    }
3367
  else
3368
    as_bad (_("constant expression required"));
3369
 
3370
  return (size != 0);
3371
}
3372
 
3373
/* Like s_arm_elf_cons but do not use md_cons_align and
3374
   set the mapping state to MAP_ARM/MAP_THUMB.  */
3375
 
3376
static void
3377
s_arm_elf_inst (int nbytes)
3378
{
3379
  if (is_it_end_of_statement ())
3380
    {
3381
      demand_empty_rest_of_line ();
3382
      return;
3383
    }
3384
 
3385
  /* Calling mapping_state () here will not change ARM/THUMB,
3386
     but will ensure not to be in DATA state.  */
3387
 
3388
  if (thumb_mode)
3389
    mapping_state (MAP_THUMB);
3390
  else
3391
    {
3392
      if (nbytes != 0)
3393
        {
3394
          as_bad (_("width suffixes are invalid in ARM mode"));
3395
          ignore_rest_of_line ();
3396
          return;
3397
        }
3398
 
3399
      nbytes = 4;
3400
 
3401
      mapping_state (MAP_ARM);
3402
    }
3403
 
3404
  do
3405
    {
3406
      expressionS exp;
3407
 
3408
      expression (& exp);
3409
 
3410
      if (! emit_insn (& exp, nbytes))
3411
        {
3412
          ignore_rest_of_line ();
3413
          return;
3414
        }
3415
    }
3416
  while (*input_line_pointer++ == ',');
3417
 
3418
  /* Put terminator back into stream.  */
3419
  input_line_pointer --;
3420
  demand_empty_rest_of_line ();
3421
}
3422
 
3423
/* Parse a .rel31 directive.  */
3424
 
3425
static void
3426
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3427
{
3428
  expressionS exp;
3429
  char *p;
3430
  valueT highbit;
3431
 
3432
  highbit = 0;
3433
  if (*input_line_pointer == '1')
3434
    highbit = 0x80000000;
3435
  else if (*input_line_pointer != '0')
3436
    as_bad (_("expected 0 or 1"));
3437
 
3438
  input_line_pointer++;
3439
  if (*input_line_pointer != ',')
3440
    as_bad (_("missing comma"));
3441
  input_line_pointer++;
3442
 
3443
#ifdef md_flush_pending_output
3444
  md_flush_pending_output ();
3445
#endif
3446
 
3447
#ifdef md_cons_align
3448
  md_cons_align (4);
3449
#endif
3450
 
3451
  mapping_state (MAP_DATA);
3452
 
3453
  expression (&exp);
3454
 
3455
  p = frag_more (4);
3456
  md_number_to_chars (p, highbit, 4);
3457
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3458
               BFD_RELOC_ARM_PREL31);
3459
 
3460
  demand_empty_rest_of_line ();
3461
}
3462
 
3463
/* Directives: AEABI stack-unwind tables.  */
3464
 
3465
/* Parse an unwind_fnstart directive.  Simply records the current location.  */
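/* [Editorial note -- illustrative example, not part of the original source.]
   The AEABI unwinding directives bracket a function like this:

       func:
               .fnstart
               push    {r4, lr}
               .save   {r4, lr}
               ...
               pop     {r4, pc}
               .fnend

   .fnstart records the start address; .fnend (below) builds the index
   table entry from the opcodes accumulated in between.  */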
3466
 
3467
static void
3468
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3469
{
3470
  demand_empty_rest_of_line ();
3471
  if (unwind.proc_start)
3472
    {
3473
      as_bad (_("duplicate .fnstart directive"));
3474
      return;
3475
    }
3476
 
3477
  /* Mark the start of the function.  */
3478
  unwind.proc_start = expr_build_dot ();
3479
 
3480
  /* Reset the rest of the unwind info.  */
3481
  unwind.opcode_count = 0;
3482
  unwind.table_entry = NULL;
3483
  unwind.personality_routine = NULL;
3484
  unwind.personality_index = -1;
3485
  unwind.frame_size = 0;
3486
  unwind.fp_offset = 0;
3487
  unwind.fp_reg = REG_SP;
3488
  unwind.fp_used = 0;
3489
  unwind.sp_restored = 0;
3490
}
3491
 
3492
 
3493
/* Parse a handlerdata directive.  Creates the exception handling table entry
3494
   for the function.  */
3495
 
3496
static void
3497
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3498
{
3499
  demand_empty_rest_of_line ();
3500
  if (!unwind.proc_start)
3501
    as_bad (MISSING_FNSTART);
3502
 
3503
  if (unwind.table_entry)
3504
    as_bad (_("duplicate .handlerdata directive"));
3505
 
3506
  create_unwind_entry (1);
3507
}
3508
 
3509
/* Parse an unwind_fnend directive.  Generates the index table entry.  */
3510
 
3511
static void
3512
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3513
{
3514
  long where;
3515
  char *ptr;
3516
  valueT val;
3517
  unsigned int marked_pr_dependency;
3518
 
3519
  demand_empty_rest_of_line ();
3520
 
3521
  if (!unwind.proc_start)
3522
    {
3523
      as_bad (_(".fnend directive without .fnstart"));
3524
      return;
3525
    }
3526
 
3527
  /* Add eh table entry.  */
3528
  if (unwind.table_entry == NULL)
3529
    val = create_unwind_entry (0);
3530
  else
3531
    val = 0;
3532
 
3533
  /* Add index table entry.  This is two words.  */
3534
  start_unwind_section (unwind.saved_seg, 1);
3535
  frag_align (2, 0, 0);
3536
  record_alignment (now_seg, 2);
3537
 
3538
  ptr = frag_more (8);
3539
  where = frag_now_fix () - 8;
3540
 
3541
  /* Self relative offset of the function start.  */
3542
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3543
           BFD_RELOC_ARM_PREL31);
3544
 
3545
  /* Indicate dependency on EHABI-defined personality routines to the
3546
     linker, if it hasn't been done already.  */
3547
  marked_pr_dependency
3548
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3549
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
3550
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
3551
    {
3552
      static const char *const name[] =
3553
        {
3554
          "__aeabi_unwind_cpp_pr0",
3555
          "__aeabi_unwind_cpp_pr1",
3556
          "__aeabi_unwind_cpp_pr2"
3557
        };
3558
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3559
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3560
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3561
        |= 1 << unwind.personality_index;
3562
    }
3563
 
3564
  if (val)
3565
    /* Inline exception table entry.  */
3566
    md_number_to_chars (ptr + 4, val, 4);
3567
  else
3568
    /* Self relative offset of the table entry.  */
3569
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3570
             BFD_RELOC_ARM_PREL31);
3571
 
3572
  /* Restore the original section.  */
3573
  subseg_set (unwind.saved_seg, unwind.saved_subseg);
3574
 
3575
  unwind.proc_start = NULL;
3576
}
3577
 
3578
 
3579
/* Parse an unwind_cantunwind directive.  */
3580
 
3581
static void
3582
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3583
{
3584
  demand_empty_rest_of_line ();
3585
  if (!unwind.proc_start)
3586
    as_bad (MISSING_FNSTART);
3587
 
3588
  if (unwind.personality_routine || unwind.personality_index != -1)
3589
    as_bad (_("personality routine specified for cantunwind frame"));
3590
 
3591
  unwind.personality_index = -2;
3592
}
3593
 
3594
 
3595
/* Parse a personalityindex directive.  */
3596
 
3597
static void
3598
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3599
{
3600
  expressionS exp;
3601
 
3602
  if (!unwind.proc_start)
3603
    as_bad (MISSING_FNSTART);
3604
 
3605
  if (unwind.personality_routine || unwind.personality_index != -1)
3606
    as_bad (_("duplicate .personalityindex directive"));
3607
 
3608
  expression (&exp);
3609
 
3610
  if (exp.X_op != O_constant
3611
      || exp.X_add_number < 0 || exp.X_add_number > 15)
3612
    {
3613
      as_bad (_("bad personality routine number"));
3614
      ignore_rest_of_line ();
3615
      return;
3616
    }
3617
 
3618
  unwind.personality_index = exp.X_add_number;
3619
 
3620
  demand_empty_rest_of_line ();
3621
}
3622
 
3623
 
3624
/* Parse a personality directive.  */
3625
 
3626
static void
3627
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3628
{
3629
  char *name, *p, c;
3630
 
3631
  if (!unwind.proc_start)
3632
    as_bad (MISSING_FNSTART);
3633
 
3634
  if (unwind.personality_routine || unwind.personality_index != -1)
3635
    as_bad (_("duplicate .personality directive"));
3636
 
3637
  name = input_line_pointer;
3638
  c = get_symbol_end ();
3639
  p = input_line_pointer;
3640
  unwind.personality_routine = symbol_find_or_make (name);
3641
  *p = c;
3642
  demand_empty_rest_of_line ();
3643
}
3644
 
3645
 
3646
/* Parse a directive saving core registers.  */
3647
 
3648
static void
3649
s_arm_unwind_save_core (void)
3650
{
3651
  valueT op;
3652
  long range;
3653
  int n;
3654
 
3655
  range = parse_reg_list (&input_line_pointer);
3656
  if (range == FAIL)
3657
    {
3658
      as_bad (_("expected register list"));
3659
      ignore_rest_of_line ();
3660
      return;
3661
    }
3662
 
3663
  demand_empty_rest_of_line ();
3664
 
3665
  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3666
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
3667
     ip because it is clobbered by calls.  */
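  /* (Added note: bit 12 of RANGE is ip and bit 13 is sp, so the test
     below means "ip listed but sp not"; the following statement simply
     moves that single bit from ip to sp.)  */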
3668
  if (unwind.sp_restored && unwind.fp_reg == 12
3669
      && (range & 0x3000) == 0x1000)
3670
    {
3671
      unwind.opcode_count--;
3672
      unwind.sp_restored = 0;
3673
      range = (range | 0x2000) & ~0x1000;
3674
      unwind.pending_offset = 0;
3675
    }
3676
 
3677
  /* Pop r4-r15.  */
3678
  if (range & 0xfff0)
3679
    {
3680
      /* See if we can use the short opcodes.  These pop a block of up to 8
3681
         registers starting with r4, plus maybe r14.  */
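      /* (Added note: this matches the EHABI short encodings 0xa0..0xa7,
         "pop r4-r[4+n]", and 0xa8..0xaf, which additionally pop r14;
         anything else falls back to the long 0x8000-form below.)  */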
3682
      for (n = 0; n < 8; n++)
3683
        {
3684
          /* Break at the first non-saved register.      */
3685
          if ((range & (1 << (n + 4))) == 0)
3686
            break;
3687
        }
3688
      /* See if there are any other bits set.  */
3689
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3690
        {
3691
          /* Use the long form.  */
3692
          op = 0x8000 | ((range >> 4) & 0xfff);
3693
          add_unwind_opcode (op, 2);
3694
        }
3695
      else
3696
        {
3697
          /* Use the short form.  */
3698
          if (range & 0x4000)
3699
            op = 0xa8; /* Pop r14.      */
3700
          else
3701
            op = 0xa0; /* Do not pop r14.  */
3702
          op |= (n - 1);
3703
          add_unwind_opcode (op, 1);
3704
        }
3705
    }
3706
 
3707
  /* Pop r0-r3.  */
3708
  if (range & 0xf)
3709
    {
3710
      op = 0xb100 | (range & 0xf);
3711
      add_unwind_opcode (op, 2);
3712
    }
3713
 
3714
  /* Record the number of bytes pushed.  */
3715
  for (n = 0; n < 16; n++)
3716
    {
3717
      if (range & (1 << n))
3718
        unwind.frame_size += 4;
3719
    }
3720
}
3721
 
3722
 
3723
/* Parse a directive saving FPA registers.  */
3724
 
3725
static void
3726
s_arm_unwind_save_fpa (int reg)
3727
{
3728
  expressionS exp;
3729
  int num_regs;
3730
  valueT op;
3731
 
3732
  /* Get the number of registers to transfer.  */
3733
  if (skip_past_comma (&input_line_pointer) != FAIL)
3734
    expression (&exp);
3735
  else
3736
    exp.X_op = O_illegal;
3737
 
3738
  if (exp.X_op != O_constant)
3739
    {
3740
      as_bad (_("expected , <constant>"));
3741
      ignore_rest_of_line ();
3742
      return;
3743
    }
3744
 
3745
  num_regs = exp.X_add_number;
3746
 
3747
  if (num_regs < 1 || num_regs > 4)
3748
    {
3749
      as_bad (_("number of registers must be in the range [1:4]"));
3750
      ignore_rest_of_line ();
3751
      return;
3752
    }
3753
 
3754
  demand_empty_rest_of_line ();
3755
 
3756
  if (reg == 4)
3757
    {
3758
      /* Short form.  */
3759
      op = 0xb4 | (num_regs - 1);
3760
      add_unwind_opcode (op, 1);
3761
    }
3762
  else
3763
    {
3764
      /* Long form.  */
3765
      op = 0xc800 | (reg << 4) | (num_regs - 1);
3766
      add_unwind_opcode (op, 2);
3767
    }
3768
  unwind.frame_size += num_regs * 12;
3769
}
3770
 
3771
 
3772
/* Parse a directive saving VFP registers for ARMv6 and above.  */
3773
 
3774
static void
3775
s_arm_unwind_save_vfp_armv6 (void)
3776
{
3777
  int count;
3778
  unsigned int start;
3779
  valueT op;
3780
  int num_vfpv3_regs = 0;
3781
  int num_regs_below_16;
3782
 
3783
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3784
  if (count == FAIL)
3785
    {
3786
      as_bad (_("expected register list"));
3787
      ignore_rest_of_line ();
3788
      return;
3789
    }
3790
 
3791
  demand_empty_rest_of_line ();
3792
 
3793
  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3794
     than FSTMX/FLDMX-style ones).  */
3795
 
3796
  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
3797
  if (start >= 16)
3798
    num_vfpv3_regs = count;
3799
  else if (start + count > 16)
3800
    num_vfpv3_regs = start + count - 16;
3801
 
3802
  if (num_vfpv3_regs > 0)
3803
    {
3804
      int start_offset = start > 16 ? start - 16 : 0;
3805
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3806
      add_unwind_opcode (op, 2);
3807
    }
3808
 
3809
  /* Generate opcode for registers numbered in the range 0 .. 15.  */
3810
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3811
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
3812
  if (num_regs_below_16 > 0)
3813
    {
3814
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3815
      add_unwind_opcode (op, 2);
3816
    }
3817
 
3818
  unwind.frame_size += count * 8;
3819
}
3820
 
3821
 
3822
/* Parse a directive saving VFP registers for pre-ARMv6.  */
3823
 
3824
static void
3825
s_arm_unwind_save_vfp (void)
3826
{
3827
  int count;
3828
  unsigned int reg;
3829
  valueT op;
3830
 
3831
  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3832
  if (count == FAIL)
3833
    {
3834
      as_bad (_("expected register list"));
3835
      ignore_rest_of_line ();
3836
      return;
3837
    }
3838
 
3839
  demand_empty_rest_of_line ();
3840
 
3841
  if (reg == 8)
3842
    {
3843
      /* Short form.  */
3844
      op = 0xb8 | (count - 1);
3845
      add_unwind_opcode (op, 1);
3846
    }
3847
  else
3848
    {
3849
      /* Long form.  */
3850
      op = 0xb300 | (reg << 4) | (count - 1);
3851
      add_unwind_opcode (op, 2);
3852
    }
3853
  unwind.frame_size += count * 8 + 4;
3854
}
3855
 
3856
 
3857
/* Parse a directive saving iWMMXt data registers.  */
3858
 
3859
static void
3860
s_arm_unwind_save_mmxwr (void)
3861
{
3862
  int reg;
3863
  int hi_reg;
3864
  int i;
3865
  unsigned mask = 0;
3866
  valueT op;
3867
 
3868
  if (*input_line_pointer == '{')
3869
    input_line_pointer++;
3870
 
3871
  do
3872
    {
3873
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3874
 
3875
      if (reg == FAIL)
3876
        {
3877
          as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3878
          goto error;
3879
        }
3880
 
3881
      if (mask >> reg)
3882
        as_tsktsk (_("register list not in ascending order"));
3883
      mask |= 1 << reg;
3884
 
3885
      if (*input_line_pointer == '-')
3886
        {
3887
          input_line_pointer++;
3888
          hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3889
          if (hi_reg == FAIL)
3890
            {
3891
              as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3892
              goto error;
3893
            }
3894
          else if (reg >= hi_reg)
3895
            {
3896
              as_bad (_("bad register range"));
3897
              goto error;
3898
            }
3899
          for (; reg < hi_reg; reg++)
3900
            mask |= 1 << reg;
3901
        }
3902
    }
3903
  while (skip_past_comma (&input_line_pointer) != FAIL);
3904
 
3905
  if (*input_line_pointer == '}')
3906
    input_line_pointer++;
3907
 
3908
  demand_empty_rest_of_line ();
3909
 
3910
  /* Generate any deferred opcodes because we're going to be looking at
3911
     the list.  */
3912
  flush_pending_unwind ();
3913
 
3914
  for (i = 0; i < 16; i++)
3915
    {
3916
      if (mask & (1 << i))
3917
        unwind.frame_size += 8;
3918
    }
3919
 
3920
  /* Attempt to combine with a previous opcode.  We do this because gcc
3921
     likes to output separate unwind directives for a single block of
3922
     registers.  */
3923
  if (unwind.opcode_count > 0)
3924
    {
3925
      i = unwind.opcodes[unwind.opcode_count - 1];
3926
      if ((i & 0xf8) == 0xc0)
3927
        {
3928
          i &= 7;
3929
          /* Only merge if the blocks are contiguous.  */
3930
          if (i < 6)
3931
            {
3932
              if ((mask & 0xfe00) == (1 << 9))
3933
                {
3934
                  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3935
                  unwind.opcode_count--;
3936
                }
3937
            }
3938
          else if (i == 6 && unwind.opcode_count >= 2)
3939
            {
3940
              i = unwind.opcodes[unwind.opcode_count - 2];
3941
              reg = i >> 4;
3942
              i &= 0xf;
3943
 
3944
              op = 0xffff << (reg - 1);
3945
              if (reg > 0
3946
                  && ((mask & op) == (1u << (reg - 1))))
3947
                {
3948
                  op = (1 << (reg + i + 1)) - 1;
3949
                  op &= ~((1 << reg) - 1);
3950
                  mask |= op;
3951
                  unwind.opcode_count -= 2;
3952
                }
3953
            }
3954
        }
3955
    }
3956
 
3957
  hi_reg = 15;
3958
  /* We want to generate opcodes in the order the registers have been
3959
     saved, i.e. descending order.  */
3960
  for (reg = 15; reg >= -1; reg--)
3961
    {
3962
      /* Save registers in blocks.  */
3963
      if (reg < 0
3964
          || !(mask & (1 << reg)))
3965
        {
3966
          /* We found an unsaved reg.  Generate opcodes to save the
3967
             preceding block.   */
3968
          if (reg != hi_reg)
3969
            {
3970
              if (reg == 9)
3971
                {
3972
                  /* Short form.  */
3973
                  op = 0xc0 | (hi_reg - 10);
3974
                  add_unwind_opcode (op, 1);
3975
                }
3976
              else
3977
                {
3978
                  /* Long form.  */
3979
                  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3980
                  add_unwind_opcode (op, 2);
3981
                }
3982
            }
3983
          hi_reg = reg - 1;
3984
        }
3985
    }
3986
 
3987
  return;
3988
error:
3989
  ignore_rest_of_line ();
3990
}
3991
 
3992
static void
3993
s_arm_unwind_save_mmxwcg (void)
3994
{
3995
  int reg;
3996
  int hi_reg;
3997
  unsigned mask = 0;
3998
  valueT op;
3999
 
4000
  if (*input_line_pointer == '{')
4001
    input_line_pointer++;
4002
 
4003
  do
4004
    {
4005
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4006
 
4007
      if (reg == FAIL)
4008
        {
4009
          as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4010
          goto error;
4011
        }
4012
 
4013
      reg -= 8;
4014
      if (mask >> reg)
4015
        as_tsktsk (_("register list not in ascending order"));
4016
      mask |= 1 << reg;
4017
 
4018
      if (*input_line_pointer == '-')
4019
        {
4020
          input_line_pointer++;
4021
          hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4022
          if (hi_reg == FAIL)
4023
            {
4024
              as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4025
              goto error;
4026
            }
4027
          else if (reg >= hi_reg)
4028
            {
4029
              as_bad (_("bad register range"));
4030
              goto error;
4031
            }
4032
          for (; reg < hi_reg; reg++)
4033
            mask |= 1 << reg;
4034
        }
4035
    }
4036
  while (skip_past_comma (&input_line_pointer) != FAIL);
4037
 
4038
  if (*input_line_pointer == '}')
4039
    input_line_pointer++;
4040
 
4041
  demand_empty_rest_of_line ();
4042
 
4043
  /* Generate any deferred opcodes because we're going to be looking at
4044
     the list.  */
4045
  flush_pending_unwind ();
4046
 
4047
  for (reg = 0; reg < 16; reg++)
4048
    {
4049
      if (mask & (1 << reg))
4050
        unwind.frame_size += 4;
4051
    }
4052
  op = 0xc700 | mask;
4053
  add_unwind_opcode (op, 2);
4054
  return;
4055
error:
4056
  ignore_rest_of_line ();
4057
}
4058
 
4059
 
4060
/* Parse an unwind_save directive.
4061
   If the argument is non-zero, this is a .vsave directive.  */
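/* Illustrative usage (added):

       .save  {r4-r7, lr}      @ core registers
       .vsave {d8-d11}         @ VFP d-registers (FSTMD/FLDMD form)

   The register type of the first operand selects which of the helpers
   above is called.  */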
4062
 
4063
static void
4064
s_arm_unwind_save (int arch_v6)
4065
{
4066
  char *peek;
4067
  struct reg_entry *reg;
4068
  bfd_boolean had_brace = FALSE;
4069
 
4070
  if (!unwind.proc_start)
4071
    as_bad (MISSING_FNSTART);
4072
 
4073
  /* Figure out what sort of save we have.  */
4074
  peek = input_line_pointer;
4075
 
4076
  if (*peek == '{')
4077
    {
4078
      had_brace = TRUE;
4079
      peek++;
4080
    }
4081
 
4082
  reg = arm_reg_parse_multi (&peek);
4083
 
4084
  if (!reg)
4085
    {
4086
      as_bad (_("register expected"));
4087
      ignore_rest_of_line ();
4088
      return;
4089
    }
4090
 
4091
  switch (reg->type)
4092
    {
4093
    case REG_TYPE_FN:
4094
      if (had_brace)
4095
        {
4096
          as_bad (_("FPA .unwind_save does not take a register list"));
4097
          ignore_rest_of_line ();
4098
          return;
4099
        }
4100
      input_line_pointer = peek;
4101
      s_arm_unwind_save_fpa (reg->number);
4102
      return;
4103
 
4104
    case REG_TYPE_RN:     s_arm_unwind_save_core ();   return;
4105
    case REG_TYPE_VFD:
4106
      if (arch_v6)
4107
        s_arm_unwind_save_vfp_armv6 ();
4108
      else
4109
        s_arm_unwind_save_vfp ();
4110
      return;
4111
    case REG_TYPE_MMXWR:  s_arm_unwind_save_mmxwr ();  return;
4112
    case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4113
 
4114
    default:
4115
      as_bad (_(".unwind_save does not support this kind of register"));
4116
      ignore_rest_of_line ();
4117
    }
4118
}
4119
 
4120
 
4121
/* Parse an unwind_movsp directive.  */
4122
 
4123
static void
4124
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4125
{
4126
  int reg;
4127
  valueT op;
4128
  int offset;
4129
 
4130
  if (!unwind.proc_start)
4131
    as_bad (MISSING_FNSTART);
4132
 
4133
  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4134
  if (reg == FAIL)
4135
    {
4136
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4137
      ignore_rest_of_line ();
4138
      return;
4139
    }
4140
 
4141
  /* Optional constant.  */
4142
  if (skip_past_comma (&input_line_pointer) != FAIL)
4143
    {
4144
      if (immediate_for_directive (&offset) == FAIL)
4145
        return;
4146
    }
4147
  else
4148
    offset = 0;
4149
 
4150
  demand_empty_rest_of_line ();
4151
 
4152
  if (reg == REG_SP || reg == REG_PC)
4153
    {
4154
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4155
      return;
4156
    }
4157
 
4158
  if (unwind.fp_reg != REG_SP)
4159
    as_bad (_("unexpected .unwind_movsp directive"));
4160
 
4161
  /* Generate opcode to restore the value.  */
4162
  op = 0x90 | reg;
4163
  add_unwind_opcode (op, 1);
4164
 
4165
  /* Record the information for later.  */
4166
  unwind.fp_reg = reg;
4167
  unwind.fp_offset = unwind.frame_size - offset;
4168
  unwind.sp_restored = 1;
4169
}
4170
 
4171
/* Parse an unwind_pad directive.  */
4172
 
4173
static void
4174
s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4175
{
4176
  int offset;
4177
 
4178
  if (!unwind.proc_start)
4179
    as_bad (MISSING_FNSTART);
4180
 
4181
  if (immediate_for_directive (&offset) == FAIL)
4182
    return;
4183
 
4184
  if (offset & 3)
4185
    {
4186
      as_bad (_("stack increment must be multiple of 4"));
4187
      ignore_rest_of_line ();
4188
      return;
4189
    }
4190
 
4191
  /* Don't generate any opcodes, just record the details for later.  */
4192
  unwind.frame_size += offset;
4193
  unwind.pending_offset += offset;
4194
 
4195
  demand_empty_rest_of_line ();
4196
}
4197
 
4198
/* Parse an unwind_setfp directive.  */
4199
 
4200
static void
4201
s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4202
{
4203
  int sp_reg;
4204
  int fp_reg;
4205
  int offset;
4206
 
4207
  if (!unwind.proc_start)
4208
    as_bad (MISSING_FNSTART);
4209
 
4210
  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4211
  if (skip_past_comma (&input_line_pointer) == FAIL)
4212
    sp_reg = FAIL;
4213
  else
4214
    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4215
 
4216
  if (fp_reg == FAIL || sp_reg == FAIL)
4217
    {
4218
      as_bad (_("expected <reg>, <reg>"));
4219
      ignore_rest_of_line ();
4220
      return;
4221
    }
4222
 
4223
  /* Optional constant.  */
4224
  if (skip_past_comma (&input_line_pointer) != FAIL)
4225
    {
4226
      if (immediate_for_directive (&offset) == FAIL)
4227
        return;
4228
    }
4229
  else
4230
    offset = 0;
4231
 
4232
  demand_empty_rest_of_line ();
4233
 
4234
  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4235
    {
4236
      as_bad (_("register must be either sp or set by a previous "
4237
                "unwind_movsp directive"));
4238
      return;
4239
    }
4240
 
4241
  /* Don't generate any opcodes, just record the information for later.  */
4242
  unwind.fp_reg = fp_reg;
4243
  unwind.fp_used = 1;
4244
  if (sp_reg == REG_SP)
4245
    unwind.fp_offset = unwind.frame_size - offset;
4246
  else
4247
    unwind.fp_offset -= offset;
4248
}
4249
 
4250
/* Parse an unwind_raw directive.  */
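/* Illustrative usage (added): ".unwind_raw 16, 0xb1, 0x0f" records a
   16-byte stack adjustment and emits the raw EHABI opcode bytes
   0xb1 0x0f (pop {r0-r3}) exactly as written.  */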
4251
 
4252
static void
4253
s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4254
{
4255
  expressionS exp;
4256
  /* This is an arbitrary limit.         */
4257
  unsigned char op[16];
4258
  int count;
4259
 
4260
  if (!unwind.proc_start)
4261
    as_bad (MISSING_FNSTART);
4262
 
4263
  expression (&exp);
4264
  if (exp.X_op == O_constant
4265
      && skip_past_comma (&input_line_pointer) != FAIL)
4266
    {
4267
      unwind.frame_size += exp.X_add_number;
4268
      expression (&exp);
4269
    }
4270
  else
4271
    exp.X_op = O_illegal;
4272
 
4273
  if (exp.X_op != O_constant)
4274
    {
4275
      as_bad (_("expected <offset>, <opcode>"));
4276
      ignore_rest_of_line ();
4277
      return;
4278
    }
4279
 
4280
  count = 0;
4281
 
4282
  /* Parse the opcode.  */
4283
  for (;;)
4284
    {
4285
      if (count >= 16)
4286
        {
4287
          as_bad (_("unwind opcode too long"));
4288
          ignore_rest_of_line ();
4289
          /* Bail out so we do not write past the 16-byte op[] buffer.  */
          return;
        }
4290
      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4291
        {
4292
          as_bad (_("invalid unwind opcode"));
4293
          ignore_rest_of_line ();
4294
          return;
4295
        }
4296
      op[count++] = exp.X_add_number;
4297
 
4298
      /* Parse the next byte.  */
4299
      if (skip_past_comma (&input_line_pointer) == FAIL)
4300
        break;
4301
 
4302
      expression (&exp);
4303
    }
4304
 
4305
  /* Add the opcode bytes in reverse order.  */
4306
  while (count--)
4307
    add_unwind_opcode (op[count], 1);
4308
 
4309
  demand_empty_rest_of_line ();
4310
}
4311
 
4312
 
4313
/* Parse a .eabi_attribute directive.  */
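/* Illustrative usage (added): ".eabi_attribute 24, 1" sets build
   attribute tag 24 to the value 1.  Tags below NUM_KNOWN_OBJ_ATTRIBUTES
   are also flagged as explicitly set so that later inference code does
   not overwrite them.  */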
4314
 
4315
static void
4316
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4317
{
4318
  int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4319
 
4320
  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4321
    attributes_set_explicitly[tag] = 1;
4322
}
4323
 
4324
/* Emit a tls fix for the symbol.  */
4325
 
4326
static void
4327
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4328
{
4329
  char *p;
4330
  expressionS exp;
4331
#ifdef md_flush_pending_output
4332
  md_flush_pending_output ();
4333
#endif
4334
 
4335
#ifdef md_cons_align
4336
  md_cons_align (4);
4337
#endif
4338
 
4339
  /* Since we're just labelling the code, there's no need to define a
4340
     mapping symbol.  */
4341
  expression (&exp);
4342
  p = obstack_next_free (&frchain_now->frch_obstack);
4343
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4344
               thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4345
               : BFD_RELOC_ARM_TLS_DESCSEQ);
4346
}
4347
#endif /* OBJ_ELF */
4348
 
4349
static void s_arm_arch (int);
4350
static void s_arm_object_arch (int);
4351
static void s_arm_cpu (int);
4352
static void s_arm_fpu (int);
4353
static void s_arm_arch_extension (int);
4354
 
4355
#ifdef TE_PE
4356
 
4357
static void
4358
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4359
{
4360
  expressionS exp;
4361
 
4362
  do
4363
    {
4364
      expression (&exp);
4365
      if (exp.X_op == O_symbol)
4366
        exp.X_op = O_secrel;
4367
 
4368
      emit_expr (&exp, 4);
4369
    }
4370
  while (*input_line_pointer++ == ',');
4371
 
4372
  input_line_pointer--;
4373
  demand_empty_rest_of_line ();
4374
}
4375
#endif /* TE_PE */
4376
 
4377
/* This table describes all the machine specific pseudo-ops the assembler
4378
   has to support.  The fields are:
4379
     pseudo-op name without dot
4380
     function to call to execute this pseudo-op
4381
     Integer arg to pass to the function.  */
4382
 
4383
const pseudo_typeS md_pseudo_table[] =
4384
{
4385
  /* Never called because '.req' does not start a line.  */
4386
  { "req",         s_req,         0 },
4387
  /* Following two are likewise never called.  */
4388
  { "dn",          s_dn,          0 },
4389
  { "qn",          s_qn,          0 },
4390
  { "unreq",       s_unreq,       0 },
4391
  { "bss",         s_bss,         0 },
4392
  { "align",       s_align,       0 },
4393
  { "arm",         s_arm,         0 },
4394
  { "thumb",       s_thumb,       0 },
4395
  { "code",        s_code,        0 },
4396
  { "force_thumb", s_force_thumb, 0 },
4397
  { "thumb_func",  s_thumb_func,  0 },
4398
  { "thumb_set",   s_thumb_set,   0 },
4399
  { "even",        s_even,        0 },
4400
  { "ltorg",       s_ltorg,       0 },
4401
  { "pool",        s_ltorg,       0 },
4402
  { "syntax",      s_syntax,      0 },
4403
  { "cpu",         s_arm_cpu,     0 },
4404
  { "arch",        s_arm_arch,    0 },
4405
  { "object_arch", s_arm_object_arch,   0 },
4406
  { "fpu",         s_arm_fpu,     0 },
4407
  { "arch_extension", s_arm_arch_extension, 0 },
4408
#ifdef OBJ_ELF
4409
  { "word",             s_arm_elf_cons, 4 },
4410
  { "long",             s_arm_elf_cons, 4 },
4411
  { "inst.n",           s_arm_elf_inst, 2 },
4412
  { "inst.w",           s_arm_elf_inst, 4 },
4413
  { "inst",             s_arm_elf_inst, 0 },
4414
  { "rel31",            s_arm_rel31,      0 },
4415
  { "fnstart",          s_arm_unwind_fnstart,   0 },
4416
  { "fnend",            s_arm_unwind_fnend,     0 },
4417
  { "cantunwind",       s_arm_unwind_cantunwind, 0 },
4418
  { "personality",      s_arm_unwind_personality, 0 },
4419
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
4420
  { "handlerdata",      s_arm_unwind_handlerdata, 0 },
4421
  { "save",             s_arm_unwind_save,      0 },
4422
  { "vsave",            s_arm_unwind_save,      1 },
4423
  { "movsp",            s_arm_unwind_movsp,     0 },
4424
  { "pad",              s_arm_unwind_pad,       0 },
4425
  { "setfp",            s_arm_unwind_setfp,     0 },
4426
  { "unwind_raw",       s_arm_unwind_raw,       0 },
4427
  { "eabi_attribute",   s_arm_eabi_attribute,   0 },
4428
  { "tlsdescseq",       s_arm_tls_descseq,      0 },
4429
#else
4430
  { "word",        cons, 4},
4431
 
4432
  /* These are used for dwarf.  */
4433
  {"2byte", cons, 2},
4434
  {"4byte", cons, 4},
4435
  {"8byte", cons, 8},
4436
  /* These are used for dwarf2.  */
4437
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4438
  { "loc",  dwarf2_directive_loc,  0 },
4439
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4440
#endif
4441
  { "extend",      float_cons, 'x' },
4442
  { "ldouble",     float_cons, 'x' },
4443
  { "packed",      float_cons, 'p' },
4444
#ifdef TE_PE
4445
  {"secrel32", pe_directive_secrel, 0},
4446
#endif
4447
  { 0, 0, 0 }
4448
};
4449
 
4450
/* Parser functions used exclusively in instruction operands.  */
4451
 
4452
/* Generic immediate-value read function for use in insn parsing.
4453
   STR points to the beginning of the immediate (the leading #);
4454
   VAL receives the value; if the value is outside [MIN, MAX]
4455
   issue an error.  PREFIX_OPT is true if the immediate prefix is
4456
   optional.  */
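/* For example (added; the call referenced appears later in this file):
   the unindexed addressing "option" field is read with
   parse_immediate (&p, &inst.operands[i].imm, 0, 255, TRUE),
   i.e. an optional '#' followed by a value in [0, 255].  */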
4457
 
4458
static int
4459
parse_immediate (char **str, int *val, int min, int max,
4460
                 bfd_boolean prefix_opt)
4461
{
4462
  expressionS exp;
4463
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4464
  if (exp.X_op != O_constant)
4465
    {
4466
      inst.error = _("constant expression required");
4467
      return FAIL;
4468
    }
4469
 
4470
  if (exp.X_add_number < min || exp.X_add_number > max)
4471
    {
4472
      inst.error = _("immediate value out of range");
4473
      return FAIL;
4474
    }
4475
 
4476
  *val = exp.X_add_number;
4477
  return SUCCESS;
4478
}
4479
 
4480
/* Less-generic immediate-value read function with the possibility of loading a
4481
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4482
   instructions. Puts the result directly in inst.operands[i].  */
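/* Illustrative operand (added): "vmov.i64 d0, #0xff0000ff0000ffff"
   needs all 64 bits; the low 32 end up in .imm, the high 32 in .reg,
   and .regisimm is set so the encoder knows both halves are valid.  */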
4483
 
4484
static int
4485
parse_big_immediate (char **str, int i)
4486
{
4487
  expressionS exp;
4488
  char *ptr = *str;
4489
 
4490
  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4491
 
4492
  if (exp.X_op == O_constant)
4493
    {
4494
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4495
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
4496
         O_constant.  We have to be careful not to break compilation for
4497
         32-bit X_add_number, though.  */
4498
      if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4499
        {
4500
          /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
4501
          inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4502
          inst.operands[i].regisimm = 1;
4503
        }
4504
    }
4505
  else if (exp.X_op == O_big
4506
           && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
4507
    {
4508
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4509
 
4510
      /* Bignums have their least significant bits in
4511
         generic_bignum[0]. Make sure we put 32 bits in imm and
4512
         32 bits in reg,  in a (hopefully) portable way.  */
4513
      gas_assert (parts != 0);
4514
 
4515
      /* Make sure that the number is not too big.
4516
         PR 11972: Bignums can now be sign-extended to the
4517
         size of a .octa so check that the out of range bits
4518
         are all zero or all one.  */
4519
      if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
4520
        {
4521
          LITTLENUM_TYPE m = -1;
4522
 
4523
          if (generic_bignum[parts * 2] != 0
4524
              && generic_bignum[parts * 2] != m)
4525
            return FAIL;
4526
 
4527
          for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
4528
            if (generic_bignum[j] != generic_bignum[j-1])
4529
              return FAIL;
4530
        }
4531
 
4532
      inst.operands[i].imm = 0;
4533
      for (j = 0; j < parts; j++, idx++)
4534
        inst.operands[i].imm |= generic_bignum[idx]
4535
                                << (LITTLENUM_NUMBER_OF_BITS * j);
4536
      inst.operands[i].reg = 0;
4537
      for (j = 0; j < parts; j++, idx++)
4538
        inst.operands[i].reg |= generic_bignum[idx]
4539
                                << (LITTLENUM_NUMBER_OF_BITS * j);
4540
      inst.operands[i].regisimm = 1;
4541
    }
4542
  else
4543
    return FAIL;
4544
 
4545
  *str = ptr;
4546
 
4547
  return SUCCESS;
4548
}
4549
 
4550
/* Returns the pseudo-register number of an FPA immediate constant,
4551
   or FAIL if there isn't a valid constant here.  */
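/* (Added note: the fp_const table used below holds the classic FPA
   constant set 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.5 and 10.0, so an
   operand such as "#2.0" matches index 2 and returns pseudo-register
   10, i.e. i + 8.)  */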
4552
 
4553
static int
4554
parse_fpa_immediate (char ** str)
4555
{
4556
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4557
  char *         save_in;
4558
  expressionS    exp;
4559
  int            i;
4560
  int            j;
4561
 
4562
  /* First try and match exact strings, this is to guarantee
4563
     that some formats will work even for cross assembly.  */
4564
 
4565
  for (i = 0; fp_const[i]; i++)
4566
    {
4567
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4568
        {
4569
          char *start = *str;
4570
 
4571
          *str += strlen (fp_const[i]);
4572
          if (is_end_of_line[(unsigned char) **str])
4573
            return i + 8;
4574
          *str = start;
4575
        }
4576
    }
4577
 
4578
  /* Just because we didn't get a match doesn't mean that the constant
4579
     isn't valid, just that it is in a format that we don't
4580
     automatically recognize.  Try parsing it with the standard
4581
     expression routines.  */
4582
 
4583
  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4584
 
4585
  /* Look for a raw floating point number.  */
4586
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4587
      && is_end_of_line[(unsigned char) *save_in])
4588
    {
4589
      for (i = 0; i < NUM_FLOAT_VALS; i++)
4590
        {
4591
          for (j = 0; j < MAX_LITTLENUMS; j++)
4592
            {
4593
              if (words[j] != fp_values[i][j])
4594
                break;
4595
            }
4596
 
4597
          if (j == MAX_LITTLENUMS)
4598
            {
4599
              *str = save_in;
4600
              return i + 8;
4601
            }
4602
        }
4603
    }
4604
 
4605
  /* Try and parse a more complex expression, this will probably fail
4606
     unless the code uses a floating point prefix (eg "0f").  */
4607
  save_in = input_line_pointer;
4608
  input_line_pointer = *str;
4609
  if (expression (&exp) == absolute_section
4610
      && exp.X_op == O_big
4611
      && exp.X_add_number < 0)
4612
    {
4613
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4614
         Ditto for 15.  */
4615
      if (gen_to_words (words, 5, (long) 15) == 0)
4616
        {
4617
          for (i = 0; i < NUM_FLOAT_VALS; i++)
4618
            {
4619
              for (j = 0; j < MAX_LITTLENUMS; j++)
4620
                {
4621
                  if (words[j] != fp_values[i][j])
4622
                    break;
4623
                }
4624
 
4625
              if (j == MAX_LITTLENUMS)
4626
                {
4627
                  *str = input_line_pointer;
4628
                  input_line_pointer = save_in;
4629
                  return i + 8;
4630
                }
4631
            }
4632
        }
4633
    }
4634
 
4635
  *str = input_line_pointer;
4636
  input_line_pointer = save_in;
4637
  inst.error = _("invalid FPA immediate expression");
4638
  return FAIL;
4639
}
4640
 
4641
/* Returns 1 if a number has "quarter-precision" float format
4642
   0baBbbbbbc defgh000 00000000 00000000.  */
4643
 
4644
static int
4645
is_quarter_float (unsigned imm)
4646
{
4647
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4648
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4649
}
4650
 
4651
/* Parse an 8-bit "quarter-precision" floating point number of the form:
4652
   0baBbbbbbc defgh000 00000000 00000000.
4653
   The zero and minus-zero cases need special handling, since they can't be
4654
   encoded in the "quarter-precision" float format, but can nonetheless be
4655
   loaded as integer constants.  */
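/* Illustrative values (added): #1.0 parses to the single-precision
   pattern 0x3f800000 and #-0.5 to 0xbf000000, both accepted by
   is_quarter_float above; #0.0 and #-0.0 are caught by the separate
   (fpword & 0x7fffffff) == 0 test below.  */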
4656
 
4657
static unsigned
4658
parse_qfloat_immediate (char **ccp, int *immed)
4659
{
4660
  char *str = *ccp;
4661
  char *fpnum;
4662
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
4663
  int found_fpchar = 0;
4664
 
4665
  skip_past_char (&str, '#');
4666
 
4667
  /* We must not accidentally parse an integer as a floating-point number. Make
4668
     sure that the value we parse is not an integer by checking for special
4669
     characters '.' or 'e'.
4670
     FIXME: This is a horrible hack, but doing better is tricky because type
4671
     information isn't in a very usable state at parse time.  */
4672
  fpnum = str;
4673
  skip_whitespace (fpnum);
4674
 
4675
  if (strncmp (fpnum, "0x", 2) == 0)
4676
    return FAIL;
4677
  else
4678
    {
4679
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4680
        if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4681
          {
4682
            found_fpchar = 1;
4683
            break;
4684
          }
4685
 
4686
      if (!found_fpchar)
4687
        return FAIL;
4688
    }
4689
 
4690
  if ((str = atof_ieee (str, 's', words)) != NULL)
4691
    {
4692
      unsigned fpword = 0;
4693
      int i;
4694
 
4695
      /* Our FP word must be 32 bits (single-precision FP).  */
4696
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4697
        {
4698
          fpword <<= LITTLENUM_NUMBER_OF_BITS;
4699
          fpword |= words[i];
4700
        }
4701
 
4702
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4703
        *immed = fpword;
4704
      else
4705
        return FAIL;
4706
 
4707
      *ccp = str;
4708
 
4709
      return SUCCESS;
4710
    }
4711
 
4712
  return FAIL;
4713
}
4714
 
4715
/* Shift operands.  */
4716
enum shift_kind
4717
{
4718
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4719
};
4720
 
4721
struct asm_shift_name
4722
{
4723
  const char      *name;
4724
  enum shift_kind  kind;
4725
};
4726
 
4727
/* Third argument to parse_shift.  */
4728
enum parse_shift_mode
4729
{
4730
  NO_SHIFT_RESTRICT,            /* Any kind of shift is accepted.  */
4731
  SHIFT_IMMEDIATE,              /* Shift operand must be an immediate.  */
4732
  SHIFT_LSL_OR_ASR_IMMEDIATE,   /* Shift must be LSL or ASR immediate.  */
4733
  SHIFT_ASR_IMMEDIATE,          /* Shift must be ASR immediate.  */
4734
  SHIFT_LSL_IMMEDIATE,          /* Shift must be LSL immediate.  */
4735
};
4736
 
4737
/* Parse a <shift> specifier on an ARM data processing instruction.
4738
   This has three forms:
4739
 
4740
     (LSL|LSR|ASL|ASR|ROR) Rs
4741
     (LSL|LSR|ASL|ASR|ROR) #imm
4742
     RRX
4743
 
4744
   Note that ASL is assimilated to LSL in the instruction encoding, and
4745
   RRX to ROR #0 (which cannot be written as such).  */
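/* Illustrative operands accepted here (added):

     ADD r0, r1, r2, LSL #3     @ immediate shift
     ADD r0, r1, r2, ASR r3     @ register-specified shift
     MOV r0, r1, RRX            @ rotate right with extend

   "ASL #n" is encoded exactly as "LSL #n".  */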
4746
 
4747
static int
4748
parse_shift (char **str, int i, enum parse_shift_mode mode)
4749
{
4750
  const struct asm_shift_name *shift_name;
4751
  enum shift_kind shift;
4752
  char *s = *str;
4753
  char *p = s;
4754
  int reg;
4755
 
4756
  for (p = *str; ISALPHA (*p); p++)
4757
    ;
4758
 
4759
  if (p == *str)
4760
    {
4761
      inst.error = _("shift expression expected");
4762
      return FAIL;
4763
    }
4764
 
4765
  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4766
                                                            p - *str);
4767
 
4768
  if (shift_name == NULL)
4769
    {
4770
      inst.error = _("shift expression expected");
4771
      return FAIL;
4772
    }
4773
 
4774
  shift = shift_name->kind;
4775
 
4776
  switch (mode)
4777
    {
4778
    case NO_SHIFT_RESTRICT:
4779
    case SHIFT_IMMEDIATE:   break;
4780
 
4781
    case SHIFT_LSL_OR_ASR_IMMEDIATE:
4782
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4783
        {
4784
          inst.error = _("'LSL' or 'ASR' required");
4785
          return FAIL;
4786
        }
4787
      break;
4788
 
4789
    case SHIFT_LSL_IMMEDIATE:
4790
      if (shift != SHIFT_LSL)
4791
        {
4792
          inst.error = _("'LSL' required");
4793
          return FAIL;
4794
        }
4795
      break;
4796
 
4797
    case SHIFT_ASR_IMMEDIATE:
4798
      if (shift != SHIFT_ASR)
4799
        {
4800
          inst.error = _("'ASR' required");
4801
          return FAIL;
4802
        }
4803
      break;
4804
 
4805
    default: abort ();
4806
    }
4807
 
4808
  if (shift != SHIFT_RRX)
4809
    {
4810
      /* Whitespace can appear here if the next thing is a bare digit.  */
4811
      skip_whitespace (p);
4812
 
4813
      if (mode == NO_SHIFT_RESTRICT
4814
          && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4815
        {
4816
          inst.operands[i].imm = reg;
4817
          inst.operands[i].immisreg = 1;
4818
        }
4819
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4820
        return FAIL;
4821
    }
4822
  inst.operands[i].shift_kind = shift;
4823
  inst.operands[i].shifted = 1;
4824
  *str = p;
4825
  return SUCCESS;
4826
}
4827
 
4828
/* Parse a <shifter_operand> for an ARM data processing instruction:
4829
 
4830
      #<immediate>
4831
      #<immediate>, <rotate>
4832
      <Rm>
4833
      <Rm>, <shift>
4834
 
4835
   where <shift> is defined by parse_shift above, and <rotate> is a
4836
   multiple of 2 between 0 and 30.  Validation of immediate operands
4837
   is deferred to md_apply_fix.  */
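/* For example (added): "MOV r0, #4, 2" requests the 8-bit constant 4
   rotated right by 2, while "ADD r0, r1, r2, LSL #1" takes the
   register-plus-shift path through parse_shift above.  */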
4838
 
4839
static int
4840
parse_shifter_operand (char **str, int i)
4841
{
4842
  int value;
4843
  expressionS exp;
4844
 
4845
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4846
    {
4847
      inst.operands[i].reg = value;
4848
      inst.operands[i].isreg = 1;
4849
 
4850
      /* parse_shift will override this if appropriate */
4851
      inst.reloc.exp.X_op = O_constant;
4852
      inst.reloc.exp.X_add_number = 0;
4853
 
4854
      if (skip_past_comma (str) == FAIL)
4855
        return SUCCESS;
4856
 
4857
      /* Shift operation on register.  */
4858
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
4859
    }
4860
 
4861
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4862
    return FAIL;
4863
 
4864
  if (skip_past_comma (str) == SUCCESS)
4865
    {
4866
      /* #x, y -- ie explicit rotation by Y.  */
4867
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
4868
        return FAIL;
4869
 
4870
      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4871
        {
4872
          inst.error = _("constant expression expected");
4873
          return FAIL;
4874
        }
4875
 
4876
      value = exp.X_add_number;
4877
      if (value < 0 || value > 30 || value % 2 != 0)
4878
        {
4879
          inst.error = _("invalid rotation");
4880
          return FAIL;
4881
        }
4882
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4883
        {
4884
          inst.error = _("invalid constant");
4885
          return FAIL;
4886
        }
4887
 
4888 163 khays
      /* Encode as specified.  */
4889
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
4890
      return SUCCESS;
4891 16 khays
    }
4892
 
4893
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4894
  inst.reloc.pc_rel = 0;
4895
  return SUCCESS;
4896
}
4897
 
4898
/* Group relocation information.  Each entry in the table contains the
4899
   textual name of the relocation as may appear in assembler source
4900
   and must end with a colon.
4901
   Along with this textual name are the relocation codes to be used if
4902
   the corresponding instruction is an ALU instruction (ADD or SUB only),
4903
   an LDR, an LDRS, or an LDC.  */
4904
 
4905
struct group_reloc_table_entry
4906
{
4907
  const char *name;
4908
  int alu_code;
4909
  int ldr_code;
4910
  int ldrs_code;
4911
  int ldc_code;
4912
};
4913
 
4914
typedef enum
4915
{
4916
  /* Varieties of non-ALU group relocation.  */
4917
 
4918
  GROUP_LDR,
4919
  GROUP_LDRS,
4920
  GROUP_LDC
4921
} group_reloc_type;
4922
 
4923
static struct group_reloc_table_entry group_reloc_table[] =
4924
  { /* Program counter relative: */
4925
    { "pc_g0_nc",
4926
      BFD_RELOC_ARM_ALU_PC_G0_NC,       /* ALU */
4927
      0,                         /* LDR */
4928
      0,                         /* LDRS */
4929
      0 },                       /* LDC */
4930
    { "pc_g0",
4931
      BFD_RELOC_ARM_ALU_PC_G0,          /* ALU */
4932
      BFD_RELOC_ARM_LDR_PC_G0,          /* LDR */
4933
      BFD_RELOC_ARM_LDRS_PC_G0,         /* LDRS */
4934
      BFD_RELOC_ARM_LDC_PC_G0 },        /* LDC */
4935
    { "pc_g1_nc",
4936
      BFD_RELOC_ARM_ALU_PC_G1_NC,       /* ALU */
4937
      0,                         /* LDR */
4938
      0,                         /* LDRS */
4939
      0 },                       /* LDC */
4940
    { "pc_g1",
4941
      BFD_RELOC_ARM_ALU_PC_G1,          /* ALU */
4942
      BFD_RELOC_ARM_LDR_PC_G1,          /* LDR */
4943
      BFD_RELOC_ARM_LDRS_PC_G1,         /* LDRS */
4944
      BFD_RELOC_ARM_LDC_PC_G1 },        /* LDC */
4945
    { "pc_g2",
4946
      BFD_RELOC_ARM_ALU_PC_G2,          /* ALU */
4947
      BFD_RELOC_ARM_LDR_PC_G2,          /* LDR */
4948
      BFD_RELOC_ARM_LDRS_PC_G2,         /* LDRS */
4949
      BFD_RELOC_ARM_LDC_PC_G2 },        /* LDC */
4950
    /* Section base relative */
4951
    { "sb_g0_nc",
4952
      BFD_RELOC_ARM_ALU_SB_G0_NC,       /* ALU */
4953
      0,                         /* LDR */
4954
      0,                         /* LDRS */
4955
      0 },                       /* LDC */
4956
    { "sb_g0",
4957
      BFD_RELOC_ARM_ALU_SB_G0,          /* ALU */
4958
      BFD_RELOC_ARM_LDR_SB_G0,          /* LDR */
4959
      BFD_RELOC_ARM_LDRS_SB_G0,         /* LDRS */
4960
      BFD_RELOC_ARM_LDC_SB_G0 },        /* LDC */
4961
    { "sb_g1_nc",
4962
      BFD_RELOC_ARM_ALU_SB_G1_NC,       /* ALU */
4963
      0,                         /* LDR */
4964
      0,                         /* LDRS */
4965
      0 },                       /* LDC */
4966
    { "sb_g1",
4967
      BFD_RELOC_ARM_ALU_SB_G1,          /* ALU */
4968
      BFD_RELOC_ARM_LDR_SB_G1,          /* LDR */
4969
      BFD_RELOC_ARM_LDRS_SB_G1,         /* LDRS */
4970
      BFD_RELOC_ARM_LDC_SB_G1 },        /* LDC */
4971
    { "sb_g2",
4972
      BFD_RELOC_ARM_ALU_SB_G2,          /* ALU */
4973
      BFD_RELOC_ARM_LDR_SB_G2,          /* LDR */
4974
      BFD_RELOC_ARM_LDRS_SB_G2,         /* LDRS */
4975
      BFD_RELOC_ARM_LDC_SB_G2 } };      /* LDC */
4976
 
4977
/* Given the address of a pointer pointing to the textual name of a group
4978
   relocation as may appear in assembler source, attempt to find its details
4979
   in group_reloc_table.  The pointer will be updated to the character after
4980
   the trailing colon.  On failure, FAIL will be returned; SUCCESS
4981
   otherwise.  On success, *entry will be updated to point at the relevant
4982
   group_reloc_table entry. */
4983
 
4984
static int
4985
find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4986
{
4987
  unsigned int i;
4988
  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4989
    {
4990
      int length = strlen (group_reloc_table[i].name);
4991
 
4992
      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4993
          && (*str)[length] == ':')
4994
        {
4995
          *out = &group_reloc_table[i];
4996
          *str += (length + 1);
4997
          return SUCCESS;
4998
        }
4999
    }
5000
 
5001
  return FAIL;
5002
}
5003
 
5004
/* Parse a <shifter_operand> for an ARM data processing instruction
5005
   (as for parse_shifter_operand) where group relocations are allowed:
5006
 
5007
      #<immediate>
5008
      #<immediate>, <rotate>
5009
      #:<group_reloc>:<expression>
5010
      <Rm>
5011
      <Rm>, <shift>
5012
 
5013
   where <group_reloc> is one of the strings defined in group_reloc_table.
5014
   The hashes are optional.
5015
 
5016
   Everything else is as for parse_shifter_operand.  */
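/* Illustrative source line (added; the symbol name is made up):

       add     r0, r0, #:sb_g0_nc:(some_sym)

   Only the ALU relocation code from the table entry is used on this
   path; the LDR/LDRS/LDC variants are selected in parse_address_main
   below.  */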
5017
 
5018
static parse_operand_result
5019
parse_shifter_operand_group_reloc (char **str, int i)
5020
{
5021
  /* Determine if we have the sequence of characters #: or just :
5022
     coming next.  If we do, then we check for a group relocation.
5023
     If we don't, punt the whole lot to parse_shifter_operand.  */
5024
 
5025
  if (((*str)[0] == '#' && (*str)[1] == ':')
5026
      || (*str)[0] == ':')
5027
    {
5028
      struct group_reloc_table_entry *entry;
5029
 
5030
      if ((*str)[0] == '#')
5031
        (*str) += 2;
5032
      else
5033
        (*str)++;
5034
 
5035
      /* Try to parse a group relocation.  Anything else is an error.  */
5036
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
5037
        {
5038
          inst.error = _("unknown group relocation");
5039
          return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5040
        }
5041
 
5042
      /* We now have the group relocation table entry corresponding to
5043
         the name in the assembler source.  Next, we parse the expression.  */
5044
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5045
        return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5046
 
5047
      /* Record the relocation type (always the ALU variant here).  */
5048
      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5049
      gas_assert (inst.reloc.type != 0);
5050
 
5051
      return PARSE_OPERAND_SUCCESS;
5052
    }
5053
  else
5054
    return parse_shifter_operand (str, i) == SUCCESS
5055
           ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5056
 
5057
  /* Never reached.  */
5058
}
5059
 
5060
/* Parse a Neon alignment expression.  Information is written to
5061
   inst.operands[i].  We assume the initial ':' has been skipped.
5062
 
5063
   align        .imm = align << 8, .immisalign=1, .preind=0  */
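/* Illustrative operand (added): "vld1.8 {d0}, [r0:64]" (also written
   "[r0,:64]") reaches this parser with "64" after the colon; the value
   is stored as 64 << 8 with .immisalign set, and a post-index register
   may later be OR-ed into the low bits.  */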
5064
static parse_operand_result
5065
parse_neon_alignment (char **str, int i)
5066
{
5067
  char *p = *str;
5068
  expressionS exp;
5069
 
5070
  my_get_expression (&exp, &p, GE_NO_PREFIX);
5071
 
5072
  if (exp.X_op != O_constant)
5073
    {
5074
      inst.error = _("alignment must be constant");
5075
      return PARSE_OPERAND_FAIL;
5076
    }
5077
 
5078
  inst.operands[i].imm = exp.X_add_number << 8;
5079
  inst.operands[i].immisalign = 1;
5080
  /* Alignments are not pre-indexes.  */
5081
  inst.operands[i].preind = 0;
5082
 
5083
  *str = p;
5084
  return PARSE_OPERAND_SUCCESS;
5085
}
5086
 
5087
/* Parse all forms of an ARM address expression.  Information is written
5088
   to inst.operands[i] and/or inst.reloc.
5089
 
5090
   Preindexed addressing (.preind=1):
5091
 
5092
   [Rn, #offset]       .reg=Rn .reloc.exp=offset
5093
   [Rn, +/-Rm]         .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5094
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5095
                       .shift_kind=shift .reloc.exp=shift_imm
5096
 
5097
   These three may have a trailing ! which causes .writeback to be set also.
5098
 
5099
   Postindexed addressing (.postind=1, .writeback=1):
5100
 
5101
   [Rn], #offset       .reg=Rn .reloc.exp=offset
5102
   [Rn], +/-Rm         .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5103
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5104
                       .shift_kind=shift .reloc.exp=shift_imm
5105
 
5106
   Unindexed addressing (.preind=0, .postind=0):
5107
 
5108
   [Rn], {option}      .reg=Rn .imm=option .immisreg=0
5109
 
5110
   Other:
5111
 
5112
   [Rn]{!}             shorthand for [Rn,#0]{!}
5113
   =immediate          .isreg=0 .reloc.exp=immediate
5114
   label               .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5115
 
5116
  It is the caller's responsibility to check for addressing modes not
5117
  supported by the instruction, and to set inst.reloc.type.  */
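/* A few concrete forms and the fields they set (added illustration):

     LDR r0, [r1, #4]        @ .reg=1 .preind=1          reloc.exp = 4
     LDR r0, [r1, r2]!       @ .reg=1 .imm=2 .immisreg=1 .writeback=1
     LDR r0, [r1], #-8       @ .reg=1 .postind=1 .writeback=1 reloc.exp = -8
     LDR r0, =0x12345678     @ load-constant pseudo-op, .isreg=0  */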
5118
 
5119
static parse_operand_result
5120
parse_address_main (char **str, int i, int group_relocations,
5121
                    group_reloc_type group_type)
5122
{
5123
  char *p = *str;
5124
  int reg;
5125
 
5126
  if (skip_past_char (&p, '[') == FAIL)
5127
    {
5128
      if (skip_past_char (&p, '=') == FAIL)
5129
        {
5130
          /* Bare address - translate to PC-relative offset.  */
5131
          inst.reloc.pc_rel = 1;
5132
          inst.operands[i].reg = REG_PC;
5133
          inst.operands[i].isreg = 1;
5134
          inst.operands[i].preind = 1;
5135
        }
5136
      /* Otherwise a load-constant pseudo op, no special treatment needed here.  */
5137
 
5138
      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5139
        return PARSE_OPERAND_FAIL;
5140
 
5141
      *str = p;
5142
      return PARSE_OPERAND_SUCCESS;
5143
    }
5144
 
5145
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5146
    {
5147
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5148
      return PARSE_OPERAND_FAIL;
5149
    }
5150
  inst.operands[i].reg = reg;
5151
  inst.operands[i].isreg = 1;
5152
 
5153
  if (skip_past_comma (&p) == SUCCESS)
5154
    {
5155
      inst.operands[i].preind = 1;
5156
 
5157
      if (*p == '+') p++;
5158
      else if (*p == '-') p++, inst.operands[i].negative = 1;
5159
 
5160
      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5161
        {
5162
          inst.operands[i].imm = reg;
5163
          inst.operands[i].immisreg = 1;
5164
 
5165
          if (skip_past_comma (&p) == SUCCESS)
5166
            if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5167
              return PARSE_OPERAND_FAIL;
5168
        }
5169
      else if (skip_past_char (&p, ':') == SUCCESS)
5170
        {
5171
          /* FIXME: '@' should be used here, but it's filtered out by generic
5172
             code before we get to see it here. This may be subject to
5173
             change.  */
5174
          parse_operand_result result = parse_neon_alignment (&p, i);
5175
 
5176
          if (result != PARSE_OPERAND_SUCCESS)
5177
            return result;
5178
        }
5179
      else
5180
        {
5181
          if (inst.operands[i].negative)
5182
            {
5183
              inst.operands[i].negative = 0;
5184
              p--;
5185
            }
5186
 
5187
          if (group_relocations
5188
              && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5189
            {
5190
              struct group_reloc_table_entry *entry;
5191
 
5192
              /* Skip over the #: or : sequence.  */
5193
              if (*p == '#')
5194
                p += 2;
5195
              else
5196
                p++;
5197
 
5198
              /* Try to parse a group relocation.  Anything else is an
5199
                 error.  */
5200
              if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5201
                {
5202
                  inst.error = _("unknown group relocation");
5203
                  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5204
                }
5205
 
5206
              /* We now have the group relocation table entry corresponding to
5207
                 the name in the assembler source.  Next, we parse the
5208
                 expression.  */
5209
              if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5210
                return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5211
 
5212
              /* Record the relocation type.  */
5213
              switch (group_type)
5214
                {
5215
                  case GROUP_LDR:
5216
                    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5217
                    break;
5218
 
5219
                  case GROUP_LDRS:
5220
                    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5221
                    break;
5222
 
5223
                  case GROUP_LDC:
5224
                    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5225
                    break;
5226
 
5227
                  default:
5228
                    gas_assert (0);
5229
                }
5230
 
5231
              if (inst.reloc.type == 0)
5232
                {
5233
                  inst.error = _("this group relocation is not allowed on this instruction");
5234
                  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5235
                }
5236
            }
5237
          else
5238
            {
5239
              char *q = p;
5240
              if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5241
                return PARSE_OPERAND_FAIL;
5242
              /* If the offset is 0, find out if it's a +0 or -0.  */
5243
              if (inst.reloc.exp.X_op == O_constant
5244
                  && inst.reloc.exp.X_add_number == 0)
5245
                {
5246
                  skip_whitespace (q);
5247
                  if (*q == '#')
5248
                    {
5249
                      q++;
5250
                      skip_whitespace (q);
5251
                    }
5252
                  if (*q == '-')
5253
                    inst.operands[i].negative = 1;
5254
                }
5255
            }
5256
        }
5257
    }
5258
  else if (skip_past_char (&p, ':') == SUCCESS)
5259
    {
5260
      /* FIXME: '@' should be used here, but it's filtered out by generic code
5261
         before we get to see it here. This may be subject to change.  */
5262
      parse_operand_result result = parse_neon_alignment (&p, i);
5263
 
5264
      if (result != PARSE_OPERAND_SUCCESS)
5265
        return result;
5266
    }
5267
 
5268
  if (skip_past_char (&p, ']') == FAIL)
5269
    {
5270
      inst.error = _("']' expected");
5271
      return PARSE_OPERAND_FAIL;
5272
    }
5273
 
5274
  if (skip_past_char (&p, '!') == SUCCESS)
5275
    inst.operands[i].writeback = 1;
5276
 
5277
  else if (skip_past_comma (&p) == SUCCESS)
5278
    {
5279
      if (skip_past_char (&p, '{') == SUCCESS)
5280
        {
5281
          /* [Rn], {expr} - unindexed, with option */
5282
          if (parse_immediate (&p, &inst.operands[i].imm,
5283
                               0, 255, TRUE) == FAIL)
5284
            return PARSE_OPERAND_FAIL;
5285
 
5286
          if (skip_past_char (&p, '}') == FAIL)
5287
            {
5288
              inst.error = _("'}' expected at end of 'option' field");
5289
              return PARSE_OPERAND_FAIL;
5290
            }
5291
          if (inst.operands[i].preind)
5292
            {
5293
              inst.error = _("cannot combine index with option");
5294
              return PARSE_OPERAND_FAIL;
5295
            }
5296
          *str = p;
5297
          return PARSE_OPERAND_SUCCESS;
5298
        }
5299
      else
5300
        {
5301
          inst.operands[i].postind = 1;
5302
          inst.operands[i].writeback = 1;
5303
 
5304
          if (inst.operands[i].preind)
5305
            {
5306
              inst.error = _("cannot combine pre- and post-indexing");
5307
              return PARSE_OPERAND_FAIL;
5308
            }
5309
 
5310
          if (*p == '+') p++;
5311
          else if (*p == '-') p++, inst.operands[i].negative = 1;
5312
 
5313
          if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5314
            {
5315
              /* We might be using the immediate for alignment already. If we
5316
                 are, OR the register number into the low-order bits.  */
5317
              if (inst.operands[i].immisalign)
5318
                inst.operands[i].imm |= reg;
5319
              else
5320
                inst.operands[i].imm = reg;
5321
              inst.operands[i].immisreg = 1;
5322
 
5323
              if (skip_past_comma (&p) == SUCCESS)
5324
                if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5325
                  return PARSE_OPERAND_FAIL;
5326
            }
5327
          else
5328
            {
5329
              char *q = p;
5330
              if (inst.operands[i].negative)
5331
                {
5332
                  inst.operands[i].negative = 0;
5333
                  p--;
5334
                }
5335
              if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5336
                return PARSE_OPERAND_FAIL;
5337
              /* If the offset is 0, find out if it's a +0 or -0.  */
5338
              if (inst.reloc.exp.X_op == O_constant
5339
                  && inst.reloc.exp.X_add_number == 0)
5340
                {
5341
                  skip_whitespace (q);
5342
                  if (*q == '#')
5343
                    {
5344
                      q++;
5345
                      skip_whitespace (q);
5346
                    }
5347
                  if (*q == '-')
5348
                    inst.operands[i].negative = 1;
5349
                }
5350
            }
5351
        }
5352
    }
5353
 
5354
  /* If at this point neither .preind nor .postind is set, we have a
5355
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
5356
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5357
    {
5358
      inst.operands[i].preind = 1;
5359
      inst.reloc.exp.X_op = O_constant;
5360
      inst.reloc.exp.X_add_number = 0;
5361
    }
5362
  *str = p;
5363
  return PARSE_OPERAND_SUCCESS;
5364
}
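/* As a rough illustration (not an exhaustive list), the address forms
   accepted by the routine above include:
       [r1]                  bare base register, shorthand for [r1, #0]
       [r1, #4]              pre-indexed, immediate offset
       [r1, r2, lsl #2]!     pre-indexed, register offset with shift, writeback
       [r1], #-8             post-indexed, immediate offset
       [r1], {16}            unindexed, with an "option" field (0..255)
   Neon alignment specifiers introduced by ':' and group-relocation
   prefixes written "#:<name>:" are only accepted on the paths noted in
   the code above.  The register numbers are illustrative only.  */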
5365
 
5366
static int
5367
parse_address (char **str, int i)
5368
{
5369
  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5370
         ? SUCCESS : FAIL;
5371
}
5372
 
5373
static parse_operand_result
5374
parse_address_group_reloc (char **str, int i, group_reloc_type type)
5375
{
5376
  return parse_address_main (str, i, 1, type);
5377
}
5378
 
5379
/* Parse an operand for a MOVW or MOVT instruction.  */
5380
static int
5381
parse_half (char **str)
5382
{
5383
  char * p;
5384
 
5385
  p = *str;
5386
  skip_past_char (&p, '#');
5387
  if (strncasecmp (p, ":lower16:", 9) == 0)
5388
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
5389
  else if (strncasecmp (p, ":upper16:", 9) == 0)
5390
    inst.reloc.type = BFD_RELOC_ARM_MOVT;
5391
 
5392
  if (inst.reloc.type != BFD_RELOC_UNUSED)
5393
    {
5394
      p += 9;
5395
      skip_whitespace (p);
5396
    }
5397
 
5398
  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5399
    return FAIL;
5400
 
5401
  if (inst.reloc.type == BFD_RELOC_UNUSED)
5402
    {
5403
      if (inst.reloc.exp.X_op != O_constant)
5404
        {
5405
          inst.error = _("constant expression expected");
5406
          return FAIL;
5407
        }
5408
      if (inst.reloc.exp.X_add_number < 0
5409
          || inst.reloc.exp.X_add_number > 0xffff)
5410
        {
5411
          inst.error = _("immediate value out of range");
5412
          return FAIL;
5413
        }
5414
    }
5415
  *str = p;
5416
  return SUCCESS;
5417
}
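/* For illustration, parse_half handles operands such as:
       movw r0, #:lower16:some_symbol
       movt r0, #:upper16:some_symbol
       movw r0, #0x1234
   The ":lower16:"/":upper16:" prefixes select the MOVW/MOVT relocations
   recorded above; a plain constant must evaluate to 0..0xffff.
   "some_symbol" is just a placeholder name.  */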
5418
 
5419
/* Miscellaneous. */
5420
 
5421
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
5422
   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
5423
static int
5424
parse_psr (char **str, bfd_boolean lhs)
5425
{
5426
  char *p;
5427
  unsigned long psr_field;
5428
  const struct asm_psr *psr;
5429
  char *start;
5430
  bfd_boolean is_apsr = FALSE;
5431
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5432
 
5433
  /* PR gas/12698:  If the user has specified -march=all then m_profile will
5434
     be TRUE, but we want to ignore it in this case as we are building for any
5435
     CPU type, including non-m variants.  */
5436
  if (selected_cpu.core == arm_arch_any.core)
5437
    m_profile = FALSE;
5438
 
5439
  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
5440
     feature for ease of use and backwards compatibility.  */
5441
  p = *str;
5442
  if (strncasecmp (p, "SPSR", 4) == 0)
5443
    {
5444
      if (m_profile)
5445
        goto unsupported_psr;
5446
 
5447
      psr_field = SPSR_BIT;
5448
    }
5449
  else if (strncasecmp (p, "CPSR", 4) == 0)
5450
    {
5451
      if (m_profile)
5452
        goto unsupported_psr;
5453
 
5454
      psr_field = 0;
5455
    }
5456
  else if (strncasecmp (p, "APSR", 4) == 0)
5457
    {
5458
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5459
         and ARMv7-R architecture CPUs.  */
5460
      is_apsr = TRUE;
5461
      psr_field = 0;
5462
    }
5463
  else if (m_profile)
5464
    {
5465
      start = p;
5466
      do
5467
        p++;
5468
      while (ISALNUM (*p) || *p == '_');
5469
 
5470
      if (strncasecmp (start, "iapsr", 5) == 0
5471
          || strncasecmp (start, "eapsr", 5) == 0
5472
          || strncasecmp (start, "xpsr", 4) == 0
5473
          || strncasecmp (start, "psr", 3) == 0)
5474
        p = start + strcspn (start, "rR") + 1;
5475
 
5476
      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5477
                                                  p - start);
5478
 
5479
      if (!psr)
5480
        return FAIL;
5481
 
5482
      /* If APSR is being written, a bitfield may be specified.  Note that
5483
         APSR itself is handled above.  */
5484
      if (psr->field <= 3)
5485
        {
5486
          psr_field = psr->field;
5487
          is_apsr = TRUE;
5488
          goto check_suffix;
5489
        }
5490
 
5491
      *str = p;
5492
      /* M-profile MSR instructions have the mask field set to "10", except
5493
         *PSR variants which modify APSR, which may use a different mask (and
5494
         have been handled already).  Do that by setting the PSR_f field
5495
         here.  */
5496
      return psr->field | (lhs ? PSR_f : 0);
5497
    }
5498
  else
5499
    goto unsupported_psr;
5500
 
5501
  p += 4;
5502
check_suffix:
5503
  if (*p == '_')
5504
    {
5505
      /* A suffix follows.  */
5506
      p++;
5507
      start = p;
5508
 
5509
      do
5510
        p++;
5511
      while (ISALNUM (*p) || *p == '_');
5512
 
5513
      if (is_apsr)
5514
        {
5515
          /* APSR uses a notation for bits, rather than fields.  */
5516
          unsigned int nzcvq_bits = 0;
5517
          unsigned int g_bit = 0;
5518
          char *bit;
5519
 
5520
          for (bit = start; bit != p; bit++)
5521
            {
5522
              switch (TOLOWER (*bit))
5523
                {
5524
                case 'n':
5525
                  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5526
                  break;
5527
 
5528
                case 'z':
5529
                  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5530
                  break;
5531
 
5532
                case 'c':
5533
                  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5534
                  break;
5535
 
5536
                case 'v':
5537
                  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5538
                  break;
5539
 
5540
                case 'q':
5541
                  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5542
                  break;
5543
 
5544
                case 'g':
5545
                  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5546
                  break;
5547
 
5548
                default:
5549
                  inst.error = _("unexpected bit specified after APSR");
5550
                  return FAIL;
5551
                }
5552
            }
5553
 
5554
          if (nzcvq_bits == 0x1f)
5555
            psr_field |= PSR_f;
5556
 
5557
          if (g_bit == 0x1)
5558
            {
5559
              if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5560
                {
5561
                  inst.error = _("selected processor does not "
5562
                                 "support DSP extension");
5563
                  return FAIL;
5564
                }
5565
 
5566
              psr_field |= PSR_s;
5567
            }
5568
 
5569
          if ((nzcvq_bits & 0x20) != 0
5570
              || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5571
              || (g_bit & 0x2) != 0)
5572
            {
5573
              inst.error = _("bad bitmask specified after APSR");
5574
              return FAIL;
5575
            }
5576
        }
5577
      else
5578
        {
5579
          psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5580
                                                      p - start);
5581
          if (!psr)
5582
            goto error;
5583
 
5584
          psr_field |= psr->field;
5585
        }
5586
    }
5587
  else
5588
    {
5589
      if (ISALNUM (*p))
5590
        goto error;    /* Garbage after "[CS]PSR".  */
5591
 
5592
      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
5593
         is deprecated, but allow it anyway.  */
5594
      if (is_apsr && lhs)
5595
        {
5596
          psr_field |= PSR_f;
5597
          as_tsktsk (_("writing to APSR without specifying a bitmask is "
5598
                       "deprecated"));
5599
        }
5600
      else if (!m_profile)
5601
        /* These bits are never right for M-profile devices: don't set them
5602
           (only code paths which read/write APSR reach here).  */
5603
        psr_field |= (PSR_c | PSR_f);
5604
    }
5605
  *str = p;
5606
  return psr_field;
5607
 
5608
 unsupported_psr:
5609
  inst.error = _("selected processor does not support requested special "
5610
                 "purpose register");
5611
  return FAIL;
5612
 
5613
 error:
5614
  inst.error = _("flag for {c}psr instruction expected");
5615
  return FAIL;
5616
}
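/* Some examples of operands this routine accepts (illustrative only):
       msr CPSR_fc, r0          @ A/R-profile: flag + control fields
       msr SPSR_fsxc, r1
       msr APSR_nzcvq, r2       @ APSR written as individual bits
       msr APSR_g, r3           @ requires the DSP extension (see above)
   M-profile special-register names are also handled here; the full name
   lists live in the tables that populate arm_psr_hsh and
   arm_v7m_psr_hsh elsewhere in this file.  */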
5617
 
5618
/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
5619
   value suitable for splatting into the AIF field of the instruction.  */
5620
 
5621
static int
5622
parse_cps_flags (char **str)
5623
{
5624
  int val = 0;
5625
  int saw_a_flag = 0;
5626
  char *s = *str;
5627
 
5628
  for (;;)
5629
    switch (*s++)
5630
      {
5631
      case '\0': case ',':
5632
        goto done;
5633
 
5634
      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5635
      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5636
      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5637
 
5638
      default:
5639
        inst.error = _("unrecognized CPS flag");
5640
        return FAIL;
5641
      }
5642
 
5643
 done:
5644
  if (saw_a_flag == 0)
5645
    {
5646
      inst.error = _("missing CPS flags");
5647
      return FAIL;
5648
    }
5649
 
5650
  *str = s - 1;
5651
  return val;
5652
}
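/* For example, the "if" in "cpsie if" yields val = 0x2 | 0x1, and the
   "aif" in "cpsid aif" yields 0x4 | 0x2 | 0x1.  The IE/ID part is spelt
   in the mnemonic itself and is not parsed here.  */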
5653
 
5654
/* Parse an endian specifier ("BE" or "LE", case insensitive);
5655
   returns 1 if the specifier is "BE", 0 if it is "LE", or FAIL on error.  */
5656
 
5657
static int
5658
parse_endian_specifier (char **str)
5659
{
5660
  int little_endian;
5661
  char *s = *str;
5662
 
5663
  if (strncasecmp (s, "BE", 2))
5664
    little_endian = 0;
5665
  else if (strncasecmp (s, "LE", 2))
5666
    little_endian = 1;
5667
  else
5668
    {
5669
      inst.error = _("valid endian specifiers are be or le");
5670
      return FAIL;
5671
    }
5672
 
5673
  if (ISALNUM (s[2]) || s[2] == '_')
5674
    {
5675
      inst.error = _("valid endian specifiers are be or le");
5676
      return FAIL;
5677
    }
5678
 
5679
  *str = s + 2;
5680
  return little_endian;
5681
}
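/* Used for operands such as the "be"/"le" in "setend be" and
   "setend le" (case-insensitive, per the code above).  */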
5682
 
5683
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a
5684
   value suitable for poking into the rotate field of an sxt or sxta
5685
   instruction, or FAIL on error.  */
5686
 
5687
static int
5688
parse_ror (char **str)
5689
{
5690
  int rot;
5691
  char *s = *str;
5692
 
5693
  if (strncasecmp (s, "ROR", 3) == 0)
5694
    s += 3;
5695
  else
5696
    {
5697
      inst.error = _("missing rotation field after comma");
5698
      return FAIL;
5699
    }
5700
 
5701
  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5702
    return FAIL;
5703
 
5704
  switch (rot)
5705
    {
5706
    case  0: *str = s; return 0x0;
5707
    case  8: *str = s; return 0x1;
5708
    case 16: *str = s; return 0x2;
5709
    case 24: *str = s; return 0x3;
5710
 
5711
    default:
5712
      inst.error = _("rotation can only be 0, 8, 16, or 24");
5713
      return FAIL;
5714
    }
5715
}
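/* For example, the ", ror #16" in "sxtab r0, r1, r2, ror #16" would be
   parsed here and encoded as 0x2 for the rotate field (see the switch
   above).  */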
5716
 
5717
/* Parse a conditional code (from conds[] below).  The value returned is in the
5718
   range 0 .. 14, or FAIL.  */
5719
static int
5720
parse_cond (char **str)
5721
{
5722
  char *q;
5723
  const struct asm_cond *c;
5724
  int n;
5725
  /* Condition codes are always 2 characters, so matching up to
5726
     3 characters is sufficient.  */
5727
  char cond[3];
5728
 
5729
  q = *str;
5730
  n = 0;
5731
  while (ISALPHA (*q) && n < 3)
5732
    {
5733
      cond[n] = TOLOWER (*q);
5734
      q++;
5735
      n++;
5736
    }
5737
 
5738
  c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5739
  if (!c)
5740
    {
5741
      inst.error = _("condition required");
5742
      return FAIL;
5743
    }
5744
 
5745
  *str = q;
5746
  return c->value;
5747
}
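/* For example, the "eq" operand of an IT instruction ("it eq") is parsed
   here; the two-letter names come from the conds[] table mentioned
   above.  */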
5748
 
5749
/* Parse an option for a barrier instruction.  Returns the encoding for the
5750
   option, or FAIL.  */
5751
static int
5752
parse_barrier (char **str)
5753
{
5754
  char *p, *q;
5755
  const struct asm_barrier_opt *o;
5756
 
5757
  p = q = *str;
5758
  while (ISALPHA (*q))
5759
    q++;
5760
 
5761
  o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5762
                                                    q - p);
5763
  if (!o)
5764
    return FAIL;
5765
 
5766
  *str = q;
5767
  return o->value;
5768
}
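/* This matches the option names used with the barrier instructions,
   e.g. "dmb sy" or "dsb st".  Unknown alphabetic options return FAIL so
   that the caller (po_barrier_or_imm, defined below) can fall back to a
   plain #immediate; that macro also restricts ISB to the SY option.  */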
5769
 
5770
/* Parse the operands of a table branch instruction.  Similar to a memory
5771
   operand.  */
5772
static int
5773
parse_tb (char **str)
5774
{
5775
  char * p = *str;
5776
  int reg;
5777
 
5778
  if (skip_past_char (&p, '[') == FAIL)
5779
    {
5780
      inst.error = _("'[' expected");
5781
      return FAIL;
5782
    }
5783
 
5784
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5785
    {
5786
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5787
      return FAIL;
5788
    }
5789
  inst.operands[0].reg = reg;
5790
 
5791
  if (skip_past_comma (&p) == FAIL)
5792
    {
5793
      inst.error = _("',' expected");
5794
      return FAIL;
5795
    }
5796
 
5797
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5798
    {
5799
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5800
      return FAIL;
5801
    }
5802
  inst.operands[0].imm = reg;
5803
 
5804
  if (skip_past_comma (&p) == SUCCESS)
5805
    {
5806
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5807
        return FAIL;
5808
      if (inst.reloc.exp.X_add_number != 1)
5809
        {
5810
          inst.error = _("invalid shift");
5811
          return FAIL;
5812
        }
5813
      inst.operands[0].shifted = 1;
5814
    }
5815
 
5816
  if (skip_past_char (&p, ']') == FAIL)
5817
    {
5818
      inst.error = _("']' expected");
5819
      return FAIL;
5820
    }
5821
  *str = p;
5822
  return SUCCESS;
5823
}
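/* The Thumb-2 table-branch forms parsed here are, for example:
       tbb [r0, r1]            @ byte offset table
       tbh [r0, r1, lsl #1]    @ halfword offset table; only "lsl #1"
                                 is valid, hence the check above.  */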
5824
 
5825
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5826
   information on the types the operands can take and how they are encoded.
5827
   Up to four operands may be read; this function handles setting the
5828
   ".present" field for each read operand itself.
5829
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5830
   else returns FAIL.  */
5831
 
5832
static int
5833
parse_neon_mov (char **str, int *which_operand)
5834
{
5835
  int i = *which_operand, val;
5836
  enum arm_reg_type rtype;
5837
  char *ptr = *str;
5838
  struct neon_type_el optype;
5839
 
5840
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5841
    {
5842
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
5843
      inst.operands[i].reg = val;
5844
      inst.operands[i].isscalar = 1;
5845
      inst.operands[i].vectype = optype;
5846
      inst.operands[i++].present = 1;
5847
 
5848
      if (skip_past_comma (&ptr) == FAIL)
5849
        goto wanted_comma;
5850
 
5851
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5852
        goto wanted_arm;
5853
 
5854
      inst.operands[i].reg = val;
5855
      inst.operands[i].isreg = 1;
5856
      inst.operands[i].present = 1;
5857
    }
5858
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5859
           != FAIL)
5860
    {
5861
      /* Cases 0, 1, 2, 3, 5 (D only).  */
5862
      if (skip_past_comma (&ptr) == FAIL)
5863
        goto wanted_comma;
5864
 
5865
      inst.operands[i].reg = val;
5866
      inst.operands[i].isreg = 1;
5867
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5868
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5869
      inst.operands[i].isvec = 1;
5870
      inst.operands[i].vectype = optype;
5871
      inst.operands[i++].present = 1;
5872
 
5873
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5874
        {
5875
          /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5876
             Case 13: VMOV <Sd>, <Rm>  */
5877
          inst.operands[i].reg = val;
5878
          inst.operands[i].isreg = 1;
5879
          inst.operands[i].present = 1;
5880
 
5881
          if (rtype == REG_TYPE_NQ)
5882
            {
5883
              first_error (_("can't use Neon quad register here"));
5884
              return FAIL;
5885
            }
5886
          else if (rtype != REG_TYPE_VFS)
5887
            {
5888
              i++;
5889
              if (skip_past_comma (&ptr) == FAIL)
5890
                goto wanted_comma;
5891
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5892
                goto wanted_arm;
5893
              inst.operands[i].reg = val;
5894
              inst.operands[i].isreg = 1;
5895
              inst.operands[i].present = 1;
5896
            }
5897
        }
5898
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5899
                                           &optype)) != FAIL)
5900
        {
5901
          /* Case 0: VMOV<c><q> <Qd>, <Qm>
5902
             Case 1: VMOV<c><q> <Dd>, <Dm>
5903
             Case 8: VMOV.F32 <Sd>, <Sm>
5904
             Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
5905
 
5906
          inst.operands[i].reg = val;
5907
          inst.operands[i].isreg = 1;
5908
          inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5909
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5910
          inst.operands[i].isvec = 1;
5911
          inst.operands[i].vectype = optype;
5912
          inst.operands[i].present = 1;
5913
 
5914
          if (skip_past_comma (&ptr) == SUCCESS)
5915
            {
5916
              /* Case 15.  */
5917
              i++;
5918
 
5919
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5920
                goto wanted_arm;
5921
 
5922
              inst.operands[i].reg = val;
5923
              inst.operands[i].isreg = 1;
5924
              inst.operands[i++].present = 1;
5925
 
5926
              if (skip_past_comma (&ptr) == FAIL)
5927
                goto wanted_comma;
5928
 
5929
              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5930
                goto wanted_arm;
5931
 
5932
              inst.operands[i].reg = val;
5933
              inst.operands[i].isreg = 1;
5934
              inst.operands[i++].present = 1;
5935
            }
5936
        }
5937
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5938
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5939
             Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5940
             Case 10: VMOV.F32 <Sd>, #<imm>
5941
             Case 11: VMOV.F64 <Dd>, #<imm>  */
5942
        inst.operands[i].immisfloat = 1;
5943
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
5944
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5945
             Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
5946
        ;
5947
      else
5948
        {
5949
          first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5950
          return FAIL;
5951
        }
5952
    }
5953
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5954
    {
5955
      /* Cases 6, 7.  */
5956
      inst.operands[i].reg = val;
5957
      inst.operands[i].isreg = 1;
5958
      inst.operands[i++].present = 1;
5959
 
5960
      if (skip_past_comma (&ptr) == FAIL)
5961
        goto wanted_comma;
5962
 
5963
      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5964
        {
5965
          /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
5966
          inst.operands[i].reg = val;
5967
          inst.operands[i].isscalar = 1;
5968
          inst.operands[i].present = 1;
5969
          inst.operands[i].vectype = optype;
5970
        }
5971
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5972
        {
5973
          /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
5974
          inst.operands[i].reg = val;
5975
          inst.operands[i].isreg = 1;
5976
          inst.operands[i++].present = 1;
5977
 
5978
          if (skip_past_comma (&ptr) == FAIL)
5979
            goto wanted_comma;
5980
 
5981
          if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5982
              == FAIL)
5983
            {
5984
              first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5985
              return FAIL;
5986
            }
5987
 
5988
          inst.operands[i].reg = val;
5989
          inst.operands[i].isreg = 1;
5990
          inst.operands[i].isvec = 1;
5991
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5992
          inst.operands[i].vectype = optype;
5993
          inst.operands[i].present = 1;
5994
 
5995
          if (rtype == REG_TYPE_VFS)
5996
            {
5997
              /* Case 14.  */
5998
              i++;
5999
              if (skip_past_comma (&ptr) == FAIL)
6000
                goto wanted_comma;
6001
              if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6002
                                              &optype)) == FAIL)
6003
                {
6004
                  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6005
                  return FAIL;
6006
                }
6007
              inst.operands[i].reg = val;
6008
              inst.operands[i].isreg = 1;
6009
              inst.operands[i].isvec = 1;
6010
              inst.operands[i].issingle = 1;
6011
              inst.operands[i].vectype = optype;
6012
              inst.operands[i].present = 1;
6013
            }
6014
        }
6015
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6016
               != FAIL)
6017
        {
6018
          /* Case 13.  */
6019
          inst.operands[i].reg = val;
6020
          inst.operands[i].isreg = 1;
6021
          inst.operands[i].isvec = 1;
6022
          inst.operands[i].issingle = 1;
6023
          inst.operands[i].vectype = optype;
6024
          inst.operands[i++].present = 1;
6025
        }
6026
    }
6027
  else
6028
    {
6029
      first_error (_("parse error"));
6030
      return FAIL;
6031
    }
6032
 
6033
  /* Successfully parsed the operands. Update args.  */
6034
  *which_operand = i;
6035
  *str = ptr;
6036
  return SUCCESS;
6037
 
6038
 wanted_comma:
6039
  first_error (_("expected comma"));
6040
  return FAIL;
6041
 
6042
 wanted_arm:
6043
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6044
  return FAIL;
6045
}
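/* A rough guide to some of the VMOV shapes referred to by the case
   numbers in the comments above (see do_neon_mov for the full story):
       vmov     q0, q1           @ case 0
       vmov.32  d0[1], r2        @ case 4: scalar <- ARM register
       vmov     d0, r2, r3       @ case 5: D register <- two ARM registers
       vmov.32  r2, d0[1]        @ case 6: ARM register <- scalar
       vmov     r2, r3, d0       @ case 7: two ARM registers <- D register
       vmov.f32 s0, #1.0         @ case 10: VFP immediate
       vmov.i32 q0, #0x12        @ cases 2/3: Neon immediate
   Register numbers and immediates are illustrative only.  */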
6046
 
6047
/* Use this macro when the operand constraints are different
6048
   for ARM and THUMB (e.g. ldrd).  */
6049
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6050
        ((arm_operand) | ((thumb_operand) << 16))
6051
 
6052
/* Matcher codes for parse_operands.  */
6053
enum operand_parse_code
6054
{
6055
  OP_stop,      /* end of line */
6056
 
6057
  OP_RR,        /* ARM register */
6058
  OP_RRnpc,     /* ARM register, not r15 */
6059
  OP_RRnpcsp,   /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6060
  OP_RRnpcb,    /* ARM register, not r15, in square brackets */
6061
  OP_RRnpctw,   /* ARM register, not r15 in Thumb-state or with writeback,
6062
                   optional trailing ! */
6063
  OP_RRw,       /* ARM register, not r15, optional trailing ! */
6064
  OP_RCP,       /* Coprocessor number */
6065
  OP_RCN,       /* Coprocessor register */
6066
  OP_RF,        /* FPA register */
6067
  OP_RVS,       /* VFP single precision register */
6068
  OP_RVD,       /* VFP double precision register (0..15) */
6069
  OP_RND,       /* Neon double precision register (0..31) */
6070
  OP_RNQ,       /* Neon quad precision register */
6071
  OP_RVSD,      /* VFP single or double precision register */
6072
  OP_RNDQ,      /* Neon double or quad precision register */
6073
  OP_RNSDQ,     /* Neon single, double or quad precision register */
6074
  OP_RNSC,      /* Neon scalar D[X] */
6075
  OP_RVC,       /* VFP control register */
6076
  OP_RMF,       /* Maverick F register */
6077
  OP_RMD,       /* Maverick D register */
6078
  OP_RMFX,      /* Maverick FX register */
6079
  OP_RMDX,      /* Maverick DX register */
6080
  OP_RMAX,      /* Maverick AX register */
6081
  OP_RMDS,      /* Maverick DSPSC register */
6082
  OP_RIWR,      /* iWMMXt wR register */
6083
  OP_RIWC,      /* iWMMXt wC register */
6084
  OP_RIWG,      /* iWMMXt wCG register */
6085
  OP_RXA,       /* XScale accumulator register */
6086
 
6087
  OP_REGLST,    /* ARM register list */
6088
  OP_VRSLST,    /* VFP single-precision register list */
6089
  OP_VRDLST,    /* VFP double-precision register list */
6090
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
6091
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
6092
  OP_NSTRLST,   /* Neon element/structure list */
6093
 
6094
  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
6095
  OP_RVSD_I0,   /* VFP S or D reg, or immediate zero.  */
6096
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
6097
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
6098
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
6099
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
6100
  OP_VMOV,      /* Neon VMOV operands.  */
6101
  OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN.  */
6102
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
6103
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
6104
 
6105
  OP_I0,        /* immediate zero */
6106
  OP_I7,        /* immediate value 0 .. 7 */
6107
  OP_I15,       /*                 0 .. 15 */
6108
  OP_I16,       /*                 1 .. 16 */
6109
  OP_I16z,      /*                 0 .. 16 */
6110
  OP_I31,       /*                 0 .. 31 */
6111
  OP_I31w,      /*                 0 .. 31, optional trailing ! */
6112
  OP_I32,       /*                 1 .. 32 */
6113
  OP_I32z,      /*                 0 .. 32 */
6114
  OP_I63,       /*                 0 .. 63 */
6115
  OP_I63s,      /*               -64 .. 63 */
6116
  OP_I64,       /*                 1 .. 64 */
6117
  OP_I64z,      /*                 0 .. 64 */
6118
  OP_I255,      /*                 0 .. 255 */
6119
 
6120
  OP_I4b,       /* immediate, prefix optional, 1 .. 4 */
6121
  OP_I7b,       /*                             0 .. 7 */
6122
  OP_I15b,      /*                             0 .. 15 */
6123
  OP_I31b,      /*                             0 .. 31 */
6124
 
6125
  OP_SH,        /* shifter operand */
6126
  OP_SHG,       /* shifter operand with possible group relocation */
6127
  OP_ADDR,      /* Memory address expression (any mode) */
6128
  OP_ADDRGLDR,  /* Mem addr expr (any mode) with possible LDR group reloc */
6129
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6130
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
6131
  OP_EXP,       /* arbitrary expression */
6132
  OP_EXPi,      /* same, with optional immediate prefix */
6133
  OP_EXPr,      /* same, with optional relocation suffix */
6134
  OP_HALF,      /* 0 .. 65535 or low/high reloc.  */
6135
 
6136
  OP_CPSF,      /* CPS flags */
6137
  OP_ENDI,      /* Endianness specifier */
6138
  OP_wPSR,      /* CPSR/SPSR/APSR mask for msr (writing).  */
6139
  OP_rPSR,      /* CPSR/SPSR/APSR mask for msr (reading).  */
6140
  OP_COND,      /* conditional code */
6141
  OP_TB,        /* Table branch.  */
6142
 
6143
  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */
6144
 
6145
  OP_RRnpc_I0,  /* ARM register or literal 0 */
6146
  OP_RR_EXr,    /* ARM register or expression with opt. reloc suff. */
6147
  OP_RR_EXi,    /* ARM register or expression with imm prefix */
6148
  OP_RF_IF,     /* FPA register or immediate */
6149
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
6150
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6151
 
6152
  /* Optional operands.  */
6153
  OP_oI7b,       /* immediate, prefix optional, 0 .. 7 */
6154
  OP_oI31b,      /*                             0 .. 31 */
6155
  OP_oI32b,      /*                             1 .. 32 */
6156 160 khays
  OP_oI32z,      /*                             0 .. 32 */
6157 16 khays
  OP_oIffffb,    /*                             0 .. 65535 */
6158
  OP_oI255c,     /*       curly-brace enclosed, 0 .. 255 */
6159
 
6160
  OP_oRR,        /* ARM register */
6161
  OP_oRRnpc,     /* ARM register, not the PC */
6162
  OP_oRRnpcsp,   /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6163
  OP_oRRw,       /* ARM register, not r15, optional trailing ! */
6164
  OP_oRND,       /* Optional Neon double precision register */
6165
  OP_oRNQ,       /* Optional Neon quad precision register */
6166
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
6167
  OP_oRNSDQ,     /* Optional single, double or quad precision vector register */
6168
  OP_oSHll,      /* LSL immediate */
6169
  OP_oSHar,      /* ASR immediate */
6170
  OP_oSHllar,    /* LSL or ASR immediate */
6171
  OP_oROR,       /* ROR 0/8/16/24 */
6172
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */
6173
 
6174
  /* Some pre-defined mixed (ARM/THUMB) operands.  */
6175
  OP_RR_npcsp           = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6176
  OP_RRnpc_npcsp        = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6177
  OP_oRRnpc_npcsp       = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6178
 
6179
  OP_FIRST_OPTIONAL = OP_oI7b
6180
};
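/* For instance, OP_RRnpc_npcsp above is
   OP_RRnpc | (OP_RRnpcsp << 16): when parse_operands (below) is run for
   ARM state it keeps the low 16 bits (OP_RRnpc), and for Thumb state it
   shifts down the high 16 bits (OP_RRnpcsp) -- see the
   "op_parse_code >= 1<<16" test in that function.  */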
6181
 
6182
/* Generic instruction operand parser.  This does no encoding and no
6183
   semantic validation; it merely squirrels values away in the inst
6184
   structure.  Returns SUCCESS or FAIL depending on whether the
6185
   specified grammar matched.  */
6186
static int
6187
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6188
{
6189
  unsigned const int *upat = pattern;
6190
  char *backtrack_pos = 0;
6191
  const char *backtrack_error = 0;
6192
  int i, val, backtrack_index = 0;
6193
  enum arm_reg_type rtype;
6194
  parse_operand_result result;
6195
  unsigned int op_parse_code;
6196
 
6197
#define po_char_or_fail(chr)                    \
6198
  do                                            \
6199
    {                                           \
6200
      if (skip_past_char (&str, chr) == FAIL)   \
6201
        goto bad_args;                          \
6202
    }                                           \
6203
  while (0)
6204
 
6205
#define po_reg_or_fail(regtype)                                 \
6206
  do                                                            \
6207
    {                                                           \
6208
      val = arm_typed_reg_parse (& str, regtype, & rtype,       \
6209
                                 & inst.operands[i].vectype);   \
6210
      if (val == FAIL)                                          \
6211
        {                                                       \
6212
          first_error (_(reg_expected_msgs[regtype]));          \
6213
          goto failure;                                         \
6214
        }                                                       \
6215
      inst.operands[i].reg = val;                               \
6216
      inst.operands[i].isreg = 1;                               \
6217
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);         \
6218
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);      \
6219
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS           \
6220
                             || rtype == REG_TYPE_VFD           \
6221
                             || rtype == REG_TYPE_NQ);          \
6222
    }                                                           \
6223
  while (0)
6224
 
6225
#define po_reg_or_goto(regtype, label)                          \
6226
  do                                                            \
6227
    {                                                           \
6228
      val = arm_typed_reg_parse (& str, regtype, & rtype,       \
6229
                                 & inst.operands[i].vectype);   \
6230
      if (val == FAIL)                                          \
6231
        goto label;                                             \
6232
                                                                \
6233
      inst.operands[i].reg = val;                               \
6234
      inst.operands[i].isreg = 1;                               \
6235
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);         \
6236
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);      \
6237
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS           \
6238
                             || rtype == REG_TYPE_VFD           \
6239
                             || rtype == REG_TYPE_NQ);          \
6240
    }                                                           \
6241
  while (0)
6242
 
6243
#define po_imm_or_fail(min, max, popt)                          \
6244
  do                                                            \
6245
    {                                                           \
6246
      if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6247
        goto failure;                                           \
6248
      inst.operands[i].imm = val;                               \
6249
    }                                                           \
6250
  while (0)
6251
 
6252
#define po_scalar_or_goto(elsz, label)                                  \
6253
  do                                                                    \
6254
    {                                                                   \
6255
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);     \
6256
      if (val == FAIL)                                                  \
6257
        goto label;                                                     \
6258
      inst.operands[i].reg = val;                                       \
6259
      inst.operands[i].isscalar = 1;                                    \
6260
    }                                                                   \
6261
  while (0)
6262
 
6263
#define po_misc_or_fail(expr)                   \
6264
  do                                            \
6265
    {                                           \
6266
      if (expr)                                 \
6267
        goto failure;                           \
6268
    }                                           \
6269
  while (0)
6270
 
6271
#define po_misc_or_fail_no_backtrack(expr)              \
6272
  do                                                    \
6273
    {                                                   \
6274
      result = expr;                                    \
6275
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)    \
6276
        backtrack_pos = 0;                               \
6277
      if (result != PARSE_OPERAND_SUCCESS)              \
6278
        goto failure;                                   \
6279
    }                                                   \
6280
  while (0)
6281
 
6282
#define po_barrier_or_imm(str)                             \
6283
  do                                                       \
6284
    {                                                      \
6285
      val = parse_barrier (&str);                          \
6286
      if (val == FAIL)                                     \
6287
        {                                                  \
6288
          if (ISALPHA (*str))                              \
6289
              goto failure;                                \
6290
          else                                             \
6291
              goto immediate;                              \
6292
        }                                                  \
6293
      else                                                 \
6294
        {                                                  \
6295
          if ((inst.instruction & 0xf0) == 0x60            \
6296
              && val != 0xf)                               \
6297
            {                                              \
6298
               /* ISB can only take SY as an option.  */   \
6299
               inst.error = _("invalid barrier type");     \
6300
               goto failure;                               \
6301
            }                                              \
6302
        }                                                  \
6303
    }                                                      \
6304
  while (0)
6305
 
6306
  skip_whitespace (str);
6307
 
6308
  for (i = 0; upat[i] != OP_stop; i++)
6309
    {
6310
      op_parse_code = upat[i];
6311
      if (op_parse_code >= 1<<16)
6312
        op_parse_code = thumb ? (op_parse_code >> 16)
6313
                                : (op_parse_code & ((1<<16)-1));
6314
 
6315
      if (op_parse_code >= OP_FIRST_OPTIONAL)
6316
        {
6317
          /* Remember where we are in case we need to backtrack.  */
6318
          gas_assert (!backtrack_pos);
6319
          backtrack_pos = str;
6320
          backtrack_error = inst.error;
6321
          backtrack_index = i;
6322
        }
6323
 
6324
      if (i > 0 && (i > 1 || inst.operands[0].present))
6325
        po_char_or_fail (',');
6326
 
6327
      switch (op_parse_code)
6328
        {
6329
          /* Registers */
6330
        case OP_oRRnpc:
6331
        case OP_oRRnpcsp:
6332
        case OP_RRnpc:
6333
        case OP_RRnpcsp:
6334
        case OP_oRR:
6335
        case OP_RR:    po_reg_or_fail (REG_TYPE_RN);      break;
6336
        case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);      break;
6337
        case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);      break;
6338
        case OP_RF:    po_reg_or_fail (REG_TYPE_FN);      break;
6339
        case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);     break;
6340
        case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);     break;
6341
        case OP_oRND:
6342
        case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);     break;
6343
        case OP_RVC:
6344
          po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6345
          break;
6346
          /* Also accept generic coprocessor regs for unknown registers.  */
6347
          coproc_reg:
6348
          po_reg_or_fail (REG_TYPE_CN);
6349
          break;
6350
        case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);     break;
6351
        case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);     break;
6352
        case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);    break;
6353
        case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);    break;
6354
        case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);    break;
6355
        case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);   break;
6356
        case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);   break;
6357
        case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);   break;
6358
        case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
6359
        case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
6360
        case OP_oRNQ:
6361
        case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
6362
        case OP_oRNDQ:
6363
        case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
6364
        case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
6365
        case OP_oRNSDQ:
6366
        case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
6367
 
6368
        /* Neon scalar. Using an element size of 8 means that some invalid
6369
           scalars are accepted here, so deal with those in later code.  */
6370
        case OP_RNSC:  po_scalar_or_goto (8, failure);    break;
6371
 
6372
        case OP_RNDQ_I0:
6373
          {
6374
            po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6375
            break;
6376
            try_imm0:
6377
            po_imm_or_fail (0, 0, TRUE);
6378
          }
6379
          break;
6380
 
6381
        case OP_RVSD_I0:
6382
          po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6383
          break;
6384
 
6385
        case OP_RR_RNSC:
6386
          {
6387
            po_scalar_or_goto (8, try_rr);
6388
            break;
6389
            try_rr:
6390
            po_reg_or_fail (REG_TYPE_RN);
6391
          }
6392
          break;
6393
 
6394
        case OP_RNSDQ_RNSC:
6395
          {
6396
            po_scalar_or_goto (8, try_nsdq);
6397
            break;
6398
            try_nsdq:
6399
            po_reg_or_fail (REG_TYPE_NSDQ);
6400
          }
6401
          break;
6402
 
6403
        case OP_RNDQ_RNSC:
6404
          {
6405
            po_scalar_or_goto (8, try_ndq);
6406
            break;
6407
            try_ndq:
6408
            po_reg_or_fail (REG_TYPE_NDQ);
6409
          }
6410
          break;
6411
 
6412
        case OP_RND_RNSC:
6413
          {
6414
            po_scalar_or_goto (8, try_vfd);
6415
            break;
6416
            try_vfd:
6417
            po_reg_or_fail (REG_TYPE_VFD);
6418
          }
6419
          break;
6420
 
6421
        case OP_VMOV:
6422
          /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6423
             not careful then bad things might happen.  */
6424
          po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6425
          break;
6426
 
6427
        case OP_RNDQ_Ibig:
6428
          {
6429
            po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6430
            break;
6431
            try_immbig:
6432
            /* There's a possibility of getting a 64-bit immediate here, so
6433
               we need special handling.  */
6434
            if (parse_big_immediate (&str, i) == FAIL)
6435
              {
6436
                inst.error = _("immediate value is out of range");
6437
                goto failure;
6438
              }
6439
          }
6440
          break;
6441
 
6442
        case OP_RNDQ_I63b:
6443
          {
6444
            po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6445
            break;
6446
            try_shimm:
6447
            po_imm_or_fail (0, 63, TRUE);
6448
          }
6449
          break;
6450
 
6451
        case OP_RRnpcb:
6452
          po_char_or_fail ('[');
6453
          po_reg_or_fail  (REG_TYPE_RN);
6454
          po_char_or_fail (']');
6455
          break;
6456
 
6457
        case OP_RRnpctw:
6458
        case OP_RRw:
6459
        case OP_oRRw:
6460
          po_reg_or_fail (REG_TYPE_RN);
6461
          if (skip_past_char (&str, '!') == SUCCESS)
6462
            inst.operands[i].writeback = 1;
6463
          break;
6464
 
6465
          /* Immediates */
6466
        case OP_I7:      po_imm_or_fail (  0,       7, FALSE);   break;
6467
        case OP_I15:     po_imm_or_fail (  0,      15, FALSE);   break;
6468
        case OP_I16:     po_imm_or_fail (  1,     16, FALSE);   break;
6469
        case OP_I16z:    po_imm_or_fail (  0,     16, FALSE);   break;
6470
        case OP_I31:     po_imm_or_fail (  0,      31, FALSE);   break;
6471
        case OP_I32:     po_imm_or_fail (  1,     32, FALSE);   break;
6472
        case OP_I32z:    po_imm_or_fail (  0,     32, FALSE);   break;
6473
        case OP_I63s:    po_imm_or_fail (-64,     63, FALSE);   break;
6474
        case OP_I63:     po_imm_or_fail (  0,     63, FALSE);   break;
6475
        case OP_I64:     po_imm_or_fail (  1,     64, FALSE);   break;
6476
        case OP_I64z:    po_imm_or_fail (  0,     64, FALSE);   break;
6477
        case OP_I255:    po_imm_or_fail (  0,     255, FALSE);   break;
6478
 
6479
        case OP_I4b:     po_imm_or_fail (  1,      4, TRUE);    break;
6480
        case OP_oI7b:
6481
        case OP_I7b:     po_imm_or_fail (  0,       7, TRUE);    break;
6482
        case OP_I15b:    po_imm_or_fail (  0,      15, TRUE);    break;
6483
        case OP_oI31b:
6484
        case OP_I31b:    po_imm_or_fail (  0,      31, TRUE);    break;
6485
        case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
6486 160 khays
        case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
6487 16 khays
        case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);     break;
6488
 
6489
          /* Immediate variants */
6490
        case OP_oI255c:
6491
          po_char_or_fail ('{');
6492
          po_imm_or_fail (0, 255, TRUE);
6493
          po_char_or_fail ('}');
6494
          break;
6495
 
6496
        case OP_I31w:
6497
          /* The expression parser chokes on a trailing !, so we have
6498
             to find it first and zap it.  */
6499
          {
6500
            char *s = str;
6501
            while (*s && *s != ',')
6502
              s++;
6503
            if (s[-1] == '!')
6504
              {
6505
                s[-1] = '\0';
6506
                inst.operands[i].writeback = 1;
6507
              }
6508
            po_imm_or_fail (0, 31, TRUE);
6509
            if (str == s - 1)
6510
              str = s;
6511
          }
6512
          break;
6513
 
6514
          /* Expressions */
6515
        case OP_EXPi:   EXPi:
6516
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6517
                                              GE_OPT_PREFIX));
6518
          break;
6519
 
6520
        case OP_EXP:
6521
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6522
                                              GE_NO_PREFIX));
6523
          break;
6524
 
6525
        case OP_EXPr:   EXPr:
6526
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6527
                                              GE_NO_PREFIX));
6528
          if (inst.reloc.exp.X_op == O_symbol)
6529
            {
6530
              val = parse_reloc (&str);
6531
              if (val == -1)
6532
                {
6533
                  inst.error = _("unrecognized relocation suffix");
6534
                  goto failure;
6535
                }
6536
              else if (val != BFD_RELOC_UNUSED)
6537
                {
6538
                  inst.operands[i].imm = val;
6539
                  inst.operands[i].hasreloc = 1;
6540
                }
6541
            }
6542
          break;
6543
 
6544
          /* Operand for MOVW or MOVT.  */
6545
        case OP_HALF:
6546
          po_misc_or_fail (parse_half (&str));
6547
          break;
6548
 
6549
          /* Register or expression.  */
6550
        case OP_RR_EXr:   po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6551
        case OP_RR_EXi:   po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6552
 
6553
          /* Register or immediate.  */
6554
        case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
6555
        I0:               po_imm_or_fail (0, 0, FALSE);         break;
6556
 
6557
        case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
6558
        IF:
6559
          if (!is_immediate_prefix (*str))
6560
            goto bad_args;
6561
          str++;
6562
          val = parse_fpa_immediate (&str);
6563
          if (val == FAIL)
6564
            goto failure;
6565
          /* FPA immediates are encoded as registers 8-15.
6566
             parse_fpa_immediate has already applied the offset.  */
6567
          inst.operands[i].reg = val;
6568
          inst.operands[i].isreg = 1;
6569
          break;
6570
 
6571
        case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6572
        I32z:             po_imm_or_fail (0, 32, FALSE);   break;
6573
 
6574
          /* Two kinds of register.  */
6575
        case OP_RIWR_RIWC:
6576
          {
6577
            struct reg_entry *rege = arm_reg_parse_multi (&str);
6578
            if (!rege
6579
                || (rege->type != REG_TYPE_MMXWR
6580
                    && rege->type != REG_TYPE_MMXWC
6581
                    && rege->type != REG_TYPE_MMXWCG))
6582
              {
6583
                inst.error = _("iWMMXt data or control register expected");
6584
                goto failure;
6585
              }
6586
            inst.operands[i].reg = rege->number;
6587
            inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6588
          }
6589
          break;
6590
 
6591
        case OP_RIWC_RIWG:
6592
          {
6593
            struct reg_entry *rege = arm_reg_parse_multi (&str);
6594
            if (!rege
6595
                || (rege->type != REG_TYPE_MMXWC
6596
                    && rege->type != REG_TYPE_MMXWCG))
6597
              {
6598
                inst.error = _("iWMMXt control register expected");
6599
                goto failure;
6600
              }
6601
            inst.operands[i].reg = rege->number;
6602
            inst.operands[i].isreg = 1;
6603
          }
6604
          break;
6605
 
6606
          /* Misc */
6607
        case OP_CPSF:    val = parse_cps_flags (&str);          break;
6608
        case OP_ENDI:    val = parse_endian_specifier (&str);   break;
6609
        case OP_oROR:    val = parse_ror (&str);                break;
6610
        case OP_COND:    val = parse_cond (&str);               break;
6611
        case OP_oBARRIER_I15:
6612
          po_barrier_or_imm (str); break;
6613
          immediate:
6614
          if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6615
            goto failure;
6616
          break;
6617
 
6618
        case OP_wPSR:
6619
        case OP_rPSR:
6620
          po_reg_or_goto (REG_TYPE_RNB, try_psr);
6621
          if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6622
            {
6623
              inst.error = _("Banked registers are not available with this "
6624
                             "architecture.");
6625
              goto failure;
6626
            }
6627
          break;
6628
          try_psr:
6629
          val = parse_psr (&str, op_parse_code == OP_wPSR);
6630
          break;
6631
 
6632
        case OP_APSR_RR:
6633
          po_reg_or_goto (REG_TYPE_RN, try_apsr);
6634
          break;
6635
          try_apsr:
6636
          /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
6637
             instruction).  */
6638
          if (strncasecmp (str, "APSR_", 5) == 0)
6639
            {
6640
              unsigned found = 0;
6641
              str += 5;
6642
              while (found < 15)
6643
                switch (*str++)
6644
                  {
6645
                  case 'c': found = (found & 1) ? 16 : found | 1; break;
6646
                  case 'n': found = (found & 2) ? 16 : found | 2; break;
6647
                  case 'z': found = (found & 4) ? 16 : found | 4; break;
6648
                  case 'v': found = (found & 8) ? 16 : found | 8; break;
6649
                  default: found = 16;
6650
                  }
6651
              if (found != 15)
6652
                goto failure;
6653
              inst.operands[i].isvec = 1;
6654
              /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
6655
              inst.operands[i].reg = REG_PC;
6656
            }
6657
          else
6658
            goto failure;
6659
          break;
6660
 
6661
        case OP_TB:
6662
          po_misc_or_fail (parse_tb (&str));
6663
          break;
6664
 
6665
          /* Register lists.  */
6666
        case OP_REGLST:
6667
          val = parse_reg_list (&str);
6668
          if (*str == '^')
6669
            {
6670
              inst.operands[1].writeback = 1;
6671
              str++;
6672
            }
6673
          break;
6674
 
6675
        case OP_VRSLST:
6676
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
6677
          break;
6678
 
6679
        case OP_VRDLST:
6680
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
6681
          break;
6682
 
6683
        case OP_VRSDLST:
6684
          /* Allow Q registers too.  */
6685
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6686
                                    REGLIST_NEON_D);
6687
          if (val == FAIL)
6688
            {
6689
              inst.error = NULL;
6690
              val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6691
                                        REGLIST_VFP_S);
6692
              inst.operands[i].issingle = 1;
6693
            }
6694
          break;
6695
 
6696
        case OP_NRDLST:
6697
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6698
                                    REGLIST_NEON_D);
6699
          break;
6700
 
6701
        case OP_NSTRLST:
6702
          val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
6703
                                           &inst.operands[i].vectype);
6704
          break;
6705
 
6706
          /* Addressing modes */
6707
        case OP_ADDR:
6708
          po_misc_or_fail (parse_address (&str, i));
6709
          break;
6710
 
6711
        case OP_ADDRGLDR:
6712
          po_misc_or_fail_no_backtrack (
6713
            parse_address_group_reloc (&str, i, GROUP_LDR));
6714
          break;
6715
 
6716
        case OP_ADDRGLDRS:
6717
          po_misc_or_fail_no_backtrack (
6718
            parse_address_group_reloc (&str, i, GROUP_LDRS));
6719
          break;
6720
 
6721
        case OP_ADDRGLDC:
6722
          po_misc_or_fail_no_backtrack (
6723
            parse_address_group_reloc (&str, i, GROUP_LDC));
6724
          break;
6725
 
6726
        case OP_SH:
6727
          po_misc_or_fail (parse_shifter_operand (&str, i));
6728
          break;
6729
 
6730
        case OP_SHG:
6731
          po_misc_or_fail_no_backtrack (
6732
            parse_shifter_operand_group_reloc (&str, i));
6733
          break;
6734
 
6735
        case OP_oSHll:
6736
          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6737
          break;
6738
 
6739
        case OP_oSHar:
6740
          po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6741
          break;
6742
 
6743
        case OP_oSHllar:
6744
          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6745
          break;
6746
 
6747
        default:
6748
          as_fatal (_("unhandled operand code %d"), op_parse_code);
6749
        }
6750
 
6751
      /* Various value-based sanity checks and shared operations.  We
6752
         do not signal immediate failures for the register constraints;
6753
         this allows a syntax error to take precedence.  */
6754
      switch (op_parse_code)
6755
        {
6756
        case OP_oRRnpc:
6757
        case OP_RRnpc:
6758
        case OP_RRnpcb:
6759
        case OP_RRw:
6760
        case OP_oRRw:
6761
        case OP_RRnpc_I0:
6762
          if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6763
            inst.error = BAD_PC;
6764
          break;
6765
 
6766
        case OP_oRRnpcsp:
6767
        case OP_RRnpcsp:
6768
          if (inst.operands[i].isreg)
6769
            {
6770
              if (inst.operands[i].reg == REG_PC)
6771
                inst.error = BAD_PC;
6772
              else if (inst.operands[i].reg == REG_SP)
6773
                inst.error = BAD_SP;
6774
            }
6775
          break;
6776
 
6777
        case OP_RRnpctw:
6778
          if (inst.operands[i].isreg
6779
              && inst.operands[i].reg == REG_PC
6780
              && (inst.operands[i].writeback || thumb))
6781
            inst.error = BAD_PC;
6782
          break;
6783
 
6784
        case OP_CPSF:
6785
        case OP_ENDI:
6786
        case OP_oROR:
6787
        case OP_wPSR:
6788
        case OP_rPSR:
6789
        case OP_COND:
6790
        case OP_oBARRIER_I15:
6791
        case OP_REGLST:
6792
        case OP_VRSLST:
6793
        case OP_VRDLST:
6794
        case OP_VRSDLST:
6795
        case OP_NRDLST:
6796
        case OP_NSTRLST:
6797
          if (val == FAIL)
6798
            goto failure;
6799
          inst.operands[i].imm = val;
6800
          break;
6801
 
6802
        default:
6803
          break;
6804
        }
6805
 
6806
      /* If we get here, this operand was successfully parsed.  */
6807
      inst.operands[i].present = 1;
6808
      continue;
6809
 
6810
    bad_args:
6811
      inst.error = BAD_ARGS;
6812
 
6813
    failure:
6814
      if (!backtrack_pos)
6815
        {
6816
          /* The parse routine should already have set inst.error, but set a
6817
             default here just in case.  */
6818
          if (!inst.error)
6819
            inst.error = _("syntax error");
6820
          return FAIL;
6821
        }
6822
 
6823
      /* Do not backtrack over a trailing optional argument that
6824
         absorbed some text.  We will only fail again, with the
6825
         'garbage following instruction' error message, which is
6826
         probably less helpful than the current one.  */
6827
      if (backtrack_index == i && backtrack_pos != str
6828
          && upat[i+1] == OP_stop)
6829
        {
6830
          if (!inst.error)
6831
            inst.error = _("syntax error");
6832
          return FAIL;
6833
        }
6834
 
6835
      /* Try again, skipping the optional argument at backtrack_pos.  */
6836
      str = backtrack_pos;
6837
      inst.error = backtrack_error;
6838
      inst.operands[backtrack_index].present = 0;
6839
      i = backtrack_index;
6840
      backtrack_pos = 0;
6841
    }
6842
 
6843
  /* Check that we have parsed all the arguments.  */
6844
  if (*str != '\0' && !inst.error)
6845
    inst.error = _("garbage following instruction");
6846
 
6847
  return inst.error ? FAIL : SUCCESS;
6848
}
6849
 
6850
#undef po_char_or_fail
6851
#undef po_reg_or_fail
6852
#undef po_reg_or_goto
6853
#undef po_imm_or_fail
6854
#undef po_scalar_or_fail
6855
#undef po_barrier_or_imm
6856
 
6857
/* Shorthand macro for instruction encoding functions issuing errors.  */
6858
#define constraint(expr, err)                   \
6859
  do                                            \
6860
    {                                           \
6861
      if (expr)                                 \
6862
        {                                       \
6863
          inst.error = err;                     \
6864
          return;                               \
6865
        }                                       \
6866
    }                                           \
6867
  while (0)
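/* For illustration, a typical use inside one of the encoding functions
   below is
     constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   which records the error in inst.error and returns from the caller
   when the condition holds.  */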
6868
 
6869
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
6870
   instructions are unpredictable if these registers are used.  This
6871
   is the BadReg predicate in ARM's Thumb-2 documentation.  */
6872
#define reject_bad_reg(reg)                             \
6873
  do                                                    \
6874
   if (reg == REG_SP || reg == REG_PC)                  \
6875
     {                                                  \
6876
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;  \
6877
       return;                                          \
6878
     }                                                  \
6879
  while (0)
6880
 
6881
/* If REG is R13 (the stack pointer), warn that its use is
6882
   deprecated.  */
6883
#define warn_deprecated_sp(reg)                 \
6884
  do                                            \
6885
    if (warn_on_deprecated && reg == REG_SP)    \
6886
       as_warn (_("use of r13 is deprecated")); \
6887
  while (0)
6888
 
6889
/* Functions for operand encoding.  ARM, then Thumb.  */
6890
 
6891
#define rotate_left(v, n) (v << n | v >> (32 - n))
6892
 
6893
/* If VAL can be encoded in the immediate field of an ARM instruction,
6894
   return the encoded form.  Otherwise, return FAIL.  */
6895
 
6896
static unsigned int
6897
encode_arm_immediate (unsigned int val)
6898
{
6899
  unsigned int a, i;
6900
 
6901
  for (i = 0; i < 32; i += 2)
6902
    if ((a = rotate_left (val, i)) <= 0xff)
6903
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
6904
 
6905
  return FAIL;
6906
}
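/* Illustrative example: for VAL == 0xff000000, rotating left by 8 gives
   0xff, so the function returns 0xff | (8 << 7) == 0x4ff, i.e. an 8-bit
   constant of 0xff with a rotation field of 4 (the CPU rotates the
   constant right by 8 to recreate the value).  */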
6907
 
6908
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6909
   return the encoded form.  Otherwise, return FAIL.  */
6910
static unsigned int
6911
encode_thumb32_immediate (unsigned int val)
6912
{
6913
  unsigned int a, i;
6914
 
6915
  if (val <= 0xff)
6916
    return val;
6917
 
6918
  for (i = 1; i <= 24; i++)
6919
    {
6920
      a = val >> i;
6921
      if ((val & ~(0xff << i)) == 0)
6922
        return ((val >> i) & 0x7f) | ((32 - i) << 7);
6923
    }
6924
 
6925
  a = val & 0xff;
6926
  if (val == ((a << 16) | a))
6927
    return 0x100 | a;
6928
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6929
    return 0x300 | a;
6930
 
6931
  a = val & 0xff00;
6932
  if (val == ((a << 16) | a))
6933
    return 0x200 | (a >> 8);
6934
 
6935
  return FAIL;
6936
}
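/* Illustrative examples of the replicated forms handled above:
   0x00ab00ab encodes as 0x100 | 0xab, 0xabababab as 0x300 | 0xab and
   0xab00ab00 as 0x200 | 0xab, while a value such as 0x3f80 is handled
   by the shifted-byte loop instead.  */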
6937
/* Encode a VFP SP or DP register number into inst.instruction.  */
6938
 
6939
static void
6940
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6941
{
6942
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6943
      && reg > 15)
6944
    {
6945
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6946
        {
6947
          if (thumb_mode)
6948
            ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6949
                                    fpu_vfp_ext_d32);
6950
          else
6951
            ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6952
                                    fpu_vfp_ext_d32);
6953
        }
6954
      else
6955
        {
6956
          first_error (_("D register out of range for selected VFP version"));
6957
          return;
6958
        }
6959
    }
6960
 
6961
  switch (pos)
6962
    {
6963
    case VFP_REG_Sd:
6964
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6965
      break;
6966
 
6967
    case VFP_REG_Sn:
6968
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6969
      break;
6970
 
6971
    case VFP_REG_Sm:
6972
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6973
      break;
6974
 
6975
    case VFP_REG_Dd:
6976
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6977
      break;
6978
 
6979
    case VFP_REG_Dn:
6980
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6981
      break;
6982
 
6983
    case VFP_REG_Dm:
6984
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6985
      break;
6986
 
6987
    default:
6988
      abort ();
6989
    }
6990
}
6991
 
6992
/* Encode a <shift> in an ARM-format instruction.  The immediate,
6993
   if any, is handled by md_apply_fix.   */
6994
static void
6995
encode_arm_shift (int i)
6996
{
6997
  if (inst.operands[i].shift_kind == SHIFT_RRX)
6998
    inst.instruction |= SHIFT_ROR << 5;
6999
  else
7000
    {
7001
      inst.instruction |= inst.operands[i].shift_kind << 5;
7002
      if (inst.operands[i].immisreg)
7003
        {
7004
          inst.instruction |= SHIFT_BY_REG;
7005
          inst.instruction |= inst.operands[i].imm << 8;
7006
        }
7007
      else
7008
        inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7009
    }
7010
}
7011
 
7012
static void
7013
encode_arm_shifter_operand (int i)
7014
{
7015
  if (inst.operands[i].isreg)
7016
    {
7017
      inst.instruction |= inst.operands[i].reg;
7018
      encode_arm_shift (i);
7019
    }
7020
  else
7021 163 khays
    {
7022
      inst.instruction |= INST_IMMEDIATE;
7023
      if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7024
        inst.instruction |= inst.operands[i].imm;
7025
    }
7026 16 khays
}
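/* For illustration: with "add r0, r1, r2, lsl #3" the register path is
   taken, placing r2 in bits 0-3 and the LSL shift kind in bits 5-6 and
   leaving the #3 to md_apply_fix via BFD_RELOC_ARM_SHIFT_IMM; with
   "add r0, r1, #1" the immediate path sets INST_IMMEDIATE and the
   constant is normally applied through BFD_RELOC_ARM_IMMEDIATE.  */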
7027
 
7028
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
7029
static void
7030
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7031
{
7032
  gas_assert (inst.operands[i].isreg);
7033
  inst.instruction |= inst.operands[i].reg << 16;
7034
 
7035
  if (inst.operands[i].preind)
7036
    {
7037
      if (is_t)
7038
        {
7039
          inst.error = _("instruction does not accept preindexed addressing");
7040
          return;
7041
        }
7042
      inst.instruction |= PRE_INDEX;
7043
      if (inst.operands[i].writeback)
7044
        inst.instruction |= WRITE_BACK;
7045
 
7046
    }
7047
  else if (inst.operands[i].postind)
7048
    {
7049
      gas_assert (inst.operands[i].writeback);
7050
      if (is_t)
7051
        inst.instruction |= WRITE_BACK;
7052
    }
7053
  else /* unindexed - only for coprocessor */
7054
    {
7055
      inst.error = _("instruction does not accept unindexed addressing");
7056
      return;
7057
    }
7058
 
7059
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7060
      && (((inst.instruction & 0x000f0000) >> 16)
7061
          == ((inst.instruction & 0x0000f000) >> 12)))
7062
    as_warn ((inst.instruction & LOAD_BIT)
7063
             ? _("destination register same as write-back base")
7064
             : _("source register same as write-back base"));
7065
}
7066
 
7067
/* inst.operands[i] was set up by parse_address.  Encode it into an
7068
   ARM-format mode 2 load or store instruction.  If is_t is true,
7069
   reject forms that cannot be used with a T instruction (i.e. not
7070
   post-indexed).  */
7071
static void
7072
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7073
{
7074
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7075
 
7076
  encode_arm_addr_mode_common (i, is_t);
7077
 
7078
  if (inst.operands[i].immisreg)
7079
    {
7080
      constraint ((inst.operands[i].imm == REG_PC
7081
                   || (is_pc && inst.operands[i].writeback)),
7082
                  BAD_PC_ADDRESSING);
7083
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
7084
      inst.instruction |= inst.operands[i].imm;
7085
      if (!inst.operands[i].negative)
7086
        inst.instruction |= INDEX_UP;
7087
      if (inst.operands[i].shifted)
7088
        {
7089
          if (inst.operands[i].shift_kind == SHIFT_RRX)
7090
            inst.instruction |= SHIFT_ROR << 5;
7091
          else
7092
            {
7093
              inst.instruction |= inst.operands[i].shift_kind << 5;
7094
              inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7095
            }
7096
        }
7097
    }
7098
  else /* immediate offset in inst.reloc */
7099
    {
7100
      if (is_pc && !inst.reloc.pc_rel)
7101
        {
7102
          const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7103
 
7104
          /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
7105
             cannot use PC in addressing.
7106
             PC cannot be used in writeback addressing, either.  */
7107
          constraint ((is_t || inst.operands[i].writeback),
7108
                      BAD_PC_ADDRESSING);
7109
 
7110
          /* Use of PC in str is deprecated for ARMv7.  */
7111
          if (warn_on_deprecated
7112
              && !is_load
7113
              && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7114
            as_warn (_("use of PC in this instruction is deprecated"));
7115
        }
7116
 
7117
      if (inst.reloc.type == BFD_RELOC_UNUSED)
7118
        {
7119
          /* Prefer + for zero encoded value.  */
7120
          if (!inst.operands[i].negative)
7121
            inst.instruction |= INDEX_UP;
7122
          inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7123
        }
7124
    }
7125
}
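/* For illustration: "ldr r0, [r1, #4]!" takes the immediate-offset path
   above, with r1 in bits 16-19 (from encode_arm_addr_mode_common),
   PRE_INDEX, WRITE_BACK and INDEX_UP set, and the #4 applied later via
   BFD_RELOC_ARM_OFFSET_IMM; "ldr r0, [r1, -r2]" takes the register
   path, sets INST_IMMEDIATE (the "backwards" bit noted above) and
   leaves INDEX_UP clear because the offset is negative.  */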
7126
 
7127
/* inst.operands[i] was set up by parse_address.  Encode it into an
7128
   ARM-format mode 3 load or store instruction.  Reject forms that
7129
   cannot be used with such instructions.  If is_t is true, reject
7130
   forms that cannot be used with a T instruction (i.e. not
7131
   post-indexed).  */
7132
static void
7133
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7134
{
7135
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
7136
    {
7137
      inst.error = _("instruction does not accept scaled register index");
7138
      return;
7139
    }
7140
 
7141
  encode_arm_addr_mode_common (i, is_t);
7142
 
7143
  if (inst.operands[i].immisreg)
7144
    {
7145
      constraint ((inst.operands[i].imm == REG_PC
7146
                   || inst.operands[i].reg == REG_PC),
7147
                  BAD_PC_ADDRESSING);
7148
      inst.instruction |= inst.operands[i].imm;
7149
      if (!inst.operands[i].negative)
7150
        inst.instruction |= INDEX_UP;
7151
    }
7152
  else /* immediate offset in inst.reloc */
7153
    {
7154
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7155
                   && inst.operands[i].writeback),
7156
                  BAD_PC_WRITEBACK);
7157
      inst.instruction |= HWOFFSET_IMM;
7158
      if (inst.reloc.type == BFD_RELOC_UNUSED)
7159
        {
7160
          /* Prefer + for zero encoded value.  */
7161
          if (!inst.operands[i].negative)
7162
            inst.instruction |= INDEX_UP;
7163
 
7164
          inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7165
        }
7166
    }
7167
}
7168
 
7169
/* inst.operands[i] was set up by parse_address.  Encode it into an
7170
   ARM-format instruction.  Reject all forms which cannot be encoded
7171
   into a coprocessor load/store instruction.  If wb_ok is false,
7172
   reject use of writeback; if unind_ok is false, reject use of
7173
   unindexed addressing.  If reloc_override is not 0, use it instead
7174
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7175
   (in which case it is preserved).  */
7176
 
7177
static int
7178
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
7179
{
7180
  inst.instruction |= inst.operands[i].reg << 16;
7181
 
7182
  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
7183
 
7184
  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
7185
    {
7186
      gas_assert (!inst.operands[i].writeback);
7187
      if (!unind_ok)
7188
        {
7189
          inst.error = _("instruction does not support unindexed addressing");
7190
          return FAIL;
7191
        }
7192
      inst.instruction |= inst.operands[i].imm;
7193
      inst.instruction |= INDEX_UP;
7194
      return SUCCESS;
7195
    }
7196
 
7197
  if (inst.operands[i].preind)
7198
    inst.instruction |= PRE_INDEX;
7199
 
7200
  if (inst.operands[i].writeback)
7201
    {
7202
      if (inst.operands[i].reg == REG_PC)
7203
        {
7204
          inst.error = _("pc may not be used with write-back");
7205
          return FAIL;
7206
        }
7207
      if (!wb_ok)
7208
        {
7209
          inst.error = _("instruction does not support writeback");
7210
          return FAIL;
7211
        }
7212
      inst.instruction |= WRITE_BACK;
7213
    }
7214
 
7215
  if (reloc_override)
7216
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
7217
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
7218
            || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
7219
           && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
7220
    {
7221
      if (thumb_mode)
7222
        inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
7223
      else
7224
        inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
7225
    }
7226
 
7227
  /* Prefer + for zero encoded value.  */
7228
  if (!inst.operands[i].negative)
7229
    inst.instruction |= INDEX_UP;
7230
 
7231
  return SUCCESS;
7232
}
7233
 
7234
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
7235
   Determine whether it can be performed with a move instruction; if
7236
   it can, convert inst.instruction to that move instruction and
7237
   return TRUE; if it can't, convert inst.instruction to a literal-pool
7238
   load and return FALSE.  If this is not a valid thing to do in the
7239
   current context, set inst.error and return TRUE.
7240
 
7241
   inst.operands[i] describes the destination register.  */
7242
 
7243
static bfd_boolean
7244
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
7245
{
7246
  unsigned long tbit;
7247
 
7248
  if (thumb_p)
7249
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7250
  else
7251
    tbit = LOAD_BIT;
7252
 
7253
  if ((inst.instruction & tbit) == 0)
7254
    {
7255
      inst.error = _("invalid pseudo operation");
7256
      return TRUE;
7257
    }
7258
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
7259
    {
7260
      inst.error = _("constant expression expected");
7261
      return TRUE;
7262
    }
7263
  if (inst.reloc.exp.X_op == O_constant)
7264
    {
7265
      if (thumb_p)
7266
        {
7267
          if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
7268
            {
7269
              /* This can be done with a mov(1) instruction.  */
7270
              inst.instruction  = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7271
              inst.instruction |= inst.reloc.exp.X_add_number;
7272
              return TRUE;
7273
            }
7274
        }
7275
      else
7276
        {
7277
          int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
7278
          if (value != FAIL)
7279
            {
7280
              /* This can be done with a mov instruction.  */
7281
              inst.instruction &= LITERAL_MASK;
7282
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7283
              inst.instruction |= value & 0xfff;
7284
              return TRUE;
7285
            }
7286
 
7287
          value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
7288
          if (value != FAIL)
7289
            {
7290
              /* This can be done with a mvn instruction.  */
7291
              inst.instruction &= LITERAL_MASK;
7292
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7293
              inst.instruction |= value & 0xfff;
7294
              return TRUE;
7295
            }
7296
        }
7297
    }
7298
 
7299
  if (add_to_lit_pool () == FAIL)
7300
    {
7301
      inst.error = _("literal pool insertion failed");
7302
      return TRUE;
7303
    }
7304
  inst.operands[1].reg = REG_PC;
7305
  inst.operands[1].isreg = 1;
7306
  inst.operands[1].preind = 1;
7307
  inst.reloc.pc_rel = 1;
7308
  inst.reloc.type = (thumb_p
7309
                     ? BFD_RELOC_ARM_THUMB_OFFSET
7310
                     : (mode_3
7311
                        ? BFD_RELOC_ARM_HWLITERAL
7312
                        : BFD_RELOC_ARM_LITERAL));
7313
  return FALSE;
7314
}
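/* For illustration: in ARM state "ldr r0, =0xff0000" is turned into
   "mov r0, #0xff0000" because encode_arm_immediate succeeds, and
   "ldr r0, =0xffff00ff" becomes "mvn r0, #0xff00" via the inverted
   value, while a constant such as 0x12345678 fits neither form and is
   placed in the literal pool to be loaded PC-relative.  */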
7315
 
7316
/* Functions for instruction encoding, sorted by sub-architecture.
7317
   First some generics; their names are taken from the conventional
7318
   bit positions for register arguments in ARM format instructions.  */
7319
 
7320
static void
7321
do_noargs (void)
7322
{
7323
}
7324
 
7325
static void
7326
do_rd (void)
7327
{
7328
  inst.instruction |= inst.operands[0].reg << 12;
7329
}
7330
 
7331
static void
7332
do_rd_rm (void)
7333
{
7334
  inst.instruction |= inst.operands[0].reg << 12;
7335
  inst.instruction |= inst.operands[1].reg;
7336
}
7337
 
7338
static void
7339
do_rd_rn (void)
7340
{
7341
  inst.instruction |= inst.operands[0].reg << 12;
7342
  inst.instruction |= inst.operands[1].reg << 16;
7343
}
7344
 
7345
static void
7346
do_rn_rd (void)
7347
{
7348
  inst.instruction |= inst.operands[0].reg << 16;
7349
  inst.instruction |= inst.operands[1].reg << 12;
7350
}
7351
 
7352
static void
7353
do_rd_rm_rn (void)
7354
{
7355
  unsigned Rn = inst.operands[2].reg;
7356
  /* Enforce restrictions on SWP instruction.  */
7357
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
7358
    {
7359
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
7360
                  _("Rn must not overlap other operands"));
7361
 
7362
      /* SWP{b} is deprecated for ARMv6* and ARMv7.  */
7363
      if (warn_on_deprecated
7364
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7365
        as_warn (_("swp{b} use is deprecated for this architecture"));
7366
 
7367
    }
7368
  inst.instruction |= inst.operands[0].reg << 12;
7369
  inst.instruction |= inst.operands[1].reg;
7370
  inst.instruction |= Rn << 16;
7371
}
7372
 
7373
static void
7374
do_rd_rn_rm (void)
7375
{
7376
  inst.instruction |= inst.operands[0].reg << 12;
7377
  inst.instruction |= inst.operands[1].reg << 16;
7378
  inst.instruction |= inst.operands[2].reg;
7379
}
7380
 
7381
static void
7382
do_rm_rd_rn (void)
7383
{
7384
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
7385
  constraint (((inst.reloc.exp.X_op != O_constant
7386
                && inst.reloc.exp.X_op != O_illegal)
7387
               || inst.reloc.exp.X_add_number != 0),
7388
              BAD_ADDR_MODE);
7389
  inst.instruction |= inst.operands[0].reg;
7390
  inst.instruction |= inst.operands[1].reg << 12;
7391
  inst.instruction |= inst.operands[2].reg << 16;
7392
}
7393
 
7394
static void
7395
do_imm0 (void)
7396
{
7397
  inst.instruction |= inst.operands[0].imm;
7398
}
7399
 
7400
static void
7401
do_rd_cpaddr (void)
7402
{
7403
  inst.instruction |= inst.operands[0].reg << 12;
7404
  encode_arm_cp_address (1, TRUE, TRUE, 0);
7405
}
7406
 
7407
/* ARM instructions, in alphabetical order by function name (except
7408
   that wrapper functions appear immediately after the function they
7409
   wrap).  */
7410
 
7411
/* This is a pseudo-op of the form "adr rd, label" to be converted
7412
   into a relative address of the form "add rd, pc, #label-.-8".  */
7413
 
7414
static void
7415
do_adr (void)
7416
{
7417
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
7418
 
7419
  /* Frag hacking will turn this into a sub instruction if the offset turns
7420
     out to be negative.  */
7421
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7422
  inst.reloc.pc_rel = 1;
7423
  inst.reloc.exp.X_add_number -= 8;
7424
}
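/* For illustration: if the label lies 12 bytes beyond the adr
   instruction itself, the immediate after the -8 adjustment is 4 and
   the result assembles as "add rd, pc, #4", since pc reads as the
   instruction address plus 8 in ARM state.  */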
7425
 
7426
/* This is a pseudo-op of the form "adrl rd, label" to be converted
7427
   into a relative address of the form:
7428
   "add rd, pc, #low(label-.-8)"
7429
   "add rd, rd, #high(label-.-8)"  */
7430
 
7431
static void
7432
do_adrl (void)
7433
{
7434
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */
7435
 
7436
  /* Frag hacking will turn this into a sub instruction if the offset turns
7437
     out to be negative.  */
7438
  inst.reloc.type              = BFD_RELOC_ARM_ADRL_IMMEDIATE;
7439
  inst.reloc.pc_rel            = 1;
7440
  inst.size                    = INSN_SIZE * 2;
7441
  inst.reloc.exp.X_add_number -= 8;
7442
}
7443
 
7444
static void
7445
do_arit (void)
7446
{
7447
  if (!inst.operands[1].present)
7448
    inst.operands[1].reg = inst.operands[0].reg;
7449
  inst.instruction |= inst.operands[0].reg << 12;
7450
  inst.instruction |= inst.operands[1].reg << 16;
7451
  encode_arm_shifter_operand (2);
7452
}
7453
 
7454
static void
7455
do_barrier (void)
7456
{
7457
  if (inst.operands[0].present)
7458
    {
7459
      constraint ((inst.instruction & 0xf0) != 0x40
7460
                  && inst.operands[0].imm > 0xf
7461
                  && inst.operands[0].imm < 0x0,
7462
                  _("bad barrier type"));
7463
      inst.instruction |= inst.operands[0].imm;
7464
    }
7465
  else
7466
    inst.instruction |= 0xf;
7467
}
7468
 
7469
static void
7470
do_bfc (void)
7471
{
7472
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7473
  constraint (msb > 32, _("bit-field extends past end of register"));
7474
  /* The instruction encoding stores the LSB and MSB,
7475
     not the LSB and width.  */
7476
  inst.instruction |= inst.operands[0].reg << 12;
7477
  inst.instruction |= inst.operands[1].imm << 7;
7478
  inst.instruction |= (msb - 1) << 16;
7479
}
7480
 
7481
static void
7482
do_bfi (void)
7483
{
7484
  unsigned int msb;
7485
 
7486
  /* #0 in second position is alternative syntax for bfc, which is
7487
     the same instruction but with REG_PC in the Rm field.  */
7488
  if (!inst.operands[1].isreg)
7489
    inst.operands[1].reg = REG_PC;
7490
 
7491
  msb = inst.operands[2].imm + inst.operands[3].imm;
7492
  constraint (msb > 32, _("bit-field extends past end of register"));
7493
  /* The instruction encoding stores the LSB and MSB,
7494
     not the LSB and width.  */
7495
  inst.instruction |= inst.operands[0].reg << 12;
7496
  inst.instruction |= inst.operands[1].reg;
7497
  inst.instruction |= inst.operands[2].imm << 7;
7498
  inst.instruction |= (msb - 1) << 16;
7499
}
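/* For illustration: "bfi r0, r1, #8, #4" has lsb 8 and width 4, so msb
   is 12; the encoding carries lsb (8) in bits 7-11 and msb - 1 (11) in
   bits 16-20, matching the LSB/MSB form described above.  */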
7500
 
7501
static void
7502
do_bfx (void)
7503
{
7504
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7505
              _("bit-field extends past end of register"));
7506
  inst.instruction |= inst.operands[0].reg << 12;
7507
  inst.instruction |= inst.operands[1].reg;
7508
  inst.instruction |= inst.operands[2].imm << 7;
7509
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
7510
}
7511
 
7512
/* ARM V5 breakpoint instruction (argument parse)
7513
     BKPT <16 bit unsigned immediate>
7514
     Instruction is not conditional.
7515
        The bit pattern given in insns[] has the COND_ALWAYS condition,
7516
        and it is an error if the caller tried to override that.  */
7517
 
7518
static void
7519
do_bkpt (void)
7520
{
7521
  /* Top 12 of 16 bits to bits 19:8.  */
7522
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7523
 
7524
  /* Bottom 4 of 16 bits to bits 3:0.  */
7525
  inst.instruction |= inst.operands[0].imm & 0xf;
7526
}
7527
 
7528
static void
7529
encode_branch (int default_reloc)
7530
{
7531
  if (inst.operands[0].hasreloc)
7532
    {
7533
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
7534
                  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
7535
                  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
7536
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
7537
        ? BFD_RELOC_ARM_PLT32
7538
        : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
7539
    }
7540
  else
7541
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
7542
  inst.reloc.pc_rel = 1;
7543
}
7544
 
7545
static void
7546
do_branch (void)
7547
{
7548
#ifdef OBJ_ELF
7549
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7550
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7551
  else
7552
#endif
7553
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7554
}
7555
 
7556
static void
7557
do_bl (void)
7558
{
7559
#ifdef OBJ_ELF
7560
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7561
    {
7562
      if (inst.cond == COND_ALWAYS)
7563
        encode_branch (BFD_RELOC_ARM_PCREL_CALL);
7564
      else
7565
        encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7566
    }
7567
  else
7568
#endif
7569
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7570
}
7571
 
7572
/* ARM V5 branch-link-exchange instruction (argument parse)
7573
     BLX <target_addr>          ie BLX(1)
7574
     BLX{<condition>} <Rm>      ie BLX(2)
7575
   Unfortunately, there are two different opcodes for this mnemonic.
7576
   So, the insns[].value is not used, and the code here zaps values
7577
        into inst.instruction.
7578
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
7579
 
7580
static void
7581
do_blx (void)
7582
{
7583
  if (inst.operands[0].isreg)
7584
    {
7585
      /* Arg is a register; the opcode provided by insns[] is correct.
7586
         It is not illegal to do "blx pc", just useless.  */
7587
      if (inst.operands[0].reg == REG_PC)
7588
        as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7589
 
7590
      inst.instruction |= inst.operands[0].reg;
7591
    }
7592
  else
7593
    {
7594
      /* Arg is an address; this instruction cannot be executed
7595
         conditionally, and the opcode must be adjusted.
7596
         We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7597
         where we generate a BFD_RELOC_ARM_PCREL_CALL instead.  */
7598
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
7599
      inst.instruction = 0xfa000000;
7600
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
7601
    }
7602
}
7603
 
7604
static void
7605
do_bx (void)
7606
{
7607
  bfd_boolean want_reloc;
7608
 
7609
  if (inst.operands[0].reg == REG_PC)
7610
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7611
 
7612
  inst.instruction |= inst.operands[0].reg;
7613
  /* Output R_ARM_V4BX relocations if this is an EABI object that looks like
7614
     it is for ARMv4t or earlier.  */
7615
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
7616
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
7617
      want_reloc = TRUE;
7618
 
7619
#ifdef OBJ_ELF
7620
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
7621
#endif
7622
    want_reloc = FALSE;
7623
 
7624
  if (want_reloc)
7625
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
7626
}
7627
 
7628
 
7629
/* ARM v5TEJ.  Jump to Jazelle code.  */
7630
 
7631
static void
7632
do_bxj (void)
7633
{
7634
  if (inst.operands[0].reg == REG_PC)
7635
    as_tsktsk (_("use of r15 in bxj is not really useful"));
7636
 
7637
  inst.instruction |= inst.operands[0].reg;
7638
}
7639
 
7640
/* Co-processor data operation:
7641
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7642
      CDP2      <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}  */
7643
static void
7644
do_cdp (void)
7645
{
7646
  inst.instruction |= inst.operands[0].reg << 8;
7647
  inst.instruction |= inst.operands[1].imm << 20;
7648
  inst.instruction |= inst.operands[2].reg << 12;
7649
  inst.instruction |= inst.operands[3].reg << 16;
7650
  inst.instruction |= inst.operands[4].reg;
7651
  inst.instruction |= inst.operands[5].imm << 5;
7652
}
7653
 
7654
static void
7655
do_cmp (void)
7656
{
7657
  inst.instruction |= inst.operands[0].reg << 16;
7658
  encode_arm_shifter_operand (1);
7659
}
7660
 
7661
/* Transfer between coprocessor and ARM registers.
7662
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7663
   MRC2
7664
   MCR{cond}
7665
   MCR2
7666
 
7667
   No special properties.  */
7668
 
7669
static void
7670
do_co_reg (void)
7671
{
7672
  unsigned Rd;
7673
 
7674
  Rd = inst.operands[2].reg;
7675
  if (thumb_mode)
7676
    {
7677
      if (inst.instruction == 0xee000010
7678
          || inst.instruction == 0xfe000010)
7679
        /* MCR, MCR2  */
7680
        reject_bad_reg (Rd);
7681
      else
7682
        /* MRC, MRC2  */
7683
        constraint (Rd == REG_SP, BAD_SP);
7684
    }
7685
  else
7686
    {
7687
      /* MCR */
7688
      if (inst.instruction == 0xe000010)
7689
        constraint (Rd == REG_PC, BAD_PC);
7690
    }
7691
 
7692
 
7693
  inst.instruction |= inst.operands[0].reg << 8;
7694
  inst.instruction |= inst.operands[1].imm << 21;
7695
  inst.instruction |= Rd << 12;
7696
  inst.instruction |= inst.operands[3].reg << 16;
7697
  inst.instruction |= inst.operands[4].reg;
7698
  inst.instruction |= inst.operands[5].imm << 5;
7699
}
7700
 
7701
/* Transfer between coprocessor register and pair of ARM registers.
7702
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7703
   MCRR2
7704
   MRRC{cond}
7705
   MRRC2
7706
 
7707
   Two XScale instructions are special cases of these:
7708
 
7709
     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7710
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7711
 
7712
   Result unpredictable if Rd or Rn is R15.  */
7713
 
7714
static void
7715
do_co_reg2c (void)
7716
{
7717
  unsigned Rd, Rn;
7718
 
7719
  Rd = inst.operands[2].reg;
7720
  Rn = inst.operands[3].reg;
7721
 
7722
  if (thumb_mode)
7723
    {
7724
      reject_bad_reg (Rd);
7725
      reject_bad_reg (Rn);
7726
    }
7727
  else
7728
    {
7729
      constraint (Rd == REG_PC, BAD_PC);
7730
      constraint (Rn == REG_PC, BAD_PC);
7731
    }
7732
 
7733
  inst.instruction |= inst.operands[0].reg << 8;
7734
  inst.instruction |= inst.operands[1].imm << 4;
7735
  inst.instruction |= Rd << 12;
7736
  inst.instruction |= Rn << 16;
7737
  inst.instruction |= inst.operands[4].reg;
7738
}
7739
 
7740
static void
7741
do_cpsi (void)
7742
{
7743
  inst.instruction |= inst.operands[0].imm << 6;
7744
  if (inst.operands[1].present)
7745
    {
7746
      inst.instruction |= CPSI_MMOD;
7747
      inst.instruction |= inst.operands[1].imm;
7748
    }
7749
}
7750
 
7751
static void
7752
do_dbg (void)
7753
{
7754
  inst.instruction |= inst.operands[0].imm;
7755
}
7756
 
7757
static void
7758
do_div (void)
7759
{
7760
  unsigned Rd, Rn, Rm;
7761
 
7762
  Rd = inst.operands[0].reg;
7763
  Rn = (inst.operands[1].present
7764
        ? inst.operands[1].reg : Rd);
7765
  Rm = inst.operands[2].reg;
7766
 
7767
  constraint ((Rd == REG_PC), BAD_PC);
7768
  constraint ((Rn == REG_PC), BAD_PC);
7769
  constraint ((Rm == REG_PC), BAD_PC);
7770
 
7771
  inst.instruction |= Rd << 16;
7772
  inst.instruction |= Rn << 0;
7773
  inst.instruction |= Rm << 8;
7774
}
7775
 
7776
static void
7777
do_it (void)
7778
{
7779
  /* There is no IT instruction in ARM mode.  We
7780
     process it to do the validation as if in
7781
     thumb mode, just in case the code gets
7782
     assembled for thumb using the unified syntax.  */
7783
 
7784
  inst.size = 0;
7785
  if (unified_syntax)
7786
    {
7787
      set_it_insn_type (IT_INSN);
7788
      now_it.mask = (inst.instruction & 0xf) | 0x10;
7789
      now_it.cc = inst.operands[0].imm;
7790
    }
7791
}
7792
 
7793
static void
7794
do_ldmstm (void)
7795
{
7796
  int base_reg = inst.operands[0].reg;
7797
  int range = inst.operands[1].imm;
7798
 
7799
  inst.instruction |= base_reg << 16;
7800
  inst.instruction |= range;
7801
 
7802
  if (inst.operands[1].writeback)
7803
    inst.instruction |= LDM_TYPE_2_OR_3;
7804
 
7805
  if (inst.operands[0].writeback)
7806
    {
7807
      inst.instruction |= WRITE_BACK;
7808
      /* Check for unpredictable uses of writeback.  */
7809
      if (inst.instruction & LOAD_BIT)
7810
        {
7811
          /* Not allowed in LDM type 2.  */
7812
          if ((inst.instruction & LDM_TYPE_2_OR_3)
7813
              && ((range & (1 << REG_PC)) == 0))
7814
            as_warn (_("writeback of base register is UNPREDICTABLE"));
7815
          /* Only allowed if base reg not in list for other types.  */
7816
          else if (range & (1 << base_reg))
7817
            as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7818
        }
7819
      else /* STM.  */
7820
        {
7821
          /* Not allowed for type 2.  */
7822
          if (inst.instruction & LDM_TYPE_2_OR_3)
7823
            as_warn (_("writeback of base register is UNPREDICTABLE"));
7824
          /* Only allowed if base reg not in list, or first in list.  */
7825
          else if ((range & (1 << base_reg))
7826
                   && (range & ((1 << base_reg) - 1)))
7827
            as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7828
        }
7829
    }
7830
}
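/* For illustration: "ldmia r0!, {r1, r2, r5}" puts base register r0 in
   bits 16-19, sets WRITE_BACK and uses the register-list mask 0x26
   (bits 1, 2 and 5); something like "ldmia r0!, {r0, r1}" triggers the
   warning above because the written-back base also appears in the
   list.  */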
7831
 
7832
/* ARMv5TE load-consecutive (argument parse)
7833
   Mode is like LDRH.
7834
 
7835
     LDRccD R, mode
7836
     STRccD R, mode.  */
7837
 
7838
static void
7839
do_ldrd (void)
7840
{
7841
  constraint (inst.operands[0].reg % 2 != 0,
7842 148 khays
              _("first transfer register must be even"));
7843 16 khays
  constraint (inst.operands[1].present
7844
              && inst.operands[1].reg != inst.operands[0].reg + 1,
7845 148 khays
              _("can only transfer two consecutive registers"));
7846 16 khays
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7847
  constraint (!inst.operands[2].isreg, _("'[' expected"));
7848
 
7849
  if (!inst.operands[1].present)
7850
    inst.operands[1].reg = inst.operands[0].reg + 1;
7851
 
7852 148 khays
  /* encode_arm_addr_mode_3 will diagnose overlap between the base
7853
     register and the first register written; we have to diagnose
7854
     overlap between the base and the second register written here.  */
7855 16 khays
 
7856 148 khays
  if (inst.operands[2].reg == inst.operands[1].reg
7857
      && (inst.operands[2].writeback || inst.operands[2].postind))
7858
    as_warn (_("base register written back, and overlaps "
7859
               "second transfer register"));
7860 16 khays
 
7861 148 khays
  if (!(inst.instruction & V4_STR_BIT))
7862
    {
7863 16 khays
      /* For an index-register load, the index register must not overlap the
7864 148 khays
        destination (even if not write-back).  */
7865
      if (inst.operands[2].immisreg
7866
              && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
7867
              || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
7868
        as_warn (_("index register overlaps transfer register"));
7869 16 khays
    }
7870
  inst.instruction |= inst.operands[0].reg << 12;
7871
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
7872
}
7873
 
7874
static void
7875
do_ldrex (void)
7876
{
7877
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
7878
              || inst.operands[1].postind || inst.operands[1].writeback
7879
              || inst.operands[1].immisreg || inst.operands[1].shifted
7880
              || inst.operands[1].negative
7881
              /* This can arise if the programmer has written
7882
                   strex rN, rM, foo
7883
                 or if they have mistakenly used a register name as the last
7884
                 operand,  eg:
7885
                   strex rN, rM, rX
7886
                 It is very difficult to distinguish between these two cases
7887
                 because "rX" might actually be a label. ie the register
7888
                 name has been occluded by a symbol of the same name. So we
7889
                 just generate a general 'bad addressing mode' type error
7890
                 message and leave it up to the programmer to discover the
7891
                 true cause and fix their mistake.  */
7892
              || (inst.operands[1].reg == REG_PC),
7893
              BAD_ADDR_MODE);
7894
 
7895
  constraint (inst.reloc.exp.X_op != O_constant
7896
              || inst.reloc.exp.X_add_number != 0,
7897
              _("offset must be zero in ARM encoding"));
7898
 
7899
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
7900
 
7901
  inst.instruction |= inst.operands[0].reg << 12;
7902
  inst.instruction |= inst.operands[1].reg << 16;
7903
  inst.reloc.type = BFD_RELOC_UNUSED;
7904
}
7905
 
7906
static void
7907
do_ldrexd (void)
7908
{
7909
  constraint (inst.operands[0].reg % 2 != 0,
7910
              _("even register required"));
7911
  constraint (inst.operands[1].present
7912
              && inst.operands[1].reg != inst.operands[0].reg + 1,
7913
              _("can only load two consecutive registers"));
7914
  /* If op 1 were present and equal to PC, this function wouldn't
7915
     have been called in the first place.  */
7916
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7917
 
7918
  inst.instruction |= inst.operands[0].reg << 12;
7919
  inst.instruction |= inst.operands[2].reg << 16;
7920
}
7921
 
7922 163 khays
/* In both ARM and Thumb state, 'ldr pc, #imm' with an immediate
7923
   which is not a multiple of four is UNPREDICTABLE.  */
7924 16 khays
static void
7925 163 khays
check_ldr_r15_aligned (void)
7926
{
7927
  constraint (!(inst.operands[1].immisreg)
7928
              && (inst.operands[0].reg == REG_PC
7929
              && inst.operands[1].reg == REG_PC
7930
              && (inst.reloc.exp.X_add_number & 0x3)),
7931
              _("ldr to register 15 must be 4-byte alligned"));
7932
}
7933
 
7934
static void
7935 16 khays
do_ldst (void)
7936
{
7937
  inst.instruction |= inst.operands[0].reg << 12;
7938
  if (!inst.operands[1].isreg)
7939
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7940
      return;
7941
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7942 163 khays
  check_ldr_r15_aligned ();
7943 16 khays
}
7944
 
7945
static void
7946
do_ldstt (void)
7947
{
7948
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
7949
     reject [Rn,...].  */
7950
  if (inst.operands[1].preind)
7951
    {
7952
      constraint (inst.reloc.exp.X_op != O_constant
7953
                  || inst.reloc.exp.X_add_number != 0,
7954
                  _("this instruction requires a post-indexed address"));
7955
 
7956
      inst.operands[1].preind = 0;
7957
      inst.operands[1].postind = 1;
7958
      inst.operands[1].writeback = 1;
7959
    }
7960
  inst.instruction |= inst.operands[0].reg << 12;
7961
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7962
}
7963
 
7964
/* Halfword and signed-byte load/store operations.  */
7965
 
7966
static void
7967
do_ldstv4 (void)
7968
{
7969
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
7970
  inst.instruction |= inst.operands[0].reg << 12;
7971
  if (!inst.operands[1].isreg)
7972
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7973
      return;
7974
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7975
}
7976
 
7977
static void
7978
do_ldsttv4 (void)
7979
{
7980
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
7981
     reject [Rn,...].  */
7982
  if (inst.operands[1].preind)
7983
    {
7984
      constraint (inst.reloc.exp.X_op != O_constant
7985
                  || inst.reloc.exp.X_add_number != 0,
7986
                  _("this instruction requires a post-indexed address"));
7987
 
7988
      inst.operands[1].preind = 0;
7989
      inst.operands[1].postind = 1;
7990
      inst.operands[1].writeback = 1;
7991
    }
7992
  inst.instruction |= inst.operands[0].reg << 12;
7993
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7994
}
7995
 
7996
/* Co-processor register load/store.
7997
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>  */
7998
static void
7999
do_lstc (void)
8000
{
8001
  inst.instruction |= inst.operands[0].reg << 8;
8002
  inst.instruction |= inst.operands[1].reg << 12;
8003
  encode_arm_cp_address (2, TRUE, TRUE, 0);
8004
}
8005
 
8006
static void
8007
do_mlas (void)
8008
{
8009
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
8010
  if (inst.operands[0].reg == inst.operands[1].reg
8011
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8012
      && !(inst.instruction & 0x00400000))
8013
    as_tsktsk (_("Rd and Rm should be different in mla"));
8014
 
8015
  inst.instruction |= inst.operands[0].reg << 16;
8016
  inst.instruction |= inst.operands[1].reg;
8017
  inst.instruction |= inst.operands[2].reg << 8;
8018
  inst.instruction |= inst.operands[3].reg << 12;
8019
}
8020
 
8021
static void
8022
do_mov (void)
8023
{
8024
  inst.instruction |= inst.operands[0].reg << 12;
8025
  encode_arm_shifter_operand (1);
8026
}
8027
 
8028
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
8029
static void
8030
do_mov16 (void)
8031
{
8032
  bfd_vma imm;
8033
  bfd_boolean top;
8034
 
8035
  top = (inst.instruction & 0x00400000) != 0;
8036
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8037
              _(":lower16: not allowed this instruction"));
8038
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8039
              _(":upper16: not allowed instruction"));
8040
  inst.instruction |= inst.operands[0].reg << 12;
8041
  if (inst.reloc.type == BFD_RELOC_UNUSED)
8042
    {
8043
      imm = inst.reloc.exp.X_add_number;
8044
      /* The value is in two pieces: 0:11, 16:19.  */
8045
      inst.instruction |= (imm & 0x00000fff);
8046
      inst.instruction |= (imm & 0x0000f000) << 4;
8047
    }
8048
}
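/* For illustration: "movw r0, #0x1234" with no :lower16:/:upper16:
   fixup splits the constant as above, placing 0x234 in bits 0-11 and
   0x1 in bits 16-19.  */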
8049
 
8050
static void do_vfp_nsyn_opcode (const char *);
8051
 
8052
static int
8053
do_vfp_nsyn_mrs (void)
8054
{
8055
  if (inst.operands[0].isvec)
8056
    {
8057
      if (inst.operands[1].reg != 1)
8058
        first_error (_("operand 1 must be FPSCR"));
8059
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8060
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8061
      do_vfp_nsyn_opcode ("fmstat");
8062
    }
8063
  else if (inst.operands[1].isvec)
8064
    do_vfp_nsyn_opcode ("fmrx");
8065
  else
8066
    return FAIL;
8067
 
8068
  return SUCCESS;
8069
}
8070
 
8071
static int
8072
do_vfp_nsyn_msr (void)
8073
{
8074
  if (inst.operands[0].isvec)
8075
    do_vfp_nsyn_opcode ("fmxr");
8076
  else
8077
    return FAIL;
8078
 
8079
  return SUCCESS;
8080
}
8081
 
8082
static void
8083
do_vmrs (void)
8084
{
8085
  unsigned Rt = inst.operands[0].reg;
8086
 
8087
  if (thumb_mode && inst.operands[0].reg == REG_SP)
8088
    {
8089
      inst.error = BAD_SP;
8090
      return;
8091
    }
8092
 
8093
  /* APSR_ sets isvec. All other refs to PC are illegal.  */
8094
  if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
8095
    {
8096
      inst.error = BAD_PC;
8097
      return;
8098
    }
8099
 
8100
  if (inst.operands[1].reg != 1)
8101
    first_error (_("operand 1 must be FPSCR"));
8102
 
8103
  inst.instruction |= (Rt << 12);
8104
}
8105
 
8106
static void
8107
do_vmsr (void)
8108
{
8109
  unsigned Rt = inst.operands[1].reg;
8110
 
8111
  if (thumb_mode)
8112
    reject_bad_reg (Rt);
8113
  else if (Rt == REG_PC)
8114
    {
8115
      inst.error = BAD_PC;
8116
      return;
8117
    }
8118
 
8119
  if (inst.operands[0].reg != 1)
8120
    first_error (_("operand 0 must be FPSCR"));
8121
 
8122
  inst.instruction |= (Rt << 12);
8123
}
8124
 
8125
static void
8126
do_mrs (void)
8127
{
8128
  unsigned br;
8129
 
8130
  if (do_vfp_nsyn_mrs () == SUCCESS)
8131
    return;
8132
 
8133
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8134
  inst.instruction |= inst.operands[0].reg << 12;
8135
 
8136
  if (inst.operands[1].isreg)
8137
    {
8138
      br = inst.operands[1].reg;
8139
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
8140
        as_bad (_("bad register for mrs"));
8141
    }
8142
  else
8143
    {
8144
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
8145
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
8146
                  != (PSR_c|PSR_f),
8147
                  _("'APSR', 'CPSR' or 'SPSR' expected"));
8148
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
8149
    }
8150
 
8151
  inst.instruction |= br;
8152
}
8153
 
8154
/* Two possible forms:
8155
      "{C|S}PSR_<field>, Rm",
8156
      "{C|S}PSR_f, #expression".  */
8157
 
8158
static void
8159
do_msr (void)
8160
{
8161
  if (do_vfp_nsyn_msr () == SUCCESS)
8162
    return;
8163
 
8164
  inst.instruction |= inst.operands[0].imm;
8165
  if (inst.operands[1].isreg)
8166
    inst.instruction |= inst.operands[1].reg;
8167
  else
8168
    {
8169
      inst.instruction |= INST_IMMEDIATE;
8170
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8171
      inst.reloc.pc_rel = 0;
8172
    }
8173
}
8174
 
8175
static void
8176
do_mul (void)
8177
{
8178
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8179
 
8180
  if (!inst.operands[2].present)
8181
    inst.operands[2].reg = inst.operands[0].reg;
8182
  inst.instruction |= inst.operands[0].reg << 16;
8183
  inst.instruction |= inst.operands[1].reg;
8184
  inst.instruction |= inst.operands[2].reg << 8;
8185
 
8186
  if (inst.operands[0].reg == inst.operands[1].reg
8187
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8188
    as_tsktsk (_("Rd and Rm should be different in mul"));
8189
}
8190
 
8191
/* Long Multiply Parser
8192
   UMULL RdLo, RdHi, Rm, Rs
8193
   SMULL RdLo, RdHi, Rm, Rs
8194
   UMLAL RdLo, RdHi, Rm, Rs
8195
   SMLAL RdLo, RdHi, Rm, Rs.  */
8196
 
8197
static void
8198
do_mull (void)
8199
{
8200
  inst.instruction |= inst.operands[0].reg << 12;
8201
  inst.instruction |= inst.operands[1].reg << 16;
8202
  inst.instruction |= inst.operands[2].reg;
8203
  inst.instruction |= inst.operands[3].reg << 8;
8204
 
8205
  /* rdhi and rdlo must be different.  */
8206
  if (inst.operands[0].reg == inst.operands[1].reg)
8207
    as_tsktsk (_("rdhi and rdlo must be different"));
8208
 
8209
  /* rdhi, rdlo and rm must all be different before armv6.  */
8210
  if ((inst.operands[0].reg == inst.operands[2].reg
8211
      || inst.operands[1].reg == inst.operands[2].reg)
8212
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8213
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8214
}
8215
 
8216
static void
8217
do_nop (void)
8218
{
8219
  if (inst.operands[0].present
8220
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8221
    {
8222
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
8223
      inst.instruction &= 0xf0000000;
8224
      inst.instruction |= 0x0320f000;
8225
      if (inst.operands[0].present)
8226
        inst.instruction |= inst.operands[0].imm;
8227
    }
8228
}
8229
 
8230
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8231
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8232
   Condition defaults to COND_ALWAYS.
8233
   Error if Rd, Rn or Rm are R15.  */
8234
 
8235
static void
8236
do_pkhbt (void)
8237
{
8238
  inst.instruction |= inst.operands[0].reg << 12;
8239
  inst.instruction |= inst.operands[1].reg << 16;
8240
  inst.instruction |= inst.operands[2].reg;
8241
  if (inst.operands[3].present)
8242
    encode_arm_shift (3);
8243
}
8244
 
8245
/* ARM V6 PKHTB (Argument Parse).  */
8246
 
8247
static void
8248
do_pkhtb (void)
8249
{
8250
  if (!inst.operands[3].present)
8251
    {
8252
      /* If the shift specifier is omitted, turn the instruction
8253
         into pkhbt rd, rm, rn. */
8254
      inst.instruction &= 0xfff00010;
8255
      inst.instruction |= inst.operands[0].reg << 12;
8256
      inst.instruction |= inst.operands[1].reg;
8257
      inst.instruction |= inst.operands[2].reg << 16;
8258
    }
8259
  else
8260
    {
8261
      inst.instruction |= inst.operands[0].reg << 12;
8262
      inst.instruction |= inst.operands[1].reg << 16;
8263
      inst.instruction |= inst.operands[2].reg;
8264
      encode_arm_shift (3);
8265
    }
8266
}
8267
 
8268
/* ARMv5TE: Preload-Cache
8269
   MP Extensions: Preload for write
8270
 
8271
    PLD(W) <addr_mode>
8272
 
8273
  Syntactically, like LDR with B=1, W=0, L=1.  */
8274
 
8275
static void
8276
do_pld (void)
8277
{
8278
  constraint (!inst.operands[0].isreg,
8279
              _("'[' expected after PLD mnemonic"));
8280
  constraint (inst.operands[0].postind,
8281
              _("post-indexed expression used in preload instruction"));
8282
  constraint (inst.operands[0].writeback,
8283
              _("writeback used in preload instruction"));
8284
  constraint (!inst.operands[0].preind,
8285
              _("unindexed addressing used in preload instruction"));
8286
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8287
}
8288
 
8289
/* ARMv7: PLI <addr_mode>  */
8290
static void
8291
do_pli (void)
8292
{
8293
  constraint (!inst.operands[0].isreg,
8294
              _("'[' expected after PLI mnemonic"));
8295
  constraint (inst.operands[0].postind,
8296
              _("post-indexed expression used in preload instruction"));
8297
  constraint (inst.operands[0].writeback,
8298
              _("writeback used in preload instruction"));
8299
  constraint (!inst.operands[0].preind,
8300
              _("unindexed addressing used in preload instruction"));
8301
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8302
  inst.instruction &= ~PRE_INDEX;
8303
}
8304
 
8305
static void
8306
do_push_pop (void)
8307
{
8308
  inst.operands[1] = inst.operands[0];
8309
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8310
  inst.operands[0].isreg = 1;
8311
  inst.operands[0].writeback = 1;
8312
  inst.operands[0].reg = REG_SP;
8313
  do_ldmstm ();
8314
}
8315
 
8316
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8317
   word at the specified address and the following word
8318
   respectively.
8319
   Unconditionally executed.
8320
   Error if Rn is R15.  */
8321
 
8322
static void
8323
do_rfe (void)
8324
{
8325
  inst.instruction |= inst.operands[0].reg << 16;
8326
  if (inst.operands[0].writeback)
8327
    inst.instruction |= WRITE_BACK;
8328
}
8329
 
8330
/* ARM V6 ssat (argument parse).  */
8331
 
8332
static void
8333
do_ssat (void)
8334
{
8335
  inst.instruction |= inst.operands[0].reg << 12;
8336
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
8337
  inst.instruction |= inst.operands[2].reg;
8338
 
8339
  if (inst.operands[3].present)
8340
    encode_arm_shift (3);
8341
}
8342
 
8343
/* ARM V6 usat (argument parse).  */
8344
 
8345
static void
8346
do_usat (void)
8347
{
8348
  inst.instruction |= inst.operands[0].reg << 12;
8349
  inst.instruction |= inst.operands[1].imm << 16;
8350
  inst.instruction |= inst.operands[2].reg;
8351
 
8352
  if (inst.operands[3].present)
8353
    encode_arm_shift (3);
8354
}
8355
 
8356
/* ARM V6 ssat16 (argument parse).  */
8357
 
8358
static void
8359
do_ssat16 (void)
8360
{
8361
  inst.instruction |= inst.operands[0].reg << 12;
8362
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
8363
  inst.instruction |= inst.operands[2].reg;
8364
}
8365
 
8366
static void
8367
do_usat16 (void)
8368
{
8369
  inst.instruction |= inst.operands[0].reg << 12;
8370
  inst.instruction |= inst.operands[1].imm << 16;
8371
  inst.instruction |= inst.operands[2].reg;
8372
}
8373
 
8374
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
8375
   preserving the other bits.
8376
 
8377
   setend <endian_specifier>, where <endian_specifier> is either
8378
   BE or LE.  */
8379
 
8380
static void
8381
do_setend (void)
8382
{
8383
  if (inst.operands[0].imm)
8384
    inst.instruction |= 0x200;
8385
}
8386
 
8387
static void
8388
do_shift (void)
8389
{
8390
  unsigned int Rm = (inst.operands[1].present
8391
                     ? inst.operands[1].reg
8392
                     : inst.operands[0].reg);
8393
 
8394
  inst.instruction |= inst.operands[0].reg << 12;
8395
  inst.instruction |= Rm;
8396
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
8397
    {
8398
      inst.instruction |= inst.operands[2].reg << 8;
8399
      inst.instruction |= SHIFT_BY_REG;
8400 148 khays
      /* PR 12854: Error on extraneous shifts.  */
8401
      constraint (inst.operands[2].shifted,
8402
                  _("extraneous shift as part of operand to shift insn"));
8403 16 khays
    }
8404
  else
8405
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
8406
}
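/* For illustration: "lsl r0, r1, #2" is encoded as a MOV with an LSL
   shifter operand; r0 goes to bits 12-15, r1 to bits 0-3 and the #2 is
   applied via BFD_RELOC_ARM_SHIFT_IMM, while "lsl r0, r1, r2" sets
   SHIFT_BY_REG and places r2 in bits 8-11.  */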
8407
 
8408
static void
8409
do_smc (void)
8410
{
8411
  inst.reloc.type = BFD_RELOC_ARM_SMC;
8412
  inst.reloc.pc_rel = 0;
8413
}
8414
 
8415
static void
8416
do_hvc (void)
8417
{
8418
  inst.reloc.type = BFD_RELOC_ARM_HVC;
8419
  inst.reloc.pc_rel = 0;
8420
}
8421
 
8422
static void
8423
do_swi (void)
8424
{
8425
  inst.reloc.type = BFD_RELOC_ARM_SWI;
8426
  inst.reloc.pc_rel = 0;
8427
}
8428
 
8429
/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8430
   SMLAxy{cond} Rd,Rm,Rs,Rn
8431
   SMLAWy{cond} Rd,Rm,Rs,Rn
8432
   Error if any register is R15.  */
8433
 
8434
static void
8435
do_smla (void)
8436
{
8437
  inst.instruction |= inst.operands[0].reg << 16;
8438
  inst.instruction |= inst.operands[1].reg;
8439
  inst.instruction |= inst.operands[2].reg << 8;
8440
  inst.instruction |= inst.operands[3].reg << 12;
8441
}
8442
 
8443
/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8444
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8445
   Error if any register is R15.
8446
   Warning if Rdlo == Rdhi.  */
8447
 
8448
static void
8449
do_smlal (void)
8450
{
8451
  inst.instruction |= inst.operands[0].reg << 12;
8452
  inst.instruction |= inst.operands[1].reg << 16;
8453
  inst.instruction |= inst.operands[2].reg;
8454
  inst.instruction |= inst.operands[3].reg << 8;
8455
 
8456
  if (inst.operands[0].reg == inst.operands[1].reg)
8457
    as_tsktsk (_("rdhi and rdlo must be different"));
8458
}
8459
 
8460
/* ARM V5E (El Segundo) signed-multiply (argument parse)
8461
   SMULxy{cond} Rd,Rm,Rs
8462
   Error if any register is R15.  */
8463
 
8464
static void
8465
do_smul (void)
8466
{
8467
  inst.instruction |= inst.operands[0].reg << 16;
8468
  inst.instruction |= inst.operands[1].reg;
8469
  inst.instruction |= inst.operands[2].reg << 8;
8470
}
8471
 
8472
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
8473
   the same for both ARM and Thumb-2.  */
8474
 
8475
static void
8476
do_srs (void)
8477
{
8478
  int reg;
8479
 
8480
  if (inst.operands[0].present)
8481
    {
8482
      reg = inst.operands[0].reg;
8483
      constraint (reg != REG_SP, _("SRS base register must be r13"));
8484
    }
8485
  else
8486
    reg = REG_SP;
8487
 
8488
  inst.instruction |= reg << 16;
8489
  inst.instruction |= inst.operands[1].imm;
8490
  if (inst.operands[0].writeback || inst.operands[1].writeback)
8491
    inst.instruction |= WRITE_BACK;
8492
}
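
/* For example, "srsdb sp!, #19" (store LR and SPSR to the Supervisor-mode
   stack) keeps r13 as the base in bits 16-19, places the mode number 19
   in the low bits and sets the writeback bit, while "srsia #31" defaults
   the base to r13 and leaves writeback clear.  This is only a sketch of
   the field placement done above; the DB/IA addressing-mode bits are
   presumed to come from the opcode table entry.  */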
 
8494
/* ARM V6 strex (argument parse).  */
8495
 
8496
static void
8497
do_strex (void)
8498
{
8499
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8500
              || inst.operands[2].postind || inst.operands[2].writeback
8501
              || inst.operands[2].immisreg || inst.operands[2].shifted
8502
              || inst.operands[2].negative
8503
              /* See comment in do_ldrex().  */
8504
              || (inst.operands[2].reg == REG_PC),
8505
              BAD_ADDR_MODE);
8506
 
8507
  constraint (inst.operands[0].reg == inst.operands[1].reg
8508
              || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8509
 
8510
  constraint (inst.reloc.exp.X_op != O_constant
8511
              || inst.reloc.exp.X_add_number != 0,
8512
              _("offset must be zero in ARM encoding"));
8513
 
8514
  inst.instruction |= inst.operands[0].reg << 12;
8515
  inst.instruction |= inst.operands[1].reg;
8516
  inst.instruction |= inst.operands[2].reg << 16;
8517
  inst.reloc.type = BFD_RELOC_UNUSED;
8518
}
8519
 
8520
static void
do_t_strexbh (void)
8522
{
8523
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8524
              || inst.operands[2].postind || inst.operands[2].writeback
8525
              || inst.operands[2].immisreg || inst.operands[2].shifted
8526
              || inst.operands[2].negative,
8527
              BAD_ADDR_MODE);
8528
 
8529
  constraint (inst.operands[0].reg == inst.operands[1].reg
8530
              || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8531
 
8532
  do_rm_rd_rn ();
8533
}
8534
 
8535
static void
do_strexd (void)
8537
{
8538
  constraint (inst.operands[1].reg % 2 != 0,
8539
              _("even register required"));
8540
  constraint (inst.operands[2].present
8541
              && inst.operands[2].reg != inst.operands[1].reg + 1,
8542
              _("can only store two consecutive registers"));
8543
  /* If op 2 were present and equal to PC, this function wouldn't
8544
     have been called in the first place.  */
8545
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
8546
 
8547
  constraint (inst.operands[0].reg == inst.operands[1].reg
8548
              || inst.operands[0].reg == inst.operands[1].reg + 1
8549
              || inst.operands[0].reg == inst.operands[3].reg,
8550
              BAD_OVERLAP);
8551
 
8552
  inst.instruction |= inst.operands[0].reg << 12;
8553
  inst.instruction |= inst.operands[1].reg;
8554
  inst.instruction |= inst.operands[3].reg << 16;
8555
}
8556
 
8557
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8558
   extends it to 32-bits, and adds the result to a value in another
8559
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
8560
   before extracting the 16-bit value.
8561
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8562
   Condition defaults to COND_ALWAYS.
8563
   Error if any register uses R15.  */
8564
 
8565
static void
8566
do_sxtah (void)
8567
{
8568
  inst.instruction |= inst.operands[0].reg << 12;
8569
  inst.instruction |= inst.operands[1].reg << 16;
8570
  inst.instruction |= inst.operands[2].reg;
8571
  inst.instruction |= inst.operands[3].imm << 10;
8572
}
8573
 
8574
/* ARM V6 SXTH.
8575
 
8576
   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8577
   Condition defaults to COND_ALWAYS.
8578
   Error if any register uses R15.  */
8579
 
8580
static void
8581
do_sxth (void)
8582
{
8583
  inst.instruction |= inst.operands[0].reg << 12;
8584
  inst.instruction |= inst.operands[1].reg;
8585
  inst.instruction |= inst.operands[2].imm << 10;
8586
}
8587
 
8588
/* VFP instructions.  In a logical order: SP variant first, monad
8589
   before dyad, arithmetic then move then load/store.  */
8590
 
8591
static void
8592
do_vfp_sp_monadic (void)
8593
{
8594
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8595
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8596
}
8597
 
8598
static void
8599
do_vfp_sp_dyadic (void)
8600
{
8601
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8602
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8603
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8604
}
8605
 
8606
static void
8607
do_vfp_sp_compare_z (void)
8608
{
8609
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8610
}
8611
 
8612
static void
8613
do_vfp_dp_sp_cvt (void)
8614
{
8615
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8616
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8617
}
8618
 
8619
static void
8620
do_vfp_sp_dp_cvt (void)
8621
{
8622
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8623
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8624
}
8625
 
8626
static void
8627
do_vfp_reg_from_sp (void)
8628
{
8629
  inst.instruction |= inst.operands[0].reg << 12;
8630
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8631
}
8632
 
8633
static void
8634
do_vfp_reg2_from_sp2 (void)
8635
{
8636
  constraint (inst.operands[2].imm != 2,
8637
              _("only two consecutive VFP SP registers allowed here"));
8638
  inst.instruction |= inst.operands[0].reg << 12;
8639
  inst.instruction |= inst.operands[1].reg << 16;
8640
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8641
}
8642
 
8643
static void
8644
do_vfp_sp_from_reg (void)
8645
{
8646
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
8647
  inst.instruction |= inst.operands[1].reg << 12;
8648
}
8649
 
8650
static void
8651
do_vfp_sp2_from_reg2 (void)
8652
{
8653
  constraint (inst.operands[0].imm != 2,
8654
              _("only two consecutive VFP SP registers allowed here"));
8655
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
8656
  inst.instruction |= inst.operands[1].reg << 12;
8657
  inst.instruction |= inst.operands[2].reg << 16;
8658
}
8659
 
8660
static void
8661
do_vfp_sp_ldst (void)
8662
{
8663
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8664
  encode_arm_cp_address (1, FALSE, TRUE, 0);
8665
}
8666
 
8667
static void
8668
do_vfp_dp_ldst (void)
8669
{
8670
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8671
  encode_arm_cp_address (1, FALSE, TRUE, 0);
8672
}
8673
 
8674
 
8675
static void
8676
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8677
{
8678
  if (inst.operands[0].writeback)
8679
    inst.instruction |= WRITE_BACK;
8680
  else
8681
    constraint (ldstm_type != VFP_LDSTMIA,
8682
                _("this addressing mode requires base-register writeback"));
8683
  inst.instruction |= inst.operands[0].reg << 16;
8684
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8685
  inst.instruction |= inst.operands[1].imm;
8686
}
8687
 
8688
static void
8689
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8690
{
8691
  int count;
8692
 
8693
  if (inst.operands[0].writeback)
8694
    inst.instruction |= WRITE_BACK;
8695
  else
8696
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8697
                _("this addressing mode requires base-register writeback"));
8698
 
8699
  inst.instruction |= inst.operands[0].reg << 16;
8700
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8701
 
8702
  count = inst.operands[1].imm << 1;
8703
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8704
    count += 1;
8705
 
8706
  inst.instruction |= count;
8707
}
8708
 
8709
static void
8710
do_vfp_sp_ldstmia (void)
8711
{
8712
  vfp_sp_ldstm (VFP_LDSTMIA);
8713
}
8714
 
8715
static void
8716
do_vfp_sp_ldstmdb (void)
8717
{
8718
  vfp_sp_ldstm (VFP_LDSTMDB);
8719
}
8720
 
8721
static void
8722
do_vfp_dp_ldstmia (void)
8723
{
8724
  vfp_dp_ldstm (VFP_LDSTMIA);
8725
}
8726
 
8727
static void
8728
do_vfp_dp_ldstmdb (void)
8729
{
8730
  vfp_dp_ldstm (VFP_LDSTMDB);
8731
}
8732
 
8733
static void
8734
do_vfp_xp_ldstmia (void)
8735
{
8736
  vfp_dp_ldstm (VFP_LDSTMIAX);
8737
}
8738
 
8739
static void
8740
do_vfp_xp_ldstmdb (void)
8741
{
8742
  vfp_dp_ldstm (VFP_LDSTMDBX);
8743
}
8744
 
8745
static void
8746
do_vfp_dp_rd_rm (void)
8747
{
8748
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8749
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8750
}
8751
 
8752
static void
8753
do_vfp_dp_rn_rd (void)
8754
{
8755
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
8756
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8757
}
8758
 
8759
static void
8760
do_vfp_dp_rd_rn (void)
8761
{
8762
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8763
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8764
}
8765
 
8766
static void
8767
do_vfp_dp_rd_rn_rm (void)
8768
{
8769
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8770
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8771
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
8772
}
8773
 
8774
static void
8775
do_vfp_dp_rd (void)
8776
{
8777
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8778
}
8779
 
8780
static void
8781
do_vfp_dp_rm_rd_rn (void)
8782
{
8783
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
8784
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8785
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
8786
}
8787
 
8788
/* VFPv3 instructions.  */
8789
static void
8790
do_vfp_sp_const (void)
8791
{
8792
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8793
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8794
  inst.instruction |= (inst.operands[1].imm & 0x0f);
8795
}
8796
 
8797
static void
8798
do_vfp_dp_const (void)
8799
{
8800
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8801
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8802
  inst.instruction |= (inst.operands[1].imm & 0x0f);
8803
}
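
/* Sketch of the immediate split used by the two functions above: the
   operand is assumed to arrive here already reduced to the 8-bit encoded
   VFP constant, so a value of, say, 0xab is stored with its high nibble
   (0xa) in bits 16-19 and its low nibble (0xb) in bits 0-3 of the
   instruction.  */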
 
8805
static void
8806
vfp_conv (int srcsize)
8807
{
  int immbits = srcsize - inst.operands[1].imm;
8809
 
8810
  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
8811
    {
8812
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16,
         i.e. immbits must be in the range 0-16.  */
8814
      inst.error = _("immediate value out of range, expected range [0, 16]");
8815
      return;
8816
    }
8817
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
8818
    {
8819
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32,
         i.e. immbits must be in the range 0-31.  */
8821
      inst.error = _("immediate value out of range, expected range [1, 32]");
8822
      return;
8823
    }
8824
 
  inst.instruction |= (immbits & 1) << 5;
8826
  inst.instruction |= (immbits >> 1);
8827
}
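
/* Worked example of the encoding above: for a conversion with srcsize 32
   and a #16 fraction-bits operand (roughly "vcvt.s32.f64 d0, d0, #16"),
   immbits is 32 - 16 = 16, so bit 5 receives 16 & 1 = 0 and bits 0-3
   receive 16 >> 1 = 8.  The field therefore stores srcsize minus the
   requested number of fraction bits, split across bit 5 (LSB) and
   bits 0-3.  */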
 
8829
static void
8830
do_vfp_sp_conv_16 (void)
8831
{
8832
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8833
  vfp_conv (16);
8834
}
8835
 
8836
static void
8837
do_vfp_dp_conv_16 (void)
8838
{
8839
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8840
  vfp_conv (16);
8841
}
8842
 
8843
static void
8844
do_vfp_sp_conv_32 (void)
8845
{
8846
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8847
  vfp_conv (32);
8848
}
8849
 
8850
static void
8851
do_vfp_dp_conv_32 (void)
8852
{
8853
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8854
  vfp_conv (32);
8855
}
8856
 
8857
/* FPA instructions.  Also in a logical order.  */
8858
 
8859
static void
8860
do_fpa_cmp (void)
8861
{
8862
  inst.instruction |= inst.operands[0].reg << 16;
8863
  inst.instruction |= inst.operands[1].reg;
8864
}
8865
 
8866
static void
8867
do_fpa_ldmstm (void)
8868
{
8869
  inst.instruction |= inst.operands[0].reg << 12;
8870
  switch (inst.operands[1].imm)
8871
    {
8872
    case 1: inst.instruction |= CP_T_X;          break;
8873
    case 2: inst.instruction |= CP_T_Y;          break;
8874
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
8875
    case 4:                                      break;
8876
    default: abort ();
8877
    }
8878
 
8879
  if (inst.instruction & (PRE_INDEX | INDEX_UP))
8880
    {
8881
      /* The instruction specified "ea" or "fd", so we can only accept
8882
         [Rn]{!}.  The instruction does not really support stacking or
8883
         unstacking, so we have to emulate these by setting appropriate
8884
         bits and offsets.  */
8885
      constraint (inst.reloc.exp.X_op != O_constant
8886
                  || inst.reloc.exp.X_add_number != 0,
8887
                  _("this instruction does not support indexing"));
8888
 
8889
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
8890
        inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
8891
 
8892
      if (!(inst.instruction & INDEX_UP))
8893
        inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
8894
 
8895
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
8896
        {
8897
          inst.operands[2].preind = 0;
8898
          inst.operands[2].postind = 1;
8899
        }
8900
    }
8901
 
8902
  encode_arm_cp_address (2, TRUE, TRUE, 0);
8903
}
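
/* Sketch of the stack emulation above, assuming 12 bytes are transferred
   per FPA register: for a three-register "full descending" store the
   pre-index offset becomes 3 * 12 = 36 and is then negated because
   INDEX_UP is clear, i.e. the base is decremented by 36 before the
   transfer.  This illustrates the offset arithmetic only, not a complete
   encoding.  */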
 
8905
/* iWMMXt instructions: strictly in alphabetical order.  */
8906
 
8907
static void
8908
do_iwmmxt_tandorc (void)
8909
{
8910
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
8911
}
8912
 
8913
static void
8914
do_iwmmxt_textrc (void)
8915
{
8916
  inst.instruction |= inst.operands[0].reg << 12;
8917
  inst.instruction |= inst.operands[1].imm;
8918
}
8919
 
8920
static void
8921
do_iwmmxt_textrm (void)
8922
{
8923
  inst.instruction |= inst.operands[0].reg << 12;
8924
  inst.instruction |= inst.operands[1].reg << 16;
8925
  inst.instruction |= inst.operands[2].imm;
8926
}
8927
 
8928
static void
8929
do_iwmmxt_tinsr (void)
8930
{
8931
  inst.instruction |= inst.operands[0].reg << 16;
8932
  inst.instruction |= inst.operands[1].reg << 12;
8933
  inst.instruction |= inst.operands[2].imm;
8934
}
8935
 
8936
static void
8937
do_iwmmxt_tmia (void)
8938
{
8939
  inst.instruction |= inst.operands[0].reg << 5;
8940
  inst.instruction |= inst.operands[1].reg;
8941
  inst.instruction |= inst.operands[2].reg << 12;
8942
}
8943
 
8944
static void
8945
do_iwmmxt_waligni (void)
8946
{
8947
  inst.instruction |= inst.operands[0].reg << 12;
8948
  inst.instruction |= inst.operands[1].reg << 16;
8949
  inst.instruction |= inst.operands[2].reg;
8950
  inst.instruction |= inst.operands[3].imm << 20;
8951
}
8952
 
8953
static void
8954
do_iwmmxt_wmerge (void)
8955
{
8956
  inst.instruction |= inst.operands[0].reg << 12;
8957
  inst.instruction |= inst.operands[1].reg << 16;
8958
  inst.instruction |= inst.operands[2].reg;
8959
  inst.instruction |= inst.operands[3].imm << 21;
8960
}
8961
 
8962
static void
8963
do_iwmmxt_wmov (void)
8964
{
8965
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
8966
  inst.instruction |= inst.operands[0].reg << 12;
8967
  inst.instruction |= inst.operands[1].reg << 16;
8968
  inst.instruction |= inst.operands[1].reg;
8969
}
8970
 
8971
static void
8972
do_iwmmxt_wldstbh (void)
8973
{
8974
  int reloc;
8975
  inst.instruction |= inst.operands[0].reg << 12;
8976
  if (thumb_mode)
8977
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
8978
  else
8979
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
8980
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
8981
}
8982
 
8983
static void
8984
do_iwmmxt_wldstw (void)
8985
{
8986
  /* RIWR_RIWC clears .isreg for a control register.  */
8987
  if (!inst.operands[0].isreg)
8988
    {
8989
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
8990
      inst.instruction |= 0xf0000000;
8991
    }
8992
 
8993
  inst.instruction |= inst.operands[0].reg << 12;
8994
  encode_arm_cp_address (1, TRUE, TRUE, 0);
8995
}
8996
 
8997
static void
8998
do_iwmmxt_wldstd (void)
8999
{
9000
  inst.instruction |= inst.operands[0].reg << 12;
9001
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
9002
      && inst.operands[1].immisreg)
9003
    {
9004
      inst.instruction &= ~0x1a000ff;
9005
      inst.instruction |= (0xf << 28);
9006
      if (inst.operands[1].preind)
9007
        inst.instruction |= PRE_INDEX;
9008
      if (!inst.operands[1].negative)
9009
        inst.instruction |= INDEX_UP;
9010
      if (inst.operands[1].writeback)
9011
        inst.instruction |= WRITE_BACK;
9012
      inst.instruction |= inst.operands[1].reg << 16;
9013
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
9014
      inst.instruction |= inst.operands[1].imm;
9015
    }
9016
  else
9017
    encode_arm_cp_address (1, TRUE, FALSE, 0);
9018
}
9019
 
9020
static void
9021
do_iwmmxt_wshufh (void)
9022
{
9023
  inst.instruction |= inst.operands[0].reg << 12;
9024
  inst.instruction |= inst.operands[1].reg << 16;
9025
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9026
  inst.instruction |= (inst.operands[2].imm & 0x0f);
9027
}
9028
 
9029
static void
9030
do_iwmmxt_wzero (void)
9031
{
9032
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
9033
  inst.instruction |= inst.operands[0].reg;
9034
  inst.instruction |= inst.operands[0].reg << 12;
9035
  inst.instruction |= inst.operands[0].reg << 16;
9036
}
9037
 
9038
static void
9039
do_iwmmxt_wrwrwr_or_imm5 (void)
9040
{
9041
  if (inst.operands[2].isreg)
9042
    do_rd_rn_rm ();
9043
  else {
9044
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9045
                _("immediate operand requires iWMMXt2"));
9046
    do_rd_rn ();
9047
    if (inst.operands[2].imm == 0)
9048
      {
9049
        switch ((inst.instruction >> 20) & 0xf)
9050
          {
9051
          case 4:
9052
          case 5:
9053
          case 6:
9054
          case 7:
9055
            /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
9056
            inst.operands[2].imm = 16;
9057
            inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9058
            break;
9059
          case 8:
9060
          case 9:
9061
          case 10:
9062
          case 11:
9063
            /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
9064
            inst.operands[2].imm = 32;
9065
            inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9066
            break;
9067
          case 12:
9068
          case 13:
9069
          case 14:
9070
          case 15:
9071
            {
9072
              /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
9073
              unsigned long wrn;
9074
              wrn = (inst.instruction >> 16) & 0xf;
9075
              inst.instruction &= 0xff0fff0f;
9076
              inst.instruction |= wrn;
9077
              /* Bail out here; the instruction is now assembled.  */
9078
              return;
9079
            }
9080
          }
9081
      }
9082
    /* Map 32 -> 0, etc.  */
9083
    inst.operands[2].imm &= 0x1f;
9084
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
9085
  }
9086
}
9087
 
9088
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
9089
   operations first, then control, shift, and load/store.  */
9090
 
9091
/* Insns like "foo X,Y,Z".  */
9092
 
9093
static void
9094
do_mav_triple (void)
9095
{
9096
  inst.instruction |= inst.operands[0].reg << 16;
9097
  inst.instruction |= inst.operands[1].reg;
9098
  inst.instruction |= inst.operands[2].reg << 12;
9099
}
9100
 
9101
/* Insns like "foo W,X,Y,Z".
9102
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
9103
 
9104
static void
9105
do_mav_quad (void)
9106
{
9107
  inst.instruction |= inst.operands[0].reg << 5;
9108
  inst.instruction |= inst.operands[1].reg << 12;
9109
  inst.instruction |= inst.operands[2].reg << 16;
9110
  inst.instruction |= inst.operands[3].reg;
9111
}
9112
 
9113
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
9114
static void
9115
do_mav_dspsc (void)
9116
{
9117
  inst.instruction |= inst.operands[1].reg << 12;
9118
}
9119
 
9120
/* Maverick shift immediate instructions.
9121
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9122
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
9123
 
9124
static void
9125
do_mav_shift (void)
9126
{
9127
  int imm = inst.operands[2].imm;
9128
 
9129
  inst.instruction |= inst.operands[0].reg << 12;
9130
  inst.instruction |= inst.operands[1].reg << 16;
9131
 
9132
  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9133
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
9134
     Bit 4 should be 0.  */
9135
  imm = (imm & 0xf) | ((imm & 0x70) << 1);
9136
 
9137
  inst.instruction |= imm;
9138
}
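
/* Worked example of the repacking above: a shift operand of 0x25
   (binary 010 0101) keeps its low nibble 0x5 in bits 0-3, while bits
   4-6 (value 010) move up one place into bits 5-7, giving
   (0x25 & 0xf) | ((0x25 & 0x70) << 1) = 0x05 | 0x40 = 0x45, with bit 4
   left zero as required.  */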
 
9140
/* XScale instructions.  Also sorted arithmetic before move.  */
9141
 
9142
/* Xscale multiply-accumulate (argument parse)
9143
     MIAcc   acc0,Rm,Rs
9144
     MIAPHcc acc0,Rm,Rs
9145
     MIAxycc acc0,Rm,Rs.  */
9146
 
9147
static void
9148
do_xsc_mia (void)
9149
{
9150
  inst.instruction |= inst.operands[1].reg;
9151
  inst.instruction |= inst.operands[2].reg << 12;
9152
}
9153
 
9154
/* Xscale move-accumulator-register (argument parse)
9155
 
9156
     MARcc   acc0,RdLo,RdHi.  */
9157
 
9158
static void
9159
do_xsc_mar (void)
9160
{
9161
  inst.instruction |= inst.operands[1].reg << 12;
9162
  inst.instruction |= inst.operands[2].reg << 16;
9163
}
9164
 
9165
/* Xscale move-register-accumulator (argument parse)
9166
 
9167
     MRAcc   RdLo,RdHi,acc0.  */
9168
 
9169
static void
9170
do_xsc_mra (void)
9171
{
9172
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9173
  inst.instruction |= inst.operands[0].reg << 12;
9174
  inst.instruction |= inst.operands[1].reg << 16;
9175
}
9176
 
9177
/* Encoding functions relevant only to Thumb.  */
9178
 
9179
/* inst.operands[i] is a shifted-register operand; encode
9180
   it into inst.instruction in the format used by Thumb32.  */
9181
 
9182
static void
9183
encode_thumb32_shifted_operand (int i)
9184
{
9185
  unsigned int value = inst.reloc.exp.X_add_number;
9186
  unsigned int shift = inst.operands[i].shift_kind;
9187
 
9188
  constraint (inst.operands[i].immisreg,
9189
              _("shift by register not allowed in thumb mode"));
9190
  inst.instruction |= inst.operands[i].reg;
9191
  if (shift == SHIFT_RRX)
9192
    inst.instruction |= SHIFT_ROR << 4;
9193
  else
9194
    {
9195
      constraint (inst.reloc.exp.X_op != O_constant,
9196
                  _("expression too complex"));
9197
 
9198
      constraint (value > 32
9199
                  || (value == 32 && (shift == SHIFT_LSL
9200
                                      || shift == SHIFT_ROR)),
9201
                  _("shift expression is too large"));
9202
 
9203
      if (value == 0)
9204
        shift = SHIFT_LSL;
9205
      else if (value == 32)
9206
        value = 0;
9207
 
9208
      inst.instruction |= shift << 4;
9209
      inst.instruction |= (value & 0x1c) << 10;
9210
      inst.instruction |= (value & 0x03) << 6;
9211
    }
9212
}
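
/* Worked example for the function above: an operand of "r3, lsr #5"
   puts r3 in bits 0-3 and the LSR type in bits 4-5, then splits the
   amount 5 into (5 & 0x1c) >> 2 = 1 at bits 12-14 and 5 & 3 = 1 at
   bits 6-7.  A shift amount of 0 is canonicalised to LSL #0 (a plain
   register), ASR/LSR #32 are encoded with a zero amount, and RRX is
   encoded as ROR with a zero amount, exactly as the code above does.  */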
 
9214
 
9215
/* inst.operands[i] was set up by parse_address.  Encode it into a
9216
   Thumb32 format load or store instruction.  Reject forms that cannot
9217
   be used with such instructions.  If is_t is true, reject forms that
9218
   cannot be used with a T instruction; if is_d is true, reject forms
9219
   that cannot be used with a D instruction.  If it is a store insn,
9220
   reject PC in Rn.  */
9221
 
9222
static void
9223
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9224
{
9225
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
9226
 
9227
  constraint (!inst.operands[i].isreg,
9228
              _("Instruction does not support =N addresses"));
9229
 
9230
  inst.instruction |= inst.operands[i].reg << 16;
9231
  if (inst.operands[i].immisreg)
9232
    {
9233
      constraint (is_pc, BAD_PC_ADDRESSING);
9234
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
9235
      constraint (inst.operands[i].negative,
9236
                  _("Thumb does not support negative register indexing"));
9237
      constraint (inst.operands[i].postind,
9238
                  _("Thumb does not support register post-indexing"));
9239
      constraint (inst.operands[i].writeback,
9240
                  _("Thumb does not support register indexing with writeback"));
9241
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
9242
                  _("Thumb supports only LSL in shifted register indexing"));
9243
 
9244
      inst.instruction |= inst.operands[i].imm;
9245
      if (inst.operands[i].shifted)
9246
        {
9247
          constraint (inst.reloc.exp.X_op != O_constant,
9248
                      _("expression too complex"));
9249
          constraint (inst.reloc.exp.X_add_number < 0
9250
                      || inst.reloc.exp.X_add_number > 3,
9251
                      _("shift out of range"));
9252
          inst.instruction |= inst.reloc.exp.X_add_number << 4;
9253
        }
9254
      inst.reloc.type = BFD_RELOC_UNUSED;
9255
    }
9256
  else if (inst.operands[i].preind)
9257
    {
9258
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
9259
      constraint (is_t && inst.operands[i].writeback,
9260
                  _("cannot use writeback with this instruction"));
9261
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
9262
                  && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);
9263
 
9264
      if (is_d)
9265
        {
9266
          inst.instruction |= 0x01000000;
9267
          if (inst.operands[i].writeback)
9268
            inst.instruction |= 0x00200000;
9269
        }
9270
      else
9271
        {
9272
          inst.instruction |= 0x00000c00;
9273
          if (inst.operands[i].writeback)
9274
            inst.instruction |= 0x00000100;
9275
        }
9276
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9277
    }
9278
  else if (inst.operands[i].postind)
9279
    {
9280
      gas_assert (inst.operands[i].writeback);
9281
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
9282
      constraint (is_t, _("cannot use post-indexing with this instruction"));
9283
 
9284
      if (is_d)
9285
        inst.instruction |= 0x00200000;
9286
      else
9287
        inst.instruction |= 0x00000900;
9288
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9289
    }
9290
  else /* unindexed - only for coprocessor */
9291
    inst.error = _("instruction does not accept unindexed addressing");
9292
}
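
/* Rough examples of the cases above, for a non-T, non-D load:

       [r1, r2, lsl #2]   ->  Rn (r1) in bits 16-19, Rm (r2) in bits 0-3,
                              shift amount 2 placed at bits 4-5;
       [r1, #8]!          ->  immediate pre-index with writeback, i.e.
                              0x00000c00 | 0x00000100, offset fixed up via
                              BFD_RELOC_ARM_T32_OFFSET_IMM;
       [r1], #8           ->  post-index, 0x00000900 plus the same reloc.

   These are sketches of the bit settings made above rather than full
   encodings.  */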
 
9294
/* Table of Thumb instructions which exist in both 16- and 32-bit
9295
   encodings (the latter only in post-V6T2 cores).  The index is the
9296
   value used in the insns table below.  When there is more than one
9297
   possible 16-bit encoding for the instruction, this table always
9298
   holds variant (1).
9299
   Also contains several pseudo-instructions used during relaxation.  */
9300
#define T16_32_TAB                              \
9301
  X(_adc,   4140, eb400000),                    \
9302
  X(_adcs,  4140, eb500000),                    \
9303
  X(_add,   1c00, eb000000),                    \
9304
  X(_adds,  1c00, eb100000),                    \
9305
  X(_addi,  0000, f1000000),                    \
9306
  X(_addis, 0000, f1100000),                    \
9307
  X(_add_pc,000f, f20f0000),                    \
9308
  X(_add_sp,000d, f10d0000),                    \
9309
  X(_adr,   000f, f20f0000),                    \
9310
  X(_and,   4000, ea000000),                    \
9311
  X(_ands,  4000, ea100000),                    \
9312
  X(_asr,   1000, fa40f000),                    \
9313
  X(_asrs,  1000, fa50f000),                    \
9314
  X(_b,     e000, f000b000),                    \
9315
  X(_bcond, d000, f0008000),                    \
9316
  X(_bic,   4380, ea200000),                    \
9317
  X(_bics,  4380, ea300000),                    \
9318
  X(_cmn,   42c0, eb100f00),                    \
9319
  X(_cmp,   2800, ebb00f00),                    \
9320
  X(_cpsie, b660, f3af8400),                    \
9321
  X(_cpsid, b670, f3af8600),                    \
9322
  X(_cpy,   4600, ea4f0000),                    \
9323
  X(_dec_sp,80dd, f1ad0d00),                    \
9324
  X(_eor,   4040, ea800000),                    \
9325
  X(_eors,  4040, ea900000),                    \
9326
  X(_inc_sp,00dd, f10d0d00),                    \
9327
  X(_ldmia, c800, e8900000),                    \
9328
  X(_ldr,   6800, f8500000),                    \
9329
  X(_ldrb,  7800, f8100000),                    \
9330
  X(_ldrh,  8800, f8300000),                    \
9331
  X(_ldrsb, 5600, f9100000),                    \
9332
  X(_ldrsh, 5e00, f9300000),                    \
9333
  X(_ldr_pc,4800, f85f0000),                    \
9334
  X(_ldr_pc2,4800, f85f0000),                   \
9335
  X(_ldr_sp,9800, f85d0000),                    \
9336
  X(_lsl,   0000, fa00f000),                    \
9337
  X(_lsls,  0000, fa10f000),                    \
9338
  X(_lsr,   0800, fa20f000),                    \
9339
  X(_lsrs,  0800, fa30f000),                    \
9340
  X(_mov,   2000, ea4f0000),                    \
9341
  X(_movs,  2000, ea5f0000),                    \
9342
  X(_mul,   4340, fb00f000),                     \
9343
  X(_muls,  4340, ffffffff), /* no 32b muls */  \
9344
  X(_mvn,   43c0, ea6f0000),                    \
9345
  X(_mvns,  43c0, ea7f0000),                    \
9346
  X(_neg,   4240, f1c00000), /* rsb #0 */       \
9347
  X(_negs,  4240, f1d00000), /* rsbs #0 */      \
9348
  X(_orr,   4300, ea400000),                    \
9349
  X(_orrs,  4300, ea500000),                    \
9350
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */        \
9351
  X(_push,  b400, e92d0000), /* stmdb sp!,... */        \
9352
  X(_rev,   ba00, fa90f080),                    \
9353
  X(_rev16, ba40, fa90f090),                    \
9354
  X(_revsh, bac0, fa90f0b0),                    \
9355
  X(_ror,   41c0, fa60f000),                    \
9356
  X(_rors,  41c0, fa70f000),                    \
9357
  X(_sbc,   4180, eb600000),                    \
9358
  X(_sbcs,  4180, eb700000),                    \
9359
  X(_stmia, c000, e8800000),                    \
9360
  X(_str,   6000, f8400000),                    \
9361
  X(_strb,  7000, f8000000),                    \
9362
  X(_strh,  8000, f8200000),                    \
9363
  X(_str_sp,9000, f84d0000),                    \
9364
  X(_sub,   1e00, eba00000),                    \
9365
  X(_subs,  1e00, ebb00000),                    \
9366
  X(_subi,  8000, f1a00000),                    \
9367
  X(_subis, 8000, f1b00000),                    \
9368
  X(_sxtb,  b240, fa4ff080),                    \
9369
  X(_sxth,  b200, fa0ff080),                    \
9370
  X(_tst,   4200, ea100f00),                    \
9371
  X(_uxtb,  b2c0, fa5ff080),                    \
9372
  X(_uxth,  b280, fa1ff080),                    \
9373
  X(_nop,   bf00, f3af8000),                    \
9374
  X(_yield, bf10, f3af8001),                    \
9375
  X(_wfe,   bf20, f3af8002),                    \
9376
  X(_wfi,   bf30, f3af8003),                    \
9377
  X(_sev,   bf40, f3af8004),
9378
 
9379
/* To catch errors in encoding functions, the codes are all offset by
9380
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
9381
   as 16-bit instructions.  */
9382
#define X(a,b,c) T_MNEM##a
9383
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
9384
#undef X
9385
 
9386
#define X(a,b,c) 0x##b
9387
static const unsigned short thumb_op16[] = { T16_32_TAB };
9388
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
9389
#undef X
9390
 
9391
#define X(a,b,c) 0x##c
9392
static const unsigned int thumb_op32[] = { T16_32_TAB };
9393
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
9394
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
9395
#undef X
9396
#undef T16_32_TAB
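
/* Illustration of how the X-macro table above is consumed: T_MNEM_push
   is an enum value just above 0xF7FF, and the lookup macros recover the
   two encodings recorded for it, e.g.

       THUMB_OP16 (T_MNEM_push)  == 0xb400
       THUMB_OP32 (T_MNEM_push)  == 0xe92d0000
       THUMB_SETS_FLAGS (T_MNEM_adds) != 0   (bit 20 of 0xeb100000)

   so the encoder functions below can pick the narrow or wide form from a
   single mnemonic index.  */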
 
9398
/* Thumb instruction encoders, in alphabetical order.  */
9399
 
9400
/* ADDW or SUBW.  */
9401
 
9402
static void
9403
do_t_add_sub_w (void)
9404
{
9405
  int Rd, Rn;
9406
 
9407
  Rd = inst.operands[0].reg;
9408
  Rn = inst.operands[1].reg;
9409
 
9410
  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9411
     is the SP-{plus,minus}-immediate form of the instruction.  */
9412
  if (Rn == REG_SP)
9413
    constraint (Rd == REG_PC, BAD_PC);
9414
  else
9415
    reject_bad_reg (Rd);
9416
 
9417
  inst.instruction |= (Rn << 16) | (Rd << 8);
9418
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9419
}
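
/* For instance, "addw r1, r5, #4000" places Rn (r5) in bits 16-19 and
   Rd (r1) in bits 8-11, and leaves the 12-bit immediate to be filled in
   by the BFD_RELOC_ARM_T32_IMM12 fixup; with Rn == PC this same path
   serves the ADR-style form, and with Rn == SP the SP-plus/minus-immediate
   form mentioned above.  */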
 
9421
/* Parse an add or subtract instruction.  We get here with inst.instruction
9422
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */
9423
 
9424
static void
9425
do_t_add_sub (void)
9426
{
9427
  int Rd, Rs, Rn;
9428
 
9429
  Rd = inst.operands[0].reg;
9430
  Rs = (inst.operands[1].present
9431
        ? inst.operands[1].reg    /* Rd, Rs, foo */
9432
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9433
 
9434
  if (Rd == REG_PC)
9435
    set_it_insn_type_last ();
9436
 
9437
  if (unified_syntax)
9438
    {
9439
      bfd_boolean flags;
9440
      bfd_boolean narrow;
9441
      int opcode;
9442
 
9443
      flags = (inst.instruction == T_MNEM_adds
9444
               || inst.instruction == T_MNEM_subs);
9445
      if (flags)
9446
        narrow = !in_it_block ();
9447
      else
9448
        narrow = in_it_block ();
9449
      if (!inst.operands[2].isreg)
9450
        {
9451
          int add;
9452
 
9453
          constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9454
 
9455
          add = (inst.instruction == T_MNEM_add
9456
                 || inst.instruction == T_MNEM_adds);
9457
          opcode = 0;
9458
          if (inst.size_req != 4)
9459
            {
9460
              /* Attempt to use a narrow opcode, with relaxation if
9461
                 appropriate.  */
9462
              if (Rd == REG_SP && Rs == REG_SP && !flags)
9463
                opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
9464
              else if (Rd <= 7 && Rs == REG_SP && add && !flags)
9465
                opcode = T_MNEM_add_sp;
9466
              else if (Rd <= 7 && Rs == REG_PC && add && !flags)
9467
                opcode = T_MNEM_add_pc;
9468
              else if (Rd <= 7 && Rs <= 7 && narrow)
9469
                {
9470
                  if (flags)
9471
                    opcode = add ? T_MNEM_addis : T_MNEM_subis;
9472
                  else
9473
                    opcode = add ? T_MNEM_addi : T_MNEM_subi;
9474
                }
9475
              if (opcode)
9476
                {
9477
                  inst.instruction = THUMB_OP16(opcode);
9478
                  inst.instruction |= (Rd << 4) | Rs;
9479
                  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9480
                  if (inst.size_req != 2)
9481
                    inst.relax = opcode;
9482
                }
9483
              else
9484
                constraint (inst.size_req == 2, BAD_HIREG);
9485
            }
9486
          if (inst.size_req == 4
9487
              || (inst.size_req != 2 && !opcode))
9488
            {
9489
              if (Rd == REG_PC)
9490
                {
9491
                  constraint (add, BAD_PC);
9492
                  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
9493
                             _("only SUBS PC, LR, #const allowed"));
9494
                  constraint (inst.reloc.exp.X_op != O_constant,
9495
                              _("expression too complex"));
9496
                  constraint (inst.reloc.exp.X_add_number < 0
9497
                              || inst.reloc.exp.X_add_number > 0xff,
9498
                             _("immediate value out of range"));
9499
                  inst.instruction = T2_SUBS_PC_LR
9500
                                     | inst.reloc.exp.X_add_number;
9501
                  inst.reloc.type = BFD_RELOC_UNUSED;
9502
                  return;
9503
                }
9504
              else if (Rs == REG_PC)
9505
                {
9506
                  /* Always use addw/subw.  */
9507
                  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
9508
                  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9509
                }
9510
              else
9511
                {
9512
                  inst.instruction = THUMB_OP32 (inst.instruction);
9513
                  inst.instruction = (inst.instruction & 0xe1ffffff)
9514
                                     | 0x10000000;
9515
                  if (flags)
9516
                    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9517
                  else
9518
                    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
9519
                }
9520
              inst.instruction |= Rd << 8;
9521
              inst.instruction |= Rs << 16;
9522
            }
9523
        }
9524
      else
9525
        {
          unsigned int value = inst.reloc.exp.X_add_number;
9527
          unsigned int shift = inst.operands[2].shift_kind;
9528
 
          Rn = inst.operands[2].reg;
9530
          /* See if we can do this with a 16-bit instruction.  */
9531
          if (!inst.operands[2].shifted && inst.size_req != 4)
9532
            {
9533
              if (Rd > 7 || Rs > 7 || Rn > 7)
9534
                narrow = FALSE;
9535
 
9536
              if (narrow)
9537
                {
9538
                  inst.instruction = ((inst.instruction == T_MNEM_adds
9539
                                       || inst.instruction == T_MNEM_add)
9540
                                      ? T_OPCODE_ADD_R3
9541
                                      : T_OPCODE_SUB_R3);
9542
                  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9543
                  return;
9544
                }
9545
 
9546
              if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
9547
                {
9548
                  /* Thumb-1 cores (except v6-M) require at least one high
                     register in a narrow, non-flag-setting add.  */
9550
                  if (Rd > 7 || Rn > 7
9551
                      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
9552
                      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
9553
                    {
9554
                      if (Rd == Rn)
9555
                        {
9556
                          Rn = Rs;
9557
                          Rs = Rd;
9558
                        }
9559
                      inst.instruction = T_OPCODE_ADD_HI;
9560
                      inst.instruction |= (Rd & 8) << 4;
9561
                      inst.instruction |= (Rd & 7);
9562
                      inst.instruction |= Rn << 3;
9563
                      return;
9564
                    }
9565
                }
9566
            }
9567
 
9568
          constraint (Rd == REG_PC, BAD_PC);
9569
          constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9570
          constraint (Rs == REG_PC, BAD_PC);
9571
          reject_bad_reg (Rn);
9572
 
9573
          /* If we get here, it can't be done in 16 bits.  */
9574
          constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
9575
                      _("shift must be constant"));
9576
          inst.instruction = THUMB_OP32 (inst.instruction);
9577
          inst.instruction |= Rd << 8;
9578
          inst.instruction |= Rs << 16;
          constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
9580
                      _("shift value over 3 not allowed in thumb mode"));
9581
          constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
9582
                      _("only LSL shift allowed in thumb mode"));
          encode_thumb32_shifted_operand (2);
9584
        }
9585
    }
9586
  else
9587
    {
9588
      constraint (inst.instruction == T_MNEM_adds
9589
                  || inst.instruction == T_MNEM_subs,
9590
                  BAD_THUMB32);
9591
 
9592
      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
9593
        {
9594
          constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
9595
                      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
9596
                      BAD_HIREG);
9597
 
9598
          inst.instruction = (inst.instruction == T_MNEM_add
9599
                              ? 0x0000 : 0x8000);
9600
          inst.instruction |= (Rd << 4) | Rs;
9601
          inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9602
          return;
9603
        }
9604
 
9605
      Rn = inst.operands[2].reg;
9606
      constraint (inst.operands[2].shifted, _("unshifted register required"));
9607
 
9608
      /* We now have Rd, Rs, and Rn set to registers.  */
9609
      if (Rd > 7 || Rs > 7 || Rn > 7)
9610
        {
9611
          /* Can't do this for SUB.      */
9612
          constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
9613
          inst.instruction = T_OPCODE_ADD_HI;
9614
          inst.instruction |= (Rd & 8) << 4;
9615
          inst.instruction |= (Rd & 7);
9616
          if (Rs == Rd)
9617
            inst.instruction |= Rn << 3;
9618
          else if (Rn == Rd)
9619
            inst.instruction |= Rs << 3;
9620
          else
9621
            constraint (1, _("dest must overlap one source register"));
9622
        }
9623
      else
9624
        {
9625
          inst.instruction = (inst.instruction == T_MNEM_add
9626
                              ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
9627
          inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9628
        }
9629
    }
9630
}
9631
 
9632
static void
9633
do_t_adr (void)
9634
{
9635
  unsigned Rd;
9636
 
9637
  Rd = inst.operands[0].reg;
9638
  reject_bad_reg (Rd);
9639
 
9640
  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
9641
    {
9642
      /* Defer to section relaxation.  */
9643
      inst.relax = inst.instruction;
9644
      inst.instruction = THUMB_OP16 (inst.instruction);
9645
      inst.instruction |= Rd << 4;
9646
    }
9647
  else if (unified_syntax && inst.size_req != 2)
9648
    {
9649
      /* Generate a 32-bit opcode.  */
9650
      inst.instruction = THUMB_OP32 (inst.instruction);
9651
      inst.instruction |= Rd << 8;
9652
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
9653
      inst.reloc.pc_rel = 1;
9654
    }
9655
  else
9656
    {
9657
      /* Generate a 16-bit opcode.  */
9658
      inst.instruction = THUMB_OP16 (inst.instruction);
9659
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9660
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
9661
      inst.reloc.pc_rel = 1;
9662
 
9663
      inst.instruction |= Rd << 4;
9664
    }
9665
}
9666
 
9667
/* Arithmetic instructions for which there is just one 16-bit
9668
   instruction encoding, and it allows only two low registers.
9669
   For maximal compatibility with ARM syntax, we allow three register
9670
   operands even when Thumb-32 instructions are not available, as long
9671
   as the first two are identical.  For instance, both "sbc r0,r1" and
9672
   "sbc r0,r0,r1" are allowed.  */
9673
static void
9674
do_t_arit3 (void)
9675
{
9676
  int Rd, Rs, Rn;
9677
 
9678
  Rd = inst.operands[0].reg;
9679
  Rs = (inst.operands[1].present
9680
        ? inst.operands[1].reg    /* Rd, Rs, foo */
9681
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9682
  Rn = inst.operands[2].reg;
9683
 
9684
  reject_bad_reg (Rd);
9685
  reject_bad_reg (Rs);
9686
  if (inst.operands[2].isreg)
9687
    reject_bad_reg (Rn);
9688
 
9689
  if (unified_syntax)
9690
    {
9691
      if (!inst.operands[2].isreg)
9692
        {
9693
          /* For an immediate, we always generate a 32-bit opcode;
9694
             section relaxation will shrink it later if possible.  */
9695
          inst.instruction = THUMB_OP32 (inst.instruction);
9696
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9697
          inst.instruction |= Rd << 8;
9698
          inst.instruction |= Rs << 16;
9699
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9700
        }
9701
      else
9702
        {
9703
          bfd_boolean narrow;
9704
 
9705
          /* See if we can do this with a 16-bit instruction.  */
9706
          if (THUMB_SETS_FLAGS (inst.instruction))
9707
            narrow = !in_it_block ();
9708
          else
9709
            narrow = in_it_block ();
9710
 
9711
          if (Rd > 7 || Rn > 7 || Rs > 7)
9712
            narrow = FALSE;
9713
          if (inst.operands[2].shifted)
9714
            narrow = FALSE;
9715
          if (inst.size_req == 4)
9716
            narrow = FALSE;
9717
 
9718
          if (narrow
9719
              && Rd == Rs)
9720
            {
9721
              inst.instruction = THUMB_OP16 (inst.instruction);
9722
              inst.instruction |= Rd;
9723
              inst.instruction |= Rn << 3;
9724
              return;
9725
            }
9726
 
9727
          /* If we get here, it can't be done in 16 bits.  */
9728
          constraint (inst.operands[2].shifted
9729
                      && inst.operands[2].immisreg,
9730
                      _("shift must be constant"));
9731
          inst.instruction = THUMB_OP32 (inst.instruction);
9732
          inst.instruction |= Rd << 8;
9733
          inst.instruction |= Rs << 16;
9734
          encode_thumb32_shifted_operand (2);
9735
        }
9736
    }
9737
  else
9738
    {
9739
      /* On its face this is a lie - the instruction does set the
9740
         flags.  However, the only supported mnemonic in this mode
9741
         says it doesn't.  */
9742
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9743
 
9744
      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9745
                  _("unshifted register required"));
9746
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9747
      constraint (Rd != Rs,
9748
                  _("dest and source1 must be the same register"));
9749
 
9750
      inst.instruction = THUMB_OP16 (inst.instruction);
9751
      inst.instruction |= Rd;
9752
      inst.instruction |= Rn << 3;
9753
    }
9754
}
9755
 
9756
/* Similarly, but for instructions where the arithmetic operation is
9757
   commutative, so we can allow either of them to be different from
9758
   the destination operand in a 16-bit instruction.  For instance, all
9759
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9760
   accepted.  */
9761
static void
9762
do_t_arit3c (void)
9763
{
9764
  int Rd, Rs, Rn;
9765
 
9766
  Rd = inst.operands[0].reg;
9767
  Rs = (inst.operands[1].present
9768
        ? inst.operands[1].reg    /* Rd, Rs, foo */
9769
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
9770
  Rn = inst.operands[2].reg;
9771
 
9772
  reject_bad_reg (Rd);
9773
  reject_bad_reg (Rs);
9774
  if (inst.operands[2].isreg)
9775
    reject_bad_reg (Rn);
9776
 
9777
  if (unified_syntax)
9778
    {
9779
      if (!inst.operands[2].isreg)
9780
        {
9781
          /* For an immediate, we always generate a 32-bit opcode;
9782
             section relaxation will shrink it later if possible.  */
9783
          inst.instruction = THUMB_OP32 (inst.instruction);
9784
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9785
          inst.instruction |= Rd << 8;
9786
          inst.instruction |= Rs << 16;
9787
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9788
        }
9789
      else
9790
        {
9791
          bfd_boolean narrow;
9792
 
9793
          /* See if we can do this with a 16-bit instruction.  */
9794
          if (THUMB_SETS_FLAGS (inst.instruction))
9795
            narrow = !in_it_block ();
9796
          else
9797
            narrow = in_it_block ();
9798
 
9799
          if (Rd > 7 || Rn > 7 || Rs > 7)
9800
            narrow = FALSE;
9801
          if (inst.operands[2].shifted)
9802
            narrow = FALSE;
9803
          if (inst.size_req == 4)
9804
            narrow = FALSE;
9805
 
9806
          if (narrow)
9807
            {
9808
              if (Rd == Rs)
9809
                {
9810
                  inst.instruction = THUMB_OP16 (inst.instruction);
9811
                  inst.instruction |= Rd;
9812
                  inst.instruction |= Rn << 3;
9813
                  return;
9814
                }
9815
              if (Rd == Rn)
9816
                {
9817
                  inst.instruction = THUMB_OP16 (inst.instruction);
9818
                  inst.instruction |= Rd;
9819
                  inst.instruction |= Rs << 3;
9820
                  return;
9821
                }
9822
            }
9823
 
9824
          /* If we get here, it can't be done in 16 bits.  */
9825
          constraint (inst.operands[2].shifted
9826
                      && inst.operands[2].immisreg,
9827
                      _("shift must be constant"));
9828
          inst.instruction = THUMB_OP32 (inst.instruction);
9829
          inst.instruction |= Rd << 8;
9830
          inst.instruction |= Rs << 16;
9831
          encode_thumb32_shifted_operand (2);
9832
        }
9833
    }
9834
  else
9835
    {
9836
      /* On its face this is a lie - the instruction does set the
9837
         flags.  However, the only supported mnemonic in this mode
9838
         says it doesn't.  */
9839
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9840
 
9841
      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9842
                  _("unshifted register required"));
9843
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9844
 
9845
      inst.instruction = THUMB_OP16 (inst.instruction);
9846
      inst.instruction |= Rd;
9847
 
9848
      if (Rd == Rs)
9849
        inst.instruction |= Rn << 3;
9850
      else if (Rd == Rn)
9851
        inst.instruction |= Rs << 3;
9852
      else
9853
        constraint (1, _("dest must overlap one source register"));
9854
    }
9855
}
9856
 
9857
static void
9858
do_t_barrier (void)
9859
{
9860
  if (inst.operands[0].present)
9861
    {
9862
      constraint ((inst.instruction & 0xf0) != 0x40
                  && (inst.operands[0].imm > 0xf
                      || inst.operands[0].imm < 0x0),
                  _("bad barrier type"));
9866
      inst.instruction |= inst.operands[0].imm;
9867
    }
9868
  else
9869
    inst.instruction |= 0xf;
9870
}
9871
 
9872
static void
9873
do_t_bfc (void)
9874
{
9875
  unsigned Rd;
9876
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9877
  constraint (msb > 32, _("bit-field extends past end of register"));
9878
  /* The instruction encoding stores the LSB and MSB,
9879
     not the LSB and width.  */
9880
  Rd = inst.operands[0].reg;
9881
  reject_bad_reg (Rd);
9882
  inst.instruction |= Rd << 8;
9883
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
9884
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
9885
  inst.instruction |= msb - 1;
9886
}
9887
 
9888
static void
9889
do_t_bfi (void)
9890
{
9891
  int Rd, Rn;
9892
  unsigned int msb;
9893
 
9894
  Rd = inst.operands[0].reg;
9895
  reject_bad_reg (Rd);
9896
 
9897
  /* #0 in second position is alternative syntax for bfc, which is
9898
     the same instruction but with REG_PC in the Rm field.  */
9899
  if (!inst.operands[1].isreg)
9900
    Rn = REG_PC;
9901
  else
9902
    {
9903
      Rn = inst.operands[1].reg;
9904
      reject_bad_reg (Rn);
9905
    }
9906
 
9907
  msb = inst.operands[2].imm + inst.operands[3].imm;
9908
  constraint (msb > 32, _("bit-field extends past end of register"));
9909
  /* The instruction encoding stores the LSB and MSB,
9910
     not the LSB and width.  */
9911
  inst.instruction |= Rd << 8;
9912
  inst.instruction |= Rn << 16;
9913
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9914
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9915
  inst.instruction |= msb - 1;
9916
}
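
/* Worked example: "bfi r0, r1, #8, #4" inserts a 4-bit field at bit 8,
   so msb = 8 + 4 = 12 and the encoding stores msb - 1 = 11 in bits 0-4,
   Rd (r0) in bits 8-11, Rn (r1) in bits 16-19, and the lsb of 8 split as
   (8 & 0x1c) << 10 and (8 & 3) << 6, the same split used elsewhere for
   Thumb-2 shift amounts.  */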
 
9918
static void
9919
do_t_bfx (void)
9920
{
9921
  unsigned Rd, Rn;
9922
 
9923
  Rd = inst.operands[0].reg;
9924
  Rn = inst.operands[1].reg;
9925
 
9926
  reject_bad_reg (Rd);
9927
  reject_bad_reg (Rn);
9928
 
9929
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9930
              _("bit-field extends past end of register"));
9931
  inst.instruction |= Rd << 8;
9932
  inst.instruction |= Rn << 16;
9933
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9934
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9935
  inst.instruction |= inst.operands[3].imm - 1;
9936
}
9937
 
9938
/* ARM V5 Thumb BLX (argument parse)
9939
        BLX <target_addr>       which is BLX(1)
9940
        BLX <Rm>                which is BLX(2)
9941
   Unfortunately, there are two different opcodes for this mnemonic.
9942
   So, the insns[].value is not used, and the code here zaps values
9943
        into inst.instruction.
9944
 
9945
   ??? How to take advantage of the additional two bits of displacement
9946
   available in Thumb32 mode?  Need new relocation?  */
9947
 
9948
static void
9949
do_t_blx (void)
9950
{
9951
  set_it_insn_type_last ();
9952
 
9953
  if (inst.operands[0].isreg)
9954
    {
9955
      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9956
      /* We have a register, so this is BLX(2).  */
9957
      inst.instruction |= inst.operands[0].reg << 3;
9958
    }
9959
  else
9960
    {
9961
      /* No register.  This must be BLX(1).  */
9962
      inst.instruction = 0xf000e800;
9963
      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
9964
    }
9965
}
9966
 
9967
static void
9968
do_t_branch (void)
9969
{
9970
  int opcode;
9971
  int cond;
9972
  int reloc;
9973
 
9974
  cond = inst.cond;
9975
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
9976
 
9977
  if (in_it_block ())
9978
    {
9979
      /* Conditional branches inside IT blocks are encoded as unconditional
9980
         branches.  */
9981
      cond = COND_ALWAYS;
9982
    }
9983
  else
9984
    cond = inst.cond;
9985
 
9986
  if (cond != COND_ALWAYS)
9987
    opcode = T_MNEM_bcond;
9988
  else
9989
    opcode = inst.instruction;
9990
 
9991
  if (unified_syntax
9992
      && (inst.size_req == 4
9993
          || (inst.size_req != 2
9994
              && (inst.operands[0].hasreloc
9995
                  || inst.reloc.exp.X_op == O_constant))))
9996
    {
9997
      inst.instruction = THUMB_OP32(opcode);
9998
      if (cond == COND_ALWAYS)
9999
        reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10000
      else
10001
        {
10002
          gas_assert (cond != 0xF);
10003
          inst.instruction |= cond << 22;
10004
          reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10005
        }
10006
    }
10007
  else
10008
    {
10009
      inst.instruction = THUMB_OP16(opcode);
10010
      if (cond == COND_ALWAYS)
10011
        reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10012
      else
10013
        {
10014
          inst.instruction |= cond << 8;
10015
          reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10016
        }
10017
      /* Allow section relaxation.  */
10018
      if (unified_syntax && inst.size_req != 2)
10019
        inst.relax = opcode;
10020
    }
10021
  inst.reloc.type = reloc;
10022
  inst.reloc.pc_rel = 1;
10023
}
10024
 
10025
static void
10026
do_t_bkpt (void)
10027
{
10028
  constraint (inst.cond != COND_ALWAYS,
10029
              _("instruction is always unconditional"));
10030
  if (inst.operands[0].present)
10031
    {
10032
      constraint (inst.operands[0].imm > 255,
10033
                  _("immediate value out of range"));
10034
      inst.instruction |= inst.operands[0].imm;
10035
      set_it_insn_type (NEUTRAL_IT_INSN);
10036
    }
10037
}
10038
 
10039
static void
10040
do_t_branch23 (void)
10041
{
10042
  set_it_insn_type_last ();
10043
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10044
 
10045
  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10046
     this file.  We used to simply ignore the PLT reloc type here --
10047
     the branch encoding is now needed to deal with TLSCALL relocs.
10048
     So if we see a PLT reloc now, put it back to how it used to be to
10049
     keep the preexisting behaviour.  */
10050
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10051
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10052
 
10053
#if defined(OBJ_COFF)
10054
  /* If the destination of the branch is a defined symbol which does not have
10055
     the THUMB_FUNC attribute, then we must be calling a function which has
10056
     the (interfacearm) attribute.  We look for the Thumb entry point to that
10057
     function and change the branch to refer to that function instead.  */
10058
  if (   inst.reloc.exp.X_op == O_symbol
10059
      && inst.reloc.exp.X_add_symbol != NULL
10060
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10061
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10062
    inst.reloc.exp.X_add_symbol =
10063
      find_real_start (inst.reloc.exp.X_add_symbol);
10064
#endif
10065
}
10066
 
10067
static void
10068
do_t_bx (void)
10069
{
10070
  set_it_insn_type_last ();
10071
  inst.instruction |= inst.operands[0].reg << 3;
10072
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
10073
     should cause the alignment to be checked once it is known.  This is
10074
     because BX PC only works if the instruction is word aligned.  */
10075
}
10076
 
10077
static void
10078
do_t_bxj (void)
10079
{
10080
  int Rm;
10081
 
10082
  set_it_insn_type_last ();
10083
  Rm = inst.operands[0].reg;
10084
  reject_bad_reg (Rm);
10085
  inst.instruction |= Rm << 16;
10086
}
10087
 
10088
static void
10089
do_t_clz (void)
10090
{
10091
  unsigned Rd;
10092
  unsigned Rm;
10093
 
10094
  Rd = inst.operands[0].reg;
10095
  Rm = inst.operands[1].reg;
10096
 
10097
  reject_bad_reg (Rd);
10098
  reject_bad_reg (Rm);
10099
 
10100
  inst.instruction |= Rd << 8;
10101
  inst.instruction |= Rm << 16;
10102
  inst.instruction |= Rm;
10103
}
10104
 
10105
static void
10106
do_t_cps (void)
10107
{
10108
  set_it_insn_type (OUTSIDE_IT_INSN);
10109
  inst.instruction |= inst.operands[0].imm;
10110
}
10111
 
10112
static void
10113
do_t_cpsi (void)
10114
{
10115
  set_it_insn_type (OUTSIDE_IT_INSN);
10116
  if (unified_syntax
10117
      && (inst.operands[1].present || inst.size_req == 4)
10118
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
10119
    {
10120
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
10121
      inst.instruction = 0xf3af8000;
10122
      inst.instruction |= imod << 9;
10123
      inst.instruction |= inst.operands[0].imm << 5;
10124
      if (inst.operands[1].present)
10125
        inst.instruction |= 0x100 | inst.operands[1].imm;
10126
    }
10127
  else
10128
    {
10129
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
10130
                  && (inst.operands[0].imm & 4),
10131
                  _("selected processor does not support 'A' form "
10132
                    "of this instruction"));
10133
      constraint (inst.operands[1].present || inst.size_req == 4,
10134
                  _("Thumb does not support the 2-argument "
10135
                    "form of this instruction"));
10136
      inst.instruction |= inst.operands[0].imm;
10137
    }
10138
}
10139
 
10140
/* THUMB CPY instruction (argument parse).  */
10141
 
10142
static void
10143
do_t_cpy (void)
10144
{
10145
  if (inst.size_req == 4)
10146
    {
10147
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
10148
      inst.instruction |= inst.operands[0].reg << 8;
10149
      inst.instruction |= inst.operands[1].reg;
10150
    }
10151
  else
10152
    {
10153
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10154
      inst.instruction |= (inst.operands[0].reg & 0x7);
10155
      inst.instruction |= inst.operands[1].reg << 3;
10156
    }
10157
}
10158
 
10159
static void
10160
do_t_cbz (void)
10161
{
10162
  set_it_insn_type (OUTSIDE_IT_INSN);
10163
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
10164
  inst.instruction |= inst.operands[0].reg;
10165
  inst.reloc.pc_rel = 1;
10166
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10167
}
10168
 
10169
static void
10170
do_t_dbg (void)
10171
{
10172
  inst.instruction |= inst.operands[0].imm;
10173
}
10174
 
10175
static void
10176
do_t_div (void)
10177
{
10178
  unsigned Rd, Rn, Rm;
10179
 
10180
  Rd = inst.operands[0].reg;
10181
  Rn = (inst.operands[1].present
10182
        ? inst.operands[1].reg : Rd);
10183
  Rm = inst.operands[2].reg;
10184
 
10185
  reject_bad_reg (Rd);
10186
  reject_bad_reg (Rn);
10187
  reject_bad_reg (Rm);
10188
 
10189
  inst.instruction |= Rd << 8;
10190
  inst.instruction |= Rn << 16;
10191
  inst.instruction |= Rm;
10192
}
10193
 
10194
static void
10195
do_t_hint (void)
10196
{
10197
  if (unified_syntax && inst.size_req == 4)
10198
    inst.instruction = THUMB_OP32 (inst.instruction);
10199
  else
10200
    inst.instruction = THUMB_OP16 (inst.instruction);
10201
}
10202
 
10203
static void
10204
do_t_it (void)
10205
{
10206
  unsigned int cond = inst.operands[0].imm;
10207
 
10208
  set_it_insn_type (IT_INSN);
10209
  now_it.mask = (inst.instruction & 0xf) | 0x10;
10210
  now_it.cc = cond;
10211
 
10212
  /* If the condition is a negative condition, invert the mask.  */
10213
  if ((cond & 0x1) == 0x0)
10214
    {
10215
      unsigned int mask = inst.instruction & 0x000f;
10216
 
10217
      if ((mask & 0x7) == 0)
10218
        /* no conversion needed */;
10219
      else if ((mask & 0x3) == 0)
10220
        mask ^= 0x8;
10221
      else if ((mask & 0x1) == 0)
10222
        mask ^= 0xC;
10223
      else
10224
        mask ^= 0xE;
10225
 
10226
      inst.instruction &= 0xfff0;
10227
      inst.instruction |= mask;
10228
    }
10229
 
10230
  inst.instruction |= cond << 4;
10231
}
10232
 
10233
/* Helper function used for both push/pop and ldm/stm.  */
10234
static void
10235
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
10236
{
10237
  bfd_boolean load;
10238
 
10239
  load = (inst.instruction & (1 << 20)) != 0;
10240
 
10241
  if (mask & (1 << 13))
10242
    inst.error =  _("SP not allowed in register list");
10243
 
10244
  if ((mask & (1 << base)) != 0
10245
      && writeback)
10246
    inst.error = _("having the base register in the register list when "
10247
                   "using write back is UNPREDICTABLE");
10248
 
10249
  if (load)
10250
    {
10251
      if (mask & (1 << 15))
10252
        {
10253
          if (mask & (1 << 14))
10254
            inst.error = _("LR and PC should not both be in register list");
10255
          else
10256
            set_it_insn_type_last ();
10257
        }
10258
    }
10259
  else
10260
    {
10261
      if (mask & (1 << 15))
10262
        inst.error = _("PC not allowed in register list");
10263
    }
10264
 
10265
  if ((mask & (mask - 1)) == 0)
10266
    {
10267
      /* Single register transfers implemented as str/ldr.  */
10268
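      /* Examples (illustrative sketch, assuming standard UAL syntax):
         "ldmia r0!, {r3}" may be emitted as "ldr.w r3, [r0], #4", and
         "stmdb r0!, {r3}" as "str.w r3, [r0, #-4]!", per the rewrites
         below.  */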
      if (writeback)
10269
        {
10270
          if (inst.instruction & (1 << 23))
10271
            inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
10272
          else
10273
            inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
10274
        }
10275
      else
10276
        {
10277
          if (inst.instruction & (1 << 23))
10278
            inst.instruction = 0x00800000; /* ia -> [base] */
10279
          else
10280
            inst.instruction = 0x00000c04; /* db -> [base, #-4] */
10281
        }
10282
 
10283
      inst.instruction |= 0xf8400000;
10284
      if (load)
10285
        inst.instruction |= 0x00100000;
10286
 
10287
      mask = ffs (mask) - 1;
10288
      mask <<= 12;
10289
    }
10290
  else if (writeback)
10291
    inst.instruction |= WRITE_BACK;
10292
 
10293
  inst.instruction |= mask;
10294
  inst.instruction |= base << 16;
10295
}
10296
 
10297
static void
10298
do_t_ldmstm (void)
10299
{
10300
  /* This really doesn't seem worth it.  */
10301
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10302
              _("expression too complex"));
10303
  constraint (inst.operands[1].writeback,
10304
              _("Thumb load/store multiple does not support {reglist}^"));
10305
 
10306
  if (unified_syntax)
10307
    {
10308
      bfd_boolean narrow;
10309
      unsigned mask;
10310
 
10311
      narrow = FALSE;
10312
      /* See if we can use a 16-bit instruction.  */
10313
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
10314
          && inst.size_req != 4
10315
          && !(inst.operands[1].imm & ~0xff))
10316
        {
10317
          mask = 1 << inst.operands[0].reg;
10318
 
10319
          if (inst.operands[0].reg <= 7)
10320
            {
10321
              if (inst.instruction == T_MNEM_stmia
10322
                  ? inst.operands[0].writeback
10323
                  : (inst.operands[0].writeback
10324
                     == !(inst.operands[1].imm & mask)))
10325
                {
10326
                  if (inst.instruction == T_MNEM_stmia
10327
                      && (inst.operands[1].imm & mask)
10328
                      && (inst.operands[1].imm & (mask - 1)))
10329
                    as_warn (_("value stored for r%d is UNKNOWN"),
10330
                             inst.operands[0].reg);
10331
 
10332
                  inst.instruction = THUMB_OP16 (inst.instruction);
10333
                  inst.instruction |= inst.operands[0].reg << 8;
10334
                  inst.instruction |= inst.operands[1].imm;
10335
                  narrow = TRUE;
10336
                }
10337
              else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10338
                {
10339
                  /* This means one register in the register list, in one of 3 situations:
10340
                     1. Instruction is stmia, but without writeback.
10341
                     2. ldmia without writeback, but with Rn not in
10342
                        reglist.
10343
                     3. ldmia with writeback, but with Rn in reglist.
10344
                     Case 3 is UNPREDICTABLE behaviour, so we handle
10345
                     cases 1 and 2, which can be converted into a 16-bit
10346
                     str or ldr. The SP cases are handled below.  */
10347
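                  /* Example (illustrative sketch): "stmia r3, {r4}"
                     without writeback falls under case 1 and narrows to
                     the 16-bit "str r4, [r3]".  */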
                  unsigned long opcode;
10348
                  /* First, record an error for Case 3.  */
10349
                  if (inst.operands[1].imm & mask
10350
                      && inst.operands[0].writeback)
10351
                    inst.error =
10352
                        _("having the base register in the register list when "
10353
                          "using write back is UNPREDICTABLE");
10354
 
10355
                  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
10356
                                                             : T_MNEM_ldr);
10357
                  inst.instruction = THUMB_OP16 (opcode);
10358
                  inst.instruction |= inst.operands[0].reg << 3;
10359
                  inst.instruction |= (ffs (inst.operands[1].imm)-1);
10360
                  narrow = TRUE;
10361
                }
10362
            }
10363
          else if (inst.operands[0].reg == REG_SP)
10364
            {
10365
              if (inst.operands[0].writeback)
10366
                {
10367
                  inst.instruction =
10368
                        THUMB_OP16 (inst.instruction == T_MNEM_stmia
10369
                                    ? T_MNEM_push : T_MNEM_pop);
10370
                  inst.instruction |= inst.operands[1].imm;
10371
                  narrow = TRUE;
10372
                }
10373
              else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10374
                {
10375
                  inst.instruction =
10376
                        THUMB_OP16 (inst.instruction == T_MNEM_stmia
10377
                                    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
10378
                  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
10379
                  narrow = TRUE;
10380
                }
10381
            }
10382
        }
10383
 
10384
      if (!narrow)
10385
        {
10386
          if (inst.instruction < 0xffff)
10387
            inst.instruction = THUMB_OP32 (inst.instruction);
10388
 
10389
          encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
10390
                                inst.operands[0].writeback);
10391
        }
10392
    }
10393
  else
10394
    {
10395
      constraint (inst.operands[0].reg > 7
10396
                  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
10397
      constraint (inst.instruction != T_MNEM_ldmia
10398
                  && inst.instruction != T_MNEM_stmia,
10399
                  _("Thumb-2 instruction only valid in unified syntax"));
10400
      if (inst.instruction == T_MNEM_stmia)
10401
        {
10402
          if (!inst.operands[0].writeback)
10403
            as_warn (_("this instruction will write back the base register"));
10404
          if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
10405
              && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
10406
            as_warn (_("value stored for r%d is UNKNOWN"),
10407
                     inst.operands[0].reg);
10408
        }
10409
      else
10410
        {
10411
          if (!inst.operands[0].writeback
10412
              && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
10413
            as_warn (_("this instruction will write back the base register"));
10414
          else if (inst.operands[0].writeback
10415
                   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
10416
            as_warn (_("this instruction will not write back the base register"));
10417
        }
10418
 
10419
      inst.instruction = THUMB_OP16 (inst.instruction);
10420
      inst.instruction |= inst.operands[0].reg << 8;
10421
      inst.instruction |= inst.operands[1].imm;
10422
    }
10423
}
10424
 
10425
static void
10426
do_t_ldrex (void)
10427
{
10428
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
10429
              || inst.operands[1].postind || inst.operands[1].writeback
10430
              || inst.operands[1].immisreg || inst.operands[1].shifted
10431
              || inst.operands[1].negative,
10432
              BAD_ADDR_MODE);
10433
 
10434
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
10435
 
10436
  inst.instruction |= inst.operands[0].reg << 12;
10437
  inst.instruction |= inst.operands[1].reg << 16;
10438
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10439
}
10440
 
10441
static void
10442
do_t_ldrexd (void)
10443
{
10444
  if (!inst.operands[1].present)
10445
    {
10446
      constraint (inst.operands[0].reg == REG_LR,
10447
                  _("r14 not allowed as first register "
10448
                    "when second register is omitted"));
10449
      inst.operands[1].reg = inst.operands[0].reg + 1;
10450
    }
10451
  constraint (inst.operands[0].reg == inst.operands[1].reg,
10452
              BAD_OVERLAP);
10453
 
10454
  inst.instruction |= inst.operands[0].reg << 12;
10455
  inst.instruction |= inst.operands[1].reg << 8;
10456
  inst.instruction |= inst.operands[2].reg << 16;
10457
}
10458
 
10459
static void
10460
do_t_ldst (void)
10461
{
10462
  unsigned long opcode;
10463
  int Rn;
10464
 
10465
  if (inst.operands[0].isreg
10466
      && !inst.operands[0].preind
10467
      && inst.operands[0].reg == REG_PC)
10468
    set_it_insn_type_last ();
10469
 
10470
  opcode = inst.instruction;
10471
  if (unified_syntax)
10472
    {
10473
      if (!inst.operands[1].isreg)
10474
        {
10475
          if (opcode <= 0xffff)
10476
            inst.instruction = THUMB_OP32 (opcode);
10477
          if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10478
            return;
10479
        }
10480
      if (inst.operands[1].isreg
10481
          && !inst.operands[1].writeback
10482
          && !inst.operands[1].shifted && !inst.operands[1].postind
10483
          && !inst.operands[1].negative && inst.operands[0].reg <= 7
10484
          && opcode <= 0xffff
10485
          && inst.size_req != 4)
10486
        {
10487
          /* Insn may have a 16-bit form.  */
10488
          Rn = inst.operands[1].reg;
10489
          if (inst.operands[1].immisreg)
10490
            {
10491
              inst.instruction = THUMB_OP16 (opcode);
10492
              /* [Rn, Rik] */
10493
              if (Rn <= 7 && inst.operands[1].imm <= 7)
10494
                goto op16;
10495
              else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
10496
                reject_bad_reg (inst.operands[1].imm);
10497
            }
10498
          else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
10499
                    && opcode != T_MNEM_ldrsb)
10500
                   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
10501
                   || (Rn == REG_SP && opcode == T_MNEM_str))
10502
            {
10503
              /* [Rn, #const] */
10504
              if (Rn > 7)
10505
                {
10506
                  if (Rn == REG_PC)
10507
                    {
10508
                      if (inst.reloc.pc_rel)
10509
                        opcode = T_MNEM_ldr_pc2;
10510
                      else
10511
                        opcode = T_MNEM_ldr_pc;
10512
                    }
10513
                  else
10514
                    {
10515
                      if (opcode == T_MNEM_ldr)
10516
                        opcode = T_MNEM_ldr_sp;
10517
                      else
10518
                        opcode = T_MNEM_str_sp;
10519
                    }
10520
                  inst.instruction = inst.operands[0].reg << 8;
10521
                }
10522
              else
10523
                {
10524
                  inst.instruction = inst.operands[0].reg;
10525
                  inst.instruction |= inst.operands[1].reg << 3;
10526
                }
10527
              inst.instruction |= THUMB_OP16 (opcode);
10528
              if (inst.size_req == 2)
10529
                inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10530
              else
10531
                inst.relax = opcode;
10532
              return;
10533
            }
10534
        }
10535
      /* Definitely a 32-bit variant.  */
10536
 
10537
      /* Warning for Erratum 752419.  */
10538
      if (opcode == T_MNEM_ldr
10539
          && inst.operands[0].reg == REG_SP
10540
          && inst.operands[1].writeback == 1
10541
          && !inst.operands[1].immisreg)
10542
        {
10543
          if (no_cpu_selected ()
10544
              || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
10545
                  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
10546
                  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
10547
            as_warn (_("This instruction may be unpredictable "
10548
                       "if executed on M-profile cores "
10549
                       "with interrupts enabled."));
10550
        }
10551
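      /* Example (illustrative sketch): "ldr sp, [r0], #4", a 32-bit LDR
         that writes SP with base writeback, is the kind of sequence the
         erratum 752419 warning above is aimed at when assembling without
         a specific CPU, or for a v7 core that is neither A- nor
         R-profile.  */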
 
10552
      /* Do some validations regarding addressing modes.  */
10553 163 khays
      if (inst.operands[1].immisreg)
10554 16 khays
        reject_bad_reg (inst.operands[1].imm);
10555
 
10556 163 khays
      constraint (inst.operands[1].writeback == 1
10557
                  && inst.operands[0].reg == inst.operands[1].reg,
10558
                  BAD_OVERLAP);
10559
 
10560 16 khays
      inst.instruction = THUMB_OP32 (opcode);
10561
      inst.instruction |= inst.operands[0].reg << 12;
10562
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
10563 163 khays
      check_ldr_r15_aligned ();
10564 16 khays
      return;
10565
    }
10566
 
10567
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
10568
 
10569
  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
10570
    {
10571
      /* Only [Rn,Rm] is acceptable.  */
10572
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
10573
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
10574
                  || inst.operands[1].postind || inst.operands[1].shifted
10575
                  || inst.operands[1].negative,
10576
                  _("Thumb does not support this addressing mode"));
10577
      inst.instruction = THUMB_OP16 (inst.instruction);
10578
      goto op16;
10579
    }
10580
 
10581
  inst.instruction = THUMB_OP16 (inst.instruction);
10582
  if (!inst.operands[1].isreg)
10583
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10584
      return;
10585
 
10586
  constraint (!inst.operands[1].preind
10587
              || inst.operands[1].shifted
10588
              || inst.operands[1].writeback,
10589
              _("Thumb does not support this addressing mode"));
10590
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
10591
    {
10592
      constraint (inst.instruction & 0x0600,
10593
                  _("byte or halfword not valid for base register"));
10594
      constraint (inst.operands[1].reg == REG_PC
10595
                  && !(inst.instruction & THUMB_LOAD_BIT),
10596
                  _("r15 based store not allowed"));
10597
      constraint (inst.operands[1].immisreg,
10598
                  _("invalid base register for register offset"));
10599
 
10600
      if (inst.operands[1].reg == REG_PC)
10601
        inst.instruction = T_OPCODE_LDR_PC;
10602
      else if (inst.instruction & THUMB_LOAD_BIT)
10603
        inst.instruction = T_OPCODE_LDR_SP;
10604
      else
10605
        inst.instruction = T_OPCODE_STR_SP;
10606
 
10607
      inst.instruction |= inst.operands[0].reg << 8;
10608
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10609
      return;
10610
    }
10611
 
10612
  constraint (inst.operands[1].reg > 7, BAD_HIREG);
10613
  if (!inst.operands[1].immisreg)
10614
    {
10615
      /* Immediate offset.  */
10616
      inst.instruction |= inst.operands[0].reg;
10617
      inst.instruction |= inst.operands[1].reg << 3;
10618
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10619
      return;
10620
    }
10621
 
10622
  /* Register offset.  */
10623
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
10624
  constraint (inst.operands[1].negative,
10625
              _("Thumb does not support this addressing mode"));
10626
 
10627
 op16:
10628
  switch (inst.instruction)
10629
    {
10630
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
10631
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
10632
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
10633
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
10634
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
10635
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
10636
    case 0x5600 /* ldrsb */:
10637
    case 0x5e00 /* ldrsh */: break;
10638
    default: abort ();
10639
    }
10640
 
10641
  inst.instruction |= inst.operands[0].reg;
10642
  inst.instruction |= inst.operands[1].reg << 3;
10643
  inst.instruction |= inst.operands[1].imm << 6;
10644
}
10645
 
10646
static void
10647
do_t_ldstd (void)
10648
{
10649
  if (!inst.operands[1].present)
10650
    {
10651
      inst.operands[1].reg = inst.operands[0].reg + 1;
10652
      constraint (inst.operands[0].reg == REG_LR,
10653
                  _("r14 not allowed here"));
10654
    }
10655
  inst.instruction |= inst.operands[0].reg << 12;
10656
  inst.instruction |= inst.operands[1].reg << 8;
10657
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10658
}
10659
 
10660
static void
10661
do_t_ldstt (void)
10662
{
10663
  inst.instruction |= inst.operands[0].reg << 12;
10664
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10665
}
10666
 
10667
static void
10668
do_t_mla (void)
10669
{
10670
  unsigned Rd, Rn, Rm, Ra;
10671
 
10672
  Rd = inst.operands[0].reg;
10673
  Rn = inst.operands[1].reg;
10674
  Rm = inst.operands[2].reg;
10675
  Ra = inst.operands[3].reg;
10676
 
10677
  reject_bad_reg (Rd);
10678
  reject_bad_reg (Rn);
10679
  reject_bad_reg (Rm);
10680
  reject_bad_reg (Ra);
10681
 
10682
  inst.instruction |= Rd << 8;
10683
  inst.instruction |= Rn << 16;
10684
  inst.instruction |= Rm;
10685
  inst.instruction |= Ra << 12;
10686
}
10687
 
10688
static void
10689
do_t_mlal (void)
10690
{
10691
  unsigned RdLo, RdHi, Rn, Rm;
10692
 
10693
  RdLo = inst.operands[0].reg;
10694
  RdHi = inst.operands[1].reg;
10695
  Rn = inst.operands[2].reg;
10696
  Rm = inst.operands[3].reg;
10697
 
10698
  reject_bad_reg (RdLo);
10699
  reject_bad_reg (RdHi);
10700
  reject_bad_reg (Rn);
10701
  reject_bad_reg (Rm);
10702
 
10703
  inst.instruction |= RdLo << 12;
10704
  inst.instruction |= RdHi << 8;
10705
  inst.instruction |= Rn << 16;
10706
  inst.instruction |= Rm;
10707
}
10708
 
10709
static void
10710
do_t_mov_cmp (void)
10711
{
10712
  unsigned Rn, Rm;
10713
 
10714
  Rn = inst.operands[0].reg;
10715
  Rm = inst.operands[1].reg;
10716
 
10717
  if (Rn == REG_PC)
10718
    set_it_insn_type_last ();
10719
 
10720
  if (unified_syntax)
10721
    {
10722
      int r0off = (inst.instruction == T_MNEM_mov
10723
                   || inst.instruction == T_MNEM_movs) ? 8 : 16;
10724
      unsigned long opcode;
10725
      bfd_boolean narrow;
10726
      bfd_boolean low_regs;
10727
 
10728
      low_regs = (Rn <= 7 && Rm <= 7);
10729
      opcode = inst.instruction;
10730
      if (in_it_block ())
10731
        narrow = opcode != T_MNEM_movs;
10732
      else
10733
        narrow = opcode != T_MNEM_movs || low_regs;
10734
      if (inst.size_req == 4
10735
          || inst.operands[1].shifted)
10736
        narrow = FALSE;
10737
 
10738
      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
10739
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
10740
          && !inst.operands[1].shifted
10741
          && Rn == REG_PC
10742
          && Rm == REG_LR)
10743
        {
10744
          inst.instruction = T2_SUBS_PC_LR;
10745
          return;
10746
        }
10747
 
10748
      if (opcode == T_MNEM_cmp)
10749
        {
10750
          constraint (Rn == REG_PC, BAD_PC);
10751
          if (narrow)
10752
            {
10753
              /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10754
                 but valid.  */
10755
              warn_deprecated_sp (Rm);
10756
              /* R15 was documented as a valid choice for Rm in ARMv6,
10757
                 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
10758
                 tools reject R15, so we do too.  */
10759
              constraint (Rm == REG_PC, BAD_PC);
10760
            }
10761
          else
10762
            reject_bad_reg (Rm);
10763
        }
10764
      else if (opcode == T_MNEM_mov
10765
               || opcode == T_MNEM_movs)
10766
        {
10767
          if (inst.operands[1].isreg)
10768
            {
10769
              if (opcode == T_MNEM_movs)
10770
                {
10771
                  reject_bad_reg (Rn);
10772
                  reject_bad_reg (Rm);
10773
                }
10774
              else if (narrow)
10775
                {
10776
                  /* This is mov.n.  */
10777
                  if ((Rn == REG_SP || Rn == REG_PC)
10778
                      && (Rm == REG_SP || Rm == REG_PC))
10779
                    {
10780
                      as_warn (_("Use of r%u as a source register is "
10781
                                 "deprecated when r%u is the destination "
10782
                                 "register."), Rm, Rn);
10783
                    }
10784
                }
10785
              else
10786
                {
10787
                  /* This is mov.w.  */
10788
                  constraint (Rn == REG_PC, BAD_PC);
10789
                  constraint (Rm == REG_PC, BAD_PC);
10790
                  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
10791
                }
10792
            }
10793
          else
10794
            reject_bad_reg (Rn);
10795
        }
10796
 
10797
      if (!inst.operands[1].isreg)
10798
        {
10799
          /* Immediate operand.  */
10800
          if (!in_it_block () && opcode == T_MNEM_mov)
10801
            narrow = 0;
10802
          if (low_regs && narrow)
10803
            {
10804
              inst.instruction = THUMB_OP16 (opcode);
10805
              inst.instruction |= Rn << 8;
10806
              if (inst.size_req == 2)
10807
                inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10808
              else
10809
                inst.relax = opcode;
10810
            }
10811
          else
10812
            {
10813
              inst.instruction = THUMB_OP32 (inst.instruction);
10814
              inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10815
              inst.instruction |= Rn << r0off;
10816
              inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10817
            }
10818
        }
10819
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
10820
               && (inst.instruction == T_MNEM_mov
10821
                   || inst.instruction == T_MNEM_movs))
10822
        {
10823
          /* Register shifts are encoded as separate shift instructions.  */
10824
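          /* Example (illustrative sketch): "movs r0, r1, lsl r2" becomes
             the shift instruction "lsls.w r0, r1, r2"; the 16-bit form
             (e.g. "lsls r0, r2") is used instead when the destination and
             source are the same low register and the shift count register
             is also a low register.  */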
          bfd_boolean flags = (inst.instruction == T_MNEM_movs);
10825
 
10826
          if (in_it_block ())
10827
            narrow = !flags;
10828
          else
10829
            narrow = flags;
10830
 
10831
          if (inst.size_req == 4)
10832
            narrow = FALSE;
10833
 
10834
          if (!low_regs || inst.operands[1].imm > 7)
10835
            narrow = FALSE;
10836
 
10837
          if (Rn != Rm)
10838
            narrow = FALSE;
10839
 
10840
          switch (inst.operands[1].shift_kind)
10841
            {
10842
            case SHIFT_LSL:
10843
              opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
10844
              break;
10845
            case SHIFT_ASR:
10846
              opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
10847
              break;
10848
            case SHIFT_LSR:
10849
              opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
10850
              break;
10851
            case SHIFT_ROR:
10852
              opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
10853
              break;
10854
            default:
10855
              abort ();
10856
            }
10857
 
10858
          inst.instruction = opcode;
10859
          if (narrow)
10860
            {
10861
              inst.instruction |= Rn;
10862
              inst.instruction |= inst.operands[1].imm << 3;
10863
            }
10864
          else
10865
            {
10866
              if (flags)
10867
                inst.instruction |= CONDS_BIT;
10868
 
10869
              inst.instruction |= Rn << 8;
10870
              inst.instruction |= Rm << 16;
10871
              inst.instruction |= inst.operands[1].imm;
10872
            }
10873
        }
10874
      else if (!narrow)
10875
        {
10876
          /* Some mov instructions with an immediate shift have narrow variants.
10877
             Register shifts are handled above.  */
10878
          if (low_regs && inst.operands[1].shifted
10879
              && (inst.instruction == T_MNEM_mov
10880
                  || inst.instruction == T_MNEM_movs))
10881
            {
10882
              if (in_it_block ())
10883
                narrow = (inst.instruction == T_MNEM_mov);
10884
              else
10885
                narrow = (inst.instruction == T_MNEM_movs);
10886
            }
10887
 
10888
          if (narrow)
10889
            {
10890
              switch (inst.operands[1].shift_kind)
10891
                {
10892
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10893
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10894
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10895
                default: narrow = FALSE; break;
10896
                }
10897
            }
10898
 
10899
          if (narrow)
10900
            {
10901
              inst.instruction |= Rn;
10902
              inst.instruction |= Rm << 3;
10903
              inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10904
            }
10905
          else
10906
            {
10907
              inst.instruction = THUMB_OP32 (inst.instruction);
10908
              inst.instruction |= Rn << r0off;
10909
              encode_thumb32_shifted_operand (1);
10910
            }
10911
        }
10912
      else
10913
        switch (inst.instruction)
10914
          {
10915
          case T_MNEM_mov:
10916
            inst.instruction = T_OPCODE_MOV_HR;
10917
            inst.instruction |= (Rn & 0x8) << 4;
10918
            inst.instruction |= (Rn & 0x7);
10919
            inst.instruction |= Rm << 3;
10920
            break;
10921
 
10922
          case T_MNEM_movs:
10923
            /* We know we have low registers at this point.
10924
               Generate LSLS Rd, Rs, #0.  */
10925
            inst.instruction = T_OPCODE_LSL_I;
10926
            inst.instruction |= Rn;
10927
            inst.instruction |= Rm << 3;
10928
            break;
10929
 
10930
          case T_MNEM_cmp:
10931
            if (low_regs)
10932
              {
10933
                inst.instruction = T_OPCODE_CMP_LR;
10934
                inst.instruction |= Rn;
10935
                inst.instruction |= Rm << 3;
10936
              }
10937
            else
10938
              {
10939
                inst.instruction = T_OPCODE_CMP_HR;
10940
                inst.instruction |= (Rn & 0x8) << 4;
10941
                inst.instruction |= (Rn & 0x7);
10942
                inst.instruction |= Rm << 3;
10943
              }
10944
            break;
10945
          }
10946
      return;
10947
    }
10948
 
10949
  inst.instruction = THUMB_OP16 (inst.instruction);
10950
 
10951
  /* PR 10443: Do not silently ignore shifted operands.  */
10952
  constraint (inst.operands[1].shifted,
10953
              _("shifts in CMP/MOV instructions are only supported in unified syntax"));
10954
 
10955
  if (inst.operands[1].isreg)
10956
    {
10957
      if (Rn < 8 && Rm < 8)
10958
        {
10959
          /* A move of two lowregs is encoded as ADD Rd, Rs, #0
10960
             since a MOV instruction produces unpredictable results.  */
10961
          if (inst.instruction == T_OPCODE_MOV_I8)
10962
            inst.instruction = T_OPCODE_ADD_I3;
10963
          else
10964
            inst.instruction = T_OPCODE_CMP_LR;
10965
 
10966
          inst.instruction |= Rn;
10967
          inst.instruction |= Rm << 3;
10968
        }
10969
      else
10970
        {
10971
          if (inst.instruction == T_OPCODE_MOV_I8)
10972
            inst.instruction = T_OPCODE_MOV_HR;
10973
          else
10974
            inst.instruction = T_OPCODE_CMP_HR;
10975
          do_t_cpy ();
10976
        }
10977
    }
10978
  else
10979
    {
10980
      constraint (Rn > 7,
10981
                  _("only lo regs allowed with immediate"));
10982
      inst.instruction |= Rn << 8;
10983
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10984
    }
10985
}
10986
 
10987
static void
10988
do_t_mov16 (void)
10989
{
10990
  unsigned Rd;
10991
  bfd_vma imm;
10992
  bfd_boolean top;
10993
 
10994
  top = (inst.instruction & 0x00800000) != 0;
10995
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
10996
    {
10997
      constraint (top, _(":lower16: not allowed this instruction"));
10998
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
10999
    }
11000
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11001
    {
11002
      constraint (!top, _(":upper16: not allowed this instruction"));
11003
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11004
    }
11005
 
11006
  Rd = inst.operands[0].reg;
11007
  reject_bad_reg (Rd);
11008
 
11009
  inst.instruction |= Rd << 8;
11010
  if (inst.reloc.type == BFD_RELOC_UNUSED)
11011
    {
11012
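      /* Example (illustrative sketch): for "movw r0, #0x1234" the value
         is split below into imm4 = 0x1 (bits 19-16), i = 0 (bit 26),
         imm3 = 0x2 (bits 14-12) and imm8 = 0x34 (bits 7-0).  */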
      imm = inst.reloc.exp.X_add_number;
11013
      inst.instruction |= (imm & 0xf000) << 4;
11014
      inst.instruction |= (imm & 0x0800) << 15;
11015
      inst.instruction |= (imm & 0x0700) << 4;
11016
      inst.instruction |= (imm & 0x00ff);
11017
    }
11018
}
11019
 
11020
static void
11021
do_t_mvn_tst (void)
11022
{
11023
  unsigned Rn, Rm;
11024
 
11025
  Rn = inst.operands[0].reg;
11026
  Rm = inst.operands[1].reg;
11027
 
11028
  if (inst.instruction == T_MNEM_cmp
11029
      || inst.instruction == T_MNEM_cmn)
11030
    constraint (Rn == REG_PC, BAD_PC);
11031
  else
11032
    reject_bad_reg (Rn);
11033
  reject_bad_reg (Rm);
11034
 
11035
  if (unified_syntax)
11036
    {
11037
      int r0off = (inst.instruction == T_MNEM_mvn
11038
                   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11039
      bfd_boolean narrow;
11040
 
11041
      if (inst.size_req == 4
11042
          || inst.instruction > 0xffff
11043
          || inst.operands[1].shifted
11044
          || Rn > 7 || Rm > 7)
11045
        narrow = FALSE;
11046
      else if (inst.instruction == T_MNEM_cmn)
11047
        narrow = TRUE;
11048
      else if (THUMB_SETS_FLAGS (inst.instruction))
11049
        narrow = !in_it_block ();
11050
      else
11051
        narrow = in_it_block ();
11052
 
11053
      if (!inst.operands[1].isreg)
11054
        {
11055
          /* For an immediate, we always generate a 32-bit opcode;
11056
             section relaxation will shrink it later if possible.  */
11057
          if (inst.instruction < 0xffff)
11058
            inst.instruction = THUMB_OP32 (inst.instruction);
11059
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11060
          inst.instruction |= Rn << r0off;
11061
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11062
        }
11063
      else
11064
        {
11065
          /* See if we can do this with a 16-bit instruction.  */
11066
          if (narrow)
11067
            {
11068
              inst.instruction = THUMB_OP16 (inst.instruction);
11069
              inst.instruction |= Rn;
11070
              inst.instruction |= Rm << 3;
11071
            }
11072
          else
11073
            {
11074
              constraint (inst.operands[1].shifted
11075
                          && inst.operands[1].immisreg,
11076
                          _("shift must be constant"));
11077
              if (inst.instruction < 0xffff)
11078
                inst.instruction = THUMB_OP32 (inst.instruction);
11079
              inst.instruction |= Rn << r0off;
11080
              encode_thumb32_shifted_operand (1);
11081
            }
11082
        }
11083
    }
11084
  else
11085
    {
11086
      constraint (inst.instruction > 0xffff
11087
                  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
11088
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
11089
                  _("unshifted register required"));
11090
      constraint (Rn > 7 || Rm > 7,
11091
                  BAD_HIREG);
11092
 
11093
      inst.instruction = THUMB_OP16 (inst.instruction);
11094
      inst.instruction |= Rn;
11095
      inst.instruction |= Rm << 3;
11096
    }
11097
}
11098
 
11099
static void
11100
do_t_mrs (void)
11101
{
11102
  unsigned Rd;
11103
 
11104
  if (do_vfp_nsyn_mrs () == SUCCESS)
11105
    return;
11106
 
11107
  Rd = inst.operands[0].reg;
11108
  reject_bad_reg (Rd);
11109
  inst.instruction |= Rd << 8;
11110
 
11111
  if (inst.operands[1].isreg)
11112
    {
11113
      unsigned br = inst.operands[1].reg;
11114
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
11115
        as_bad (_("bad register for mrs"));
11116
 
11117
      inst.instruction |= br & (0xf << 16);
11118
      inst.instruction |= (br & 0x300) >> 4;
11119
      inst.instruction |= (br & SPSR_BIT) >> 2;
11120
    }
11121
  else
11122
    {
11123
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11124
 
11125
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11126
        constraint (flags != 0, _("selected processor does not support "
11127
                    "requested special purpose register"));
11128
      else
11129
        /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11130
           devices).  */
11131
        constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
11132
                    _("'APSR', 'CPSR' or 'SPSR' expected"));
11133
 
11134
      inst.instruction |= (flags & SPSR_BIT) >> 2;
11135
      inst.instruction |= inst.operands[1].imm & 0xff;
11136
      inst.instruction |= 0xf0000;
11137
    }
11138
}
11139
 
11140
static void
11141
do_t_msr (void)
11142
{
11143
  int flags;
11144
  unsigned Rn;
11145
 
11146
  if (do_vfp_nsyn_msr () == SUCCESS)
11147
    return;
11148
 
11149
  constraint (!inst.operands[1].isreg,
11150
              _("Thumb encoding does not support an immediate here"));
11151
 
11152
  if (inst.operands[0].isreg)
11153
    flags = (int)(inst.operands[0].reg);
11154
  else
11155
    flags = inst.operands[0].imm;
11156
 
11157
  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11158
    {
11159
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11160
 
11161
      constraint ((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11162
                   && (bits & ~(PSR_s | PSR_f)) != 0)
11163
                  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11164
                      && bits != PSR_f),
11165
                  _("selected processor does not support requested special "
11166
                    "purpose register"));
11167
    }
11168
  else
11169
     constraint ((flags & 0xff) != 0, _("selected processor does not support "
11170
                 "requested special purpose register"));
11171
 
11172
  Rn = inst.operands[1].reg;
11173
  reject_bad_reg (Rn);
11174
 
11175
  inst.instruction |= (flags & SPSR_BIT) >> 2;
11176
  inst.instruction |= (flags & 0xf0000) >> 8;
11177
  inst.instruction |= (flags & 0x300) >> 4;
11178
  inst.instruction |= (flags & 0xff);
11179
  inst.instruction |= Rn << 16;
11180
}
11181
 
11182
static void
11183
do_t_mul (void)
11184
{
11185
  bfd_boolean narrow;
11186
  unsigned Rd, Rn, Rm;
11187
 
11188
  if (!inst.operands[2].present)
11189
    inst.operands[2].reg = inst.operands[0].reg;
11190
 
11191
  Rd = inst.operands[0].reg;
11192
  Rn = inst.operands[1].reg;
11193
  Rm = inst.operands[2].reg;
11194
 
11195
  if (unified_syntax)
11196
    {
11197
      if (inst.size_req == 4
11198
          || (Rd != Rn
11199
              && Rd != Rm)
11200
          || Rn > 7
11201
          || Rm > 7)
11202
        narrow = FALSE;
11203
      else if (inst.instruction == T_MNEM_muls)
11204
        narrow = !in_it_block ();
11205
      else
11206
        narrow = in_it_block ();
11207
    }
11208
  else
11209
    {
11210
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
11211
      constraint (Rn > 7 || Rm > 7,
11212
                  BAD_HIREG);
11213
      narrow = TRUE;
11214
    }
11215
 
11216
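  /* Examples (illustrative sketch): outside an IT block,
     "muls r0, r1, r0" can use the 16-bit MULS, while "mul r0, r1, r2"
     with three distinct registers needs the 32-bit Thumb-2 MUL.  */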
  if (narrow)
11217
    {
11218
      /* 16-bit MULS/Conditional MUL.  */
11219
      inst.instruction = THUMB_OP16 (inst.instruction);
11220
      inst.instruction |= Rd;
11221
 
11222
      if (Rd == Rn)
11223
        inst.instruction |= Rm << 3;
11224
      else if (Rd == Rm)
11225
        inst.instruction |= Rn << 3;
11226
      else
11227
        constraint (1, _("dest must overlap one source register"));
11228
    }
11229
  else
11230
    {
11231
      constraint (inst.instruction != T_MNEM_mul,
11232
                  _("Thumb-2 MUL must not set flags"));
11233
      /* 32-bit MUL.  */
11234
      inst.instruction = THUMB_OP32 (inst.instruction);
11235
      inst.instruction |= Rd << 8;
11236
      inst.instruction |= Rn << 16;
11237
      inst.instruction |= Rm << 0;
11238
 
11239
      reject_bad_reg (Rd);
11240
      reject_bad_reg (Rn);
11241
      reject_bad_reg (Rm);
11242
    }
11243
}
11244
 
11245
static void
11246
do_t_mull (void)
11247
{
11248
  unsigned RdLo, RdHi, Rn, Rm;
11249
 
11250
  RdLo = inst.operands[0].reg;
11251
  RdHi = inst.operands[1].reg;
11252
  Rn = inst.operands[2].reg;
11253
  Rm = inst.operands[3].reg;
11254
 
11255
  reject_bad_reg (RdLo);
11256
  reject_bad_reg (RdHi);
11257
  reject_bad_reg (Rn);
11258
  reject_bad_reg (Rm);
11259
 
11260
  inst.instruction |= RdLo << 12;
11261
  inst.instruction |= RdHi << 8;
11262
  inst.instruction |= Rn << 16;
11263
  inst.instruction |= Rm;
11264
 
11265
 if (RdLo == RdHi)
11266
    as_tsktsk (_("rdhi and rdlo must be different"));
11267
}
11268
 
11269
static void
11270
do_t_nop (void)
11271
{
11272
  set_it_insn_type (NEUTRAL_IT_INSN);
11273
 
11274
  if (unified_syntax)
11275
    {
11276
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
11277
        {
11278
          inst.instruction = THUMB_OP32 (inst.instruction);
11279
          inst.instruction |= inst.operands[0].imm;
11280
        }
11281
      else
11282
        {
11283
          /* PR9722: Check for Thumb2 availability before
11284
             generating a thumb2 nop instruction.  */
11285
          if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
11286
            {
11287
              inst.instruction = THUMB_OP16 (inst.instruction);
11288
              inst.instruction |= inst.operands[0].imm << 4;
11289
            }
11290
          else
11291
            inst.instruction = 0x46c0;
11292
        }
11293
    }
11294
  else
11295
    {
11296
      constraint (inst.operands[0].present,
11297
                  _("Thumb does not support NOP with hints"));
11298
      inst.instruction = 0x46c0;
11299
    }
11300
}
11301
 
11302
static void
11303
do_t_neg (void)
11304
{
11305
  if (unified_syntax)
11306
    {
11307
      bfd_boolean narrow;
11308
 
11309
      if (THUMB_SETS_FLAGS (inst.instruction))
11310
        narrow = !in_it_block ();
11311
      else
11312
        narrow = in_it_block ();
11313
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11314
        narrow = FALSE;
11315
      if (inst.size_req == 4)
11316
        narrow = FALSE;
11317
 
11318
      if (!narrow)
11319
        {
11320
          inst.instruction = THUMB_OP32 (inst.instruction);
11321
          inst.instruction |= inst.operands[0].reg << 8;
11322
          inst.instruction |= inst.operands[1].reg << 16;
11323
        }
11324
      else
11325
        {
11326
          inst.instruction = THUMB_OP16 (inst.instruction);
11327
          inst.instruction |= inst.operands[0].reg;
11328
          inst.instruction |= inst.operands[1].reg << 3;
11329
        }
11330
    }
11331
  else
11332
    {
11333
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
11334
                  BAD_HIREG);
11335
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11336
 
11337
      inst.instruction = THUMB_OP16 (inst.instruction);
11338
      inst.instruction |= inst.operands[0].reg;
11339
      inst.instruction |= inst.operands[1].reg << 3;
11340
    }
11341
}
11342
 
11343
static void
11344
do_t_orn (void)
11345
{
11346
  unsigned Rd, Rn;
11347
 
11348
  Rd = inst.operands[0].reg;
11349
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
11350
 
11351
  reject_bad_reg (Rd);
11352
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
11353
  reject_bad_reg (Rn);
11354
 
11355
  inst.instruction |= Rd << 8;
11356
  inst.instruction |= Rn << 16;
11357
 
11358
  if (!inst.operands[2].isreg)
11359
    {
11360
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11361
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11362
    }
11363
  else
11364
    {
11365
      unsigned Rm;
11366
 
11367
      Rm = inst.operands[2].reg;
11368
      reject_bad_reg (Rm);
11369
 
11370
      constraint (inst.operands[2].shifted
11371
                  && inst.operands[2].immisreg,
11372
                  _("shift must be constant"));
11373
      encode_thumb32_shifted_operand (2);
11374
    }
11375
}
11376
 
11377
static void
11378
do_t_pkhbt (void)
11379
{
11380
  unsigned Rd, Rn, Rm;
11381
 
11382
  Rd = inst.operands[0].reg;
11383
  Rn = inst.operands[1].reg;
11384
  Rm = inst.operands[2].reg;
11385
 
11386
  reject_bad_reg (Rd);
11387
  reject_bad_reg (Rn);
11388
  reject_bad_reg (Rm);
11389
 
11390
  inst.instruction |= Rd << 8;
11391
  inst.instruction |= Rn << 16;
11392
  inst.instruction |= Rm;
11393
  if (inst.operands[3].present)
11394
    {
11395
      unsigned int val = inst.reloc.exp.X_add_number;
11396
      constraint (inst.reloc.exp.X_op != O_constant,
11397
                  _("expression too complex"));
11398
      inst.instruction |= (val & 0x1c) << 10;
11399
      inst.instruction |= (val & 0x03) << 6;
11400
    }
11401
}
11402
 
11403
static void
11404
do_t_pkhtb (void)
11405
{
11406
  if (!inst.operands[3].present)
11407
    {
11408
      unsigned Rtmp;
11409
 
11410
      inst.instruction &= ~0x00000020;
11411
 
11412
      /* PR 10168.  Swap the Rm and Rn registers.  */
11413
      Rtmp = inst.operands[1].reg;
11414
      inst.operands[1].reg = inst.operands[2].reg;
11415
      inst.operands[2].reg = Rtmp;
11416
    }
11417
  do_t_pkhbt ();
11418
}
11419
 
11420
static void
11421
do_t_pld (void)
11422
{
11423
  if (inst.operands[0].immisreg)
11424
    reject_bad_reg (inst.operands[0].imm);
11425
 
11426
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
11427
}
11428
 
11429
static void
11430
do_t_push_pop (void)
11431
{
11432
  unsigned mask;
11433
 
11434
  constraint (inst.operands[0].writeback,
11435
              _("push/pop do not support {reglist}^"));
11436
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11437
              _("expression too complex"));
11438
 
11439
  mask = inst.operands[0].imm;
11440
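  /* Examples (illustrative sketch): "push {r0-r3, lr}" fits a 16-bit
     encoding (low registers plus LR), while "push {r0, r8}" falls through
     to the 32-bit Thumb-2 form built via encode_thumb2_ldmstm with
     SP (r13) as the base register.  */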
  if ((mask & ~0xff) == 0)
11441
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
11442
  else if ((inst.instruction == T_MNEM_push
11443
            && (mask & ~0xff) == 1 << REG_LR)
11444
           || (inst.instruction == T_MNEM_pop
11445
               && (mask & ~0xff) == 1 << REG_PC))
11446
    {
11447
      inst.instruction = THUMB_OP16 (inst.instruction);
11448
      inst.instruction |= THUMB_PP_PC_LR;
11449
      inst.instruction |= mask & 0xff;
11450
    }
11451
  else if (unified_syntax)
11452
    {
11453
      inst.instruction = THUMB_OP32 (inst.instruction);
11454
      encode_thumb2_ldmstm (13, mask, TRUE);
11455
    }
11456
  else
11457
    {
11458
      inst.error = _("invalid register list to push/pop instruction");
11459
      return;
11460
    }
11461
}
11462
 
11463
static void
11464
do_t_rbit (void)
11465
{
11466
  unsigned Rd, Rm;
11467
 
11468
  Rd = inst.operands[0].reg;
11469
  Rm = inst.operands[1].reg;
11470
 
11471
  reject_bad_reg (Rd);
11472
  reject_bad_reg (Rm);
11473
 
11474
  inst.instruction |= Rd << 8;
11475
  inst.instruction |= Rm << 16;
11476
  inst.instruction |= Rm;
11477
}
11478
 
11479
static void
11480
do_t_rev (void)
11481
{
11482
  unsigned Rd, Rm;
11483
 
11484
  Rd = inst.operands[0].reg;
11485
  Rm = inst.operands[1].reg;
11486
 
11487
  reject_bad_reg (Rd);
11488
  reject_bad_reg (Rm);
11489
 
11490
  if (Rd <= 7 && Rm <= 7
11491
      && inst.size_req != 4)
11492
    {
11493
      inst.instruction = THUMB_OP16 (inst.instruction);
11494
      inst.instruction |= Rd;
11495
      inst.instruction |= Rm << 3;
11496
    }
11497
  else if (unified_syntax)
11498
    {
11499
      inst.instruction = THUMB_OP32 (inst.instruction);
11500
      inst.instruction |= Rd << 8;
11501
      inst.instruction |= Rm << 16;
11502
      inst.instruction |= Rm;
11503
    }
11504
  else
11505
    inst.error = BAD_HIREG;
11506
}
11507
 
11508
static void
11509
do_t_rrx (void)
11510
{
11511
  unsigned Rd, Rm;
11512
 
11513
  Rd = inst.operands[0].reg;
11514
  Rm = inst.operands[1].reg;
11515
 
11516
  reject_bad_reg (Rd);
11517
  reject_bad_reg (Rm);
11518
 
11519
  inst.instruction |= Rd << 8;
11520
  inst.instruction |= Rm;
11521
}
11522
 
11523
static void
11524
do_t_rsb (void)
11525
{
11526
  unsigned Rd, Rs;
11527
 
11528
  Rd = inst.operands[0].reg;
11529
  Rs = (inst.operands[1].present
11530
        ? inst.operands[1].reg    /* Rd, Rs, foo */
11531
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
11532
 
11533
  reject_bad_reg (Rd);
11534
  reject_bad_reg (Rs);
11535
  if (inst.operands[2].isreg)
11536
    reject_bad_reg (inst.operands[2].reg);
11537
 
11538
  inst.instruction |= Rd << 8;
11539
  inst.instruction |= Rs << 16;
11540
  if (!inst.operands[2].isreg)
11541
    {
11542
      bfd_boolean narrow;
11543
 
11544
      if ((inst.instruction & 0x00100000) != 0)
11545
        narrow = !in_it_block ();
11546
      else
11547
        narrow = in_it_block ();
11548
 
11549
      if (Rd > 7 || Rs > 7)
11550
        narrow = FALSE;
11551
 
11552
      if (inst.size_req == 4 || !unified_syntax)
11553
        narrow = FALSE;
11554
 
11555
      if (inst.reloc.exp.X_op != O_constant
11556
          || inst.reloc.exp.X_add_number != 0)
11557
        narrow = FALSE;
11558
 
11559
      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
11560
         relaxation, but it doesn't seem worth the hassle.  */
11561
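      /* Example (illustrative sketch): outside an IT block,
         "rsbs r0, r1, #0" with low registers is rewritten here as the
         16-bit "negs r0, r1".  */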
      if (narrow)
11562
        {
11563
          inst.reloc.type = BFD_RELOC_UNUSED;
11564
          inst.instruction = THUMB_OP16 (T_MNEM_negs);
11565
          inst.instruction |= Rs << 3;
11566
          inst.instruction |= Rd;
11567
        }
11568
      else
11569
        {
11570
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11571
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11572
        }
11573
    }
11574
  else
11575
    encode_thumb32_shifted_operand (2);
11576
}
11577
 
11578
static void
11579
do_t_setend (void)
11580
{
11581
  set_it_insn_type (OUTSIDE_IT_INSN);
11582
  if (inst.operands[0].imm)
11583
    inst.instruction |= 0x8;
11584
}
11585
 
11586
static void
11587
do_t_shift (void)
11588
{
11589
  if (!inst.operands[1].present)
11590
    inst.operands[1].reg = inst.operands[0].reg;
11591
 
11592
  if (unified_syntax)
11593
    {
11594
      bfd_boolean narrow;
11595
      int shift_kind;
11596
 
11597
      switch (inst.instruction)
11598
        {
11599
        case T_MNEM_asr:
11600
        case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
11601
        case T_MNEM_lsl:
11602
        case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
11603
        case T_MNEM_lsr:
11604
        case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
11605
        case T_MNEM_ror:
11606
        case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
11607
        default: abort ();
11608
        }
11609
 
11610
      if (THUMB_SETS_FLAGS (inst.instruction))
11611
        narrow = !in_it_block ();
11612
      else
11613
        narrow = in_it_block ();
11614
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11615
        narrow = FALSE;
11616
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
11617
        narrow = FALSE;
11618
      if (inst.operands[2].isreg
11619
          && (inst.operands[1].reg != inst.operands[0].reg
11620
              || inst.operands[2].reg > 7))
11621
        narrow = FALSE;
11622
      if (inst.size_req == 4)
11623
        narrow = FALSE;
11624
 
11625
      reject_bad_reg (inst.operands[0].reg);
11626
      reject_bad_reg (inst.operands[1].reg);
11627
 
11628
      if (!narrow)
11629
        {
11630
          if (inst.operands[2].isreg)
11631
            {
11632
              reject_bad_reg (inst.operands[2].reg);
11633
              inst.instruction = THUMB_OP32 (inst.instruction);
11634
              inst.instruction |= inst.operands[0].reg << 8;
11635
              inst.instruction |= inst.operands[1].reg << 16;
11636
              inst.instruction |= inst.operands[2].reg;
11637 148 khays
 
11638
              /* PR 12854: Error on extraneous shifts.  */
11639
              constraint (inst.operands[2].shifted,
11640
                          _("extraneous shift as part of operand to shift insn"));
11641 16 khays
            }
11642
          else
11643
            {
11644
              inst.operands[1].shifted = 1;
11645
              inst.operands[1].shift_kind = shift_kind;
11646
              inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
11647
                                             ? T_MNEM_movs : T_MNEM_mov);
11648
              inst.instruction |= inst.operands[0].reg << 8;
11649
              encode_thumb32_shifted_operand (1);
11650
              /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
11651
              inst.reloc.type = BFD_RELOC_UNUSED;
11652
            }
11653
        }
11654
      else
11655
        {
11656
          if (inst.operands[2].isreg)
11657
            {
11658
              switch (shift_kind)
11659
                {
11660
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
11661
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
11662
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
11663
                case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
11664
                default: abort ();
11665
                }
11666
 
11667
              inst.instruction |= inst.operands[0].reg;
11668
              inst.instruction |= inst.operands[2].reg << 3;
11669 148 khays
 
11670
              /* PR 12854: Error on extraneous shifts.  */
11671
              constraint (inst.operands[2].shifted,
11672
                          _("extraneous shift as part of operand to shift insn"));
11673 16 khays
            }
11674
          else
11675
            {
11676
              switch (shift_kind)
11677
                {
11678
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11679
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11680
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11681
                default: abort ();
11682
                }
11683
              inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11684
              inst.instruction |= inst.operands[0].reg;
11685
              inst.instruction |= inst.operands[1].reg << 3;
11686
            }
11687
        }
11688
    }
11689
  else
11690
    {
11691
      constraint (inst.operands[0].reg > 7
11692
                  || inst.operands[1].reg > 7, BAD_HIREG);
11693
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11694
 
11695
      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
11696
        {
11697
          constraint (inst.operands[2].reg > 7, BAD_HIREG);
11698
          constraint (inst.operands[0].reg != inst.operands[1].reg,
11699
                      _("source1 and dest must be same register"));
11700
 
11701
          switch (inst.instruction)
11702
            {
11703
            case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
11704
            case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
11705
            case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
11706
            case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
11707
            default: abort ();
11708
            }
11709
 
11710
          inst.instruction |= inst.operands[0].reg;
11711
          inst.instruction |= inst.operands[2].reg << 3;
11712 148 khays
 
11713
          /* PR 12854: Error on extraneous shifts.  */
11714
          constraint (inst.operands[2].shifted,
11715
                      _("extraneous shift as part of operand to shift insn"));
11716 16 khays
        }
11717
      else
11718
        {
11719
          switch (inst.instruction)
11720
            {
11721
            case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
11722
            case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
11723
            case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
11724
            case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
11725
            default: abort ();
11726
            }
11727
          inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11728
          inst.instruction |= inst.operands[0].reg;
11729
          inst.instruction |= inst.operands[1].reg << 3;
11730
        }
11731
    }
11732
}
11733
 
11734
static void
11735
do_t_simd (void)
11736
{
11737
  unsigned Rd, Rn, Rm;
11738
 
11739
  Rd = inst.operands[0].reg;
11740
  Rn = inst.operands[1].reg;
11741
  Rm = inst.operands[2].reg;
11742
 
11743
  reject_bad_reg (Rd);
11744
  reject_bad_reg (Rn);
11745
  reject_bad_reg (Rm);
11746
 
11747
  inst.instruction |= Rd << 8;
11748
  inst.instruction |= Rn << 16;
11749
  inst.instruction |= Rm;
11750
}
11751
 
11752
static void
11753
do_t_simd2 (void)
11754
{
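  /* Like do_t_simd, but for mnemonics whose second assembler operand is
     encoded in the Rm field and the third in the Rn field.  */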
11755
  unsigned Rd, Rn, Rm;
11756
 
11757
  Rd = inst.operands[0].reg;
11758
  Rm = inst.operands[1].reg;
11759
  Rn = inst.operands[2].reg;
11760
 
11761
  reject_bad_reg (Rd);
11762
  reject_bad_reg (Rn);
11763
  reject_bad_reg (Rm);
11764
 
11765
  inst.instruction |= Rd << 8;
11766
  inst.instruction |= Rn << 16;
11767
  inst.instruction |= Rm;
11768
}
11769
 
11770
static void
11771
do_t_smc (void)
11772
{
11773
  unsigned int value = inst.reloc.exp.X_add_number;
11774
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
11775
              _("SMC is not permitted on this architecture"));
11776
  constraint (inst.reloc.exp.X_op != O_constant,
11777
              _("expression too complex"));
11778
  inst.reloc.type = BFD_RELOC_UNUSED;
11779
  inst.instruction |= (value & 0xf000) >> 12;
11780
  inst.instruction |= (value & 0x0ff0);
11781
  inst.instruction |= (value & 0x000f) << 16;
11782
}
11783
 
11784
static void
11785
do_t_hvc (void)
11786
{
11787
  unsigned int value = inst.reloc.exp.X_add_number;
11788
 
11789
  inst.reloc.type = BFD_RELOC_UNUSED;
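  /* The 16-bit immediate is split between imm12 (bits 0-11) and imm4
     (bits 16-19) of the encoding.  */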
11790
  inst.instruction |= (value & 0x0fff);
11791
  inst.instruction |= (value & 0xf000) << 4;
11792
}
11793
 
11794
static void
11795
do_t_ssat_usat (int bias)
11796
{
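  /* BIAS is 1 for SSAT and 0 for USAT: the signed saturate immediate is
     encoded as the requested saturation width minus one, while the unsigned
     form encodes the width directly.  */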
11797
  unsigned Rd, Rn;
11798
 
11799
  Rd = inst.operands[0].reg;
11800
  Rn = inst.operands[2].reg;
11801
 
11802
  reject_bad_reg (Rd);
11803
  reject_bad_reg (Rn);
11804
 
11805
  inst.instruction |= Rd << 8;
11806
  inst.instruction |= inst.operands[1].imm - bias;
11807
  inst.instruction |= Rn << 16;
11808
 
11809
  if (inst.operands[3].present)
11810
    {
11811
      offsetT shift_amount = inst.reloc.exp.X_add_number;
11812
 
11813
      inst.reloc.type = BFD_RELOC_UNUSED;
11814
 
11815
      constraint (inst.reloc.exp.X_op != O_constant,
11816
                  _("expression too complex"));
11817
 
11818
      if (shift_amount != 0)
11819
        {
11820
          constraint (shift_amount > 31,
11821
                      _("shift expression is too large"));
11822
 
11823
          if (inst.operands[3].shift_kind == SHIFT_ASR)
11824
            inst.instruction |= 0x00200000;  /* sh bit.  */
11825
 
11826
          inst.instruction |= (shift_amount & 0x1c) << 10;
11827
          inst.instruction |= (shift_amount & 0x03) << 6;
11828
        }
11829
    }
11830
}
11831
 
11832
static void
11833
do_t_ssat (void)
11834
{
11835
  do_t_ssat_usat (1);
11836
}
11837
 
11838
static void
11839
do_t_ssat16 (void)
11840
{
11841
  unsigned Rd, Rn;
11842
 
11843
  Rd = inst.operands[0].reg;
11844
  Rn = inst.operands[2].reg;
11845
 
11846
  reject_bad_reg (Rd);
11847
  reject_bad_reg (Rn);
11848
 
11849
  inst.instruction |= Rd << 8;
11850
  inst.instruction |= inst.operands[1].imm - 1;
11851
  inst.instruction |= Rn << 16;
11852
}
11853
 
11854
static void
11855
do_t_strex (void)
11856
{
11857
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
11858
              || inst.operands[2].postind || inst.operands[2].writeback
11859
              || inst.operands[2].immisreg || inst.operands[2].shifted
11860
              || inst.operands[2].negative,
11861
              BAD_ADDR_MODE);
11862
 
11863
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
11864
 
11865
  inst.instruction |= inst.operands[0].reg << 8;
11866
  inst.instruction |= inst.operands[1].reg << 12;
11867
  inst.instruction |= inst.operands[2].reg << 16;
11868
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11869
}
11870
 
11871
static void
11872
do_t_strexd (void)
11873
{
11874
  if (!inst.operands[2].present)
11875
    inst.operands[2].reg = inst.operands[1].reg + 1;
11876
 
11877
  constraint (inst.operands[0].reg == inst.operands[1].reg
11878
              || inst.operands[0].reg == inst.operands[2].reg
11879
              || inst.operands[0].reg == inst.operands[3].reg,
11880
              BAD_OVERLAP);
11881
 
11882
  inst.instruction |= inst.operands[0].reg;
11883
  inst.instruction |= inst.operands[1].reg << 12;
11884
  inst.instruction |= inst.operands[2].reg << 8;
11885
  inst.instruction |= inst.operands[3].reg << 16;
11886
}
11887
 
11888
static void
11889
do_t_sxtah (void)
11890
{
11891
  unsigned Rd, Rn, Rm;
11892
 
11893
  Rd = inst.operands[0].reg;
11894
  Rn = inst.operands[1].reg;
11895
  Rm = inst.operands[2].reg;
11896
 
11897
  reject_bad_reg (Rd);
11898
  reject_bad_reg (Rn);
11899
  reject_bad_reg (Rm);
11900
 
11901
  inst.instruction |= Rd << 8;
11902
  inst.instruction |= Rn << 16;
11903
  inst.instruction |= Rm;
11904
  inst.instruction |= inst.operands[3].imm << 4;
11905
}
11906
 
11907
static void
11908
do_t_sxth (void)
11909
{
11910
  unsigned Rd, Rm;
11911
 
11912
  Rd = inst.operands[0].reg;
11913
  Rm = inst.operands[1].reg;
11914
 
11915
  reject_bad_reg (Rd);
11916
  reject_bad_reg (Rm);
11917
 
11918
  if (inst.instruction <= 0xffff
11919
      && inst.size_req != 4
11920
      && Rd <= 7 && Rm <= 7
11921
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
11922
    {
11923
      inst.instruction = THUMB_OP16 (inst.instruction);
11924
      inst.instruction |= Rd;
11925
      inst.instruction |= Rm << 3;
11926
    }
11927
  else if (unified_syntax)
11928
    {
11929
      if (inst.instruction <= 0xffff)
11930
        inst.instruction = THUMB_OP32 (inst.instruction);
11931
      inst.instruction |= Rd << 8;
11932
      inst.instruction |= Rm;
11933
      inst.instruction |= inst.operands[2].imm << 4;
11934
    }
11935
  else
11936
    {
11937
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
11938
                  _("Thumb encoding does not support rotation"));
11939
      constraint (1, BAD_HIREG);
11940
    }
11941
}
11942
 
11943
static void
11944
do_t_swi (void)
11945
{
11946
  /* We have to do the following check manually as ARM_EXT_OS only applies
11947
     to ARM_EXT_V6M.  */
11948
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
11949
    {
11950
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
11951
          /* This only applies to v6-M however, not to later architectures.  */
11952
          && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
11953
        as_bad (_("SVC is not permitted on this architecture"));
11954
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
11955
    }
11956
 
11957
  inst.reloc.type = BFD_RELOC_ARM_SWI;
11958
}
11959
 
11960
static void
11961
do_t_tb (void)
11962
{
11963
  unsigned Rn, Rm;
11964
  int half;
11965
 
11966
  half = (inst.instruction & 0x10) != 0;
11967
  set_it_insn_type_last ();
11968
  constraint (inst.operands[0].immisreg,
11969
              _("instruction requires register index"));
11970
 
11971
  Rn = inst.operands[0].reg;
11972
  Rm = inst.operands[0].imm;
11973
 
11974
  constraint (Rn == REG_SP, BAD_SP);
11975
  reject_bad_reg (Rm);
11976
 
11977
  constraint (!half && inst.operands[0].shifted,
11978
              _("instruction does not allow shifted index"));
11979
  inst.instruction |= (Rn << 16) | Rm;
11980
}
11981
 
11982
static void
11983
do_t_usat (void)
11984
{
11985
  do_t_ssat_usat (0);
11986
}
11987
 
11988
static void
11989
do_t_usat16 (void)
11990
{
11991
  unsigned Rd, Rn;
11992
 
11993
  Rd = inst.operands[0].reg;
11994
  Rn = inst.operands[2].reg;
11995
 
11996
  reject_bad_reg (Rd);
11997
  reject_bad_reg (Rn);
11998
 
11999
  inst.instruction |= Rd << 8;
12000
  inst.instruction |= inst.operands[1].imm;
12001
  inst.instruction |= Rn << 16;
12002
}
12003
 
12004
/* Neon instruction encoder helpers.  */
12005
 
12006
/* Encodings for the different types for various Neon opcodes.  */
12007
 
12008
/* An "invalid" code for the following tables.  */
12009
#define N_INV -1u
12010
 
12011
struct neon_tab_entry
12012
{
12013
  unsigned integer;
12014
  unsigned float_or_poly;
12015
  unsigned scalar_or_imm;
12016
};
12017
 
12018
/* Map overloaded Neon opcodes to their respective encodings.  */
12019
#define NEON_ENC_TAB                                    \
12020
  X(vabd,       0x0000700, 0x1200d00, N_INV),           \
12021
  X(vmax,       0x0000600, 0x0000f00, N_INV),           \
12022
  X(vmin,       0x0000610, 0x0200f00, N_INV),           \
12023
  X(vpadd,      0x0000b10, 0x1000d00, N_INV),           \
12024
  X(vpmax,      0x0000a00, 0x1000f00, N_INV),           \
12025
  X(vpmin,      0x0000a10, 0x1200f00, N_INV),           \
12026
  X(vadd,       0x0000800, 0x0000d00, N_INV),           \
12027
  X(vsub,       0x1000800, 0x0200d00, N_INV),           \
12028
  X(vceq,       0x1000810, 0x0000e00, 0x1b10100),       \
12029
  X(vcge,       0x0000310, 0x1000e00, 0x1b10080),       \
12030
  X(vcgt,       0x0000300, 0x1200e00, 0x1b10000),       \
12031
  /* Register variants of the following two instructions are encoded as
12032
     vcge / vcgt with the operands reversed.  */        \
12033
  X(vclt,       0x0000300, 0x1200e00, 0x1b10200),       \
12034
  X(vcle,       0x0000310, 0x1000e00, 0x1b10180),       \
12035
  X(vfma,       N_INV, 0x0000c10, N_INV),               \
12036
  X(vfms,       N_INV, 0x0200c10, N_INV),               \
12037
  X(vmla,       0x0000900, 0x0000d10, 0x0800040),       \
12038
  X(vmls,       0x1000900, 0x0200d10, 0x0800440),       \
12039
  X(vmul,       0x0000910, 0x1000d10, 0x0800840),       \
12040
  X(vmull,      0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
12041
  X(vmlal,      0x0800800, N_INV,     0x0800240),       \
12042
  X(vmlsl,      0x0800a00, N_INV,     0x0800640),       \
12043
  X(vqdmlal,    0x0800900, N_INV,     0x0800340),       \
12044
  X(vqdmlsl,    0x0800b00, N_INV,     0x0800740),       \
12045
  X(vqdmull,    0x0800d00, N_INV,     0x0800b40),       \
12046
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),       \
12047
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),       \
12048
  X(vshl,       0x0000400, N_INV,     0x0800510),       \
12049
  X(vqshl,      0x0000410, N_INV,     0x0800710),       \
12050
  X(vand,       0x0000110, N_INV,     0x0800030),       \
12051
  X(vbic,       0x0100110, N_INV,     0x0800030),       \
12052
  X(veor,       0x1000110, N_INV,     N_INV),           \
12053
  X(vorn,       0x0300110, N_INV,     0x0800010),       \
12054
  X(vorr,       0x0200110, N_INV,     0x0800010),       \
12055
  X(vmvn,       0x1b00580, N_INV,     0x0800030),       \
12056
  X(vshll,      0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
12057
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
12058
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
12059
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
12060
  X(vst1,       0x0000000, 0x0800000, N_INV),           \
12061
  X(vld2,       0x0200100, 0x0a00100, 0x0a00d00),       \
12062
  X(vst2,       0x0000100, 0x0800100, N_INV),           \
12063
  X(vld3,       0x0200200, 0x0a00200, 0x0a00e00),       \
12064
  X(vst3,       0x0000200, 0x0800200, N_INV),           \
12065
  X(vld4,       0x0200300, 0x0a00300, 0x0a00f00),       \
12066
  X(vst4,       0x0000300, 0x0800300, N_INV),           \
12067
  X(vmovn,      0x1b20200, N_INV,     N_INV),           \
12068
  X(vtrn,       0x1b20080, N_INV,     N_INV),           \
12069
  X(vqmovn,     0x1b20200, N_INV,     N_INV),           \
12070
  X(vqmovun,    0x1b20240, N_INV,     N_INV),           \
12071
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),           \
12072
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),           \
12073
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),           \
12074
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),           \
12075
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),           \
12076
  X(vcmp,       0xeb40a40, 0xeb40b40, N_INV),           \
12077
  X(vcmpz,      0xeb50a40, 0xeb50b40, N_INV),           \
12078
  X(vcmpe,      0xeb40ac0, 0xeb40bc0, N_INV),           \
12079
  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV)
12080
 
12081
enum neon_opc
12082
{
12083
#define X(OPC,I,F,S) N_MNEM_##OPC
12084
NEON_ENC_TAB
12085
#undef X
12086
};
12087
 
12088
static const struct neon_tab_entry neon_enc_tab[] =
12089
{
12090
#define X(OPC,I,F,S) { (I), (F), (S) }
12091
NEON_ENC_TAB
12092
#undef X
12093
};
12094
 
12095
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
12096
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12097
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
12098
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12099
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12100
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12101
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12102
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12103
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12104
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12105
#define NEON_ENC_SINGLE_(X) \
12106
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12107
#define NEON_ENC_DOUBLE_(X) \
12108
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12109
 
12110
#define NEON_ENCODE(type, inst)                                 \
12111
  do                                                            \
12112
    {                                                           \
12113
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
12114
      inst.is_neon = 1;                                         \
12115
    }                                                           \
12116
  while (0)
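
/* For example, NEON_ENCODE (INTEGER, inst) rewrites inst.instruction with the
   .integer encoding from neon_enc_tab for the current mnemonic and marks the
   instruction as Neon, so that check_neon_suffixes below will not reject a
   type suffix on it.  */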
12117
 
12118
#define check_neon_suffixes                                             \
12119
  do                                                                    \
12120
    {                                                                   \
12121
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)        \
12122
        {                                                               \
12123
          as_bad (_("invalid neon suffix for non neon instruction"));   \
12124
          return;                                                       \
12125
        }                                                               \
12126
    }                                                                   \
12127
  while (0)
12128
 
12129
/* Define shapes for instruction operands. The following mnemonic characters
12130
   are used in this table:
12131
 
12132
     F - VFP S<n> register
12133
     D - Neon D<n> register
12134
     Q - Neon Q<n> register
12135
     I - Immediate
12136
     S - Scalar
12137
     R - ARM register
12138
     L - D<n> register list
12139
 
12140
   This table is used to generate various data:
12141
     - enumerations of the form NS_DDR to be used as arguments to
12142
       neon_select_shape.
12143
     - a table classifying shapes into single, double, quad, mixed.
12144
     - a table used to drive neon_select_shape.  */
12145
 
12146
#define NEON_SHAPE_DEF                  \
12147
  X(3, (D, D, D), DOUBLE),              \
12148
  X(3, (Q, Q, Q), QUAD),                \
12149
  X(3, (D, D, I), DOUBLE),              \
12150
  X(3, (Q, Q, I), QUAD),                \
12151
  X(3, (D, D, S), DOUBLE),              \
12152
  X(3, (Q, Q, S), QUAD),                \
12153
  X(2, (D, D), DOUBLE),                 \
12154
  X(2, (Q, Q), QUAD),                   \
12155
  X(2, (D, S), DOUBLE),                 \
12156
  X(2, (Q, S), QUAD),                   \
12157
  X(2, (D, R), DOUBLE),                 \
12158
  X(2, (Q, R), QUAD),                   \
12159
  X(2, (D, I), DOUBLE),                 \
12160
  X(2, (Q, I), QUAD),                   \
12161
  X(3, (D, L, D), DOUBLE),              \
12162
  X(2, (D, Q), MIXED),                  \
12163
  X(2, (Q, D), MIXED),                  \
12164
  X(3, (D, Q, I), MIXED),               \
12165
  X(3, (Q, D, I), MIXED),               \
12166
  X(3, (Q, D, D), MIXED),               \
12167
  X(3, (D, Q, Q), MIXED),               \
12168
  X(3, (Q, Q, D), MIXED),               \
12169
  X(3, (Q, D, S), MIXED),               \
12170
  X(3, (D, Q, S), MIXED),               \
12171
  X(4, (D, D, D, I), DOUBLE),           \
12172
  X(4, (Q, Q, Q, I), QUAD),             \
12173
  X(2, (F, F), SINGLE),                 \
12174
  X(3, (F, F, F), SINGLE),              \
12175
  X(2, (F, I), SINGLE),                 \
12176
  X(2, (F, D), MIXED),                  \
12177
  X(2, (D, F), MIXED),                  \
12178
  X(3, (F, F, I), MIXED),               \
12179
  X(4, (R, R, F, F), SINGLE),           \
12180
  X(4, (F, F, R, R), SINGLE),           \
12181
  X(3, (D, R, R), DOUBLE),              \
12182
  X(3, (R, R, D), DOUBLE),              \
12183
  X(2, (S, R), SINGLE),                 \
12184
  X(2, (R, S), SINGLE),                 \
12185
  X(2, (F, R), SINGLE),                 \
12186
  X(2, (R, F), SINGLE)
12187
 
12188
#define S2(A,B)         NS_##A##B
12189
#define S3(A,B,C)       NS_##A##B##C
12190
#define S4(A,B,C,D)     NS_##A##B##C##D
12191
 
12192
#define X(N, L, C) S##N L
12193
 
12194
enum neon_shape
12195
{
12196
  NEON_SHAPE_DEF,
12197
  NS_NULL
12198
};
12199
 
12200
#undef X
12201
#undef S2
12202
#undef S3
12203
#undef S4
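
/* As an example of the expansion: the X(3, (D, D, D), DOUBLE) entry above
   becomes the enumerator NS_DDD, the classification SC_DOUBLE in
   neon_shape_class below, and { 3, { SE_D, SE_D, SE_D } } in neon_shape_tab.  */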
12204
 
12205
enum neon_shape_class
12206
{
12207
  SC_SINGLE,
12208
  SC_DOUBLE,
12209
  SC_QUAD,
12210
  SC_MIXED
12211
};
12212
 
12213
#define X(N, L, C) SC_##C
12214
 
12215
static enum neon_shape_class neon_shape_class[] =
12216
{
12217
  NEON_SHAPE_DEF
12218
};
12219
 
12220
#undef X
12221
 
12222
enum neon_shape_el
12223
{
12224
  SE_F,
12225
  SE_D,
12226
  SE_Q,
12227
  SE_I,
12228
  SE_S,
12229
  SE_R,
12230
  SE_L
12231
};
12232
 
12233
/* Register widths of above.  */
12234
static unsigned neon_shape_el_size[] =
12235
{
12236
  32,
12237
  64,
12238
  128,
12239
  0,
12240
  32,
12241
  32,
  0
};
12244
 
12245
struct neon_shape_info
12246
{
12247
  unsigned els;
12248
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
12249
};
12250
 
12251
#define S2(A,B)         { SE_##A, SE_##B }
12252
#define S3(A,B,C)       { SE_##A, SE_##B, SE_##C }
12253
#define S4(A,B,C,D)     { SE_##A, SE_##B, SE_##C, SE_##D }
12254
 
12255
#define X(N, L, C) { N, S##N L }
12256
 
12257
static struct neon_shape_info neon_shape_tab[] =
12258
{
12259
  NEON_SHAPE_DEF
12260
};
12261
 
12262
#undef X
12263
#undef S2
12264
#undef S3
12265
#undef S4
12266
 
12267
/* Bit masks used in type checking given instructions.
12268
  'N_EQK' means the type must be the same as (or based on in some way) the key
12269
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
12270
   set, various other bits can be set as well in order to modify the meaning of
12271
   the type constraint.  */
12272
 
12273
enum neon_type_mask
12274
{
12275
  N_S8   = 0x0000001,
12276
  N_S16  = 0x0000002,
12277
  N_S32  = 0x0000004,
12278
  N_S64  = 0x0000008,
12279
  N_U8   = 0x0000010,
12280
  N_U16  = 0x0000020,
12281
  N_U32  = 0x0000040,
12282
  N_U64  = 0x0000080,
12283
  N_I8   = 0x0000100,
12284
  N_I16  = 0x0000200,
12285
  N_I32  = 0x0000400,
12286
  N_I64  = 0x0000800,
12287
  N_8    = 0x0001000,
12288
  N_16   = 0x0002000,
12289
  N_32   = 0x0004000,
12290
  N_64   = 0x0008000,
12291
  N_P8   = 0x0010000,
12292
  N_P16  = 0x0020000,
12293
  N_F16  = 0x0040000,
12294
  N_F32  = 0x0080000,
12295
  N_F64  = 0x0100000,
12296
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
12297
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
12298
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
12299
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
12300
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
12301
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
12302
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
12303
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
12304
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
12305
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
12306
  N_UTYP = 0,
12307
  N_MAX_NONSPECIAL = N_F64
12308
};
12309
 
12310
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
12311
 
12312
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
12313
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
12314
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
12315
#define N_SUF_32   (N_SU_32 | N_F32)
12316
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
12317
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
12318
 
12319
/* Pass this as the first type argument to neon_check_type to ignore types
12320
   altogether.  */
12321
#define N_IGNORE_TYPE (N_KEY | N_EQK)
12322
 
12323
/* Select a "shape" for the current instruction (describing register types or
12324
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
12325
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
12326
   function of operand parsing, so this function doesn't need to be called.
12327
   Shapes should be listed in order of decreasing length.  */
12328
 
12329
static enum neon_shape
12330
neon_select_shape (enum neon_shape shape, ...)
12331
{
12332
  va_list ap;
12333
  enum neon_shape first_shape = shape;
12334
 
12335
  /* Fix missing optional operands. FIXME: we don't know at this point how
12336
     many arguments we should have, so this makes the assumption that we have
12337
     > 1. This is true of all current Neon opcodes, I think, but may not be
12338
     true in the future.  */
12339
  if (!inst.operands[1].present)
12340
    inst.operands[1] = inst.operands[0];
12341
 
12342
  va_start (ap, shape);
12343
 
12344
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
12345
    {
12346
      unsigned j;
12347
      int matches = 1;
12348
 
12349
      for (j = 0; j < neon_shape_tab[shape].els; j++)
12350
        {
12351
          if (!inst.operands[j].present)
12352
            {
12353
              matches = 0;
12354
              break;
12355
            }
12356
 
12357
          switch (neon_shape_tab[shape].el[j])
12358
            {
12359
            case SE_F:
12360
              if (!(inst.operands[j].isreg
12361
                    && inst.operands[j].isvec
12362
                    && inst.operands[j].issingle
12363
                    && !inst.operands[j].isquad))
12364
                matches = 0;
12365
              break;
12366
 
12367
            case SE_D:
12368
              if (!(inst.operands[j].isreg
12369
                    && inst.operands[j].isvec
12370
                    && !inst.operands[j].isquad
12371
                    && !inst.operands[j].issingle))
12372
                matches = 0;
12373
              break;
12374
 
12375
            case SE_R:
12376
              if (!(inst.operands[j].isreg
12377
                    && !inst.operands[j].isvec))
12378
                matches = 0;
12379
              break;
12380
 
12381
            case SE_Q:
12382
              if (!(inst.operands[j].isreg
12383
                    && inst.operands[j].isvec
12384
                    && inst.operands[j].isquad
12385
                    && !inst.operands[j].issingle))
12386
                matches = 0;
12387
              break;
12388
 
12389
            case SE_I:
12390
              if (!(!inst.operands[j].isreg
12391
                    && !inst.operands[j].isscalar))
12392
                matches = 0;
12393
              break;
12394
 
12395
            case SE_S:
12396
              if (!(!inst.operands[j].isreg
12397
                    && inst.operands[j].isscalar))
12398
                matches = 0;
12399
              break;
12400
 
12401
            case SE_L:
12402
              break;
12403
            }
12404
          if (!matches)
12405
            break;
12406
        }
12407
      if (matches)
12408
        break;
12409
    }
12410
 
12411
  va_end (ap);
12412
 
12413
  if (shape == NS_NULL && first_shape != NS_NULL)
12414
    first_error (_("invalid instruction shape"));
12415
 
12416
  return shape;
12417
}
12418
 
12419
/* True if SHAPE is predominantly a quadword operation (most of the time, this
12420
   means the Q bit should be set).  */
12421
 
12422
static int
12423
neon_quad (enum neon_shape shape)
12424
{
12425
  return neon_shape_class[shape] == SC_QUAD;
12426
}
12427
 
12428
static void
12429
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
12430
                       unsigned *g_size)
12431
{
12432
  /* Allow modification to be made to types which are constrained to be
12433
     based on the key element, based on bits set alongside N_EQK.  */
12434
  if ((typebits & N_EQK) != 0)
12435
    {
12436
      if ((typebits & N_HLF) != 0)
12437
        *g_size /= 2;
12438
      else if ((typebits & N_DBL) != 0)
12439
        *g_size *= 2;
12440
      if ((typebits & N_SGN) != 0)
12441
        *g_type = NT_signed;
12442
      else if ((typebits & N_UNS) != 0)
12443
        *g_type = NT_unsigned;
12444
      else if ((typebits & N_INT) != 0)
12445
        *g_type = NT_integer;
12446
      else if ((typebits & N_FLT) != 0)
12447
        *g_type = NT_float;
12448
      else if ((typebits & N_SIZ) != 0)
12449
        *g_type = NT_untyped;
12450
    }
12451
}
12452
 
12453
/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
12454
   operand type, i.e. the single type specified in a Neon instruction when it
12455
   is the only one given.  */
12456
 
12457
static struct neon_type_el
12458
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
12459
{
12460
  struct neon_type_el dest = *key;
12461
 
12462
  gas_assert ((thisarg & N_EQK) != 0);
12463
 
12464
  neon_modify_type_size (thisarg, &dest.type, &dest.size);
12465
 
12466
  return dest;
12467
}
12468
 
12469
/* Convert Neon type and size into compact bitmask representation.  */
12470
 
12471
static enum neon_type_mask
12472
type_chk_of_el_type (enum neon_el_type type, unsigned size)
12473
{
12474
  switch (type)
12475
    {
12476
    case NT_untyped:
12477
      switch (size)
12478
        {
12479
        case 8:  return N_8;
12480
        case 16: return N_16;
12481
        case 32: return N_32;
12482
        case 64: return N_64;
12483
        default: ;
12484
        }
12485
      break;
12486
 
12487
    case NT_integer:
12488
      switch (size)
12489
        {
12490
        case 8:  return N_I8;
12491
        case 16: return N_I16;
12492
        case 32: return N_I32;
12493
        case 64: return N_I64;
12494
        default: ;
12495
        }
12496
      break;
12497
 
12498
    case NT_float:
12499
      switch (size)
12500
        {
12501
        case 16: return N_F16;
12502
        case 32: return N_F32;
12503
        case 64: return N_F64;
12504
        default: ;
12505
        }
12506
      break;
12507
 
12508
    case NT_poly:
12509
      switch (size)
12510
        {
12511
        case 8:  return N_P8;
12512
        case 16: return N_P16;
12513
        default: ;
12514
        }
12515
      break;
12516
 
12517
    case NT_signed:
12518
      switch (size)
12519
        {
12520
        case 8:  return N_S8;
12521
        case 16: return N_S16;
12522
        case 32: return N_S32;
12523
        case 64: return N_S64;
12524
        default: ;
12525
        }
12526
      break;
12527
 
12528
    case NT_unsigned:
12529
      switch (size)
12530
        {
12531
        case 8:  return N_U8;
12532
        case 16: return N_U16;
12533
        case 32: return N_U32;
12534
        case 64: return N_U64;
12535
        default: ;
12536
        }
12537
      break;
12538
 
12539
    default: ;
12540
    }
12541
 
12542
  return N_UTYP;
12543
}
12544
 
12545
/* Convert compact Neon bitmask type representation to a type and size. Only
12546
   handles the case where a single bit is set in the mask.  */
12547
 
12548
static int
12549
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
12550
                     enum neon_type_mask mask)
12551
{
12552
  if ((mask & N_EQK) != 0)
12553
    return FAIL;
12554
 
12555
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
12556
    *size = 8;
12557
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
12558
    *size = 16;
12559
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
12560
    *size = 32;
12561
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
12562
    *size = 64;
12563
  else
12564
    return FAIL;
12565
 
12566
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
12567
    *type = NT_signed;
12568
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
12569
    *type = NT_unsigned;
12570
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
12571
    *type = NT_integer;
12572
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
12573
    *type = NT_untyped;
12574
  else if ((mask & (N_P8 | N_P16)) != 0)
12575
    *type = NT_poly;
12576
  else if ((mask & (N_F32 | N_F64)) != 0)
12577
    *type = NT_float;
12578
  else
12579
    return FAIL;
12580
 
12581
  return SUCCESS;
12582
}
12583
 
12584
/* Modify a bitmask of allowed types. This is only needed for type
12585
   relaxation.  */
12586
 
12587
static unsigned
12588
modify_types_allowed (unsigned allowed, unsigned mods)
12589
{
12590
  unsigned size;
12591
  enum neon_el_type type;
12592
  unsigned destmask;
12593
  int i;
12594
 
12595
  destmask = 0;
12596
 
12597
  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
12598
    {
12599
      if (el_type_of_type_chk (&type, &size,
12600
                               (enum neon_type_mask) (allowed & i)) == SUCCESS)
12601
        {
12602
          neon_modify_type_size (mods, &type, &size);
12603
          destmask |= type_chk_of_el_type (type, size);
12604
        }
12605
    }
12606
 
12607
  return destmask;
12608
}
12609
 
12610
/* Check type and return type classification.
12611
   The manual states (paraphrase): If one datatype is given, it indicates the
12612
   type given in:
12613
    - the second operand, if there is one
12614
    - the operand, if there is no second operand
12615
    - the result, if there are no operands.
12616
   This isn't quite good enough though, so we use a concept of a "key" datatype
12617
   which is set on a per-instruction basis, which is the one which matters when
12618
   only one data type is written.
12619
   Note: this function has side-effects (e.g. filling in missing operands). All
12620
   Neon instructions should call it before performing bit encoding.  */
12621
 
12622
static struct neon_type_el
12623
neon_check_type (unsigned els, enum neon_shape ns, ...)
12624
{
12625
  va_list ap;
12626
  unsigned i, pass, key_el = 0;
12627
  unsigned types[NEON_MAX_TYPE_ELS];
12628
  enum neon_el_type k_type = NT_invtype;
12629
  unsigned k_size = -1u;
12630
  struct neon_type_el badtype = {NT_invtype, -1};
12631
  unsigned key_allowed = 0;
12632
 
12633
  /* Optional registers in Neon instructions are always (not) in operand 1.
12634
     Fill in the missing operand here, if it was omitted.  */
12635
  if (els > 1 && !inst.operands[1].present)
12636
    inst.operands[1] = inst.operands[0];
12637
 
12638
  /* Suck up all the varargs.  */
12639
  va_start (ap, ns);
12640
  for (i = 0; i < els; i++)
12641
    {
12642
      unsigned thisarg = va_arg (ap, unsigned);
12643
      if (thisarg == N_IGNORE_TYPE)
12644
        {
12645
          va_end (ap);
12646
          return badtype;
12647
        }
12648
      types[i] = thisarg;
12649
      if ((thisarg & N_KEY) != 0)
12650
        key_el = i;
12651
    }
12652
  va_end (ap);
12653
 
12654
  if (inst.vectype.elems > 0)
12655
    for (i = 0; i < els; i++)
12656
      if (inst.operands[i].vectype.type != NT_invtype)
12657
        {
12658
          first_error (_("types specified in both the mnemonic and operands"));
12659
          return badtype;
12660
        }
12661
 
12662
  /* Duplicate inst.vectype elements here as necessary.
12663
     FIXME: No idea if this is exactly the same as the ARM assembler,
12664
     particularly when an insn takes one register and one non-register
12665
     operand. */
12666
  if (inst.vectype.elems == 1 && els > 1)
12667
    {
12668
      unsigned j;
12669
      inst.vectype.elems = els;
12670
      inst.vectype.el[key_el] = inst.vectype.el[0];
12671
      for (j = 0; j < els; j++)
12672
        if (j != key_el)
12673
          inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12674
                                                  types[j]);
12675
    }
12676
  else if (inst.vectype.elems == 0 && els > 0)
12677
    {
12678
      unsigned j;
12679
      /* No types were given after the mnemonic, so look for types specified
12680
         after each operand. We allow some flexibility here; as long as the
12681
         "key" operand has a type, we can infer the others.  */
12682
      for (j = 0; j < els; j++)
12683
        if (inst.operands[j].vectype.type != NT_invtype)
12684
          inst.vectype.el[j] = inst.operands[j].vectype;
12685
 
12686
      if (inst.operands[key_el].vectype.type != NT_invtype)
12687
        {
12688
          for (j = 0; j < els; j++)
12689
            if (inst.operands[j].vectype.type == NT_invtype)
12690
              inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12691
                                                      types[j]);
12692
        }
12693
      else
12694
        {
12695
          first_error (_("operand types can't be inferred"));
12696
          return badtype;
12697
        }
12698
    }
12699
  else if (inst.vectype.elems != els)
12700
    {
12701
      first_error (_("type specifier has the wrong number of parts"));
12702
      return badtype;
12703
    }
12704
 
12705
  for (pass = 0; pass < 2; pass++)
12706
    {
12707
      for (i = 0; i < els; i++)
12708
        {
12709
          unsigned thisarg = types[i];
12710
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
12711
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
12712
          enum neon_el_type g_type = inst.vectype.el[i].type;
12713
          unsigned g_size = inst.vectype.el[i].size;
12714
 
12715
          /* Decay more-specific signed & unsigned types to sign-insensitive
12716
             integer types if sign-specific variants are unavailable.  */
12717
          if ((g_type == NT_signed || g_type == NT_unsigned)
12718
              && (types_allowed & N_SU_ALL) == 0)
12719
            g_type = NT_integer;
12720
 
12721
          /* If only untyped args are allowed, decay any more specific types to
12722
             them. Some instructions only care about signs for some element
12723
             sizes, so handle that properly.  */
12724
          if ((g_size == 8 && (types_allowed & N_8) != 0)
12725
              || (g_size == 16 && (types_allowed & N_16) != 0)
12726
              || (g_size == 32 && (types_allowed & N_32) != 0)
12727
              || (g_size == 64 && (types_allowed & N_64) != 0))
12728
            g_type = NT_untyped;
12729
 
12730
          if (pass == 0)
12731
            {
12732
              if ((thisarg & N_KEY) != 0)
12733
                {
12734
                  k_type = g_type;
12735
                  k_size = g_size;
12736
                  key_allowed = thisarg & ~N_KEY;
12737
                }
12738
            }
12739
          else
12740
            {
12741
              if ((thisarg & N_VFP) != 0)
12742
                {
12743
                  enum neon_shape_el regshape;
12744
                  unsigned regwidth, match;
12745
 
12746
                  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
12747
                  if (ns == NS_NULL)
12748
                    {
12749
                      first_error (_("invalid instruction shape"));
12750
                      return badtype;
12751
                    }
12752
                  regshape = neon_shape_tab[ns].el[i];
12753
                  regwidth = neon_shape_el_size[regshape];
12754
 
12755
                  /* In VFP mode, operands must match register widths. If we
12756
                     have a key operand, use its width, else use the width of
12757
                     the current operand.  */
12758
                  if (k_size != -1u)
12759
                    match = k_size;
12760
                  else
12761
                    match = g_size;
12762
 
12763
                  if (regwidth != match)
12764
                    {
12765
                      first_error (_("operand size must match register width"));
12766
                      return badtype;
12767
                    }
12768
                }
12769
 
12770
              if ((thisarg & N_EQK) == 0)
12771
                {
12772
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);
12773
 
12774
                  if ((given_type & types_allowed) == 0)
12775
                    {
12776
                      first_error (_("bad type in Neon instruction"));
12777
                      return badtype;
12778
                    }
12779
                }
12780
              else
12781
                {
12782
                  enum neon_el_type mod_k_type = k_type;
12783
                  unsigned mod_k_size = k_size;
12784
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
12785
                  if (g_type != mod_k_type || g_size != mod_k_size)
12786
                    {
12787
                      first_error (_("inconsistent types in Neon instruction"));
12788
                      return badtype;
12789
                    }
12790
                }
12791
            }
12792
        }
12793
    }
12794
 
12795
  return inst.vectype.el[key_el];
12796
}
12797
 
12798
/* Neon-style VFP instruction forwarding.  */
12799
 
12800
/* Thumb VFP instructions have 0xE in the condition field.  */
12801
 
12802
static void
12803
do_vfp_cond_or_thumb (void)
12804
{
12805
  inst.is_neon = 1;
12806
 
12807
  if (thumb_mode)
12808
    inst.instruction |= 0xe0000000;
12809
  else
12810
    inst.instruction |= inst.cond << 28;
12811
}
12812
 
12813
/* Look up and encode a simple mnemonic, for use as a helper function for the
12814
   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
12815
   etc.  It is assumed that operand parsing has already been done, and that the
12816
   operands are in the form expected by the given opcode (this isn't necessarily
12817
   the same as the form in which they were parsed, hence some massaging must
12818
   take place before this function is called).
12819
   Checks current arch version against that in the looked-up opcode.  */
12820
 
12821
static void
12822
do_vfp_nsyn_opcode (const char *opname)
12823
{
12824
  const struct asm_opcode *opcode;
12825
 
12826
  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
12827
 
12828
  if (!opcode)
12829
    abort ();
12830
 
12831
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
12832
                thumb_mode ? *opcode->tvariant : *opcode->avariant),
12833
              _(BAD_FPU));
12834
 
12835
  inst.is_neon = 1;
12836
 
12837
  if (thumb_mode)
12838
    {
12839
      inst.instruction = opcode->tvalue;
12840
      opcode->tencode ();
12841
    }
12842
  else
12843
    {
12844
      inst.instruction = (inst.cond << 28) | opcode->avalue;
12845
      opcode->aencode ();
12846
    }
12847
}
12848
 
12849
static void
12850
do_vfp_nsyn_add_sub (enum neon_shape rs)
12851
{
12852
  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
12853
 
12854
  if (rs == NS_FFF)
12855
    {
12856
      if (is_add)
12857
        do_vfp_nsyn_opcode ("fadds");
12858
      else
12859
        do_vfp_nsyn_opcode ("fsubs");
12860
    }
12861
  else
12862
    {
12863
      if (is_add)
12864
        do_vfp_nsyn_opcode ("faddd");
12865
      else
12866
        do_vfp_nsyn_opcode ("fsubd");
12867
    }
12868
}
12869
 
12870
/* Check operand types to see if this is a VFP instruction, and if so call
12871
   PFN ().  */
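/* It returns SUCCESS if the VFP form was encoded; otherwise it clears
   inst.error and returns FAIL so that the caller can try another encoding.  */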
12872
 
12873
static int
12874
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
12875
{
12876
  enum neon_shape rs;
12877
  struct neon_type_el et;
12878
 
12879
  switch (args)
12880
    {
12881
    case 2:
12882
      rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12883
      et = neon_check_type (2, rs,
12884
        N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12885
      break;
12886
 
12887
    case 3:
12888
      rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12889
      et = neon_check_type (3, rs,
12890
        N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12891
      break;
12892
 
12893
    default:
12894
      abort ();
12895
    }
12896
 
12897
  if (et.type != NT_invtype)
12898
    {
12899
      pfn (rs);
12900
      return SUCCESS;
12901
    }
12902
 
12903
  inst.error = NULL;
12904
  return FAIL;
12905
}
12906
 
12907
static void
12908
do_vfp_nsyn_mla_mls (enum neon_shape rs)
12909
{
12910
  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
12911
 
12912
  if (rs == NS_FFF)
12913
    {
12914
      if (is_mla)
12915
        do_vfp_nsyn_opcode ("fmacs");
12916
      else
12917
        do_vfp_nsyn_opcode ("fnmacs");
12918
    }
12919
  else
12920
    {
12921
      if (is_mla)
12922
        do_vfp_nsyn_opcode ("fmacd");
12923
      else
12924
        do_vfp_nsyn_opcode ("fnmacd");
12925
    }
12926
}
12927
 
12928
static void
12929
do_vfp_nsyn_fma_fms (enum neon_shape rs)
12930
{
12931
  int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
12932
 
12933
  if (rs == NS_FFF)
12934
    {
12935
      if (is_fma)
12936
        do_vfp_nsyn_opcode ("ffmas");
12937
      else
12938
        do_vfp_nsyn_opcode ("ffnmas");
12939
    }
12940
  else
12941
    {
12942
      if (is_fma)
12943
        do_vfp_nsyn_opcode ("ffmad");
12944
      else
12945
        do_vfp_nsyn_opcode ("ffnmad");
12946
    }
12947
}
12948
 
12949
static void
12950
do_vfp_nsyn_mul (enum neon_shape rs)
12951
{
12952
  if (rs == NS_FFF)
12953
    do_vfp_nsyn_opcode ("fmuls");
12954
  else
12955
    do_vfp_nsyn_opcode ("fmuld");
12956
}
12957
 
12958
static void
12959
do_vfp_nsyn_abs_neg (enum neon_shape rs)
12960
{
12961
  int is_neg = (inst.instruction & 0x80) != 0;
12962
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
12963
 
12964
  if (rs == NS_FF)
12965
    {
12966
      if (is_neg)
12967
        do_vfp_nsyn_opcode ("fnegs");
12968
      else
12969
        do_vfp_nsyn_opcode ("fabss");
12970
    }
12971
  else
12972
    {
12973
      if (is_neg)
12974
        do_vfp_nsyn_opcode ("fnegd");
12975
      else
12976
        do_vfp_nsyn_opcode ("fabsd");
12977
    }
12978
}
12979
 
12980
/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12981
   insns belong to Neon, and are handled elsewhere.  */
12982
 
12983
static void
12984
do_vfp_nsyn_ldm_stm (int is_dbmode)
12985
{
12986
  int is_ldm = (inst.instruction & (1 << 20)) != 0;
12987
  if (is_ldm)
12988
    {
12989
      if (is_dbmode)
12990
        do_vfp_nsyn_opcode ("fldmdbs");
12991
      else
12992
        do_vfp_nsyn_opcode ("fldmias");
12993
    }
12994
  else
12995
    {
12996
      if (is_dbmode)
12997
        do_vfp_nsyn_opcode ("fstmdbs");
12998
      else
12999
        do_vfp_nsyn_opcode ("fstmias");
13000
    }
13001
}
13002
 
13003
static void
13004
do_vfp_nsyn_sqrt (void)
13005
{
13006
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13007
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13008
 
13009
  if (rs == NS_FF)
13010
    do_vfp_nsyn_opcode ("fsqrts");
13011
  else
13012
    do_vfp_nsyn_opcode ("fsqrtd");
13013
}
13014
 
13015
static void
13016
do_vfp_nsyn_div (void)
13017
{
13018
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13019
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13020
    N_F32 | N_F64 | N_KEY | N_VFP);
13021
 
13022
  if (rs == NS_FFF)
13023
    do_vfp_nsyn_opcode ("fdivs");
13024
  else
13025
    do_vfp_nsyn_opcode ("fdivd");
13026
}
13027
 
13028
static void
13029
do_vfp_nsyn_nmul (void)
13030
{
13031
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13032
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13033
    N_F32 | N_F64 | N_KEY | N_VFP);
13034
 
13035
  if (rs == NS_FFF)
13036
    {
13037
      NEON_ENCODE (SINGLE, inst);
13038
      do_vfp_sp_dyadic ();
13039
    }
13040
  else
13041
    {
13042
      NEON_ENCODE (DOUBLE, inst);
13043
      do_vfp_dp_rd_rn_rm ();
13044
    }
13045
  do_vfp_cond_or_thumb ();
13046
}
13047
 
13048
static void
13049
do_vfp_nsyn_cmp (void)
13050
{
13051
  if (inst.operands[1].isreg)
13052
    {
13053
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13054
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13055
 
13056
      if (rs == NS_FF)
13057
        {
13058
          NEON_ENCODE (SINGLE, inst);
13059
          do_vfp_sp_monadic ();
13060
        }
13061
      else
13062
        {
13063
          NEON_ENCODE (DOUBLE, inst);
13064
          do_vfp_dp_rd_rm ();
13065
        }
13066
    }
13067
  else
13068
    {
13069
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13070
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13071
 
13072
      switch (inst.instruction & 0x0fffffff)
13073
        {
13074
        case N_MNEM_vcmp:
13075
          inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13076
          break;
13077
        case N_MNEM_vcmpe:
13078
          inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13079
          break;
13080
        default:
13081
          abort ();
13082
        }
13083
 
13084
      if (rs == NS_FI)
13085
        {
13086
          NEON_ENCODE (SINGLE, inst);
13087
          do_vfp_sp_compare_z ();
13088
        }
13089
      else
13090
        {
13091
          NEON_ENCODE (DOUBLE, inst);
13092
          do_vfp_dp_rd ();
13093
        }
13094
    }
13095
  do_vfp_cond_or_thumb ();
13096
}
13097
 
13098
static void
13099
nsyn_insert_sp (void)
13100
{
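  /* Make the operand list look like "sp!, <original first operand>" so that
     the push/pop handlers below can reuse the fldm/fstm encodings.  */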
13101
  inst.operands[1] = inst.operands[0];
13102
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13103
  inst.operands[0].reg = REG_SP;
13104
  inst.operands[0].isreg = 1;
13105
  inst.operands[0].writeback = 1;
13106
  inst.operands[0].present = 1;
13107
}
13108
 
13109
static void
13110
do_vfp_nsyn_push (void)
13111
{
13112
  nsyn_insert_sp ();
13113
  if (inst.operands[1].issingle)
13114
    do_vfp_nsyn_opcode ("fstmdbs");
13115
  else
13116
    do_vfp_nsyn_opcode ("fstmdbd");
13117
}
13118
 
13119
static void
13120
do_vfp_nsyn_pop (void)
13121
{
13122
  nsyn_insert_sp ();
13123
  if (inst.operands[1].issingle)
13124
    do_vfp_nsyn_opcode ("fldmias");
13125
  else
13126
    do_vfp_nsyn_opcode ("fldmiad");
13127
}
13128
 
13129
/* Fix up Neon data-processing instructions, ORing in the correct bits for
13130
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
13131
 
13132
static void
13133
neon_dp_fixup (struct arm_it* insn)
13134
{
13135
  unsigned int i = insn->instruction;
13136
  insn->is_neon = 1;
13137
 
13138
  if (thumb_mode)
13139
    {
13140
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
13141
      if (i & (1 << 24))
13142
        i |= 1 << 28;
13143
 
13144
      i &= ~(1 << 24);
13145
 
13146
      i |= 0xef000000;
13147
    }
13148
  else
13149
    i |= 0xf2000000;
13150
 
13151
  insn->instruction = i;
13152
}
13153
 
13154
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
13155
   (0, 1, 2, 3).  */
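/* For example, neon_logbits (16) is 1 and neon_logbits (64) is 3.  */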
13156
 
13157
static unsigned
13158
neon_logbits (unsigned x)
13159
{
13160
  return ffs (x) - 4;
13161
}
13162
 
13163
#define LOW4(R) ((R) & 0xf)
13164
#define HI1(R) (((R) >> 4) & 1)
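/* The 5-bit Neon register fields in the instruction are split: LOW4 gives the
   low four bits (Vd/Vn/Vm) and HI1 the top bit (D/N/M).  */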
13165
 
13166
/* Encode insns with bit pattern:
13167
 
13168
  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
13169
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
13170
 
13171
  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13172
  different meaning for some instruction.  */
13173
 
13174
static void
13175
neon_three_same (int isquad, int ubit, int size)
13176
{
13177
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13178
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13179
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13180
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13181
  inst.instruction |= LOW4 (inst.operands[2].reg);
13182
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13183
  inst.instruction |= (isquad != 0) << 6;
13184
  inst.instruction |= (ubit != 0) << 24;
13185
  if (size != -1)
13186
    inst.instruction |= neon_logbits (size) << 20;
13187
 
13188
  neon_dp_fixup (&inst);
13189
}
13190
 
13191
/* Encode instructions of the form:
13192
 
13193
  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
13194
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
13195
 
13196
  Don't write size if SIZE == -1.  */
13197
 
13198
static void
13199
neon_two_same (int qbit, int ubit, int size)
13200
{
13201
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13202
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13203
  inst.instruction |= LOW4 (inst.operands[1].reg);
13204
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13205
  inst.instruction |= (qbit != 0) << 6;
13206
  inst.instruction |= (ubit != 0) << 24;
13207
 
13208
  if (size != -1)
13209
    inst.instruction |= neon_logbits (size) << 18;
13210
 
13211
  neon_dp_fixup (&inst);
13212
}
13213
 
13214
/* Neon instruction encoders, in approximate order of appearance.  */
13215
 
13216
static void
13217
do_neon_dyadic_i_su (void)
13218
{
13219
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13220
  struct neon_type_el et = neon_check_type (3, rs,
13221
    N_EQK, N_EQK, N_SU_32 | N_KEY);
13222
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13223
}
13224
 
13225
static void
13226
do_neon_dyadic_i64_su (void)
13227
{
13228
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13229
  struct neon_type_el et = neon_check_type (3, rs,
13230
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
13231
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13232
}
13233
 
13234
static void
13235
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
13236
                unsigned immbits)
13237
{
13238
  unsigned size = et.size >> 3;
13239
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13240
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13241
  inst.instruction |= LOW4 (inst.operands[1].reg);
13242
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13243
  inst.instruction |= (isquad != 0) << 6;
13244
  inst.instruction |= immbits << 16;
13245
  inst.instruction |= (size >> 3) << 7;
13246
  inst.instruction |= (size & 0x7) << 19;
13247
  if (write_ubit)
13248
    inst.instruction |= (uval != 0) << 24;
13249
 
13250
  neon_dp_fixup (&inst);
13251
}
13252
 
13253
static void
13254
do_neon_shl_imm (void)
13255
{
13256
  if (!inst.operands[2].isreg)
13257
    {
13258
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13259
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
13260
      NEON_ENCODE (IMMED, inst);
13261
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
13262
    }
13263
  else
13264
    {
13265
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13266
      struct neon_type_el et = neon_check_type (3, rs,
13267
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13268
      unsigned int tmp;
13269
 
13270
      /* VSHL/VQSHL 3-register variants have syntax such as:
13271
           vshl.xx Dd, Dm, Dn
13272
         whereas other 3-register operations encoded by neon_three_same have
13273
         syntax like:
13274
           vadd.xx Dd, Dn, Dm
13275
         (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
13276
         here.  */
13277
      tmp = inst.operands[2].reg;
13278
      inst.operands[2].reg = inst.operands[1].reg;
13279
      inst.operands[1].reg = tmp;
13280
      NEON_ENCODE (INTEGER, inst);
13281
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13282
    }
13283
}
13284
 
13285
static void
13286
do_neon_qshl_imm (void)
13287
{
13288
  if (!inst.operands[2].isreg)
13289
    {
13290
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13291
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13292
 
13293
      NEON_ENCODE (IMMED, inst);
13294
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13295
                      inst.operands[2].imm);
13296
    }
13297
  else
13298
    {
13299
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13300
      struct neon_type_el et = neon_check_type (3, rs,
13301
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13302
      unsigned int tmp;
13303
 
13304
      /* See note in do_neon_shl_imm.  */
13305
      tmp = inst.operands[2].reg;
13306
      inst.operands[2].reg = inst.operands[1].reg;
13307
      inst.operands[1].reg = tmp;
13308
      NEON_ENCODE (INTEGER, inst);
13309
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13310
    }
13311
}
13312
 
13313
static void
13314
do_neon_rshl (void)
13315
{
13316
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13317
  struct neon_type_el et = neon_check_type (3, rs,
13318
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
13319
  unsigned int tmp;
13320
 
13321
  tmp = inst.operands[2].reg;
13322
  inst.operands[2].reg = inst.operands[1].reg;
13323
  inst.operands[1].reg = tmp;
13324
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13325
}
13326
 
13327
static int
13328
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
13329
{
13330
  /* Handle .I8 pseudo-instructions.  */
13331
  if (size == 8)
13332
    {
13333
      /* Unfortunately, this will make everything apart from zero out-of-range.
13334
         FIXME: is this the intended semantics?  There doesn't seem much point in

13335
         accepting .I8 if so.  */
13336
      immediate |= immediate << 8;
13337
      size = 16;
13338
    }
13339
 
13340
  if (size >= 32)
13341
    {
13342
      if (immediate == (immediate & 0x000000ff))
13343
        {
13344
          *immbits = immediate;
13345
          return 0x1;
13346
        }
13347
      else if (immediate == (immediate & 0x0000ff00))
13348
        {
13349
          *immbits = immediate >> 8;
13350
          return 0x3;
13351
        }
13352
      else if (immediate == (immediate & 0x00ff0000))
13353
        {
13354
          *immbits = immediate >> 16;
13355
          return 0x5;
13356
        }
13357
      else if (immediate == (immediate & 0xff000000))
13358
        {
13359
          *immbits = immediate >> 24;
13360
          return 0x7;
13361
        }
13362
      if ((immediate & 0xffff) != (immediate >> 16))
13363
        goto bad_immediate;
13364
      immediate &= 0xffff;
13365
    }
13366
 
13367
  if (immediate == (immediate & 0x000000ff))
13368
    {
13369
      *immbits = immediate;
13370
      return 0x9;
13371
    }
13372
  else if (immediate == (immediate & 0x0000ff00))
13373
    {
13374
      *immbits = immediate >> 8;
13375
      return 0xb;
13376
    }
13377
 
13378
  bad_immediate:
13379
  first_error (_("immediate value out of range"));
13380
  return FAIL;
13381
}
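
/* Worked examples (illustrative only): with SIZE == 16 and IMMEDIATE 0xab00
   the second halfword test matches, so *IMMBITS becomes 0xab and the return
   value is cmode 0xb.  With SIZE == 32 and IMMEDIATE 0x00340034 no single
   byte of the 32-bit value matches, but the two halfwords are equal, so the
   value is reduced to 0x0034 and falls through to the 16-bit tests, giving
   *IMMBITS == 0x34 and cmode 0x9.  */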
13382
 
13383
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
13384
   A, B, C, D.  */
13385
 
13386
static int
13387
neon_bits_same_in_bytes (unsigned imm)
13388
{
13389
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
13390
         && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
13391
         && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
13392
         && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
13393
}
13394
 
13395
/* For immediate of above form, return 0bABCD.  */
13396
 
13397
static unsigned
13398
neon_squash_bits (unsigned imm)
13399
{
13400
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
13401
         | ((imm & 0x01000000) >> 21);
13402
}
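
/* Illustrative check of the two helpers above (not used by the assembler).  */
#if 0
#include <assert.h>

static void
ex_squash_check (void)
{
  /* 0x00ff00ff: every byte is 0x00 or 0xff, and squashing them byte-wise
     (A B C D == 0 1 0 1) yields 0b0101.  */
  assert (neon_bits_same_in_bytes (0x00ff00ff));
  assert (neon_squash_bits (0x00ff00ff) == 0x5);
  /* 0x12ff00ff fails because its top byte is neither 0x00 nor 0xff.  */
  assert (!neon_bits_same_in_bytes (0x12ff00ff));
}
#endif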
13403
 
13404
/* Compress quarter-float representation to 0b...000 abcdefgh.  */
13405
 
13406
static unsigned
13407
neon_qfloat_bits (unsigned imm)
13408
{
13409
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
13410
}
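
/* Worked example (illustrative only): 1.0f has the IEEE single encoding
   0x3f800000, so ((0x3f800000 >> 19) & 0x7f) == 0x70 and
   ((0x3f800000 >> 24) & 0x80) == 0, giving the 8-bit immediate 0x70.
   is_quarter_float (defined elsewhere in this file and used by
   neon_cmode_for_move_imm below) checks beforehand that a bit pattern is
   representable in this form at all.  */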
13411
 
13412
/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
13413
   the instruction. *OP is passed as the initial value of the op field, and
13414
   may be set to a different value depending on the constant (i.e.
13415
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
13416
   MVN).  If the immediate looks like a repeated pattern then also
13417
   try smaller element sizes.  */
13418
 
13419
static int
13420
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
13421
                         unsigned *immbits, int *op, int size,
13422
                         enum neon_el_type type)
13423
{
13424
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
13425
     float.  */
13426
  if (type == NT_float && !float_p)
13427
    return FAIL;
13428
 
13429
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
13430
    {
13431
      if (size != 32 || *op == 1)
13432
        return FAIL;
13433
      *immbits = neon_qfloat_bits (immlo);
13434
      return 0xf;
13435
    }
13436
 
13437
  if (size == 64)
13438
    {
13439
      if (neon_bits_same_in_bytes (immhi)
13440
          && neon_bits_same_in_bytes (immlo))
13441
        {
13442
          if (*op == 1)
13443
            return FAIL;
13444
          *immbits = (neon_squash_bits (immhi) << 4)
13445
                     | neon_squash_bits (immlo);
13446
          *op = 1;
13447
          return 0xe;
13448
        }
13449
 
13450
      if (immhi != immlo)
13451
        return FAIL;
13452
    }
13453
 
13454
  if (size >= 32)
13455
    {
13456
      if (immlo == (immlo & 0x000000ff))
13457
        {
13458
          *immbits = immlo;
13459
          return 0x0;
13460
        }
13461
      else if (immlo == (immlo & 0x0000ff00))
13462
        {
13463
          *immbits = immlo >> 8;
13464
          return 0x2;
13465
        }
13466
      else if (immlo == (immlo & 0x00ff0000))
13467
        {
13468
          *immbits = immlo >> 16;
13469
          return 0x4;
13470
        }
13471
      else if (immlo == (immlo & 0xff000000))
13472
        {
13473
          *immbits = immlo >> 24;
13474
          return 0x6;
13475
        }
13476
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
13477
        {
13478
          *immbits = (immlo >> 8) & 0xff;
13479
          return 0xc;
13480
        }
13481
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
13482
        {
13483
          *immbits = (immlo >> 16) & 0xff;
13484
          return 0xd;
13485
        }
13486
 
13487
      if ((immlo & 0xffff) != (immlo >> 16))
13488
        return FAIL;
13489
      immlo &= 0xffff;
13490
    }
13491
 
13492
  if (size >= 16)
13493
    {
13494
      if (immlo == (immlo & 0x000000ff))
13495
        {
13496
          *immbits = immlo;
13497
          return 0x8;
13498
        }
13499
      else if (immlo == (immlo & 0x0000ff00))
13500
        {
13501
          *immbits = immlo >> 8;
13502
          return 0xa;
13503
        }
13504
 
13505
      if ((immlo & 0xff) != (immlo >> 8))
13506
        return FAIL;
13507
      immlo &= 0xff;
13508
    }
13509
 
13510
  if (immlo == (immlo & 0x000000ff))
13511
    {
13512
      /* Don't allow MVN with 8-bit immediate.  */
13513
      if (*op == 1)
13514
        return FAIL;
13515
      *immbits = immlo;
13516
      return 0xe;
13517
    }
13518
 
13519
  return FAIL;
13520
}
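
/* Worked examples (illustrative only): a VMOV.I32 of 0x0000ab00 (*OP == 0)
   matches the second byte test in the size >= 32 block, so *IMMBITS becomes
   0xab and the return value is cmode 0x2.  A VMOV.F32 of 1.0 takes the
   quarter-float path instead and returns cmode 0xf with *IMMBITS == 0x70.  */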
13521
 
13522
/* Write immediate bits [7:0] to the following locations:
13523
 
13524
  |28/24|23     19|18 16|15                    4|3     0|
13525
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13526
 
13527
  This function is used by VMOV/VMVN/VORR/VBIC.  */
13528
 
13529
static void
13530
neon_write_immbits (unsigned immbits)
13531
{
13532
  inst.instruction |= immbits & 0xf;
13533
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
13534
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
13535
}
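
/* Worked example (illustrative only): IMMBITS == 0xab == 0b10101011 is
   scattered as efgh = 0xb into bits 3:0, bcd = 0b010 into bits 18:16 and
   a = 1 into bit 24, matching the a..h layout in the comment above.  */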
13536
 
13537
/* Invert low-order SIZE bits of XHI:XLO.  */
13538
 
13539
static void
13540
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
13541
{
13542
  unsigned immlo = xlo ? *xlo : 0;
13543
  unsigned immhi = xhi ? *xhi : 0;
13544
 
13545
  switch (size)
13546
    {
13547
    case 8:
13548
      immlo = (~immlo) & 0xff;
13549
      break;
13550
 
13551
    case 16:
13552
      immlo = (~immlo) & 0xffff;
13553
      break;
13554
 
13555
    case 64:
13556
      immhi = (~immhi) & 0xffffffff;
13557
      /* fall through.  */
13558
 
13559
    case 32:
13560
      immlo = (~immlo) & 0xffffffff;
13561
      break;
13562
 
13563
    default:
13564
      abort ();
13565
    }
13566
 
13567
  if (xlo)
13568
    *xlo = immlo;
13569
 
13570
  if (xhi)
13571
    *xhi = immhi;
13572
}
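
/* Worked example (illustrative only): with SIZE == 16 and *XLO == 0x00f0 the
   result is (~0x00f0) & 0xffff == 0xff0f; with SIZE == 64 both words are
   inverted.  do_neon_logic below uses this to implement the VAND/VORN
   immediate pseudo-instructions as VBIC/VORR of the inverted constant, and
   neon_move_immediate retries a failed VMOV immediate as VMVN (and vice
   versa).  */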
13573
 
13574
static void
13575
do_neon_logic (void)
13576
{
13577
  if (inst.operands[2].present && inst.operands[2].isreg)
13578
    {
13579
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13580
      neon_check_type (3, rs, N_IGNORE_TYPE);
13581
      /* U bit and size field were set as part of the bitmask.  */
13582
      NEON_ENCODE (INTEGER, inst);
13583
      neon_three_same (neon_quad (rs), 0, -1);
13584
    }
13585
  else
13586
    {
13587
      const int three_ops_form = (inst.operands[2].present
13588
                                  && !inst.operands[2].isreg);
13589
      const int immoperand = (three_ops_form ? 2 : 1);
13590
      enum neon_shape rs = (three_ops_form
13591
                            ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
13592
                            : neon_select_shape (NS_DI, NS_QI, NS_NULL));
13593
      struct neon_type_el et = neon_check_type (2, rs,
13594
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13595
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
13596
      unsigned immbits;
13597
      int cmode;
13598
 
13599
      if (et.type == NT_invtype)
13600
        return;
13601
 
13602
      if (three_ops_form)
13603
        constraint (inst.operands[0].reg != inst.operands[1].reg,
13604
                    _("first and second operands shall be the same register"));
13605
 
13606
      NEON_ENCODE (IMMED, inst);
13607
 
13608
      immbits = inst.operands[immoperand].imm;
13609
      if (et.size == 64)
13610
        {
13611
          /* .i64 is a pseudo-op, so the immediate must be a repeating
13612
             pattern.  */
13613
          if (immbits != (inst.operands[immoperand].regisimm ?
13614
                          inst.operands[immoperand].reg : 0))
13615
            {
13616
              /* Set immbits to an invalid constant.  */
13617
              immbits = 0xdeadbeef;
13618
            }
13619
        }
13620
 
13621
      switch (opcode)
13622
        {
13623
        case N_MNEM_vbic:
13624
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13625
          break;
13626
 
13627
        case N_MNEM_vorr:
13628
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13629
          break;
13630
 
13631
        case N_MNEM_vand:
13632
          /* Pseudo-instruction for VBIC.  */
13633
          neon_invert_size (&immbits, 0, et.size);
13634
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13635
          break;
13636
 
13637
        case N_MNEM_vorn:
13638
          /* Pseudo-instruction for VORR.  */
13639
          neon_invert_size (&immbits, 0, et.size);
13640
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13641
          break;
13642
 
13643
        default:
13644
          abort ();
13645
        }
13646
 
13647
      if (cmode == FAIL)
13648
        return;
13649
 
13650
      inst.instruction |= neon_quad (rs) << 6;
13651
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13652
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13653
      inst.instruction |= cmode << 8;
13654
      neon_write_immbits (immbits);
13655
 
13656
      neon_dp_fixup (&inst);
13657
    }
13658
}
13659
 
13660
static void
13661
do_neon_bitfield (void)
13662
{
13663
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13664
  neon_check_type (3, rs, N_IGNORE_TYPE);
13665
  neon_three_same (neon_quad (rs), 0, -1);
13666
}
13667
 
13668
static void
13669
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
13670
                  unsigned destbits)
13671
{
13672
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13673
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
13674
                                            types | N_KEY);
13675
  if (et.type == NT_float)
13676
    {
13677
      NEON_ENCODE (FLOAT, inst);
13678
      neon_three_same (neon_quad (rs), 0, -1);
13679
    }
13680
  else
13681
    {
13682
      NEON_ENCODE (INTEGER, inst);
13683
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
13684
    }
13685
}
13686
 
13687
static void
13688
do_neon_dyadic_if_su (void)
13689
{
13690
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13691
}
13692
 
13693
static void
13694
do_neon_dyadic_if_su_d (void)
13695
{
13696
  /* This version only allows D registers, but that constraint is enforced during
13697
     operand parsing so we don't need to do anything extra here.  */
13698
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13699
}
13700
 
13701
static void
13702
do_neon_dyadic_if_i_d (void)
13703
{
13704
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
13705
     affected if we specify unsigned args.  */
13706
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13707
}
13708
 
13709
enum vfp_or_neon_is_neon_bits
13710
{
13711
  NEON_CHECK_CC = 1,
13712
  NEON_CHECK_ARCH = 2
13713
};
13714
 
13715
/* Call this function if an instruction which may have belonged to the VFP or
13716
   Neon instruction sets, but turned out to be a Neon instruction (due to the
13717
   operand types involved, etc.). We have to check and/or fix-up a couple of
13718
   things:
13719
 
13720
     - Make sure the user hasn't attempted to make a Neon instruction
13721
       conditional.
13722
     - Alter the value in the condition code field if necessary.
13723
     - Make sure that the arch supports Neon instructions.
13724
 
13725
   Which of these operations take place depends on bits from enum
13726
   vfp_or_neon_is_neon_bits.
13727
 
13728
   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13729
   current instruction's condition is COND_ALWAYS, the condition field is
13730
   changed to inst.uncond_value. This is necessary because instructions shared
13731
   between VFP and Neon may be conditional for the VFP variants only, and the
13732
   unconditional Neon version must have, e.g., 0xF in the condition field.  */
13733
 
13734
static int
13735
vfp_or_neon_is_neon (unsigned check)
13736
{
13737
  /* Conditions are always legal in Thumb mode (IT blocks).  */
13738
  if (!thumb_mode && (check & NEON_CHECK_CC))
13739
    {
13740
      if (inst.cond != COND_ALWAYS)
13741
        {
13742
          first_error (_(BAD_COND));
13743
          return FAIL;
13744
        }
13745
      if (inst.uncond_value != -1)
13746
        inst.instruction |= inst.uncond_value << 28;
13747
    }
13748
 
13749
  if ((check & NEON_CHECK_ARCH)
13750
      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
13751
    {
13752
      first_error (_(BAD_FPU));
13753
      return FAIL;
13754
    }
13755
 
13756
  return SUCCESS;
13757
}
13758
 
13759
static void
13760
do_neon_addsub_if_i (void)
13761
{
13762
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
13763
    return;
13764
 
13765
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13766
    return;
13767
 
13768
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
13769
     affected if we specify unsigned args.  */
13770
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
13771
}
13772
 
13773
/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13774
   result to be:
13775
     V<op> A,B     (A is operand 0, B is operand 2)
13776
   to mean:
13777
     V<op> A,B,A
13778
   not:
13779
     V<op> A,B,B
13780
   so handle that case specially.  */
13781
 
13782
static void
13783
neon_exchange_operands (void)
13784
{
13785
  void *scratch = alloca (sizeof (inst.operands[0]));
13786
  if (inst.operands[1].present)
13787
    {
13788
      /* Swap operands[1] and operands[2].  */
13789
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13790
      inst.operands[1] = inst.operands[2];
13791
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
13792
    }
13793
  else
13794
    {
13795
      inst.operands[1] = inst.operands[2];
13796
      inst.operands[2] = inst.operands[0];
13797
    }
13798
}
13799
 
13800
static void
13801
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
13802
{
13803
  if (inst.operands[2].isreg)
13804
    {
13805
      if (invert)
13806
        neon_exchange_operands ();
13807
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
13808
    }
13809
  else
13810
    {
13811
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13812
      struct neon_type_el et = neon_check_type (2, rs,
13813
        N_EQK | N_SIZ, immtypes | N_KEY);
13814
 
13815
      NEON_ENCODE (IMMED, inst);
13816
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13817
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13818
      inst.instruction |= LOW4 (inst.operands[1].reg);
13819
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13820
      inst.instruction |= neon_quad (rs) << 6;
13821
      inst.instruction |= (et.type == NT_float) << 10;
13822
      inst.instruction |= neon_logbits (et.size) << 18;
13823
 
13824
      neon_dp_fixup (&inst);
13825
    }
13826
}
13827
 
13828
static void
13829
do_neon_cmp (void)
13830
{
13831
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
13832
}
13833
 
13834
static void
13835
do_neon_cmp_inv (void)
13836
{
13837
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
13838
}
13839
 
13840
static void
13841
do_neon_ceq (void)
13842
{
13843
  neon_compare (N_IF_32, N_IF_32, FALSE);
13844
}
13845
 
13846
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
13847
   scalars, which are encoded in 5 bits, M : Rm.
13848
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13849
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13850
   index in M.  */
13851
 
13852
static unsigned
13853
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
13854
{
13855
  unsigned regno = NEON_SCALAR_REG (scalar);
13856
  unsigned elno = NEON_SCALAR_INDEX (scalar);
13857
 
13858
  switch (elsize)
13859
    {
13860
    case 16:
13861
      if (regno > 7 || elno > 3)
13862
        goto bad_scalar;
13863
      return regno | (elno << 3);
13864
 
13865
    case 32:
13866
      if (regno > 15 || elno > 1)
13867
        goto bad_scalar;
13868
      return regno | (elno << 4);
13869
 
13870
    default:
13871
    bad_scalar:
13872
      first_error (_("scalar out of range for multiply instruction"));
13873
    }
13874
 
13875
  return 0;
13876
}
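
/* Worked examples (illustrative only): a 16-bit scalar in register 3,
   element 1 encodes as 3 | (1 << 3) == 0x0b; a 32-bit scalar in register 5,
   element 1 encodes as 5 | (1 << 4) == 0x15.  The 5-bit result is then split
   across the M bit and Rm by neon_mul_mac below.  */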
13877
 
13878
/* Encode multiply / multiply-accumulate scalar instructions.  */
13879
 
13880
static void
13881
neon_mul_mac (struct neon_type_el et, int ubit)
13882
{
13883
  unsigned scalar;
13884
 
13885
  /* Give a more helpful error message if we have an invalid type.  */
13886
  if (et.type == NT_invtype)
13887
    return;
13888
 
13889
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
13890
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13891
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13892
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13893
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13894
  inst.instruction |= LOW4 (scalar);
13895
  inst.instruction |= HI1 (scalar) << 5;
13896
  inst.instruction |= (et.type == NT_float) << 8;
13897
  inst.instruction |= neon_logbits (et.size) << 20;
13898
  inst.instruction |= (ubit != 0) << 24;
13899
 
13900
  neon_dp_fixup (&inst);
13901
}
13902
 
13903
static void
13904
do_neon_mac_maybe_scalar (void)
13905
{
13906
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
13907
    return;
13908
 
13909
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13910
    return;
13911
 
13912
  if (inst.operands[2].isscalar)
13913
    {
13914
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13915
      struct neon_type_el et = neon_check_type (3, rs,
13916
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
13917
      NEON_ENCODE (SCALAR, inst);
13918
      neon_mul_mac (et, neon_quad (rs));
13919
    }
13920
  else
13921
    {
13922
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
13923
         affected if we specify unsigned args.  */
13924
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13925
    }
13926
}
13927
 
13928
static void
13929
do_neon_fmac (void)
13930
{
13931
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
13932
    return;
13933
 
13934
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13935
    return;
13936
 
13937
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13938
}
13939
 
13940
static void
13941
do_neon_tst (void)
13942
{
13943
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13944
  struct neon_type_el et = neon_check_type (3, rs,
13945
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
13946
  neon_three_same (neon_quad (rs), 0, et.size);
13947
}
13948
 
13949
/* VMUL with 3 registers allows the P8 type. The scalar version supports the
13950
   same types as the MAC equivalents. The polynomial type for this instruction
13951
   is encoded the same as the integer type.  */
13952
 
13953
static void
13954
do_neon_mul (void)
13955
{
13956
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
13957
    return;
13958
 
13959
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13960
    return;
13961
 
13962
  if (inst.operands[2].isscalar)
13963
    do_neon_mac_maybe_scalar ();
13964
  else
13965
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
13966
}
13967
 
13968
static void
13969
do_neon_qdmulh (void)
13970
{
13971
  if (inst.operands[2].isscalar)
13972
    {
13973
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
13974
      struct neon_type_el et = neon_check_type (3, rs,
13975
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13976
      NEON_ENCODE (SCALAR, inst);
13977
      neon_mul_mac (et, neon_quad (rs));
13978
    }
13979
  else
13980
    {
13981
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13982
      struct neon_type_el et = neon_check_type (3, rs,
13983
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
13984
      NEON_ENCODE (INTEGER, inst);
13985
      /* The U bit (rounding) comes from bit mask.  */
13986
      neon_three_same (neon_quad (rs), 0, et.size);
13987
    }
13988
}
13989
 
13990
static void
13991
do_neon_fcmp_absolute (void)
13992
{
13993
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13994
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
13995
  /* Size field comes from bit mask.  */
13996
  neon_three_same (neon_quad (rs), 1, -1);
13997
}
13998
 
13999
static void
14000
do_neon_fcmp_absolute_inv (void)
14001
{
14002
  neon_exchange_operands ();
14003
  do_neon_fcmp_absolute ();
14004
}
14005
 
14006
static void
14007
do_neon_step (void)
14008
{
14009
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14010
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14011
  neon_three_same (neon_quad (rs), 0, -1);
14012
}
14013
 
14014
static void
14015
do_neon_abs_neg (void)
14016
{
14017
  enum neon_shape rs;
14018
  struct neon_type_el et;
14019
 
14020
  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14021
    return;
14022
 
14023
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14024
    return;
14025
 
14026
  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14027
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14028
 
14029
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14030
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14031
  inst.instruction |= LOW4 (inst.operands[1].reg);
14032
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14033
  inst.instruction |= neon_quad (rs) << 6;
14034
  inst.instruction |= (et.type == NT_float) << 10;
14035
  inst.instruction |= neon_logbits (et.size) << 18;
14036
 
14037
  neon_dp_fixup (&inst);
14038
}
14039
 
14040
static void
14041
do_neon_sli (void)
14042
{
14043
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14044
  struct neon_type_el et = neon_check_type (2, rs,
14045
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14046
  int imm = inst.operands[2].imm;
14047
  constraint (imm < 0 || (unsigned)imm >= et.size,
14048
              _("immediate out of range for insert"));
14049
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14050
}
14051
 
14052
static void
14053
do_neon_sri (void)
14054
{
14055
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14056
  struct neon_type_el et = neon_check_type (2, rs,
14057
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14058
  int imm = inst.operands[2].imm;
14059
  constraint (imm < 1 || (unsigned)imm > et.size,
14060
              _("immediate out of range for insert"));
14061
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14062
}
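
/* Note the complementary encodings above (illustrative summary): VSLI takes
   shifts of 0..size-1 and stores the shift count directly, whereas VSRI
   takes 1..size and stores et.size - imm, so a .32 insert by 8 is encoded
   as 8 for VSLI but as 24 for VSRI.  */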
14063
 
14064
static void
14065
do_neon_qshlu_imm (void)
14066
{
14067
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14068
  struct neon_type_el et = neon_check_type (2, rs,
14069
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14070
  int imm = inst.operands[2].imm;
14071
  constraint (imm < 0 || (unsigned)imm >= et.size,
14072
              _("immediate out of range for shift"));
14073
  /* Only encodes the 'U present' variant of the instruction.
14074
     In this case, signed types have OP (bit 8) set to 0.
14075
     Unsigned types have OP set to 1.  */
14076
  inst.instruction |= (et.type == NT_unsigned) << 8;
14077
  /* The rest of the bits are the same as other immediate shifts.  */
14078
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14079
}
14080
 
14081
static void
14082
do_neon_qmovn (void)
14083
{
14084
  struct neon_type_el et = neon_check_type (2, NS_DQ,
14085
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14086
  /* Saturating move where operands can be signed or unsigned, and the
14087
     destination has the same signedness.  */
14088
  NEON_ENCODE (INTEGER, inst);
14089
  if (et.type == NT_unsigned)
14090
    inst.instruction |= 0xc0;
14091
  else
14092
    inst.instruction |= 0x80;
14093
  neon_two_same (0, 1, et.size / 2);
14094
}
14095
 
14096
static void
14097
do_neon_qmovun (void)
14098
{
14099
  struct neon_type_el et = neon_check_type (2, NS_DQ,
14100
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14101
  /* Saturating move with unsigned results. Operands must be signed.  */
14102
  NEON_ENCODE (INTEGER, inst);
14103
  neon_two_same (0, 1, et.size / 2);
14104
}
14105
 
14106
static void
14107
do_neon_rshift_sat_narrow (void)
14108
{
14109
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
14110
     or unsigned. If operands are unsigned, results must also be unsigned.  */
14111
  struct neon_type_el et = neon_check_type (2, NS_DQI,
14112
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14113
  int imm = inst.operands[2].imm;
14114
  /* This gets the bounds check, size encoding and immediate bits calculation
14115
     right.  */
14116
  et.size /= 2;
14117
 
14118
  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14119
     VQMOVN.I<size> <Dd>, <Qm>.  */
14120
  if (imm == 0)
14121
    {
14122
      inst.operands[2].present = 0;
14123
      inst.instruction = N_MNEM_vqmovn;
14124
      do_neon_qmovn ();
14125
      return;
14126
    }
14127
 
14128
  constraint (imm < 1 || (unsigned)imm > et.size,
14129
              _("immediate out of range"));
14130
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
14131
}
14132
 
14133
static void
14134
do_neon_rshift_sat_narrow_u (void)
14135
{
14136
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
14137
     or unsigned. If operands are unsigned, results must also be unsigned.  */
14138
  struct neon_type_el et = neon_check_type (2, NS_DQI,
14139
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14140
  int imm = inst.operands[2].imm;
14141
  /* This gets the bounds check, size encoding and immediate bits calculation
14142
     right.  */
14143
  et.size /= 2;
14144
 
14145
  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14146
     VQMOVUN.I<size> <Dd>, <Qm>.  */
14147
  if (imm == 0)
14148
    {
14149
      inst.operands[2].present = 0;
14150
      inst.instruction = N_MNEM_vqmovun;
14151
      do_neon_qmovun ();
14152
      return;
14153
    }
14154
 
14155
  constraint (imm < 1 || (unsigned)imm > et.size,
14156
              _("immediate out of range"));
14157
  /* FIXME: The manual is kind of unclear about what value U should have in
14158
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14159
     must be 1.  */
14160
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
14161
}
14162
 
14163
static void
14164
do_neon_movn (void)
14165
{
14166
  struct neon_type_el et = neon_check_type (2, NS_DQ,
14167
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14168
  NEON_ENCODE (INTEGER, inst);
14169
  neon_two_same (0, 1, et.size / 2);
14170
}
14171
 
14172
static void
14173
do_neon_rshift_narrow (void)
14174
{
14175
  struct neon_type_el et = neon_check_type (2, NS_DQI,
14176
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14177
  int imm = inst.operands[2].imm;
14178
  /* This gets the bounds check, size encoding and immediate bits calculation
14179
     right.  */
14180
  et.size /= 2;
14181
 
14182
  /* If immediate is zero then we are a pseudo-instruction for
14183
     VMOVN.I<size> <Dd>, <Qm>  */
14184
  if (imm == 0)
14185
    {
14186
      inst.operands[2].present = 0;
14187
      inst.instruction = N_MNEM_vmovn;
14188
      do_neon_movn ();
14189
      return;
14190
    }
14191
 
14192
  constraint (imm < 1 || (unsigned)imm > et.size,
14193
              _("immediate out of range for narrowing operation"));
14194
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
14195
}
14196
 
14197
static void
14198
do_neon_shll (void)
14199
{
14200
  /* FIXME: Type checking when lengthening.  */
14201
  struct neon_type_el et = neon_check_type (2, NS_QDI,
14202
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
14203
  unsigned imm = inst.operands[2].imm;
14204
 
14205
  if (imm == et.size)
14206
    {
14207
      /* Maximum shift variant.  */
14208
      NEON_ENCODE (INTEGER, inst);
14209
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14210
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14211
      inst.instruction |= LOW4 (inst.operands[1].reg);
14212
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14213
      inst.instruction |= neon_logbits (et.size) << 18;
14214
 
14215
      neon_dp_fixup (&inst);
14216
    }
14217
  else
14218
    {
14219
      /* A more-specific type check for non-max versions.  */
14220
      et = neon_check_type (2, NS_QDI,
14221
        N_EQK | N_DBL, N_SU_32 | N_KEY);
14222
      NEON_ENCODE (IMMED, inst);
14223
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
14224
    }
14225
}
14226
 
14227
/* Check the various types for the VCVT instruction, and return which version
14228
   the current instruction is.  */
14229
 
14230
static int
14231
neon_cvt_flavour (enum neon_shape rs)
14232
{
14233
#define CVT_VAR(C,X,Y)                                                  \
14234
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y));       \
14235
  if (et.type != NT_invtype)                                            \
14236
    {                                                                   \
14237
      inst.error = NULL;                                                \
14238
      return (C);                                                       \
14239
    }
14240
  struct neon_type_el et;
14241
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
14242
                        || rs == NS_FF) ? N_VFP : 0;
14243
  /* The instruction versions which take an immediate take one register
14244
     argument, which is extended to the width of the full register. Thus the
14245
     "source" and "destination" registers must have the same width.  Hack that
14246
     here by making the size equal to the key (wider, in this case) operand.  */
14247
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
14248
 
14249
  CVT_VAR (0, N_S32, N_F32);
14250
  CVT_VAR (1, N_U32, N_F32);
14251
  CVT_VAR (2, N_F32, N_S32);
14252
  CVT_VAR (3, N_F32, N_U32);
14253
  /* Half-precision conversions.  */
14254
  CVT_VAR (4, N_F32, N_F16);
14255
  CVT_VAR (5, N_F16, N_F32);
14256
 
14257
  whole_reg = N_VFP;
14258
 
14259
  /* VFP instructions.  */
14260
  CVT_VAR (6, N_F32, N_F64);
14261
  CVT_VAR (7, N_F64, N_F32);
14262
  CVT_VAR (8, N_S32, N_F64 | key);
14263
  CVT_VAR (9, N_U32, N_F64 | key);
14264
  CVT_VAR (10, N_F64 | key, N_S32);
14265
  CVT_VAR (11, N_F64 | key, N_U32);
14266
  /* VFP instructions with bitshift.  */
14267
  CVT_VAR (12, N_F32 | key, N_S16);
14268
  CVT_VAR (13, N_F32 | key, N_U16);
14269
  CVT_VAR (14, N_F64 | key, N_S16);
14270
  CVT_VAR (15, N_F64 | key, N_U16);
14271
  CVT_VAR (16, N_S16, N_F32 | key);
14272
  CVT_VAR (17, N_U16, N_F32 | key);
14273
  CVT_VAR (18, N_S16, N_F64 | key);
14274
  CVT_VAR (19, N_U16, N_F64 | key);
14275
 
14276
  return -1;
14277
#undef CVT_VAR
14278
}
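
/* Illustrative expansion (not used by the assembler): each CVT_VAR line
   above is a first-match probe, e.g. CVT_VAR (0, N_S32, N_F32) expands to
   the code below, so the function returns the index of the first type
   signature that neon_check_type accepts, or -1 if none match.  */
#if 0
  et = neon_check_type (2, rs, whole_reg | (N_S32), whole_reg | (N_F32));
  if (et.type != NT_invtype)
    {
      inst.error = NULL;
      return (0);
    }
#endif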
14279
 
14280
/* Neon-syntax VFP conversions.  */
14281
 
14282
static void
14283
do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
14284
{
14285
  const char *opname = 0;
14286
 
14287
  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14288
    {
14289
      /* Conversions with immediate bitshift.  */
14290
      const char *enc[] =
14291
        {
14292
          "ftosls",
14293
          "ftouls",
14294
          "fsltos",
14295
          "fultos",
14296
          NULL,
14297
          NULL,
14298
          NULL,
14299
          NULL,
14300
          "ftosld",
14301
          "ftould",
14302
          "fsltod",
14303
          "fultod",
14304
          "fshtos",
14305
          "fuhtos",
14306
          "fshtod",
14307
          "fuhtod",
14308
          "ftoshs",
14309
          "ftouhs",
14310
          "ftoshd",
14311
          "ftouhd"
14312
        };
14313
 
14314
      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14315
        {
14316
          opname = enc[flavour];
14317
          constraint (inst.operands[0].reg != inst.operands[1].reg,
14318
                      _("operands 0 and 1 must be the same register"));
14319
          inst.operands[1] = inst.operands[2];
14320
          memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14321
        }
14322
    }
14323
  else
14324
    {
14325
      /* Conversions without bitshift.  */
14326
      const char *enc[] =
14327
        {
14328
          "ftosis",
14329
          "ftouis",
14330
          "fsitos",
14331
          "fuitos",
14332
          NULL,
14333
          NULL,
14334
          "fcvtsd",
14335
          "fcvtds",
14336
          "ftosid",
14337
          "ftouid",
14338
          "fsitod",
14339
          "fuitod"
14340
        };
14341
 
14342
      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14343
        opname = enc[flavour];
14344
    }
14345
 
14346
  if (opname)
14347
    do_vfp_nsyn_opcode (opname);
14348
}
14349
 
14350
static void
14351
do_vfp_nsyn_cvtz (void)
14352
{
14353
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
14354
  int flavour = neon_cvt_flavour (rs);
14355
  const char *enc[] =
14356
    {
14357
      "ftosizs",
14358
      "ftouizs",
14359
      NULL,
14360
      NULL,
14361
      NULL,
14362
      NULL,
14363
      NULL,
14364
      NULL,
14365
      "ftosizd",
14366
      "ftouizd"
14367
    };
14368
 
14369
  if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
14370
    do_vfp_nsyn_opcode (enc[flavour]);
14371
}
14372
 
14373
static void
14374
do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED)
14375
{
14376
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
14377
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
14378
  int flavour = neon_cvt_flavour (rs);
14379
 
14380
  /* PR11109: Handle round-to-zero for VCVT conversions.  */
14381
  if (round_to_zero
14382
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
14383
      && (flavour == 0 || flavour == 1 || flavour == 8 || flavour == 9)
14384
      && (rs == NS_FD || rs == NS_FF))
14385
    {
14386
      do_vfp_nsyn_cvtz ();
14387
      return;
14388
    }
14389
 
14390
  /* VFP rather than Neon conversions.  */
14391
  if (flavour >= 6)
14392
    {
14393
      do_vfp_nsyn_cvt (rs, flavour);
14394
      return;
14395
    }
14396
 
14397
  switch (rs)
14398
    {
14399
    case NS_DDI:
14400
    case NS_QQI:
14401
      {
14402
        unsigned immbits;
14403
        unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
14404
 
14405
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14406
          return;
14407
 
14408
        /* Fixed-point conversion with #0 immediate is encoded as an
14409
           integer conversion.  */
14410
        if (inst.operands[2].present && inst.operands[2].imm == 0)
14411
          goto int_encode;
14412
        immbits = 32 - inst.operands[2].imm;
14413
        NEON_ENCODE (IMMED, inst);
14414
        if (flavour != -1)
14415
          inst.instruction |= enctab[flavour];
14416
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14417
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14418
        inst.instruction |= LOW4 (inst.operands[1].reg);
14419
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14420
        inst.instruction |= neon_quad (rs) << 6;
14421
        inst.instruction |= 1 << 21;
14422
        inst.instruction |= immbits << 16;
14423
 
14424
        neon_dp_fixup (&inst);
14425
      }
14426
      break;
14427
 
14428
    case NS_DD:
14429
    case NS_QQ:
14430
    int_encode:
14431
      {
14432
        unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
14433
 
14434
        NEON_ENCODE (INTEGER, inst);
14435
 
14436
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14437
          return;
14438
 
14439
        if (flavour != -1)
14440
          inst.instruction |= enctab[flavour];
14441
 
14442
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14443
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14444
        inst.instruction |= LOW4 (inst.operands[1].reg);
14445
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14446
        inst.instruction |= neon_quad (rs) << 6;
14447
        inst.instruction |= 2 << 18;
14448
 
14449
        neon_dp_fixup (&inst);
14450
      }
14451
    break;
14452
 
14453
    /* Half-precision conversions for Advanced SIMD -- neon.  */
14454
    case NS_QD:
14455
    case NS_DQ:
14456
 
14457
      if ((rs == NS_DQ)
14458
          && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
14459
          {
14460
            as_bad (_("operand size must match register width"));
14461
            break;
14462
          }
14463
 
14464
      if ((rs == NS_QD)
14465
          && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
14466
          {
14467
            as_bad (_("operand size must match register width"));
14468
            break;
14469
          }
14470
 
14471
      if (rs == NS_DQ)
14472
        inst.instruction = 0x3b60600;
14473
      else
14474
        inst.instruction = 0x3b60700;
14475
 
14476
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14477
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14478
      inst.instruction |= LOW4 (inst.operands[1].reg);
14479
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14480
      neon_dp_fixup (&inst);
14481
      break;
14482
 
14483
    default:
14484
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
14485
      do_vfp_nsyn_cvt (rs, flavour);
14486
    }
14487
}
14488
 
14489
static void
14490
do_neon_cvtr (void)
14491
{
14492
  do_neon_cvt_1 (FALSE);
14493
}
14494
 
14495
static void
14496
do_neon_cvt (void)
14497
{
14498
  do_neon_cvt_1 (TRUE);
14499
}
14500
 
14501
static void
14502
do_neon_cvtb (void)
14503
{
14504
  inst.instruction = 0xeb20a40;
14505
 
14506
  /* The sizes are attached to the mnemonic.  */
14507
  if (inst.vectype.el[0].type != NT_invtype
14508
      && inst.vectype.el[0].size == 16)
14509
    inst.instruction |= 0x00010000;
14510
 
14511
  /* Programmer's syntax: the sizes are attached to the operands.  */
14512
  else if (inst.operands[0].vectype.type != NT_invtype
14513
           && inst.operands[0].vectype.size == 16)
14514
    inst.instruction |= 0x00010000;
14515
 
14516
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
14517
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
14518
  do_vfp_cond_or_thumb ();
14519
}
14520
 
14521
 
14522
static void
14523
do_neon_cvtt (void)
14524
{
14525
  do_neon_cvtb ();
14526
  inst.instruction |= 0x80;
14527
}
14528
 
14529
static void
14530
neon_move_immediate (void)
14531
{
14532
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
14533
  struct neon_type_el et = neon_check_type (2, rs,
14534
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14535
  unsigned immlo, immhi = 0, immbits;
14536
  int op, cmode, float_p;
14537
 
14538
  constraint (et.type == NT_invtype,
14539
              _("operand size must be specified for immediate VMOV"));
14540
 
14541
  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
14542
  op = (inst.instruction & (1 << 5)) != 0;
14543
 
14544
  immlo = inst.operands[1].imm;
14545
  if (inst.operands[1].regisimm)
14546
    immhi = inst.operands[1].reg;
14547
 
14548
  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
14549
              _("immediate has bits set outside the operand size"));
14550
 
14551
  float_p = inst.operands[1].immisfloat;
14552
 
14553
  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
14554
                                        et.size, et.type)) == FAIL)
14555
    {
14556
      /* Invert relevant bits only.  */
14557
      neon_invert_size (&immlo, &immhi, et.size);
14558
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
14559
         with one or the other; those cases are caught by
14560
         neon_cmode_for_move_imm.  */
14561
      op = !op;
14562
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
14563
                                            &op, et.size, et.type)) == FAIL)
14564
        {
14565
          first_error (_("immediate out of range"));
14566
          return;
14567
        }
14568
    }
14569
 
14570
  inst.instruction &= ~(1 << 5);
14571
  inst.instruction |= op << 5;
14572
 
14573
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14574
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14575
  inst.instruction |= neon_quad (rs) << 6;
14576
  inst.instruction |= cmode << 8;
14577
 
14578
  neon_write_immbits (immbits);
14579
}
14580
 
14581
static void
14582
do_neon_mvn (void)
14583
{
14584
  if (inst.operands[1].isreg)
14585
    {
14586
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14587
 
14588
      NEON_ENCODE (INTEGER, inst);
14589
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14590
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14591
      inst.instruction |= LOW4 (inst.operands[1].reg);
14592
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14593
      inst.instruction |= neon_quad (rs) << 6;
14594
    }
14595
  else
14596
    {
14597
      NEON_ENCODE (IMMED, inst);
14598
      neon_move_immediate ();
14599
    }
14600
 
14601
  neon_dp_fixup (&inst);
14602
}
14603
 
14604
/* Encode instructions of form:
14605
 
14606
  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
14607
  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
14608
 
14609
static void
14610
neon_mixed_length (struct neon_type_el et, unsigned size)
14611
{
14612
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14613
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14614
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14615
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14616
  inst.instruction |= LOW4 (inst.operands[2].reg);
14617
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14618
  inst.instruction |= (et.type == NT_unsigned) << 24;
14619
  inst.instruction |= neon_logbits (size) << 20;
14620
 
14621
  neon_dp_fixup (&inst);
14622
}
14623
 
14624
static void
14625
do_neon_dyadic_long (void)
14626
{
14627
  /* FIXME: Type checking for lengthening op.  */
14628
  struct neon_type_el et = neon_check_type (3, NS_QDD,
14629
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
14630
  neon_mixed_length (et, et.size);
14631
}
14632
 
14633
static void
14634
do_neon_abal (void)
14635
{
14636
  struct neon_type_el et = neon_check_type (3, NS_QDD,
14637
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
14638
  neon_mixed_length (et, et.size);
14639
}
14640
 
14641
static void
14642
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
14643
{
14644
  if (inst.operands[2].isscalar)
14645
    {
14646
      struct neon_type_el et = neon_check_type (3, NS_QDS,
14647
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
14648
      NEON_ENCODE (SCALAR, inst);
14649
      neon_mul_mac (et, et.type == NT_unsigned);
14650
    }
14651
  else
14652
    {
14653
      struct neon_type_el et = neon_check_type (3, NS_QDD,
14654
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
14655
      NEON_ENCODE (INTEGER, inst);
14656
      neon_mixed_length (et, et.size);
14657
    }
14658
}
14659
 
14660
static void
14661
do_neon_mac_maybe_scalar_long (void)
14662
{
14663
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
14664
}
14665
 
14666
static void
14667
do_neon_dyadic_wide (void)
14668
{
14669
  struct neon_type_el et = neon_check_type (3, NS_QQD,
14670
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
14671
  neon_mixed_length (et, et.size);
14672
}
14673
 
14674
static void
14675
do_neon_dyadic_narrow (void)
14676
{
14677
  struct neon_type_el et = neon_check_type (3, NS_QDD,
14678
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
14679
  /* Operand sign is unimportant, and the U bit is part of the opcode,
14680
     so force the operand type to integer.  */
14681
  et.type = NT_integer;
14682
  neon_mixed_length (et, et.size / 2);
14683
}
14684
 
14685
static void
14686
do_neon_mul_sat_scalar_long (void)
14687
{
14688
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
14689
}
14690
 
14691
static void
14692
do_neon_vmull (void)
14693
{
14694
  if (inst.operands[2].isscalar)
14695
    do_neon_mac_maybe_scalar_long ();
14696
  else
14697
    {
14698
      struct neon_type_el et = neon_check_type (3, NS_QDD,
14699
        N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
14700
      if (et.type == NT_poly)
14701
        NEON_ENCODE (POLY, inst);
14702
      else
14703
        NEON_ENCODE (INTEGER, inst);
14704
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
14705
         zero. Should be OK as-is.  */
14706
      neon_mixed_length (et, et.size);
14707
    }
14708
}
14709
 
14710
static void
14711
do_neon_ext (void)
14712
{
14713
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
14714
  struct neon_type_el et = neon_check_type (3, rs,
14715
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14716
  unsigned imm = (inst.operands[3].imm * et.size) / 8;
14717
 
14718
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
14719
              _("shift out of range"));
14720
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14721
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14722
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14723
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14724
  inst.instruction |= LOW4 (inst.operands[2].reg);
14725
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14726
  inst.instruction |= neon_quad (rs) << 6;
14727
  inst.instruction |= imm << 8;
14728
 
14729
  neon_dp_fixup (&inst);
14730
}
14731
 
14732
static void
14733
do_neon_rev (void)
14734
{
14735
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14736
  struct neon_type_el et = neon_check_type (2, rs,
14737
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
14738
  unsigned op = (inst.instruction >> 7) & 3;
14739
  /* N (width of reversed regions) is encoded as part of the bitmask. We
14740
     extract it here to check the elements to be reversed are smaller.
14741
     Otherwise we'd get a reserved instruction.  */
14742
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
14743
  gas_assert (elsize != 0);
14744
  constraint (et.size >= elsize,
14745
              _("elements must be smaller than reversal region"));
14746
  neon_two_same (neon_quad (rs), 1, et.size);
14747
}
14748
 
14749
static void
14750
do_neon_dup (void)
14751
{
14752
  if (inst.operands[1].isscalar)
14753
    {
14754
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
14755
      struct neon_type_el et = neon_check_type (2, rs,
14756
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
14757
      unsigned sizebits = et.size >> 3;
14758
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
14759
      int logsize = neon_logbits (et.size);
14760
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
14761
 
14762
      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
14763
        return;
14764
 
14765
      NEON_ENCODE (SCALAR, inst);
14766
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14767
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14768
      inst.instruction |= LOW4 (dm);
14769
      inst.instruction |= HI1 (dm) << 5;
14770
      inst.instruction |= neon_quad (rs) << 6;
14771
      inst.instruction |= x << 17;
14772
      inst.instruction |= sizebits << 16;
14773
 
14774
      neon_dp_fixup (&inst);
14775
    }
14776
  else
14777
    {
14778
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
14779
      struct neon_type_el et = neon_check_type (2, rs,
14780
        N_8 | N_16 | N_32 | N_KEY, N_EQK);
14781
      /* Duplicate ARM register to lanes of vector.  */
14782
      NEON_ENCODE (ARMREG, inst);
14783
      switch (et.size)
14784
        {
14785
        case 8:  inst.instruction |= 0x400000; break;
14786
        case 16: inst.instruction |= 0x000020; break;
14787
        case 32: inst.instruction |= 0x000000; break;
14788
        default: break;
14789
        }
14790
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
14791
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
14792
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
14793
      inst.instruction |= neon_quad (rs) << 21;
14794
      /* The encoding for this instruction is identical for the ARM and Thumb
14795
         variants, except for the condition field.  */
14796
      do_vfp_cond_or_thumb ();
14797
    }
14798
}
14799
 
14800
/* VMOV has particularly many variations. It can be one of:
14801
     0. VMOV<c><q> <Qd>, <Qm>
14802
     1. VMOV<c><q> <Dd>, <Dm>
14803
   (Register operations, which are VORR with Rm = Rn.)
14804
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
14805
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
14806
   (Immediate loads.)
14807
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14808
   (ARM register to scalar.)
14809
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14810
   (Two ARM registers to vector.)
14811
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14812
   (Scalar to ARM register.)
14813
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14814
   (Vector to two ARM registers.)
14815
     8. VMOV.F32 <Sd>, <Sm>
14816
     9. VMOV.F64 <Dd>, <Dm>
14817
   (VFP register moves.)
14818
    10. VMOV.F32 <Sd>, #imm
14819
    11. VMOV.F64 <Dd>, #imm
14820
   (VFP float immediate load.)
14821
    12. VMOV <Rd>, <Sm>
14822
   (VFP single to ARM reg.)
14823
    13. VMOV <Sd>, <Rm>
14824
   (ARM reg to VFP single.)
14825
    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14826
   (Two ARM regs to two VFP singles.)
14827
    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14828
   (Two VFP singles to two ARM regs.)
14829
 
14830
   These cases can be disambiguated using neon_select_shape, except cases 1/9
14831
   and 3/11 which depend on the operand type too.
14832
 
14833
   All the encoded bits are hardcoded by this function.
14834
 
14835
   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14836
   Cases 5, 7 may be used with VFPv2 and above.
14837
 
14838
   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14839
   can specify a type where it doesn't make sense to, and is ignored).  */
14840
 
14841
static void
14842
do_neon_mov (void)
14843
{
14844
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
14845
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
14846
    NS_NULL);
14847
  struct neon_type_el et;
14848
  const char *ldconst = 0;
14849
 
14850
  switch (rs)
14851
    {
14852
    case NS_DD:  /* case 1/9.  */
14853
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
14854
      /* It is not an error here if no type is given.  */
14855
      inst.error = NULL;
14856
      if (et.type == NT_float && et.size == 64)
14857
        {
14858
          do_vfp_nsyn_opcode ("fcpyd");
14859
          break;
14860
        }
14861
      /* fall through.  */
14862
 
14863
    case NS_QQ:  /* case 0/1.  */
14864
      {
14865
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14866
          return;
14867
        /* The architecture manual I have doesn't explicitly state which
14868
           value the U bit should have for register->register moves, but
14869
           the equivalent VORR instruction has U = 0, so do that.  */
14870
        inst.instruction = 0x0200110;
14871
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14872
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14873
        inst.instruction |= LOW4 (inst.operands[1].reg);
14874
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14875
        inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14876
        inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14877
        inst.instruction |= neon_quad (rs) << 6;
14878
 
14879
        neon_dp_fixup (&inst);
14880
      }
14881
      break;
14882
 
14883
    case NS_DI:  /* case 3/11.  */
14884
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
14885
      inst.error = NULL;
14886
      if (et.type == NT_float && et.size == 64)
14887
        {
14888
          /* case 11 (fconstd).  */
14889
          ldconst = "fconstd";
14890
          goto encode_fconstd;
14891
        }
14892
      /* fall through.  */
14893
 
14894
    case NS_QI:  /* case 2/3.  */
14895
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14896
        return;
14897
      inst.instruction = 0x0800010;
14898
      neon_move_immediate ();
14899
      neon_dp_fixup (&inst);
14900
      break;
14901
 
14902
    case NS_SR:  /* case 4.  */
14903
      {
14904
        unsigned bcdebits = 0;
14905
        int logsize;
14906
        unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
14907
        unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
14908
 
14909
        et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
14910
        logsize = neon_logbits (et.size);
14911
 
14912
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
14913
                    _(BAD_FPU));
14914
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
14915
                    && et.size != 32, _(BAD_FPU));
14916
        constraint (et.type == NT_invtype, _("bad type for scalar"));
14917
        constraint (x >= 64 / et.size, _("scalar index out of range"));
14918
 
14919
        switch (et.size)
14920
          {
14921
          case 8:  bcdebits = 0x8; break;
14922
          case 16: bcdebits = 0x1; break;
14923
          case 32: bcdebits = 0x0; break;
14924
          default: ;
14925
          }
14926
 
14927
        bcdebits |= x << logsize;
14928
 
14929
        inst.instruction = 0xe000b10;
14930
        do_vfp_cond_or_thumb ();
14931
        inst.instruction |= LOW4 (dn) << 16;
14932
        inst.instruction |= HI1 (dn) << 7;
14933
        inst.instruction |= inst.operands[1].reg << 12;
14934
        inst.instruction |= (bcdebits & 3) << 5;
14935
        inst.instruction |= (bcdebits >> 2) << 21;
14936
      }
14937
      break;
14938
 
14939
    case NS_DRR:  /* case 5 (fmdrr).  */
14940
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
14941
                  _(BAD_FPU));
14942
 
14943
      inst.instruction = 0xc400b10;
14944
      do_vfp_cond_or_thumb ();
14945
      inst.instruction |= LOW4 (inst.operands[0].reg);
14946
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
14947
      inst.instruction |= inst.operands[1].reg << 12;
14948
      inst.instruction |= inst.operands[2].reg << 16;
14949
      break;
14950
 
14951
    case NS_RS:  /* case 6.  */
14952
      {
14953
        unsigned logsize;
14954
        unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
14955
        unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
14956
        unsigned abcdebits = 0;
14957
 
14958
        et = neon_check_type (2, NS_NULL,
14959
                              N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
14960
        logsize = neon_logbits (et.size);
14961
 
14962
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
14963
                    _(BAD_FPU));
14964
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
14965
                    && et.size != 32, _(BAD_FPU));
14966
        constraint (et.type == NT_invtype, _("bad type for scalar"));
14967
        constraint (x >= 64 / et.size, _("scalar index out of range"));
14968
 
14969
        switch (et.size)
14970
          {
14971
          case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
14972
          case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
14973
          case 32: abcdebits = 0x00; break;
14974
          default: ;
14975
          }
14976
 
14977
        abcdebits |= x << logsize;
14978
        inst.instruction = 0xe100b10;
14979
        do_vfp_cond_or_thumb ();
14980
        inst.instruction |= LOW4 (dn) << 16;
14981
        inst.instruction |= HI1 (dn) << 7;
14982
        inst.instruction |= inst.operands[0].reg << 12;
14983
        inst.instruction |= (abcdebits & 3) << 5;
14984
        inst.instruction |= (abcdebits >> 2) << 21;
14985
      }
14986
      break;
14987
 
14988
    case NS_RRD:  /* case 7 (fmrrd).  */
14989
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
14990
                  _(BAD_FPU));
14991
 
14992
      inst.instruction = 0xc500b10;
14993
      do_vfp_cond_or_thumb ();
14994
      inst.instruction |= inst.operands[0].reg << 12;
14995
      inst.instruction |= inst.operands[1].reg << 16;
14996
      inst.instruction |= LOW4 (inst.operands[2].reg);
14997
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14998
      break;
14999
 
15000
    case NS_FF:  /* case 8 (fcpys).  */
15001
      do_vfp_nsyn_opcode ("fcpys");
15002
      break;
15003
 
15004
    case NS_FI:  /* case 10 (fconsts).  */
15005
      ldconst = "fconsts";
15006
      encode_fconstd:
15007
      if (is_quarter_float (inst.operands[1].imm))
15008
        {
15009
          inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
15010
          do_vfp_nsyn_opcode (ldconst);
15011
        }
15012
      else
15013
        first_error (_("immediate out of range"));
15014
      break;
15015
 
15016
    case NS_RF:  /* case 12 (fmrs).  */
15017
      do_vfp_nsyn_opcode ("fmrs");
15018
      break;
15019
 
15020
    case NS_FR:  /* case 13 (fmsr).  */
15021
      do_vfp_nsyn_opcode ("fmsr");
15022
      break;
15023
 
15024
    /* The encoders for the fmrrs and fmsrr instructions expect three operands
15025
       (one of which is a list), but we have parsed four.  Do some fiddling to
15026
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15027
       expect.  */
15028
    case NS_RRFF:  /* case 14 (fmrrs).  */
15029
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
15030
                  _("VFP registers must be adjacent"));
15031
      inst.operands[2].imm = 2;
15032
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15033
      do_vfp_nsyn_opcode ("fmrrs");
15034
      break;
15035
 
15036
    case NS_FFRR:  /* case 15 (fmsrr).  */
15037
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
15038
                  _("VFP registers must be adjacent"));
15039
      inst.operands[1] = inst.operands[2];
15040
      inst.operands[2] = inst.operands[3];
15041
      inst.operands[0].imm = 2;
15042
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15043
      do_vfp_nsyn_opcode ("fmsrr");
15044
      break;
15045
 
15046
    default:
15047
      abort ();
15048
    }
15049
}
15050
 
15051
static void
15052
do_neon_rshift_round_imm (void)
15053
{
15054
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15055
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15056
  int imm = inst.operands[2].imm;
15057
 
15058
  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
15059
  if (imm == 0)
15060
    {
15061
      inst.operands[2].present = 0;
15062
      do_neon_mov ();
15063
      return;
15064
    }
15065
 
15066
  constraint (imm < 1 || (unsigned)imm > et.size,
15067
              _("immediate out of range for shift"));
15068
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15069
                  et.size - imm);
15070
}
15071
 
15072
static void
15073
do_neon_movl (void)
15074
{
15075
  struct neon_type_el et = neon_check_type (2, NS_QD,
15076
    N_EQK | N_DBL, N_SU_32 | N_KEY);
15077
  unsigned sizebits = et.size >> 3;
15078
  inst.instruction |= sizebits << 19;
15079
  neon_two_same (0, et.type == NT_unsigned, -1);
15080
}
15081
 
15082
static void
15083
do_neon_trn (void)
15084
{
15085
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15086
  struct neon_type_el et = neon_check_type (2, rs,
15087
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
15088
  NEON_ENCODE (INTEGER, inst);
15089
  neon_two_same (neon_quad (rs), 1, et.size);
15090
}
15091
 
15092
static void
15093
do_neon_zip_uzp (void)
15094
{
15095
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15096
  struct neon_type_el et = neon_check_type (2, rs,
15097
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
15098
  if (rs == NS_DD && et.size == 32)
15099
    {
15100
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
15101
      inst.instruction = N_MNEM_vtrn;
15102
      do_neon_trn ();
15103
      return;
15104
    }
15105
  neon_two_same (neon_quad (rs), 1, et.size);
15106
}
15107
 
15108
static void
15109
do_neon_sat_abs_neg (void)
15110
{
15111
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15112
  struct neon_type_el et = neon_check_type (2, rs,
15113
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15114
  neon_two_same (neon_quad (rs), 1, et.size);
15115
}
15116
 
15117
static void
15118
do_neon_pair_long (void)
15119
{
15120
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15121
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15122
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
15123
  inst.instruction |= (et.type == NT_unsigned) << 7;
15124
  neon_two_same (neon_quad (rs), 1, et.size);
15125
}
15126
 
15127
static void
15128
do_neon_recip_est (void)
15129
{
15130
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15131
  struct neon_type_el et = neon_check_type (2, rs,
15132
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15133
  inst.instruction |= (et.type == NT_float) << 8;
15134
  neon_two_same (neon_quad (rs), 1, et.size);
15135
}
15136
 
15137
static void
15138
do_neon_cls (void)
15139
{
15140
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15141
  struct neon_type_el et = neon_check_type (2, rs,
15142
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15143
  neon_two_same (neon_quad (rs), 1, et.size);
15144
}
15145
 
15146
static void
15147
do_neon_clz (void)
15148
{
15149
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15150
  struct neon_type_el et = neon_check_type (2, rs,
15151
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15152
  neon_two_same (neon_quad (rs), 1, et.size);
15153
}
15154
 
15155
static void
15156
do_neon_cnt (void)
15157
{
15158
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15159
  struct neon_type_el et = neon_check_type (2, rs,
15160
    N_EQK | N_INT, N_8 | N_KEY);
15161
  neon_two_same (neon_quad (rs), 1, et.size);
15162
}
15163
 
15164
static void
15165
do_neon_swp (void)
15166
{
15167
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15168
  neon_two_same (neon_quad (rs), 1, -1);
15169
}
15170
 
15171
static void
15172
do_neon_tbl_tbx (void)
15173
{
15174
  unsigned listlenbits;
15175
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
15176
 
15177
  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
15178
    {
15179
      first_error (_("bad list length for table lookup"));
15180
      return;
15181
    }
15182
 
15183
  listlenbits = inst.operands[1].imm - 1;
15184
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15185
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15186
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15187
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15188
  inst.instruction |= LOW4 (inst.operands[2].reg);
15189
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15190
  inst.instruction |= listlenbits << 8;
15191
 
15192
  neon_dp_fixup (&inst);
15193
}
15194
 
15195
static void
15196
do_neon_ldm_stm (void)
15197
{
15198
  /* P, U and L bits are part of bitmask.  */
15199
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
15200
  unsigned offsetbits = inst.operands[1].imm * 2;
15201
 
15202
  if (inst.operands[1].issingle)
15203
    {
15204
      do_vfp_nsyn_ldm_stm (is_dbmode);
15205
      return;
15206
    }
15207
 
15208
  constraint (is_dbmode && !inst.operands[0].writeback,
15209
              _("writeback (!) must be used for VLDMDB and VSTMDB"));
15210
 
15211
  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15212
              _("register list must contain at least 1 and at most 16 "
15213
                "registers"));
15214
 
15215
  inst.instruction |= inst.operands[0].reg << 16;
15216
  inst.instruction |= inst.operands[0].writeback << 21;
15217
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15218
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;
15219
 
15220
  inst.instruction |= offsetbits;
15221
 
15222
  do_vfp_cond_or_thumb ();
15223
}
15224
 
15225
static void
15226
do_neon_ldr_str (void)
15227
{
15228
  int is_ldr = (inst.instruction & (1 << 20)) != 0;
15229
 
15230
  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
15231
     It is UNPREDICTABLE in Thumb mode.  */
15232
  if (!is_ldr
15233
      && inst.operands[1].reg == REG_PC
15234
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
15235
    {
15236
      if (!thumb_mode && warn_on_deprecated)
15237
        as_warn (_("Use of PC here is deprecated"));
15238
      else
15239
        inst.error = _("Use of PC here is UNPREDICTABLE");
15240
    }
15241
 
15242
  if (inst.operands[0].issingle)
15243
    {
15244
      if (is_ldr)
15245
        do_vfp_nsyn_opcode ("flds");
15246
      else
15247
        do_vfp_nsyn_opcode ("fsts");
15248
    }
15249
  else
15250
    {
15251
      if (is_ldr)
15252
        do_vfp_nsyn_opcode ("fldd");
15253
      else
15254
        do_vfp_nsyn_opcode ("fstd");
15255
    }
15256
}
15257
 
15258
/* "interleave" version also handles non-interleaving register VLD1/VST1
15259
   instructions.  */
15260
 
15261
static void
15262
do_neon_ld_st_interleave (void)
15263
{
15264
  struct neon_type_el et = neon_check_type (1, NS_NULL,
15265
                                            N_8 | N_16 | N_32 | N_64);
15266
  unsigned alignbits = 0;
15267
  unsigned idx;
15268
  /* The bits in this table go:
15269
     0: register stride of one (0) or two (1)
15270
     1,2: register list length, minus one (1, 2, 3, 4).
15271
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
15272
     We use -1 for invalid entries.  */
15273
  const int typetable[] =
15274
    {
15275
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
15276
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
15277
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
15278
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
15279
    };
15280
  int typebits;
15281
 
15282
  if (et.type == NT_invtype)
15283
    return;
15284
 
15285
  if (inst.operands[1].immisalign)
15286
    switch (inst.operands[1].imm >> 8)
15287
      {
15288
      case 64: alignbits = 1; break;
15289
      case 128:
15290
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
15291
            && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15292
          goto bad_alignment;
15293
        alignbits = 2;
15294
        break;
15295
      case 256:
15296
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15297
          goto bad_alignment;
15298
        alignbits = 3;
15299
        break;
15300
      default:
15301
      bad_alignment:
15302
        first_error (_("bad alignment"));
15303
        return;
15304
      }
15305
 
15306
  inst.instruction |= alignbits << 4;
15307
  inst.instruction |= neon_logbits (et.size) << 6;
15308
 
15309
  /* Bits [4:6] of the immediate in a list specifier encode register stride
15310
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
15311
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
15312
     up the right value for "type" in a table based on this value and the given
15313
     list style, then stick it back.  */
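  /* Rough worked example (illustrative values): for "vld1.8 {d0,d1}, [r0]"
     the list has two registers at stride one, so the bits described above
     give idx = 0b00010 = 2, and typetable[2] == 0xa, i.e. the 0b1010 "two
     register" type value that VLD1/VST1 carry in bits [11:8].  */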
15314
  idx = ((inst.operands[0].imm >> 4) & 7)
15315
        | (((inst.instruction >> 8) & 3) << 3);
15316
 
15317
  typebits = typetable[idx];
15318
 
15319
  constraint (typebits == -1, _("bad list type for instruction"));
15320
 
15321
  inst.instruction &= ~0xf00;
15322
  inst.instruction |= typebits << 8;
15323
}
15324
 
15325
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
15326
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
15327
   otherwise. The variable arguments are a list of pairs of legal (size, align)
15328
   values, terminated with -1.  */
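/* For instance, the VLD1/VST1 lane case below passes the pairs (16, 16) and
   (32, 32): an alignment hint of 16 is only accepted for 16-bit elements and
   32 only for 32-bit elements; anything else is rejected with
   "unsupported alignment for instruction".  */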
15329
 
15330
static int
15331
neon_alignment_bit (int size, int align, int *do_align, ...)
15332
{
15333
  va_list ap;
15334
  int result = FAIL, thissize, thisalign;
15335
 
15336
  if (!inst.operands[1].immisalign)
15337
    {
15338
      *do_align = 0;
15339
      return SUCCESS;
15340
    }
15341
 
15342
  va_start (ap, do_align);
15343
 
15344
  do
15345
    {
15346
      thissize = va_arg (ap, int);
15347
      if (thissize == -1)
15348
        break;
15349
      thisalign = va_arg (ap, int);
15350
 
15351
      if (size == thissize && align == thisalign)
15352
        result = SUCCESS;
15353
    }
15354
  while (result != SUCCESS);
15355
 
15356
  va_end (ap);
15357
 
15358
  if (result == SUCCESS)
15359
    *do_align = 1;
15360
  else
15361
    first_error (_("unsupported alignment for instruction"));
15362
 
15363
  return result;
15364
}
15365
 
15366
static void
15367
do_neon_ld_st_lane (void)
15368
{
15369
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15370
  int align_good, do_align = 0;
15371
  int logsize = neon_logbits (et.size);
15372
  int align = inst.operands[1].imm >> 8;
15373
  int n = (inst.instruction >> 8) & 3;
15374
  int max_el = 64 / et.size;
15375
 
15376
  if (et.type == NT_invtype)
15377
    return;
15378
 
15379
  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
15380
              _("bad list length"));
15381
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
15382
              _("scalar index out of range"));
15383
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
15384
              && et.size == 8,
15385
              _("stride of 2 unavailable when element size is 8"));
15386
 
15387
  switch (n)
15388
    {
15389
    case 0:  /* VLD1 / VST1.  */
15390
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
15391
                                       32, 32, -1);
15392
      if (align_good == FAIL)
15393
        return;
15394
      if (do_align)
15395
        {
15396
          unsigned alignbits = 0;
15397
          switch (et.size)
15398
            {
15399
            case 16: alignbits = 0x1; break;
15400
            case 32: alignbits = 0x3; break;
15401
            default: ;
15402
            }
15403
          inst.instruction |= alignbits << 4;
15404
        }
15405
      break;
15406
 
15407
    case 1:  /* VLD2 / VST2.  */
15408
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
15409
                                       32, 64, -1);
15410
      if (align_good == FAIL)
15411
        return;
15412
      if (do_align)
15413
        inst.instruction |= 1 << 4;
15414
      break;
15415
 
15416
    case 2:  /* VLD3 / VST3.  */
15417
      constraint (inst.operands[1].immisalign,
15418
                  _("can't use alignment with this instruction"));
15419
      break;
15420
 
15421
    case 3:  /* VLD4 / VST4.  */
15422
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15423
                                       16, 64, 32, 64, 32, 128, -1);
15424
      if (align_good == FAIL)
15425
        return;
15426
      if (do_align)
15427
        {
15428
          unsigned alignbits = 0;
15429
          switch (et.size)
15430
            {
15431
            case 8:  alignbits = 0x1; break;
15432
            case 16: alignbits = 0x1; break;
15433
            case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
15434
            default: ;
15435
            }
15436
          inst.instruction |= alignbits << 4;
15437
        }
15438
      break;
15439
 
15440
    default: ;
15441
    }
15442
 
15443
  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
15444
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15445
    inst.instruction |= 1 << (4 + logsize);
15446
 
15447
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
15448
  inst.instruction |= logsize << 10;
15449
}
15450
 
15451
/* Encode single n-element structure to all lanes VLD<n> instructions.  */
15452
 
15453
static void
15454
do_neon_ld_dup (void)
15455
{
15456
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15457
  int align_good, do_align = 0;
15458
 
15459
  if (et.type == NT_invtype)
15460
    return;
15461
 
15462
  switch ((inst.instruction >> 8) & 3)
15463
    {
15464
    case 0:  /* VLD1.  */
15465
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
15466
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15467
                                       &do_align, 16, 16, 32, 32, -1);
15468
      if (align_good == FAIL)
15469
        return;
15470
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
15471
        {
15472
        case 1: break;
15473
        case 2: inst.instruction |= 1 << 5; break;
15474
        default: first_error (_("bad list length")); return;
15475
        }
15476
      inst.instruction |= neon_logbits (et.size) << 6;
15477
      break;
15478
 
15479
    case 1:  /* VLD2.  */
15480
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15481
                                       &do_align, 8, 16, 16, 32, 32, 64, -1);
15482
      if (align_good == FAIL)
15483
        return;
15484
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
15485
                  _("bad list length"));
15486
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15487
        inst.instruction |= 1 << 5;
15488
      inst.instruction |= neon_logbits (et.size) << 6;
15489
      break;
15490
 
15491
    case 2:  /* VLD3.  */
15492
      constraint (inst.operands[1].immisalign,
15493
                  _("can't use alignment with this instruction"));
15494
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
15495
                  _("bad list length"));
15496
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15497
        inst.instruction |= 1 << 5;
15498
      inst.instruction |= neon_logbits (et.size) << 6;
15499
      break;
15500
 
15501
    case 3:  /* VLD4.  */
15502
      {
15503
        int align = inst.operands[1].imm >> 8;
15504
        align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15505
                                         16, 64, 32, 64, 32, 128, -1);
15506
        if (align_good == FAIL)
15507
          return;
15508
        constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
15509
                    _("bad list length"));
15510
        if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15511
          inst.instruction |= 1 << 5;
15512
        if (et.size == 32 && align == 128)
15513
          inst.instruction |= 0x3 << 6;
15514
        else
15515
          inst.instruction |= neon_logbits (et.size) << 6;
15516
      }
15517
      break;
15518
 
15519
    default: ;
15520
    }
15521
 
15522
  inst.instruction |= do_align << 4;
15523
}
15524
 
15525
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
15526
   apart from bits [11:4]).  */
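/* For illustration, the three dispatch paths below are selected by forms
   such as:

     vld1.32 {d0,d1}, [r0]                       @ interleave (no lane index)
     vld2.8  {d0[],d1[]}, [r1]                   @ all lanes (dup)
     vst4.16 {d0[1],d1[1],d2[1],d3[1]}, [r2]!    @ single lane  */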
15527
 
15528
static void
15529
do_neon_ldx_stx (void)
15530
{
15531
  if (inst.operands[1].isreg)
15532
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);
15533
 
15534
  switch (NEON_LANE (inst.operands[0].imm))
15535
    {
15536
    case NEON_INTERLEAVE_LANES:
15537
      NEON_ENCODE (INTERLV, inst);
15538
      do_neon_ld_st_interleave ();
15539
      break;
15540
 
15541
    case NEON_ALL_LANES:
15542
      NEON_ENCODE (DUP, inst);
15543
      do_neon_ld_dup ();
15544
      break;
15545
 
15546
    default:
15547
      NEON_ENCODE (LANE, inst);
15548
      do_neon_ld_st_lane ();
15549
    }
15550
 
15551
  /* L bit comes from bit mask.  */
15552
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15553
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15554
  inst.instruction |= inst.operands[1].reg << 16;
15555
 
15556
  if (inst.operands[1].postind)
15557
    {
15558
      int postreg = inst.operands[1].imm & 0xf;
15559
      constraint (!inst.operands[1].immisreg,
15560
                  _("post-index must be a register"));
15561
      constraint (postreg == 0xd || postreg == 0xf,
15562
                  _("bad register for post-index"));
15563
      inst.instruction |= postreg;
15564
    }
15565
  else if (inst.operands[1].writeback)
15566
    {
15567
      inst.instruction |= 0xd;
15568
    }
15569
  else
15570
    inst.instruction |= 0xf;
15571
 
15572
  if (thumb_mode)
15573
    inst.instruction |= 0xf9000000;
15574
  else
15575
    inst.instruction |= 0xf4000000;
15576
}
15577
 
15578
/* Overall per-instruction processing.  */
15579
 
15580
/* We need to be able to fix up arbitrary expressions in some statements.
15581
   This is so that we can handle symbols that are an arbitrary distance from
15582
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
15583
   which returns part of an address in a form which will be valid for
15584
   a data instruction.  We do this by pushing the expression into a symbol
15585
   in the expr_section, and creating a fix for that.  */
15586
 
15587
static void
15588
fix_new_arm (fragS *       frag,
15589
             int           where,
15590
             short int     size,
15591
             expressionS * exp,
15592
             int           pc_rel,
15593
             int           reloc)
15594
{
15595
  fixS *           new_fix;
15596
 
15597
  switch (exp->X_op)
15598
    {
15599
    case O_constant:
15600
      if (pc_rel)
15601
        {
15602
          /* Create an absolute valued symbol, so we have something to
15603
             refer to in the object file.  Unfortunately for us, gas's
15604
             generic expression parsing will already have folded out
15605
             any use of .set foo/.type foo %function that may have
15606
             been used to set type information of the target location,
15607
             that's being specified symbolically.  We have to presume
15608
             the user knows what they are doing.  */
15609
          char name[16 + 8];
15610
          symbolS *symbol;
15611
 
15612
          sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
15613
 
15614
          symbol = symbol_find_or_make (name);
15615
          S_SET_SEGMENT (symbol, absolute_section);
15616
          symbol_set_frag (symbol, &zero_address_frag);
15617
          S_SET_VALUE (symbol, exp->X_add_number);
15618
          exp->X_op = O_symbol;
15619
          exp->X_add_symbol = symbol;
15620
          exp->X_add_number = 0;
15621
        }
15622
      /* FALLTHROUGH */
15623
    case O_symbol:
15624
    case O_add:
15625
    case O_subtract:
15626
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
15627
                             (enum bfd_reloc_code_real) reloc);
15628
      break;
15629
 
15630
    default:
15631
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
15632
                                  pc_rel, (enum bfd_reloc_code_real) reloc);
15633
      break;
15634
    }
15635
 
15636
  /* Mark whether the fix is to a THUMB instruction, or an ARM
15637
     instruction.  */
15638
  new_fix->tc_fix_data = thumb_mode;
15639
}
15640
 
15641
/* Create a frag for an instruction requiring relaxation.  */
15642
static void
15643
output_relax_insn (void)
15644
{
15645
  char * to;
15646
  symbolS *sym;
15647
  int offset;
15648
 
15649
  /* The size of the instruction is unknown, so tie the debug info to the
15650
     start of the instruction.  */
15651
  dwarf2_emit_insn (0);
15652
 
15653
  switch (inst.reloc.exp.X_op)
15654
    {
15655
    case O_symbol:
15656
      sym = inst.reloc.exp.X_add_symbol;
15657
      offset = inst.reloc.exp.X_add_number;
15658
      break;
15659
    case O_constant:
15660
      sym = NULL;
15661
      offset = inst.reloc.exp.X_add_number;
15662
      break;
15663
    default:
15664
      sym = make_expr_symbol (&inst.reloc.exp);
15665
      offset = 0;
15666
      break;
15667
    }
15668
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
15669
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
15670
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
15671
}
15672
 
15673
/* Write a 32-bit thumb instruction to buf.  */
15674
static void
15675
put_thumb32_insn (char * buf, unsigned long insn)
15676
{
15677
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
15678
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
15679
}
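/* Note: the 32-bit Thumb encoding is emitted above as two consecutive
   halfwords, most-significant halfword first; md_number_to_chars applies the
   target byte order within each halfword.  */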
15680
 
15681
static void
15682
output_inst (const char * str)
15683
{
15684
  char * to = NULL;
15685
 
15686
  if (inst.error)
15687
    {
15688
      as_bad ("%s -- `%s'", inst.error, str);
15689
      return;
15690
    }
15691
  if (inst.relax)
15692
    {
15693
      output_relax_insn ();
15694
      return;
15695
    }
15696
  if (inst.size == 0)
15697
    return;
15698
 
15699
  to = frag_more (inst.size);
15700
  /* PR 9814: Record the thumb mode into the current frag so that we know
15701
     what type of NOP padding to use, if necessary.  We override any previous
15702
     setting so that if the mode has changed then the NOPS that we use will
15703
     match the encoding of the last instruction in the frag.  */
15704
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
15705
 
15706
  if (thumb_mode && (inst.size > THUMB_SIZE))
15707
    {
15708
      gas_assert (inst.size == (2 * THUMB_SIZE));
15709
      put_thumb32_insn (to, inst.instruction);
15710
    }
15711
  else if (inst.size > INSN_SIZE)
15712
    {
15713
      gas_assert (inst.size == (2 * INSN_SIZE));
15714
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
15715
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
15716
    }
15717
  else
15718
    md_number_to_chars (to, inst.instruction, inst.size);
15719
 
15720
  if (inst.reloc.type != BFD_RELOC_UNUSED)
15721
    fix_new_arm (frag_now, to - frag_now->fr_literal,
15722
                 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
15723
                 inst.reloc.type);
15724
 
15725
  dwarf2_emit_insn (inst.size);
15726
}
15727
 
15728
static char *
15729
output_it_inst (int cond, int mask, char * to)
15730
{
15731
  unsigned long instruction = 0xbf00;
15732
 
15733
  mask &= 0xf;
15734
  instruction |= mask;
15735
  instruction |= cond << 4;
15736
 
15737
  if (to == NULL)
15738
    {
15739
      to = frag_more (2);
15740
#ifdef OBJ_ELF
15741
      dwarf2_emit_insn (2);
15742
#endif
15743
    }
15744
 
15745
  md_number_to_chars (to, instruction, 2);
15746
 
15747
  return to;
15748
}
15749
 
15750
/* Tag values used in struct asm_opcode's tag field.  */
15751
enum opcode_tag
15752
{
15753
  OT_unconditional,     /* Instruction cannot be conditionalized.
15754
                           The ARM condition field is still 0xE.  */
15755
  OT_unconditionalF,    /* Instruction cannot be conditionalized
15756
                           and carries 0xF in its ARM condition field.  */
15757
  OT_csuffix,           /* Instruction takes a conditional suffix.  */
15758
  OT_csuffixF,          /* Some forms of the instruction take a conditional
15759
                           suffix, others place 0xF where the condition field
15760
                           would be.  */
15761
  OT_cinfix3,           /* Instruction takes a conditional infix,
15762
                           beginning at character index 3.  (In
15763
                           unified mode, it becomes a suffix.)  */
15764
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
15765
                            tsts, cmps, cmns, and teqs. */
15766
  OT_cinfix3_legacy,    /* Legacy instruction takes a conditional infix at
15767
                           character index 3, even in unified mode.  Used for
15768
                           legacy instructions where suffix and infix forms
15769
                           may be ambiguous.  */
15770
  OT_csuf_or_in3,       /* Instruction takes either a conditional
15771
                           suffix or an infix at character index 3.  */
15772
  OT_odd_infix_unc,     /* This is the unconditional variant of an
15773
                           instruction that takes a conditional infix
15774
                           at an unusual position.  In unified mode,
15775
                           this variant will accept a suffix.  */
15776
  OT_odd_infix_0        /* Values greater than or equal to OT_odd_infix_0
15777
                           are the conditional variants of instructions that
15778
                           take conditional infixes in unusual positions.
15779
                           The infix appears at character index
15780
                           (tag - OT_odd_infix_0).  These are not accepted
15781
                           in unified mode.  */
15782
};
15783
 
15784
/* Subroutine of md_assemble, responsible for looking up the primary
15785
   opcode from the mnemonic the user wrote.  STR points to the
15786
   beginning of the mnemonic.
15787
 
15788
   This is not simply a hash table lookup, because of conditional
15789
   variants.  Most instructions have conditional variants, which are
15790
   expressed with a _conditional affix_ to the mnemonic.  If we were
15791
   to encode each conditional variant as a literal string in the opcode
15792
   table, it would have approximately 20,000 entries.
15793
 
15794
   Most mnemonics take this affix as a suffix, and in unified syntax,
15795
   'most' is upgraded to 'all'.  However, in the divided syntax, some
15796
   instructions take the affix as an infix, notably the s-variants of
15797
   the arithmetic instructions.  Of those instructions, all but six
15798
   have the infix appear after the third character of the mnemonic.
15799
 
15800
   Accordingly, the algorithm for looking up primary opcodes given
15801
   an identifier is:
15802
 
15803
   1. Look up the identifier in the opcode table.
15804
      If we find a match, go to step U.
15805
 
15806
   2. Look up the last two characters of the identifier in the
15807
      conditions table.  If we find a match, look up the first N-2
15808
      characters of the identifier in the opcode table.  If we
15809
      find a match, go to step CE.
15810
 
15811
   3. Look up the fourth and fifth characters of the identifier in
15812
      the conditions table.  If we find a match, extract those
15813
      characters from the identifier, and look up the remaining
15814
      characters in the opcode table.  If we find a match, go
15815
      to step CM.
15816
 
15817
   4. Fail.
15818
 
15819
   U. Examine the tag field of the opcode structure, in case this is
15820
      one of the six instructions with its conditional infix in an
15821
      unusual place.  If it is, the tag tells us where to find the
15822
      infix; look it up in the conditions table and set inst.cond
15823
      accordingly.  Otherwise, this is an unconditional instruction.
15824
      Again set inst.cond accordingly.  Return the opcode structure.
15825
 
15826
  CE. Examine the tag field to make sure this is an instruction that
15827
      should receive a conditional suffix.  If it is not, fail.
15828
      Otherwise, set inst.cond from the suffix we already looked up,
15829
      and return the opcode structure.
15830
 
15831
  CM. Examine the tag field to make sure this is an instruction that
15832
      should receive a conditional infix after the third character.
15833
      If it is not, fail.  Otherwise, undo the edits to the current
15834
      line of input and proceed as for case CE.  */
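/* As a rough illustration of the algorithm above: for "addseq" (unified
   syntax) step 2 finds the condition "eq" in the last two characters and
   "adds" in the opcode table, giving case CE; for the divided-syntax form
   "addeqs", step 3 finds "eq" at characters four and five, removes it, finds
   "adds", and proceeds as case CM.  */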
15835
 
15836
static const struct asm_opcode *
15837
opcode_lookup (char **str)
15838
{
15839
  char *end, *base;
15840
  char *affix;
15841
  const struct asm_opcode *opcode;
15842
  const struct asm_cond *cond;
15843
  char save[2];
15844
 
15845
  /* Scan up to the end of the mnemonic, which must end in white space,
15846
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
15847
  for (base = end = *str; *end != '\0'; end++)
15848
    if (*end == ' ' || *end == '.')
15849
      break;
15850
 
15851
  if (end == base)
15852
    return NULL;
15853
 
15854
  /* Handle a possible width suffix and/or Neon type suffix.  */
15855
  if (end[0] == '.')
15856
    {
15857
      int offset = 2;
15858
 
15859
      /* The .w and .n suffixes are only valid if the unified syntax is in
15860
         use.  */
15861
      if (unified_syntax && end[1] == 'w')
15862
        inst.size_req = 4;
15863
      else if (unified_syntax && end[1] == 'n')
15864
        inst.size_req = 2;
15865
      else
15866
        offset = 0;
15867
 
15868
      inst.vectype.elems = 0;
15869
 
15870
      *str = end + offset;
15871
 
15872
      if (end[offset] == '.')
15873
        {
15874
          /* See if we have a Neon type suffix (possible in either unified or
15875
             non-unified ARM syntax mode).  */
15876
          if (parse_neon_type (&inst.vectype, str) == FAIL)
15877
            return NULL;
15878
        }
15879
      else if (end[offset] != '\0' && end[offset] != ' ')
15880
        return NULL;
15881
    }
15882
  else
15883
    *str = end;
15884
 
15885
  /* Look for unaffixed or special-case affixed mnemonic.  */
15886
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
15887
                                                    end - base);
15888
  if (opcode)
15889
    {
15890
      /* step U */
15891
      if (opcode->tag < OT_odd_infix_0)
15892
        {
15893
          inst.cond = COND_ALWAYS;
15894
          return opcode;
15895
        }
15896
 
15897
      if (warn_on_deprecated && unified_syntax)
15898
        as_warn (_("conditional infixes are deprecated in unified syntax"));
15899
      affix = base + (opcode->tag - OT_odd_infix_0);
15900
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
15901
      gas_assert (cond);
15902
 
15903
      inst.cond = cond->value;
15904
      return opcode;
15905
    }
15906
 
15907
  /* Cannot have a conditional suffix on a mnemonic of fewer than three
15908
     characters (two for the suffix, at least one for the base).  */
15909
  if (end - base < 3)
15910
    return NULL;
15911
 
15912
  /* Look for suffixed mnemonic.  */
15913
  affix = end - 2;
15914
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
15915
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
15916
                                                    affix - base);
15917
  if (opcode && cond)
15918
    {
15919
      /* step CE */
15920
      switch (opcode->tag)
15921
        {
15922
        case OT_cinfix3_legacy:
15923
          /* Ignore conditional suffixes matched on infix only mnemonics.  */
15924
          break;
15925
 
15926
        case OT_cinfix3:
15927
        case OT_cinfix3_deprecated:
15928
        case OT_odd_infix_unc:
15929
          if (!unified_syntax)
15930
            return 0;
15931
          /* else fall through */
15932
 
15933
        case OT_csuffix:
15934
        case OT_csuffixF:
15935
        case OT_csuf_or_in3:
15936
          inst.cond = cond->value;
15937
          return opcode;
15938
 
15939
        case OT_unconditional:
15940
        case OT_unconditionalF:
15941
          if (thumb_mode)
15942
            inst.cond = cond->value;
15943
          else
15944
            {
15945
              /* Delayed diagnostic.  */
15946
              inst.error = BAD_COND;
15947
              inst.cond = COND_ALWAYS;
15948
            }
15949
          return opcode;
15950
 
15951
        default:
15952
          return NULL;
15953
        }
15954
    }
15955
 
15956
  /* Cannot have a usual-position infix on a mnemonic of less than
15957
     six characters (five would be a suffix).  */
15958
  if (end - base < 6)
15959
    return NULL;
15960
 
15961
  /* Look for infixed mnemonic in the usual position.  */
15962
  affix = base + 3;
15963
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
15964
  if (!cond)
15965
    return NULL;
15966
 
15967
  memcpy (save, affix, 2);
15968
  memmove (affix, affix + 2, (end - affix) - 2);
15969
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
15970
                                                    (end - base) - 2);
15971
  memmove (affix + 2, affix, (end - affix) - 2);
15972
  memcpy (affix, save, 2);
15973
 
15974
  if (opcode
15975
      && (opcode->tag == OT_cinfix3
15976
          || opcode->tag == OT_cinfix3_deprecated
15977
          || opcode->tag == OT_csuf_or_in3
15978
          || opcode->tag == OT_cinfix3_legacy))
15979
    {
15980
      /* Step CM.  */
15981
      if (warn_on_deprecated && unified_syntax
15982
          && (opcode->tag == OT_cinfix3
15983
              || opcode->tag == OT_cinfix3_deprecated))
15984
        as_warn (_("conditional infixes are deprecated in unified syntax"));
15985
 
15986
      inst.cond = cond->value;
15987
      return opcode;
15988
    }
15989
 
15990
  return NULL;
15991
}
15992
 
15993
/* This function generates an initial IT instruction, leaving its block
15994
   virtually open for the new instructions. Eventually,
15995
   the mask will be updated by now_it_add_mask () each time
15996
   a new instruction needs to be included in the IT block.
15997
   Finally, the block is closed with close_automatic_it_block ().
15998
   The block closure can be requested either from md_assemble (),
15999
   a tencode (), or due to a label hook.  */
16000
 
16001
static void
16002
new_automatic_it_block (int cond)
16003
{
16004
  now_it.state = AUTOMATIC_IT_BLOCK;
16005
  now_it.mask = 0x18;
16006
  now_it.cc = cond;
16007
  now_it.block_length = 1;
16008
  mapping_state (MAP_THUMB);
16009
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
16010
}
16011
 
16012
/* Close an automatic IT block.
16013
   See comments in new_automatic_it_block ().  */
16014
 
16015
static void
16016
close_automatic_it_block (void)
16017
{
16018
  now_it.mask = 0x10;
16019
  now_it.block_length = 0;
16020
}
16021
 
16022
/* Update the mask of the current automatically-generated IT
16023
   instruction. See comments in new_automatic_it_block ().  */
16024
 
16025
static void
16026
now_it_add_mask (int cond)
16027
{
16028
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
16029
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
16030
                                              | ((bitvalue) << (nbit)))
16031
  const int resulting_bit = (cond & 1);
16032
 
16033
  now_it.mask &= 0xf;
16034
  now_it.mask = SET_BIT_VALUE (now_it.mask,
16035
                                   resulting_bit,
16036
                                  (5 - now_it.block_length));
16037
  now_it.mask = SET_BIT_VALUE (now_it.mask,
16038
                                   1,
16039
                                   ((5 - now_it.block_length) - 1) );
16040
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);
16041
 
16042
#undef CLEAR_BIT
16043
#undef SET_BIT_VALUE
16044
}
16045
 
16046
/* The IT block handling machinery is accessed through these functions:
16047
     it_fsm_pre_encode ()               from md_assemble ()
16048
     set_it_insn_type ()                optional, from the tencode functions
16049
     set_it_insn_type_last ()           ditto
16050
     in_it_block ()                     ditto
16051
     it_fsm_post_encode ()              from md_assemble ()
16052
     force_automatic_it_block_close ()  from label handling functions
16053
 
16054
   Rationale:
16055
     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
16056
        initializing the IT insn type with a generic initial value depending
16057
        on the inst.condition.
16058
     2) During the tencode function, two things may happen:
16059
        a) The tencode function overrides the IT insn type by
16060
           calling either set_it_insn_type (type) or set_it_insn_type_last ().
16061
        b) The tencode function queries the IT block state by
16062
           calling in_it_block () (i.e. to determine narrow/not narrow mode).
16063
 
16064
        Both set_it_insn_type and in_it_block run the internal FSM state
16065
        handling function (handle_it_state), because: a) setting the IT insn
16066
        type may result in an invalid state (exiting the function),
16067
        and b) querying the state requires the FSM to be updated.
16068
        Specifically we want to avoid creating an IT block for conditional
16069
        branches, so it_fsm_pre_encode is actually a guess and we can't
16070
        determine whether an IT block is required until the tencode () routine
16071
        has decided what type of instruction this actually is.
16072
        Because of this, if set_it_insn_type and in_it_block have to be used,
16073
        set_it_insn_type has to be called first.
16074
 
16075
        set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
16076
        determines the insn IT type depending on the inst.cond code.
16077
        When a tencode () routine encodes an instruction that can be
16078
        either outside an IT block, or, in the case of being inside, has to be
16079
        the last one, set_it_insn_type_last () will determine the proper
16080
        IT instruction type based on the inst.cond code. Otherwise,
16081
        set_it_insn_type can be called for overriding that logic or
16082
        for covering other cases.
16083
 
16084
        Calling handle_it_state () may not transition the IT block state to
16085
        OUTSIDE_IT_BLOCK immediately, since the (current) state could be
16086
        still queried. Instead, if the FSM determines that the state should
16087
        be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
16088
        after the tencode () function: that's what it_fsm_post_encode () does.
16089
 
16090
        Since in_it_block () calls the state handling function to get an
16091
        updated state, an error may occur (due to invalid insns combination).
16092
        In that case, inst.error is set.
16093
        Therefore, inst.error has to be checked after the execution of
16094
        the tencode () routine.
16095
 
16096
     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
16097
        any pending state change (if any) that didn't take place in
16098
        handle_it_state () as explained above.  */
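/* For example, with implicit IT generation enabled for Thumb-2, a conditional
   instruction such as "addeq r0, r0, #1" written outside an IT block causes
   new_automatic_it_block () to emit an "it eq" for it; following compatible
   conditional instructions extend that block via now_it_add_mask () until it
   holds four instructions, an incompatible condition appears, or an
   unconditional instruction forces it closed.  */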
16099
 
16100
static void
16101
it_fsm_pre_encode (void)
16102
{
16103
  if (inst.cond != COND_ALWAYS)
16104
    inst.it_insn_type = INSIDE_IT_INSN;
16105
  else
16106
    inst.it_insn_type = OUTSIDE_IT_INSN;
16107
 
16108
  now_it.state_handled = 0;
16109
}
16110
 
16111
/* IT state FSM handling function.  */
16112
 
16113
static int
16114
handle_it_state (void)
16115
{
16116
  now_it.state_handled = 1;
16117
 
16118
  switch (now_it.state)
16119
    {
16120
    case OUTSIDE_IT_BLOCK:
16121
      switch (inst.it_insn_type)
16122
        {
16123
        case OUTSIDE_IT_INSN:
16124
          break;
16125
 
16126
        case INSIDE_IT_INSN:
16127
        case INSIDE_IT_LAST_INSN:
16128
          if (thumb_mode == 0)
16129
            {
16130
              if (unified_syntax
16131
                  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
16132
                as_tsktsk (_("Warning: conditional outside an IT block"\
16133
                             " for Thumb."));
16134
            }
16135
          else
16136
            {
16137
              if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
16138
                  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
16139
                {
16140
                  /* Automatically generate the IT instruction.  */
16141
                  new_automatic_it_block (inst.cond);
16142
                  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
16143
                    close_automatic_it_block ();
16144
                }
16145
              else
16146
                {
16147
                  inst.error = BAD_OUT_IT;
16148
                  return FAIL;
16149
                }
16150
            }
16151
          break;
16152
 
16153
        case IF_INSIDE_IT_LAST_INSN:
16154
        case NEUTRAL_IT_INSN:
16155
          break;
16156
 
16157
        case IT_INSN:
16158
          now_it.state = MANUAL_IT_BLOCK;
16159
          now_it.block_length = 0;
16160
          break;
16161
        }
16162
      break;
16163
 
16164
    case AUTOMATIC_IT_BLOCK:
16165
      /* Three things may happen now:
16166
         a) We should increment current it block size;
16167
         b) We should close current it block (closing insn or 4 insns);
16168
         c) We should close current it block and start a new one (due
16169
         to incompatible conditions or
16170
         4 insns-length block reached).  */
16171
 
16172
      switch (inst.it_insn_type)
16173
        {
16174
        case OUTSIDE_IT_INSN:
16175
          /* The closure of the block shall happen immediately,
16176
             so any in_it_block () call reports the block as closed.  */
16177
          force_automatic_it_block_close ();
16178
          break;
16179
 
16180
        case INSIDE_IT_INSN:
16181
        case INSIDE_IT_LAST_INSN:
16182
        case IF_INSIDE_IT_LAST_INSN:
16183
          now_it.block_length++;
16184
 
16185
          if (now_it.block_length > 4
16186
              || !now_it_compatible (inst.cond))
16187
            {
16188
              force_automatic_it_block_close ();
16189
              if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
16190
                new_automatic_it_block (inst.cond);
16191
            }
16192
          else
16193
            {
16194
              now_it_add_mask (inst.cond);
16195
            }
16196
 
16197
          if (now_it.state == AUTOMATIC_IT_BLOCK
16198
              && (inst.it_insn_type == INSIDE_IT_LAST_INSN
16199
                  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
16200
            close_automatic_it_block ();
16201
          break;
16202
 
16203
        case NEUTRAL_IT_INSN:
16204
          now_it.block_length++;
16205
 
16206
          if (now_it.block_length > 4)
16207
            force_automatic_it_block_close ();
16208
          else
16209
            now_it_add_mask (now_it.cc & 1);
16210
          break;
16211
 
16212
        case IT_INSN:
16213
          close_automatic_it_block ();
16214
          now_it.state = MANUAL_IT_BLOCK;
16215
          break;
16216
        }
16217
      break;
16218
 
16219
    case MANUAL_IT_BLOCK:
16220
      {
16221
        /* Check conditional suffixes.  */
16222
        const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
16223
        int is_last;
16224
        now_it.mask <<= 1;
16225
        now_it.mask &= 0x1f;
16226
        is_last = (now_it.mask == 0x10);
16227
 
16228
        switch (inst.it_insn_type)
16229
          {
16230
          case OUTSIDE_IT_INSN:
16231
            inst.error = BAD_NOT_IT;
16232
            return FAIL;
16233
 
16234
          case INSIDE_IT_INSN:
16235
            if (cond != inst.cond)
16236
              {
16237
                inst.error = BAD_IT_COND;
16238
                return FAIL;
16239
              }
16240
            break;
16241
 
16242
          case INSIDE_IT_LAST_INSN:
16243
          case IF_INSIDE_IT_LAST_INSN:
16244
            if (cond != inst.cond)
16245
              {
16246
                inst.error = BAD_IT_COND;
16247
                return FAIL;
16248
              }
16249
            if (!is_last)
16250
              {
16251
                inst.error = BAD_BRANCH;
16252
                return FAIL;
16253
              }
16254
            break;
16255
 
16256
          case NEUTRAL_IT_INSN:
16257
            /* The BKPT instruction is unconditional even in an IT block.  */
16258
            break;
16259
 
16260
          case IT_INSN:
16261
            inst.error = BAD_IT_IT;
16262
            return FAIL;
16263
          }
16264
      }
16265
      break;
16266
    }
16267
 
16268
  return SUCCESS;
16269
}
16270
 
16271
static void
16272
it_fsm_post_encode (void)
16273
{
16274
  int is_last;
16275
 
16276
  if (!now_it.state_handled)
16277
    handle_it_state ();
16278
 
16279
  is_last = (now_it.mask == 0x10);
16280
  if (is_last)
16281
    {
16282
      now_it.state = OUTSIDE_IT_BLOCK;
16283
      now_it.mask = 0;
16284
    }
16285
}
16286
 
16287
static void
16288
force_automatic_it_block_close (void)
16289
{
16290
  if (now_it.state == AUTOMATIC_IT_BLOCK)
16291
    {
16292
      close_automatic_it_block ();
16293
      now_it.state = OUTSIDE_IT_BLOCK;
16294
      now_it.mask = 0;
16295
    }
16296
}
16297
 
16298
static int
16299
in_it_block (void)
16300
{
16301
  if (!now_it.state_handled)
16302
    handle_it_state ();
16303
 
16304
  return now_it.state != OUTSIDE_IT_BLOCK;
16305
}
16306
 
16307
void
16308
md_assemble (char *str)
16309
{
16310
  char *p = str;
16311
  const struct asm_opcode * opcode;
16312
 
16313
  /* Align the previous label if needed.  */
16314
  if (last_label_seen != NULL)
16315
    {
16316
      symbol_set_frag (last_label_seen, frag_now);
16317
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
16318
      S_SET_SEGMENT (last_label_seen, now_seg);
16319
    }
16320
 
16321
  memset (&inst, '\0', sizeof (inst));
16322
  inst.reloc.type = BFD_RELOC_UNUSED;
16323
 
16324
  opcode = opcode_lookup (&p);
16325
  if (!opcode)
16326
    {
16327
      /* It wasn't an instruction, but it might be a register alias of
16328
         the form alias .req reg, or a Neon .dn/.qn directive.  */
16329
      if (! create_register_alias (str, p)
16330
          && ! create_neon_reg_alias (str, p))
16331
        as_bad (_("bad instruction `%s'"), str);
16332
 
16333
      return;
16334
    }
16335
 
16336
  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
16337
    as_warn (_("s suffix on comparison instruction is deprecated"));
16338
 
16339
  /* The value which unconditional instructions should have in place of the
16340
     condition field.  */
16341
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
16342
 
16343
  if (thumb_mode)
16344
    {
16345
      arm_feature_set variant;
16346
 
16347
      variant = cpu_variant;
16348
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
16349
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
16350
        ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
16351
      /* Check that this instruction is supported for this CPU.  */
16352
      if (!opcode->tvariant
16353
          || (thumb_mode == 1
16354
              && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
16355
        {
16356
          as_bad (_("selected processor does not support Thumb mode `%s'"), str);
16357
          return;
16358
        }
16359
      if (inst.cond != COND_ALWAYS && !unified_syntax
16360
          && opcode->tencode != do_t_branch)
16361
        {
16362
          as_bad (_("Thumb does not support conditional execution"));
16363
          return;
16364
        }
16365
 
16366
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
16367
        {
16368
          if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
16369
              && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
16370
                   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
16371
            {
16372
              /* Two things are addressed here.
16373
                 1) Implicitly require narrow instructions on Thumb-1.
16374
                    This avoids relaxation accidentally introducing Thumb-2
16375
                     instructions.
16376
                 2) Reject wide instructions in non-Thumb-2 cores.  */
16377
              if (inst.size_req == 0)
16378
                inst.size_req = 2;
16379
              else if (inst.size_req == 4)
16380
                {
16381
                  as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
16382
                  return;
16383
                }
16384
            }
16385
        }
16386
 
16387
      inst.instruction = opcode->tvalue;
16388
 
16389
      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
16390
        {
16391
          /* Prepare the it_insn_type for those encodings that don't set
16392
             it.  */
16393
          it_fsm_pre_encode ();
16394
 
16395
          opcode->tencode ();
16396
 
16397
          it_fsm_post_encode ();
16398
        }
16399
 
16400
      if (!(inst.error || inst.relax))
16401
        {
16402
          gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
16403
          inst.size = (inst.instruction > 0xffff ? 4 : 2);
16404
          if (inst.size_req && inst.size_req != inst.size)
16405
            {
16406
              as_bad (_("cannot honor width suffix -- `%s'"), str);
16407
              return;
16408
            }
16409
        }
16410
 
16411
      /* Something has gone badly wrong if we try to relax a fixed size
16412
         instruction.  */
16413
      gas_assert (inst.size_req == 0 || !inst.relax);
16414
 
16415
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16416
                              *opcode->tvariant);
16417
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
16418
         set those bits when Thumb-2 32-bit instructions are seen.  ie.
16419
         anything other than bl/blx and v6-M instructions.
16420
         This is overly pessimistic for relaxable instructions.  */
16421
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
16422
           || inst.relax)
16423
          && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
16424
               || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
16425
        ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16426
                                arm_ext_v6t2);
16427
 
16428
      check_neon_suffixes;
16429
 
16430
      if (!inst.error)
16431
        {
16432
          mapping_state (MAP_THUMB);
16433
        }
16434
    }
16435
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
16436
    {
16437
      bfd_boolean is_bx;
16438
 
16439
      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
16440
      is_bx = (opcode->aencode == do_bx);
16441
 
16442
      /* Check that this instruction is supported for this CPU.  */
16443
      if (!(is_bx && fix_v4bx)
16444
          && !(opcode->avariant &&
16445
               ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
16446
        {
16447
          as_bad (_("selected processor does not support ARM mode `%s'"), str);
16448
          return;
16449
        }
16450
      if (inst.size_req)
16451
        {
16452
          as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
16453
          return;
16454
        }
16455
 
16456
      inst.instruction = opcode->avalue;
16457
      if (opcode->tag == OT_unconditionalF)
16458
        inst.instruction |= 0xF << 28;
16459
      else
16460
        inst.instruction |= inst.cond << 28;
16461
      inst.size = INSN_SIZE;
16462
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
16463
        {
16464
          it_fsm_pre_encode ();
16465
          opcode->aencode ();
16466
          it_fsm_post_encode ();
16467
        }
16468
      /* Arm mode bx is marked as both v4T and v5 because it's still required
16469
         on a hypothetical non-thumb v5 core.  */
16470
      if (is_bx)
16471
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
16472
      else
16473
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
16474
                                *opcode->avariant);
16475
 
16476
      check_neon_suffixes;
16477
 
16478
      if (!inst.error)
16479
        {
16480
          mapping_state (MAP_ARM);
16481
        }
16482
    }
16483
  else
16484
    {
16485
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
16486
                "-- `%s'"), str);
16487
      return;
16488
    }
16489
  output_inst (str);
16490
}
16491
 
16492
static void
16493
check_it_blocks_finished (void)
16494
{
16495
#ifdef OBJ_ELF
16496
  asection *sect;
16497
 
16498
  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
16499
    if (seg_info (sect)->tc_segment_info_data.current_it.state
16500
        == MANUAL_IT_BLOCK)
16501
      {
16502
        as_warn (_("section '%s' finished with an open IT block."),
16503
                 sect->name);
16504
      }
16505
#else
16506
  if (now_it.state == MANUAL_IT_BLOCK)
16507
    as_warn (_("file finished with an open IT block."));
16508
#endif
16509
}
16510
 
16511
/* Various frobbings of labels and their addresses.  */
16512
 
16513
void
16514
arm_start_line_hook (void)
16515
{
16516
  last_label_seen = NULL;
16517
}
16518
 
16519
void
16520
arm_frob_label (symbolS * sym)
16521
{
16522
  last_label_seen = sym;
16523
 
16524
  ARM_SET_THUMB (sym, thumb_mode);
16525
 
16526
#if defined OBJ_COFF || defined OBJ_ELF
16527
  ARM_SET_INTERWORK (sym, support_interwork);
16528
#endif
16529
 
16530
  force_automatic_it_block_close ();
16531
 
16532
  /* Note - do not allow local symbols (.Lxxx) to be labelled
16533
     as Thumb functions.  This is because these labels, whilst
16534
     they exist inside Thumb code, are not the entry points for
16535
     possible ARM->Thumb calls.  Also, these labels can be used
16536
     as part of a computed goto or switch statement.  eg gcc
16537
     can generate code that looks like this:
16538
 
16539
                ldr  r2, [pc, .Laaa]
16540
                lsl  r3, r3, #2
16541
                ldr  r2, [r3, r2]
16542
                mov  pc, r2
16543
 
16544
       .Lbbb:  .word .Lxxx
16545
       .Lccc:  .word .Lyyy
16546
       ..etc...
16547
       .Laaa:   .word Lbbb
16548
 
16549
     The first instruction loads the address of the jump table.
16550
     The second instruction converts a table index into a byte offset.
16551
     The third instruction gets the jump address out of the table.
16552
     The fourth instruction performs the jump.
16553
 
16554
     If the address stored at .Laaa is that of a symbol which has the
16555
     Thumb_Func bit set, then the linker will arrange for this address
16556
     to have the bottom bit set, which in turn would mean that the
16557
     address computation performed by the third instruction would end
16558
     up with the bottom bit set.  Since the ARM is capable of unaligned
16559
     word loads, the instruction would then load the incorrect address
16560
     out of the jump table, and chaos would ensue.  */
16561
  if (label_is_thumb_function_name
16562
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
16563
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
16564
    {
16565
      /* When the address of a Thumb function is taken the bottom
16566
         bit of that address should be set.  This will allow
16567
         interworking between Arm and Thumb functions to work
16568
         correctly.  */
16569
 
16570
      THUMB_SET_FUNC (sym, 1);
16571
 
16572
      label_is_thumb_function_name = FALSE;
16573
    }
16574
 
16575
  dwarf2_emit_label (sym);
16576
}
16577
 
16578
bfd_boolean
16579
arm_data_in_code (void)
16580
{
16581
  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
16582
    {
16583
      *input_line_pointer = '/';
16584
      input_line_pointer += 5;
16585
      *input_line_pointer = 0;
16586
      return TRUE;
16587
    }
16588
 
16589
  return FALSE;
16590
}
16591
 
16592
char *
16593
arm_canonicalize_symbol_name (char * name)
16594
{
16595
  int len;
16596
 
16597
  if (thumb_mode && (len = strlen (name)) > 5
16598
      && streq (name + len - 5, "/data"))
16599
    *(name + len - 5) = 0;
16600
 
16601
  return name;
16602
}
16603
 
16604
/* Table of all register names defined by default.  The user can
16605
   define additional names with .req.  Note that all register names
16606
   should appear in both upper and lowercase variants.  Some registers
16607
   also have mixed-case names.  */
16608
 
16609
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
16610
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
16611
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
16612
#define REGSET(p,t) \
16613
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
16614
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
16615
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
16616
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
16617
#define REGSETH(p,t) \
16618
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
16619
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
16620
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
16621
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
16622
#define REGSET2(p,t) \
16623
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
16624
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
16625
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
16626
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
16627
#define SPLRBANK(base,bank,t) \
16628
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
16629
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
16630
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
16631
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
16632
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
16633
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
16634
 
16635
static const struct reg_entry reg_names[] =
16636
{
16637
  /* ARM integer registers.  */
16638
  REGSET(r, RN), REGSET(R, RN),
16639
 
16640
  /* ATPCS synonyms.  */
16641
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
16642
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
16643
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
16644
 
16645
  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
16646
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
16647
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
16648
 
16649
  /* Well-known aliases.  */
16650
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
16651
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
16652
 
16653
  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
16654
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
16655
 
16656
  /* Coprocessor numbers.  */
16657
  REGSET(p, CP), REGSET(P, CP),
16658
 
16659
  /* Coprocessor register numbers.  The "cr" variants are for backward
16660
     compatibility.  */
16661
  REGSET(c,  CN), REGSET(C, CN),
16662
  REGSET(cr, CN), REGSET(CR, CN),
16663
 
16664
  /* ARM banked registers.  */
16665
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
16666
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
16667
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
16668
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
16669
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
16670
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
16671
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
16672
 
16673
  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
16674
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
16675
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
16676
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
16677
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
16678
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(SP_fiq,512|(13<<16),RNB),
16679
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
16680
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
16681
 
16682
  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
16683
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
16684
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
16685
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
16686
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
16687
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
16688
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
16689
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
16690
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
16691
 
16692
  /* FPA registers.  */
16693
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
16694
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
16695
 
16696
  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
16697
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
16698
 
16699
  /* VFP SP registers.  */
16700
  REGSET(s,VFS),  REGSET(S,VFS),
16701
  REGSETH(s,VFS), REGSETH(S,VFS),
16702
 
16703
  /* VFP DP Registers.  */
16704
  REGSET(d,VFD),  REGSET(D,VFD),
16705
  /* Extra Neon DP registers.  */
16706
  REGSETH(d,VFD), REGSETH(D,VFD),
16707
 
16708
  /* Neon QP registers.  */
16709
  REGSET2(q,NQ),  REGSET2(Q,NQ),
16710
 
16711
  /* VFP control registers.  */
16712
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
16713
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
16714
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
16715
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
16716
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
16717
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
16718
 
16719
  /* Maverick DSP coprocessor registers.  */
16720
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
16721
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),
16722
 
16723
  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
16724
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
16725
  REGDEF(dspsc,0,DSPSC),
16726
 
16727
  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
16728
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
16729
  REGDEF(DSPSC,0,DSPSC),
16730
 
16731
  /* iWMMXt data registers - p0, c0-15.  */
16732
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
16733
 
16734
  /* iWMMXt control registers - p1, c0-3.  */
16735
  REGDEF(wcid,  0,MMXWC),  REGDEF(wCID,   0,MMXWC),  REGDEF(WCID,  0,MMXWC),
16736
  REGDEF(wcon,  1,MMXWC),  REGDEF(wCon,  1,MMXWC),  REGDEF(WCON,  1,MMXWC),
16737
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
16738
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),
16739
 
16740
  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
16741
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
16742
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
16743
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
16744
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),
16745
 
16746
  /* XScale accumulator registers.  */
16747
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
16748
};
16749
#undef REGDEF
16750
#undef REGNUM
16751
#undef REGSET
16752
 
16753
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
16754
   within psr_required_here.  */
16755
static const struct asm_psr psrs[] =
16756
{
16757
  /* Backward compatibility notation.  Note that "all" is no longer
16758
     truly all possible PSR bits.  */
16759
  {"all",  PSR_c | PSR_f},
16760
  {"flg",  PSR_f},
16761
  {"ctl",  PSR_c},
16762
 
16763
  /* Individual flags.  */
16764
  {"f",    PSR_f},
16765
  {"c",    PSR_c},
16766
  {"x",    PSR_x},
16767
  {"s",    PSR_s},
16768
 
16769
  /* Combinations of flags.  */
16770
  {"fs",   PSR_f | PSR_s},
16771
  {"fx",   PSR_f | PSR_x},
16772
  {"fc",   PSR_f | PSR_c},
16773
  {"sf",   PSR_s | PSR_f},
16774
  {"sx",   PSR_s | PSR_x},
16775
  {"sc",   PSR_s | PSR_c},
16776
  {"xf",   PSR_x | PSR_f},
16777
  {"xs",   PSR_x | PSR_s},
16778
  {"xc",   PSR_x | PSR_c},
16779
  {"cf",   PSR_c | PSR_f},
16780
  {"cs",   PSR_c | PSR_s},
16781
  {"cx",   PSR_c | PSR_x},
16782
  {"fsx",  PSR_f | PSR_s | PSR_x},
16783
  {"fsc",  PSR_f | PSR_s | PSR_c},
16784
  {"fxs",  PSR_f | PSR_x | PSR_s},
16785
  {"fxc",  PSR_f | PSR_x | PSR_c},
16786
  {"fcs",  PSR_f | PSR_c | PSR_s},
16787
  {"fcx",  PSR_f | PSR_c | PSR_x},
16788
  {"sfx",  PSR_s | PSR_f | PSR_x},
16789
  {"sfc",  PSR_s | PSR_f | PSR_c},
16790
  {"sxf",  PSR_s | PSR_x | PSR_f},
16791
  {"sxc",  PSR_s | PSR_x | PSR_c},
16792
  {"scf",  PSR_s | PSR_c | PSR_f},
16793
  {"scx",  PSR_s | PSR_c | PSR_x},
16794
  {"xfs",  PSR_x | PSR_f | PSR_s},
16795
  {"xfc",  PSR_x | PSR_f | PSR_c},
16796
  {"xsf",  PSR_x | PSR_s | PSR_f},
16797
  {"xsc",  PSR_x | PSR_s | PSR_c},
16798
  {"xcf",  PSR_x | PSR_c | PSR_f},
16799
  {"xcs",  PSR_x | PSR_c | PSR_s},
16800
  {"cfs",  PSR_c | PSR_f | PSR_s},
16801
  {"cfx",  PSR_c | PSR_f | PSR_x},
16802
  {"csf",  PSR_c | PSR_s | PSR_f},
16803
  {"csx",  PSR_c | PSR_s | PSR_x},
16804
  {"cxf",  PSR_c | PSR_x | PSR_f},
16805
  {"cxs",  PSR_c | PSR_x | PSR_s},
16806
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
16807
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
16808
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
16809
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
16810
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
16811
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
16812
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
16813
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
16814
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
16815
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
16816
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
16817
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
16818
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
16819
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
16820
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
16821
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
16822
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
16823
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
16824
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
16825
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
16826
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
16827
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
16828
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
16829
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
16830
};
16831
 
16832
/* Table of V7M psr names.  */
16833
static const struct asm_psr v7m_psrs[] =
16834
{
16835
  {"apsr",        0 }, {"APSR",          0 },
16836
  {"iapsr",       1 }, {"IAPSR",        1 },
16837
  {"eapsr",       2 }, {"EAPSR",        2 },
16838
  {"psr",         3 }, {"PSR",          3 },
16839
  {"xpsr",        3 }, {"XPSR",         3 }, {"xPSR",     3 },
16840
  {"ipsr",        5 }, {"IPSR",         5 },
16841
  {"epsr",        6 }, {"EPSR",         6 },
16842
  {"iepsr",       7 }, {"IEPSR",        7 },
16843
  {"msp",         8 }, {"MSP",          8 },
16844
  {"psp",         9 }, {"PSP",          9 },
16845
  {"primask",     16}, {"PRIMASK",      16},
16846
  {"basepri",     17}, {"BASEPRI",      17},
16847
  {"basepri_max", 18}, {"BASEPRI_MAX",  18},
16848
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
16849
  {"faultmask",   19}, {"FAULTMASK",    19},
16850
  {"control",     20}, {"CONTROL",      20}
16851
};
16852
 
16853
/* Table of all shift-in-operand names.  */
16854
static const struct asm_shift_name shift_names [] =
16855
{
16856
  { "asl", SHIFT_LSL },  { "ASL", SHIFT_LSL },
16857
  { "lsl", SHIFT_LSL },  { "LSL", SHIFT_LSL },
16858
  { "lsr", SHIFT_LSR },  { "LSR", SHIFT_LSR },
16859
  { "asr", SHIFT_ASR },  { "ASR", SHIFT_ASR },
16860
  { "ror", SHIFT_ROR },  { "ROR", SHIFT_ROR },
16861
  { "rrx", SHIFT_RRX },  { "RRX", SHIFT_RRX }
16862
};
16863
 
16864
/* Table of all explicit relocation names.  */
16865
#ifdef OBJ_ELF
16866
static struct reloc_entry reloc_names[] =
16867
{
16868
  { "got",     BFD_RELOC_ARM_GOT32   },  { "GOT",     BFD_RELOC_ARM_GOT32   },
16869
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },  { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
16870
  { "plt",     BFD_RELOC_ARM_PLT32   },  { "PLT",     BFD_RELOC_ARM_PLT32   },
16871
  { "target1", BFD_RELOC_ARM_TARGET1 },  { "TARGET1", BFD_RELOC_ARM_TARGET1 },
16872
  { "target2", BFD_RELOC_ARM_TARGET2 },  { "TARGET2", BFD_RELOC_ARM_TARGET2 },
16873
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },  { "SBREL",   BFD_RELOC_ARM_SBREL32 },
16874
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
16875
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
16876
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
16877
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
16878
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
16879
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
16880
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
16881
        { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
16882
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
16883
        { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
16884
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
16885
        { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
16886
};
16887
#endif
16888
 
16889
/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
16890
static const struct asm_cond conds[] =
16891
{
16892
  {"eq", 0x0},
16893
  {"ne", 0x1},
16894
  {"cs", 0x2}, {"hs", 0x2},
16895
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
16896
  {"mi", 0x4},
16897
  {"pl", 0x5},
16898
  {"vs", 0x6},
16899
  {"vc", 0x7},
16900
  {"hi", 0x8},
16901
  {"ls", 0x9},
16902
  {"ge", 0xa},
16903
  {"lt", 0xb},
16904
  {"gt", 0xc},
16905
  {"le", 0xd},
16906
  {"al", 0xe}
16907
};
16908
 
16909
static struct asm_barrier_opt barrier_opt_names[] =
16910
{
16911
  { "sy",    0xf }, { "SY",    0xf },
16912
  { "un",    0x7 }, { "UN",    0x7 },
16913
  { "st",    0xe }, { "ST",    0xe },
16914
  { "unst",  0x6 }, { "UNST",  0x6 },
16915
  { "ish",   0xb }, { "ISH",   0xb },
16916
  { "sh",    0xb }, { "SH",    0xb },
16917
  { "ishst", 0xa }, { "ISHST", 0xa },
16918
  { "shst",  0xa }, { "SHST",  0xa },
16919
  { "nsh",   0x7 }, { "NSH",   0x7 },
16920
  { "nshst", 0x6 }, { "NSHST", 0x6 },
16921
  { "osh",   0x3 }, { "OSH",   0x3 },
16922
  { "oshst", 0x2 }, { "OSHST", 0x2 }
16923
};
16924
 
16925
/* Table of ARM-format instructions.    */
16926
 
16927
/* Macros for gluing together operand strings.  N.B. In all cases
16928
   other than OPS0, the trailing OP_stop comes from default
16929
   zero-initialization of the unspecified elements of the array.  */
16930
#define OPS0()            { OP_stop, }
16931
#define OPS1(a)           { OP_##a, }
16932
#define OPS2(a,b)         { OP_##a,OP_##b, }
16933
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
16934
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
16935
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
16936
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
16937
 
16938
/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
16939
   This is useful when mixing operands for ARM and THUMB, i.e. using the
16940
   MIX_ARM_THUMB_OPERANDS macro.
16941
   In order to use these macros, prefix the number of operands with _
16942
   e.g. _3.  */
16943
#define OPS_1(a)           { a, }
16944
#define OPS_2(a,b)         { a,b, }
16945
#define OPS_3(a,b,c)       { a,b,c, }
16946
#define OPS_4(a,b,c,d)     { a,b,c,d, }
16947
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
16948
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
16949
 
16950
/* These macros abstract out the exact format of the mnemonic table and
16951
   save some repeated characters.  */
16952
 
16953
/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
16954
#define TxCE(mnem, op, top, nops, ops, ae, te) \
16955
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
16956
    THUMB_VARIANT, do_##ae, do_##te }
16957
 
16958
/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
16959
   a T_MNEM_xyz enumerator.  */
16960
#define TCE(mnem, aop, top, nops, ops, ae, te) \
16961
      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
16962
#define tCE(mnem, aop, top, nops, ops, ae, te) \
16963
      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16964
 
16965
/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
16966
   infix after the third character.  */
16967
#define TxC3(mnem, op, top, nops, ops, ae, te) \
16968
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
16969
    THUMB_VARIANT, do_##ae, do_##te }
16970
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
16971
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
16972
    THUMB_VARIANT, do_##ae, do_##te }
16973
#define TC3(mnem, aop, top, nops, ops, ae, te) \
16974
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
16975
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
16976
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
16977
#define tC3(mnem, aop, top, nops, ops, ae, te) \
16978
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16979
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
16980
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16981
 
16982
/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
16983
   appear in the condition table.  */
16984
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te)   \
16985
  { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
16986
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
16987
 
16988
#define TxCM(m1, m2, op, top, nops, ops, ae, te)        \
16989
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te),       \
16990
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te),       \
16991
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te),       \
16992
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te),       \
16993
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te),       \
16994
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te),       \
16995
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te),       \
16996
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te),       \
16997
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te),       \
16998
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te),       \
16999
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te),       \
17000
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te),       \
17001
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te),       \
17002
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te),       \
17003
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te),       \
17004
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te),       \
17005
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te),       \
17006
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te),       \
17007
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
17008
 
17009
#define TCM(m1,m2, aop, top, nops, ops, ae, te)         \
17010
      TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
17011
#define tCM(m1,m2, aop, top, nops, ops, ae, te)         \
17012
      TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
17013
 
17014
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
17015
   field is still 0xE.  Many of the Thumb variants can be executed
17016
   conditionally, so this is checked separately.  */
17017
#define TUE(mnem, op, top, nops, ops, ae, te)                           \
17018
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
17019
    THUMB_VARIANT, do_##ae, do_##te }
17020
 
17021
/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
17022
   condition code field.  */
17023
#define TUF(mnem, op, top, nops, ops, ae, te)                           \
17024
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
17025
    THUMB_VARIANT, do_##ae, do_##te }
17026
 
17027
/* ARM-only variants of all the above.  */
17028
#define CE(mnem,  op, nops, ops, ae)    \
17029
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17030
 
17031
#define C3(mnem, op, nops, ops, ae)     \
17032
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17033
 
17034
/* Legacy mnemonics that always have conditional infix after the third
17035
   character.  */
17036
#define CL(mnem, op, nops, ops, ae)     \
17037
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17038
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17039
 
17040
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
17041
#define cCE(mnem,  op, nops, ops, ae)   \
17042
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17043
 
17044
/* Legacy coprocessor instructions where conditional infix and conditional
17045
   suffix are ambiguous.  For consistency this includes all FPA instructions,
17046
   not just the potentially ambiguous ones.  */
17047
#define cCL(mnem, op, nops, ops, ae)    \
17048
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17049
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17050
 
17051
/* Coprocessor, takes either a suffix or a position-3 infix
17052
   (for an FPA corner case). */
17053
#define C3E(mnem, op, nops, ops, ae) \
17054
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
17055
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17056
 
17057
#define xCM_(m1, m2, m3, op, nops, ops, ae)     \
17058
  { m1 #m2 m3, OPS##nops ops, \
17059
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17060
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17061
 
17062
#define CM(m1, m2, op, nops, ops, ae)   \
17063
  xCM_ (m1,   , m2, op, nops, ops, ae), \
17064
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
17065
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
17066
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
17067
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
17068
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
17069
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
17070
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
17071
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
17072
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
17073
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
17074
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
17075
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
17076
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
17077
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
17078
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
17079
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
17080
  xCM_ (m1, le, m2, op, nops, ops, ae), \
17081
  xCM_ (m1, al, m2, op, nops, ops, ae)
17082
 
17083
#define UE(mnem, op, nops, ops, ae)     \
17084
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17085
 
17086
#define UF(mnem, op, nops, ops, ae)     \
17087
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17088
 
17089
/* Neon data-processing. ARM versions are unconditional with cond=0xf.
17090
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
17091
   use the same encoding function for each.  */
17092
#define NUF(mnem, op, nops, ops, enc)                                   \
17093
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,            \
17094
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17095
 
17096
/* Neon data processing, version which indirects through neon_enc_tab for
17097
   the various overloaded versions of opcodes.  */
17098
#define nUF(mnem, op, nops, ops, enc)                                   \
17099
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,    \
17100
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17101
 
17102
/* Neon insn with conditional suffix for the ARM version, non-overloaded
17103
   version.  */
17104
#define NCE_tag(mnem, op, nops, ops, enc, tag)                          \
17105
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,             \
17106
    THUMB_VARIANT, do_##enc, do_##enc }
17107
 
17108
#define NCE(mnem, op, nops, ops, enc)                                   \
17109
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17110
 
17111
#define NCEF(mnem, op, nops, ops, enc)                                  \
17112
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17113
 
17114
/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
17115
#define nCE_tag(mnem, op, nops, ops, enc, tag)                          \
17116
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,          \
17117
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17118
 
17119
#define nCE(mnem, op, nops, ops, enc)                                   \
17120
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17121
 
17122
#define nCEF(mnem, op, nops, ops, enc)                                  \
17123
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17124
 
17125
#define do_0 0
17126
 
17127
static const struct asm_opcode insns[] =
17128
{
17129
#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions.  */
17130
#define THUMB_VARIANT &arm_ext_v4t
17131
 tCE("and",     0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
17132
 tC3("ands",    0100000, _ands,    3, (RR, oRR, SH), arit, t_arit3c),
17133
 tCE("eor",     0200000, _eor,     3, (RR, oRR, SH), arit, t_arit3c),
17134
 tC3("eors",    0300000, _eors,    3, (RR, oRR, SH), arit, t_arit3c),
17135
 tCE("sub",     0400000, _sub,     3, (RR, oRR, SH), arit, t_add_sub),
17136
 tC3("subs",    0500000, _subs,    3, (RR, oRR, SH), arit, t_add_sub),
17137
 tCE("add",     0800000, _add,     3, (RR, oRR, SHG), arit, t_add_sub),
17138
 tC3("adds",    0900000, _adds,    3, (RR, oRR, SHG), arit, t_add_sub),
17139
 tCE("adc",     0a00000, _adc,     3, (RR, oRR, SH), arit, t_arit3c),
17140
 tC3("adcs",    0b00000, _adcs,    3, (RR, oRR, SH), arit, t_arit3c),
17141
 tCE("sbc",     0c00000, _sbc,     3, (RR, oRR, SH), arit, t_arit3),
17142
 tC3("sbcs",    0d00000, _sbcs,    3, (RR, oRR, SH), arit, t_arit3),
17143
 tCE("orr",     1800000, _orr,     3, (RR, oRR, SH), arit, t_arit3c),
17144
 tC3("orrs",    1900000, _orrs,    3, (RR, oRR, SH), arit, t_arit3c),
17145
 tCE("bic",     1c00000, _bic,     3, (RR, oRR, SH), arit, t_arit3),
17146
 tC3("bics",    1d00000, _bics,    3, (RR, oRR, SH), arit, t_arit3),
17147
 
17148
 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
17149
    for setting PSR flag bits.  They are obsolete in V6 and do not
17150
    have Thumb equivalents. */
17151
 tCE("tst",     1100000, _tst,     2, (RR, SH),      cmp,  t_mvn_tst),
17152
 tC3w("tsts",   1100000, _tst,     2, (RR, SH),      cmp,  t_mvn_tst),
17153
  CL("tstp",    110f000,           2, (RR, SH),      cmp),
17154
 tCE("cmp",     1500000, _cmp,     2, (RR, SH),      cmp,  t_mov_cmp),
17155
 tC3w("cmps",   1500000, _cmp,     2, (RR, SH),      cmp,  t_mov_cmp),
17156
  CL("cmpp",    150f000,           2, (RR, SH),      cmp),
17157
 tCE("cmn",     1700000, _cmn,     2, (RR, SH),      cmp,  t_mvn_tst),
17158
 tC3w("cmns",   1700000, _cmn,     2, (RR, SH),      cmp,  t_mvn_tst),
17159
  CL("cmnp",    170f000,           2, (RR, SH),      cmp),
17160
 
17161
 tCE("mov",     1a00000, _mov,     2, (RR, SH),      mov,  t_mov_cmp),
17162
 tC3("movs",    1b00000, _movs,    2, (RR, SH),      mov,  t_mov_cmp),
17163
 tCE("mvn",     1e00000, _mvn,     2, (RR, SH),      mov,  t_mvn_tst),
17164
 tC3("mvns",    1f00000, _mvns,    2, (RR, SH),      mov,  t_mvn_tst),
17165
 
17166
 tCE("ldr",     4100000, _ldr,     2, (RR, ADDRGLDR),ldst, t_ldst),
17167
 tC3("ldrb",    4500000, _ldrb,    2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17168
 tCE("str",     4000000, _str,     _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
17169
                                                                OP_RRnpc),
17170
                                        OP_ADDRGLDR),ldst, t_ldst),
17171
 tC3("strb",    4400000, _strb,    2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17172
 
17173
 tCE("stm",     8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17174
 tC3("stmia",   8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17175
 tC3("stmea",   8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17176
 tCE("ldm",     8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17177
 tC3("ldmia",   8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17178
 tC3("ldmfd",   8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
17179
 
17180
 TCE("swi",     f000000, df00,     1, (EXPi),        swi, t_swi),
17181
 TCE("svc",     f000000, df00,     1, (EXPi),        swi, t_swi),
17182
 tCE("b",       a000000, _b,       1, (EXPr),        branch, t_branch),
17183
 TCE("bl",      b000000, f000f800, 1, (EXPr),        bl, t_branch23),
17184
 
17185
  /* Pseudo ops.  */
17186
 tCE("adr",     28f0000, _adr,     2, (RR, EXP),     adr,  t_adr),
17187
  C3(adrl,      28f0000,           2, (RR, EXP),     adrl),
17188
 tCE("nop",     1a00000, _nop,     1, (oI255c),      nop,  t_nop),
17189
 
17190
  /* Thumb-compatibility pseudo ops.  */
17191
 tCE("lsl",     1a00000, _lsl,     3, (RR, oRR, SH), shift, t_shift),
17192
 tC3("lsls",    1b00000, _lsls,    3, (RR, oRR, SH), shift, t_shift),
17193
 tCE("lsr",     1a00020, _lsr,     3, (RR, oRR, SH), shift, t_shift),
17194
 tC3("lsrs",    1b00020, _lsrs,    3, (RR, oRR, SH), shift, t_shift),
17195
 tCE("asr",     1a00040, _asr,     3, (RR, oRR, SH), shift, t_shift),
17196
 tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
17197
 tCE("ror",     1a00060, _ror,     3, (RR, oRR, SH), shift, t_shift),
17198
 tC3("rors",    1b00060, _rors,    3, (RR, oRR, SH), shift, t_shift),
17199
 tCE("neg",     2600000, _neg,     2, (RR, RR),      rd_rn, t_neg),
17200
 tC3("negs",    2700000, _negs,    2, (RR, RR),      rd_rn, t_neg),
17201
 tCE("push",    92d0000, _push,     1, (REGLST),             push_pop, t_push_pop),
17202
 tCE("pop",     8bd0000, _pop,     1, (REGLST),      push_pop, t_push_pop),
17203
 
17204
 /* These may simplify to neg.  */
17205
 TCE("rsb",     0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
17206
 TC3("rsbs",    0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
17207
 
17208
#undef  THUMB_VARIANT
17209
#define THUMB_VARIANT  & arm_ext_v6
17210
 
17211
 TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
17212
 
17213
 /* V1 instructions with no Thumb analogue prior to V6T2.  */
17214
#undef  THUMB_VARIANT
17215
#define THUMB_VARIANT  & arm_ext_v6t2
17216
 
17217
 TCE("teq",     1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
17218
 TC3w("teqs",   1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
17219
  CL("teqp",    130f000,           2, (RR, SH),      cmp),
17220
 
17221
 TC3("ldrt",    4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17222
 TC3("ldrbt",   4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17223
 TC3("strt",    4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
17224
 TC3("strbt",   4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17225
 
17226
 TC3("stmdb",   9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17227
 TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17228
 
17229
 TC3("ldmdb",   9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17230
 TC3("ldmea",   9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17231
 
17232
 /* V1 instructions with no Thumb analogue at all.  */
17233
  CE("rsc",     0e00000,           3, (RR, oRR, SH), arit),
17234
  C3(rscs,      0f00000,           3, (RR, oRR, SH), arit),
17235
 
17236
  C3(stmib,     9800000,           2, (RRw, REGLST), ldmstm),
17237
  C3(stmfa,     9800000,           2, (RRw, REGLST), ldmstm),
17238
  C3(stmda,     8000000,           2, (RRw, REGLST), ldmstm),
17239
  C3(stmed,     8000000,           2, (RRw, REGLST), ldmstm),
17240
  C3(ldmib,     9900000,           2, (RRw, REGLST), ldmstm),
17241
  C3(ldmed,     9900000,           2, (RRw, REGLST), ldmstm),
17242
  C3(ldmda,     8100000,           2, (RRw, REGLST), ldmstm),
17243
  C3(ldmfa,     8100000,           2, (RRw, REGLST), ldmstm),
17244
 
17245
#undef  ARM_VARIANT
17246
#define ARM_VARIANT    & arm_ext_v2     /* ARM 2 - multiplies.  */
17247
#undef  THUMB_VARIANT
17248
#define THUMB_VARIANT  & arm_ext_v4t
17249
 
17250
 tCE("mul",     0000090, _mul,     3, (RRnpc, RRnpc, oRR), mul, t_mul),
17251
 tC3("muls",    0100090, _muls,    3, (RRnpc, RRnpc, oRR), mul, t_mul),
17252
 
17253
#undef  THUMB_VARIANT
17254
#define THUMB_VARIANT  & arm_ext_v6t2
17255
 
17256
 TCE("mla",     0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17257
  C3(mlas,      0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
17258
 
17259
  /* Generic coprocessor instructions.  */
17260
 TCE("cdp",     e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
17261
 TCE("ldc",     c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17262
 TC3("ldcl",    c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17263
 TCE("stc",     c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17264
 TC3("stcl",    c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17265
 TCE("mcr",     e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
17266
 TCE("mrc",     e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
17267
 
17268
#undef  ARM_VARIANT
17269
#define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
17270
 
17271
  CE("swp",     1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17272
  C3(swpb,      1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17273
 
17274
#undef  ARM_VARIANT
17275
#define ARM_VARIANT    & arm_ext_v3     /* ARM 6 Status register instructions.  */
17276
#undef  THUMB_VARIANT
17277
#define THUMB_VARIANT  & arm_ext_msr
17278
 
17279
 TCE("mrs",     1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
17280
 TCE("msr",     120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
17281
 
17282
#undef  ARM_VARIANT
17283
#define ARM_VARIANT    & arm_ext_v3m     /* ARM 7M long multiplies.  */
17284
#undef  THUMB_VARIANT
17285
#define THUMB_VARIANT  & arm_ext_v6t2
17286
 
17287
 TCE("smull",   0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17288
  CM("smull","s",       0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17289
 TCE("umull",   0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17290
  CM("umull","s",       0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17291
 TCE("smlal",   0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17292
  CM("smlal","s",       0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17293
 TCE("umlal",   0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17294
  CM("umlal","s",       0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17295
 
17296
#undef  ARM_VARIANT
17297
#define ARM_VARIANT    & arm_ext_v4     /* ARM Architecture 4.  */
17298
#undef  THUMB_VARIANT
17299
#define THUMB_VARIANT  & arm_ext_v4t
17300
 
17301
 tC3("ldrh",    01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17302
 tC3("strh",    00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17303
 tC3("ldrsh",   01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17304
 tC3("ldrsb",   01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17305
 tCM("ld","sh", 01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17306
 tCM("ld","sb", 01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17307
 
17308
#undef  ARM_VARIANT
17309
#define ARM_VARIANT  & arm_ext_v4t_5
17310
 
17311
  /* ARM Architecture 4T.  */
17312
  /* Note: bx (and blx) are required on V5, even if the processor does
17313
     not support Thumb.  */
17314
 TCE("bx",      12fff10, 4700, 1, (RR), bx, t_bx),
17315
 
17316
#undef  ARM_VARIANT
17317
#define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.     */
17318
#undef  THUMB_VARIANT
17319
#define THUMB_VARIANT  & arm_ext_v5t
17320
 
17321
  /* Note: blx has 2 variants; the .value coded here is for
17322
     BLX(2).  Only this variant has conditional execution.  */
17323
 TCE("blx",     12fff30, 4780, 1, (RR_EXr),                         blx,  t_blx),
17324
 TUE("bkpt",    1200070, be00, 1, (oIffffb),                        bkpt, t_bkpt),
17325
 
17326
#undef  THUMB_VARIANT
17327
#define THUMB_VARIANT  & arm_ext_v6t2
17328
 
17329
 TCE("clz",     16f0f10, fab0f080, 2, (RRnpc, RRnpc),                   rd_rm,  t_clz),
17330
 TUF("ldc2",    c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17331
 TUF("ldc2l",   c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),                     lstc,   lstc),
17332
 TUF("stc2",    c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),             lstc,   lstc),
17333
 TUF("stc2l",   c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),                     lstc,   lstc),
17334
 TUF("cdp2",    e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
17335
 TUF("mcr2",    e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
17336
 TUF("mrc2",    e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
17337
 
17338
#undef  ARM_VARIANT
17339
#define ARM_VARIANT  & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
17340
#undef THUMB_VARIANT
17341
#define THUMB_VARIANT &arm_ext_v5exp
17342
 
17343
 TCE("smlabb",  1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17344
 TCE("smlatb",  10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17345
 TCE("smlabt",  10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17346
 TCE("smlatt",  10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17347
 
17348
 TCE("smlawb",  1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17349
 TCE("smlawt",  12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
17350
 
17351
 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17352
 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17353
 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17354
 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
17355
 
17356
 TCE("smulbb",  1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17357
 TCE("smultb",  16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17358
 TCE("smulbt",  16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17359
 TCE("smultt",  16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17360
 
17361
 TCE("smulwb",  12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17362
 TCE("smulwt",  12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),        smul, t_simd),
17363
 
17364
 TCE("qadd",    1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17365
 TCE("qdadd",   1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17366
 TCE("qsub",    1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17367
 TCE("qdsub",   1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),        rd_rm_rn, t_simd2),
17368
 
17369
#undef  ARM_VARIANT
17370
#define ARM_VARIANT  & arm_ext_v5e /*  ARM Architecture 5TE.  */
17371
#undef THUMB_VARIANT
17372
#define THUMB_VARIANT &arm_ext_v6t2
17373
 
17374
 TUF("pld",     450f000, f810f000, 1, (ADDR),                pld,  t_pld),
17375
 TC3("ldrd",    00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
17376
     ldrd, t_ldstd),
17377
 TC3("strd",    00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
17378
                                       ADDRGLDRS), ldrd, t_ldstd),
17379
 
17380
 TCE("mcrr",    c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17381
 TCE("mrrc",    c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17382
 
17383
#undef  ARM_VARIANT
17384
#define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
17385
 
17386
 TCE("bxj",     12fff20, f3c08f00, 1, (RR),                       bxj, t_bxj),
17387
 
17388
#undef  ARM_VARIANT
17389
#define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
17390
#undef  THUMB_VARIANT
17391
#define THUMB_VARIANT  & arm_ext_v6
17392
 
17393
 TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
17394
 TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
17395
 tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
17396
 tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
17397
 tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
17398
 tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17399
 tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17400
 tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17401
 tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
17402
 TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
17403
 
17404
#undef  THUMB_VARIANT
17405
#define THUMB_VARIANT  & arm_ext_v6t2
17406
 
17407
 TCE("ldrex",   1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),        ldrex, t_ldrex),
17408
 TCE("strex",   1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17409
                                      strex,  t_strex),
17410
 TUF("mcrr2",   c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17411
 TUF("mrrc2",   c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17412
 
17413
 TCE("ssat",    6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
17414
 TCE("usat",    6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
17415
 
17416
/*  ARM V6 not included in V7M.  */
17417
#undef  THUMB_VARIANT
17418
#define THUMB_VARIANT  & arm_ext_v6_notm
17419
 TUF("rfeia",   8900a00, e990c000, 1, (RRw),                       rfe, rfe),
17420
  UF(rfeib,     9900a00,           1, (RRw),                       rfe),
17421
  UF(rfeda,     8100a00,           1, (RRw),                       rfe),
17422
 TUF("rfedb",   9100a00, e810c000, 1, (RRw),                       rfe, rfe),
17423
 TUF("rfefd",   8900a00, e990c000, 1, (RRw),                       rfe, rfe),
17424
  UF(rfefa,     9900a00,           1, (RRw),                       rfe),
17425
  UF(rfeea,     8100a00,           1, (RRw),                       rfe),
17426
 TUF("rfeed",   9100a00, e810c000, 1, (RRw),                       rfe, rfe),
17427
 TUF("srsia",   8c00500, e980c000, 2, (oRRw, I31w),                srs,  srs),
17428
  UF(srsib,     9c00500,           2, (oRRw, I31w),                srs),
17429
  UF(srsda,     8400500,           2, (oRRw, I31w),                srs),
17430
 TUF("srsdb",   9400500, e800c000, 2, (oRRw, I31w),                srs,  srs),
17431
 
17432
/*  ARM V6 not included in V7M (eg. integer SIMD).  */
17433
#undef  THUMB_VARIANT
17434
#define THUMB_VARIANT  & arm_ext_v6_dsp
17435
 TUF("cps",     1020000, f3af8100, 1, (I31b),                     imm0, t_cps),
17436
 TCE("pkhbt",   6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
17437
 TCE("pkhtb",   6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
17438
 TCE("qadd16",  6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17439
 TCE("qadd8",   6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17440
 TCE("qasx",    6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17441
 /* Old name for QASX.  */
17442
 TCE("qaddsubx",        6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17443
 TCE("qsax",    6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17444
 /* Old name for QSAX.  */
17445
 TCE("qsubaddx",        6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17446
 TCE("qsub16",  6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17447
 TCE("qsub8",   6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17448
 TCE("sadd16",  6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17449
 TCE("sadd8",   6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17450
 TCE("sasx",    6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17451
 /* Old name for SASX.  */
17452
 TCE("saddsubx",        6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17453
 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17454
 TCE("shadd8",  6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17455
 TCE("shasx",     6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17456
 /* Old name for SHASX.  */
17457
 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17458
 TCE("shsax",      6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),    rd_rn_rm, t_simd),
17459
 /* Old name for SHSAX.  */
17460
 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17461
 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17462
 TCE("shsub8",  6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17463
 TCE("ssax",    6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17464
 /* Old name for SSAX.  */
17465
 TCE("ssubaddx",        6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17466
 TCE("ssub16",  6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17467
 TCE("ssub8",   6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17468
 TCE("uadd16",  6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17469
 TCE("uadd8",   6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17470
 TCE("uasx",    6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17471
 /* Old name for UASX.  */
17472
 TCE("uaddsubx",        6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17473
 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17474
 TCE("uhadd8",  6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17475
 TCE("uhasx",     6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17476
 /* Old name for UHASX.  */
17477
 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17478
 TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17479
 /* Old name for UHSAX.  */
17480
 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17481
 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17482
 TCE("uhsub8",  6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17483
 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17484
 TCE("uqadd8",  6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17485
 TCE("uqasx",     6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17486
 /* Old name for UQASX.  */
17487
 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17488
 TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17489
 /* Old name for UQSAX.  */
17490
 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),     rd_rn_rm, t_simd),
17491
 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17492
 TCE("uqsub8",  6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17493
 TCE("usub16",  6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17494
 TCE("usax",    6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17495
 /* Old name for USAX.  */
17496
 TCE("usubaddx",        6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17497
 TCE("usub8",   6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17498
 TCE("sxtah",   6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17499
 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17500
 TCE("sxtab",   6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17501
 TCE("sxtb16",  68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),        sxth,  t_sxth),
17502
 TCE("uxtah",   6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17503
 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17504
 TCE("uxtab",   6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17505
 TCE("uxtb16",  6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),        sxth,  t_sxth),
17506
 TCE("sel",     6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),       rd_rn_rm, t_simd),
17507
 TCE("smlad",   7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17508
 TCE("smladx",  7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17509
 TCE("smlald",  7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17510
 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17511
 TCE("smlsd",   7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17512
 TCE("smlsdx",  7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17513
 TCE("smlsld",  7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17514
 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17515
 TCE("smmla",   7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17516
 TCE("smmlar",  7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17517
 TCE("smmls",   75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17518
 TCE("smmlsr",  75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17519
 TCE("smmul",   750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17520
 TCE("smmulr",  750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17521
 TCE("smuad",   700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17522
 TCE("smuadx",  700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17523
 TCE("smusd",   700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17524
 TCE("smusdx",  700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),       smul, t_simd),
17525
 TCE("ssat16",  6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),         ssat16, t_ssat16),
17526
 TCE("umaal",   0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
17527
 TCE("usad8",   780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),       smul,   t_simd),
17528
 TCE("usada8",  7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
17529
 TCE("usat16",  6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),         usat16, t_usat16),
17530
 
17531
#undef  ARM_VARIANT
17532
#define ARM_VARIANT   & arm_ext_v6k
17533
#undef  THUMB_VARIANT
17534
#define THUMB_VARIANT & arm_ext_v6k
17535
 
17536
 tCE("yield",   320f001, _yield,    0, (), noargs, t_hint),
17537
 tCE("wfe",     320f002, _wfe,      0, (), noargs, t_hint),
17538
 tCE("wfi",     320f003, _wfi,      0, (), noargs, t_hint),
17539
 tCE("sev",     320f004, _sev,      0, (), noargs, t_hint),
17540
 
17541
#undef  THUMB_VARIANT
17542
#define THUMB_VARIANT  & arm_ext_v6_notm
17543
 TCE("ldrexd",  1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
17544
                                      ldrexd, t_ldrexd),
17545
 TCE("strexd",  1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
17546
                                       RRnpcb), strexd, t_strexd),
17547
 
17548
#undef  THUMB_VARIANT
17549
#define THUMB_VARIANT  & arm_ext_v6t2
17550
 TCE("ldrexb",  1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
17551
     rd_rn,  rd_rn),
17552
 TCE("ldrexh",  1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
17553
     rd_rn,  rd_rn),
17554
 TCE("strexb",  1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17555 160 khays
     strex, t_strexbh),
17556 16 khays
 TCE("strexh",  1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17557 160 khays
     strex, t_strexbh),
17558 16 khays
 TUF("clrex",   57ff01f, f3bf8f2f, 0, (),                              noargs, noargs),
17559
 
17560
#undef  ARM_VARIANT
17561
#define ARM_VARIANT    & arm_ext_sec
17562
#undef THUMB_VARIANT
17563
#define THUMB_VARIANT  & arm_ext_sec
17564
 
17565
 TCE("smc",     1600070, f7f08000, 1, (EXPi), smc, t_smc),
17566
 
17567
#undef  ARM_VARIANT
17568
#define ARM_VARIANT    & arm_ext_virt
17569
#undef  THUMB_VARIANT
17570
#define THUMB_VARIANT    & arm_ext_virt
17571
 
17572
 TCE("hvc",     1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
17573
 TCE("eret",    160006e, f3de8f00, 0, (), noargs, noargs),
17574
 
17575
#undef  ARM_VARIANT
17576
#define ARM_VARIANT  & arm_ext_v6t2
17577
#undef  THUMB_VARIANT
17578
#define THUMB_VARIANT  & arm_ext_v6t2
17579
 
17580
 TCE("bfc",     7c0001f, f36f0000, 3, (RRnpc, I31, I32),           bfc, t_bfc),
17581
 TCE("bfi",     7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
17582
 TCE("sbfx",    7a00050, f3400000, 4, (RR, RR, I31, I32),          bfx, t_bfx),
17583
 TCE("ubfx",    7e00050, f3c00000, 4, (RR, RR, I31, I32),          bfx, t_bfx),
17584
 
17585
 TCE("mls",     0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17586
 TCE("movw",    3000000, f2400000, 2, (RRnpc, HALF),                mov16, t_mov16),
17587
 TCE("movt",    3400000, f2c00000, 2, (RRnpc, HALF),                mov16, t_mov16),
17588
 TCE("rbit",    6ff0f30, fa90f0a0, 2, (RR, RR),                     rd_rm, t_rbit),
17589
 
17590
 TC3("ldrht",   03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17591
 TC3("ldrsht",  03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17592
 TC3("ldrsbt",  03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17593
 TC3("strht",   02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17594
 
17595
 /* Thumb-only instructions.  */
17596
#undef ARM_VARIANT
17597
#define ARM_VARIANT NULL
17598
  TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
17599
  TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
17600
 
17601
 /* ARM does not really have an IT instruction, so always allow it.
17602
    The opcode is copied from Thumb in order to allow warnings in
17603
    -mimplicit-it=[never | arm] modes.  */
17604
#undef  ARM_VARIANT
17605
#define ARM_VARIANT  & arm_ext_v1
17606
 
17607
 TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
17608
 TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
17609
 TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
17610
 TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
17611
 TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
17612
 TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
17613
 TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
17614
 TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
17615
 TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
17616
 TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
17617
 TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
17618
 TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
17619
 TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
17620
 TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
17621
 TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
17622
 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
17623
 TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
17624
 TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
17625
 
17626
 /* Thumb2 only instructions.  */
17627
#undef  ARM_VARIANT
17628
#define ARM_VARIANT  NULL
17629
 
17630
 TCE("addw",    0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17631
 TCE("subw",    0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17632
 TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
17633
 TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
17634
 TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
17635
 TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
17636
 
17637
 /* Hardware division instructions.  */
17638
#undef  ARM_VARIANT
17639
#define ARM_VARIANT    & arm_ext_adiv
17640
#undef  THUMB_VARIANT
17641
#define THUMB_VARIANT  & arm_ext_div
17642
 
17643
 TCE("sdiv",    710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
17644
 TCE("udiv",    730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
17645
 
17646
 /* ARM V6M/V7 instructions.  */
17647
#undef  ARM_VARIANT
17648
#define ARM_VARIANT    & arm_ext_barrier
17649
#undef  THUMB_VARIANT
17650
#define THUMB_VARIANT  & arm_ext_barrier
17651
 
17652
 TUF("dmb",     57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier,  t_barrier),
17653
 TUF("dsb",     57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier,  t_barrier),
17654
 TUF("isb",     57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier,  t_barrier),
17655
 
17656
 /* ARM V7 instructions.  */
17657
#undef  ARM_VARIANT
17658
#define ARM_VARIANT    & arm_ext_v7
17659
#undef  THUMB_VARIANT
17660
#define THUMB_VARIANT  & arm_ext_v7
17661
 
17662
 TUF("pli",     450f000, f910f000, 1, (ADDR),     pli,      t_pld),
17663
 TCE("dbg",     320f0f0, f3af80f0, 1, (I15),      dbg,      t_dbg),
17664
 
17665
#undef ARM_VARIANT
17666
#define ARM_VARIANT    & arm_ext_mp
17667
#undef THUMB_VARIANT
17668
#define THUMB_VARIANT  & arm_ext_mp
17669
 
17670
 TUF("pldw",    410f000, f830f000, 1, (ADDR),   pld,    t_pld),
17671
 
17672
#undef  ARM_VARIANT
17673
#define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
17674
 
17675
 cCE("wfs",     e200110, 1, (RR),            rd),
17676
 cCE("rfs",     e300110, 1, (RR),            rd),
17677
 cCE("wfc",     e400110, 1, (RR),            rd),
17678
 cCE("rfc",     e500110, 1, (RR),            rd),
17679
 
17680
 cCL("ldfs",    c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17681
 cCL("ldfd",    c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17682
 cCL("ldfe",    c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17683
 cCL("ldfp",    c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17684
 
17685
 cCL("stfs",    c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17686
 cCL("stfd",    c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17687
 cCL("stfe",    c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17688
 cCL("stfp",    c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
17689
 
17690
 cCL("mvfs",    e008100, 2, (RF, RF_IF),     rd_rm),
17691
 cCL("mvfsp",   e008120, 2, (RF, RF_IF),     rd_rm),
17692
 cCL("mvfsm",   e008140, 2, (RF, RF_IF),     rd_rm),
17693
 cCL("mvfsz",   e008160, 2, (RF, RF_IF),     rd_rm),
17694
 cCL("mvfd",    e008180, 2, (RF, RF_IF),     rd_rm),
17695
 cCL("mvfdp",   e0081a0, 2, (RF, RF_IF),     rd_rm),
17696
 cCL("mvfdm",   e0081c0, 2, (RF, RF_IF),     rd_rm),
17697
 cCL("mvfdz",   e0081e0, 2, (RF, RF_IF),     rd_rm),
17698
 cCL("mvfe",    e088100, 2, (RF, RF_IF),     rd_rm),
17699
 cCL("mvfep",   e088120, 2, (RF, RF_IF),     rd_rm),
17700
 cCL("mvfem",   e088140, 2, (RF, RF_IF),     rd_rm),
17701
 cCL("mvfez",   e088160, 2, (RF, RF_IF),     rd_rm),
17702
 
17703
 cCL("mnfs",    e108100, 2, (RF, RF_IF),     rd_rm),
17704
 cCL("mnfsp",   e108120, 2, (RF, RF_IF),     rd_rm),
17705
 cCL("mnfsm",   e108140, 2, (RF, RF_IF),     rd_rm),
17706
 cCL("mnfsz",   e108160, 2, (RF, RF_IF),     rd_rm),
17707
 cCL("mnfd",    e108180, 2, (RF, RF_IF),     rd_rm),
17708
 cCL("mnfdp",   e1081a0, 2, (RF, RF_IF),     rd_rm),
17709
 cCL("mnfdm",   e1081c0, 2, (RF, RF_IF),     rd_rm),
17710
 cCL("mnfdz",   e1081e0, 2, (RF, RF_IF),     rd_rm),
17711
 cCL("mnfe",    e188100, 2, (RF, RF_IF),     rd_rm),
17712
 cCL("mnfep",   e188120, 2, (RF, RF_IF),     rd_rm),
17713
 cCL("mnfem",   e188140, 2, (RF, RF_IF),     rd_rm),
17714
 cCL("mnfez",   e188160, 2, (RF, RF_IF),     rd_rm),
17715
 
17716
 cCL("abss",    e208100, 2, (RF, RF_IF),     rd_rm),
17717
 cCL("abssp",   e208120, 2, (RF, RF_IF),     rd_rm),
17718
 cCL("abssm",   e208140, 2, (RF, RF_IF),     rd_rm),
17719
 cCL("abssz",   e208160, 2, (RF, RF_IF),     rd_rm),
17720
 cCL("absd",    e208180, 2, (RF, RF_IF),     rd_rm),
17721
 cCL("absdp",   e2081a0, 2, (RF, RF_IF),     rd_rm),
17722
 cCL("absdm",   e2081c0, 2, (RF, RF_IF),     rd_rm),
17723
 cCL("absdz",   e2081e0, 2, (RF, RF_IF),     rd_rm),
17724
 cCL("abse",    e288100, 2, (RF, RF_IF),     rd_rm),
17725
 cCL("absep",   e288120, 2, (RF, RF_IF),     rd_rm),
17726
 cCL("absem",   e288140, 2, (RF, RF_IF),     rd_rm),
17727
 cCL("absez",   e288160, 2, (RF, RF_IF),     rd_rm),
17728
 
17729
 cCL("rnds",    e308100, 2, (RF, RF_IF),     rd_rm),
17730
 cCL("rndsp",   e308120, 2, (RF, RF_IF),     rd_rm),
17731
 cCL("rndsm",   e308140, 2, (RF, RF_IF),     rd_rm),
17732
 cCL("rndsz",   e308160, 2, (RF, RF_IF),     rd_rm),
17733
 cCL("rndd",    e308180, 2, (RF, RF_IF),     rd_rm),
17734
 cCL("rnddp",   e3081a0, 2, (RF, RF_IF),     rd_rm),
17735
 cCL("rnddm",   e3081c0, 2, (RF, RF_IF),     rd_rm),
17736
 cCL("rnddz",   e3081e0, 2, (RF, RF_IF),     rd_rm),
17737
 cCL("rnde",    e388100, 2, (RF, RF_IF),     rd_rm),
17738
 cCL("rndep",   e388120, 2, (RF, RF_IF),     rd_rm),
17739
 cCL("rndem",   e388140, 2, (RF, RF_IF),     rd_rm),
17740
 cCL("rndez",   e388160, 2, (RF, RF_IF),     rd_rm),
17741
 
17742
 cCL("sqts",    e408100, 2, (RF, RF_IF),     rd_rm),
17743
 cCL("sqtsp",   e408120, 2, (RF, RF_IF),     rd_rm),
17744
 cCL("sqtsm",   e408140, 2, (RF, RF_IF),     rd_rm),
17745
 cCL("sqtsz",   e408160, 2, (RF, RF_IF),     rd_rm),
17746
 cCL("sqtd",    e408180, 2, (RF, RF_IF),     rd_rm),
17747
 cCL("sqtdp",   e4081a0, 2, (RF, RF_IF),     rd_rm),
17748
 cCL("sqtdm",   e4081c0, 2, (RF, RF_IF),     rd_rm),
17749
 cCL("sqtdz",   e4081e0, 2, (RF, RF_IF),     rd_rm),
17750
 cCL("sqte",    e488100, 2, (RF, RF_IF),     rd_rm),
17751
 cCL("sqtep",   e488120, 2, (RF, RF_IF),     rd_rm),
17752
 cCL("sqtem",   e488140, 2, (RF, RF_IF),     rd_rm),
17753
 cCL("sqtez",   e488160, 2, (RF, RF_IF),     rd_rm),
17754
 
17755
 cCL("logs",    e508100, 2, (RF, RF_IF),     rd_rm),
17756
 cCL("logsp",   e508120, 2, (RF, RF_IF),     rd_rm),
17757
 cCL("logsm",   e508140, 2, (RF, RF_IF),     rd_rm),
17758
 cCL("logsz",   e508160, 2, (RF, RF_IF),     rd_rm),
17759
 cCL("logd",    e508180, 2, (RF, RF_IF),     rd_rm),
17760
 cCL("logdp",   e5081a0, 2, (RF, RF_IF),     rd_rm),
17761
 cCL("logdm",   e5081c0, 2, (RF, RF_IF),     rd_rm),
17762
 cCL("logdz",   e5081e0, 2, (RF, RF_IF),     rd_rm),
17763
 cCL("loge",    e588100, 2, (RF, RF_IF),     rd_rm),
17764
 cCL("logep",   e588120, 2, (RF, RF_IF),     rd_rm),
17765
 cCL("logem",   e588140, 2, (RF, RF_IF),     rd_rm),
17766
 cCL("logez",   e588160, 2, (RF, RF_IF),     rd_rm),
17767
 
17768
 cCL("lgns",    e608100, 2, (RF, RF_IF),     rd_rm),
17769
 cCL("lgnsp",   e608120, 2, (RF, RF_IF),     rd_rm),
17770
 cCL("lgnsm",   e608140, 2, (RF, RF_IF),     rd_rm),
17771
 cCL("lgnsz",   e608160, 2, (RF, RF_IF),     rd_rm),
17772
 cCL("lgnd",    e608180, 2, (RF, RF_IF),     rd_rm),
17773
 cCL("lgndp",   e6081a0, 2, (RF, RF_IF),     rd_rm),
17774
 cCL("lgndm",   e6081c0, 2, (RF, RF_IF),     rd_rm),
17775
 cCL("lgndz",   e6081e0, 2, (RF, RF_IF),     rd_rm),
17776
 cCL("lgne",    e688100, 2, (RF, RF_IF),     rd_rm),
17777
 cCL("lgnep",   e688120, 2, (RF, RF_IF),     rd_rm),
17778
 cCL("lgnem",   e688140, 2, (RF, RF_IF),     rd_rm),
17779
 cCL("lgnez",   e688160, 2, (RF, RF_IF),     rd_rm),
17780
 
17781
 cCL("exps",    e708100, 2, (RF, RF_IF),     rd_rm),
17782
 cCL("expsp",   e708120, 2, (RF, RF_IF),     rd_rm),
17783
 cCL("expsm",   e708140, 2, (RF, RF_IF),     rd_rm),
17784
 cCL("expsz",   e708160, 2, (RF, RF_IF),     rd_rm),
17785
 cCL("expd",    e708180, 2, (RF, RF_IF),     rd_rm),
17786
 cCL("expdp",   e7081a0, 2, (RF, RF_IF),     rd_rm),
17787
 cCL("expdm",   e7081c0, 2, (RF, RF_IF),     rd_rm),
17788
 cCL("expdz",   e7081e0, 2, (RF, RF_IF),     rd_rm),
17789
 cCL("expe",    e788100, 2, (RF, RF_IF),     rd_rm),
17790
 cCL("expep",   e788120, 2, (RF, RF_IF),     rd_rm),
17791
 cCL("expem",   e788140, 2, (RF, RF_IF),     rd_rm),
17792
 cCL("expdz",   e788160, 2, (RF, RF_IF),     rd_rm),
17793
 
17794
 cCL("sins",    e808100, 2, (RF, RF_IF),     rd_rm),
17795
 cCL("sinsp",   e808120, 2, (RF, RF_IF),     rd_rm),
17796
 cCL("sinsm",   e808140, 2, (RF, RF_IF),     rd_rm),
17797
 cCL("sinsz",   e808160, 2, (RF, RF_IF),     rd_rm),
17798
 cCL("sind",    e808180, 2, (RF, RF_IF),     rd_rm),
17799
 cCL("sindp",   e8081a0, 2, (RF, RF_IF),     rd_rm),
17800
 cCL("sindm",   e8081c0, 2, (RF, RF_IF),     rd_rm),
17801
 cCL("sindz",   e8081e0, 2, (RF, RF_IF),     rd_rm),
17802
 cCL("sine",    e888100, 2, (RF, RF_IF),     rd_rm),
17803
 cCL("sinep",   e888120, 2, (RF, RF_IF),     rd_rm),
17804
 cCL("sinem",   e888140, 2, (RF, RF_IF),     rd_rm),
17805
 cCL("sinez",   e888160, 2, (RF, RF_IF),     rd_rm),
17806
 
17807
 cCL("coss",    e908100, 2, (RF, RF_IF),     rd_rm),
17808
 cCL("cossp",   e908120, 2, (RF, RF_IF),     rd_rm),
17809
 cCL("cossm",   e908140, 2, (RF, RF_IF),     rd_rm),
17810
 cCL("cossz",   e908160, 2, (RF, RF_IF),     rd_rm),
17811
 cCL("cosd",    e908180, 2, (RF, RF_IF),     rd_rm),
17812
 cCL("cosdp",   e9081a0, 2, (RF, RF_IF),     rd_rm),
17813
 cCL("cosdm",   e9081c0, 2, (RF, RF_IF),     rd_rm),
17814
 cCL("cosdz",   e9081e0, 2, (RF, RF_IF),     rd_rm),
17815
 cCL("cose",    e988100, 2, (RF, RF_IF),     rd_rm),
17816
 cCL("cosep",   e988120, 2, (RF, RF_IF),     rd_rm),
17817
 cCL("cosem",   e988140, 2, (RF, RF_IF),     rd_rm),
17818
 cCL("cosez",   e988160, 2, (RF, RF_IF),     rd_rm),
17819
 
17820
 cCL("tans",    ea08100, 2, (RF, RF_IF),     rd_rm),
17821
 cCL("tansp",   ea08120, 2, (RF, RF_IF),     rd_rm),
17822
 cCL("tansm",   ea08140, 2, (RF, RF_IF),     rd_rm),
17823
 cCL("tansz",   ea08160, 2, (RF, RF_IF),     rd_rm),
17824
 cCL("tand",    ea08180, 2, (RF, RF_IF),     rd_rm),
17825
 cCL("tandp",   ea081a0, 2, (RF, RF_IF),     rd_rm),
17826
 cCL("tandm",   ea081c0, 2, (RF, RF_IF),     rd_rm),
17827
 cCL("tandz",   ea081e0, 2, (RF, RF_IF),     rd_rm),
17828
 cCL("tane",    ea88100, 2, (RF, RF_IF),     rd_rm),
17829
 cCL("tanep",   ea88120, 2, (RF, RF_IF),     rd_rm),
17830
 cCL("tanem",   ea88140, 2, (RF, RF_IF),     rd_rm),
17831
 cCL("tanez",   ea88160, 2, (RF, RF_IF),     rd_rm),
17832
 
17833
 cCL("asns",    eb08100, 2, (RF, RF_IF),     rd_rm),
17834
 cCL("asnsp",   eb08120, 2, (RF, RF_IF),     rd_rm),
17835
 cCL("asnsm",   eb08140, 2, (RF, RF_IF),     rd_rm),
17836
 cCL("asnsz",   eb08160, 2, (RF, RF_IF),     rd_rm),
17837
 cCL("asnd",    eb08180, 2, (RF, RF_IF),     rd_rm),
17838
 cCL("asndp",   eb081a0, 2, (RF, RF_IF),     rd_rm),
17839
 cCL("asndm",   eb081c0, 2, (RF, RF_IF),     rd_rm),
17840
 cCL("asndz",   eb081e0, 2, (RF, RF_IF),     rd_rm),
17841
 cCL("asne",    eb88100, 2, (RF, RF_IF),     rd_rm),
17842
 cCL("asnep",   eb88120, 2, (RF, RF_IF),     rd_rm),
17843
 cCL("asnem",   eb88140, 2, (RF, RF_IF),     rd_rm),
17844
 cCL("asnez",   eb88160, 2, (RF, RF_IF),     rd_rm),
17845
 
17846
 cCL("acss",    ec08100, 2, (RF, RF_IF),     rd_rm),
17847
 cCL("acssp",   ec08120, 2, (RF, RF_IF),     rd_rm),
17848
 cCL("acssm",   ec08140, 2, (RF, RF_IF),     rd_rm),
17849
 cCL("acssz",   ec08160, 2, (RF, RF_IF),     rd_rm),
17850
 cCL("acsd",    ec08180, 2, (RF, RF_IF),     rd_rm),
17851
 cCL("acsdp",   ec081a0, 2, (RF, RF_IF),     rd_rm),
17852
 cCL("acsdm",   ec081c0, 2, (RF, RF_IF),     rd_rm),
17853
 cCL("acsdz",   ec081e0, 2, (RF, RF_IF),     rd_rm),
17854
 cCL("acse",    ec88100, 2, (RF, RF_IF),     rd_rm),
17855
 cCL("acsep",   ec88120, 2, (RF, RF_IF),     rd_rm),
17856
 cCL("acsem",   ec88140, 2, (RF, RF_IF),     rd_rm),
17857
 cCL("acsez",   ec88160, 2, (RF, RF_IF),     rd_rm),
17858
 
17859
 cCL("atns",    ed08100, 2, (RF, RF_IF),     rd_rm),
17860
 cCL("atnsp",   ed08120, 2, (RF, RF_IF),     rd_rm),
17861
 cCL("atnsm",   ed08140, 2, (RF, RF_IF),     rd_rm),
17862
 cCL("atnsz",   ed08160, 2, (RF, RF_IF),     rd_rm),
17863
 cCL("atnd",    ed08180, 2, (RF, RF_IF),     rd_rm),
17864
 cCL("atndp",   ed081a0, 2, (RF, RF_IF),     rd_rm),
17865
 cCL("atndm",   ed081c0, 2, (RF, RF_IF),     rd_rm),
17866
 cCL("atndz",   ed081e0, 2, (RF, RF_IF),     rd_rm),
17867
 cCL("atne",    ed88100, 2, (RF, RF_IF),     rd_rm),
17868
 cCL("atnep",   ed88120, 2, (RF, RF_IF),     rd_rm),
17869
 cCL("atnem",   ed88140, 2, (RF, RF_IF),     rd_rm),
17870
 cCL("atnez",   ed88160, 2, (RF, RF_IF),     rd_rm),
17871
 
17872
 cCL("urds",    ee08100, 2, (RF, RF_IF),     rd_rm),
17873
 cCL("urdsp",   ee08120, 2, (RF, RF_IF),     rd_rm),
17874
 cCL("urdsm",   ee08140, 2, (RF, RF_IF),     rd_rm),
17875
 cCL("urdsz",   ee08160, 2, (RF, RF_IF),     rd_rm),
17876
 cCL("urdd",    ee08180, 2, (RF, RF_IF),     rd_rm),
17877
 cCL("urddp",   ee081a0, 2, (RF, RF_IF),     rd_rm),
17878
 cCL("urddm",   ee081c0, 2, (RF, RF_IF),     rd_rm),
17879
 cCL("urddz",   ee081e0, 2, (RF, RF_IF),     rd_rm),
17880
 cCL("urde",    ee88100, 2, (RF, RF_IF),     rd_rm),
17881
 cCL("urdep",   ee88120, 2, (RF, RF_IF),     rd_rm),
17882
 cCL("urdem",   ee88140, 2, (RF, RF_IF),     rd_rm),
17883
 cCL("urdez",   ee88160, 2, (RF, RF_IF),     rd_rm),
17884
 
17885
 cCL("nrms",    ef08100, 2, (RF, RF_IF),     rd_rm),
17886
 cCL("nrmsp",   ef08120, 2, (RF, RF_IF),     rd_rm),
17887
 cCL("nrmsm",   ef08140, 2, (RF, RF_IF),     rd_rm),
17888
 cCL("nrmsz",   ef08160, 2, (RF, RF_IF),     rd_rm),
17889
 cCL("nrmd",    ef08180, 2, (RF, RF_IF),     rd_rm),
17890
 cCL("nrmdp",   ef081a0, 2, (RF, RF_IF),     rd_rm),
17891
 cCL("nrmdm",   ef081c0, 2, (RF, RF_IF),     rd_rm),
17892
 cCL("nrmdz",   ef081e0, 2, (RF, RF_IF),     rd_rm),
17893
 cCL("nrme",    ef88100, 2, (RF, RF_IF),     rd_rm),
17894
 cCL("nrmep",   ef88120, 2, (RF, RF_IF),     rd_rm),
17895
 cCL("nrmem",   ef88140, 2, (RF, RF_IF),     rd_rm),
17896
 cCL("nrmez",   ef88160, 2, (RF, RF_IF),     rd_rm),
17897
 
17898
 cCL("adfs",    e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
17899
 cCL("adfsp",   e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
17900
 cCL("adfsm",   e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
17901
 cCL("adfsz",   e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
17902
 cCL("adfd",    e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
17903
 cCL("adfdp",   e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17904
 cCL("adfdm",   e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17905
 cCL("adfdz",   e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17906
 cCL("adfe",    e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
17907
 cCL("adfep",   e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
17908
 cCL("adfem",   e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
17909
 cCL("adfez",   e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
17910
 
17911
 cCL("sufs",    e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
17912
 cCL("sufsp",   e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
17913
 cCL("sufsm",   e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
17914
 cCL("sufsz",   e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
17915
 cCL("sufd",    e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
17916
 cCL("sufdp",   e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17917
 cCL("sufdm",   e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17918
 cCL("sufdz",   e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17919
 cCL("sufe",    e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
17920
 cCL("sufep",   e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
17921
 cCL("sufem",   e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
17922
 cCL("sufez",   e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
17923
 
17924
 cCL("rsfs",    e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
17925
 cCL("rsfsp",   e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
17926
 cCL("rsfsm",   e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
17927
 cCL("rsfsz",   e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
17928
 cCL("rsfd",    e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
17929
 cCL("rsfdp",   e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17930
 cCL("rsfdm",   e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17931
 cCL("rsfdz",   e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17932
 cCL("rsfe",    e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
17933
 cCL("rsfep",   e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
17934
 cCL("rsfem",   e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
17935
 cCL("rsfez",   e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
17936
 
17937
 cCL("mufs",    e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
17938
 cCL("mufsp",   e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
17939
 cCL("mufsm",   e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
17940
 cCL("mufsz",   e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
17941
 cCL("mufd",    e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
17942
 cCL("mufdp",   e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17943
 cCL("mufdm",   e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17944
 cCL("mufdz",   e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17945
 cCL("mufe",    e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
17946
 cCL("mufep",   e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
17947
 cCL("mufem",   e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
17948
 cCL("mufez",   e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
17949
 
17950
 cCL("dvfs",    e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
17951
 cCL("dvfsp",   e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
17952
 cCL("dvfsm",   e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
17953
 cCL("dvfsz",   e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
17954
 cCL("dvfd",    e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
17955
 cCL("dvfdp",   e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17956
 cCL("dvfdm",   e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17957
 cCL("dvfdz",   e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17958
 cCL("dvfe",    e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
17959
 cCL("dvfep",   e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
17960
 cCL("dvfem",   e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
17961
 cCL("dvfez",   e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
17962
 
17963
 cCL("rdfs",    e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
17964
 cCL("rdfsp",   e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
17965
 cCL("rdfsm",   e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
17966
 cCL("rdfsz",   e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
17967
 cCL("rdfd",    e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
17968
 cCL("rdfdp",   e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17969
 cCL("rdfdm",   e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17970
 cCL("rdfdz",   e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17971
 cCL("rdfe",    e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
17972
 cCL("rdfep",   e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
17973
 cCL("rdfem",   e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
17974
 cCL("rdfez",   e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
17975
 
17976
 cCL("pows",    e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
17977
 cCL("powsp",   e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
17978
 cCL("powsm",   e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
17979
 cCL("powsz",   e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
17980
 cCL("powd",    e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
17981
 cCL("powdp",   e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17982
 cCL("powdm",   e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17983
 cCL("powdz",   e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17984
 cCL("powe",    e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
17985
 cCL("powep",   e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
17986
 cCL("powem",   e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
17987
 cCL("powez",   e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
17988
 
17989
 cCL("rpws",    e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
17990
 cCL("rpwsp",   e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
17991
 cCL("rpwsm",   e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
17992
 cCL("rpwsz",   e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
17993
 cCL("rpwd",    e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
17994
 cCL("rpwdp",   e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17995
 cCL("rpwdm",   e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17996
 cCL("rpwdz",   e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17997
 cCL("rpwe",    e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
17998
 cCL("rpwep",   e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
17999
 cCL("rpwem",   e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
18000
 cCL("rpwez",   e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
18001
 
18002
 cCL("rmfs",    e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
18003
 cCL("rmfsp",   e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
18004
 cCL("rmfsm",   e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
18005
 cCL("rmfsz",   e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
18006
 cCL("rmfd",    e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
18007
 cCL("rmfdp",   e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18008
 cCL("rmfdm",   e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18009
 cCL("rmfdz",   e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18010
 cCL("rmfe",    e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
18011
 cCL("rmfep",   e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
18012
 cCL("rmfem",   e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
18013
 cCL("rmfez",   e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
18014
 
18015
 cCL("fmls",    e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
18016
 cCL("fmlsp",   e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
18017
 cCL("fmlsm",   e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
18018
 cCL("fmlsz",   e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
18019
 cCL("fmld",    e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
18020
 cCL("fmldp",   e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18021
 cCL("fmldm",   e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18022
 cCL("fmldz",   e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18023
 cCL("fmle",    e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
18024
 cCL("fmlep",   e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
18025
 cCL("fmlem",   e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
18026
 cCL("fmlez",   e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
18027
 
18028
 cCL("fdvs",    ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18029
 cCL("fdvsp",   ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18030
 cCL("fdvsm",   ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18031
 cCL("fdvsz",   ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18032
 cCL("fdvd",    ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18033
 cCL("fdvdp",   ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18034
 cCL("fdvdm",   ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18035
 cCL("fdvdz",   ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18036
 cCL("fdve",    ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18037
 cCL("fdvep",   ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18038
 cCL("fdvem",   ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18039
 cCL("fdvez",   ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18040
 
18041
 cCL("frds",    eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18042
 cCL("frdsp",   eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18043
 cCL("frdsm",   eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18044
 cCL("frdsz",   eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18045
 cCL("frdd",    eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18046
 cCL("frddp",   eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18047
 cCL("frddm",   eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18048
 cCL("frddz",   eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18049
 cCL("frde",    eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18050
 cCL("frdep",   eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18051
 cCL("frdem",   eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18052
 cCL("frdez",   eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18053
 
18054
 cCL("pols",    ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18055
 cCL("polsp",   ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18056
 cCL("polsm",   ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18057
 cCL("polsz",   ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18058
 cCL("pold",    ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18059
 cCL("poldp",   ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18060
 cCL("poldm",   ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18061
 cCL("poldz",   ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18062
 cCL("pole",    ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18063
 cCL("polep",   ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18064
 cCL("polem",   ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18065
 cCL("polez",   ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18066
 
18067
 cCE("cmf",     e90f110, 2, (RF, RF_IF),     fpa_cmp),
18068
 C3E("cmfe",    ed0f110, 2, (RF, RF_IF),     fpa_cmp),
18069
 cCE("cnf",     eb0f110, 2, (RF, RF_IF),     fpa_cmp),
18070
 C3E("cnfe",    ef0f110, 2, (RF, RF_IF),     fpa_cmp),
18071
 
18072
 cCL("flts",    e000110, 2, (RF, RR),        rn_rd),
18073
 cCL("fltsp",   e000130, 2, (RF, RR),        rn_rd),
18074
 cCL("fltsm",   e000150, 2, (RF, RR),        rn_rd),
18075
 cCL("fltsz",   e000170, 2, (RF, RR),        rn_rd),
18076
 cCL("fltd",    e000190, 2, (RF, RR),        rn_rd),
18077
 cCL("fltdp",   e0001b0, 2, (RF, RR),        rn_rd),
18078
 cCL("fltdm",   e0001d0, 2, (RF, RR),        rn_rd),
18079
 cCL("fltdz",   e0001f0, 2, (RF, RR),        rn_rd),
18080
 cCL("flte",    e080110, 2, (RF, RR),        rn_rd),
18081
 cCL("fltep",   e080130, 2, (RF, RR),        rn_rd),
18082
 cCL("fltem",   e080150, 2, (RF, RR),        rn_rd),
18083
 cCL("fltez",   e080170, 2, (RF, RR),        rn_rd),
18084
 
18085
  /* The implementation of the FIX instruction is broken on some
18086
     assemblers, in that it accepts a precision specifier as well as a
18087
     rounding specifier, despite the fact that this is meaningless.
18088
     To be more compatible, we accept it as well, though of course it
18089
     does not set any bits.  */
18090
 cCE("fix",     e100110, 2, (RR, RF),        rd_rm),
18091
 cCL("fixp",    e100130, 2, (RR, RF),        rd_rm),
18092
 cCL("fixm",    e100150, 2, (RR, RF),        rd_rm),
18093
 cCL("fixz",    e100170, 2, (RR, RF),        rd_rm),
18094
 cCL("fixsp",   e100130, 2, (RR, RF),        rd_rm),
18095
 cCL("fixsm",   e100150, 2, (RR, RF),        rd_rm),
18096
 cCL("fixsz",   e100170, 2, (RR, RF),        rd_rm),
18097
 cCL("fixdp",   e100130, 2, (RR, RF),        rd_rm),
18098
 cCL("fixdm",   e100150, 2, (RR, RF),        rd_rm),
18099
 cCL("fixdz",   e100170, 2, (RR, RF),        rd_rm),
18100
 cCL("fixep",   e100130, 2, (RR, RF),        rd_rm),
18101
 cCL("fixem",   e100150, 2, (RR, RF),        rd_rm),
18102
 cCL("fixez",   e100170, 2, (RR, RF),        rd_rm),
18103
 
18104
  /* Instructions that were new with the real FPA, call them V2.  */
18105
#undef  ARM_VARIANT
18106
#define ARM_VARIANT  & fpu_fpa_ext_v2
18107
 
18108
 cCE("lfm",     c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18109
 cCL("lfmfd",   c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18110
 cCL("lfmea",   d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18111
 cCE("sfm",     c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18112
 cCL("sfmfd",   d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18113
 cCL("sfmea",   c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18114
 
18115
#undef  ARM_VARIANT
18116
#define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
18117
 
18118
  /* Moves and type conversions.  */
18119
 cCE("fcpys",   eb00a40, 2, (RVS, RVS),       vfp_sp_monadic),
18120
 cCE("fmrs",    e100a10, 2, (RR, RVS),        vfp_reg_from_sp),
18121
 cCE("fmsr",    e000a10, 2, (RVS, RR),        vfp_sp_from_reg),
18122
 cCE("fmstat",  ef1fa10, 0, (),                noargs),
18123
 cCE("vmrs",    ef10a10, 2, (APSR_RR, RVC),   vmrs),
18124
 cCE("vmsr",    ee10a10, 2, (RVC, RR),        vmsr),
18125
 cCE("fsitos",  eb80ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18126
 cCE("fuitos",  eb80a40, 2, (RVS, RVS),       vfp_sp_monadic),
18127
 cCE("ftosis",  ebd0a40, 2, (RVS, RVS),       vfp_sp_monadic),
18128
 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18129
 cCE("ftouis",  ebc0a40, 2, (RVS, RVS),       vfp_sp_monadic),
18130
 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18131
 cCE("fmrx",    ef00a10, 2, (RR, RVC),        rd_rn),
18132
 cCE("fmxr",    ee00a10, 2, (RVC, RR),        rn_rd),
18133
 
18134
  /* Memory operations.  */
18135
 cCE("flds",    d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
18136
 cCE("fsts",    d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
18137
 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18138
 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18139
 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18140
 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18141
 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18142
 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18143
 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18144
 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18145
 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18146
 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
18147
 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18148
 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
18149
 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18150
 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
18151
 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18152
 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
18153
 
18154
  /* Monadic operations.  */
18155
 cCE("fabss",   eb00ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18156
 cCE("fnegs",   eb10a40, 2, (RVS, RVS),       vfp_sp_monadic),
18157
 cCE("fsqrts",  eb10ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18158
 
18159
  /* Dyadic operations.  */
18160
 cCE("fadds",   e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18161
 cCE("fsubs",   e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18162
 cCE("fmuls",   e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18163
 cCE("fdivs",   e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18164
 cCE("fmacs",   e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18165
 cCE("fmscs",   e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18166
 cCE("fnmuls",  e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18167
 cCE("fnmacs",  e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18168
 cCE("fnmscs",  e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18169
 
18170
  /* Comparisons.  */
18171
 cCE("fcmps",   eb40a40, 2, (RVS, RVS),       vfp_sp_monadic),
18172
 cCE("fcmpzs",  eb50a40, 1, (RVS),            vfp_sp_compare_z),
18173
 cCE("fcmpes",  eb40ac0, 2, (RVS, RVS),       vfp_sp_monadic),
18174
 cCE("fcmpezs", eb50ac0, 1, (RVS),            vfp_sp_compare_z),
18175
 
18176
 /* Double precision load/store are still present on single precision
18177
    implementations.  */
18178
 cCE("fldd",    d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
18179
 cCE("fstd",    d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
18180
 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18181
 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18182
 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18183
 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18184
 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18185
 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
18186
 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18187
 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
18188
 
18189
#undef  ARM_VARIANT
18190
#define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
18191
 
18192
  /* Moves and type conversions.  */
18193
 cCE("fcpyd",   eb00b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
18194
 cCE("fcvtds",  eb70ac0, 2, (RVD, RVS),       vfp_dp_sp_cvt),
18195
 cCE("fcvtsd",  eb70bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18196
 cCE("fmdhr",   e200b10, 2, (RVD, RR),        vfp_dp_rn_rd),
18197
 cCE("fmdlr",   e000b10, 2, (RVD, RR),        vfp_dp_rn_rd),
18198
 cCE("fmrdh",   e300b10, 2, (RR, RVD),        vfp_dp_rd_rn),
18199
 cCE("fmrdl",   e100b10, 2, (RR, RVD),        vfp_dp_rd_rn),
18200
 cCE("fsitod",  eb80bc0, 2, (RVD, RVS),       vfp_dp_sp_cvt),
18201
 cCE("fuitod",  eb80b40, 2, (RVD, RVS),       vfp_dp_sp_cvt),
18202
 cCE("ftosid",  ebd0b40, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18203
 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18204
 cCE("ftouid",  ebc0b40, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18205
 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
18206
 
18207
  /* Monadic operations.  */
18208
 cCE("fabsd",   eb00bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
18209
 cCE("fnegd",   eb10b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
18210
 cCE("fsqrtd",  eb10bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
18211
 
18212
  /* Dyadic operations.  */
18213
 cCE("faddd",   e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18214
 cCE("fsubd",   e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18215
 cCE("fmuld",   e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18216
 cCE("fdivd",   e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18217
 cCE("fmacd",   e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18218
 cCE("fmscd",   e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18219
 cCE("fnmuld",  e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18220
 cCE("fnmacd",  e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18221
 cCE("fnmscd",  e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18222
 
18223
  /* Comparisons.  */
18224
 cCE("fcmpd",   eb40b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
18225
 cCE("fcmpzd",  eb50b40, 1, (RVD),            vfp_dp_rd),
18226
 cCE("fcmped",  eb40bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
18227
 cCE("fcmpezd", eb50bc0, 1, (RVD),            vfp_dp_rd),
18228
 
18229
#undef  ARM_VARIANT
18230
#define ARM_VARIANT  & fpu_vfp_ext_v2
18231
 
18232
 cCE("fmsrr",   c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
18233
 cCE("fmrrs",   c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
18234
 cCE("fmdrr",   c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
18235
 cCE("fmrrd",   c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
18236
 
18237
/* Instructions which may belong to either the Neon or VFP instruction sets.
18238
   Individual encoder functions perform additional architecture checks.  */
18239
#undef  ARM_VARIANT
18240
#define ARM_VARIANT    & fpu_vfp_ext_v1xd
18241
#undef  THUMB_VARIANT
18242
#define THUMB_VARIANT  & fpu_vfp_ext_v1xd
18243
 
18244
  /* These mnemonics are unique to VFP.  */
18245
 NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
18246
 NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
18247
 nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18248
 nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18249
 nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18250
 nCE(vcmp,      _vcmp,    2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
18251
 nCE(vcmpe,     _vcmpe,   2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
18252
 NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
18253
 NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
18254
 NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
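 /* Illustrative sketch (assumed UAL syntax) for the VFP-only mnemonics
    registered above; the encoder functions choose the single- or
    double-precision encoding from the data type suffix:

        vsqrt.f32 s0, s1
        vdiv.f64  d0, d1, d2
        vpush     {d8-d15}
        vpop      {d8-d15}                                              */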
18255
 
18256
  /* Mnemonics shared by Neon and VFP.  */
18257
 nCEF(vmul,     _vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
18258
 nCEF(vmla,     _vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18259
 nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18260
 
18261
 nCEF(vadd,     _vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18262
 nCEF(vsub,     _vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18263
 
18264
 NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18265
 NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18266
 
18267
 NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18268
 NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18269
 NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18270
 NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18271
 NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18272
 NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18273
 NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18274
 NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18275
 
18276 160 khays
 nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
18277 16 khays
 nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
18278
 nCEF(vcvtb,    _vcvt,   2, (RVS, RVS), neon_cvtb),
18279
 nCEF(vcvtt,    _vcvt,   2, (RVS, RVS), neon_cvtt),
18280
 
18281
 
18282
  /* NOTE: All VMOV encoding is special-cased!  */
18283
 NCE(vmov,      0,       1, (VMOV), neon_mov),
18284
 NCE(vmovq,     0,       1, (VMOV), neon_mov),
18285
 
18286
#undef  THUMB_VARIANT
18287
#define THUMB_VARIANT  & fpu_neon_ext_v1
18288
#undef  ARM_VARIANT
18289
#define ARM_VARIANT    & fpu_neon_ext_v1
18290
 
18291
  /* Data processing with three registers of the same length.  */
18292
  /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
18293
 NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
18294
 NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
18295
 NUF(vhadd,     0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18296
 NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
18297
 NUF(vrhadd,    0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18298
 NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
18299
 NUF(vhsub,     0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18300
 NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
18301
  /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
18302
 NUF(vqadd,     0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18303
 NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
18304
 NUF(vqsub,     0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18305
 NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
18306
 NUF(vrshl,     0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18307
 NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
18308
 NUF(vqrshl,    0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18309
 NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
18310
  /* If not immediate, fall back to neon_dyadic_i64_su.
18311
     shl_imm should accept I8 I16 I32 I64,
18312
     qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
18313
 nUF(vshl,      _vshl,    3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
18314
 nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl_imm),
18315
 nUF(vqshl,     _vqshl,   3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
18316
 nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl_imm),
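 /* Illustrative sketch (assumed UAL syntax): the third operand of
    VSHL/VQSHL may be an immediate (handled by the *_shl_imm encoders)
    or a register, in which case parsing falls back to the dyadic forms
    noted above:

        vshl.i32  d0, d1, #3     @ immediate shift, types I8..I64
        vshl.s32  q0, q1, q2     @ register shift, per-lane amounts in q2 */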
18317
  /* Logic ops, types optional & ignored.  */
18318
 nUF(vand,      _vand,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18319
 nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18320
 nUF(vbic,      _vbic,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18321
 nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18322
 nUF(vorr,      _vorr,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18323
 nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18324
 nUF(vorn,      _vorn,    3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18325
 nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
18326
 nUF(veor,      _veor,    3, (RNDQ, oRNDQ, RNDQ),      neon_logic),
18327
 nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
18328
  /* Bitfield ops, untyped.  */
18329
 NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18330
 NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
18331
 NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18332
 NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
18333
 NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18334
 NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
18335
  /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32.  */
18336
 nUF(vabd,      _vabd,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18337
 nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
18338
 nUF(vmax,      _vmax,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18339
 nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
18340
 nUF(vmin,      _vmin,    3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18341
 nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
18342
  /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
18343
     back to neon_dyadic_if_su.  */
18344
 nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18345
 nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
18346
 nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18347
 nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
18348
 nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18349
 nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
18350
 nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18351
 nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
18352
  /* Comparison. Type I8 I16 I32 F32.  */
18353
 nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
18354
 nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
18355
  /* As above, D registers only.  */
18356
 nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
18357
 nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
18358
  /* Int and float variants, signedness unimportant.  */
18359
 nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
18360
 nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
18361
 nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
18362
  /* Add/sub take types I8 I16 I32 I64 F32.  */
18363
 nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
18364
 nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
18365
  /* vtst takes sizes 8, 16, 32.  */
18366
 NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
18367
 NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
18368
  /* VMUL takes I8 I16 I32 F32 P8.  */
18369
 nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
18370
  /* VQD{R}MULH takes S16 S32.  */
18371
 nUF(vqdmulh,   _vqdmulh,  3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18372
 nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
18373
 nUF(vqrdmulh,  _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18374
 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
18375
 NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18376
 NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
18377
 NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18378
 NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
18379
 NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18380
 NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
18381
 NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18382
 NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
18383
 NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
18384
 NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
18385
 NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
18386
 NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
18387
 
18388
  /* Two address, int/float. Types S8 S16 S32 F32.  */
18389
 NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
18390
 NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
18391
 
18392
  /* Data processing with two registers and a shift amount.  */
18393
  /* Right shifts, and variants with rounding.
18394
     Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
18395
 NUF(vshr,      0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18396
 NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
18397
 NUF(vrshr,     0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18398
 NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
18399
 NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
18400
 NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
18401
 NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
18402
 NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
18403
  /* Shift and insert. Sizes accepted 8 16 32 64.  */
18404
 NUF(vsli,      1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
18405
 NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
18406
 NUF(vsri,      1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
18407
 NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
18408
  /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
18409
 NUF(vqshlu,    1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
18410
 NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
18411
  /* Right shift immediate, saturating & narrowing, with rounding variants.
18412
     Types accepted S16 S32 S64 U16 U32 U64.  */
18413
 NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18414
 NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18415
  /* As above, unsigned. Types accepted S16 S32 S64.  */
18416
 NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18417
 NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18418
  /* Right shift narrowing. Types accepted I16 I32 I64.  */
18419
 NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18420
 NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18421
  /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
18422
 nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
18423
  /* CVT with optional immediate for fixed-point variant.  */
18424
 nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
18425
 
18426
 nUF(vmvn,      _vmvn,    2, (RNDQ, RNDQ_Ibig), neon_mvn),
18427
 nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
18428
 
18429
  /* Data processing, three registers of different lengths.  */
18430
  /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
18431
 NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
18432
 NUF(vabdl,     0800700, 3, (RNQ, RND, RND),  neon_dyadic_long),
18433
 NUF(vaddl,     0800000, 3, (RNQ, RND, RND),  neon_dyadic_long),
18434
 NUF(vsubl,     0800200, 3, (RNQ, RND, RND),  neon_dyadic_long),
18435
  /* If not scalar, fall back to neon_dyadic_long.
18436
     Vector types as above, scalar types S16 S32 U16 U32.  */
18437
 nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18438
 nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18439
  /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
18440
 NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18441
 NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18442
  /* Dyadic, narrowing insns. Types I16 I32 I64.  */
18443
 NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18444
 NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18445
 NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18446
 NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
18447
  /* Saturating doubling multiplies. Types S16 S32.  */
18448
 nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18449
 nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18450
 nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18451
  /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
18452
     S16 S32 U16 U32.  */
18453
 nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
18454
 
18455
  /* Extract. Size 8.  */
18456
 NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
18457
 NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
18458
 
18459
  /* Two registers, miscellaneous.  */
18460
  /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
18461
 NUF(vrev64,    1b00000, 2, (RNDQ, RNDQ),     neon_rev),
18462
 NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
18463
 NUF(vrev32,    1b00080, 2, (RNDQ, RNDQ),     neon_rev),
18464
 NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
18465
 NUF(vrev16,    1b00100, 2, (RNDQ, RNDQ),     neon_rev),
18466
 NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
18467
  /* Vector replicate. Sizes 8 16 32.  */
18468
 nCE(vdup,      _vdup,    2, (RNDQ, RR_RNSC),  neon_dup),
18469
 nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
18470
  /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
18471
 NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
18472
  /* VMOVN. Types I16 I32 I64.  */
18473
 nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
18474
  /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
18475
 nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
18476
  /* VQMOVUN. Types S16 S32 S64.  */
18477
 nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
18478
  /* VZIP / VUZP. Sizes 8 16 32.  */
18479
 NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
18480
 NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
18481
 NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
18482
 NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
18483
  /* VQABS / VQNEG. Types S8 S16 S32.  */
18484
 NUF(vqabs,     1b00700, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
18485
 NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
18486
 NUF(vqneg,     1b00780, 2, (RNDQ, RNDQ),     neon_sat_abs_neg),
18487
 NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
18488
  /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
18489
 NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
18490
 NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
18491
 NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
18492
 NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
18493
  /* Reciprocal estimates. Types U32 F32.  */
18494
 NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
18495
 NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
18496
 NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
18497
 NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
18498
  /* VCLS. Types S8 S16 S32.  */
18499
 NUF(vcls,      1b00400, 2, (RNDQ, RNDQ),     neon_cls),
18500
 NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
18501
  /* VCLZ. Types I8 I16 I32.  */
18502
 NUF(vclz,      1b00480, 2, (RNDQ, RNDQ),     neon_clz),
18503
 NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
18504
  /* VCNT. Size 8.  */
18505
 NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
18506
 NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
18507
  /* Two address, untyped.  */
18508
 NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
18509
 NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
18510
  /* VTRN. Sizes 8 16 32.  */
18511
 nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
18512
 nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
18513
 
18514
  /* Table lookup. Size 8.  */
18515
 NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18516
 NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18517
 
18518
#undef  THUMB_VARIANT
18519
#define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
18520
#undef  ARM_VARIANT
18521
#define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
18522
 
18523
  /* Neon element/structure load/store.  */
18524
 nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18525
 nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18526
 nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18527
 nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18528
 nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18529
 nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18530
 nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18531
 nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
18532
 
18533
#undef  THUMB_VARIANT
18534
#define THUMB_VARIANT &fpu_vfp_ext_v3xd
18535
#undef ARM_VARIANT
18536
#define ARM_VARIANT &fpu_vfp_ext_v3xd
18537
 cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
18538
 cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18539
 cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18540
 cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18541
 cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18542
 cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18543
 cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18544
 cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
18545
 cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
18546
 
18547
#undef THUMB_VARIANT
18548
#define THUMB_VARIANT  & fpu_vfp_ext_v3
18549
#undef  ARM_VARIANT
18550
#define ARM_VARIANT    & fpu_vfp_ext_v3
18551
 
18552
 cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
18553
 cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18554
 cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18555
 cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18556
 cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18557
 cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18558
 cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18559
 cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
18560
 cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
18561
 
18562
#undef ARM_VARIANT
18563
#define ARM_VARIANT &fpu_vfp_ext_fma
18564
#undef THUMB_VARIANT
18565
#define THUMB_VARIANT &fpu_vfp_ext_fma
18566
 /* Mnemonics shared by Neon and VFP.  These are included in the
    VFP FMA variant; NEON and VFP FMA always includes the NEON
    FMA instructions.  */
 nCEF(vfma,     _vfma,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
 nCEF(vfms,     _vfms,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
    the v form should always be used.  */
18573
 cCE("ffmas",   ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18574
 cCE("ffnmas",  ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
18575
 cCE("ffmad",   ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18576
 cCE("ffnmad",  ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
18577
 nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18578
 nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18579
 
18580
#undef THUMB_VARIANT
18581
#undef  ARM_VARIANT
18582
#define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
18583
 
18584
 cCE("mia",     e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18585
 cCE("miaph",   e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18586
 cCE("miabb",   e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18587
 cCE("miabt",   e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18588
 cCE("miatb",   e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18589
 cCE("miatt",   e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18590
 cCE("mar",     c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
18591
 cCE("mra",     c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
18592
 
18593
#undef  ARM_VARIANT
18594
#define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
18595
 
18596
 cCE("tandcb",  e13f130, 1, (RR),                   iwmmxt_tandorc),
18597
 cCE("tandch",  e53f130, 1, (RR),                   iwmmxt_tandorc),
18598
 cCE("tandcw",  e93f130, 1, (RR),                   iwmmxt_tandorc),
18599
 cCE("tbcstb",  e400010, 2, (RIWR, RR),             rn_rd),
18600
 cCE("tbcsth",  e400050, 2, (RIWR, RR),             rn_rd),
18601
 cCE("tbcstw",  e400090, 2, (RIWR, RR),             rn_rd),
18602
 cCE("textrcb", e130170, 2, (RR, I7),               iwmmxt_textrc),
18603
 cCE("textrch", e530170, 2, (RR, I7),               iwmmxt_textrc),
18604
 cCE("textrcw", e930170, 2, (RR, I7),               iwmmxt_textrc),
18605
 cCE("textrmub",        e100070, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18606
 cCE("textrmuh",        e500070, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18607
 cCE("textrmuw",        e900070, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18608
 cCE("textrmsb",        e100078, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18609
 cCE("textrmsh",        e500078, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18610
 cCE("textrmsw",        e900078, 3, (RR, RIWR, I7),         iwmmxt_textrm),
18611
 cCE("tinsrb",  e600010, 3, (RIWR, RR, I7),         iwmmxt_tinsr),
18612
 cCE("tinsrh",  e600050, 3, (RIWR, RR, I7),         iwmmxt_tinsr),
18613
 cCE("tinsrw",  e600090, 3, (RIWR, RR, I7),         iwmmxt_tinsr),
18614
 cCE("tmcr",    e000110, 2, (RIWC_RIWG, RR),        rn_rd),
18615
 cCE("tmcrr",   c400000, 3, (RIWR, RR, RR),         rm_rd_rn),
18616
 cCE("tmia",    e200010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18617
 cCE("tmiaph",  e280010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18618
 cCE("tmiabb",  e2c0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18619
 cCE("tmiabt",  e2d0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18620
 cCE("tmiatb",  e2e0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18621
 cCE("tmiatt",  e2f0010, 3, (RIWR, RR, RR),         iwmmxt_tmia),
18622
 cCE("tmovmskb",        e100030, 2, (RR, RIWR),             rd_rn),
18623
 cCE("tmovmskh",        e500030, 2, (RR, RIWR),             rd_rn),
18624
 cCE("tmovmskw",        e900030, 2, (RR, RIWR),             rd_rn),
18625
 cCE("tmrc",    e100110, 2, (RR, RIWC_RIWG),        rd_rn),
18626
 cCE("tmrrc",   c500000, 3, (RR, RR, RIWR),         rd_rn_rm),
18627
 cCE("torcb",   e13f150, 1, (RR),                   iwmmxt_tandorc),
18628
 cCE("torch",   e53f150, 1, (RR),                   iwmmxt_tandorc),
18629
 cCE("torcw",   e93f150, 1, (RR),                   iwmmxt_tandorc),
18630
 cCE("waccb",   e0001c0, 2, (RIWR, RIWR),           rd_rn),
18631
 cCE("wacch",   e4001c0, 2, (RIWR, RIWR),           rd_rn),
18632
 cCE("waccw",   e8001c0, 2, (RIWR, RIWR),           rd_rn),
18633
 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18634
 cCE("waddb",   e000180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18635
 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18636
 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18637
 cCE("waddh",   e400180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18638
 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18639
 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18640
 cCE("waddw",   e800180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18641
 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18642
 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
18643
 cCE("walignr0",        e800020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18644
 cCE("walignr1",        e900020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18645
 cCE("walignr2",        ea00020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18646
 cCE("walignr3",        eb00020, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18647
 cCE("wand",    e200000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18648
 cCE("wandn",   e300000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18649
 cCE("wavg2b",  e800000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18650
 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18651
 cCE("wavg2h",  ec00000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18652
 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18653
 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18654
 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18655
 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18656
 cCE("wcmpgtub",        e100060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18657
 cCE("wcmpgtuh",        e500060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18658
 cCE("wcmpgtuw",        e900060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18659
 cCE("wcmpgtsb",        e300060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18660
 cCE("wcmpgtsh",        e700060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18661
 cCE("wcmpgtsw",        eb00060, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18662
 cCE("wldrb",   c100000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18663
 cCE("wldrh",   c500000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18664
 cCE("wldrw",   c100100, 2, (RIWR_RIWC, ADDR),      iwmmxt_wldstw),
18665
 cCE("wldrd",   c500100, 2, (RIWR, ADDR),           iwmmxt_wldstd),
18666
 cCE("wmacs",   e600100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18667
 cCE("wmacsz",  e700100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18668
 cCE("wmacu",   e400100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18669
 cCE("wmacuz",  e500100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18670
 cCE("wmadds",  ea00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18671
 cCE("wmaddu",  e800100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18672
 cCE("wmaxsb",  e200160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18673
 cCE("wmaxsh",  e600160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18674
 cCE("wmaxsw",  ea00160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18675
 cCE("wmaxub",  e000160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18676
 cCE("wmaxuh",  e400160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18677
 cCE("wmaxuw",  e800160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18678
 cCE("wminsb",  e300160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18679
 cCE("wminsh",  e700160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18680
 cCE("wminsw",  eb00160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18681
 cCE("wminub",  e100160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18682
 cCE("wminuh",  e500160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18683
 cCE("wminuw",  e900160, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18684
 cCE("wmov",    e000000, 2, (RIWR, RIWR),           iwmmxt_wmov),
18685
 cCE("wmulsm",  e300100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18686
 cCE("wmulsl",  e200100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18687
 cCE("wmulum",  e100100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18688
 cCE("wmulul",  e000100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18689
 cCE("wor",     e000000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18690
 cCE("wpackhss",        e700080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18691
 cCE("wpackhus",        e500080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18692
 cCE("wpackwss",        eb00080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18693
 cCE("wpackwus",        e900080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18694
 cCE("wpackdss",        ef00080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18695
 cCE("wpackdus",        ed00080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18696
 cCE("wrorh",   e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18697
 cCE("wrorhg",  e700148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18698
 cCE("wrorw",   eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18699
 cCE("wrorwg",  eb00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18700
 cCE("wrord",   ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18701
 cCE("wrordg",  ef00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18702
 cCE("wsadb",   e000120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18703
 cCE("wsadbz",  e100120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18704
 cCE("wsadh",   e400120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18705
 cCE("wsadhz",  e500120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18706
 cCE("wshufh",  e0001e0, 3, (RIWR, RIWR, I255),     iwmmxt_wshufh),
18707
 cCE("wsllh",   e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18708
 cCE("wsllhg",  e500148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18709
 cCE("wsllw",   e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18710
 cCE("wsllwg",  e900148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18711
 cCE("wslld",   ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18712
 cCE("wslldg",  ed00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18713
 cCE("wsrah",   e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18714
 cCE("wsrahg",  e400148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18715
 cCE("wsraw",   e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18716
 cCE("wsrawg",  e800148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18717
 cCE("wsrad",   ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18718
 cCE("wsradg",  ec00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18719
 cCE("wsrlh",   e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18720
 cCE("wsrlhg",  e600148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18721
 cCE("wsrlw",   ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18722
 cCE("wsrlwg",  ea00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18723
 cCE("wsrld",   ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18724
 cCE("wsrldg",  ee00148, 3, (RIWR, RIWR, RIWG),     rd_rn_rm),
18725
 cCE("wstrb",   c000000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18726
 cCE("wstrh",   c400000, 2, (RIWR, ADDR),           iwmmxt_wldstbh),
18727
 cCE("wstrw",   c000100, 2, (RIWR_RIWC, ADDR),      iwmmxt_wldstw),
18728
 cCE("wstrd",   c400100, 2, (RIWR, ADDR),           iwmmxt_wldstd),
18729
 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18730
 cCE("wsubb",   e0001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18731
 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18732
 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18733
 cCE("wsubh",   e4001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18734
 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18735
 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18736
 cCE("wsubw",   e8001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18737
 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18738
 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),         rd_rn),
18739
 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),         rd_rn),
18740
 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),         rd_rn),
18741
 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),         rd_rn),
18742
 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),         rd_rn),
18743
 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),         rd_rn),
18744
 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18745
 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18746
 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18747
 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),         rd_rn),
18748
 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),         rd_rn),
18749
 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),         rd_rn),
18750
 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),         rd_rn),
18751
 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),         rd_rn),
18752
 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),         rd_rn),
18753
 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18754
 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18755
 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),           rd_rn_rm),
18756
 cCE("wxor",    e100000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18757
 cCE("wzero",   e300000, 1, (RIWR),                 iwmmxt_wzero),
18758
 
18759
#undef  ARM_VARIANT
18760
#define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
18761
 
18762
 cCE("torvscb",   e12f190, 1, (RR),                 iwmmxt_tandorc),
18763
 cCE("torvsch",   e52f190, 1, (RR),                 iwmmxt_tandorc),
18764
 cCE("torvscw",   e92f190, 1, (RR),                 iwmmxt_tandorc),
18765
 cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
18766
 cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
18767
 cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
18768
 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18769
 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18770
 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18771
 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18772
 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18773
 cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18774
 cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18775
 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18776
 cCE("wavg4",   e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18777
 cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18778
 cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18779
 cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18780
 cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18781
 cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18782
 cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
18783
 cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18784
 cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18785
 cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18786
 cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18787
 cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18788
 cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18789
 cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18790
 cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18791
 cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18792
 cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18793
 cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18794
 cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18795
 cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18796
 cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18797
 cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18798
 cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18799
 cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18800
 cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18801
 cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18802
 cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18803
 cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18804
 cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18805
 cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18806
 cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18807
 cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18808
 cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18809
 cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18810
 cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18811
 cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18812
 cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18813
 cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18814
 cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18815
 cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18816
 cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18817
 cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18818
 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
18819
 
18820
#undef  ARM_VARIANT
18821
#define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
18822
 
18823
 cCE("cfldrs",  c100400, 2, (RMF, ADDRGLDC),          rd_cpaddr),
18824
 cCE("cfldrd",  c500400, 2, (RMD, ADDRGLDC),          rd_cpaddr),
18825
 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC),         rd_cpaddr),
18826
 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC),         rd_cpaddr),
18827
 cCE("cfstrs",  c000400, 2, (RMF, ADDRGLDC),          rd_cpaddr),
18828
 cCE("cfstrd",  c400400, 2, (RMD, ADDRGLDC),          rd_cpaddr),
18829
 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC),         rd_cpaddr),
18830
 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC),         rd_cpaddr),
18831
 cCE("cfmvsr",  e000450, 2, (RMF, RR),                rn_rd),
18832
 cCE("cfmvrs",  e100450, 2, (RR, RMF),                rd_rn),
18833
 cCE("cfmvdlr", e000410, 2, (RMD, RR),                rn_rd),
18834
 cCE("cfmvrdl", e100410, 2, (RR, RMD),                rd_rn),
18835
 cCE("cfmvdhr", e000430, 2, (RMD, RR),                rn_rd),
18836
 cCE("cfmvrdh", e100430, 2, (RR, RMD),                rd_rn),
18837
 cCE("cfmv64lr",        e000510, 2, (RMDX, RR),               rn_rd),
18838
 cCE("cfmvr64l",        e100510, 2, (RR, RMDX),               rd_rn),
18839
 cCE("cfmv64hr",        e000530, 2, (RMDX, RR),               rn_rd),
18840
 cCE("cfmvr64h",        e100530, 2, (RR, RMDX),               rd_rn),
18841
 cCE("cfmval32",        e200440, 2, (RMAX, RMFX),             rd_rn),
18842
 cCE("cfmv32al",        e100440, 2, (RMFX, RMAX),             rd_rn),
18843
 cCE("cfmvam32",        e200460, 2, (RMAX, RMFX),             rd_rn),
18844
 cCE("cfmv32am",        e100460, 2, (RMFX, RMAX),             rd_rn),
18845
 cCE("cfmvah32",        e200480, 2, (RMAX, RMFX),             rd_rn),
18846
 cCE("cfmv32ah",        e100480, 2, (RMFX, RMAX),             rd_rn),
18847
 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX),             rd_rn),
18848
 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX),             rd_rn),
18849
 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX),             rd_rn),
18850
 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX),             rd_rn),
18851
 cCE("cfmvsc32",        e2004e0, 2, (RMDS, RMDX),             mav_dspsc),
18852
 cCE("cfmv32sc",        e1004e0, 2, (RMDX, RMDS),             rd),
18853
 cCE("cfcpys",  e000400, 2, (RMF, RMF),               rd_rn),
18854
 cCE("cfcpyd",  e000420, 2, (RMD, RMD),               rd_rn),
18855
 cCE("cfcvtsd", e000460, 2, (RMD, RMF),               rd_rn),
18856
 cCE("cfcvtds", e000440, 2, (RMF, RMD),               rd_rn),
18857
 cCE("cfcvt32s",        e000480, 2, (RMF, RMFX),              rd_rn),
18858
 cCE("cfcvt32d",        e0004a0, 2, (RMD, RMFX),              rd_rn),
18859
 cCE("cfcvt64s",        e0004c0, 2, (RMF, RMDX),              rd_rn),
18860
 cCE("cfcvt64d",        e0004e0, 2, (RMD, RMDX),              rd_rn),
18861
 cCE("cfcvts32",        e100580, 2, (RMFX, RMF),              rd_rn),
18862
 cCE("cfcvtd32",        e1005a0, 2, (RMFX, RMD),              rd_rn),
18863
 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),            rd_rn),
18864
 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),            rd_rn),
18865
 cCE("cfrshl32",        e000550, 3, (RMFX, RMFX, RR),         mav_triple),
18866
 cCE("cfrshl64",        e000570, 3, (RMDX, RMDX, RR),         mav_triple),
18867
 cCE("cfsh32",  e000500, 3, (RMFX, RMFX, I63s),       mav_shift),
18868
 cCE("cfsh64",  e200500, 3, (RMDX, RMDX, I63s),       mav_shift),
18869
 cCE("cfcmps",  e100490, 3, (RR, RMF, RMF),           rd_rn_rm),
18870
 cCE("cfcmpd",  e1004b0, 3, (RR, RMD, RMD),           rd_rn_rm),
18871
 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX),         rd_rn_rm),
18872
 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX),         rd_rn_rm),
18873
 cCE("cfabss",  e300400, 2, (RMF, RMF),               rd_rn),
18874
 cCE("cfabsd",  e300420, 2, (RMD, RMD),               rd_rn),
18875
 cCE("cfnegs",  e300440, 2, (RMF, RMF),               rd_rn),
18876
 cCE("cfnegd",  e300460, 2, (RMD, RMD),               rd_rn),
18877
 cCE("cfadds",  e300480, 3, (RMF, RMF, RMF),          rd_rn_rm),
18878
 cCE("cfaddd",  e3004a0, 3, (RMD, RMD, RMD),          rd_rn_rm),
18879
 cCE("cfsubs",  e3004c0, 3, (RMF, RMF, RMF),          rd_rn_rm),
18880
 cCE("cfsubd",  e3004e0, 3, (RMD, RMD, RMD),          rd_rn_rm),
18881
 cCE("cfmuls",  e100400, 3, (RMF, RMF, RMF),          rd_rn_rm),
18882
 cCE("cfmuld",  e100420, 3, (RMD, RMD, RMD),          rd_rn_rm),
18883
 cCE("cfabs32", e300500, 2, (RMFX, RMFX),             rd_rn),
18884
 cCE("cfabs64", e300520, 2, (RMDX, RMDX),             rd_rn),
18885
 cCE("cfneg32", e300540, 2, (RMFX, RMFX),             rd_rn),
18886
 cCE("cfneg64", e300560, 2, (RMDX, RMDX),             rd_rn),
18887
 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18888
 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX),       rd_rn_rm),
18889
 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18890
 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX),       rd_rn_rm),
18891
 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18892
 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX),       rd_rn_rm),
18893
 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18894
 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX),       rd_rn_rm),
18895
 cCE("cfmadd32",        e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18896
 cCE("cfmsub32",        e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18897
 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18898
 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18899
};
#undef ARM_VARIANT
#undef THUMB_VARIANT
#undef TCE
#undef TCM
#undef TUE
#undef TUF
#undef TCC
#undef cCE
#undef cCL
#undef C3E
#undef CE
#undef CM
#undef UE
#undef UF
#undef UT
#undef NUF
#undef nUF
#undef NCE
#undef nCE
#undef OPS0
#undef OPS1
#undef OPS2
#undef OPS3
#undef OPS4
#undef OPS5
#undef OPS6
#undef do_0

/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and store them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating point numbers are put out as a series
   of LITTLENUMS (shorts, here at least).  */

void
md_number_to_chars (char * buf, valueT val, int n)
{
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
  else
    number_to_chars_littleendian (buf, val, n);
}
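
/* Descriptive comment added here: the function below is the inverse of
   md_number_to_chars, reading an N-byte value from BUF while honouring
   the endianness of the target.  */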
static valueT
md_chars_to_number (char * buf, int n)
{
  valueT result = 0;
  unsigned char * where = (unsigned char *) buf;

  if (target_big_endian)
    {
      while (n--)
        {
          result <<= 8;
          result |= (*where++ & 255);
        }
    }
  else
    {
      while (n--)
        {
          result <<= 8;
          result |= (where[n] & 255);
        }
    }

  return result;
}

/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
                               segT    segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 2;
  return 2;
}
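/* Explanatory note: every relaxable Thumb instruction therefore starts out
   in its 2-byte form; arm_relax_frag later widens individual frags to
   4 bytes when the operand or branch target turns out not to fit.  */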
18984
 
18985
/* Convert a machine dependent frag.  */
18986
 
18987
void
18988
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
18989
{
18990
  unsigned long insn;
18991
  unsigned long old_op;
18992
  char *buf;
18993
  expressionS exp;
18994
  fixS *fixp;
18995
  int reloc_type;
18996
  int pc_rel;
18997
  int opcode;
18998
 
18999
  buf = fragp->fr_literal + fragp->fr_fix;
19000
 
19001
  old_op = bfd_get_16(abfd, buf);
19002
  if (fragp->fr_symbol)
19003
    {
19004
      exp.X_op = O_symbol;
19005
      exp.X_add_symbol = fragp->fr_symbol;
19006
    }
19007
  else
19008
    {
19009
      exp.X_op = O_constant;
19010
    }
19011
  exp.X_add_number = fragp->fr_offset;
19012
  opcode = fragp->fr_subtype;
19013
  switch (opcode)
19014
    {
19015
    case T_MNEM_ldr_pc:
19016
    case T_MNEM_ldr_pc2:
19017
    case T_MNEM_ldr_sp:
19018
    case T_MNEM_str_sp:
19019
    case T_MNEM_ldr:
19020
    case T_MNEM_ldrb:
19021
    case T_MNEM_ldrh:
19022
    case T_MNEM_str:
19023
    case T_MNEM_strb:
19024
    case T_MNEM_strh:
19025
      if (fragp->fr_var == 4)
19026
        {
19027
          insn = THUMB_OP32 (opcode);
19028
          if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
19029
            {
19030
              insn |= (old_op & 0x700) << 4;
19031
            }
19032
          else
19033
            {
19034
              insn |= (old_op & 7) << 12;
19035
              insn |= (old_op & 0x38) << 13;
19036
            }
19037
          insn |= 0x00000c00;
19038
          put_thumb32_insn (buf, insn);
19039
          reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
19040
        }
19041
      else
19042
        {
19043
          reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
19044
        }
19045
      pc_rel = (opcode == T_MNEM_ldr_pc2);
19046
      break;
19047
    case T_MNEM_adr:
19048
      if (fragp->fr_var == 4)
19049
        {
19050
          insn = THUMB_OP32 (opcode);
19051
          insn |= (old_op & 0xf0) << 4;
19052
          put_thumb32_insn (buf, insn);
19053
          reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
19054
        }
19055
      else
19056
        {
19057
          reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19058
          exp.X_add_number -= 4;
19059
        }
19060
      pc_rel = 1;
19061
      break;
19062
    case T_MNEM_mov:
19063
    case T_MNEM_movs:
19064
    case T_MNEM_cmp:
19065
    case T_MNEM_cmn:
19066
      if (fragp->fr_var == 4)
19067
        {
19068
          int r0off = (opcode == T_MNEM_mov
19069
                       || opcode == T_MNEM_movs) ? 0 : 8;
19070
          insn = THUMB_OP32 (opcode);
19071
          insn = (insn & 0xe1ffffff) | 0x10000000;
19072
          insn |= (old_op & 0x700) << r0off;
19073
          put_thumb32_insn (buf, insn);
19074
          reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19075
        }
19076
      else
19077
        {
19078
          reloc_type = BFD_RELOC_ARM_THUMB_IMM;
19079
        }
19080
      pc_rel = 0;
19081
      break;
19082
    case T_MNEM_b:
19083
      if (fragp->fr_var == 4)
19084
        {
19085
          insn = THUMB_OP32(opcode);
19086
          put_thumb32_insn (buf, insn);
19087
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
19088
        }
19089
      else
19090
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
19091
      pc_rel = 1;
19092
      break;
19093
    case T_MNEM_bcond:
19094
      if (fragp->fr_var == 4)
19095
        {
19096
          insn = THUMB_OP32(opcode);
19097
          insn |= (old_op & 0xf00) << 14;
19098
          put_thumb32_insn (buf, insn);
19099
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
19100
        }
19101
      else
19102
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
19103
      pc_rel = 1;
19104
      break;
19105
    case T_MNEM_add_sp:
19106
    case T_MNEM_add_pc:
19107
    case T_MNEM_inc_sp:
19108
    case T_MNEM_dec_sp:
19109
      if (fragp->fr_var == 4)
19110
        {
19111
          /* ??? Choose between add and addw.  */
19112
          insn = THUMB_OP32 (opcode);
19113
          insn |= (old_op & 0xf0) << 4;
19114
          put_thumb32_insn (buf, insn);
19115
          if (opcode == T_MNEM_add_pc)
19116
            reloc_type = BFD_RELOC_ARM_T32_IMM12;
19117
          else
19118
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19119
        }
19120
      else
19121
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19122
      pc_rel = 0;
19123
      break;
19124
 
19125
    case T_MNEM_addi:
19126
    case T_MNEM_addis:
19127
    case T_MNEM_subi:
19128
    case T_MNEM_subis:
19129
      if (fragp->fr_var == 4)
19130
        {
19131
          insn = THUMB_OP32 (opcode);
19132
          insn |= (old_op & 0xf0) << 4;
19133
          insn |= (old_op & 0xf) << 16;
19134
          put_thumb32_insn (buf, insn);
19135
          if (insn & (1 << 20))
19136
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19137
          else
19138
            reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19139
        }
19140
      else
19141
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19142
      pc_rel = 0;
19143
      break;
19144
    default:
19145
      abort ();
19146
    }
19147
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
19148
                      (enum bfd_reloc_code_real) reloc_type);
19149
  fixp->fx_file = fragp->fr_file;
19150
  fixp->fx_line = fragp->fr_line;
19151
  fragp->fr_fix += fragp->fr_var;
19152
}
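/* Explanatory note: md_convert_frag recovers the register fields from the
   original narrow (16-bit) encoding still sitting in the frag (old_op)
   when it rewrites the instruction into its 32-bit form, and then emits
   the matching relocation for the immediate or branch offset.  */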
19153
 
/* Return the size of a relaxable immediate operand instruction.
   SHIFT and SIZE specify the form of the allowable immediate.  */
static int
relax_immediate (fragS *fragp, int size, int shift)
{
  offsetT offset;
  offsetT mask;
  offsetT low;

  /* ??? Should be able to do better than this.  */
  if (fragp->fr_symbol)
    return 4;

  low = (1 << shift) - 1;
  mask = (1 << (shift + size)) - (1 << shift);
  offset = fragp->fr_offset;
  /* Force misaligned offsets to 32-bit variant.  */
  if (offset & low)
    return 4;
  if (offset & ~mask)
    return 4;
  return 2;
}
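/* Worked example (derived from the call sites in arm_relax_frag below):
   relax_immediate (fragp, 5, 2) is used for narrow LDR/STR, so only
   word-aligned offsets in the range 0..124 (a 5-bit field scaled by 4)
   keep the 16-bit encoding; anything else forces the 32-bit form.  */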
19177
 
19178
/* Get the address of a symbol during relaxation.  */
19179
static addressT
19180
relaxed_symbol_addr (fragS *fragp, long stretch)
19181
{
19182
  fragS *sym_frag;
19183
  addressT addr;
19184
  symbolS *sym;
19185
 
19186
  sym = fragp->fr_symbol;
19187
  sym_frag = symbol_get_frag (sym);
19188
  know (S_GET_SEGMENT (sym) != absolute_section
19189
        || sym_frag == &zero_address_frag);
19190
  addr = S_GET_VALUE (sym) + fragp->fr_offset;
19191
 
19192
  /* If the frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag in between grows, and that will force
     another pass.  */
19196
 
19197
  if (stretch != 0
19198
      && sym_frag->relax_marker != fragp->relax_marker)
19199
    {
19200
      fragS *f;
19201
 
19202
      /* Adjust stretch for any alignment frag.  Note that if we have
         been expanding the earlier code, the symbol may be
         defined in what appears to be an earlier frag.  FIXME:
         This doesn't handle the fr_subtype field, which specifies
         a maximum number of bytes to skip when doing an
         alignment.  */
19208
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
19209
        {
19210
          if (f->fr_type == rs_align || f->fr_type == rs_align_code)
19211
            {
19212
              if (stretch < 0)
19213
                stretch = - ((- stretch)
19214
                             & ~ ((1 << (int) f->fr_offset) - 1));
19215
              else
19216
                stretch &= ~ ((1 << (int) f->fr_offset) - 1);
19217
              if (stretch == 0)
19218
                break;
19219
            }
19220
        }
19221
      if (f != NULL)
19222
        addr += stretch;
19223
    }
19224
 
19225
  return addr;
19226
}
19227
 
19228
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
19229
   load.  */
19230
static int
19231
relax_adr (fragS *fragp, asection *sec, long stretch)
19232
{
19233
  addressT addr;
19234
  offsetT val;
19235
 
19236
  /* Assume worst case for symbols not known to be in the same section.  */
19237
  if (fragp->fr_symbol == NULL
19238
      || !S_IS_DEFINED (fragp->fr_symbol)
19239
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
19240
      || S_IS_WEAK (fragp->fr_symbol))
19241
    return 4;
19242
 
19243
  val = relaxed_symbol_addr (fragp, stretch);
19244
  addr = fragp->fr_address + fragp->fr_fix;
19245
  addr = (addr + 4) & ~3;
19246
  /* Force misaligned targets to 32-bit variant.  */
19247
  if (val & 3)
19248
    return 4;
19249
  val -= addr;
19250
  if (val < 0 || val > 1020)
19251
    return 4;
19252
  return 2;
19253
}
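/* In other words, the narrow form of adr (or a PC-relative literal load)
   survives relaxation only when the target is word aligned and lies no
   more than 1020 bytes after the word-aligned PC.  */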
19254
 
19255
/* Return the size of a relaxable add/sub immediate instruction.  */
static int
relax_addsub (fragS *fragp, asection *sec)
{
  char *buf;
  int op;

  buf = fragp->fr_literal + fragp->fr_fix;
  op = bfd_get_16(sec->owner, buf);
  /* When the destination register matches the source register the 8-bit
     immediate form is available; otherwise only the 3-bit form is.  */
  if ((op & 0xf) == ((op >> 4) & 0xf))
    return relax_immediate (fragp, 8, 0);
  else
    return relax_immediate (fragp, 3, 0);
}
19269
 
19270
 
19271
/* Return the size of a relaxable branch instruction.  BITS is the
19272
   size of the offset field in the narrow instruction.  */
19273
 
19274
static int
19275
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
19276
{
19277
  addressT addr;
19278
  offsetT val;
19279
  offsetT limit;
19280
 
19281
  /* Assume worst case for symbols not known to be in the same section.  */
19282
  if (!S_IS_DEFINED (fragp->fr_symbol)
19283
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
19284
      || S_IS_WEAK (fragp->fr_symbol))
19285
    return 4;
19286
 
19287
#ifdef OBJ_ELF
19288
  if (S_IS_DEFINED (fragp->fr_symbol)
19289
      && ARM_IS_FUNC (fragp->fr_symbol))
19290
      return 4;
19291
 
19292
  /* PR 12532.  Global symbols with default visibility might
19293
     be preempted, so do not relax relocations to them.  */
19294
  if ((ELF_ST_VISIBILITY (S_GET_OTHER (fragp->fr_symbol)) == STV_DEFAULT)
19295
      && (! S_IS_LOCAL (fragp->fr_symbol)))
19296
    return 4;
19297
#endif
19298
 
19299
  val = relaxed_symbol_addr (fragp, stretch);
19300
  addr = fragp->fr_address + fragp->fr_fix + 4;
19301
  val -= addr;
19302
 
19303
  /* The instruction field holds a signed offset in halfwords, so the
     reachable byte offset is the field value * 2, i.e. +/- (1 << BITS).  */
19304
  limit = 1 << bits;
19305
  if (val >= limit || val < -limit)
19306
    return 4;
19307
  return 2;
19308
}
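/* Concretely, arm_relax_frag passes BITS == 11 for an unconditional Thumb
   branch (roughly +/-2KB reach in the 16-bit encoding) and BITS == 8 for
   a conditional branch (roughly +/-256 bytes).  */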
19309
 
19310
 
19311
/* Relax a machine dependent frag.  This returns the amount by which
19312
   the current size of the frag should change.  */
19313
 
19314
int
19315
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
19316
{
19317
  int oldsize;
19318
  int newsize;
19319
 
19320
  oldsize = fragp->fr_var;
19321
  switch (fragp->fr_subtype)
19322
    {
19323
    case T_MNEM_ldr_pc2:
19324
      newsize = relax_adr (fragp, sec, stretch);
19325
      break;
19326
    case T_MNEM_ldr_pc:
19327
    case T_MNEM_ldr_sp:
19328
    case T_MNEM_str_sp:
19329
      newsize = relax_immediate (fragp, 8, 2);
19330
      break;
19331
    case T_MNEM_ldr:
19332
    case T_MNEM_str:
19333
      newsize = relax_immediate (fragp, 5, 2);
19334
      break;
19335
    case T_MNEM_ldrh:
19336
    case T_MNEM_strh:
19337
      newsize = relax_immediate (fragp, 5, 1);
19338
      break;
19339
    case T_MNEM_ldrb:
19340
    case T_MNEM_strb:
19341
      newsize = relax_immediate (fragp, 5, 0);
19342
      break;
19343
    case T_MNEM_adr:
19344
      newsize = relax_adr (fragp, sec, stretch);
19345
      break;
19346
    case T_MNEM_mov:
19347
    case T_MNEM_movs:
19348
    case T_MNEM_cmp:
19349
    case T_MNEM_cmn:
19350
      newsize = relax_immediate (fragp, 8, 0);
19351
      break;
19352
    case T_MNEM_b:
19353
      newsize = relax_branch (fragp, sec, 11, stretch);
19354
      break;
19355
    case T_MNEM_bcond:
19356
      newsize = relax_branch (fragp, sec, 8, stretch);
19357
      break;
19358
    case T_MNEM_add_sp:
19359
    case T_MNEM_add_pc:
19360
      newsize = relax_immediate (fragp, 8, 2);
19361
      break;
19362
    case T_MNEM_inc_sp:
19363
    case T_MNEM_dec_sp:
19364
      newsize = relax_immediate (fragp, 7, 2);
19365
      break;
19366
    case T_MNEM_addi:
19367
    case T_MNEM_addis:
19368
    case T_MNEM_subi:
19369
    case T_MNEM_subis:
19370
      newsize = relax_addsub (fragp, sec);
19371
      break;
19372
    default:
19373
      abort ();
19374
    }
19375
 
19376
  fragp->fr_var = newsize;
19377
  /* Freeze wide instructions that are at or before the same location as
19378
     in the previous pass.  This avoids infinite loops.
19379
     Don't freeze them unconditionally because targets may be artificially
19380
     misaligned by the expansion of preceding frags.  */
19381
  if (stretch <= 0 && newsize > 2)
19382
    {
19383
      md_convert_frag (sec->owner, sec, fragp);
19384
      frag_wane (fragp);
19385
    }
19386
 
19387
  return newsize - oldsize;
19388
}
19389
 
19390
/* Round up a section size to the appropriate boundary.  */
19391
 
19392
valueT
19393
md_section_align (segT   segment ATTRIBUTE_UNUSED,
19394
                  valueT size)
19395
{
19396
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
19397
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
19398
    {
19399
      /* For a.out, force the section size to be aligned.  If we don't do
19400
         this, BFD will align it for us, but it will not write out the
19401
         final bytes of the section.  This may be a bug in BFD, but it is
19402
         easier to fix it here since that is how the other a.out targets
19403
         work.  */
19404
      int align;
19405
 
19406
      align = bfd_get_section_alignment (stdoutput, segment);
19407
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
19408
    }
19409
#endif
19410
 
19411
  return size;
19412
}
19413
 
19414
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
19415
   of an rs_align_code fragment.  */
19416
 
19417
void
19418
arm_handle_align (fragS * fragP)
19419
{
19420
  static char const arm_noop[2][2][4] =
19421
    {
19422
      {  /* ARMv1 */
19423
        {0x00, 0x00, 0xa0, 0xe1},  /* LE */
19424
        {0xe1, 0xa0, 0x00, 0x00},  /* BE */
19425
      },
19426
      {  /* ARMv6k */
19427
        {0x00, 0xf0, 0x20, 0xe3},  /* LE */
19428
        {0xe3, 0x20, 0xf0, 0x00},  /* BE */
19429
      },
19430
    };
19431
  static char const thumb_noop[2][2][2] =
19432
    {
19433
      {  /* Thumb-1 */
19434
        {0xc0, 0x46},  /* LE */
19435
        {0x46, 0xc0},  /* BE */
19436
      },
19437
      {  /* Thumb-2 */
19438
        {0x00, 0xbf},  /* LE */
19439
        {0xbf, 0x00}   /* BE */
19440
      }
19441
    };
19442
  static char const wide_thumb_noop[2][4] =
19443
    {  /* Wide Thumb-2 */
19444
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
19445
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
19446
    };
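
  /* For reference, these byte patterns encode: MOV r0, r0 (0xe1a00000)
     for pre-v6K ARM, NOP (0xe320f000) for ARMv6K and later, MOV r8, r8
     (0x46c0) for Thumb-1, and NOP (0xbf00) / NOP.W (0xf3af8000) for
     Thumb-2.  */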
19447
 
19448
  unsigned bytes, fix, noop_size;
19449
  char * p;
19450
  const char * noop;
19451
  const char *narrow_noop = NULL;
19452
#ifdef OBJ_ELF
19453
  enum mstate state;
19454
#endif
19455
 
19456
  if (fragP->fr_type != rs_align_code)
19457
    return;
19458
 
19459
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
19460
  p = fragP->fr_literal + fragP->fr_fix;
19461
  fix = 0;
19462
 
19463
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
19464
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
19465
 
19466
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
19467
 
19468
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
19469
    {
19470
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
19471
        {
19472
          narrow_noop = thumb_noop[1][target_big_endian];
19473
          noop = wide_thumb_noop[target_big_endian];
19474
        }
19475
      else
19476
        noop = thumb_noop[0][target_big_endian];
19477
      noop_size = 2;
19478
#ifdef OBJ_ELF
19479
      state = MAP_THUMB;
19480
#endif
19481
    }
19482
  else
19483
    {
19484
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
19485
                     [target_big_endian];
19486
      noop_size = 4;
19487
#ifdef OBJ_ELF
19488
      state = MAP_ARM;
19489
#endif
19490
    }
19491
 
19492
  fragP->fr_var = noop_size;
19493
 
19494
  if (bytes & (noop_size - 1))
19495
    {
19496
      fix = bytes & (noop_size - 1);
19497
#ifdef OBJ_ELF
19498
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
19499
#endif
19500
      memset (p, 0, fix);
19501
      p += fix;
19502
      bytes -= fix;
19503
    }
19504
 
19505
  if (narrow_noop)
19506
    {
19507
      if (bytes & noop_size)
19508
        {
19509
          /* Insert a narrow noop.  */
19510
          memcpy (p, narrow_noop, noop_size);
19511
          p += noop_size;
19512
          bytes -= noop_size;
19513
          fix += noop_size;
19514
        }
19515
 
19516
      /* Use wide noops for the remainder.  */
19517
      noop_size = 4;
19518
    }
19519
 
19520
  while (bytes >= noop_size)
19521
    {
19522
      memcpy (p, noop, noop_size);
19523
      p += noop_size;
19524
      bytes -= noop_size;
19525
      fix += noop_size;
19526
    }
19527
 
19528
  fragP->fr_fix += fix;
19529
}
19530
 
19531
/* Called from md_do_align.  Used to create an alignment
19532
   frag in a code section.  */
19533
 
19534
void
19535
arm_frag_align_code (int n, int max)
19536
{
19537
  char * p;
19538
 
19539
  /* We assume that there will never be a requirement
19540
     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
19541
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
19542
    {
19543
      char err_msg[128];
19544
 
19545
      sprintf (err_msg,
19546
        _("alignments greater than %d bytes not supported in .text sections."),
19547
        MAX_MEM_FOR_RS_ALIGN_CODE + 1);
19548
      as_fatal ("%s", err_msg);
19549
    }
19550
 
19551
  p = frag_var (rs_align_code,
19552
                MAX_MEM_FOR_RS_ALIGN_CODE,
19553
                1,
19554
                (relax_substateT) max,
19555
                (symbolS *) NULL,
19556
                (offsetT) n,
19557
                (char *) NULL);
19558
  *p = 0;
19559
}
19560
 
19561
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */
19566
 
19567
#ifndef OBJ_ELF
19568
void
19569
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
19570
{
19571
  /* Record whether this frag is in an ARM or a THUMB area.  */
19572
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19573
}
19574
 
19575
#else /* OBJ_ELF is defined.  */
19576
void
19577
arm_init_frag (fragS * fragP, int max_chars)
19578
{
19579
  /* If the current ARM vs THUMB mode has not already
19580
     been recorded into this frag then do so now.  */
19581
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
19582
    {
19583
      fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19584
 
19585
      /* Record a mapping symbol for alignment frags.  We will delete this
19586
         later if the alignment ends up empty.  */
19587
      switch (fragP->fr_type)
19588
        {
19589
          case rs_align:
19590
          case rs_align_test:
19591
          case rs_fill:
19592
            mapping_state_2 (MAP_DATA, max_chars);
19593
            break;
19594
          case rs_align_code:
19595
            mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
19596
            break;
19597
          default:
19598
            break;
19599
        }
19600
    }
19601
}
19602
 
19603
/* When we change sections we need to issue a new mapping symbol.  */
19604
 
19605
void
19606
arm_elf_change_section (void)
19607
{
19608
  /* Link an unlinked unwind index table section to the .text section.  */
19609
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
19610
      && elf_linked_to_section (now_seg) == NULL)
19611
    elf_linked_to_section (now_seg) = text_section;
19612
}
19613
 
19614
int
19615
arm_elf_section_type (const char * str, size_t len)
19616
{
19617
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
19618
    return SHT_ARM_EXIDX;
19619
 
19620
  return -1;
19621
}
19622
 
19623
/* Code to deal with unwinding tables.  */
19624
 
19625
static void add_unwind_adjustsp (offsetT);
19626
 
19627
/* Generate any deferred unwind frame offset.  */
19628
 
19629
static void
19630
flush_pending_unwind (void)
19631
{
19632
  offsetT offset;
19633
 
19634
  offset = unwind.pending_offset;
19635
  unwind.pending_offset = 0;
19636
  if (offset != 0)
19637
    add_unwind_adjustsp (offset);
19638
}
19639
 
19640
/* Add an opcode to this list for this function.  Two-byte opcodes should
19641
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
19642
   order.  */
19643
 
19644
static void
19645
add_unwind_opcode (valueT op, int length)
19646
{
19647
  /* Add any deferred stack adjustment.  */
19648
  if (unwind.pending_offset)
19649
    flush_pending_unwind ();
19650
 
19651
  unwind.sp_restored = 0;
19652
 
19653
  if (unwind.opcode_count + length > unwind.opcode_alloc)
19654
    {
19655
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
19656
      if (unwind.opcodes)
19657
        unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
19658
                                                     unwind.opcode_alloc);
19659
      else
19660
        unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
19661
    }
19662
  while (length > 0)
19663
    {
19664
      length--;
19665
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
19666
      op >>= 8;
19667
      unwind.opcode_count++;
19668
    }
19669
}
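/* For example, the two-byte EHABI opcode 0xb1 0x0f ("pop {r0-r3}") is
   passed here as 0xb10f; its low byte is stored first, which is what the
   reverse build order expects.  */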
19670
 
19671
/* Add unwind opcodes to adjust the stack pointer.  */
19672
 
19673
static void
19674
add_unwind_adjustsp (offsetT offset)
19675
{
19676
  valueT op;
19677
 
19678
  if (offset > 0x200)
19679
    {
19680
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
19681
      char bytes[5];
19682
      int n;
19683
      valueT o;
19684
 
19685
      /* Long form: 0xb2, uleb128.  */
19686
      /* This might not fit in a word so add the individual bytes,
19687
         remembering the list is built in reverse order.  */
19688
      o = (valueT) ((offset - 0x204) >> 2);
19689
      if (o == 0)
19690
        add_unwind_opcode (0, 1);
19691
 
19692
      /* Calculate the uleb128 encoding of the offset.  */
19693
      n = 0;
19694
      while (o)
19695
        {
19696
          bytes[n] = o & 0x7f;
19697
          o >>= 7;
19698
          if (o)
19699
            bytes[n] |= 0x80;
19700
          n++;
19701
        }
19702
      /* Add the insn.  */
19703
      for (; n; n--)
19704
        add_unwind_opcode (bytes[n - 1], 1);
19705
      add_unwind_opcode (0xb2, 1);
19706
    }
19707
  else if (offset > 0x100)
19708
    {
19709
      /* Two short opcodes.  */
19710
      add_unwind_opcode (0x3f, 1);
19711
      op = (offset - 0x104) >> 2;
19712
      add_unwind_opcode (op, 1);
19713
    }
19714
  else if (offset > 0)
19715
    {
19716
      /* Short opcode.  */
19717
      op = (offset - 4) >> 2;
19718
      add_unwind_opcode (op, 1);
19719
    }
19720
  else if (offset < 0)
19721
    {
19722
      offset = -offset;
19723
      while (offset > 0x100)
19724
        {
19725
          add_unwind_opcode (0x7f, 1);
19726
          offset -= 0x100;
19727
        }
19728
      op = ((offset - 4) >> 2) | 0x40;
19729
      add_unwind_opcode (op, 1);
19730
    }
19731
}
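/* Worked example: an adjustment of 0x300 bytes takes the long form.
   (0x300 - 0x204) >> 2 == 0x3f, so the uleb128 byte 0x3f is added first
   and the 0xb2 opcode last; since the list is built in reverse order the
   final opcode stream reads 0xb2 0x3f.  */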
19732
 
19733
/* Finish the list of unwind opcodes for this function.  */
19734
static void
19735
finish_unwind_opcodes (void)
19736
{
19737
  valueT op;
19738
 
19739
  if (unwind.fp_used)
19740
    {
19741
      /* Adjust sp as necessary.  */
19742
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
19743
      flush_pending_unwind ();
19744
 
19745
      /* After restoring sp from the frame pointer.  */
19746
      op = 0x90 | unwind.fp_reg;
19747
      add_unwind_opcode (op, 1);
19748
    }
19749
  else
19750
    flush_pending_unwind ();
19751
}
19752
 
19753
 
19754
/* Start an exception table entry.  If idx is nonzero this is an index table
19755
   entry.  */
19756
 
19757
static void
19758
start_unwind_section (const segT text_seg, int idx)
19759
{
19760
  const char * text_name;
19761
  const char * prefix;
19762
  const char * prefix_once;
19763
  const char * group_name;
19764
  size_t prefix_len;
19765
  size_t text_len;
19766
  char * sec_name;
19767
  size_t sec_name_len;
19768
  int type;
19769
  int flags;
19770
  int linkonce;
19771
 
19772
  if (idx)
19773
    {
19774
      prefix = ELF_STRING_ARM_unwind;
19775
      prefix_once = ELF_STRING_ARM_unwind_once;
19776
      type = SHT_ARM_EXIDX;
19777
    }
19778
  else
19779
    {
19780
      prefix = ELF_STRING_ARM_unwind_info;
19781
      prefix_once = ELF_STRING_ARM_unwind_info_once;
19782
      type = SHT_PROGBITS;
19783
    }
19784
 
19785
  text_name = segment_name (text_seg);
19786
  if (streq (text_name, ".text"))
19787
    text_name = "";
19788
 
19789
  if (strncmp (text_name, ".gnu.linkonce.t.",
19790
               strlen (".gnu.linkonce.t.")) == 0)
19791
    {
19792
      prefix = prefix_once;
19793
      text_name += strlen (".gnu.linkonce.t.");
19794
    }
19795
 
19796
  prefix_len = strlen (prefix);
19797
  text_len = strlen (text_name);
19798
  sec_name_len = prefix_len + text_len;
19799
  sec_name = (char *) xmalloc (sec_name_len + 1);
19800
  memcpy (sec_name, prefix, prefix_len);
19801
  memcpy (sec_name + prefix_len, text_name, text_len);
19802
  sec_name[prefix_len + text_len] = '\0';
19803
 
19804
  flags = SHF_ALLOC;
19805
  linkonce = 0;
19806
  group_name = 0;
19807
 
19808
  /* Handle COMDAT group.  */
19809
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
19810
    {
19811
      group_name = elf_group_name (text_seg);
19812
      if (group_name == NULL)
19813
        {
19814
          as_bad (_("Group section `%s' has no group signature"),
19815
                  segment_name (text_seg));
19816
          ignore_rest_of_line ();
19817
          return;
19818
        }
19819
      flags |= SHF_GROUP;
19820
      linkonce = 1;
19821
    }
19822
 
19823
  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
19824
 
19825
  /* Set the section link for index tables.  */
19826
  if (idx)
19827
    elf_linked_to_section (now_seg) = text_seg;
19828
}
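
/* For code in the plain .text section this typically selects ".ARM.exidx"
   for the index table and ".ARM.extab" for the unwind table proper; the
   exact names come from the ELF_STRING_ARM_unwind* macros.  */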
19829
 
19830
 
19831
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */
19834
 
19835
static valueT
19836
create_unwind_entry (int have_data)
19837
{
19838
  int size;
19839
  addressT where;
19840
  char *ptr;
19841
  /* The current word of data.  */
19842
  valueT data;
19843
  /* The number of bytes left in this word.  */
19844
  int n;
19845
 
19846
  finish_unwind_opcodes ();
19847
 
19848
  /* Remember the current text section.  */
19849
  unwind.saved_seg = now_seg;
19850
  unwind.saved_subseg = now_subseg;
19851
 
19852
  start_unwind_section (now_seg, 0);
19853
 
19854
  if (unwind.personality_routine == NULL)
19855
    {
19856
      if (unwind.personality_index == -2)
19857
        {
19858
          if (have_data)
19859
            as_bad (_("handlerdata in cantunwind frame"));
19860
          return 1; /* EXIDX_CANTUNWIND.  */
19861
        }
19862
 
19863
      /* Use a default personality routine if none is specified.  */
19864
      if (unwind.personality_index == -1)
19865
        {
19866
          if (unwind.opcode_count > 3)
19867
            unwind.personality_index = 1;
19868
          else
19869
            unwind.personality_index = 0;
19870
        }
19871
 
19872
      /* Space for the personality routine entry.  */
19873
      if (unwind.personality_index == 0)
19874
        {
19875
          if (unwind.opcode_count > 3)
19876
            as_bad (_("too many unwind opcodes for personality routine 0"));
19877
 
19878
          if (!have_data)
19879
            {
19880
              /* All the data is inline in the index table.  */
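              /* Illustrative note (added): the top byte 0x80 selects the
                 short-form entry using personality routine 0; the loop
                 below packs at most three opcode bytes beneath it.  With
                 no opcodes at all, the padding yields 0x80b0b0b0, i.e.
                 three "finish" opcodes.  */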
19881
              data = 0x80;
19882
              n = 3;
19883
              while (unwind.opcode_count > 0)
19884
                {
19885
                  unwind.opcode_count--;
19886
                  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
19887
                  n--;
19888
                }
19889
 
19890
              /* Pad with "finish" opcodes.  */
19891
              while (n--)
19892
                data = (data << 8) | 0xb0;
19893
 
19894
              return data;
19895
            }
19896
          size = 0;
19897
        }
19898
      else
19899
        /* We get two opcodes "free" in the first word.  */
19900
        size = unwind.opcode_count - 2;
19901
    }
19902
  else
19903
    /* An extra byte is required for the opcode count.  */
19904
    size = unwind.opcode_count + 1;
19905
 
19906
  size = (size + 3) >> 2;
19907
  if (size > 0xff)
19908
    as_bad (_("too many unwind opcodes"));
19909
 
19910
  frag_align (2, 0, 0);
19911
  record_alignment (now_seg, 2);
19912
  unwind.table_entry = expr_build_dot ();
19913
 
19914
  /* Allocate the table entry.  */
19915
  ptr = frag_more ((size << 2) + 4);
19916
  where = frag_now_fix () - ((size << 2) + 4);
19917
 
19918
  switch (unwind.personality_index)
19919
    {
19920
    case -1:
19921
      /* ??? Should this be a PLT generating relocation?  */
19922
      /* Custom personality routine.  */
19923
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
19924
               BFD_RELOC_ARM_PREL31);
19925
 
19926
      where += 4;
19927
      ptr += 4;
19928
 
19929
      /* Set the first byte to the number of additional words.  */
19930
      data = size - 1;
19931
      n = 3;
19932
      break;
19933
 
19934
    /* ABI defined personality routines.  */
19935
    case 0:
19936
      /* Three opcode bytes are packed into the first word.  */
19937
      data = 0x80;
19938
      n = 3;
19939
      break;
19940
 
19941
    case 1:
19942
    case 2:
19943
      /* The size and first two opcode bytes go in the first word.  */
19944
      data = ((0x80 + unwind.personality_index) << 8) | size;
19945
      n = 2;
19946
      break;
19947
 
19948
    default:
19949
      /* Should never happen.  */
19950
      abort ();
19951
    }
19952
 
19953
  /* Pack the opcodes into words (MSB first), reversing the list at the same
19954
     time.  */
19955
  while (unwind.opcode_count > 0)
19956
    {
19957
      if (n == 0)
19958
        {
19959
          md_number_to_chars (ptr, data, 4);
19960
          ptr += 4;
19961
          n = 4;
19962
          data = 0;
19963
        }
19964
      unwind.opcode_count--;
19965
      n--;
19966
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
19967
    }
19968
 
19969
  /* Finish off the last word.  */
19970
  if (n < 4)
19971
    {
19972
      /* Pad with "finish" opcodes.  */
19973
      while (n--)
19974
        data = (data << 8) | 0xb0;
19975
 
19976
      md_number_to_chars (ptr, data, 4);
19977
    }
19978
 
19979
  if (!have_data)
19980
    {
19981
      /* Add an empty descriptor if there is no user-specified data.   */
19982
      ptr = frag_more (4);
19983
      md_number_to_chars (ptr, 0, 4);
19984
    }
19985
 
19986
  return 0;
19987
}
19988
 
19989
 
19990
/* Initialize the DWARF-2 unwind information for this procedure.  */
19991
 
19992
void
19993
tc_arm_frame_initial_instructions (void)
19994
{
19995
  cfi_add_CFA_def_cfa (REG_SP, 0);
19996
}
19997
#endif /* OBJ_ELF */
19998
 
19999
/* Convert REGNAME to a DWARF-2 register number.  */
20000
 
20001
int
20002
tc_arm_regname_to_dw2regnum (char *regname)
20003
{
20004
  int reg = arm_reg_parse (&regname, REG_TYPE_RN);
20005
 
20006
  if (reg == FAIL)
20007
    return -1;
20008
 
20009
  return reg;
20010
}
20011
 
20012
#ifdef TE_PE
20013
void
20014
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
20015
{
20016
  expressionS exp;
20017
 
20018
  exp.X_op = O_secrel;
20019
  exp.X_add_symbol = symbol;
20020
  exp.X_add_number = 0;
20021
  emit_expr (&exp, size);
20022
}
20023
#endif
20024
 
20025
/* MD interface: Symbol and relocation handling.  */
20026
 
20027
/* Return the address within the segment that a PC-relative fixup is
20028
   relative to.  For ARM, PC-relative fixups applied to instructions
20029
   are generally relative to the location of the fixup plus 8 bytes.
20030
   Thumb branches are offset by 4, and Thumb loads relative to PC
20031
   require special handling.  */
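/* Worked example (added for clarity): an ARM-state branch assembled at
   address 0x1000 is encoded relative to 0x1008, while a Thumb branch at
   the same address is encoded relative to 0x1004.  */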
20032
 
20033
long
20034
md_pcrel_from_section (fixS * fixP, segT seg)
20035
{
20036
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
20037
 
20038
  /* If this is pc-relative and we are going to emit a relocation
20039
     then we just want to put out any pipeline compensation that the linker
20040
     will need.  Otherwise we want to use the calculated base.
20041
     For WinCE we skip the bias for externals as well, since this
20042
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
20043
  if (fixP->fx_pcrel
20044
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
20045
          || (arm_force_relocation (fixP)
20046
#ifdef TE_WINCE
20047
              && !S_IS_EXTERNAL (fixP->fx_addsy)
20048
#endif
20049
              )))
20050
    base = 0;
20051
 
20052
 
20053
  switch (fixP->fx_r_type)
20054
    {
20055
      /* PC relative addressing on the Thumb is slightly odd as the
20056
         bottom two bits of the PC are forced to zero for the
20057
         calculation.  This happens *after* application of the
20058
         pipeline offset.  However, Thumb adrl already adjusts for
20059
         this, so we need not do it again.  */
20060
    case BFD_RELOC_ARM_THUMB_ADD:
20061
      return base & ~3;
20062
 
20063
    case BFD_RELOC_ARM_THUMB_OFFSET:
20064
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
20065
    case BFD_RELOC_ARM_T32_ADD_PC12:
20066
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20067
      return (base + 4) & ~3;
20068
 
20069
      /* Thumb branches are simply offset by +4.  */
20070
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
20071
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
20072
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
20073
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
20074
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
20075
      return base + 4;
20076
 
20077
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
20078
      if (fixP->fx_addsy
20079
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20080
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20081
          && ARM_IS_FUNC (fixP->fx_addsy)
20082
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20083
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20084
       return base + 4;
20085
 
20086
      /* BLX is like branches above, but forces the low two bits of PC to
20087
         zero.  */
20088
    case BFD_RELOC_THUMB_PCREL_BLX:
20089
      if (fixP->fx_addsy
20090
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20091
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20092
          && THUMB_IS_FUNC (fixP->fx_addsy)
20093
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20094
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20095
      return (base + 4) & ~3;
20096
 
20097
      /* ARM mode branches are offset by +8.  However, the Windows CE
20098
         loader expects the relocation not to take this into account.  */
20099
    case BFD_RELOC_ARM_PCREL_BLX:
20100
      if (fixP->fx_addsy
20101
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20102
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20103
          && ARM_IS_FUNC (fixP->fx_addsy)
20104
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20105
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20106
      return base + 8;
20107
 
20108
    case BFD_RELOC_ARM_PCREL_CALL:
20109
      if (fixP->fx_addsy
20110
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20111
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20112
          && THUMB_IS_FUNC (fixP->fx_addsy)
20113
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20114
        base = fixP->fx_where + fixP->fx_frag->fr_address;
20115
      return base + 8;
20116
 
20117
    case BFD_RELOC_ARM_PCREL_BRANCH:
20118
    case BFD_RELOC_ARM_PCREL_JUMP:
20119
    case BFD_RELOC_ARM_PLT32:
20120
#ifdef TE_WINCE
20121
      /* When handling fixups immediately (because we have already
20122
         discovered the value of a symbol, or the address of the frag
20123
         involved), we must account for the +8 offset, as the OS loader
20124
         will never see the reloc; see fixup_segment() in write.c.
20125
         The S_IS_EXTERNAL test handles the case of global symbols.
20126
         Those need the calculated base, not just the pipeline compensation the linker will need.  */
20127
      if (fixP->fx_pcrel
20128
          && fixP->fx_addsy != NULL
20129
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20130
          && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
20131
        return base + 8;
20132
      return base;
20133
#else
20134
      return base + 8;
20135
#endif
20136
 
20137
 
20138
      /* ARM mode loads relative to PC are also offset by +8.  Unlike
20139
         branches, the Windows CE loader *does* expect the relocation
20140
         to take this into account.  */
20141
    case BFD_RELOC_ARM_OFFSET_IMM:
20142
    case BFD_RELOC_ARM_OFFSET_IMM8:
20143
    case BFD_RELOC_ARM_HWLITERAL:
20144
    case BFD_RELOC_ARM_LITERAL:
20145
    case BFD_RELOC_ARM_CP_OFF_IMM:
20146
      return base + 8;
20147
 
20148
 
20149
      /* Other PC-relative relocations are un-offset.  */
20150
    default:
20151
      return base;
20152
    }
20153
}
20154
 
20155
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
20156
   Otherwise we have no need to default values of symbols.  */
20157
 
20158
symbolS *
20159
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
20160
{
20161
#ifdef OBJ_ELF
20162
  if (name[0] == '_' && name[1] == 'G'
20163
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
20164
    {
20165
      if (!GOT_symbol)
20166
        {
20167
          if (symbol_find (name))
20168
            as_bad (_("GOT already in the symbol table"));
20169
 
20170
          GOT_symbol = symbol_new (name, undefined_section,
20171
                                   (valueT) 0, & zero_address_frag);
20172
        }
20173
 
20174
      return GOT_symbol;
20175
    }
20176
#endif
20177
 
20178
  return NULL;
20179
}
20180
 
20181
/* Subroutine of md_apply_fix.   Check to see if an immediate can be
20182
   computed as two separate immediate values, added together.  We
20183
   already know that this value cannot be computed by just one ARM
20184
   instruction.  */
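/* Worked example (added for clarity): 0x00010004 is not a valid single ARM
   immediate, but it splits into 0x00000004 plus 0x00010000, each of which
   is an 8-bit value rotated by an even amount.  */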
20185
 
20186
static unsigned int
20187
validate_immediate_twopart (unsigned int   val,
20188
                            unsigned int * highpart)
20189
{
20190
  unsigned int a;
20191
  unsigned int i;
20192
 
20193
  for (i = 0; i < 32; i += 2)
20194
    if (((a = rotate_left (val, i)) & 0xff) != 0)
20195
      {
20196
        if (a & 0xff00)
20197
          {
20198
            if (a & ~ 0xffff)
20199
              continue;
20200
            * highpart = (a  >> 8) | ((i + 24) << 7);
20201
          }
20202
        else if (a & 0xff0000)
20203
          {
20204
            if (a & 0xff000000)
20205
              continue;
20206
            * highpart = (a >> 16) | ((i + 16) << 7);
20207
          }
20208
        else
20209
          {
20210
            gas_assert (a & 0xff000000);
20211
            * highpart = (a >> 24) | ((i + 8) << 7);
20212
          }
20213
 
20214
        return (a & 0xff) | (i << 7);
20215
      }
20216
 
20217
  return FAIL;
20218
}
20219
 
20220
static int
20221
validate_offset_imm (unsigned int val, int hwse)
20222
{
20223
  if ((hwse && val > 255) || val > 4095)
20224
    return FAIL;
20225
  return val;
20226
}
20227
 
20228
/* Subroutine of md_apply_fix.   Do those data_ops which can take a
20229
   negative immediate constant by altering the instruction.  A bit of
20230
   a hack really.
20231
        MOV <-> MVN
20232
        AND <-> BIC
20233
        ADC <-> SBC
20234
        by inverting the second operand, and
20235
        ADD <-> SUB
20236
        CMP <-> CMN
20237
        by negating the second operand.  */
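/* Illustrative note (added): for example, an immediate of -1 cannot be
   encoded for MOV, but its bitwise complement 0 can, so the MOV is
   rewritten as MVN #0; similarly ADD #-4 becomes SUB #4.  */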
20238
 
20239
static int
20240
negate_data_op (unsigned long * instruction,
20241
                unsigned long   value)
20242
{
20243
  int op, new_inst;
20244
  unsigned long negated, inverted;
20245
 
20246
  negated = encode_arm_immediate (-value);
20247
  inverted = encode_arm_immediate (~value);
20248
 
20249
  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
20250
  switch (op)
20251
    {
20252
      /* First negates.  */
20253
    case OPCODE_SUB:             /* ADD <-> SUB  */
20254
      new_inst = OPCODE_ADD;
20255
      value = negated;
20256
      break;
20257
 
20258
    case OPCODE_ADD:
20259
      new_inst = OPCODE_SUB;
20260
      value = negated;
20261
      break;
20262
 
20263
    case OPCODE_CMP:             /* CMP <-> CMN  */
20264
      new_inst = OPCODE_CMN;
20265
      value = negated;
20266
      break;
20267
 
20268
    case OPCODE_CMN:
20269
      new_inst = OPCODE_CMP;
20270
      value = negated;
20271
      break;
20272
 
20273
      /* Now the inverted ops.  */
20274
    case OPCODE_MOV:             /* MOV <-> MVN  */
20275
      new_inst = OPCODE_MVN;
20276
      value = inverted;
20277
      break;
20278
 
20279
    case OPCODE_MVN:
20280
      new_inst = OPCODE_MOV;
20281
      value = inverted;
20282
      break;
20283
 
20284
    case OPCODE_AND:             /* AND <-> BIC  */
20285
      new_inst = OPCODE_BIC;
20286
      value = inverted;
20287
      break;
20288
 
20289
    case OPCODE_BIC:
20290
      new_inst = OPCODE_AND;
20291
      value = inverted;
20292
      break;
20293
 
20294
    case OPCODE_ADC:              /* ADC <-> SBC  */
20295
      new_inst = OPCODE_SBC;
20296
      value = inverted;
20297
      break;
20298
 
20299
    case OPCODE_SBC:
20300
      new_inst = OPCODE_ADC;
20301
      value = inverted;
20302
      break;
20303
 
20304
      /* We cannot do anything.  */
20305
    default:
20306
      return FAIL;
20307
    }
20308
 
20309
  if (value == (unsigned) FAIL)
20310
    return FAIL;
20311
 
20312
  *instruction &= OPCODE_MASK;
20313
  *instruction |= new_inst << DATA_OP_SHIFT;
20314
  return value;
20315
}
20316
 
20317
/* Like negate_data_op, but for Thumb-2.   */
20318
 
20319
static unsigned int
20320
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
20321
{
20322
  int op, new_inst;
20323
  int rd;
20324
  unsigned int negated, inverted;
20325
 
20326
  negated = encode_thumb32_immediate (-value);
20327
  inverted = encode_thumb32_immediate (~value);
20328
 
20329
  rd = (*instruction >> 8) & 0xf;
20330
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
20331
  switch (op)
20332
    {
20333
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
20334
    case T2_OPCODE_SUB:
20335
      new_inst = T2_OPCODE_ADD;
20336
      value = negated;
20337
      break;
20338
 
20339
    case T2_OPCODE_ADD:
20340
      new_inst = T2_OPCODE_SUB;
20341
      value = negated;
20342
      break;
20343
 
20344
      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
20345
    case T2_OPCODE_ORR:
20346
      new_inst = T2_OPCODE_ORN;
20347
      value = inverted;
20348
      break;
20349
 
20350
    case T2_OPCODE_ORN:
20351
      new_inst = T2_OPCODE_ORR;
20352
      value = inverted;
20353
      break;
20354
 
20355
      /* AND <-> BIC.  TST has no inverted equivalent.  */
20356
    case T2_OPCODE_AND:
20357
      new_inst = T2_OPCODE_BIC;
20358
      if (rd == 15)
20359
        value = FAIL;
20360
      else
20361
        value = inverted;
20362
      break;
20363
 
20364
    case T2_OPCODE_BIC:
20365
      new_inst = T2_OPCODE_AND;
20366
      value = inverted;
20367
      break;
20368
 
20369
      /* ADC <-> SBC  */
20370
    case T2_OPCODE_ADC:
20371
      new_inst = T2_OPCODE_SBC;
20372
      value = inverted;
20373
      break;
20374
 
20375
    case T2_OPCODE_SBC:
20376
      new_inst = T2_OPCODE_ADC;
20377
      value = inverted;
20378
      break;
20379
 
20380
      /* We cannot do anything.  */
20381
    default:
20382
      return FAIL;
20383
    }
20384
 
20385
  if (value == (unsigned int)FAIL)
20386
    return FAIL;
20387
 
20388
  *instruction &= T2_OPCODE_MASK;
20389
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
20390
  return value;
20391
}
20392
 
20393
/* Read a 32-bit thumb instruction from buf.  */
20394
static unsigned long
20395
get_thumb32_insn (char * buf)
20396
{
20397
  unsigned long insn;
20398
  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
20399
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20400
 
20401
  return insn;
20402
}
20403
 
20404
 
20405
/* We usually want to set the low bit on the address of thumb function
20406
   symbols.  In particular .word foo - . should have the low bit set.
20407
   Generic code tries to fold the difference of two symbols to
20408
   a constant.  Prevent this and force a relocation when the first symbol
20409
   is a thumb function.  */
20410
 
20411
bfd_boolean
20412
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
20413
{
20414
  if (op == O_subtract
20415
      && l->X_op == O_symbol
20416
      && r->X_op == O_symbol
20417
      && THUMB_IS_FUNC (l->X_add_symbol))
20418
    {
20419
      l->X_op = O_subtract;
20420
      l->X_op_symbol = r->X_add_symbol;
20421
      l->X_add_number -= r->X_add_number;
20422
      return TRUE;
20423
    }
20424
 
20425
  /* Process as normal.  */
20426
  return FALSE;
20427
}
20428
 
20429
/* Encode Thumb2 unconditional branches and calls. The encoding
20430
   of the immediate value is identical for both.  */
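/* Note added for clarity: in the Thumb-2 B.W/BL encoding the second
   halfword stores J1 = NOT(I1 XOR S) and J2 = NOT(I2 XOR S); the final
   XOR with T2I1I2MASK below performs exactly that inversion.  */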
20431
 
20432
static void
20433
encode_thumb2_b_bl_offset (char * buf, offsetT value)
20434
{
20435
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
20436
  offsetT newval;
20437
  offsetT newval2;
20438
  addressT S, I1, I2, lo, hi;
20439
 
20440
  S = (value >> 24) & 0x01;
20441
  I1 = (value >> 23) & 0x01;
20442
  I2 = (value >> 22) & 0x01;
20443
  hi = (value >> 12) & 0x3ff;
20444
  lo = (value >> 1) & 0x7ff;
20445
  newval   = md_chars_to_number (buf, THUMB_SIZE);
20446
  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20447
  newval  |= (S << 10) | hi;
20448
  newval2 &=  ~T2I1I2MASK;
20449
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
20450
  md_number_to_chars (buf, newval, THUMB_SIZE);
20451
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20452
}
20453
 
20454
void
20455
md_apply_fix (fixS *    fixP,
20456
               valueT * valP,
20457
               segT     seg)
20458
{
20459
  offsetT        value = * valP;
20460
  offsetT        newval;
20461
  unsigned int   newimm;
20462
  unsigned long  temp;
20463
  int            sign;
20464
  char *         buf = fixP->fx_where + fixP->fx_frag->fr_literal;
20465
 
20466
  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
20467
 
20468
  /* Note whether this will delete the relocation.  */
20469
 
20470
  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
20471
    fixP->fx_done = 1;
20472
 
20473
  /* On a 64-bit host, silently truncate 'value' to 32 bits for
20474
     consistency with the behaviour on 32-bit hosts.  Remember value
20475
     for emit_reloc.  */
20476
  value &= 0xffffffff;
20477
  value ^= 0x80000000;
20478
  value -= 0x80000000;
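  /* Note (added): the XOR/subtract pair above sign-extends bit 31, so
     e.g. 0xfffffffc is treated as -4 on a 64-bit host.  */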
20479
 
20480
  *valP = value;
20481
  fixP->fx_addnumber = value;
20482
 
20483
  /* Same treatment for fixP->fx_offset.  */
20484
  fixP->fx_offset &= 0xffffffff;
20485
  fixP->fx_offset ^= 0x80000000;
20486
  fixP->fx_offset -= 0x80000000;
20487
 
20488
  switch (fixP->fx_r_type)
20489
    {
20490
    case BFD_RELOC_NONE:
20491
      /* This will need to go in the object file.  */
20492
      fixP->fx_done = 0;
20493
      break;
20494
 
20495
    case BFD_RELOC_ARM_IMMEDIATE:
20496
      /* We claim that this fixup has been processed here,
20497
         even if in fact we generate an error because we do
20498
         not have a reloc for it, so tc_gen_reloc will reject it.  */
20499
      fixP->fx_done = 1;
20500
 
20501
      if (fixP->fx_addsy)
20502
        {
20503
          const char *msg = 0;
20504
 
20505
          if (! S_IS_DEFINED (fixP->fx_addsy))
20506
            msg = _("undefined symbol %s used as an immediate value");
20507
          else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20508
            msg = _("symbol %s is in a different section");
20509
          else if (S_IS_WEAK (fixP->fx_addsy))
20510
            msg = _("symbol %s is weak and may be overridden later");
20511
 
20512
          if (msg)
20513
            {
20514
              as_bad_where (fixP->fx_file, fixP->fx_line,
20515
                            msg, S_GET_NAME (fixP->fx_addsy));
20516
              break;
20517
            }
20518
        }
20519
 
20520
      newimm = encode_arm_immediate (value);
20521
      temp = md_chars_to_number (buf, INSN_SIZE);
20522
 
20523
      /* If the instruction will fail, see if we can fix things up by
20524
         changing the opcode.  */
20525
      if (newimm == (unsigned int) FAIL
20526
          && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
20527
        {
20528
          as_bad_where (fixP->fx_file, fixP->fx_line,
20529
                        _("invalid constant (%lx) after fixup"),
20530
                        (unsigned long) value);
20531
          break;
20532
        }
20533
 
20534
      newimm |= (temp & 0xfffff000);
20535
      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20536
      break;
20537
 
20538
    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
20539
      {
20540
        unsigned int highpart = 0;
20541
        unsigned int newinsn  = 0xe1a00000; /* nop.  */
20542
 
20543
        if (fixP->fx_addsy)
20544
          {
20545
            const char *msg = 0;
20546
 
20547
            if (! S_IS_DEFINED (fixP->fx_addsy))
20548
              msg = _("undefined symbol %s used as an immediate value");
20549
            else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20550
              msg = _("symbol %s is in a different section");
20551
            else if (S_IS_WEAK (fixP->fx_addsy))
20552
              msg = _("symbol %s is weak and may be overridden later");
20553
 
20554
            if (msg)
20555
              {
20556
                as_bad_where (fixP->fx_file, fixP->fx_line,
20557
                              msg, S_GET_NAME (fixP->fx_addsy));
20558
                break;
20559
              }
20560
          }
20561
 
20562
        newimm = encode_arm_immediate (value);
20563
        temp = md_chars_to_number (buf, INSN_SIZE);
20564
 
20565
        /* If the instruction will fail, see if we can fix things up by
20566
           changing the opcode.  */
20567
        if (newimm == (unsigned int) FAIL
20568
            && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
20569
          {
20570
            /* No?  OK - try using two ADD instructions to generate
20571
               the value.  */
20572
            newimm = validate_immediate_twopart (value, & highpart);
20573
 
20574
            /* Yes - then make sure that the second instruction is
20575
               also an add.  */
20576
            if (newimm != (unsigned int) FAIL)
20577
              newinsn = temp;
20578
            /* Still no?  Try using a negated value.  */
20579
            else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
20580
              temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
20581
            /* Otherwise - give up.  */
20582
            else
20583
              {
20584
                as_bad_where (fixP->fx_file, fixP->fx_line,
20585
                              _("unable to compute ADRL instructions for PC offset of 0x%lx"),
20586
                              (long) value);
20587
                break;
20588
              }
20589
 
20590
            /* Replace the first operand in the 2nd instruction (which
20591
               is the PC) with the destination register.  We have
20592
               already added in the PC in the first instruction and we
20593
               do not want to do it again.  */
20594
            newinsn &= ~ 0xf0000;
20595
            newinsn |= ((newinsn & 0x0f000) << 4);
20596
          }
20597
 
20598
        newimm |= (temp & 0xfffff000);
20599
        md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20600
 
20601
        highpart |= (newinsn & 0xfffff000);
20602
        md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
20603
      }
20604
      break;
20605
 
20606
    case BFD_RELOC_ARM_OFFSET_IMM:
20607
      if (!fixP->fx_done && seg->use_rela_p)
20608
        value = 0;
20609
 
20610
    case BFD_RELOC_ARM_LITERAL:
20611
      sign = value > 0;
20612
 
20613
      if (value < 0)
20614
        value = - value;
20615
 
20616
      if (validate_offset_imm (value, 0) == FAIL)
20617
        {
20618
          if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
20619
            as_bad_where (fixP->fx_file, fixP->fx_line,
20620
                          _("invalid literal constant: pool needs to be closer"));
20621
          else
20622
            as_bad_where (fixP->fx_file, fixP->fx_line,
20623
                          _("bad immediate value for offset (%ld)"),
20624
                          (long) value);
20625
          break;
20626
        }
20627
 
20628
      newval = md_chars_to_number (buf, INSN_SIZE);
20629
      if (value == 0)
20630
        newval &= 0xfffff000;
20631
      else
20632
        {
20633
          newval &= 0xff7ff000;
20634
          newval |= value | (sign ? INDEX_UP : 0);
20635
        }
20636
      md_number_to_chars (buf, newval, INSN_SIZE);
20637
      break;
20638
 
20639
    case BFD_RELOC_ARM_OFFSET_IMM8:
20640
    case BFD_RELOC_ARM_HWLITERAL:
20641
      sign = value > 0;
20642
 
20643
      if (value < 0)
20644
        value = - value;
20645
 
20646
      if (validate_offset_imm (value, 1) == FAIL)
20647
        {
20648
          if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
20649
            as_bad_where (fixP->fx_file, fixP->fx_line,
20650
                          _("invalid literal constant: pool needs to be closer"));
20651
          else
20652
            as_bad (_("bad immediate value for 8-bit offset (%ld)"),
20653
                    (long) value);
20654
          break;
20655
        }
20656
 
20657
      newval = md_chars_to_number (buf, INSN_SIZE);
20658
      if (value == 0)
20659
        newval &= 0xfffff0f0;
20660
      else
20661
        {
20662
          newval &= 0xff7ff0f0;
20663
          newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
20664
        }
20665
      md_number_to_chars (buf, newval, INSN_SIZE);
20666
      break;
20667
 
20668
    case BFD_RELOC_ARM_T32_OFFSET_U8:
20669
      if (value < 0 || value > 1020 || value % 4 != 0)
20670
        as_bad_where (fixP->fx_file, fixP->fx_line,
20671
                      _("bad immediate value for offset (%ld)"), (long) value);
20672
      value /= 4;
20673
 
20674
      newval = md_chars_to_number (buf+2, THUMB_SIZE);
20675
      newval |= value;
20676
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
20677
      break;
20678
 
20679
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
20680
      /* This is a complicated relocation used for all varieties of Thumb32
20681
         load/store instruction with immediate offset:
20682
 
20683
         1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
20684
                                                   *4, optional writeback(W)
20685
                                                   (doubleword load/store)
20686
 
20687
         1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
20688
         1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
20689
         1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
20690
         1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
20691
         1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
20692
 
20693
         Uppercase letters indicate bits that are already encoded at
20694
         this point.  Lowercase letters are our problem.  For the
20695
         second block of instructions, the secondary opcode nybble
20696
         (bits 8..11) is present, and bit 23 is zero, even if this is
20697
         a PC-relative operation.  */
20698
      newval = md_chars_to_number (buf, THUMB_SIZE);
20699
      newval <<= 16;
20700
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
20701
 
20702
      if ((newval & 0xf0000000) == 0xe0000000)
20703
        {
20704
          /* Doubleword load/store: 8-bit offset, scaled by 4.  */
20705
          if (value >= 0)
20706
            newval |= (1 << 23);
20707
          else
20708
            value = -value;
20709
          if (value % 4 != 0)
20710
            {
20711
              as_bad_where (fixP->fx_file, fixP->fx_line,
20712
                            _("offset not a multiple of 4"));
20713
              break;
20714
            }
20715
          value /= 4;
20716
          if (value > 0xff)
20717
            {
20718
              as_bad_where (fixP->fx_file, fixP->fx_line,
20719
                            _("offset out of range"));
20720
              break;
20721
            }
20722
          newval &= ~0xff;
20723
        }
20724
      else if ((newval & 0x000f0000) == 0x000f0000)
20725
        {
20726
          /* PC-relative, 12-bit offset.  */
20727
          if (value >= 0)
20728
            newval |= (1 << 23);
20729
          else
20730
            value = -value;
20731
          if (value > 0xfff)
20732
            {
20733
              as_bad_where (fixP->fx_file, fixP->fx_line,
20734
                            _("offset out of range"));
20735
              break;
20736
            }
20737
          newval &= ~0xfff;
20738
        }
20739
      else if ((newval & 0x00000100) == 0x00000100)
20740
        {
20741
          /* Writeback: 8-bit, +/- offset.  */
20742
          if (value >= 0)
20743
            newval |= (1 << 9);
20744
          else
20745
            value = -value;
20746
          if (value > 0xff)
20747
            {
20748
              as_bad_where (fixP->fx_file, fixP->fx_line,
20749
                            _("offset out of range"));
20750
              break;
20751
            }
20752
          newval &= ~0xff;
20753
        }
20754
      else if ((newval & 0x00000f00) == 0x00000e00)
20755
        {
20756
          /* T-instruction: positive 8-bit offset.  */
20757
          if (value < 0 || value > 0xff)
20758
            {
20759
              as_bad_where (fixP->fx_file, fixP->fx_line,
20760
                            _("offset out of range"));
20761
              break;
20762
            }
20763
          newval &= ~0xff;
20764
          newval |= value;
20765
        }
20766
      else
20767
        {
20768
          /* Positive 12-bit or negative 8-bit offset.  */
20769
          int limit;
20770
          if (value >= 0)
20771
            {
20772
              newval |= (1 << 23);
20773
              limit = 0xfff;
20774
            }
20775
          else
20776
            {
20777
              value = -value;
20778
              limit = 0xff;
20779
            }
20780
          if (value > limit)
20781
            {
20782
              as_bad_where (fixP->fx_file, fixP->fx_line,
20783
                            _("offset out of range"));
20784
              break;
20785
            }
20786
          newval &= ~limit;
20787
        }
20788
 
20789
      newval |= value;
20790
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
20791
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
20792
      break;
20793
 
20794
    case BFD_RELOC_ARM_SHIFT_IMM:
20795
      newval = md_chars_to_number (buf, INSN_SIZE);
20796
      if (((unsigned long) value) > 32
20797
          || (value == 32
20798
              && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
20799
        {
20800
          as_bad_where (fixP->fx_file, fixP->fx_line,
20801
                        _("shift expression is too large"));
20802
          break;
20803
        }
20804
 
20805
      if (value == 0)
20806
        /* Shifts of zero must be done as lsl.  */
20807
        newval &= ~0x60;
20808
      else if (value == 32)
20809
        value = 0;
20810
      newval &= 0xfffff07f;
20811
      newval |= (value & 0x1f) << 7;
20812
      md_number_to_chars (buf, newval, INSN_SIZE);
20813
      break;
20814
 
20815
    case BFD_RELOC_ARM_T32_IMMEDIATE:
20816
    case BFD_RELOC_ARM_T32_ADD_IMM:
20817
    case BFD_RELOC_ARM_T32_IMM12:
20818
    case BFD_RELOC_ARM_T32_ADD_PC12:
20819
      /* We claim that this fixup has been processed here,
20820
         even if in fact we generate an error because we do
20821
         not have a reloc for it, so tc_gen_reloc will reject it.  */
20822
      fixP->fx_done = 1;
20823
 
20824
      if (fixP->fx_addsy
20825
          && ! S_IS_DEFINED (fixP->fx_addsy))
20826
        {
20827
          as_bad_where (fixP->fx_file, fixP->fx_line,
20828
                        _("undefined symbol %s used as an immediate value"),
20829
                        S_GET_NAME (fixP->fx_addsy));
20830
          break;
20831
        }
20832
 
20833
      newval = md_chars_to_number (buf, THUMB_SIZE);
20834
      newval <<= 16;
20835
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);
20836
 
20837
      newimm = FAIL;
20838
      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
20839
          || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20840
        {
20841
          newimm = encode_thumb32_immediate (value);
20842
          if (newimm == (unsigned int) FAIL)
20843
            newimm = thumb32_negate_data_op (&newval, value);
20844
        }
20845
      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
20846
          && newimm == (unsigned int) FAIL)
20847
        {
20848
          /* Turn add/sub into addw/subw.  */
20849
          if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20850
            newval = (newval & 0xfeffffff) | 0x02000000;
20851
          /* No flat 12-bit imm encoding for addsw/subsw.  */
20852
          if ((newval & 0x00100000) == 0)
20853
            {
20854
              /* 12 bit immediate for addw/subw.  */
20855
              if (value < 0)
20856
                {
20857
                  value = -value;
20858
                  newval ^= 0x00a00000;
20859
                }
20860
              if (value > 0xfff)
20861
                newimm = (unsigned int) FAIL;
20862
              else
20863
                newimm = value;
20864
            }
20865
        }
20866
 
20867
      if (newimm == (unsigned int)FAIL)
20868
        {
20869
          as_bad_where (fixP->fx_file, fixP->fx_line,
20870
                        _("invalid constant (%lx) after fixup"),
20871
                        (unsigned long) value);
20872
          break;
20873
        }
20874
 
20875
      newval |= (newimm & 0x800) << 15;
20876
      newval |= (newimm & 0x700) << 4;
20877
      newval |= (newimm & 0x0ff);
20878
 
20879
      md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
20880
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
20881
      break;
20882
 
20883
    case BFD_RELOC_ARM_SMC:
20884
      if (((unsigned long) value) > 0xffff)
20885
        as_bad_where (fixP->fx_file, fixP->fx_line,
20886
                      _("invalid smc expression"));
20887
      newval = md_chars_to_number (buf, INSN_SIZE);
20888
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20889
      md_number_to_chars (buf, newval, INSN_SIZE);
20890
      break;
20891
 
20892
    case BFD_RELOC_ARM_HVC:
20893
      if (((unsigned long) value) > 0xffff)
20894
        as_bad_where (fixP->fx_file, fixP->fx_line,
20895
                      _("invalid hvc expression"));
20896
      newval = md_chars_to_number (buf, INSN_SIZE);
20897
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20898
      md_number_to_chars (buf, newval, INSN_SIZE);
20899
      break;
20900
 
20901
    case BFD_RELOC_ARM_SWI:
20902
      if (fixP->tc_fix_data != 0)
20903
        {
20904
          if (((unsigned long) value) > 0xff)
20905
            as_bad_where (fixP->fx_file, fixP->fx_line,
20906
                          _("invalid swi expression"));
20907
          newval = md_chars_to_number (buf, THUMB_SIZE);
20908
          newval |= value;
20909
          md_number_to_chars (buf, newval, THUMB_SIZE);
20910
        }
20911
      else
20912
        {
20913
          if (((unsigned long) value) > 0x00ffffff)
20914
            as_bad_where (fixP->fx_file, fixP->fx_line,
20915
                          _("invalid swi expression"));
20916
          newval = md_chars_to_number (buf, INSN_SIZE);
20917
          newval |= value;
20918
          md_number_to_chars (buf, newval, INSN_SIZE);
20919
        }
20920
      break;
20921
 
20922
    case BFD_RELOC_ARM_MULTI:
20923
      if (((unsigned long) value) > 0xffff)
20924
        as_bad_where (fixP->fx_file, fixP->fx_line,
20925
                      _("invalid expression in load/store multiple"));
20926
      newval = value | md_chars_to_number (buf, INSN_SIZE);
20927
      md_number_to_chars (buf, newval, INSN_SIZE);
20928
      break;
20929
 
20930
#ifdef OBJ_ELF
20931
    case BFD_RELOC_ARM_PCREL_CALL:
20932
 
20933
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20934
          && fixP->fx_addsy
20935
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20936
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20937
          && THUMB_IS_FUNC (fixP->fx_addsy))
20938
        /* Flip the bl to blx.  This is a simple bit
20939
           flip here because we generate PCREL_CALL for
20940
           unconditional bls.  */
20941
        {
20942
          newval = md_chars_to_number (buf, INSN_SIZE);
20943
          newval = newval | 0x10000000;
20944
          md_number_to_chars (buf, newval, INSN_SIZE);
20945
          temp = 1;
20946
          fixP->fx_done = 1;
20947
        }
20948
      else
20949
        temp = 3;
20950
      goto arm_branch_common;
20951
 
20952
    case BFD_RELOC_ARM_PCREL_JUMP:
20953
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20954
          && fixP->fx_addsy
20955
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20956
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20957
          && THUMB_IS_FUNC (fixP->fx_addsy))
20958
        {
20959
          /* This would map to a bl<cond>, b<cond>,
20960
             b<always> to a Thumb function. We
20961
             need to force a relocation for this particular
20962
             case.  */
20963
          newval = md_chars_to_number (buf, INSN_SIZE);
20964
          fixP->fx_done = 0;
20965
        }
20966
 
20967
    case BFD_RELOC_ARM_PLT32:
20968
#endif
20969
    case BFD_RELOC_ARM_PCREL_BRANCH:
20970
      temp = 3;
20971
      goto arm_branch_common;
20972
 
20973
    case BFD_RELOC_ARM_PCREL_BLX:
20974
 
20975
      temp = 1;
20976
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20977
          && fixP->fx_addsy
20978
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20979
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20980
          && ARM_IS_FUNC (fixP->fx_addsy))
20981
        {
20982
          /* Flip the blx to a bl and warn.  */
20983
          const char *name = S_GET_NAME (fixP->fx_addsy);
20984
          newval = 0xeb000000;
20985
          as_warn_where (fixP->fx_file, fixP->fx_line,
20986
                         _("blx to '%s' an ARM ISA state function changed to bl"),
20987
                          name);
20988
          md_number_to_chars (buf, newval, INSN_SIZE);
20989
          temp = 3;
20990
          fixP->fx_done = 1;
20991
        }
20992
 
20993
#ifdef OBJ_ELF
20994
       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
20995
         fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
20996
#endif
20997
 
20998
    arm_branch_common:
20999
      /* We are going to store value (shifted right by two) in the
21000
         instruction, in a 24-bit signed field.  Bits 25 through 31 must be either
21001
         all clear or all set, and bit 0 must be clear.  For B/BL, bit 1 must
21002
         also be clear.  */
21003
      if (value & temp)
21004
        as_bad_where (fixP->fx_file, fixP->fx_line,
21005
                      _("misaligned branch destination"));
21006
      if ((value & (offsetT)0xfe000000) != (offsetT)0
21007
          && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
21008 160 khays
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21009 16 khays
 
21010
      if (fixP->fx_done || !seg->use_rela_p)
21011
        {
21012
          newval = md_chars_to_number (buf, INSN_SIZE);
21013
          newval |= (value >> 2) & 0x00ffffff;
21014
          /* Set the H bit on BLX instructions.  */
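          /* (Added note: bit 24 of BLX, the H bit, supplies bit 1 of the
             Thumb target address, allowing halfword-aligned targets.)  */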
21015
          if (temp == 1)
21016
            {
21017
              if (value & 2)
21018
                newval |= 0x01000000;
21019
              else
21020
                newval &= ~0x01000000;
21021
            }
21022
          md_number_to_chars (buf, newval, INSN_SIZE);
21023
        }
21024
      break;
21025
 
21026
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
21027
      /* CBZ can only branch forward.  */
21028
 
21029
      /* Attempts to use CBZ to branch to the next instruction
21030
         (which, strictly speaking, are prohibited) will be turned into
21031
         no-ops.
21032
 
21033
         FIXME: It may be better to remove the instruction completely and
21034
         perform relaxation.  */
21035
      if (value == -2)
21036
        {
21037
          newval = md_chars_to_number (buf, THUMB_SIZE);
21038
          newval = 0xbf00; /* NOP encoding T1 */
21039
          md_number_to_chars (buf, newval, THUMB_SIZE);
21040
        }
21041
      else
21042
        {
21043
          if (value & ~0x7e)
21044 160 khays
            as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21045 16 khays
 
21046
          if (fixP->fx_done || !seg->use_rela_p)
21047
            {
21048
              newval = md_chars_to_number (buf, THUMB_SIZE);
21049
              newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
21050
              md_number_to_chars (buf, newval, THUMB_SIZE);
21051
            }
21052
        }
21053
      break;
21054
 
21055
    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
21056
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
21057 160 khays
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21058 16 khays
 
21059
      if (fixP->fx_done || !seg->use_rela_p)
21060
        {
21061
          newval = md_chars_to_number (buf, THUMB_SIZE);
21062
          newval |= (value & 0x1ff) >> 1;
21063
          md_number_to_chars (buf, newval, THUMB_SIZE);
21064
        }
21065
      break;
21066
 
21067
    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
21068
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
21069 160 khays
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21070 16 khays
 
21071
      if (fixP->fx_done || !seg->use_rela_p)
21072
        {
21073
          newval = md_chars_to_number (buf, THUMB_SIZE);
21074
          newval |= (value & 0xfff) >> 1;
21075
          md_number_to_chars (buf, newval, THUMB_SIZE);
21076
        }
21077
      break;
21078
 
21079
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
21080
      if (fixP->fx_addsy
21081
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21082
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21083
          && ARM_IS_FUNC (fixP->fx_addsy)
21084
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21085
        {
21086
          /* Force a relocation for a branch 20 bits wide.  */
21087
          fixP->fx_done = 0;
21088
        }
21089 160 khays
      if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
21090 16 khays
        as_bad_where (fixP->fx_file, fixP->fx_line,
21091
                      _("conditional branch out of range"));
21092
 
21093
      if (fixP->fx_done || !seg->use_rela_p)
21094
        {
21095
          offsetT newval2;
21096
          addressT S, J1, J2, lo, hi;
21097
 
21098
          S  = (value & 0x00100000) >> 20;
21099
          J2 = (value & 0x00080000) >> 19;
21100
          J1 = (value & 0x00040000) >> 18;
21101
          hi = (value & 0x0003f000) >> 12;
21102
          lo = (value & 0x00000ffe) >> 1;
21103
 
21104
          newval   = md_chars_to_number (buf, THUMB_SIZE);
21105
          newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21106
          newval  |= (S << 10) | hi;
21107
          newval2 |= (J1 << 13) | (J2 << 11) | lo;
21108
          md_number_to_chars (buf, newval, THUMB_SIZE);
21109
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21110
        }
21111
      break;
21112
 
21113
    case BFD_RELOC_THUMB_PCREL_BLX:
21114
      /* If there is a blx from a thumb state function to
21115
         another thumb function flip this to a bl and warn
21116
         about it.  */
21117
 
21118
      if (fixP->fx_addsy
21119
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21120
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21121
          && THUMB_IS_FUNC (fixP->fx_addsy))
21122
        {
21123
          const char *name = S_GET_NAME (fixP->fx_addsy);
21124
          as_warn_where (fixP->fx_file, fixP->fx_line,
21125
                         _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
21126
                         name);
21127
          newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21128
          newval = newval | 0x1000;
21129
          md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21130
          fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21131
          fixP->fx_done = 1;
21132
        }
21133
 
21134
 
21135
      goto thumb_bl_common;
21136
 
21137
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
21138
      /* A bl from Thumb state ISA to an internal ARM state function
21139
         is converted to a blx.  */
21140
      if (fixP->fx_addsy
21141
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21142
          && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21143
          && ARM_IS_FUNC (fixP->fx_addsy)
21144
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21145
        {
21146
          newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21147
          newval = newval & ~0x1000;
21148
          md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21149
          fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
21150
          fixP->fx_done = 1;
21151
        }
21152
 
21153
    thumb_bl_common:
21154
 
21155
#ifdef OBJ_ELF
21156
       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4 &&
21157
           fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21158
         fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21159
#endif
21160
 
21161
      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21162
        /* For a BLX instruction, make sure that the relocation is rounded up
21163
           to a word boundary.  This follows the semantics of the instruction
21164
           which specifies that bit 1 of the target address will come from bit
21165
           1 of the base address.  */
21166
        value = (value + 1) & ~ 1;
21167
 
21168
       if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
21169 160 khays
         {
21170
           if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
21171
             as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21172
           else if ((value & ~0x1ffffff)
21173
                    && ((value & ~0x1ffffff) != ~0x1ffffff))
21174
             as_bad_where (fixP->fx_file, fixP->fx_line,
21175
                           _("Thumb2 branch out of range"));
21176
         }
21177 16 khays
 
21178
      if (fixP->fx_done || !seg->use_rela_p)
21179
        encode_thumb2_b_bl_offset (buf, value);
21180
 
21181
      break;
21182
 
21183
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
21184 160 khays
      if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
21185
        as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21186 16 khays
 
21187
      if (fixP->fx_done || !seg->use_rela_p)
21188
          encode_thumb2_b_bl_offset (buf, value);
21189
 
21190
      break;
21191
 
21192
    case BFD_RELOC_8:
21193
      if (fixP->fx_done || !seg->use_rela_p)
21194
        md_number_to_chars (buf, value, 1);
21195
      break;
21196
 
21197
    case BFD_RELOC_16:
21198
      if (fixP->fx_done || !seg->use_rela_p)
21199
        md_number_to_chars (buf, value, 2);
21200
      break;
21201
 
21202
#ifdef OBJ_ELF
21203
    case BFD_RELOC_ARM_TLS_CALL:
21204
    case BFD_RELOC_ARM_THM_TLS_CALL:
21205
    case BFD_RELOC_ARM_TLS_DESCSEQ:
21206
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
21207
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
21208
      break;
21209
 
21210
    case BFD_RELOC_ARM_TLS_GOTDESC:
21211
    case BFD_RELOC_ARM_TLS_GD32:
21212
    case BFD_RELOC_ARM_TLS_LE32:
21213
    case BFD_RELOC_ARM_TLS_IE32:
21214
    case BFD_RELOC_ARM_TLS_LDM32:
21215
    case BFD_RELOC_ARM_TLS_LDO32:
21216
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
21217
      /* fall through */
21218
 
21219
    case BFD_RELOC_ARM_GOT32:
21220
    case BFD_RELOC_ARM_GOTOFF:
21221
      if (fixP->fx_done || !seg->use_rela_p)
21222
        md_number_to_chars (buf, 0, 4);
21223
      break;
21224
 
21225
    case BFD_RELOC_ARM_GOT_PREL:
21226
      if (fixP->fx_done || !seg->use_rela_p)
21227
        md_number_to_chars (buf, value, 4);
21228
      break;
21229
 
21230
    case BFD_RELOC_ARM_TARGET2:
21231
      /* TARGET2 is not partial-inplace, so we need to write the
21232
         addend here for REL targets, because it won't be written out
21233
         during reloc processing later.  */
21234
      if (fixP->fx_done || !seg->use_rela_p)
21235
        md_number_to_chars (buf, fixP->fx_offset, 4);
21236
      break;
21237
#endif
21238
 
21239
    case BFD_RELOC_RVA:
21240
    case BFD_RELOC_32:
21241
    case BFD_RELOC_ARM_TARGET1:
21242
    case BFD_RELOC_ARM_ROSEGREL32:
21243
    case BFD_RELOC_ARM_SBREL32:
21244
    case BFD_RELOC_32_PCREL:
21245
#ifdef TE_PE
21246
    case BFD_RELOC_32_SECREL:
21247
#endif
21248
      if (fixP->fx_done || !seg->use_rela_p)
21249
#ifdef TE_WINCE
21250
        /* For WinCE we only do this for pcrel fixups.  */
21251
        if (fixP->fx_done || fixP->fx_pcrel)
21252
#endif
21253
          md_number_to_chars (buf, value, 4);
21254
      break;
21255
 
21256
#ifdef OBJ_ELF
21257
    case BFD_RELOC_ARM_PREL31:
21258
      if (fixP->fx_done || !seg->use_rela_p)
21259
        {
21260
          newval = md_chars_to_number (buf, 4) & 0x80000000;
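          /* Added note: the test below rejects values whose bits 30 and 31
             differ, i.e. values that do not fit in a signed 31-bit field.  */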
21261
          if ((value ^ (value >> 1)) & 0x40000000)
21262
            {
21263
              as_bad_where (fixP->fx_file, fixP->fx_line,
21264
                            _("rel31 relocation overflow"));
21265
            }
21266
          newval |= value & 0x7fffffff;
21267
          md_number_to_chars (buf, newval, 4);
21268
        }
21269
      break;
21270
#endif
21271
 
21272
    case BFD_RELOC_ARM_CP_OFF_IMM:
21273
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
21274
      if (value < -1023 || value > 1023 || (value & 3))
21275
        as_bad_where (fixP->fx_file, fixP->fx_line,
21276
                      _("co-processor offset out of range"));
21277
    cp_off_common:
21278
      sign = value > 0;
21279
      if (value < 0)
21280
        value = -value;
21281
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21282
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21283
        newval = md_chars_to_number (buf, INSN_SIZE);
21284
      else
21285
        newval = get_thumb32_insn (buf);
21286
      if (value == 0)
21287
        newval &= 0xffffff00;
21288
      else
21289
        {
21290
          newval &= 0xff7fff00;
21291
          newval |= (value >> 2) | (sign ? INDEX_UP : 0);
21292
        }
21293
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21294
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21295
        md_number_to_chars (buf, newval, INSN_SIZE);
21296
      else
21297
        put_thumb32_insn (buf, newval);
21298
      break;
21299
 
21300
    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
21301
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
21302
      if (value < -255 || value > 255)
21303
        as_bad_where (fixP->fx_file, fixP->fx_line,
21304
                      _("co-processor offset out of range"));
21305
      value *= 4;
21306
      goto cp_off_common;
21307
 
21308
    case BFD_RELOC_ARM_THUMB_OFFSET:
21309
      newval = md_chars_to_number (buf, THUMB_SIZE);
21310
      /* Exactly which ranges are allowed, and where the offset is inserted,
21311
         depend on the type of instruction; we can establish this from the
21312
         top 4 bits.  */
21313
      switch (newval >> 12)
21314
        {
21315
        case 4: /* PC load.  */
21316
          /* Thumb PC loads are somewhat odd, bit 1 of the PC is
21317
             forced to zero for these loads; md_pcrel_from has already
21318
             compensated for this.  */
21319
          if (value & 3)
21320
            as_bad_where (fixP->fx_file, fixP->fx_line,
21321
                          _("invalid offset, target not word aligned (0x%08lX)"),
21322
                          (((unsigned long) fixP->fx_frag->fr_address
21323
                            + (unsigned long) fixP->fx_where) & ~3)
21324
                          + (unsigned long) value);
21325
 
21326
          if (value & ~0x3fc)
21327
            as_bad_where (fixP->fx_file, fixP->fx_line,
21328
                          _("invalid offset, value too big (0x%08lX)"),
21329
                          (long) value);
21330
 
21331
          newval |= value >> 2;
21332
          break;
21333
 
21334
        case 9: /* SP load/store.  */
21335
          if (value & ~0x3fc)
21336
            as_bad_where (fixP->fx_file, fixP->fx_line,
21337
                          _("invalid offset, value too big (0x%08lX)"),
21338
                          (long) value);
21339
          newval |= value >> 2;
21340
          break;
21341
 
21342
        case 6: /* Word load/store.  */
21343
          if (value & ~0x7c)
21344
            as_bad_where (fixP->fx_file, fixP->fx_line,
21345
                          _("invalid offset, value too big (0x%08lX)"),
21346
                          (long) value);
21347
          newval |= value << 4; /* 6 - 2.  */
21348
          break;
21349
 
21350
        case 7: /* Byte load/store.  */
21351
          if (value & ~0x1f)
21352
            as_bad_where (fixP->fx_file, fixP->fx_line,
21353
                          _("invalid offset, value too big (0x%08lX)"),
21354
                          (long) value);
21355
          newval |= value << 6;
21356
          break;
21357
 
21358
        case 8: /* Halfword load/store.  */
21359
          if (value & ~0x3e)
21360
            as_bad_where (fixP->fx_file, fixP->fx_line,
21361
                          _("invalid offset, value too big (0x%08lX)"),
21362
                          (long) value);
21363
          newval |= value << 5; /* 6 - 1.  */
21364
          break;
21365
 
21366
        default:
21367
          as_bad_where (fixP->fx_file, fixP->fx_line,
21368
                        "Unable to process relocation for thumb opcode: %lx",
21369
                        (unsigned long) newval);
21370
          break;
21371
        }
21372
      md_number_to_chars (buf, newval, THUMB_SIZE);
21373
      break;
21374
 
21375
    case BFD_RELOC_ARM_THUMB_ADD:
21376
      /* This is a complicated relocation, since we use it for all of
21377
         the following immediate relocations:
21378
 
21379
            3bit ADD/SUB
21380
            8bit ADD/SUB
21381
            9bit ADD/SUB SP word-aligned
21382
           10bit ADD PC/SP word-aligned
21383
 
21384
         The type of instruction being processed is encoded in the
21385
         instruction field:
21386
 
21387
           0x8000  SUB
21388
           0x00F0  Rd
21389
           0x000F  Rs
21390
      */
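      /* For instance (illustrative values): an instruction field of
         0x8013 describes a SUB with Rd = 1 and Rs = 3, while 0x0013
         describes the corresponding ADD.  */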
21391
      newval = md_chars_to_number (buf, THUMB_SIZE);
21392
      {
21393
        int rd = (newval >> 4) & 0xf;
21394
        int rs = newval & 0xf;
21395
        int subtract = !!(newval & 0x8000);
21396
 
21397
        /* Check for HI regs; only very restricted cases are allowed:
21398
           Adjusting SP, and using PC or SP to get an address.  */
21399
        if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
21400
            || (rs > 7 && rs != REG_SP && rs != REG_PC))
21401
          as_bad_where (fixP->fx_file, fixP->fx_line,
21402
                        _("invalid Hi register with immediate"));
21403
 
21404
        /* If value is negative, choose the opposite instruction.  */
21405
        if (value < 0)
21406
          {
21407
            value = -value;
21408
            subtract = !subtract;
21409
            if (value < 0)
21410
              as_bad_where (fixP->fx_file, fixP->fx_line,
21411
                            _("immediate value out of range"));
21412
          }
21413
 
21414
        if (rd == REG_SP)
21415
          {
21416
            if (value & ~0x1fc)
21417
              as_bad_where (fixP->fx_file, fixP->fx_line,
21418
                            _("invalid immediate for stack address calculation"));
21419
            newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
21420
            newval |= value >> 2;
21421
          }
21422
        else if (rs == REG_PC || rs == REG_SP)
21423
          {
21424
            if (subtract || value & ~0x3fc)
21425
              as_bad_where (fixP->fx_file, fixP->fx_line,
21426
                            _("invalid immediate for address calculation (value = 0x%08lX)"),
21427
                            (unsigned long) value);
21428
            newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
21429
            newval |= rd << 8;
21430
            newval |= value >> 2;
21431
          }
21432
        else if (rs == rd)
21433
          {
21434
            if (value & ~0xff)
21435
              as_bad_where (fixP->fx_file, fixP->fx_line,
21436
                            _("immediate value out of range"));
21437
            newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
21438
            newval |= (rd << 8) | value;
21439
          }
21440
        else
21441
          {
21442
            if (value & ~0x7)
21443
              as_bad_where (fixP->fx_file, fixP->fx_line,
21444
                            _("immediate value out of range"));
21445
            newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
21446
            newval |= rd | (rs << 3) | (value << 6);
21447
          }
21448
      }
21449
      md_number_to_chars (buf, newval, THUMB_SIZE);
21450
      break;
21451
 
21452
    case BFD_RELOC_ARM_THUMB_IMM:
21453
      newval = md_chars_to_number (buf, THUMB_SIZE);
21454
      if (value < 0 || value > 255)
21455
        as_bad_where (fixP->fx_file, fixP->fx_line,
21456
                      _("invalid immediate: %ld is out of range"),
21457
                      (long) value);
21458
      newval |= value;
21459
      md_number_to_chars (buf, newval, THUMB_SIZE);
21460
      break;
21461
 
21462
    case BFD_RELOC_ARM_THUMB_SHIFT:
21463
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
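      /* For example, a requested shift of zero is always emitted as an
         LSL, and LSR/ASR by 32 are stored with an immediate field of
         zero; see the handling below.  */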
21464
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
21465
      temp = newval & 0xf800;
21466
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
21467
        as_bad_where (fixP->fx_file, fixP->fx_line,
21468
                      _("invalid shift value: %ld"), (long) value);
21469
      /* Shifts of zero must be encoded as LSL.  */
21470
      if (value == 0)
21471
        newval = (newval & 0x003f) | T_OPCODE_LSL_I;
21472
      /* Shifts of 32 are encoded as zero.  */
21473
      else if (value == 32)
21474
        value = 0;
21475
      newval |= value << 6;
21476
      md_number_to_chars (buf, newval, THUMB_SIZE);
21477
      break;
21478
 
21479
    case BFD_RELOC_VTABLE_INHERIT:
21480
    case BFD_RELOC_VTABLE_ENTRY:
21481
      fixP->fx_done = 0;
21482
      return;
21483
 
21484
    case BFD_RELOC_ARM_MOVW:
21485
    case BFD_RELOC_ARM_MOVT:
21486
    case BFD_RELOC_ARM_THUMB_MOVW:
21487
    case BFD_RELOC_ARM_THUMB_MOVT:
21488
      if (fixP->fx_done || !seg->use_rela_p)
21489
        {
21490
          /* REL format relocations are limited to a 16-bit addend.  */
21491
          if (!fixP->fx_done)
21492
            {
21493
              if (value < -0x8000 || value > 0x7fff)
21494
                  as_bad_where (fixP->fx_file, fixP->fx_line,
21495
                                _("offset out of range"));
21496
            }
21497
          else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
21498
                   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21499
            {
21500
              value >>= 16;
21501
            }
21502
 
21503
          if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
21504
              || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21505
            {
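              /* The 16-bit value is scattered across four fields of the
                 Thumb-2 encoding below; e.g. (arbitrary value) 0x1234
                 places 1 in bits 19-16, 0 in bit 26, 2 in bits 14-12
                 and 0x34 in bits 7-0.  */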
21506
              newval = get_thumb32_insn (buf);
21507
              newval &= 0xfbf08f00;
21508
              newval |= (value & 0xf000) << 4;
21509
              newval |= (value & 0x0800) << 15;
21510
              newval |= (value & 0x0700) << 4;
21511
              newval |= (value & 0x00ff);
21512
              put_thumb32_insn (buf, newval);
21513
            }
21514
          else
21515
            {
21516
              newval = md_chars_to_number (buf, 4);
21517
              newval &= 0xfff0f000;
21518
              newval |= value & 0x0fff;
21519
              newval |= (value & 0xf000) << 4;
21520
              md_number_to_chars (buf, newval, 4);
21521
            }
21522
        }
21523
      return;
21524
 
21525
   case BFD_RELOC_ARM_ALU_PC_G0_NC:
21526
   case BFD_RELOC_ARM_ALU_PC_G0:
21527
   case BFD_RELOC_ARM_ALU_PC_G1_NC:
21528
   case BFD_RELOC_ARM_ALU_PC_G1:
21529
   case BFD_RELOC_ARM_ALU_PC_G2:
21530
   case BFD_RELOC_ARM_ALU_SB_G0_NC:
21531
   case BFD_RELOC_ARM_ALU_SB_G0:
21532
   case BFD_RELOC_ARM_ALU_SB_G1_NC:
21533
   case BFD_RELOC_ARM_ALU_SB_G1:
21534
   case BFD_RELOC_ARM_ALU_SB_G2:
21535
     gas_assert (!fixP->fx_done);
21536
     if (!seg->use_rela_p)
21537
       {
21538
         bfd_vma insn;
21539
         bfd_vma encoded_addend;
21540
         bfd_vma addend_abs = abs (value);
21541
 
21542
         /* Check that the absolute value of the addend can be
21543
            expressed as an 8-bit constant plus a rotation.  */
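         /* By way of example (arbitrary values): 0x3fc is representable
            (0xff rotated), whereas 0x101 is not, since its set bits do
            not fit in any eight-bit window under an even rotation.  */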
21544
         encoded_addend = encode_arm_immediate (addend_abs);
21545
         if (encoded_addend == (unsigned int) FAIL)
21546
           as_bad_where (fixP->fx_file, fixP->fx_line,
21547
                         _("the offset 0x%08lX is not representable"),
21548
                         (unsigned long) addend_abs);
21549
 
21550
         /* Extract the instruction.  */
21551
         insn = md_chars_to_number (buf, INSN_SIZE);
21552
 
21553
         /* If the addend is positive, use an ADD instruction.
21554
            Otherwise use a SUB.  Take care not to destroy the S bit.  */
21555
         insn &= 0xff1fffff;
21556
         if (value < 0)
21557
           insn |= 1 << 22;
21558
         else
21559
           insn |= 1 << 23;
21560
 
21561
         /* Place the encoded addend into the first 12 bits of the
21562
            instruction.  */
21563
         insn &= 0xfffff000;
21564
         insn |= encoded_addend;
21565
 
21566
         /* Update the instruction.  */
21567
         md_number_to_chars (buf, insn, INSN_SIZE);
21568
       }
21569
     break;
21570
 
21571
    case BFD_RELOC_ARM_LDR_PC_G0:
21572
    case BFD_RELOC_ARM_LDR_PC_G1:
21573
    case BFD_RELOC_ARM_LDR_PC_G2:
21574
    case BFD_RELOC_ARM_LDR_SB_G0:
21575
    case BFD_RELOC_ARM_LDR_SB_G1:
21576
    case BFD_RELOC_ARM_LDR_SB_G2:
21577
      gas_assert (!fixP->fx_done);
21578
      if (!seg->use_rela_p)
21579
        {
21580
          bfd_vma insn;
21581
          bfd_vma addend_abs = abs (value);
21582
 
21583
          /* Check that the absolute value of the addend can be
21584
             encoded in 12 bits.  */
21585
          if (addend_abs >= 0x1000)
21586
            as_bad_where (fixP->fx_file, fixP->fx_line,
21587
                          _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
21588
                          (unsigned long) addend_abs);
21589
 
21590
          /* Extract the instruction.  */
21591
          insn = md_chars_to_number (buf, INSN_SIZE);
21592
 
21593
          /* If the addend is negative, clear bit 23 of the instruction.
21594
             Otherwise set it.  */
21595
          if (value < 0)
21596
            insn &= ~(1 << 23);
21597
          else
21598
            insn |= 1 << 23;
21599
 
21600
          /* Place the absolute value of the addend into the first 12 bits
21601
             of the instruction.  */
21602
          insn &= 0xfffff000;
21603
          insn |= addend_abs;
21604
 
21605
          /* Update the instruction.  */
21606
          md_number_to_chars (buf, insn, INSN_SIZE);
21607
        }
21608
      break;
21609
 
21610
    case BFD_RELOC_ARM_LDRS_PC_G0:
21611
    case BFD_RELOC_ARM_LDRS_PC_G1:
21612
    case BFD_RELOC_ARM_LDRS_PC_G2:
21613
    case BFD_RELOC_ARM_LDRS_SB_G0:
21614
    case BFD_RELOC_ARM_LDRS_SB_G1:
21615
    case BFD_RELOC_ARM_LDRS_SB_G2:
21616
      gas_assert (!fixP->fx_done);
21617
      if (!seg->use_rela_p)
21618
        {
21619
          bfd_vma insn;
21620
          bfd_vma addend_abs = abs (value);
21621
 
21622
          /* Check that the absolute value of the addend can be
21623
             encoded in 8 bits.  */
21624
          if (addend_abs >= 0x100)
21625
            as_bad_where (fixP->fx_file, fixP->fx_line,
21626
                          _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
21627
                          (unsigned long) addend_abs);
21628
 
21629
          /* Extract the instruction.  */
21630
          insn = md_chars_to_number (buf, INSN_SIZE);
21631
 
21632
          /* If the addend is negative, clear bit 23 of the instruction.
21633
             Otherwise set it.  */
21634
          if (value < 0)
21635
            insn &= ~(1 << 23);
21636
          else
21637
            insn |= 1 << 23;
21638
 
21639
          /* Place the first four bits of the absolute value of the addend
21640
             into the first 4 bits of the instruction, and the remaining
21641
             four into bits 8 .. 11.  */
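           /* For example (arbitrary value): an addend magnitude of 0x2a
              puts 0xa into bits 0-3 and 0x2 into bits 8-11.  */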
21642
          insn &= 0xfffff0f0;
21643
          insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
21644
 
21645
          /* Update the instruction.  */
21646
          md_number_to_chars (buf, insn, INSN_SIZE);
21647
        }
21648
      break;
21649
 
21650
    case BFD_RELOC_ARM_LDC_PC_G0:
21651
    case BFD_RELOC_ARM_LDC_PC_G1:
21652
    case BFD_RELOC_ARM_LDC_PC_G2:
21653
    case BFD_RELOC_ARM_LDC_SB_G0:
21654
    case BFD_RELOC_ARM_LDC_SB_G1:
21655
    case BFD_RELOC_ARM_LDC_SB_G2:
21656
      gas_assert (!fixP->fx_done);
21657
      if (!seg->use_rela_p)
21658
        {
21659
          bfd_vma insn;
21660
          bfd_vma addend_abs = abs (value);
21661
 
21662
          /* Check that the absolute value of the addend is a multiple of
21663
             four and, when divided by four, fits in 8 bits.  */
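           /* For example (arbitrary values): an offset of 0x3fc is
              stored as 0xff words, while 0x3fe is rejected as unaligned
              and 0x400 is rejected as too large.  */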
21664
          if (addend_abs & 0x3)
21665
            as_bad_where (fixP->fx_file, fixP->fx_line,
21666
                          _("bad offset 0x%08lX (must be word-aligned)"),
21667
                          (unsigned long) addend_abs);
21668
 
21669
          if ((addend_abs >> 2) > 0xff)
21670
            as_bad_where (fixP->fx_file, fixP->fx_line,
21671
                          _("bad offset 0x%08lX (must be an 8-bit number of words)"),
21672
                          (unsigned long) addend_abs);
21673
 
21674
          /* Extract the instruction.  */
21675
          insn = md_chars_to_number (buf, INSN_SIZE);
21676
 
21677
          /* If the addend is negative, clear bit 23 of the instruction.
21678
             Otherwise set it.  */
21679
          if (value < 0)
21680
            insn &= ~(1 << 23);
21681
          else
21682
            insn |= 1 << 23;
21683
 
21684
          /* Place the addend (divided by four) into the first eight
21685
             bits of the instruction.  */
21686
          insn &= 0xfffffff0;
21687
          insn |= addend_abs >> 2;
21688
 
21689
          /* Update the instruction.  */
21690
          md_number_to_chars (buf, insn, INSN_SIZE);
21691
        }
21692
      break;
21693
 
21694
    case BFD_RELOC_ARM_V4BX:
21695
      /* This will need to go in the object file.  */
21696
      fixP->fx_done = 0;
21697
      break;
21698
 
21699
    case BFD_RELOC_UNUSED:
21700
    default:
21701
      as_bad_where (fixP->fx_file, fixP->fx_line,
21702
                    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
21703
    }
21704
}
21705
 
21706
/* Translate internal representation of relocation info to BFD target
21707
   format.  */
21708
 
21709
arelent *
21710
tc_gen_reloc (asection *section, fixS *fixp)
21711
{
21712
  arelent * reloc;
21713
  bfd_reloc_code_real_type code;
21714
 
21715
  reloc = (arelent *) xmalloc (sizeof (arelent));
21716
 
21717
  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
21718
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
21719
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
21720
 
21721
  if (fixp->fx_pcrel)
21722
    {
21723
      if (section->use_rela_p)
21724
        fixp->fx_offset -= md_pcrel_from_section (fixp, section);
21725
      else
21726
        fixp->fx_offset = reloc->address;
21727
    }
21728
  reloc->addend = fixp->fx_offset;
21729
 
21730
  switch (fixp->fx_r_type)
21731
    {
21732
    case BFD_RELOC_8:
21733
      if (fixp->fx_pcrel)
21734
        {
21735
          code = BFD_RELOC_8_PCREL;
21736
          break;
21737
        }
21738
 
21739
    case BFD_RELOC_16:
21740
      if (fixp->fx_pcrel)
21741
        {
21742
          code = BFD_RELOC_16_PCREL;
21743
          break;
21744
        }
21745
 
21746
    case BFD_RELOC_32:
21747
      if (fixp->fx_pcrel)
21748
        {
21749
          code = BFD_RELOC_32_PCREL;
21750
          break;
21751
        }
21752
 
21753
    case BFD_RELOC_ARM_MOVW:
21754
      if (fixp->fx_pcrel)
21755
        {
21756
          code = BFD_RELOC_ARM_MOVW_PCREL;
21757
          break;
21758
        }
21759
 
21760
    case BFD_RELOC_ARM_MOVT:
21761
      if (fixp->fx_pcrel)
21762
        {
21763
          code = BFD_RELOC_ARM_MOVT_PCREL;
21764
          break;
21765
        }
21766
 
21767
    case BFD_RELOC_ARM_THUMB_MOVW:
21768
      if (fixp->fx_pcrel)
21769
        {
21770
          code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
21771
          break;
21772
        }
21773
 
21774
    case BFD_RELOC_ARM_THUMB_MOVT:
21775
      if (fixp->fx_pcrel)
21776
        {
21777
          code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
21778
          break;
21779
        }
21780
 
21781
    case BFD_RELOC_NONE:
21782
    case BFD_RELOC_ARM_PCREL_BRANCH:
21783
    case BFD_RELOC_ARM_PCREL_BLX:
21784
    case BFD_RELOC_RVA:
21785
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
21786
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
21787
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
21788
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
21789
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
21790
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
21791
    case BFD_RELOC_VTABLE_ENTRY:
21792
    case BFD_RELOC_VTABLE_INHERIT:
21793
#ifdef TE_PE
21794
    case BFD_RELOC_32_SECREL:
21795
#endif
21796
      code = fixp->fx_r_type;
21797
      break;
21798
 
21799
    case BFD_RELOC_THUMB_PCREL_BLX:
21800
#ifdef OBJ_ELF
21801
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21802
        code = BFD_RELOC_THUMB_PCREL_BRANCH23;
21803
      else
21804
#endif
21805
        code = BFD_RELOC_THUMB_PCREL_BLX;
21806
      break;
21807
 
21808
    case BFD_RELOC_ARM_LITERAL:
21809
    case BFD_RELOC_ARM_HWLITERAL:
21810
      /* If this is called, then a literal has
21811
         been referenced across a section boundary.  */
21812
      as_bad_where (fixp->fx_file, fixp->fx_line,
21813
                    _("literal referenced across section boundary"));
21814
      return NULL;
21815
 
21816
#ifdef OBJ_ELF
21817
    case BFD_RELOC_ARM_TLS_CALL:
21818
    case BFD_RELOC_ARM_THM_TLS_CALL:
21819
    case BFD_RELOC_ARM_TLS_DESCSEQ:
21820
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
21821
    case BFD_RELOC_ARM_GOT32:
21822
    case BFD_RELOC_ARM_GOTOFF:
21823
    case BFD_RELOC_ARM_GOT_PREL:
21824
    case BFD_RELOC_ARM_PLT32:
21825
    case BFD_RELOC_ARM_TARGET1:
21826
    case BFD_RELOC_ARM_ROSEGREL32:
21827
    case BFD_RELOC_ARM_SBREL32:
21828
    case BFD_RELOC_ARM_PREL31:
21829
    case BFD_RELOC_ARM_TARGET2:
21830
    case BFD_RELOC_ARM_TLS_LE32:
21831
    case BFD_RELOC_ARM_TLS_LDO32:
21832
    case BFD_RELOC_ARM_PCREL_CALL:
21833
    case BFD_RELOC_ARM_PCREL_JUMP:
21834
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
21835
    case BFD_RELOC_ARM_ALU_PC_G0:
21836
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
21837
    case BFD_RELOC_ARM_ALU_PC_G1:
21838
    case BFD_RELOC_ARM_ALU_PC_G2:
21839
    case BFD_RELOC_ARM_LDR_PC_G0:
21840
    case BFD_RELOC_ARM_LDR_PC_G1:
21841
    case BFD_RELOC_ARM_LDR_PC_G2:
21842
    case BFD_RELOC_ARM_LDRS_PC_G0:
21843
    case BFD_RELOC_ARM_LDRS_PC_G1:
21844
    case BFD_RELOC_ARM_LDRS_PC_G2:
21845
    case BFD_RELOC_ARM_LDC_PC_G0:
21846
    case BFD_RELOC_ARM_LDC_PC_G1:
21847
    case BFD_RELOC_ARM_LDC_PC_G2:
21848
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
21849
    case BFD_RELOC_ARM_ALU_SB_G0:
21850
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
21851
    case BFD_RELOC_ARM_ALU_SB_G1:
21852
    case BFD_RELOC_ARM_ALU_SB_G2:
21853
    case BFD_RELOC_ARM_LDR_SB_G0:
21854
    case BFD_RELOC_ARM_LDR_SB_G1:
21855
    case BFD_RELOC_ARM_LDR_SB_G2:
21856
    case BFD_RELOC_ARM_LDRS_SB_G0:
21857
    case BFD_RELOC_ARM_LDRS_SB_G1:
21858
    case BFD_RELOC_ARM_LDRS_SB_G2:
21859
    case BFD_RELOC_ARM_LDC_SB_G0:
21860
    case BFD_RELOC_ARM_LDC_SB_G1:
21861
    case BFD_RELOC_ARM_LDC_SB_G2:
21862
    case BFD_RELOC_ARM_V4BX:
21863
      code = fixp->fx_r_type;
21864
      break;
21865
 
21866
    case BFD_RELOC_ARM_TLS_GOTDESC:
21867
    case BFD_RELOC_ARM_TLS_GD32:
21868
    case BFD_RELOC_ARM_TLS_IE32:
21869
    case BFD_RELOC_ARM_TLS_LDM32:
21870
      /* BFD will include the symbol's address in the addend.
21871
         But we don't want that, so subtract it out again here.  */
21872
      if (!S_IS_COMMON (fixp->fx_addsy))
21873
        reloc->addend -= (*reloc->sym_ptr_ptr)->value;
21874
      code = fixp->fx_r_type;
21875
      break;
21876
#endif
21877
 
21878
    case BFD_RELOC_ARM_IMMEDIATE:
21879
      as_bad_where (fixp->fx_file, fixp->fx_line,
21880
                    _("internal relocation (type: IMMEDIATE) not fixed up"));
21881
      return NULL;
21882
 
21883
    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21884
      as_bad_where (fixp->fx_file, fixp->fx_line,
21885
                    _("ADRL used for a symbol not defined in the same file"));
21886
      return NULL;
21887
 
21888
    case BFD_RELOC_ARM_OFFSET_IMM:
21889
      if (section->use_rela_p)
21890
        {
21891
          code = fixp->fx_r_type;
21892
          break;
21893
        }
21894
 
21895
      if (fixp->fx_addsy != NULL
21896
          && !S_IS_DEFINED (fixp->fx_addsy)
21897
          && S_IS_LOCAL (fixp->fx_addsy))
21898
        {
21899
          as_bad_where (fixp->fx_file, fixp->fx_line,
21900
                        _("undefined local label `%s'"),
21901
                        S_GET_NAME (fixp->fx_addsy));
21902
          return NULL;
21903
        }
21904
 
21905
      as_bad_where (fixp->fx_file, fixp->fx_line,
21906
                    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
21907
      return NULL;
21908
 
21909
    default:
21910
      {
21911
        char * type;
21912
 
21913
        switch (fixp->fx_r_type)
21914
          {
21915
          case BFD_RELOC_NONE:             type = "NONE";         break;
21916
          case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
21917
          case BFD_RELOC_ARM_SHIFT_IMM:    type = "SHIFT_IMM";    break;
21918
          case BFD_RELOC_ARM_SMC:          type = "SMC";          break;
21919
          case BFD_RELOC_ARM_SWI:          type = "SWI";          break;
21920
          case BFD_RELOC_ARM_MULTI:        type = "MULTI";        break;
21921
          case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";   break;
21922
          case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
21923
          case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
21924
          case BFD_RELOC_ARM_THUMB_ADD:    type = "THUMB_ADD";    break;
21925
          case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
21926
          case BFD_RELOC_ARM_THUMB_IMM:    type = "THUMB_IMM";    break;
21927
          case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
21928
          default:                         type = _("<unknown>"); break;
21929
          }
21930
        as_bad_where (fixp->fx_file, fixp->fx_line,
21931
                      _("cannot represent %s relocation in this object file format"),
21932
                      type);
21933
        return NULL;
21934
      }
21935
    }
21936
 
21937
#ifdef OBJ_ELF
21938
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
21939
      && GOT_symbol
21940
      && fixp->fx_addsy == GOT_symbol)
21941
    {
21942
      code = BFD_RELOC_ARM_GOTPC;
21943
      reloc->addend = fixp->fx_offset = reloc->address;
21944
    }
21945
#endif
21946
 
21947
  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
21948
 
21949
  if (reloc->howto == NULL)
21950
    {
21951
      as_bad_where (fixp->fx_file, fixp->fx_line,
21952
                    _("cannot represent %s relocation in this object file format"),
21953
                    bfd_get_reloc_code_name (code));
21954
      return NULL;
21955
    }
21956
 
21957
  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
21958
     vtable entry to be used in the relocation's section offset.  */
21959
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
21960
    reloc->address = fixp->fx_offset;
21961
 
21962
  return reloc;
21963
}
21964
 
21965
/* This fix_new is called by cons via TC_CONS_FIX_NEW.  */
21966
 
21967
void
21968
cons_fix_new_arm (fragS *       frag,
21969
                  int           where,
21970
                  int           size,
21971
                  expressionS * exp)
21972
{
21973
  bfd_reloc_code_real_type type;
21974
  int pcrel = 0;
21975
 
21976
  /* Pick a reloc.
21977
     FIXME: @@ Should look at CPU word size.  */
21978
  switch (size)
21979
    {
21980
    case 1:
21981
      type = BFD_RELOC_8;
21982
      break;
21983
    case 2:
21984
      type = BFD_RELOC_16;
21985
      break;
21986
    case 4:
21987
    default:
21988
      type = BFD_RELOC_32;
21989
      break;
21990
    case 8:
21991
      type = BFD_RELOC_64;
21992
      break;
21993
    }
21994
 
21995
#ifdef TE_PE
21996
  if (exp->X_op == O_secrel)
21997
  {
21998
    exp->X_op = O_symbol;
21999
    type = BFD_RELOC_32_SECREL;
22000
  }
22001
#endif
22002
 
22003
  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
22004
}
22005
 
22006
#if defined (OBJ_COFF)
22007
void
22008
arm_validate_fix (fixS * fixP)
22009
{
22010
  /* If the destination of the branch is a defined symbol which does not have
22011
     the THUMB_FUNC attribute, then we must be calling a function which has
22012
     the (interfacearm) attribute.  We look for the Thumb entry point to that
22013
     function and change the branch to refer to that function instead.  */
22014
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
22015
      && fixP->fx_addsy != NULL
22016
      && S_IS_DEFINED (fixP->fx_addsy)
22017
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
22018
    {
22019
      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
22020
    }
22021
}
22022
#endif
22023
 
22024
 
22025
int
22026
arm_force_relocation (struct fix * fixp)
22027
{
22028
#if defined (OBJ_COFF) && defined (TE_PE)
22029
  if (fixp->fx_r_type == BFD_RELOC_RVA)
22030
    return 1;
22031
#endif
22032
 
22033
  /* If we have a call or a branch to a function in ARM ISA mode from
22034
     a Thumb function, or vice versa, force the relocation.  These
22035
     relocations are cleared off for cores that have blx, where simple
22036
     transformations are possible.  */
22037
 
22038
#ifdef OBJ_ELF
22039
  switch (fixp->fx_r_type)
22040
    {
22041
    case BFD_RELOC_ARM_PCREL_JUMP:
22042
    case BFD_RELOC_ARM_PCREL_CALL:
22043
    case BFD_RELOC_THUMB_PCREL_BLX:
22044
      if (THUMB_IS_FUNC (fixp->fx_addsy))
22045
        return 1;
22046
      break;
22047
 
22048
    case BFD_RELOC_ARM_PCREL_BLX:
22049
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
22050
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
22051
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
22052
      if (ARM_IS_FUNC (fixp->fx_addsy))
22053
        return 1;
22054
      break;
22055
 
22056
    default:
22057
      break;
22058
    }
22059
#endif
22060
 
22061
  /* Resolve these relocations even if the symbol is extern or weak.
22062
     Technically this is probably wrong due to symbol preemption.
22063
     In practice these relocations do not have enough range to be useful
22064
     at dynamic link time, and some code (e.g. in the Linux kernel)
22065
     expects these references to be resolved.  */
22066
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
22067
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
22068
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
22069
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
22070
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22071
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
22072
      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
22073
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
22074
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22075
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
22076
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
22077
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
22078
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
22079
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
22080
    return 0;
22081
 
22082
  /* Always leave these relocations for the linker.  */
22083
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22084
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22085
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22086
    return 1;
22087
 
22088
  /* Always generate relocations against function symbols.  */
22089
  if (fixp->fx_r_type == BFD_RELOC_32
22090
      && fixp->fx_addsy
22091
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
22092
    return 1;
22093
 
22094
  return generic_force_reloc (fixp);
22095
}
22096
 
22097
#if defined (OBJ_ELF) || defined (OBJ_COFF)
22098
/* Relocations against function names must be left unadjusted,
22099
   so that the linker can use this information to generate interworking
22100
   stubs.  The MIPS version of this function
22101
   also prevents relocations that are mips-16 specific, but I do not
22102
   know why it does this.
22103
 
22104
   FIXME:
22105
   There is one other problem that ought to be addressed here, but
22106
   which currently is not:  Taking the address of a label (rather
22107
   than a function) and then later jumping to that address.  Such
22108
   addresses also ought to have their bottom bit set (assuming that
22109
   they reside in Thumb code), but at the moment they will not.  */
22110
 
22111
bfd_boolean
22112
arm_fix_adjustable (fixS * fixP)
22113
{
22114
  if (fixP->fx_addsy == NULL)
22115
    return 1;
22116
 
22117
  /* Preserve relocations against symbols with function type.  */
22118
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
22119
    return FALSE;
22120
 
22121
  if (THUMB_IS_FUNC (fixP->fx_addsy)
22122
      && fixP->fx_subsy == NULL)
22123
    return FALSE;
22124
 
22125
  /* We need the symbol name for the VTABLE entries.  */
22126
  if (   fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
22127
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
22128
    return FALSE;
22129
 
22130
  /* Don't allow symbols to be discarded on GOT related relocs.  */
22131
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
22132
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
22133
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
22134
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
22135
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
22136
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
22137
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
22138
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
22139
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
22140
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
22141
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
22142
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
22143
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
22144
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
22145
    return FALSE;
22146
 
22147
  /* Similarly for group relocations.  */
22148
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22149
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22150
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22151
    return FALSE;
22152
 
22153
  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
22154
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
22155
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22156
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
22157
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
22158
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22159
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
22160
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
22161
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
22162
    return FALSE;
22163
 
22164
  return TRUE;
22165
}
22166
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
22167
 
22168
#ifdef OBJ_ELF
22169
 
22170
const char *
22171
elf32_arm_target_format (void)
22172
{
22173
#ifdef TE_SYMBIAN
22174
  return (target_big_endian
22175
          ? "elf32-bigarm-symbian"
22176
          : "elf32-littlearm-symbian");
22177
#elif defined (TE_VXWORKS)
22178
  return (target_big_endian
22179
          ? "elf32-bigarm-vxworks"
22180
          : "elf32-littlearm-vxworks");
22181
#else
22182
  if (target_big_endian)
22183
    return "elf32-bigarm";
22184
  else
22185
    return "elf32-littlearm";
22186
#endif
22187
}
22188
 
22189
void
22190
armelf_frob_symbol (symbolS * symp,
22191
                    int *     puntp)
22192
{
22193
  elf_frob_symbol (symp, puntp);
22194
}
22195
#endif
22196
 
22197
/* MD interface: Finalization.  */
22198
 
22199
void
22200
arm_cleanup (void)
22201
{
22202
  literal_pool * pool;
22203
 
22204
  /* Ensure that all the IT blocks are properly closed.  */
22205
  check_it_blocks_finished ();
22206
 
22207
  for (pool = list_of_pools; pool; pool = pool->next)
22208
    {
22209
      /* Put it at the end of the relevant section.  */
22210
      subseg_set (pool->section, pool->sub_section);
22211
#ifdef OBJ_ELF
22212
      arm_elf_change_section ();
22213
#endif
22214
      s_ltorg (0);
22215
    }
22216
}
22217
 
22218
#ifdef OBJ_ELF
22219
/* Remove any excess mapping symbols generated for alignment frags in
22220
   SEC.  We may have created a mapping symbol before a zero byte
22221
   alignment; remove it if there's a mapping symbol after the
22222
   alignment.  */
22223
static void
22224
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
22225
                       void *dummy ATTRIBUTE_UNUSED)
22226
{
22227
  segment_info_type *seginfo = seg_info (sec);
22228
  fragS *fragp;
22229
 
22230
  if (seginfo == NULL || seginfo->frchainP == NULL)
22231
    return;
22232
 
22233
  for (fragp = seginfo->frchainP->frch_root;
22234
       fragp != NULL;
22235
       fragp = fragp->fr_next)
22236
    {
22237
      symbolS *sym = fragp->tc_frag_data.last_map;
22238
      fragS *next = fragp->fr_next;
22239
 
22240
      /* Variable-sized frags have been converted to fixed size by
22241
         this point.  But if this was variable-sized to start with,
22242
         there will be a fixed-size frag after it.  So don't handle
22243
         next == NULL.  */
22244
      if (sym == NULL || next == NULL)
22245
        continue;
22246
 
22247
      if (S_GET_VALUE (sym) < next->fr_address)
22248
        /* Not at the end of this frag.  */
22249
        continue;
22250
      know (S_GET_VALUE (sym) == next->fr_address);
22251
 
22252
      do
22253
        {
22254
          if (next->tc_frag_data.first_map != NULL)
22255
            {
22256
              /* Next frag starts with a mapping symbol.  Discard this
22257
                 one.  */
22258
              symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22259
              break;
22260
            }
22261
 
22262
          if (next->fr_next == NULL)
22263
            {
22264
              /* This mapping symbol is at the end of the section.  Discard
22265
                 it.  */
22266
              know (next->fr_fix == 0 && next->fr_var == 0);
22267
              symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22268
              break;
22269
            }
22270
 
22271
          /* As long as we have empty frags without any mapping symbols,
22272
             keep looking.  */
22273
          /* If the next frag is non-empty and does not start with a
22274
             mapping symbol, then this mapping symbol is required.  */
22275
          if (next->fr_address != next->fr_next->fr_address)
22276
            break;
22277
 
22278
          next = next->fr_next;
22279
        }
22280
      while (next != NULL);
22281
    }
22282
}
22283
#endif
22284
 
22285
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
22286
   ARM ones.  */
22287
 
22288
void
22289
arm_adjust_symtab (void)
22290
{
22291
#ifdef OBJ_COFF
22292
  symbolS * sym;
22293
 
22294
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22295
    {
22296
      if (ARM_IS_THUMB (sym))
22297
        {
22298
          if (THUMB_IS_FUNC (sym))
22299
            {
22300
              /* Mark the symbol as a Thumb function.  */
22301
              if (   S_GET_STORAGE_CLASS (sym) == C_STAT
22302
                  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
22303
                S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
22304
 
22305
              else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
22306
                S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
22307
              else
22308
                as_bad (_("%s: unexpected function type: %d"),
22309
                        S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
22310
            }
22311
          else switch (S_GET_STORAGE_CLASS (sym))
22312
            {
22313
            case C_EXT:
22314
              S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
22315
              break;
22316
            case C_STAT:
22317
              S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
22318
              break;
22319
            case C_LABEL:
22320
              S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
22321
              break;
22322
            default:
22323
              /* Do nothing.  */
22324
              break;
22325
            }
22326
        }
22327
 
22328
      if (ARM_IS_INTERWORK (sym))
22329
        coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
22330
    }
22331
#endif
22332
#ifdef OBJ_ELF
22333
  symbolS * sym;
22334
  char      bind;
22335
 
22336
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22337
    {
22338
      if (ARM_IS_THUMB (sym))
22339
        {
22340
          elf_symbol_type * elf_sym;
22341
 
22342
          elf_sym = elf_symbol (symbol_get_bfdsym (sym));
22343
          bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
22344
 
22345
          if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
22346
                BFD_ARM_SPECIAL_SYM_TYPE_ANY))
22347
            {
22348
              /* If it's a .thumb_func, declare it as such;
22349
                 otherwise tag the label as .code 16.  */
22350
              if (THUMB_IS_FUNC (sym))
22351
                elf_sym->internal_elf_sym.st_target_internal
22352
                  = ST_BRANCH_TO_THUMB;
22353
              else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
22354
                elf_sym->internal_elf_sym.st_info =
22355
                  ELF_ST_INFO (bind, STT_ARM_16BIT);
22356
            }
22357
        }
22358
    }
22359
 
22360
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
22361
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
22362
  /* Now do generic ELF adjustments.  */
22363
  elf_adjust_symtab ();
22364
#endif
22365
}
22366
 
22367
/* MD interface: Initialization.  */
22368
 
22369
static void
22370
set_constant_flonums (void)
22371
{
22372
  int i;
22373
 
22374
  for (i = 0; i < NUM_FLOAT_VALS; i++)
22375
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
22376
      abort ();
22377
}
22378
 
22379
/* Auto-select Thumb mode if it's the only available instruction set for the
22380
   given architecture.  */
22381
 
22382
static void
22383
autoselect_thumb_from_cpu_variant (void)
22384
{
22385
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
22386
    opcode_select (16);
22387
}
22388
 
22389
void
22390
md_begin (void)
22391
{
22392
  unsigned mach;
22393
  unsigned int i;
22394
 
22395
  if (   (arm_ops_hsh = hash_new ()) == NULL
22396
      || (arm_cond_hsh = hash_new ()) == NULL
22397
      || (arm_shift_hsh = hash_new ()) == NULL
22398
      || (arm_psr_hsh = hash_new ()) == NULL
22399
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
22400
      || (arm_reg_hsh = hash_new ()) == NULL
22401
      || (arm_reloc_hsh = hash_new ()) == NULL
22402
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
22403
    as_fatal (_("virtual memory exhausted"));
22404
 
22405
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
22406
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
22407
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
22408
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
22409
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
22410
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
22411
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
22412
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
22413
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
22414
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
22415
                 (void *) (v7m_psrs + i));
22416
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
22417
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
22418
  for (i = 0;
22419
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
22420
       i++)
22421
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
22422
                 (void *) (barrier_opt_names + i));
22423
#ifdef OBJ_ELF
22424 163 khays
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
22425
    {
22426
      struct reloc_entry * entry = reloc_names + i;
22427
 
22428
      if (arm_is_eabi () && entry->reloc == BFD_RELOC_ARM_PLT32)
22429
        /* This makes encode_branch() use the EABI versions of this relocation.  */
22430
        entry->reloc = BFD_RELOC_UNUSED;
22431
 
22432
      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
22433
    }
22434 16 khays
#endif
22435
 
22436
  set_constant_flonums ();
22437
 
22438
  /* Set the cpu variant based on the command-line options.  We prefer
22439
     -mcpu= over -march= if both are set (as for GCC); and we prefer
22440
     -mfpu= over any other way of setting the floating point unit.
22441
     Use of legacy options with new options is faulted.  */
22442
  if (legacy_cpu)
22443
    {
22444
      if (mcpu_cpu_opt || march_cpu_opt)
22445
        as_bad (_("use of old and new-style options to set CPU type"));
22446
 
22447
      mcpu_cpu_opt = legacy_cpu;
22448
    }
22449
  else if (!mcpu_cpu_opt)
22450
    mcpu_cpu_opt = march_cpu_opt;
22451
 
22452
  if (legacy_fpu)
22453
    {
22454
      if (mfpu_opt)
22455
        as_bad (_("use of old and new-style options to set FPU type"));
22456
 
22457
      mfpu_opt = legacy_fpu;
22458
    }
22459
  else if (!mfpu_opt)
22460
    {
22461
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
22462
        || defined (TE_NetBSD) || defined (TE_VXWORKS))
22463
      /* Some environments specify a default FPU.  If they don't, infer it
22464
         from the processor.  */
22465
      if (mcpu_fpu_opt)
22466
        mfpu_opt = mcpu_fpu_opt;
22467
      else
22468
        mfpu_opt = march_fpu_opt;
22469
#else
22470
      mfpu_opt = &fpu_default;
22471
#endif
22472
    }
22473
 
22474
  if (!mfpu_opt)
22475
    {
22476
      if (mcpu_cpu_opt != NULL)
22477
        mfpu_opt = &fpu_default;
22478
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
22479
        mfpu_opt = &fpu_arch_vfp_v2;
22480
      else
22481
        mfpu_opt = &fpu_arch_fpa;
22482
    }
22483
 
22484
#ifdef CPU_DEFAULT
22485
  if (!mcpu_cpu_opt)
22486
    {
22487
      mcpu_cpu_opt = &cpu_default;
22488
      selected_cpu = cpu_default;
22489
    }
22490
#else
22491
  if (mcpu_cpu_opt)
22492
    selected_cpu = *mcpu_cpu_opt;
22493
  else
22494
    mcpu_cpu_opt = &arm_arch_any;
22495
#endif
22496
 
22497
  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22498
 
22499
  autoselect_thumb_from_cpu_variant ();
22500
 
22501
  arm_arch_used = thumb_arch_used = arm_arch_none;
22502
 
22503
#if defined OBJ_COFF || defined OBJ_ELF
22504
  {
22505
    unsigned int flags = 0;
22506
 
22507
#if defined OBJ_ELF
22508
    flags = meabi_flags;
22509
 
22510
    switch (meabi_flags)
22511
      {
22512
      case EF_ARM_EABI_UNKNOWN:
22513
#endif
22514
        /* Set the flags in the private structure.  */
22515
        if (uses_apcs_26)      flags |= F_APCS26;
22516
        if (support_interwork) flags |= F_INTERWORK;
22517
        if (uses_apcs_float)   flags |= F_APCS_FLOAT;
22518
        if (pic_code)          flags |= F_PIC;
22519
        if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
22520
          flags |= F_SOFT_FLOAT;
22521
 
22522
        switch (mfloat_abi_opt)
22523
          {
22524
          case ARM_FLOAT_ABI_SOFT:
22525
          case ARM_FLOAT_ABI_SOFTFP:
22526
            flags |= F_SOFT_FLOAT;
22527
            break;
22528
 
22529
          case ARM_FLOAT_ABI_HARD:
22530
            if (flags & F_SOFT_FLOAT)
22531
              as_bad (_("hard-float conflicts with specified fpu"));
22532
            break;
22533
          }
22534
 
22535
        /* Using pure-endian doubles (even if soft-float).      */
22536
        if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
22537
          flags |= F_VFP_FLOAT;
22538
 
22539
#if defined OBJ_ELF
22540
        if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
22541
            flags |= EF_ARM_MAVERICK_FLOAT;
22542
        break;
22543
 
22544
      case EF_ARM_EABI_VER4:
22545
      case EF_ARM_EABI_VER5:
22546
        /* No additional flags to set.  */
22547
        break;
22548
 
22549
      default:
22550
        abort ();
22551
      }
22552
#endif
22553
    bfd_set_private_flags (stdoutput, flags);
22554
 
22555
    /* We have run out of flags in the COFF header to encode the
22556
       status of ATPCS support, so instead we create a dummy,
22557
       empty, debug section called .arm.atpcs.  */
22558
    if (atpcs)
22559
      {
22560
        asection * sec;
22561
 
22562
        sec = bfd_make_section (stdoutput, ".arm.atpcs");
22563
 
22564
        if (sec != NULL)
22565
          {
22566
            bfd_set_section_flags
22567
              (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
22568
            bfd_set_section_size (stdoutput, sec, 0);
22569
            bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
22570
          }
22571
      }
22572
  }
22573
#endif
22574
 
22575
  /* Record the CPU type as well.  */
22576
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
22577
    mach = bfd_mach_arm_iWMMXt2;
22578
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
22579
    mach = bfd_mach_arm_iWMMXt;
22580
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
22581
    mach = bfd_mach_arm_XScale;
22582
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
22583
    mach = bfd_mach_arm_ep9312;
22584
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
22585
    mach = bfd_mach_arm_5TE;
22586
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
22587
    {
22588
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22589
        mach = bfd_mach_arm_5T;
22590
      else
22591
        mach = bfd_mach_arm_5;
22592
    }
22593
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
22594
    {
22595
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22596
        mach = bfd_mach_arm_4T;
22597
      else
22598
        mach = bfd_mach_arm_4;
22599
    }
22600
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
22601
    mach = bfd_mach_arm_3M;
22602
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
22603
    mach = bfd_mach_arm_3;
22604
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
22605
    mach = bfd_mach_arm_2a;
22606
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
22607
    mach = bfd_mach_arm_2;
22608
  else
22609
    mach = bfd_mach_arm_unknown;
22610
 
22611
  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
22612
}
22613
 
22614
/* Command line processing.  */
22615
 
22616
/* md_parse_option
22617
      Invocation line includes a switch not recognized by the base assembler.
22618
      See if it's a processor-specific option.
22619
 
22620
      This routine is somewhat complicated by the need for backwards
22621
      compatibility (since older releases of gcc can't be changed).
22622
      The new options try to make the interface as compatible as
22623
      possible with GCC.
22624
 
22625
      New options (supported) are:
22626
 
22627
              -mcpu=<cpu name>           Assemble for selected processor
22628
              -march=<architecture name> Assemble for selected architecture
22629
              -mfpu=<fpu architecture>   Assemble for selected FPU.
22630
              -EB/-mbig-endian           Big-endian
22631
              -EL/-mlittle-endian        Little-endian
22632
              -k                         Generate PIC code
22633
              -mthumb                    Start in Thumb mode
22634
              -mthumb-interwork          Code supports ARM/Thumb interworking
22635
 
22636
              -m[no-]warn-deprecated     Warn about deprecated features
22637
 
22638
      For now we will also provide support for:
22639
 
22640
              -mapcs-32                  32-bit Program counter
22641
              -mapcs-26                  26-bit Program counter
22642
              -mapcs-float               Floats passed in FP registers
22643
              -mapcs-reentrant           Reentrant code
22644
              -matpcs
22645
      (sometime these will probably be replaced with -mapcs=<list of options>
22646
      and -matpcs=<list of options>)
22647
 
22648
      The remaining options are only supported for backwards compatibility.
22649
      Cpu variants, the arm part is optional:
22650
              -m[arm]1                Currently not supported.
22651
              -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
22652
              -m[arm]3                Arm 3 processor
22653
              -m[arm]6[xx],           Arm 6 processors
22654
              -m[arm]7[xx][t][[d]m]   Arm 7 processors
22655
              -m[arm]8[10]            Arm 8 processors
22656
              -m[arm]9[20][tdmi]      Arm 9 processors
22657
              -mstrongarm[110[0]]     StrongARM processors
22658
              -mxscale                XScale processors
22659
              -m[arm]v[2345[t[e]]]    Arm architectures
22660
              -mall                   All (except the ARM1)
22661
      FP variants:
22662
              -mfpa10, -mfpa11        FPA10 and 11 co-processor instructions
22663
              -mfpe-old               (No float load/store multiples)
22664
              -mvfpxd                 VFP Single precision
22665
              -mvfp                   All VFP
22666
              -mno-fpu                Disable all floating point instructions
22667
 
22668
      The following CPU names are recognized:
22669
              arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
22670
              arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
22671
              arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
22672
              arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
22673
              arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
22674
              arm10t, arm10e, arm1020t, arm1020e, arm10200e,
22675
              strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
22676
 
22677
      */
22678
 
22679
const char * md_shortopts = "m:k";
22680
 
22681
#ifdef ARM_BI_ENDIAN
22682
#define OPTION_EB (OPTION_MD_BASE + 0)
22683
#define OPTION_EL (OPTION_MD_BASE + 1)
22684
#else
22685
#if TARGET_BYTES_BIG_ENDIAN
22686
#define OPTION_EB (OPTION_MD_BASE + 0)
22687
#else
22688
#define OPTION_EL (OPTION_MD_BASE + 1)
22689
#endif
22690
#endif
22691
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
22692
 
22693
struct option md_longopts[] =
22694
{
22695
#ifdef OPTION_EB
22696
  {"EB", no_argument, NULL, OPTION_EB},
22697
#endif
22698
#ifdef OPTION_EL
22699
  {"EL", no_argument, NULL, OPTION_EL},
22700
#endif
22701
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
22702
  {NULL, no_argument, NULL, 0}
22703
};
22704
 
22705
size_t md_longopts_size = sizeof (md_longopts);
22706
 
22707
struct arm_option_table
22708
{
22709
  char *option;         /* Option name to match.  */
22710
  char *help;           /* Help information.  */
22711
  int  *var;            /* Variable to change.  */
22712
  int   value;          /* What to change it to.  */
22713
  char *deprecated;     /* If non-null, print this message.  */
22714
};
22715
 
22716
struct arm_option_table arm_opts[] =
22717
{
22718
  {"k",      N_("generate PIC code"),      &pic_code,    1, NULL},
22719
  {"mthumb", N_("assemble Thumb code"),    &thumb_mode,  1, NULL},
22720
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
22721
   &support_interwork, 1, NULL},
22722
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
22723
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
22724
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
22725
   1, NULL},
22726
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
22727
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
22728
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
22729
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
22730
   NULL},
22731
 
22732
  /* These are recognized by the assembler, but have no effect on code.  */
22733
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
22734
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
22735
 
22736
  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
22737
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
22738
   &warn_on_deprecated, 0, NULL},
22739
  {NULL, NULL, NULL, 0, NULL}
22740
};
22741
 
22742
struct arm_legacy_option_table
22743
{
22744
  char *option;                         /* Option name to match.  */
22745
  const arm_feature_set **var;          /* Variable to change.  */
22746
  const arm_feature_set value;          /* What to change it to.  */
22747
  char *deprecated;                     /* If non-null, print this message.  */
22748
};
22749
 
22750
const struct arm_legacy_option_table arm_legacy_opts[] =
22751
{
22752
  /* DON'T add any new processors to this list -- we want the whole list
22753
     to go away...  Add them to the processors table instead.  */
22754
  {"marm1",      &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
22755
  {"m1",         &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
22756
  {"marm2",      &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
22757
  {"m2",         &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
22758
  {"marm250",    &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
22759
  {"m250",       &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
22760
  {"marm3",      &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
22761
  {"m3",         &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
22762
  {"marm6",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
22763
  {"m6",         &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
22764
  {"marm600",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
22765
  {"m600",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
22766
  {"marm610",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
22767
  {"m610",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
22768
  {"marm620",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
22769
  {"m620",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
22770
  {"marm7",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
22771
  {"m7",         &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
22772
  {"marm70",     &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
22773
  {"m70",        &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
22774
  {"marm700",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
22775
  {"m700",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
22776
  {"marm700i",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
22777
  {"m700i",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
22778
  {"marm710",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
22779
  {"m710",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
22780
  {"marm710c",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
22781
  {"m710c",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
22782
  {"marm720",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
22783
  {"m720",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
22784
  {"marm7d",     &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
22785
  {"m7d",        &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
22786
  {"marm7di",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
22787
  {"m7di",       &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
22788
  {"marm7m",     &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
22789
  {"m7m",        &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
22790
  {"marm7dm",    &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
22791
  {"m7dm",       &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
22792
  {"marm7dmi",   &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
22793
  {"m7dmi",      &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
22794
  {"marm7100",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
22795
  {"m7100",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
22796
  {"marm7500",   &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
22797
  {"m7500",      &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
22798
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
22799
  {"m7500fe",    &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
22800
  {"marm7t",     &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22801
  {"m7t",        &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22802
  {"marm7tdmi",  &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22803
  {"m7tdmi",     &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22804
  {"marm710t",   &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
22805
  {"m710t",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
22806
  {"marm720t",   &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
22807
  {"m720t",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
22808
  {"marm740t",   &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
22809
  {"m740t",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
22810
  {"marm8",      &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
22811
  {"m8",         &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
22812
  {"marm810",    &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
22813
  {"m810",       &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
22814
  {"marm9",      &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
22815
  {"m9",         &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
22816
  {"marm9tdmi",  &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
22817
  {"m9tdmi",     &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
22818
  {"marm920",    &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
22819
  {"m920",       &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
22820
  {"marm940",    &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
22821
  {"m940",       &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
22822
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
22823
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
22824
   N_("use -mcpu=strongarm110")},
22825
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
22826
   N_("use -mcpu=strongarm1100")},
22827
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
22828
   N_("use -mcpu=strongarm1110")},
22829
  {"mxscale",    &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
22830
  {"miwmmxt",    &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
22831
  {"mall",       &legacy_cpu, ARM_ANY,         N_("use -mcpu=all")},
22832
 
22833
  /* Architecture variants -- don't add any more to this list either.  */
22834
  {"mv2",        &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
22835
  {"marmv2",     &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
22836
  {"mv2a",       &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
22837
  {"marmv2a",    &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
22838
  {"mv3",        &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
22839
  {"marmv3",     &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
22840
  {"mv3m",       &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
22841
  {"marmv3m",    &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
22842
  {"mv4",        &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
22843
  {"marmv4",     &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
22844
  {"mv4t",       &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
22845
  {"marmv4t",    &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
22846
  {"mv5",        &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
22847
  {"marmv5",     &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
22848
  {"mv5t",       &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
22849
  {"marmv5t",    &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
22850
  {"mv5e",       &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
22851
  {"marmv5e",    &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
22852
 
22853
  /* Floating point variants -- don't add any more to this list either.  */
22854
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
22855
  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
22856
  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
22857
  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
22858
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},
22859
 
22860
  {NULL, NULL, ARM_ARCH_NONE, NULL}
22861
};
22862
 
22863
struct arm_cpu_option_table
22864
{
22865
  char *name;
22866
  const arm_feature_set value;
22867
  /* For some CPUs we assume an FPU unless the user explicitly sets
22868
     -mfpu=...  */
22869
  const arm_feature_set default_fpu;
22870
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
22871
     case.  */
22872
  const char *canonical_name;
22873
};
22874
 
22875
/* This list should, at a minimum, contain all the cpu names
22876
   recognized by GCC.  */
22877
static const struct arm_cpu_option_table arm_cpus[] =
22878
{
22879
  {"all",               ARM_ANY,         FPU_ARCH_FPA,    NULL},
22880
  {"arm1",              ARM_ARCH_V1,     FPU_ARCH_FPA,    NULL},
22881
  {"arm2",              ARM_ARCH_V2,     FPU_ARCH_FPA,    NULL},
22882
  {"arm250",            ARM_ARCH_V2S,    FPU_ARCH_FPA,    NULL},
22883
  {"arm3",              ARM_ARCH_V2S,    FPU_ARCH_FPA,    NULL},
22884
  {"arm6",              ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22885
  {"arm60",             ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22886
  {"arm600",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22887
  {"arm610",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22888
  {"arm620",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22889
  {"arm7",              ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22890
  {"arm7m",             ARM_ARCH_V3M,    FPU_ARCH_FPA,    NULL},
22891
  {"arm7d",             ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22892
  {"arm7dm",            ARM_ARCH_V3M,    FPU_ARCH_FPA,    NULL},
22893
  {"arm7di",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22894
  {"arm7dmi",           ARM_ARCH_V3M,    FPU_ARCH_FPA,    NULL},
22895
  {"arm70",             ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22896
  {"arm700",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22897
  {"arm700i",           ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22898
  {"arm710",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22899
  {"arm710t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22900
  {"arm720",            ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22901
  {"arm720t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22902
  {"arm740t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22903
  {"arm710c",           ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22904
  {"arm7100",           ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22905
  {"arm7500",           ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22906
  {"arm7500fe",         ARM_ARCH_V3,     FPU_ARCH_FPA,    NULL},
22907
  {"arm7t",             ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22908
  {"arm7tdmi",          ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22909
  {"arm7tdmi-s",        ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22910
  {"arm8",              ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22911
  {"arm810",            ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22912
  {"strongarm",         ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22913
  {"strongarm1",        ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22914
  {"strongarm110",      ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22915
  {"strongarm1100",     ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22916
  {"strongarm1110",     ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22917
  {"arm9",              ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22918
  {"arm920",            ARM_ARCH_V4T,    FPU_ARCH_FPA,    "ARM920T"},
22919
  {"arm920t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22920
  {"arm922t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22921
  {"arm940t",           ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22922
  {"arm9tdmi",          ARM_ARCH_V4T,    FPU_ARCH_FPA,    NULL},
22923
  {"fa526",             ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22924
  {"fa626",             ARM_ARCH_V4,     FPU_ARCH_FPA,    NULL},
22925
  /* For V5 or later processors we default to using VFP; but the user
22926
     should really set the FPU type explicitly.  */
22927
  {"arm9e-r0",          ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
22928
  {"arm9e",             ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22929
  {"arm926ej",          ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, "ARM926EJ-S"},
22930
  {"arm926ejs",         ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, "ARM926EJ-S"},
22931
  {"arm926ej-s",        ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, NULL},
22932
  {"arm946e-r0",        ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
22933
  {"arm946e",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, "ARM946E-S"},
22934
  {"arm946e-s",         ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22935
  {"arm966e-r0",        ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
22936
  {"arm966e",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, "ARM966E-S"},
22937
  {"arm966e-s",         ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22938
  {"arm968e-s",         ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22939
  {"arm10t",            ARM_ARCH_V5T,    FPU_ARCH_VFP_V1, NULL},
22940
  {"arm10tdmi",         ARM_ARCH_V5T,    FPU_ARCH_VFP_V1, NULL},
22941
  {"arm10e",            ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22942
  {"arm1020",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, "ARM1020E"},
22943
  {"arm1020t",          ARM_ARCH_V5T,    FPU_ARCH_VFP_V1, NULL},
22944
  {"arm1020e",          ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22945
  {"arm1022e",          ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22946
  {"arm1026ejs",        ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
22947
  {"arm1026ej-s",       ARM_ARCH_V5TEJ,  FPU_ARCH_VFP_V2, NULL},
22948
  {"fa606te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22949
  {"fa616te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22950
  {"fa626te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22951
  {"fmp626",            ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22952
  {"fa726te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP_V2, NULL},
22953
  {"arm1136js",         ARM_ARCH_V6,     FPU_NONE,        "ARM1136J-S"},
22954
  {"arm1136j-s",        ARM_ARCH_V6,     FPU_NONE,        NULL},
22955
  {"arm1136jfs",        ARM_ARCH_V6,     FPU_ARCH_VFP_V2, "ARM1136JF-S"},
22956
  {"arm1136jf-s",       ARM_ARCH_V6,     FPU_ARCH_VFP_V2, NULL},
22957
  {"mpcore",            ARM_ARCH_V6K,    FPU_ARCH_VFP_V2, "MPCore"},
22958
  {"mpcorenovfp",       ARM_ARCH_V6K,    FPU_NONE,        "MPCore"},
22959
  {"arm1156t2-s",       ARM_ARCH_V6T2,   FPU_NONE,        NULL},
22960
  {"arm1156t2f-s",      ARM_ARCH_V6T2,   FPU_ARCH_VFP_V2, NULL},
22961
  {"arm1176jz-s",       ARM_ARCH_V6ZK,   FPU_NONE,        NULL},
22962
  {"arm1176jzf-s",      ARM_ARCH_V6ZK,   FPU_ARCH_VFP_V2, NULL},
22963
  {"cortex-a5",         ARM_ARCH_V7A_MP_SEC,
22964
                                         FPU_NONE,        "Cortex-A5"},
22965 163 khays
  {"cortex-a7",         ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
22966
                                         FPU_ARCH_NEON_VFP_V4,
22967
                                                          "Cortex-A7"},
22968 16 khays
  {"cortex-a8",         ARM_ARCH_V7A_SEC,
22969
                                         ARM_FEATURE (0, FPU_VFP_V3
22970
                                                        | FPU_NEON_EXT_V1),
22971
                                                          "Cortex-A8"},
22972
  {"cortex-a9",         ARM_ARCH_V7A_MP_SEC,
22973
                                         ARM_FEATURE (0, FPU_VFP_V3
22974
                                                        | FPU_NEON_EXT_V1),
22975
                                                          "Cortex-A9"},
22976
  {"cortex-a15",        ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
22977
                                         FPU_ARCH_NEON_VFP_V4,
22978
                                                          "Cortex-A15"},
22979
  {"cortex-r4",         ARM_ARCH_V7R,    FPU_NONE,        "Cortex-R4"},
22980
  {"cortex-r4f",        ARM_ARCH_V7R,    FPU_ARCH_VFP_V3D16,
22981
                                                          "Cortex-R4F"},
22982
  {"cortex-r5",         ARM_ARCH_V7R_IDIV,
22983
                                         FPU_NONE,        "Cortex-R5"},
22984
  {"cortex-m4",         ARM_ARCH_V7EM,   FPU_NONE,        "Cortex-M4"},
22985
  {"cortex-m3",         ARM_ARCH_V7M,    FPU_NONE,        "Cortex-M3"},
22986
  {"cortex-m1",         ARM_ARCH_V6SM,   FPU_NONE,        "Cortex-M1"},
22987
  {"cortex-m0",         ARM_ARCH_V6SM,   FPU_NONE,        "Cortex-M0"},
22988
  /* ??? XSCALE is really an architecture.  */
22989
  {"xscale",            ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
22990
  /* ??? iwmmxt is not a processor.  */
22991
  {"iwmmxt",            ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
22992
  {"iwmmxt2",           ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
22993
  {"i80200",            ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
22994
  /* Maverick */
22995
  {"ep9312",    ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
22996
  {NULL,                ARM_ARCH_NONE,   ARM_ARCH_NONE, NULL}
22997
};
22998
 
22999
struct arm_arch_option_table
23000
{
23001
  char *name;
23002
  const arm_feature_set value;
23003
  const arm_feature_set default_fpu;
23004
};
23005
 
23006
/* This list should, at a minimum, contain all the architecture names
23007
   recognized by GCC.  */
23008
static const struct arm_arch_option_table arm_archs[] =
23009
{
23010
  {"all",               ARM_ANY,         FPU_ARCH_FPA},
23011
  {"armv1",             ARM_ARCH_V1,     FPU_ARCH_FPA},
23012
  {"armv2",             ARM_ARCH_V2,     FPU_ARCH_FPA},
23013
  {"armv2a",            ARM_ARCH_V2S,    FPU_ARCH_FPA},
23014
  {"armv2s",            ARM_ARCH_V2S,    FPU_ARCH_FPA},
23015
  {"armv3",             ARM_ARCH_V3,     FPU_ARCH_FPA},
23016
  {"armv3m",            ARM_ARCH_V3M,    FPU_ARCH_FPA},
23017
  {"armv4",             ARM_ARCH_V4,     FPU_ARCH_FPA},
23018
  {"armv4xm",           ARM_ARCH_V4xM,   FPU_ARCH_FPA},
23019
  {"armv4t",            ARM_ARCH_V4T,    FPU_ARCH_FPA},
23020
  {"armv4txm",          ARM_ARCH_V4TxM,  FPU_ARCH_FPA},
23021
  {"armv5",             ARM_ARCH_V5,     FPU_ARCH_VFP},
23022
  {"armv5t",            ARM_ARCH_V5T,    FPU_ARCH_VFP},
23023
  {"armv5txm",          ARM_ARCH_V5TxM,  FPU_ARCH_VFP},
23024
  {"armv5te",           ARM_ARCH_V5TE,   FPU_ARCH_VFP},
23025
  {"armv5texp",         ARM_ARCH_V5TExP, FPU_ARCH_VFP},
23026
  {"armv5tej",          ARM_ARCH_V5TEJ,  FPU_ARCH_VFP},
23027
  {"armv6",             ARM_ARCH_V6,     FPU_ARCH_VFP},
23028
  {"armv6j",            ARM_ARCH_V6,     FPU_ARCH_VFP},
23029
  {"armv6k",            ARM_ARCH_V6K,    FPU_ARCH_VFP},
23030
  {"armv6z",            ARM_ARCH_V6Z,    FPU_ARCH_VFP},
23031
  {"armv6zk",           ARM_ARCH_V6ZK,   FPU_ARCH_VFP},
23032
  {"armv6t2",           ARM_ARCH_V6T2,   FPU_ARCH_VFP},
23033
  {"armv6kt2",          ARM_ARCH_V6KT2,  FPU_ARCH_VFP},
23034
  {"armv6zt2",          ARM_ARCH_V6ZT2,  FPU_ARCH_VFP},
23035
  {"armv6zkt2",         ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
23036
  {"armv6-m",           ARM_ARCH_V6M,    FPU_ARCH_VFP},
23037
  {"armv6s-m",          ARM_ARCH_V6SM,   FPU_ARCH_VFP},
23038
  {"armv7",             ARM_ARCH_V7,     FPU_ARCH_VFP},
23039
  /* The official spelling of the ARMv7 profile variants is the dashed form.
23040
     Accept the non-dashed form for compatibility with old toolchains.  */
23041
  {"armv7a",            ARM_ARCH_V7A,    FPU_ARCH_VFP},
23042
  {"armv7r",            ARM_ARCH_V7R,    FPU_ARCH_VFP},
23043
  {"armv7m",            ARM_ARCH_V7M,    FPU_ARCH_VFP},
23044
  {"armv7-a",           ARM_ARCH_V7A,    FPU_ARCH_VFP},
23045
  {"armv7-r",           ARM_ARCH_V7R,    FPU_ARCH_VFP},
23046
  {"armv7-m",           ARM_ARCH_V7M,    FPU_ARCH_VFP},
23047
  {"armv7e-m",          ARM_ARCH_V7EM,   FPU_ARCH_VFP},
23048
  {"xscale",            ARM_ARCH_XSCALE, FPU_ARCH_VFP},
23049
  {"iwmmxt",            ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
23050
  {"iwmmxt2",           ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
23051
  {NULL,                ARM_ARCH_NONE,   ARM_ARCH_NONE}
23052
};
23053
 
23054
/* ISA extensions in the co-processor and main instruction set space.  */
23055
struct arm_option_extension_value_table
23056
{
23057
  char *name;
23058
  const arm_feature_set value;
23059
  const arm_feature_set allowed_archs;
23060
};
23061
 
23062
/* The following table must be in alphabetical order with a NULL last
23063
   entry.  */
23064
static const struct arm_option_extension_value_table arm_extensions[] =
23065
{
23066
  {"idiv",      ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
23067
                                   ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)},
23068
  {"iwmmxt",    ARM_FEATURE (0, ARM_CEXT_IWMMXT),        ARM_ANY},
23069
  {"iwmmxt2",   ARM_FEATURE (0, ARM_CEXT_IWMMXT2),       ARM_ANY},
23070
  {"maverick",  ARM_FEATURE (0, ARM_CEXT_MAVERICK),      ARM_ANY},
23071
  {"mp",        ARM_FEATURE (ARM_EXT_MP, 0),
23072
                     ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)},
23073
  {"os",        ARM_FEATURE (ARM_EXT_OS, 0),
23074
                                   ARM_FEATURE (ARM_EXT_V6M, 0)},
23075
  {"sec",       ARM_FEATURE (ARM_EXT_SEC, 0),
23076
                     ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)},
23077
  {"virt",      ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV | ARM_EXT_DIV, 0),
23078
                                   ARM_FEATURE (ARM_EXT_V7A, 0)},
23079
  {"xscale",    ARM_FEATURE (0, ARM_CEXT_XSCALE),        ARM_ANY},
23080
  {NULL,        ARM_ARCH_NONE,                    ARM_ARCH_NONE}
23081
};
23082
 
23083
/* ISA floating-point and Advanced SIMD extensions.  */
23084
struct arm_option_fpu_value_table
23085
{
23086
  char *name;
23087
  const arm_feature_set value;
23088
};
23089
 
23090
/* This list should, at a minimum, contain all the fpu names
23091
   recognized by GCC.  */
23092
static const struct arm_option_fpu_value_table arm_fpus[] =
23093
{
23094
  {"softfpa",           FPU_NONE},
23095
  {"fpe",               FPU_ARCH_FPE},
23096
  {"fpe2",              FPU_ARCH_FPE},
23097
  {"fpe3",              FPU_ARCH_FPA},  /* Third release supports LFM/SFM.  */
23098
  {"fpa",               FPU_ARCH_FPA},
23099
  {"fpa10",             FPU_ARCH_FPA},
23100
  {"fpa11",             FPU_ARCH_FPA},
23101
  {"arm7500fe",         FPU_ARCH_FPA},
23102
  {"softvfp",           FPU_ARCH_VFP},
23103
  {"softvfp+vfp",       FPU_ARCH_VFP_V2},
23104
  {"vfp",               FPU_ARCH_VFP_V2},
23105
  {"vfp9",              FPU_ARCH_VFP_V2},
23106
  {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatbility.  */
23107
  {"vfp10",             FPU_ARCH_VFP_V2},
23108
  {"vfp10-r0",          FPU_ARCH_VFP_V1},
23109
  {"vfpxd",             FPU_ARCH_VFP_V1xD},
23110
  {"vfpv2",             FPU_ARCH_VFP_V2},
23111
  {"vfpv3",             FPU_ARCH_VFP_V3},
23112
  {"vfpv3-fp16",        FPU_ARCH_VFP_V3_FP16},
23113
  {"vfpv3-d16",         FPU_ARCH_VFP_V3D16},
23114
  {"vfpv3-d16-fp16",    FPU_ARCH_VFP_V3D16_FP16},
23115
  {"vfpv3xd",           FPU_ARCH_VFP_V3xD},
23116
  {"vfpv3xd-fp16",      FPU_ARCH_VFP_V3xD_FP16},
23117
  {"arm1020t",          FPU_ARCH_VFP_V1},
23118
  {"arm1020e",          FPU_ARCH_VFP_V2},
23119
  {"arm1136jfs",        FPU_ARCH_VFP_V2},
23120
  {"arm1136jf-s",       FPU_ARCH_VFP_V2},
23121
  {"maverick",          FPU_ARCH_MAVERICK},
23122
  {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
23123
  {"neon-fp16",         FPU_ARCH_NEON_FP16},
23124
  {"vfpv4",             FPU_ARCH_VFP_V4},
23125
  {"vfpv4-d16",         FPU_ARCH_VFP_V4D16},
23126
  {"fpv4-sp-d16",       FPU_ARCH_VFP_V4_SP_D16},
23127
  {"neon-vfpv4",        FPU_ARCH_NEON_VFP_V4},
23128
  {NULL,                ARM_ARCH_NONE}
23129
};
23130
 
23131
struct arm_option_value_table
23132
{
23133
  char *name;
23134
  long value;
23135
};
23136
 
23137
static const struct arm_option_value_table arm_float_abis[] =
23138
{
23139
  {"hard",      ARM_FLOAT_ABI_HARD},
23140
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
23141
  {"soft",      ARM_FLOAT_ABI_SOFT},
23142
  {NULL,        0}
23143
};
23144
 
23145
#ifdef OBJ_ELF
23146
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
23147
static const struct arm_option_value_table arm_eabis[] =
23148
{
23149
  {"gnu",       EF_ARM_EABI_UNKNOWN},
23150
  {"4",         EF_ARM_EABI_VER4},
23151
  {"5",         EF_ARM_EABI_VER5},
23152
  {NULL,        0}
23153
};
23154
#endif
23155
 
23156
struct arm_long_option_table
23157
{
23158
  char * option;                /* Substring to match.  */
23159
  char * help;                  /* Help information.  */
23160
  int (* func) (char * subopt); /* Function to decode sub-option.  */
23161
  char * deprecated;            /* If non-null, print this message.  */
23162
};
23163
 
23164
static bfd_boolean
23165
arm_parse_extension (char * str, const arm_feature_set **opt_p)
23166
{
23167
  arm_feature_set *ext_set = (arm_feature_set *)
23168
      xmalloc (sizeof (arm_feature_set));
23169
 
23170
  /* We insist on extensions being specified in alphabetical order, and with
23171
     extensions being added before being removed.  We achieve this by having
23172
     the global ARM_EXTENSIONS table in alphabetical order, and using the
23173
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
23174
     or removing it (0) and only allowing it to change in the order
23175
     -1 -> 1 -> 0.  */
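  /* Illustrative note, not in the original source: under these rules a
     suffix such as "+mp+sec+noiwmmxt" is accepted (additions in
     alphabetical order, then removals), while "+sec+mp" or "+noiwmmxt+mp"
     is rejected by the checks below.  */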
23176
  const struct arm_option_extension_value_table * opt = NULL;
23177
  int adding_value = -1;
23178
 
23179
  /* Copy the feature set, so that we can modify it.  */
23180
  *ext_set = **opt_p;
23181
  *opt_p = ext_set;
23182
 
23183
  while (str != NULL && *str != 0)
23184
    {
23185
      char * ext;
23186
      size_t optlen;
23187
 
23188
      if (*str != '+')
23189
        {
23190
          as_bad (_("invalid architectural extension"));
23191
          return FALSE;
23192
        }
23193
 
23194
      str++;
23195
      ext = strchr (str, '+');
23196
 
23197
      if (ext != NULL)
23198
        optlen = ext - str;
23199
      else
23200
        optlen = strlen (str);
23201
 
23202
      if (optlen >= 2
23203
          && strncmp (str, "no", 2) == 0)
23204
        {
23205
          if (adding_value != 0)
23206
            {
23207
              adding_value = 0;
23208
              opt = arm_extensions;
23209
            }
23210
 
23211
          optlen -= 2;
23212
          str += 2;
23213
        }
23214
      else if (optlen > 0)
23215
        {
23216
          if (adding_value == -1)
23217
            {
23218
              adding_value = 1;
23219
              opt = arm_extensions;
23220
            }
23221
          else if (adding_value != 1)
23222
            {
23223
              as_bad (_("must specify extensions to add before specifying "
23224
                        "those to remove"));
23225
              return FALSE;
23226
            }
23227
        }
23228
 
23229
      if (optlen == 0)
23230
        {
23231
          as_bad (_("missing architectural extension"));
23232
          return FALSE;
23233
        }
23234
 
23235
      gas_assert (adding_value != -1);
23236
      gas_assert (opt != NULL);
23237
 
23238
      /* Scan over the options table trying to find an exact match. */
23239
      for (; opt->name != NULL; opt++)
23240
        if (strncmp (opt->name, str, optlen) == 0
23241
            && strlen (opt->name) == optlen)
23242
          {
23243
            /* Check we can apply the extension to this architecture.  */
23244
            if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
23245
              {
23246
                as_bad (_("extension does not apply to the base architecture"));
23247
                return FALSE;
23248
              }
23249
 
23250
            /* Add or remove the extension.  */
23251
            if (adding_value)
23252
              ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
23253
            else
23254
              ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
23255
 
23256
            break;
23257
          }
23258
 
23259
      if (opt->name == NULL)
23260
        {
23261
          /* Did we fail to find an extension because it wasn't specified in
23262
             alphabetical order, or because it does not exist?  */
23263
 
23264
          for (opt = arm_extensions; opt->name != NULL; opt++)
23265
            if (strncmp (opt->name, str, optlen) == 0)
23266
              break;
23267
 
23268
          if (opt->name == NULL)
23269
            as_bad (_("unknown architectural extension `%s'"), str);
23270
          else
23271
            as_bad (_("architectural extensions must be specified in "
23272
                      "alphabetical order"));
23273
 
23274
          return FALSE;
23275
        }
23276
      else
23277
        {
23278
          /* We should skip the extension we've just matched the next time
23279
             round.  */
23280
          opt++;
23281
        }
23282
 
23283
      str = ext;
23284
    }
23285
 
23286
  return TRUE;
23287
}
23288
 
23289
static bfd_boolean
23290
arm_parse_cpu (char * str)
23291
{
23292
  const struct arm_cpu_option_table * opt;
23293
  char * ext = strchr (str, '+');
23294
  int optlen;
23295
 
23296
  if (ext != NULL)
23297
    optlen = ext - str;
23298
  else
23299
    optlen = strlen (str);
23300
 
23301
  if (optlen == 0)
23302
    {
23303
      as_bad (_("missing cpu name `%s'"), str);
23304
      return FALSE;
23305
    }
23306
 
23307
  for (opt = arm_cpus; opt->name != NULL; opt++)
23308
    if (strncmp (opt->name, str, optlen) == 0)
23309
      {
23310
        mcpu_cpu_opt = &opt->value;
23311
        mcpu_fpu_opt = &opt->default_fpu;
23312
        if (opt->canonical_name)
23313
          strcpy (selected_cpu_name, opt->canonical_name);
23314
        else
23315
          {
23316
            int i;
23317
 
23318
            for (i = 0; i < optlen; i++)
23319
              selected_cpu_name[i] = TOUPPER (opt->name[i]);
23320
            selected_cpu_name[i] = 0;
23321
          }
23322
 
23323
        if (ext != NULL)
23324
          return arm_parse_extension (ext, &mcpu_cpu_opt);
23325
 
23326
        return TRUE;
23327
      }
23328
 
23329
  as_bad (_("unknown cpu `%s'"), str);
23330
  return FALSE;
23331
}
23332
 
23333
static bfd_boolean
23334
arm_parse_arch (char * str)
23335
{
23336
  const struct arm_arch_option_table *opt;
23337
  char *ext = strchr (str, '+');
23338
  int optlen;
23339
 
23340
  if (ext != NULL)
23341
    optlen = ext - str;
23342
  else
23343
    optlen = strlen (str);
23344
 
23345
  if (optlen == 0)
23346
    {
23347
      as_bad (_("missing architecture name `%s'"), str);
23348
      return FALSE;
23349
    }
23350
 
23351
  for (opt = arm_archs; opt->name != NULL; opt++)
23352
    if (strncmp (opt->name, str, optlen) == 0)
23353
      {
23354
        march_cpu_opt = &opt->value;
23355
        march_fpu_opt = &opt->default_fpu;
23356
        strcpy (selected_cpu_name, opt->name);
23357
 
23358
        if (ext != NULL)
23359
          return arm_parse_extension (ext, &march_cpu_opt);
23360
 
23361
        return TRUE;
23362
      }
23363
 
23364
  as_bad (_("unknown architecture `%s'\n"), str);
23365
  return FALSE;
23366
}
23367
 
23368
static bfd_boolean
23369
arm_parse_fpu (char * str)
23370
{
23371
  const struct arm_option_fpu_value_table * opt;
23372
 
23373
  for (opt = arm_fpus; opt->name != NULL; opt++)
23374
    if (streq (opt->name, str))
23375
      {
23376
        mfpu_opt = &opt->value;
23377
        return TRUE;
23378
      }
23379
 
23380
  as_bad (_("unknown floating point format `%s'\n"), str);
23381
  return FALSE;
23382
}
23383
 
23384
static bfd_boolean
23385
arm_parse_float_abi (char * str)
23386
{
23387
  const struct arm_option_value_table * opt;
23388
 
23389
  for (opt = arm_float_abis; opt->name != NULL; opt++)
23390
    if (streq (opt->name, str))
23391
      {
23392
        mfloat_abi_opt = opt->value;
23393
        return TRUE;
23394
      }
23395
 
23396
  as_bad (_("unknown floating point abi `%s'\n"), str);
23397
  return FALSE;
23398
}
23399
 
23400
#ifdef OBJ_ELF
23401
static bfd_boolean
23402
arm_parse_eabi (char * str)
23403
{
23404
  const struct arm_option_value_table *opt;
23405
 
23406
  for (opt = arm_eabis; opt->name != NULL; opt++)
23407
    if (streq (opt->name, str))
23408
      {
23409
        meabi_flags = opt->value;
23410
        return TRUE;
23411
      }
23412
  as_bad (_("unknown EABI `%s'\n"), str);
23413
  return FALSE;
23414
}
23415
#endif
23416
 
23417
static bfd_boolean
23418
arm_parse_it_mode (char * str)
23419
{
23420
  bfd_boolean ret = TRUE;
23421
 
23422
  if (streq ("arm", str))
23423
    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
23424
  else if (streq ("thumb", str))
23425
    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
23426
  else if (streq ("always", str))
23427
    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
23428
  else if (streq ("never", str))
23429
    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
23430
  else
23431
    {
23432
      as_bad (_("unknown implicit IT mode `%s', should be "\
23433
                "arm, thumb, always, or never."), str);
23434
      ret = FALSE;
23435
    }
23436
 
23437
  return ret;
23438
}
23439
 
23440
struct arm_long_option_table arm_long_opts[] =
23441
{
23442
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
23443
   arm_parse_cpu, NULL},
23444
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
23445
   arm_parse_arch, NULL},
23446
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
23447
   arm_parse_fpu, NULL},
23448
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
23449
   arm_parse_float_abi, NULL},
23450
#ifdef OBJ_ELF
23451
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
23452
   arm_parse_eabi, NULL},
23453
#endif
23454
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
23455
   arm_parse_it_mode, NULL},
23456
  {NULL, NULL, 0, NULL}
23457
};
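/* Illustrative note, not in the original source: a typical command line
   using these long options might be
       as -mcpu=cortex-a8 -mfpu=neon -mfloat-abi=softfp file.s
   md_parse_option below matches each "-m..." string against this table
   and hands the text after the '=' to the corresponding parser.  */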
23458
 
23459
int
23460
md_parse_option (int c, char * arg)
23461
{
23462
  struct arm_option_table *opt;
23463
  const struct arm_legacy_option_table *fopt;
23464
  struct arm_long_option_table *lopt;
23465
 
23466
  switch (c)
23467
    {
23468
#ifdef OPTION_EB
23469
    case OPTION_EB:
23470
      target_big_endian = 1;
23471
      break;
23472
#endif
23473
 
23474
#ifdef OPTION_EL
23475
    case OPTION_EL:
23476
      target_big_endian = 0;
23477
      break;
23478
#endif
23479
 
23480
    case OPTION_FIX_V4BX:
23481
      fix_v4bx = TRUE;
23482
      break;
23483
 
23484
    case 'a':
23485
      /* Listing option.  Just ignore these, we don't support additional
23486
         ones.  */
23487
      return 0;
23488
 
23489
    default:
23490
      for (opt = arm_opts; opt->option != NULL; opt++)
23491
        {
23492
          if (c == opt->option[0]
23493
              && ((arg == NULL && opt->option[1] == 0)
23494
                  || streq (arg, opt->option + 1)))
23495
            {
23496
              /* If the option is deprecated, tell the user.  */
23497
              if (warn_on_deprecated && opt->deprecated != NULL)
23498
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23499
                           arg ? arg : "", _(opt->deprecated));
23500
 
23501
              if (opt->var != NULL)
23502
                *opt->var = opt->value;
23503
 
23504
              return 1;
23505
            }
23506
        }
23507
 
23508
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
23509
        {
23510
          if (c == fopt->option[0]
23511
              && ((arg == NULL && fopt->option[1] == 0)
23512
                  || streq (arg, fopt->option + 1)))
23513
            {
23514
              /* If the option is deprecated, tell the user.  */
23515
              if (warn_on_deprecated && fopt->deprecated != NULL)
23516
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23517
                           arg ? arg : "", _(fopt->deprecated));
23518
 
23519
              if (fopt->var != NULL)
23520
                *fopt->var = &fopt->value;
23521
 
23522
              return 1;
23523
            }
23524
        }
23525
 
23526
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23527
        {
23528
          /* These options are expected to have an argument.  */
23529
          if (c == lopt->option[0]
23530
              && arg != NULL
23531
              && strncmp (arg, lopt->option + 1,
23532
                          strlen (lopt->option + 1)) == 0)
23533
            {
23534
              /* If the option is deprecated, tell the user.  */
23535
              if (warn_on_deprecated && lopt->deprecated != NULL)
23536
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
23537
                           _(lopt->deprecated));
23538
 
23539
              /* Call the sub-option parser.  */
23540
              return lopt->func (arg + strlen (lopt->option) - 1);
23541
            }
23542
        }
23543
 
23544
      return 0;
23545
    }
23546
 
23547
  return 1;
23548
}
23549
 
23550
void
23551
md_show_usage (FILE * fp)
23552
{
23553
  struct arm_option_table *opt;
23554
  struct arm_long_option_table *lopt;
23555
 
23556
  fprintf (fp, _(" ARM-specific assembler options:\n"));
23557
 
23558
  for (opt = arm_opts; opt->option != NULL; opt++)
23559
    if (opt->help != NULL)
23560
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
23561
 
23562
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23563
    if (lopt->help != NULL)
23564
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
23565
 
23566
#ifdef OPTION_EB
23567
  fprintf (fp, _("\
23568
  -EB                     assemble code for a big-endian cpu\n"));
23569
#endif
23570
 
23571
#ifdef OPTION_EL
23572
  fprintf (fp, _("\
23573
  -EL                     assemble code for a little-endian cpu\n"));
23574
#endif
23575
 
23576
  fprintf (fp, _("\
23577
  --fix-v4bx              Allow BX in ARMv4 code\n"));
23578
}
23579
 
23580
 
23581
#ifdef OBJ_ELF
23582
typedef struct
23583
{
23584
  int val;
23585
  arm_feature_set flags;
23586
} cpu_arch_ver_table;
23587
 
23588
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
23589
   least features first.  */
23590
static const cpu_arch_ver_table cpu_arch_ver[] =
23591
{
23592
    {1, ARM_ARCH_V4},
23593
    {2, ARM_ARCH_V4T},
23594
    {3, ARM_ARCH_V5},
23595
    {3, ARM_ARCH_V5T},
23596
    {4, ARM_ARCH_V5TE},
23597
    {5, ARM_ARCH_V5TEJ},
23598
    {6, ARM_ARCH_V6},
23599
    {9, ARM_ARCH_V6K},
23600
    {7, ARM_ARCH_V6Z},
23601
    {11, ARM_ARCH_V6M},
23602
    {12, ARM_ARCH_V6SM},
23603
    {8, ARM_ARCH_V6T2},
23604
    {10, ARM_ARCH_V7A},
23605
    {10, ARM_ARCH_V7R},
23606
    {10, ARM_ARCH_V7M},
23607
    {0, ARM_ARCH_NONE}
23608
};
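/* Illustrative note, not in the original source: the lookup loop in
   aeabi_set_public_attributes keeps the last entry that still shares a
   feature with the flags in use, clearing those features as it goes.
   For code assembled with -march=armv5te, for example, the last entry
   to contribute a new feature is {4, ARM_ARCH_V5TE}, so Tag_CPU_arch
   becomes 4.  */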
23609
 
23610
/* Set an attribute if it has not already been set by the user.  */
23611
static void
23612
aeabi_set_attribute_int (int tag, int value)
23613
{
23614
  if (tag < 1
23615
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23616
      || !attributes_set_explicitly[tag])
23617
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
23618
}
23619
 
23620
static void
23621
aeabi_set_attribute_string (int tag, const char *value)
23622
{
23623
  if (tag < 1
23624
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23625
      || !attributes_set_explicitly[tag])
23626
    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
23627
}
23628
 
23629
/* Set the public EABI object attributes.  */
23630
static void
23631
aeabi_set_public_attributes (void)
23632
{
23633
  int arch;
23634
  int virt_sec = 0;
23635
  arm_feature_set flags;
23636
  arm_feature_set tmp;
23637
  const cpu_arch_ver_table *p;
23638
 
23639
  /* Choose the architecture based on the capabilities of the requested cpu
23640
     (if any) and/or the instructions actually used.  */
23641
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
23642
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
23643
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
23644
  /* Allow the user to override the reported architecture.  */
23645
  if (object_arch)
23646
    {
23647
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
23648
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
23649
    }
23650
 
23651
  /* We need to make sure that the attributes do not identify us as v6S-M
23652
     when the only v6S-M feature in use is the Operating System Extensions.  */
23653
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
23654
      if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
23655
        ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
23656
 
23657
  tmp = flags;
23658
  arch = 0;
23659
  for (p = cpu_arch_ver; p->val; p++)
23660
    {
23661
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
23662
        {
23663
          arch = p->val;
23664
          ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
23665
        }
23666
    }
23667
 
23668
  /* The table lookup above finds the last architecture to contribute
23669
     a new feature.  Unfortunately, Tag13 is a subset of the union of
23670
     v6T2 and v7-M, so it is never seen as contributing a new feature.
23671
     We can not search for the last entry which is entirely used,
23672
     because if no CPU is specified we build up only those flags
23673
     actually used.  Perhaps we should separate out the specified
23674
     and implicit cases.  Avoid taking this path for -march=all by
23675
     checking for contradictory v7-A / v7-M features.  */
23676
  if (arch == 10
23677
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
23678
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
23679
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
23680
    arch = 13;
23681
 
23682
  /* Tag_CPU_name.  */
23683
  if (selected_cpu_name[0])
23684
    {
23685
      char *q;
23686
 
23687
      q = selected_cpu_name;
23688
      if (strncmp (q, "armv", 4) == 0)
23689
        {
23690
          int i;
23691
 
23692
          q += 4;
23693
          for (i = 0; q[i]; i++)
23694
            q[i] = TOUPPER (q[i]);
23695
        }
23696
      aeabi_set_attribute_string (Tag_CPU_name, q);
23697
    }
23698
 
23699
  /* Tag_CPU_arch.  */
23700
  aeabi_set_attribute_int (Tag_CPU_arch, arch);
23701
 
23702
  /* Tag_CPU_arch_profile.  */
23703
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
23704
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
23705
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
23706
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
23707
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
23708
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');
23709
 
23710
  /* Tag_ARM_ISA_use.  */
23711
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
23712
      || arch == 0)
23713
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
23714
 
23715
  /* Tag_THUMB_ISA_use.  */
23716
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
23717
      || arch == 0)
23718
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
23719
        ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
23720
 
23721
  /* Tag_VFP_arch.  */
23722
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
23723
    aeabi_set_attribute_int (Tag_VFP_arch,
23724
                             ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
23725
                             ? 5 : 6);
23726
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
23727
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
23728
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
23729
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
23730
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
23731
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
23732
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
23733
           || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
23734
    aeabi_set_attribute_int (Tag_VFP_arch, 1);
23735
 
23736
  /* Tag_ABI_HardFP_use.  */
23737
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
23738
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
23739
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
23740
 
23741
  /* Tag_WMMX_arch.  */
23742
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
23743
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
23744
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
23745
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);
23746
 
23747
  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
23748
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
23749
    aeabi_set_attribute_int
23750
      (Tag_Advanced_SIMD_arch, (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma)
23751
                                ? 2 : 1));
23752
 
23753
  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
23754
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16))
23755
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
23756
 
23757
  /* Tag_DIV_use.  */
23758
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv))
23759
    aeabi_set_attribute_int (Tag_DIV_use, 2);
23760
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_div))
23761
    aeabi_set_attribute_int (Tag_DIV_use, 0);
23762
  else
23763
    aeabi_set_attribute_int (Tag_DIV_use, 1);
23764
 
23765
  /* Tag_MPextension_use.  */
23766
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
23767
    aeabi_set_attribute_int (Tag_MPextension_use, 1);
23768
 
23769
  /* Tag_Virtualization_use.  */
23770
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
23771
    virt_sec |= 1;
23772
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
23773
    virt_sec |= 2;
23774
  if (virt_sec != 0)
23775
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
23776
}
23777
 
23778
/* Add the default contents for the .ARM.attributes section.  */
23779
void
23780
arm_md_end (void)
23781
{
23782
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
23783
    return;
23784
 
23785
  aeabi_set_public_attributes ();
23786
}
23787
#endif /* OBJ_ELF */
23788
 
23789
 
23790
/* Parse a .cpu directive.  */
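/* Illustrative example, not in the original source:
       .cpu cortex-a8
   selects the "cortex-a8" entry from ARM_CPUS above, records the
   canonical name "Cortex-A8" (later used for Tag_CPU_name) and
   recomputes CPU_VARIANT from the new CPU and the current FPU.  */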
23791
 
23792
static void
23793
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
23794
{
23795
  const struct arm_cpu_option_table *opt;
23796
  char *name;
23797
  char saved_char;
23798
 
23799
  name = input_line_pointer;
23800
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23801
    input_line_pointer++;
23802
  saved_char = *input_line_pointer;
23803
  *input_line_pointer = 0;
23804
 
23805
  /* Skip the first "all" entry.  */
23806
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
23807
    if (streq (opt->name, name))
23808
      {
23809
        mcpu_cpu_opt = &opt->value;
23810
        selected_cpu = opt->value;
23811
        if (opt->canonical_name)
23812
          strcpy (selected_cpu_name, opt->canonical_name);
23813
        else
23814
          {
23815
            int i;
23816
            for (i = 0; opt->name[i]; i++)
23817
              selected_cpu_name[i] = TOUPPER (opt->name[i]);
23818
            selected_cpu_name[i] = 0;
23819
          }
23820
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23821
        *input_line_pointer = saved_char;
23822
        demand_empty_rest_of_line ();
23823
        return;
23824
      }
23825
  as_bad (_("unknown cpu `%s'"), name);
23826
  *input_line_pointer = saved_char;
23827
  ignore_rest_of_line ();
23828
}
23829
 
23830
 
23831
/* Parse a .arch directive.  */
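/* Illustrative example, not in the original source:
       .arch armv7-a
   switches the selected architecture to ARM_ARCH_V7A from this point
   on; unlike .cpu it copies the table name itself ("armv7-a") into
   SELECTED_CPU_NAME rather than a canonical CPU name.  */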
23832
 
23833
static void
23834
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
23835
{
23836
  const struct arm_arch_option_table *opt;
23837
  char saved_char;
23838
  char *name;
23839
 
23840
  name = input_line_pointer;
23841
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23842
    input_line_pointer++;
23843
  saved_char = *input_line_pointer;
23844
  *input_line_pointer = 0;
23845
 
23846
  /* Skip the first "all" entry.  */
23847
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
23848
    if (streq (opt->name, name))
23849
      {
23850
        mcpu_cpu_opt = &opt->value;
23851
        selected_cpu = opt->value;
23852
        strcpy (selected_cpu_name, opt->name);
23853
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23854
        *input_line_pointer = saved_char;
23855
        demand_empty_rest_of_line ();
23856
        return;
23857
      }
23858
 
23859
  as_bad (_("unknown architecture `%s'\n"), name);
23860
  *input_line_pointer = saved_char;
23861
  ignore_rest_of_line ();
23862
}
23863
 
23864
 
23865
/* Parse a .object_arch directive.  */
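/* Illustrative example, not in the original source:
       .object_arch armv4
   overrides only the architecture recorded in the object attributes
   (via OBJECT_ARCH in aeabi_set_public_attributes); it does not change
   which instructions the assembler accepts.  */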
23866
 
23867
static void
23868
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
23869
{
23870
  const struct arm_arch_option_table *opt;
23871
  char saved_char;
23872
  char *name;
23873
 
23874
  name = input_line_pointer;
23875
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23876
    input_line_pointer++;
23877
  saved_char = *input_line_pointer;
23878
  *input_line_pointer = 0;
23879
 
23880
  /* Skip the first "all" entry.  */
23881
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
23882
    if (streq (opt->name, name))
23883
      {
23884
        object_arch = &opt->value;
23885
        *input_line_pointer = saved_char;
23886
        demand_empty_rest_of_line ();
23887
        return;
23888
      }
23889
 
23890
  as_bad (_("unknown architecture `%s'\n"), name);
23891
  *input_line_pointer = saved_char;
23892
  ignore_rest_of_line ();
23893
}
23894
 
23895
/* Parse a .arch_extension directive.  */
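/* Illustrative example, not in the original source:
       .arch_extension sec
       .arch_extension nomp
   adds the Security Extensions to, and removes the MP extension from,
   the currently selected CPU, provided the entry in ARM_EXTENSIONS
   allows it for the current base architecture.  */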
23896
 
23897
static void
23898
s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
23899
{
23900
  const struct arm_option_extension_value_table *opt;
23901
  char saved_char;
23902
  char *name;
23903
  int adding_value = 1;
23904
 
23905
  name = input_line_pointer;
23906
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23907
    input_line_pointer++;
23908
  saved_char = *input_line_pointer;
23909
  *input_line_pointer = 0;
23910
 
23911
  if (strlen (name) >= 2
23912
      && strncmp (name, "no", 2) == 0)
23913
    {
23914
      adding_value = 0;
23915
      name += 2;
23916
    }
23917
 
23918
  for (opt = arm_extensions; opt->name != NULL; opt++)
23919
    if (streq (opt->name, name))
23920
      {
23921
        if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
23922
          {
23923
            as_bad (_("architectural extension `%s' is not allowed for the "
23924
                      "current base architecture"), name);
23925
            break;
23926
          }
23927
 
23928
        if (adding_value)
23929
          ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
23930
        else
23931
          ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
23932
 
23933
        mcpu_cpu_opt = &selected_cpu;
23934
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23935
        *input_line_pointer = saved_char;
23936
        demand_empty_rest_of_line ();
23937
        return;
23938
      }
23939
 
23940
  if (opt->name == NULL)
23941
    as_bad (_("unknown architecture `%s'\n"), name);
23942
 
23943
  *input_line_pointer = saved_char;
23944
  ignore_rest_of_line ();
23945
}
23946
 
23947
/* Parse a .fpu directive.  */
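/* Illustrative example, not in the original source:
       .fpu neon-vfpv4
   selects FPU_ARCH_NEON_VFP_V4 from ARM_FPUS above and recomputes
   CPU_VARIANT, so VFPv4 and Neon instructions are accepted from this
   point on.  */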
23948
 
23949
static void
23950
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
23951
{
23952
  const struct arm_option_fpu_value_table *opt;
23953
  char saved_char;
23954
  char *name;
23955
 
23956
  name = input_line_pointer;
23957
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
23958
    input_line_pointer++;
23959
  saved_char = *input_line_pointer;
23960
  *input_line_pointer = 0;
23961
 
23962
  for (opt = arm_fpus; opt->name != NULL; opt++)
23963
    if (streq (opt->name, name))
23964
      {
23965
        mfpu_opt = &opt->value;
23966
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23967
        *input_line_pointer = saved_char;
23968
        demand_empty_rest_of_line ();
23969
        return;
23970
      }
23971
 
23972
  as_bad (_("unknown floating point format `%s'\n"), name);
23973
  *input_line_pointer = saved_char;
23974
  ignore_rest_of_line ();
23975
}
23976
 
23977
/* Copy symbol information.  */
23978
 
23979
void
23980
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
23981
{
23982
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
23983
}
23984
 
23985
#ifdef OBJ_ELF
23986
/* Given a symbolic attribute NAME, return the proper integer value.
23987
   Returns -1 if the attribute is not known.  */
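/* Illustrative example, not in the original source: this allows a
   directive such as
       .eabi_attribute Tag_CPU_arch, 10
   to use the symbolic tag name; the lookup below maps the string
   "Tag_CPU_arch" to its numeric tag value.  */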
23988
 
23989
int
23990
arm_convert_symbolic_attribute (const char *name)
23991
{
23992
  static const struct
23993
  {
23994
    const char * name;
23995
    const int    tag;
23996
  }
23997
  attribute_table[] =
23998
    {
23999
      /* When you modify this table you should
24000
         also modify the list in doc/c-arm.texi.  */
24001
#define T(tag) {#tag, tag}
24002
      T (Tag_CPU_raw_name),
24003
      T (Tag_CPU_name),
24004
      T (Tag_CPU_arch),
24005
      T (Tag_CPU_arch_profile),
24006
      T (Tag_ARM_ISA_use),
24007
      T (Tag_THUMB_ISA_use),
24008
      T (Tag_FP_arch),
24009
      T (Tag_VFP_arch),
24010
      T (Tag_WMMX_arch),
24011
      T (Tag_Advanced_SIMD_arch),
24012
      T (Tag_PCS_config),
24013
      T (Tag_ABI_PCS_R9_use),
24014
      T (Tag_ABI_PCS_RW_data),
24015
      T (Tag_ABI_PCS_RO_data),
24016
      T (Tag_ABI_PCS_GOT_use),
24017
      T (Tag_ABI_PCS_wchar_t),
24018
      T (Tag_ABI_FP_rounding),
24019
      T (Tag_ABI_FP_denormal),
24020
      T (Tag_ABI_FP_exceptions),
24021
      T (Tag_ABI_FP_user_exceptions),
24022
      T (Tag_ABI_FP_number_model),
24023
      T (Tag_ABI_align_needed),
24024
      T (Tag_ABI_align8_needed),
24025
      T (Tag_ABI_align_preserved),
24026
      T (Tag_ABI_align8_preserved),
24027
      T (Tag_ABI_enum_size),
24028
      T (Tag_ABI_HardFP_use),
24029
      T (Tag_ABI_VFP_args),
24030
      T (Tag_ABI_WMMX_args),
24031
      T (Tag_ABI_optimization_goals),
24032
      T (Tag_ABI_FP_optimization_goals),
24033
      T (Tag_compatibility),
24034
      T (Tag_CPU_unaligned_access),
24035
      T (Tag_FP_HP_extension),
24036
      T (Tag_VFP_HP_extension),
24037
      T (Tag_ABI_FP_16bit_format),
24038
      T (Tag_MPextension_use),
24039
      T (Tag_DIV_use),
24040
      T (Tag_nodefaults),
24041
      T (Tag_also_compatible_with),
24042
      T (Tag_conformance),
24043
      T (Tag_T2EE_use),
24044
      T (Tag_Virtualization_use),
24045
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
24046
#undef T
24047
    };
24048
  unsigned int i;
24049
 
24050
  if (name == NULL)
24051
    return -1;
24052
 
24053
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
24054
    if (streq (name, attribute_table[i].name))
24055
      return attribute_table[i].tag;
24056
 
24057
  return -1;
24058
}
24059
 
24060
 
24061
/* Apply the symbol value for a relocation only when it refers to a
24062
   local symbol and the selected architecture has the feature required
24063
   for BLX and simple mode switches.  */
24064
int
24065
arm_apply_sym_value (struct fix * fixP)
24066
{
24067
  if (fixP->fx_addsy
24068
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24069
      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
24070
    {
24071
      switch (fixP->fx_r_type)
24072
        {
24073
        case BFD_RELOC_ARM_PCREL_BLX:
24074
        case BFD_RELOC_THUMB_PCREL_BRANCH23:
24075
          if (ARM_IS_FUNC (fixP->fx_addsy))
24076
            return 1;
24077
          break;
24078
 
24079
        case BFD_RELOC_ARM_PCREL_CALL:
24080
        case BFD_RELOC_THUMB_PCREL_BLX:
24081
          if (THUMB_IS_FUNC (fixP->fx_addsy))
24082
              return 1;
24083
          break;
24084
 
24085
        default:
24086
          break;
24087
        }
24088
 
24089
    }
24090
  return 0;
24091
}
24092
#endif /* OBJ_ELF */
